summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore1
-rw-r--r--Documentation/acpi/apei/einj.txt49
-rw-r--r--Documentation/cpu-freq/governors.txt4
-rw-r--r--Documentation/cpu-freq/pcc-cpufreq.txt207
-rw-r--r--Documentation/dontdiff1
-rw-r--r--Documentation/driver-model/platform.txt2
-rw-r--r--Documentation/edac.txt153
-rw-r--r--Documentation/feature-removal-schedule.txt29
-rw-r--r--Documentation/filesystems/00-INDEX2
-rw-r--r--Documentation/filesystems/ceph.txt139
-rw-r--r--Documentation/filesystems/logfs.txt241
-rw-r--r--Documentation/filesystems/proc.txt2
-rw-r--r--Documentation/hwlat_detector.txt64
-rw-r--r--Documentation/hwmon/amc6821102
-rw-r--r--Documentation/hwmon/k10temp17
-rw-r--r--Documentation/ioctl/ioctl-number.txt204
-rw-r--r--Documentation/kernel-doc-nano-HOWTO.txt12
-rw-r--r--Documentation/kernel-parameters.txt13
-rw-r--r--Documentation/kvm/api.txt12
-rw-r--r--Documentation/lguest/lguest.c1
-rw-r--r--Documentation/networking/3c509.txt12
-rw-r--r--Documentation/networking/ip-sysctl.txt19
-rwxr-xr-xDocumentation/networking/ixgbevf.txt90
-rw-r--r--Documentation/networking/regulatory.txt24
-rw-r--r--Documentation/networking/timestamping/timestamping.c2
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/can.txt53
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/mpc5200.txt9
-rw-r--r--Documentation/sound/alsa/ALSA-Configuration.txt22
-rw-r--r--Documentation/trace/ring-buffer-design.txt56
-rw-r--r--Documentation/video4linux/gspca.txt1
-rw-r--r--Documentation/video4linux/videobuf341
-rw-r--r--MAINTAINERS75
-rw-r--r--Makefile3
-rw-r--r--arch/alpha/include/asm/local.h17
-rw-r--r--arch/arm/Kconfig19
-rw-r--r--arch/arm/boot/compressed/Makefile34
-rw-r--r--arch/arm/boot/compressed/misc.c116
-rw-r--r--arch/arm/boot/compressed/piggy.gzip.S6
-rw-r--r--arch/arm/boot/compressed/piggy.lzo.S (renamed from arch/arm/boot/compressed/piggy.S)2
-rw-r--r--arch/arm/common/clkdev.c10
-rw-r--r--arch/arm/common/dmabounce.c4
-rw-r--r--arch/arm/configs/raumfeld_defconfig1898
-rw-r--r--arch/arm/configs/u300_defconfig81
-rw-r--r--arch/arm/include/asm/cacheflush.h56
-rw-r--r--arch/arm/include/asm/clkdev.h3
-rw-r--r--arch/arm/include/asm/cpu.h1
-rw-r--r--arch/arm/include/asm/dma-mapping.h79
-rw-r--r--arch/arm/include/asm/dma.h4
-rw-r--r--arch/arm/include/asm/entry-macro-vic2.S57
-rw-r--r--arch/arm/include/asm/io.h15
-rw-r--r--arch/arm/include/asm/kgdb.h11
-rw-r--r--arch/arm/include/asm/kmap_types.h1
-rw-r--r--arch/arm/include/asm/mach/time.h8
-rw-r--r--arch/arm/include/asm/page.h7
-rw-r--r--arch/arm/include/asm/ptrace.h6
-rw-r--r--arch/arm/include/asm/setup.h12
-rw-r--r--arch/arm/include/asm/smp_plat.h5
-rw-r--r--arch/arm/include/asm/unistd.h1
-rw-r--r--arch/arm/kernel/Makefile1
-rw-r--r--arch/arm/kernel/asm-offsets.c5
-rw-r--r--arch/arm/kernel/entry-armv.S4
-rw-r--r--arch/arm/kernel/kgdb.c13
-rw-r--r--arch/arm/kernel/leds.c115
-rw-r--r--arch/arm/kernel/process.c3
-rw-r--r--arch/arm/kernel/setup.c77
-rw-r--r--arch/arm/kernel/time.c178
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/kernel/vmlinux.lds.S4
-rw-r--r--arch/arm/mach-bcmring/core.c3
-rw-r--r--arch/arm/mach-davinci/Kconfig4
-rw-r--r--arch/arm/mach-davinci/Makefile3
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c4
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c34
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c11
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c143
-rw-r--r--arch/arm/mach-davinci/cdce949.c289
-rw-r--r--arch/arm/mach-davinci/clock.c71
-rw-r--r--arch/arm/mach-davinci/clock.h25
-rw-r--r--arch/arm/mach-davinci/common.c2
-rw-r--r--arch/arm/mach-davinci/cp_intc.c11
-rw-r--r--arch/arm/mach-davinci/cpuidle.c38
-rw-r--r--arch/arm/mach-davinci/da830.c8
-rw-r--r--arch/arm/mach-davinci/da850.c82
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c155
-rw-r--r--arch/arm/mach-davinci/dm355.c8
-rw-r--r--arch/arm/mach-davinci/dm365.c3
-rw-r--r--arch/arm/mach-davinci/dm644x.c10
-rw-r--r--arch/arm/mach-davinci/dm646x.c12
-rw-r--r--arch/arm/mach-davinci/dma.c67
-rw-r--r--arch/arm/mach-davinci/include/mach/cdce949.h19
-rw-r--r--arch/arm/mach-davinci/include/mach/cpuidle.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/da8xx.h18
-rw-r--r--arch/arm/mach-davinci/include/mach/dm365.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/dm644x.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/dm646x.h4
-rw-r--r--arch/arm/mach-davinci/include/mach/edma.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/i2c.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/keyscan.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/memory.h5
-rw-r--r--arch/arm/mach-davinci/include/mach/mux.h1
-rw-r--r--arch/arm/mach-davinci/include/mach/pm.h54
-rw-r--r--arch/arm/mach-davinci/include/mach/psc.h15
-rw-r--r--arch/arm/mach-davinci/include/mach/timex.h7
-rw-r--r--arch/arm/mach-davinci/io.c2
-rw-r--r--arch/arm/mach-davinci/pm.c158
-rw-r--r--arch/arm/mach-davinci/psc.c11
-rw-r--r--arch/arm/mach-davinci/sleep.S224
-rw-r--r--arch/arm/mach-ep93xx/clock.c30
-rw-r--r--arch/arm/mach-ep93xx/core.c9
-rw-r--r--arch/arm/mach-ep93xx/dma-m2p.c6
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c21
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h7
-rw-r--r--arch/arm/mach-ep93xx/include/mach/ts72xx.h19
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c188
-rw-r--r--arch/arm/mach-footbridge/common.c7
-rw-r--r--arch/arm/mach-integrator/core.c3
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c4
-rw-r--r--arch/arm/mach-iop13xx/io.c7
-rw-r--r--arch/arm/mach-lh7a40x/clocks.c2
-rw-r--r--arch/arm/mach-mmp/clock.c8
-rw-r--r--arch/arm/mach-mmp/clock.h2
-rw-r--r--arch/arm/mach-mmp/pxa168.c2
-rw-r--r--arch/arm/mach-mmp/pxa910.c2
-rw-r--r--arch/arm/mach-mmp/ttc_dkb.c2
-rw-r--r--arch/arm/mach-msm/Kconfig671
-rw-r--r--arch/arm/mach-msm/Makefile75
-rw-r--r--arch/arm/mach-msm/Makefile.boot18
-rw-r--r--arch/arm/mach-msm/acpuclock-8x50.c723
-rw-r--r--arch/arm/mach-msm/acpuclock.c915
-rw-r--r--arch/arm/mach-msm/acpuclock.h39
-rw-r--r--arch/arm/mach-msm/avs.c323
-rw-r--r--arch/arm/mach-msm/avs.h52
-rw-r--r--arch/arm/mach-msm/avs_hw.S158
-rw-r--r--arch/arm/mach-msm/board-comet.c488
-rw-r--r--arch/arm/mach-msm/board-halibut-keypad.c177
-rw-r--r--arch/arm/mach-msm/board-halibut.c704
-rw-r--r--arch/arm/mach-msm/board-msm7x27.c1195
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c209
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c538
-rw-r--r--arch/arm/mach-msm/board-sapphire-gpio.c326
-rw-r--r--arch/arm/mach-msm/board-sapphire-h2w.c545
-rw-r--r--arch/arm/mach-msm/board-sapphire-keypad.c122
-rw-r--r--arch/arm/mach-msm/board-sapphire-mmc.c479
-rw-r--r--arch/arm/mach-msm/board-sapphire-panel.c656
-rw-r--r--arch/arm/mach-msm/board-sapphire-rfkill.c99
-rw-r--r--arch/arm/mach-msm/board-sapphire-wifi.c74
-rw-r--r--arch/arm/mach-msm/board-sapphire.c1175
-rw-r--r--arch/arm/mach-msm/board-sapphire.h219
-rw-r--r--arch/arm/mach-msm/board-trout-gpio.c305
-rw-r--r--arch/arm/mach-msm/board-trout-keypad.c345
-rw-r--r--arch/arm/mach-msm/board-trout-mmc.c437
-rw-r--r--arch/arm/mach-msm/board-trout-panel.c642
-rw-r--r--arch/arm/mach-msm/board-trout-rfkill.c93
-rw-r--r--arch/arm/mach-msm/board-trout-wifi.c74
-rw-r--r--arch/arm/mach-msm/board-trout.c841
-rw-r--r--arch/arm/mach-msm/board-trout.h162
-rw-r--r--arch/arm/mach-msm/clock-7x01a.c126
-rw-r--r--arch/arm/mach-msm/clock-7x30.c957
-rw-r--r--arch/arm/mach-msm/clock-7x30.h143
-rw-r--r--arch/arm/mach-msm/clock-pcom.c130
-rw-r--r--arch/arm/mach-msm/clock-pcom.h147
-rw-r--r--arch/arm/mach-msm/clock.c332
-rw-r--r--arch/arm/mach-msm/clock.h69
-rw-r--r--arch/arm/mach-msm/cpufreq.c126
-rw-r--r--arch/arm/mach-msm/dal.c1363
-rw-r--r--arch/arm/mach-msm/dal_remotetest.c454
-rw-r--r--arch/arm/mach-msm/dal_remotetest.h187
-rw-r--r--arch/arm/mach-msm/devices.c358
-rw-r--r--arch/arm/mach-msm/devices.h24
-rw-r--r--arch/arm/mach-msm/devices_htc.c450
-rw-r--r--arch/arm/mach-msm/dma.c81
-rw-r--r--arch/arm/mach-msm/dma_test.c404
-rw-r--r--arch/arm/mach-msm/fiq_glue.S64
-rw-r--r--arch/arm/mach-msm/generic_gpio.c274
-rw-r--r--arch/arm/mach-msm/gpio.c746
-rw-r--r--arch/arm/mach-msm/gpio.h35
-rw-r--r--arch/arm/mach-msm/gpio_chip.h61
-rw-r--r--arch/arm/mach-msm/gpio_hw-7x30.h112
-rw-r--r--arch/arm/mach-msm/gpio_hw-7xxx.h73
-rw-r--r--arch/arm/mach-msm/gpio_hw-8xxx.h112
-rw-r--r--arch/arm/mach-msm/gpio_hw.h49
-rw-r--r--arch/arm/mach-msm/htc_acoustic.c239
-rw-r--r--arch/arm/mach-msm/htc_akm_cal.c64
-rw-r--r--arch/arm/mach-msm/htc_battery.c769
-rw-r--r--arch/arm/mach-msm/htc_headset.c1246
-rw-r--r--arch/arm/mach-msm/htc_pwrsink.c281
-rw-r--r--arch/arm/mach-msm/htc_wifi_nvs.c56
-rw-r--r--arch/arm/mach-msm/idle-v6.S177
-rw-r--r--arch/arm/mach-msm/idle-v7.S175
-rw-r--r--arch/arm/mach-msm/idle.S36
-rw-r--r--arch/arm/mach-msm/idle.h36
-rw-r--r--arch/arm/mach-msm/include/mach/board.h30
-rw-r--r--arch/arm/mach-msm/include/mach/board_htc.h78
-rw-r--r--arch/arm/mach-msm/include/mach/camera.h297
-rw-r--r--arch/arm/mach-msm/include/mach/clk.h53
-rw-r--r--arch/arm/mach-msm/include/mach/dal.h155
-rw-r--r--arch/arm/mach-msm/include/mach/dma.h40
-rw-r--r--arch/arm/mach-msm/include/mach/dma_test.h67
-rw-r--r--arch/arm/mach-msm/include/mach/fiq.h33
-rw-r--r--arch/arm/mach-msm/include/mach/gpio.h167
-rw-r--r--arch/arm/mach-msm/include/mach/htc_headset.h173
-rw-r--r--arch/arm/mach-msm/include/mach/htc_pwrsink.h87
-rw-r--r--arch/arm/mach-msm/include/mach/internal_power_rail.h46
-rw-r--r--arch/arm/mach-msm/include/mach/io.h2
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-7x30.h158
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-7xxx.h76
-rw-r--r--arch/arm/mach-msm/include/mach/irqs-8xxx.h104
-rw-r--r--arch/arm/mach-msm/include/mach/irqs.h81
-rw-r--r--arch/arm/mach-msm/include/mach/memory.h29
-rw-r--r--arch/arm/mach-msm/include/mach/mmc.h18
-rw-r--r--arch/arm/mach-msm/include/mach/mpp.h73
-rw-r--r--arch/arm/mach-msm/include/mach/msm_handset.h34
-rw-r--r--arch/arm/mach-msm/include/mach/msm_i2ckbd.h41
-rw-r--r--arch/arm/mach-msm/include/mach/msm_iomap.h102
-rw-r--r--arch/arm/mach-msm/include/mach/msm_otg.h72
-rw-r--r--arch/arm/mach-msm/include/mach/msm_rpcrouter.h296
-rw-r--r--arch/arm/mach-msm/include/mach/msm_smd.h82
-rw-r--r--arch/arm/mach-msm/include/mach/msm_touch.h26
-rw-r--r--arch/arm/mach-msm/include/mach/msm_touchpad.h37
-rw-r--r--arch/arm/mach-msm/include/mach/oem_rapi_client.h91
-rw-r--r--arch/arm/mach-msm/include/mach/pmic.h557
-rw-r--r--arch/arm/mach-msm/include/mach/remote_spinlock.h115
-rw-r--r--arch/arm/mach-msm/include/mach/rpc_hsusb.h45
-rw-r--r--arch/arm/mach-msm/include/mach/sirc.h103
-rw-r--r--arch/arm/mach-msm/include/mach/smem_log.h245
-rw-r--r--arch/arm/mach-msm/include/mach/system.h5
-rw-r--r--arch/arm/mach-msm/include/mach/uncompress.h1
-rw-r--r--arch/arm/mach-msm/include/mach/usbdiag.h44
-rw-r--r--arch/arm/mach-msm/include/mach/vmalloc.h7
-rw-r--r--arch/arm/mach-msm/include/mach/vreg.h2
-rw-r--r--arch/arm/mach-msm/internal_power_rail.c90
-rw-r--r--arch/arm/mach-msm/io.c128
-rw-r--r--arch/arm/mach-msm/irq.c535
-rw-r--r--arch/arm/mach-msm/irq.h43
-rw-r--r--arch/arm/mach-msm/jtag-v7.S117
-rw-r--r--arch/arm/mach-msm/keypad-surf-ffa.c316
-rw-r--r--arch/arm/mach-msm/keypad-surf-ffa.h29
-rw-r--r--arch/arm/mach-msm/memory.c86
-rw-r--r--arch/arm/mach-msm/modem_notifier.c237
-rw-r--r--arch/arm/mach-msm/modem_notifier.h48
-rw-r--r--arch/arm/mach-msm/mpp.c176
-rw-r--r--arch/arm/mach-msm/msm-keypad-devices.h22
-rw-r--r--arch/arm/mach-msm/msm_vibrator.c137
-rw-r--r--arch/arm/mach-msm/nand_partitions.c187
-rw-r--r--arch/arm/mach-msm/nohlt.c86
-rw-r--r--arch/arm/mach-msm/oem_rapi_client.c489
-rw-r--r--arch/arm/mach-msm/ping_mdm_rpc_client.c772
-rw-r--r--arch/arm/mach-msm/pm.c954
-rw-r--r--arch/arm/mach-msm/pm.h42
-rw-r--r--arch/arm/mach-msm/pm2.c1666
-rw-r--r--arch/arm/mach-msm/pmic.c1095
-rw-r--r--arch/arm/mach-msm/pmic8058-gpio.c150
-rw-r--r--arch/arm/mach-msm/pmic8058-mpp.c105
-rw-r--r--arch/arm/mach-msm/pmic_debugfs.c1200
-rw-r--r--arch/arm/mach-msm/proc_comm.c93
-rw-r--r--arch/arm/mach-msm/proc_comm.h192
-rw-r--r--arch/arm/mach-msm/proc_comm_test.c169
-rw-r--r--arch/arm/mach-msm/remote_spinlock.c85
-rw-r--r--arch/arm/mach-msm/reset_modem.c226
-rw-r--r--arch/arm/mach-msm/rpc_hsusb.c574
-rw-r--r--arch/arm/mach-msm/rpc_server_dog_keepalive.c77
-rw-r--r--arch/arm/mach-msm/rpc_server_handset.c380
-rw-r--r--arch/arm/mach-msm/rpc_server_time_remote.c90
-rw-r--r--arch/arm/mach-msm/rpc_server_time_remote.h21
-rw-r--r--arch/arm/mach-msm/sirc.c239
-rw-r--r--arch/arm/mach-msm/smd.c1960
-rw-r--r--arch/arm/mach-msm/smd_ctl2.c677
-rw-r--r--arch/arm/mach-msm/smd_nmea.c249
-rw-r--r--arch/arm/mach-msm/smd_private.h256
-rw-r--r--arch/arm/mach-msm/smd_qmi.c844
-rw-r--r--arch/arm/mach-msm/smd_rpcrouter.c2199
-rw-r--r--arch/arm/mach-msm/smd_rpcrouter.h210
-rw-r--r--arch/arm/mach-msm/smd_rpcrouter_clients.c607
-rw-r--r--arch/arm/mach-msm/smd_rpcrouter_device.c380
-rw-r--r--arch/arm/mach-msm/smd_rpcrouter_servers.c434
-rw-r--r--arch/arm/mach-msm/smd_tty.c284
-rw-r--r--arch/arm/mach-msm/smem_log.c2024
-rw-r--r--arch/arm/mach-msm/socinfo.c430
-rw-r--r--arch/arm/mach-msm/socinfo.h97
-rw-r--r--arch/arm/mach-msm/timer.c714
-rw-r--r--arch/arm/mach-msm/timer.h38
-rw-r--r--arch/arm/mach-msm/vreg.c170
-rw-r--r--arch/arm/mach-mx1/clock.c4
-rw-r--r--arch/arm/mach-mx2/clock_imx21.c4
-rw-r--r--arch/arm/mach-mx2/clock_imx27.c4
-rw-r--r--arch/arm/mach-mx2/mxt_td60.c24
-rw-r--r--arch/arm/mach-mx25/clock.c8
-rw-r--r--arch/arm/mach-mx25/devices.c19
-rw-r--r--arch/arm/mach-mx25/devices.h1
-rw-r--r--arch/arm/mach-mx25/mx25pdk.c46
-rw-r--r--arch/arm/mach-mx3/Kconfig3
-rw-r--r--arch/arm/mach-mx3/clock-imx35.c4
-rw-r--r--arch/arm/mach-mx3/clock.c4
-rw-r--r--arch/arm/mach-mx3/mm.c5
-rw-r--r--arch/arm/mach-mx3/mx31ads.c5
-rw-r--r--arch/arm/mach-mx3/mx31lite.c9
-rw-r--r--arch/arm/mach-mx3/mx31moboard-devboard.c2
-rw-r--r--arch/arm/mach-mx3/mx31moboard-marxbot.c2
-rw-r--r--arch/arm/mach-mx3/mx31moboard.c7
-rw-r--r--arch/arm/mach-mx3/mx31pdk.c5
-rw-r--r--arch/arm/mach-mx3/pcm037.c32
-rw-r--r--arch/arm/mach-mxc91231/clock.c4
-rw-r--r--arch/arm/mach-nomadik/cpu-8815.c8
-rw-r--r--arch/arm/mach-omap1/Makefile2
-rw-r--r--arch/arm/mach-omap1/board-fsample.c9
-rw-r--r--arch/arm/mach-omap1/board-h2.c9
-rw-r--r--arch/arm/mach-omap1/board-h3.c9
-rw-r--r--arch/arm/mach-omap1/board-innovator.c9
-rw-r--r--arch/arm/mach-omap1/board-osk.c9
-rw-r--r--arch/arm/mach-omap1/board-palmte.c9
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c9
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c10
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c9
-rw-r--r--arch/arm/mach-omap1/board-sx1.c11
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c9
-rw-r--r--arch/arm/mach-omap1/clock_data.c10
-rw-r--r--arch/arm/mach-omap1/devices.c35
-rw-r--r--arch/arm/mach-omap1/flash.c33
-rw-r--r--arch/arm/mach-omap1/mux.c8
-rw-r--r--arch/arm/mach-omap2/Kconfig1
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c7
-rw-r--r--[-rwxr-xr-x]arch/arm/mach-omap2/board-3630sdp.c0
-rw-r--r--arch/arm/mach-omap2/board-h4.c7
-rw-r--r--[-rwxr-xr-x]arch/arm/mach-omap2/board-zoom-peripherals.c17
-rw-r--r--arch/arm/mach-omap2/board-zoom3.c14
-rw-r--r--arch/arm/mach-omap2/clock2xxx.c57
-rw-r--r--arch/arm/mach-omap2/clock34xx.c1
-rw-r--r--arch/arm/mach-omap2/clock34xx_data.c6
-rw-r--r--arch/arm/mach-omap2/clockdomain.c4
-rw-r--r--arch/arm/mach-omap2/io.c19
-rw-r--r--arch/arm/mach-omap2/mux.c41
-rw-r--r--arch/arm/mach-omap2/opp2420_data.c38
-rw-r--r--arch/arm/mach-omap2/opp2430_data.c30
-rw-r--r--arch/arm/mach-omap2/pm34xx.c4
-rw-r--r--arch/arm/mach-omap2/serial.c31
-rw-r--r--arch/arm/mach-pnx4008/clock.c173
-rw-r--r--arch/arm/mach-pnx4008/clock.h6
-rw-r--r--arch/arm/mach-pnx4008/i2c.c108
-rw-r--r--arch/arm/mach-pnx4008/include/mach/clkdev.h7
-rw-r--r--arch/arm/mach-pnx4008/include/mach/timex.h54
-rw-r--r--arch/arm/mach-pnx4008/pm.c2
-rw-r--r--arch/arm/mach-pnx4008/time.c2
-rw-r--r--arch/arm/mach-pnx4008/time.h70
-rw-r--r--arch/arm/mach-pxa/Kconfig20
-rw-r--r--arch/arm/mach-pxa/Makefile3
-rw-r--r--arch/arm/mach-pxa/clock.c8
-rw-r--r--arch/arm/mach-pxa/clock.h4
-rw-r--r--arch/arm/mach-pxa/eseries.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/hardware.h9
-rw-r--r--arch/arm/mach-pxa/include/mach/zylonite.h7
-rw-r--r--arch/arm/mach-pxa/littleton.c6
-rw-r--r--arch/arm/mach-pxa/magician.c2
-rw-r--r--arch/arm/mach-pxa/palmld.c2
-rw-r--r--arch/arm/mach-pxa/palmt5.c2
-rw-r--r--arch/arm/mach-pxa/palmtc.c2
-rw-r--r--arch/arm/mach-pxa/palmte2.c2
-rw-r--r--arch/arm/mach-pxa/palmtreo.c2
-rw-r--r--arch/arm/mach-pxa/palmtx.c2
-rw-r--r--arch/arm/mach-pxa/palmz72.c2
-rw-r--r--arch/arm/mach-pxa/poodle.c2
-rw-r--r--arch/arm/mach-pxa/pxa25x.c4
-rw-r--r--arch/arm/mach-pxa/pxa27x.c2
-rw-r--r--arch/arm/mach-pxa/pxa300.c4
-rw-r--r--arch/arm/mach-pxa/pxa320.c2
-rw-r--r--arch/arm/mach-pxa/pxa3xx.c2
-rw-r--r--arch/arm/mach-pxa/raumfeld.c1100
-rw-r--r--arch/arm/mach-pxa/spitz.c4
-rw-r--r--arch/arm/mach-pxa/viper.c2
-rw-r--r--arch/arm/mach-pxa/zeus.c36
-rw-r--r--arch/arm/mach-pxa/zylonite.c87
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa300.c12
-rw-r--r--arch/arm/mach-pxa/zylonite_pxa320.c4
-rw-r--r--arch/arm/mach-realview/core.c5
-rw-r--r--arch/arm/mach-realview/include/mach/board-pb1176.h4
-rw-r--r--arch/arm/mach-realview/include/mach/platform.h2
-rw-r--r--arch/arm/mach-realview/realview_eb.c15
-rw-r--r--arch/arm/mach-realview/realview_pb1176.c10
-rw-r--r--arch/arm/mach-realview/realview_pb11mp.c11
-rw-r--r--arch/arm/mach-realview/realview_pba8.c15
-rw-r--r--arch/arm/mach-realview/realview_pbx.c15
-rw-r--r--arch/arm/mach-s3c2410/include/mach/spi-gpio.h2
-rw-r--r--arch/arm/mach-s3c2412/clock.c52
-rw-r--r--arch/arm/mach-s3c2440/clock.c6
-rw-r--r--arch/arm/mach-s3c2442/clock.c6
-rw-r--r--arch/arm/mach-s3c2443/clock.c97
-rw-r--r--arch/arm/mach-s3c6400/include/mach/entry-macro.S28
-rw-r--r--arch/arm/mach-s3c6400/include/mach/map.h4
-rw-r--r--arch/arm/mach-s3c6400/include/mach/tick.h2
-rw-r--r--arch/arm/mach-s5pc100/setup-sdhci.c4
-rw-r--r--arch/arm/mach-u300/clock.c5
-rw-r--r--arch/arm/mach-ux500/clock.c5
-rw-r--r--arch/arm/mach-versatile/core.c3
-rw-r--r--arch/arm/mach-w90x900/clock.c9
-rw-r--r--arch/arm/mach-w90x900/clock.h1
-rw-r--r--arch/arm/mach-w90x900/cpu.c2
-rw-r--r--arch/arm/mach-w90x900/include/mach/system.h15
-rw-r--r--arch/arm/mach-w90x900/time.c64
-rw-r--r--arch/arm/mm/Makefile12
-rw-r--r--arch/arm/mm/alignment.c6
-rw-r--r--arch/arm/mm/cache-fa.S32
-rw-r--r--arch/arm/mm/cache-v3.S43
-rw-r--r--arch/arm/mm/cache-v4.S43
-rw-r--r--arch/arm/mm/cache-v4wb.S32
-rw-r--r--arch/arm/mm/cache-v4wt.S40
-rw-r--r--arch/arm/mm/cache-v6.S34
-rw-r--r--arch/arm/mm/cache-v7.S34
-rw-r--r--arch/arm/mm/cache-xsc3l2.c11
-rw-r--r--arch/arm/mm/copypage-feroceon.c3
-rw-r--r--arch/arm/mm/copypage-v3.c2
-rw-r--r--arch/arm/mm/copypage-v4mc.c2
-rw-r--r--arch/arm/mm/copypage-v4wb.c3
-rw-r--r--arch/arm/mm/copypage-v4wt.c2
-rw-r--r--arch/arm/mm/copypage-v6.c4
-rw-r--r--arch/arm/mm/copypage-xsc3.c3
-rw-r--r--arch/arm/mm/copypage-xscale.c2
-rw-r--r--arch/arm/mm/dma-mapping.c159
-rw-r--r--arch/arm/mm/flush.c51
-rw-r--r--arch/arm/mm/init.c14
-rw-r--r--arch/arm/mm/ioremap.c57
-rw-r--r--arch/arm/mm/mmu.c41
-rw-r--r--arch/arm/mm/nommu.c12
-rw-r--r--arch/arm/mm/proc-arm1020.S32
-rw-r--r--arch/arm/mm/proc-arm1020e.S32
-rw-r--r--arch/arm/mm/proc-arm1022.S32
-rw-r--r--arch/arm/mm/proc-arm1026.S32
-rw-r--r--arch/arm/mm/proc-arm920.S32
-rw-r--r--arch/arm/mm/proc-arm922.S32
-rw-r--r--arch/arm/mm/proc-arm925.S32
-rw-r--r--arch/arm/mm/proc-arm926.S32
-rw-r--r--arch/arm/mm/proc-arm940.S32
-rw-r--r--arch/arm/mm/proc-arm946.S32
-rw-r--r--arch/arm/mm/proc-feroceon.S54
-rw-r--r--arch/arm/mm/proc-mohawk.S32
-rw-r--r--arch/arm/mm/proc-xsc3.S39
-rw-r--r--arch/arm/mm/proc-xscale.S49
-rw-r--r--arch/arm/mm/tlb-v7.S1
-rw-r--r--arch/arm/plat-iop/io.c3
-rw-r--r--arch/arm/plat-mxc/include/mach/iomux-mx25.h58
-rw-r--r--arch/arm/plat-mxc/include/mach/mx25.h4
-rw-r--r--arch/arm/plat-omap/clock.c16
-rw-r--r--arch/arm/plat-omap/cpu-omap.c1
-rw-r--r--arch/arm/plat-omap/dma.c2
-rw-r--r--arch/arm/plat-omap/gpio.c63
-rw-r--r--arch/arm/plat-omap/include/plat/board.h1
-rw-r--r--arch/arm/plat-omap/include/plat/clock.h2
-rw-r--r--arch/arm/plat-omap/include/plat/control.h2
-rw-r--r--arch/arm/plat-omap/include/plat/flash.h16
-rw-r--r--arch/arm/plat-omap/include/plat/io.h36
-rw-r--r--arch/arm/plat-omap/include/plat/mux.h8
-rw-r--r--arch/arm/plat-omap/include/plat/omap7xx.h3
-rw-r--r--arch/arm/plat-omap/io.c14
-rw-r--r--arch/arm/plat-omap/iommu.c2
-rw-r--r--arch/arm/plat-omap/mcbsp.c20
-rw-r--r--arch/arm/plat-pxa/pwm.c8
-rw-r--r--arch/arm/plat-s3c/Kconfig75
-rw-r--r--arch/arm/plat-s3c/Makefile17
-rw-r--r--arch/arm/plat-s3c/include/plat/nand.h2
-rw-r--r--arch/arm/plat-s3c24xx/clock-dclk.c22
-rw-r--r--arch/arm/plat-s3c24xx/s3c244x-clock.c4
-rw-r--r--arch/arm/plat-s3c64xx/Kconfig3
-rw-r--r--arch/arm/plat-s3c64xx/clock.c10
-rw-r--r--arch/arm/plat-s3c64xx/cpu.c4
-rw-r--r--arch/arm/plat-s3c64xx/include/plat/irqs.h8
-rw-r--r--arch/arm/plat-s3c64xx/include/plat/regs-clock.h71
-rw-r--r--arch/arm/plat-s3c64xx/irq.c209
-rw-r--r--arch/arm/plat-s3c64xx/s3c6400-clock.c518
-rw-r--r--arch/arm/plat-s5pc1xx/clock.c20
-rw-r--r--arch/arm/plat-s5pc1xx/s5pc100-clock.c126
-rw-r--r--arch/arm/plat-samsung/Kconfig102
-rw-r--r--arch/arm/plat-samsung/Makefile22
-rw-r--r--arch/arm/plat-samsung/clock-clksrc.c203
-rw-r--r--arch/arm/plat-samsung/clock.c (renamed from arch/arm/plat-s3c/clock.c)55
-rw-r--r--arch/arm/plat-samsung/dev-fb.c (renamed from arch/arm/plat-s3c/dev-fb.c)0
-rw-r--r--arch/arm/plat-samsung/dev-hsmmc.c (renamed from arch/arm/plat-s3c/dev-hsmmc.c)0
-rw-r--r--arch/arm/plat-samsung/dev-hsmmc1.c (renamed from arch/arm/plat-s3c/dev-hsmmc1.c)0
-rw-r--r--arch/arm/plat-samsung/dev-hsmmc2.c (renamed from arch/arm/plat-s3c/dev-hsmmc2.c)0
-rw-r--r--arch/arm/plat-samsung/dev-i2c0.c (renamed from arch/arm/plat-s3c/dev-i2c0.c)0
-rw-r--r--arch/arm/plat-samsung/dev-i2c1.c (renamed from arch/arm/plat-s3c/dev-i2c1.c)0
-rw-r--r--arch/arm/plat-samsung/dev-nand.c (renamed from arch/arm/plat-s3c/dev-nand.c)0
-rw-r--r--arch/arm/plat-samsung/dev-usb-hsotg.c (renamed from arch/arm/plat-s3c/dev-usb-hsotg.c)0
-rw-r--r--arch/arm/plat-samsung/dev-usb.c (renamed from arch/arm/plat-s3c/dev-usb.c)0
-rw-r--r--arch/arm/plat-samsung/gpio-config.c (renamed from arch/arm/plat-s3c/gpio-config.c)0
-rw-r--r--arch/arm/plat-samsung/include/plat/clock-clksrc.h83
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h (renamed from arch/arm/plat-s3c/include/plat/clock.h)34
-rw-r--r--arch/arm/plat-samsung/include/plat/irq-uart.h20
-rw-r--r--arch/arm/plat-samsung/include/plat/irq-vic-timer.h13
-rw-r--r--arch/arm/plat-samsung/irq-uart.c143
-rw-r--r--arch/arm/plat-samsung/irq-vic-timer.c86
-rw-r--r--arch/arm/plat-samsung/pwm-clock.c (renamed from arch/arm/plat-s3c/pwm-clock.c)112
-rw-r--r--arch/arm/plat-stmp3xxx/clock.c3
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c15
-rw-r--r--arch/blackfin/include/asm/nand.h4
-rw-r--r--arch/blackfin/include/asm/page.h5
-rw-r--r--arch/blackfin/kernel/kgdb.c206
-rw-r--r--arch/blackfin/kernel/traps.c2
-rw-r--r--arch/blackfin/mach-common/entry.S4
-rw-r--r--arch/blackfin/mm/Makefile2
-rw-r--r--arch/blackfin/mm/maccess.c97
-rw-r--r--arch/cris/arch-v10/kernel/entry.S2
-rw-r--r--arch/cris/arch-v10/kernel/irq.c2
-rw-r--r--arch/cris/arch-v32/kernel/irq.c2
-rw-r--r--arch/cris/arch-v32/kernel/pinmux.c4
-rw-r--r--arch/cris/arch-v32/mach-a3/pinmux.c2
-rw-r--r--arch/cris/arch-v32/mach-fs/pinmux.c4
-rw-r--r--arch/cris/arch-v32/mm/mmu.S2
-rw-r--r--arch/cris/kernel/irq.c2
-rw-r--r--arch/frv/include/asm/page.h2
-rw-r--r--arch/ia64/include/asm/ftrace.h1
-rw-r--r--arch/ia64/include/asm/kprobes.h5
-rw-r--r--arch/ia64/include/asm/percpu.h4
-rw-r--r--arch/ia64/include/asm/tlb.h2
-rw-r--r--arch/ia64/include/asm/topology.h4
-rw-r--r--arch/ia64/include/asm/types.h5
-rw-r--r--arch/ia64/kernel/ia64_ksyms.c4
-rw-r--r--arch/ia64/kernel/mca.c5
-rw-r--r--arch/ia64/kernel/perfmon.c2
-rw-r--r--arch/ia64/kvm/Kconfig1
-rw-r--r--arch/ia64/kvm/kvm-ia64.c47
-rw-r--r--arch/ia64/mm/discontig.c2
-rw-r--r--arch/ia64/mm/init.c2
-rw-r--r--arch/ia64/mm/tlb.c32
-rw-r--r--arch/m32r/include/asm/local.h25
-rw-r--r--arch/m68k/include/asm/m5206sim.h2
-rw-r--r--arch/m68k/include/asm/m520xsim.h4
-rw-r--r--arch/m68k/include/asm/m523xsim.h8
-rw-r--r--arch/m68k/include/asm/m5249sim.h9
-rw-r--r--arch/m68k/include/asm/m527xsim.h10
-rw-r--r--arch/m68k/include/asm/m528xsim.h38
-rw-r--r--arch/m68k/include/asm/m5307sim.h3
-rw-r--r--arch/m68k/include/asm/m532xsim.h37
-rw-r--r--arch/m68k/include/asm/m5407sim.h2
-rw-r--r--arch/m68k/include/asm/mcfi2c.h29
-rw-r--r--arch/m68k/include/asm/mcfmbus.h77
-rw-r--r--arch/m68k/include/asm/thread_info_no.h1
-rw-r--r--arch/m68knommu/Kconfig4
-rw-r--r--arch/m68knommu/kernel/process.c4
-rw-r--r--arch/m68knommu/kernel/ptrace.c5
-rw-r--r--arch/m68knommu/kernel/sys_m68k.c36
-rw-r--r--arch/m68knommu/kernel/syscalltable.S4
-rw-r--r--arch/m68knommu/platform/5206/config.c41
-rw-r--r--arch/m68knommu/platform/5206e/config.c41
-rw-r--r--arch/m68knommu/platform/520x/config.c45
-rw-r--r--arch/m68knommu/platform/523x/config.c46
-rw-r--r--arch/m68knommu/platform/5249/config.c77
-rw-r--r--arch/m68knommu/platform/5272/intc.c39
-rw-r--r--arch/m68knommu/platform/527x/config.c60
-rw-r--r--arch/m68knommu/platform/528x/config.c51
-rw-r--r--arch/m68knommu/platform/5307/config.c42
-rw-r--r--arch/m68knommu/platform/532x/config.c45
-rw-r--r--arch/m68knommu/platform/5407/config.c41
-rw-r--r--arch/m68knommu/platform/68328/ints.c8
-rw-r--r--arch/m68knommu/platform/68360/ints.c8
-rw-r--r--arch/m68knommu/platform/coldfire/intc-2.c76
-rw-r--r--arch/m68knommu/platform/coldfire/intc-simr.c68
-rw-r--r--arch/m68knommu/platform/coldfire/intc.c59
-rw-r--r--arch/microblaze/include/asm/entry.h2
-rw-r--r--arch/microblaze/include/asm/ptrace.h14
-rw-r--r--arch/microblaze/include/asm/unistd.h5
-rw-r--r--arch/microblaze/kernel/head.S13
-rw-r--r--arch/microblaze/kernel/syscall_table.S2
-rw-r--r--arch/mips/alchemy/Kconfig28
-rw-r--r--arch/mips/alchemy/common/Makefile7
-rw-r--r--arch/mips/alchemy/common/dbdma.c74
-rw-r--r--arch/mips/alchemy/common/dma.c36
-rw-r--r--arch/mips/alchemy/common/gpiolib-au1000.c10
-rw-r--r--arch/mips/alchemy/common/irq.c436
-rw-r--r--arch/mips/alchemy/common/platform.c152
-rw-r--r--arch/mips/alchemy/common/puts.c68
-rw-r--r--arch/mips/alchemy/common/reset.c188
-rw-r--r--arch/mips/alchemy/common/setup.c12
-rw-r--r--arch/mips/alchemy/common/time.c35
-rw-r--r--arch/mips/alchemy/devboards/Makefile6
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c148
-rw-r--r--arch/mips/alchemy/devboards/db1200/Makefile1
-rw-r--r--arch/mips/alchemy/devboards/db1200/platform.c561
-rw-r--r--arch/mips/alchemy/devboards/db1200/setup.c118
-rw-r--r--arch/mips/alchemy/devboards/db1x00/Makefile2
-rw-r--r--arch/mips/alchemy/devboards/db1x00/board_setup.c185
-rw-r--r--arch/mips/alchemy/devboards/db1x00/irqmap.c90
-rw-r--r--arch/mips/alchemy/devboards/db1x00/platform.c118
-rw-r--r--arch/mips/alchemy/devboards/pb1000/board_setup.c30
-rw-r--r--arch/mips/alchemy/devboards/pb1100/Makefile3
-rw-r--r--arch/mips/alchemy/devboards/pb1100/board_setup.c32
-rw-r--r--arch/mips/alchemy/devboards/pb1100/platform.c50
-rw-r--r--arch/mips/alchemy/devboards/pb1200/Makefile2
-rw-r--r--arch/mips/alchemy/devboards/pb1200/board_setup.c84
-rw-r--r--arch/mips/alchemy/devboards/pb1200/irqmap.c134
-rw-r--r--arch/mips/alchemy/devboards/pb1200/platform.c63
-rw-r--r--arch/mips/alchemy/devboards/pb1500/Makefile3
-rw-r--r--arch/mips/alchemy/devboards/pb1500/board_setup.c40
-rw-r--r--arch/mips/alchemy/devboards/pb1500/platform.c49
-rw-r--r--arch/mips/alchemy/devboards/pb1550/Makefile3
-rw-r--r--arch/mips/alchemy/devboards/pb1550/board_setup.c45
-rw-r--r--arch/mips/alchemy/devboards/pb1550/platform.c69
-rw-r--r--arch/mips/alchemy/devboards/platform.c222
-rw-r--r--arch/mips/alchemy/devboards/platform.h21
-rw-r--r--arch/mips/alchemy/devboards/prom.c5
-rw-r--r--arch/mips/alchemy/mtx-1/Makefile2
-rw-r--r--arch/mips/alchemy/mtx-1/board_setup.c39
-rw-r--r--arch/mips/alchemy/mtx-1/init.c6
-rw-r--r--arch/mips/alchemy/mtx-1/irqmap.c56
-rw-r--r--arch/mips/alchemy/xxs1500/Makefile4
-rw-r--r--arch/mips/alchemy/xxs1500/board_setup.c52
-rw-r--r--arch/mips/alchemy/xxs1500/init.c6
-rw-r--r--arch/mips/alchemy/xxs1500/irqmap.c52
-rw-r--r--arch/mips/alchemy/xxs1500/platform.c63
-rw-r--r--arch/mips/configs/db1200_defconfig1481
-rw-r--r--arch/mips/include/asm/kgdb.h2
-rw-r--r--arch/mips/include/asm/local.h25
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h884
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h23
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_eth.h18
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1000.h164
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio.h2
-rw-r--r--arch/mips/include/asm/mach-db1x00/bcsr.h238
-rw-r--r--arch/mips/include/asm/mach-db1x00/db1200.h156
-rw-r--r--arch/mips/include/asm/mach-db1x00/db1x00.h100
-rw-r--r--arch/mips/include/asm/mach-pb1x00/pb1100.h85
-rw-r--r--arch/mips/include/asm/mach-pb1x00/pb1200.h122
-rw-r--r--arch/mips/include/asm/mach-pb1x00/pb1500.h49
-rw-r--r--arch/mips/include/asm/mach-pb1x00/pb1550.h96
-rw-r--r--arch/mips/include/asm/page.h12
-rw-r--r--arch/mips/include/asm/param.h17
-rw-r--r--arch/mips/include/asm/pgalloc.h4
-rw-r--r--arch/mips/include/asm/pgtable-64.h36
-rw-r--r--arch/mips/include/asm/pgtable.h2
-rw-r--r--arch/mips/kernel/asm-offsets.c4
-rw-r--r--arch/mips/kernel/kgdb.c27
-rw-r--r--arch/mips/kernel/linux32.c1
-rw-r--r--arch/mips/kernel/rtlx.c1
-rw-r--r--arch/mips/kernel/traps.c14
-rw-r--r--arch/mips/kernel/vpe.c1
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/mm/pgtable-64.c44
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c1
-rw-r--r--arch/mn10300/Makefile2
-rw-r--r--arch/mn10300/configs/asb2303_defconfig107
-rw-r--r--arch/mn10300/include/asm/bitops.h4
-rw-r--r--arch/mn10300/include/asm/div64.h2
-rw-r--r--arch/mn10300/include/asm/system.h1
-rw-r--r--arch/mn10300/include/asm/tlbflush.h2
-rw-r--r--arch/mn10300/include/asm/uaccess.h4
-rw-r--r--arch/mn10300/include/asm/unistd.h3
-rw-r--r--arch/mn10300/kernel/entry.S1
-rw-r--r--arch/mn10300/kernel/mn10300-serial.c3
-rw-r--r--arch/mn10300/kernel/signal.c2
-rw-r--r--arch/mn10300/lib/checksum.c1
-rw-r--r--arch/mn10300/lib/delay.c3
-rw-r--r--arch/mn10300/lib/usercopy.c6
-rw-r--r--arch/mn10300/mm/dma-alloc.c17
-rw-r--r--arch/mn10300/mm/init.c3
-rw-r--r--arch/mn10300/mm/misalignment.c4
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/serial.h2
-rw-r--r--arch/mn10300/unit-asb2305/include/unit/timex.h2
-rw-r--r--arch/mn10300/unit-asb2305/leds.c4
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.c39
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.h1
-rw-r--r--arch/mn10300/unit-asb2305/pci.c62
-rw-r--r--arch/mn10300/unit-asb2305/unit-init.c5
-rw-r--r--arch/parisc/include/asm/unistd.h4
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/parisc/lib/fixup.S8
-rwxr-xr-xarch/powerpc/boot/wrapper7
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig75
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig90
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig79
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig83
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig79
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig106
-rw-r--r--arch/powerpc/include/asm/kmap_types.h1
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h6
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h7
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64_asm.h18
-rw-r--r--arch/powerpc/include/asm/kvm_host.h6
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h77
-rw-r--r--arch/powerpc/include/asm/local.h25
-rw-r--r--arch/powerpc/include/asm/paca.h5
-rw-r--r--arch/powerpc/include/asm/reg.h4
-rw-r--r--arch/powerpc/kernel/asm-offsets.c32
-rw-r--r--arch/powerpc/kernel/kgdb.c12
-rw-r--r--arch/powerpc/kernel/legacy_serial.c2
-rw-r--r--arch/powerpc/kernel/traps.c7
-rw-r--r--arch/powerpc/kvm/44x_emulate.c25
-rw-r--r--arch/powerpc/kvm/44x_tlb.c20
-rw-r--r--arch/powerpc/kvm/Kconfig3
-rw-r--r--arch/powerpc/kvm/book3s.c90
-rw-r--r--arch/powerpc/kvm/book3s_64_emulate.c77
-rw-r--r--arch/powerpc/kvm/book3s_64_exports.c1
-rw-r--r--arch/powerpc/kvm/book3s_64_interrupts.S336
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c10
-rw-r--r--arch/powerpc/kvm/book3s_64_rmhandlers.S85
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S160
-rw-r--r--arch/powerpc/kvm/booke.c32
-rw-r--r--arch/powerpc/kvm/booke_emulate.c107
-rw-r--r--arch/powerpc/kvm/e500_emulate.c95
-rw-r--r--arch/powerpc/kvm/e500_tlb.c4
-rw-r--r--arch/powerpc/kvm/emulate.c116
-rw-r--r--arch/powerpc/kvm/powerpc.c39
-rw-r--r--arch/s390/Kconfig.debug13
-rw-r--r--arch/s390/crypto/aes_s390.c6
-rw-r--r--arch/s390/include/asm/bug.h4
-rw-r--r--arch/s390/include/asm/sigp.h20
-rw-r--r--arch/s390/include/asm/thread_info.h12
-rw-r--r--arch/s390/include/asm/uaccess.h12
-rw-r--r--arch/s390/include/asm/unistd.h6
-rw-r--r--arch/s390/kernel/compat_linux.c41
-rw-r--r--arch/s390/kernel/compat_wrapper.S9
-rw-r--r--arch/s390/kernel/process.c3
-rw-r--r--arch/s390/kernel/ptrace.c3
-rw-r--r--arch/s390/kernel/setup.c8
-rw-r--r--arch/s390/kernel/signal.c10
-rw-r--r--arch/s390/kernel/smp.c9
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/topology.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c25
-rw-r--r--arch/s390/kvm/kvm-s390.h10
-rw-r--r--arch/s390/lib/Makefile2
-rw-r--r--arch/s390/lib/spinlock.c2
-rw-r--r--arch/s390/lib/usercopy.c8
-rw-r--r--arch/s390/mm/mmap.c4
-rw-r--r--arch/score/mm/init.c2
-rw-r--r--arch/sh/Kconfig5
-rw-r--r--arch/sh/boards/mach-se/7343/irq.c35
-rw-r--r--arch/sh/boards/mach-se/7343/setup.c16
-rw-r--r--arch/sh/include/asm/alignment.h21
-rw-r--r--arch/sh/include/asm/atomic-grb.h46
-rw-r--r--arch/sh/include/asm/atomic-llsc.h27
-rw-r--r--arch/sh/include/asm/atomic.h73
-rw-r--r--arch/sh/include/asm/pgalloc.h32
-rw-r--r--arch/sh/include/asm/pgtable.h26
-rw-r--r--arch/sh/include/asm/pgtable_64.h11
-rw-r--r--arch/sh/include/asm/pgtable_nopmd.h22
-rw-r--r--arch/sh/include/asm/pgtable_pmd.h55
-rw-r--r--arch/sh/include/asm/setup.h1
-rw-r--r--arch/sh/include/asm/sh_bios.h15
-rw-r--r--arch/sh/include/mach-se/mach/se7343.h52
-rw-r--r--arch/sh/kernel/Makefile2
-rw-r--r--arch/sh/kernel/early_printk.c85
-rw-r--r--arch/sh/kernel/head_64.S2
-rw-r--r--arch/sh/kernel/kgdb.c14
-rw-r--r--arch/sh/kernel/machine_kexec.c8
-rw-r--r--arch/sh/kernel/sh_bios.c129
-rw-r--r--arch/sh/kernel/traps_32.c180
-rw-r--r--arch/sh/mm/Kconfig20
-rw-r--r--arch/sh/mm/Makefile4
-rw-r--r--arch/sh/mm/alignment.c159
-rw-r--r--arch/sh/mm/cache-sh4.c13
-rw-r--r--arch/sh/mm/fault_32.c3
-rw-r--r--arch/sh/mm/init.c6
-rw-r--r--arch/sh/mm/nommu.c4
-rw-r--r--arch/sh/mm/pgtable.c57
-rw-r--r--arch/sparc/kernel/kgdb_32.c6
-rw-r--r--arch/sparc/kernel/kgdb_64.c6
-rw-r--r--arch/sparc/kernel/nmi.c7
-rw-r--r--arch/sparc/kernel/rtrap_64.S8
-rw-r--r--arch/x86/Kconfig8
-rw-r--r--arch/x86/boot/compressed/Makefile5
-rw-r--r--arch/x86/boot/compressed/misc.c4
-rw-r--r--arch/x86/ia32/ia32entry.S4
-rw-r--r--arch/x86/ia32/sys_ia32.c9
-rw-r--r--arch/x86/include/asm/kgdb.h3
-rw-r--r--arch/x86/include/asm/kvm_emulate.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h32
-rw-r--r--arch/x86/include/asm/local.h37
-rw-r--r--arch/x86/include/asm/nmi.h1
-rw-r--r--arch/x86/include/asm/pci_x86.h2
-rw-r--r--arch/x86/include/asm/percpu.h119
-rw-r--r--arch/x86/include/asm/sys_ia32.h3
-rw-r--r--arch/x86/include/asm/system.h8
-rw-r--r--arch/x86/include/asm/uaccess_32.h5
-rw-r--r--arch/x86/include/asm/uaccess_64.h5
-rw-r--r--arch/x86/include/asm/unistd_32.h6
-rw-r--r--arch/x86/include/asm/unistd_64.h8
-rw-r--r--arch/x86/include/asm/uv/bios.h7
-rw-r--r--arch/x86/include/asm/vmx.h3
-rw-r--r--arch/x86/kernel/apic/apic.c26
-rw-r--r--arch/x86/kernel/apic/apic_flat_64.c2
-rw-r--r--arch/x86/kernel/apic/io_apic.c7
-rw-r--r--arch/x86/kernel/apic/nmi.c6
-rw-r--r--arch/x86/kernel/apic/probe_64.c15
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c6
-rw-r--r--arch/x86/kernel/bios_uv.c20
-rw-r--r--arch/x86/kernel/cpu/cpufreq/Kconfig14
-rw-r--r--arch/x86/kernel/cpu/cpufreq/Makefile1
-rw-r--r--arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c620
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c10
-rw-r--r--arch/x86/kernel/cpu/perfctr-watchdog.c11
-rw-r--r--arch/x86/kernel/e820.c4
-rw-r--r--arch/x86/kernel/head_32.S6
-rw-r--r--arch/x86/kernel/hw_breakpoint.c5
-rw-r--r--arch/x86/kernel/kgdb.c247
-rw-r--r--arch/x86/kernel/ptrace.c2
-rw-r--r--arch/x86/kernel/reboot.c2
-rw-r--r--arch/x86/kernel/syscall_table_32.S4
-rw-r--r--arch/x86/kernel/traps.c6
-rw-r--r--arch/x86/kernel/vmiclock_32.c2
-rw-r--r--arch/x86/kernel/vmlinux.lds.S4
-rw-r--r--arch/x86/kernel/vsyscall_64.c3
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/i8254.c8
-rw-r--r--arch/x86/kvm/i8259.c4
-rw-r--r--arch/x86/kvm/kvm_cache_regs.h12
-rw-r--r--arch/x86/kvm/lapic.c11
-rw-r--r--arch/x86/kvm/mmu.c53
-rw-r--r--arch/x86/kvm/mmu.h9
-rw-r--r--arch/x86/kvm/svm.c37
-rw-r--r--arch/x86/kvm/vmx.c225
-rw-r--r--arch/x86/kvm/x86.c428
-rw-r--r--arch/x86/mm/init_32.c3
-rw-r--r--arch/x86/pci/intel_bus.c4
-rw-r--r--arch/x86/pci/legacy.c42
-rw-r--r--arch/x86/xen/smp.c2
-rw-r--r--arch/x86/xen/xen-asm_32.S4
-rw-r--r--block/blk-ioc.c5
-rw-r--r--block/blk-settings.c61
-rw-r--r--block/cfq-iosched.c6
-rw-r--r--block/genhd.c2
-rw-r--r--crypto/Kconfig10
-rw-r--r--crypto/Makefile1
-rw-r--r--crypto/pcrypt.c445
-rw-r--r--crypto/testmgr.c84
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_pad.c37
-rw-r--r--drivers/acpi/apei/Kconfig16
-rw-r--r--drivers/acpi/apei/Makefile4
-rw-r--r--drivers/acpi/apei/apei-base.c589
-rw-r--r--drivers/acpi/apei/apei-internal.h97
-rw-r--r--drivers/acpi/apei/einj.c471
-rw-r--r--drivers/acpi/apei/hest.c147
-rw-r--r--drivers/acpi/atomicio.c360
-rw-r--r--drivers/acpi/bus.c10
-rw-r--r--drivers/acpi/ec.c126
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/sbshc.c2
-rw-r--r--drivers/acpi/video.c43
-rw-r--r--drivers/ata/ahci.c221
-rw-r--r--drivers/ata/libata-core.c2
-rw-r--r--drivers/ata/pata_hpt366.c21
-rw-r--r--drivers/ata/pata_hpt37x.c187
-rw-r--r--drivers/ata/pata_hpt3x2n.c118
-rw-r--r--drivers/atm/fore200e.c11
-rw-r--r--drivers/atm/idt77252.c5
-rw-r--r--drivers/atm/lanai.c14
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/base/power/main.c145
-rw-r--r--drivers/base/power/power.h6
-rw-r--r--drivers/base/power/sysfs.c47
-rw-r--r--drivers/block/DAC960.c2
-rw-r--r--drivers/block/aoe/aoecmd.c8
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/drbd/Kconfig2
-rw-r--r--drivers/block/drbd/drbd_int.h7
-rw-r--r--drivers/block/drbd/drbd_main.c1
-rw-r--r--drivers/block/drbd/drbd_nl.c19
-rw-r--r--drivers/block/drbd/drbd_receiver.c46
-rw-r--r--drivers/block/sx8.c2
-rw-r--r--drivers/block/ub.c2
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/xen-blkfront.c2
-rw-r--r--drivers/block/xsysace.c2
-rw-r--r--drivers/bluetooth/Kconfig13
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/ath3k.c187
-rw-r--r--drivers/bluetooth/bcm203x.c2
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c4
-rw-r--r--drivers/char/Makefile1
-rw-r--r--drivers/char/agp/backend.c13
-rw-r--r--drivers/char/agp/hp-agp.c6
-rw-r--r--drivers/char/hw_random/Kconfig12
-rw-r--r--drivers/char/hw_random/Makefile1
-rw-r--r--drivers/char/hw_random/nomadik-rng.c103
-rw-r--r--drivers/char/kdb_keyboard.c204
-rw-r--r--drivers/char/kdb_keyboard.h143
-rw-r--r--drivers/char/keyboard.c30
-rw-r--r--drivers/char/nvram.c3
-rw-r--r--drivers/char/random.c2
-rw-r--r--drivers/cpufreq/cpufreq.c19
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c3
-rw-r--r--drivers/cpuidle/governors/menu.c12
-rw-r--r--drivers/crypto/geode-aes.c6
-rw-r--r--drivers/edac/Kconfig13
-rw-r--r--drivers/edac/Makefile2
-rw-r--r--drivers/edac/edac_core.h23
-rw-r--r--drivers/edac/edac_mc_sysfs.c175
-rw-r--r--drivers/edac/edac_mce.c61
-rw-r--r--drivers/edac/i7core_edac.c1977
-rw-r--r--drivers/firewire/core-device.c127
-rw-r--r--drivers/firewire/ohci.c2
-rw-r--r--drivers/firewire/sbp2.c5
-rw-r--r--drivers/firmware/iscsi_ibft.c6
-rw-r--r--drivers/gpio/Kconfig16
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/adp5588-gpio.c266
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpio/wm8350-gpiolib.c181
-rw-r--r--drivers/gpu/drm/ati_pcigart.c10
-rw-r--r--drivers/gpu/drm/drm_bufs.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c26
-rw-r--r--drivers/gpu/drm/drm_edid.c14
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c104
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/drm_pci.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c31
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c28
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c172
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h123
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c251
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c46
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c32
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h11
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c12
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c528
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c6
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c83
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c3
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c243
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c47
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c34
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h72
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c33
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c87
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_reg.h16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c30
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c35
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c41
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c34
-rw-r--r--drivers/gpu/drm/nouveau/nv04_graph.c159
-rw-r--r--drivers/gpu/drm/nouveau/nv10_fb.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv10_graph.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c115
-rw-r--r--drivers/gpu/drm/nouveau/nv20_graph.c61
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fb.c53
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c116
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c17
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c23
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c6
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/ObjectID.h801
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c6
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c4
-rw-r--r--drivers/gpu/drm/radeon/r100.c23
-rw-r--r--drivers/gpu/drm/radeon/r300.c17
-rw-r--r--drivers/gpu/drm/radeon/r420.c41
-rw-r--r--drivers/gpu/drm/radeon/r520.c1
-rw-r--r--drivers/gpu/drm/radeon/r600.c21
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c43
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c64
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h26
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r420795
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs60068
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5156
-rw-r--r--drivers/gpu/drm/radeon/rs400.c2
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c1
-rw-r--r--drivers/gpu/drm/radeon/rv770.c3
-rw-r--r--drivers/hid/Kconfig12
-rw-r--r--drivers/hid/Makefile2
-rw-r--r--drivers/hid/hid-3m-pct.c291
-rw-r--r--drivers/hid/hid-apple.c24
-rw-r--r--drivers/hid/hid-core.c11
-rw-r--r--drivers/hid/hid-ids.h16
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-samsung.c25
-rw-r--r--drivers/hid/hid-stantum.c283
-rw-r--r--drivers/hid/hid-wacom.c4
-rw-r--r--drivers/hid/usbhid/hid-core.c2
-rw-r--r--drivers/hwmon/Kconfig12
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/amc6821.c1116
-rw-r--r--drivers/hwmon/asus_atk0110.c289
-rw-r--r--drivers/hwmon/coretemp.c16
-rw-r--r--drivers/hwmon/k10temp.c40
-rw-r--r--drivers/hwmon/k8temp.c2
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/via686a.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c10
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-isa.c4
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c4
-rw-r--r--drivers/i2c/busses/i2c-piix4.c6
-rw-r--r--drivers/i2c/busses/i2c-pnx.c285
-rw-r--r--drivers/i2c/busses/i2c-scmi.c33
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c2
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c6
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/ide/au1xxx-ide.c21
-rw-r--r--drivers/ide/ide-tape.c14
-rw-r--r--drivers/ieee1394/pcilynx.c2
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c6
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/gameport/emu10k1-gp.c2
-rw-r--r--drivers/input/gameport/fm801-gp.c2
-rw-r--r--drivers/input/gameport/gameport.c98
-rw-r--r--drivers/input/gameport/ns558.c2
-rw-r--r--drivers/input/input.c51
-rw-r--r--drivers/input/joystick/gf2k.c2
-rw-r--r--drivers/input/joystick/xpad.c204
-rw-r--r--drivers/input/keyboard/atkbd.c352
-rw-r--r--drivers/input/keyboard/davinci_keyscan.c8
-rw-r--r--drivers/input/keyboard/qt2160.c2
-rw-r--r--drivers/input/misc/apanel.c2
-rw-r--r--drivers/input/mouse/bcm5974.c44
-rw-r--r--drivers/input/mouse/hgpk.c4
-rw-r--r--drivers/input/mouse/psmouse-base.c41
-rw-r--r--drivers/input/mouse/synaptics.c10
-rw-r--r--drivers/input/mouse/synaptics.h1
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/i8042.c2
-rw-r--r--drivers/input/serio/pcips2.c2
-rw-r--r--drivers/input/serio/serio.c131
-rw-r--r--drivers/input/serio/xilinx_ps2.c2
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/tablet/gtco.c2
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c241
-rw-r--r--drivers/input/touchscreen/Kconfig12
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/elo.c225
-rw-r--r--drivers/input/touchscreen/tsc2007.c2
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c2
-rw-r--r--drivers/input/xen-kbdfront.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNisar.c4
-rw-r--r--drivers/isdn/mISDN/dsp_core.c4
-rw-r--r--drivers/isdn/mISDN/l1oip_core.c2
-rw-r--r--drivers/leds/Kconfig7
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-88pm860x.c325
-rw-r--r--drivers/md/dm-table.c20
-rw-r--r--drivers/media/IR/Makefile2
-rw-r--r--drivers/media/IR/ir-functions.c2
-rw-r--r--drivers/media/IR/ir-keytable.c45
-rw-r--r--drivers/media/IR/ir-sysfs.c210
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.c20
-rw-r--r--drivers/media/common/tuners/tuner-xc2028.h2
-rw-r--r--drivers/media/dvb/Kconfig4
-rw-r--r--drivers/media/dvb/Makefile14
-rw-r--r--drivers/media/dvb/bt8xx/bt878.c4
-rw-r--r--drivers/media/dvb/dm1105/dm1105.c4
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c7
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c3
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c94
-rw-r--r--drivers/media/dvb/firewire/firedtv-1394.c19
-rw-r--r--drivers/media/dvb/firewire/firedtv-avc.c514
-rw-r--r--drivers/media/dvb/firewire/firedtv-dvb.c1
-rw-r--r--drivers/media/dvb/firewire/firedtv-fw.c41
-rw-r--r--drivers/media/dvb/firewire/firedtv.h6
-rw-r--r--drivers/media/dvb/frontends/Kconfig19
-rw-r--r--drivers/media/dvb/frontends/Makefile2
-rw-r--r--drivers/media/dvb/frontends/atbm8830.c16
-rw-r--r--drivers/media/dvb/frontends/dib8000.c2
-rw-r--r--drivers/media/dvb/frontends/dib8000.h2
-rw-r--r--drivers/media/dvb/frontends/lgdt3305.h6
-rw-r--r--drivers/media/dvb/frontends/mb86a16.c1878
-rw-r--r--drivers/media/dvb/frontends/mb86a16.h52
-rw-r--r--drivers/media/dvb/frontends/mb86a16_priv.h151
-rw-r--r--drivers/media/dvb/frontends/stv0900.h2
-rw-r--r--drivers/media/dvb/frontends/stv0900_core.c87
-rw-r--r--drivers/media/dvb/frontends/stv0900_priv.h11
-rw-r--r--drivers/media/dvb/frontends/stv0900_reg.h6
-rw-r--r--drivers/media/dvb/frontends/stv0900_sw.c44
-rw-r--r--drivers/media/dvb/frontends/tda10021.c4
-rw-r--r--drivers/media/dvb/frontends/tda665x.c257
-rw-r--r--drivers/media/dvb/frontends/tda665x.h52
-rw-r--r--drivers/media/dvb/mantis/Kconfig32
-rw-r--r--drivers/media/dvb/mantis/Makefile28
-rw-r--r--drivers/media/dvb/mantis/hopper_cards.c275
-rw-r--r--drivers/media/dvb/mantis/hopper_vp3028.c88
-rw-r--r--drivers/media/dvb/mantis/hopper_vp3028.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_ca.c207
-rw-r--r--drivers/media/dvb/mantis/mantis_ca.h27
-rw-r--r--drivers/media/dvb/mantis/mantis_cards.c305
-rw-r--r--drivers/media/dvb/mantis/mantis_common.h179
-rw-r--r--drivers/media/dvb/mantis/mantis_core.c238
-rw-r--r--drivers/media/dvb/mantis/mantis_core.h57
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.c256
-rw-r--r--drivers/media/dvb/mantis/mantis_dma.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_dvb.c296
-rw-r--r--drivers/media/dvb/mantis/mantis_dvb.h35
-rw-r--r--drivers/media/dvb/mantis/mantis_evm.c117
-rw-r--r--drivers/media/dvb/mantis/mantis_hif.c240
-rw-r--r--drivers/media/dvb/mantis/mantis_hif.h29
-rw-r--r--drivers/media/dvb/mantis/mantis_i2c.c267
-rw-r--r--drivers/media/dvb/mantis/mantis_i2c.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_input.c148
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.c130
-rw-r--r--drivers/media/dvb/mantis/mantis_ioc.h51
-rw-r--r--drivers/media/dvb/mantis/mantis_link.h83
-rw-r--r--drivers/media/dvb/mantis/mantis_pci.c177
-rw-r--r--drivers/media/dvb/mantis/mantis_pci.h27
-rw-r--r--drivers/media/dvb/mantis/mantis_pcmcia.c120
-rw-r--r--drivers/media/dvb/mantis/mantis_reg.h197
-rw-r--r--drivers/media/dvb/mantis/mantis_uart.c186
-rw-r--r--drivers/media/dvb/mantis/mantis_uart.h58
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1033.c212
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1033.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1034.c119
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1034.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.c358
-rw-r--r--drivers/media/dvb/mantis/mantis_vp1041.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2033.c187
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2033.h30
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2040.c186
-rw-r--r--drivers/media/dvb/mantis/mantis_vp2040.h32
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3028.c38
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3028.h33
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3030.c105
-rw-r--r--drivers/media/dvb/mantis/mantis_vp3030.h30
-rw-r--r--drivers/media/dvb/siano/sms-cards.c1
-rw-r--r--drivers/media/dvb/siano/smscoreapi.h77
-rw-r--r--drivers/media/dvb/siano/smsdvb.c318
-rw-r--r--drivers/media/dvb/ttpci/budget-ci.c2
-rw-r--r--drivers/media/video/bt8xx/bttv-input.c4
-rw-r--r--drivers/media/video/cx231xx/cx231xx-input.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-dvb.c6
-rw-r--r--drivers/media/video/cx23885/cx23885-input.c2
-rw-r--r--drivers/media/video/cx88/cx88-input.c4
-rw-r--r--drivers/media/video/em28xx/em28xx-input.c83
-rw-r--r--drivers/media/video/gspca/gspca.c2
-rw-r--r--drivers/media/video/gspca/ov534.c2
-rw-r--r--drivers/media/video/gspca/sunplus.c1
-rw-r--r--drivers/media/video/ir-kbd-i2c.c4
-rw-r--r--drivers/media/video/saa7134/saa7134-input.c4
-rw-r--r--drivers/media/video/uvc/uvc_ctrl.c2
-rw-r--r--drivers/media/video/uvc/uvc_driver.c65
-rw-r--r--drivers/media/video/uvc/uvc_queue.c14
-rw-r--r--drivers/media/video/uvc/uvc_video.c55
-rw-r--r--drivers/media/video/uvc/uvcvideo.h9
-rw-r--r--drivers/message/i2o/i2o_proc.c11
-rw-r--r--drivers/mfd/88pm8607.c302
-rw-r--r--drivers/mfd/88pm860x-core.c560
-rw-r--r--drivers/mfd/88pm860x-i2c.c268
-rw-r--r--drivers/mfd/Kconfig36
-rw-r--r--drivers/mfd/Makefile13
-rw-r--r--drivers/mfd/ab3100-core.c43
-rw-r--r--drivers/mfd/asic3.c44
-rw-r--r--drivers/mfd/max8925-core.c404
-rw-r--r--drivers/mfd/max8925-i2c.c210
-rw-r--r--drivers/mfd/mc13783-core.c4
-rw-r--r--drivers/mfd/sm501.c7
-rw-r--r--drivers/mfd/t7l66xb.c59
-rw-r--r--drivers/mfd/tc6387xb.c119
-rw-r--r--drivers/mfd/tc6393xb.c58
-rw-r--r--drivers/mfd/tmio_core.c52
-rw-r--r--drivers/mfd/wm8350-core.c3
-rw-r--r--drivers/mfd/wm8350-irq.c155
-rw-r--r--drivers/misc/Kconfig29
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/hwlat_detector.c1208
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c50
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h4
-rw-r--r--drivers/misc/iwmc3200top/log.h31
-rw-r--r--drivers/misc/iwmc3200top/main.c59
-rw-r--r--drivers/misc/kgdbts.c6
-rw-r--r--drivers/mmc/card/block.c8
-rw-r--r--drivers/mmc/card/queue.c18
-rw-r--r--drivers/mmc/card/sdio_uart.c2
-rw-r--r--drivers/mmc/core/core.c4
-rw-r--r--drivers/mmc/core/core.h10
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/au1xmmc.c8
-rw-r--r--drivers/mmc/host/tmio_mmc.c78
-rw-r--r--drivers/mmc/host/tmio_mmc.h46
-rw-r--r--[-rwxr-xr-x]drivers/mtd/chips/cfi_util.c0
-rw-r--r--[-rwxr-xr-x]drivers/mtd/inftlcore.c0
-rw-r--r--drivers/mtd/maps/Kconfig15
-rw-r--r--drivers/mtd/maps/Makefile2
-rw-r--r--drivers/mtd/maps/alchemy-flash.c166
-rw-r--r--drivers/mtd/maps/omap_nor.c188
-rw-r--r--drivers/mtd/maps/physmap_of.c2
-rw-r--r--drivers/mtd/maps/plat-ram.c2
-rw-r--r--drivers/mtd/mtdoops.c2
-rw-r--r--drivers/mtd/nand/Kconfig12
-rw-r--r--drivers/mtd/nand/Makefile3
-rw-r--r--drivers/mtd/nand/au1550nd.c16
-rw-r--r--drivers/mtd/nand/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/gpio.c12
-rw-r--r--drivers/mtd/nand/nomadik_nand.c6
-rw-r--r--drivers/mtd/nand/nuc900_nand.c (renamed from drivers/mtd/nand/w90p910_nand.c)144
-rw-r--r--drivers/mtd/nand/orion_nand.c10
-rw-r--r--drivers/mtd/nand/s3c2410.c2
-rw-r--r--drivers/mtd/nand/sh_flctl.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c14
-rw-r--r--drivers/mtd/nand/ts7250.c207
-rw-r--r--drivers/mtd/ubi/kapi.c15
-rw-r--r--drivers/mtd/ubi/vtbl.c1
-rw-r--r--drivers/net/3c59x.c2
-rw-r--r--drivers/net/8139cp.c2
-rw-r--r--drivers/net/8139too.c2
-rw-r--r--drivers/net/Kconfig38
-rw-r--r--drivers/net/Makefile2
-rw-r--r--drivers/net/acenic.c2
-rw-r--r--drivers/net/amd8111e.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/arm/Kconfig1
-rw-r--r--drivers/net/arm/ep93xx_eth.c140
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_main.c7
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c9
-rw-r--r--drivers/net/au1000_eth.c449
-rw-r--r--drivers/net/au1000_eth.h9
-rw-r--r--drivers/net/b44.c2
-rw-r--r--drivers/net/benet/be_cmds.c30
-rw-r--r--drivers/net/benet/be_cmds.h17
-rw-r--r--drivers/net/benet/be_ethtool.c45
-rw-r--r--drivers/net/bnx2.c4
-rw-r--r--drivers/net/bnx2x_main.c2
-rw-r--r--drivers/net/bonding/bond_main.c11
-rw-r--r--drivers/net/can/at91_can.c3
-rw-r--r--drivers/net/can/bfin_can.c3
-rw-r--r--drivers/net/can/mcp251x.c8
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c246
-rw-r--r--drivers/net/can/mscan/mscan.c57
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/ems_pci.c2
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c3
-rw-r--r--drivers/net/can/ti_hecc.c4
-rw-r--r--drivers/net/can/usb/ems_usb.c3
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/cassini.c2
-rw-r--r--drivers/net/chelsio/common.h2
-rw-r--r--drivers/net/chelsio/subr.c2
-rw-r--r--drivers/net/cs89x0.c5
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/sge.c4
-rw-r--r--drivers/net/davinci_emac.c61
-rw-r--r--drivers/net/defxx.c9
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/e100.c2
-rw-r--r--drivers/net/e1000/e1000_main.c2
-rw-r--r--drivers/net/e1000e/82571.c2
-rw-r--r--drivers/net/e1000e/es2lan.c2
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c2
-rw-r--r--drivers/net/e1000e/lib.c54
-rw-r--r--drivers/net/e1000e/netdev.c32
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_main.c194
-rw-r--r--drivers/net/enic/enic_res.c16
-rw-r--r--drivers/net/enic/vnic_dev.c1
-rw-r--r--drivers/net/enic/vnic_enet.h5
-rw-r--r--drivers/net/enic/vnic_intr.c8
-rw-r--r--drivers/net/enic/vnic_intr.h3
-rw-r--r--drivers/net/enic/vnic_nic.h12
-rw-r--r--drivers/net/epic100.c2
-rw-r--r--drivers/net/fealnx.c2
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/fsl_pq_mdio.c30
-rw-r--r--drivers/net/hamachi.c2
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hp100.c2
-rw-r--r--drivers/net/igb/igb_main.c11
-rw-r--r--drivers/net/igbvf/netdev.c15
-rw-r--r--drivers/net/ioc3-eth.c2
-rw-r--r--drivers/net/ipg.c2
-rw-r--r--drivers/net/irda/au1k_ir.c14
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c9
-rw-r--r--drivers/net/irda/via-ircc.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/ixgb/ixgb_main.c2
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/ixgbe.h31
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c126
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c12
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c11
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c291
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c336
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h45
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h53
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c716
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3571
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h168
-rw-r--r--drivers/net/jme.c2
-rw-r--r--drivers/net/lib82596.c8
-rw-r--r--drivers/net/ll_temac_main.c2
-rw-r--r--drivers/net/mac8390.c632
-rw-r--r--drivers/net/mlx4/main.c4
-rw-r--r--drivers/net/mv643xx_eth.c6
-rw-r--r--drivers/net/myri10ge/myri10ge.c2
-rw-r--r--drivers/net/myri_sbus.c6
-rw-r--r--drivers/net/natsemi.c2
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h6
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c193
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h3
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c6
-rw-r--r--drivers/net/netxen/netxen_nic_init.c7
-rw-r--r--drivers/net/netxen/netxen_nic_main.c217
-rw-r--r--drivers/net/niu.c4
-rw-r--r--drivers/net/ns83820.c2
-rw-r--r--drivers/net/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c3
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c1
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c4
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c6
-rw-r--r--drivers/net/pcnet32.c2
-rw-r--r--drivers/net/phy/broadcom.c4
-rw-r--r--drivers/net/phy/mdio_bus.c72
-rw-r--r--drivers/net/phy/phy_device.c30
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/qla3xxx.c2
-rw-r--r--drivers/net/qlge/qlge.h19
-rw-r--r--drivers/net/qlge/qlge_main.c326
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c11
-rw-r--r--drivers/net/rrunner.c4
-rw-r--r--drivers/net/s2io.c2
-rw-r--r--drivers/net/sc92031.c2
-rw-r--r--drivers/net/sfc/efx.c2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h2
-rw-r--r--drivers/net/sh_eth.c12
-rw-r--r--drivers/net/sis190.c2
-rw-r--r--drivers/net/sis900.c2
-rw-r--r--drivers/net/skfp/skfddi.c21
-rw-r--r--drivers/net/skge.c2
-rw-r--r--drivers/net/sky2.c100
-rw-r--r--drivers/net/smc911x.c6
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/spider_net.c2
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/stmmac/Kconfig8
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h277
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)202
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c245
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)347
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c9
-rw-r--r--drivers/net/stmmac/stmmac_main.c436
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/sundance.c2
-rw-r--r--drivers/net/sungem.c2
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/sunvnet.c5
-rw-r--r--drivers/net/tc35815.c2
-rw-r--r--drivers/net/tehuti.c2
-rw-r--r--drivers/net/tg3.c2
-rw-r--r--drivers/net/tlan.c2
-rw-r--r--drivers/net/tokenring/3c359.c2
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/olympic.c2
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tulip/Kconfig4
-rw-r--r--drivers/net/tulip/de2104x.c2
-rw-r--r--drivers/net/tulip/dmfe.c23
-rw-r--r--drivers/net/tulip/tulip_core.c34
-rw-r--r--drivers/net/tulip/uli526x.c2
-rw-r--r--drivers/net/tulip/winbond-840.c2
-rw-r--r--drivers/net/tulip/xircom_cb.c2
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/ucc_geth.c3
-rw-r--r--drivers/net/ucc_geth.h13
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/hso.c105
-rw-r--r--drivers/net/usb/rtl8150.c11
-rw-r--r--drivers/net/vbus-enet.c1560
-rw-r--r--drivers/net/via-rhine.c2
-rw-r--r--drivers/net/via-velocity.c16
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c2
-rw-r--r--drivers/net/vxge/vxge-main.c10
-rw-r--r--drivers/net/wan/cosa.c10
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/hdlc_cisco.c8
-rw-r--r--drivers/net/wan/hdlc_x25.c4
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c17
-rw-r--r--drivers/net/wimax/i2400m/fw.c13
-rw-r--r--drivers/net/wireless/adm8211.c14
-rw-r--r--drivers/net/wireless/airo.c2
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h9
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c110
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c42
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c32
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h8
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h77
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c156
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c428
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c863
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1342
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c59
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c18
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig17
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h13
-rw-r--r--drivers/net/wireless/b43/dma.c2
-rw-r--r--drivers/net/wireless/b43/main.c35
-rw-r--r--drivers/net/wireless/b43/phy_lp.c7
-rw-r--r--drivers/net/wireless/b43/pio.h40
-rw-r--r--drivers/net/wireless/b43legacy/main.c24
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c256
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c193
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c154
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c101
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c71
-rw-r--r--drivers/net/wireless/libertas/Kconfig6
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/assoc.c17
-rw-r--r--drivers/net/wireless/libertas/cmd.c22
-rw-r--r--drivers/net/wireless/libertas/cmd.h12
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c16
-rw-r--r--drivers/net/wireless/libertas/defs.h7
-rw-r--r--drivers/net/wireless/libertas/dev.h7
-rw-r--r--drivers/net/wireless/libertas/ethtool.c2
-rw-r--r--drivers/net/wireless/libertas/main.c42
-rw-r--r--drivers/net/wireless/libertas/mesh.c29
-rw-r--r--drivers/net/wireless/libertas/mesh.h32
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c26
-rw-r--r--drivers/net/wireless/libertas_tf/main.c10
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c114
-rw-r--r--drivers/net/wireless/mwl8k.c2115
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/main.c12
-rw-r--r--drivers/net/wireless/p54/p54pci.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/rndis_wlan.c42
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c38
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c119
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c57
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c71
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h40
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c22
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c26
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c85
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c31
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c37
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c28
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c15
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h1
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h87
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c83
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c23
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c351
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h35
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c134
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h33
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h100
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c62
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c20
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c492
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c15
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c43
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c25
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c14
-rw-r--r--drivers/net/xilinx_emaclite.c6
-rw-r--r--drivers/net/yellowfin.c2
-rw-r--r--drivers/pci/Makefile4
-rw-r--r--drivers/pci/dmar.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c2
-rw-r--r--drivers/pci/hotplug/cpqphp.h2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c57
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c27
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c106
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c132
-rw-r--r--drivers/pci/hotplug/pciehp_core.c25
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c72
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c24
-rw-r--r--drivers/pci/hotplug/shpchp.h2
-rw-r--r--drivers/pci/hotplug/shpchp_core.c35
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c13
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c149
-rw-r--r--drivers/pci/pci-sysfs.c6
-rw-r--r--drivers/pci/pci.c26
-rw-r--r--drivers/pci/pci.h7
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c28
-rw-r--r--drivers/pci/pcie/portdrv_core.c16
-rw-r--r--drivers/pci/pcie/portdrv_pci.c17
-rw-r--r--drivers/pci/probe.c194
-rw-r--r--drivers/pci/quirks.c14
-rw-r--r--drivers/pci/slot.c55
-rw-r--r--drivers/pcmcia/Kconfig30
-rw-r--r--drivers/pcmcia/Makefile16
-rw-r--r--drivers/pcmcia/au1000_db1x00.c305
-rw-r--r--drivers/pcmcia/au1000_generic.h12
-rw-r--r--drivers/pcmcia/au1000_pb1x00.c119
-rw-r--r--drivers/pcmcia/au1000_xxs1500.c188
-rw-r--r--drivers/pcmcia/cardbus.c175
-rw-r--r--drivers/pcmcia/cistpl.c524
-rw-r--r--drivers/pcmcia/cs.c89
-rw-r--r--drivers/pcmcia/cs_internal.h68
-rw-r--r--drivers/pcmcia/db1xxx_ss.c630
-rw-r--r--drivers/pcmcia/ds.c35
-rw-r--r--drivers/pcmcia/m32r_cfc.c2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/pcmcia_resource.c33
-rw-r--r--drivers/pcmcia/rsrc_mgr.c44
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c188
-rw-r--r--drivers/pcmcia/socket_sysfs.c150
-rw-r--r--drivers/pcmcia/xxs1500_ss.c357
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c91
-rw-r--r--drivers/power/pmu_battery.c2
-rw-r--r--drivers/power/wm8350_power.c26
-rw-r--r--drivers/regulator/88pm8607.c318
-rw-r--r--drivers/regulator/Kconfig8
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/core.c48
-rw-r--r--drivers/regulator/lp3971.c10
-rw-r--r--drivers/regulator/max8925-regulator.c306
-rw-r--r--drivers/regulator/mc13783-regulator.c345
-rw-r--r--drivers/regulator/wm8350-regulator.c48
-rw-r--r--drivers/rtc/rtc-cmos.c9
-rw-r--r--drivers/rtc/rtc-wm8350.c11
-rw-r--r--drivers/s390/block/dasd_eckd.c14
-rw-r--r--drivers/s390/block/dasd_ioctl.c14
-rw-r--r--drivers/s390/char/con3215.c17
-rw-r--r--drivers/s390/char/fs3270.c17
-rw-r--r--drivers/s390/char/tape_block.c39
-rw-r--r--drivers/s390/char/vmcp.c12
-rw-r--r--drivers/s390/cio/chsc_sch.c23
-rw-r--r--drivers/s390/crypto/zcrypt_api.c4
-rw-r--r--drivers/s390/net/claw.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c24
-rw-r--r--drivers/scsi/cxgb3i/cxgb3i_offload.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c4
-rw-r--r--[-rwxr-xr-x]drivers/scsi/lpfc/lpfc_hbadisc.c25
-rwxr-xr-x[-rw-r--r--]drivers/scsi/lpfc/lpfc_hw4.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c14
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c48
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c4
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/pmcraid.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c32
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c64
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/stex.c5
-rw-r--r--drivers/serial/21285.c4
-rw-r--r--drivers/serial/8250.c19
-rw-r--r--drivers/serial/amba-pl011.c6
-rw-r--r--drivers/serial/kgdboc.c117
-rw-r--r--drivers/serial/serial_cs.c15
-rw-r--r--drivers/serial/sh-sci.c6
-rw-r--r--drivers/serial/sunzilog.c50
-rw-r--r--drivers/spi/au1550_spi.c6
-rw-r--r--drivers/spi/xilinx_spi.c28
-rw-r--r--drivers/staging/tm6000/Kconfig32
-rw-r--r--drivers/staging/tm6000/Makefile15
-rw-r--r--drivers/staging/tm6000/README11
-rw-r--r--drivers/staging/tm6000/hack.c252
-rw-r--r--drivers/staging/tm6000/hack.h45
-rw-r--r--drivers/staging/tm6000/tm6000-alsa.c413
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c662
-rw-r--r--drivers/staging/tm6000/tm6000-core.c511
-rw-r--r--drivers/staging/tm6000/tm6000-dvb.c322
-rw-r--r--drivers/staging/tm6000/tm6000-i2c.c245
-rw-r--r--drivers/staging/tm6000/tm6000-regs.h86
-rw-r--r--drivers/staging/tm6000/tm6000-stds.c873
-rw-r--r--drivers/staging/tm6000/tm6000-usb-isoc.h53
-rw-r--r--drivers/staging/tm6000/tm6000-video.c1550
-rw-r--r--drivers/staging/tm6000/tm6000.h278
-rw-r--r--drivers/uwb/i1480/i1480-est.c4
-rw-r--r--drivers/uwb/wlp/messages.c106
-rw-r--r--drivers/vbus/Kconfig25
-rw-r--r--drivers/vbus/Makefile6
-rw-r--r--drivers/vbus/bus-proxy.c247
-rw-r--r--drivers/vbus/pci-bridge.c1015
-rw-r--r--drivers/video/backlight/88pm860x_bl.c304
-rw-r--r--drivers/video/backlight/Kconfig13
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/max8925_bl.c200
-rw-r--r--drivers/video/backlight/omap1_bl.c2
-rw-r--r--drivers/video/console/fbcon.c7
-rw-r--r--drivers/video/mbx/mbxfb.c2
-rw-r--r--drivers/video/omap/dispc.c18
-rw-r--r--drivers/video/omap/lcd_htcherald.c4
-rw-r--r--drivers/video/omap/omapfb.h2
-rw-r--r--drivers/video/omap/omapfb_main.c25
-rw-r--r--drivers/video/omap/rfbi.c4
-rw-r--r--drivers/video/omap2/dss/Kconfig7
-rw-r--r--drivers/video/omap2/dss/core.c10
-rw-r--r--drivers/video/omap2/dss/dispc.c74
-rw-r--r--drivers/video/omap2/dss/dsi.c159
-rw-r--r--drivers/video/omap2/dss/dss.c6
-rw-r--r--drivers/video/omap2/dss/dss.h14
-rw-r--r--drivers/video/omap2/dss/rfbi.c30
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c6
-rw-r--r--drivers/video/pxafb.c4
-rw-r--r--drivers/video/sstfb.c2
-rw-r--r--drivers/video/tdfxfb.c4
-rw-r--r--drivers/virtio/virtio_balloon.c108
-rw-r--r--drivers/w1/w1.c4
-rw-r--r--drivers/watchdog/ixp2000_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c39
-rw-r--r--firmware/Makefile3
-rw-r--r--firmware/WHENCE1
-rw-r--r--firmware/cis/PE520.cis.ihex9
-rw-r--r--fs/Kconfig2
-rw-r--r--fs/Makefile2
-rw-r--r--fs/binfmt_elf_fdpic.c13
-rw-r--r--fs/binfmt_flat.c2
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/ceph/Kconfig26
-rw-r--r--fs/ceph/Makefile37
-rw-r--r--fs/ceph/README20
-rw-r--r--fs/ceph/addr.c1114
-rw-r--r--fs/ceph/auth.c225
-rw-r--r--fs/ceph/auth.h77
-rw-r--r--fs/ceph/auth_none.c120
-rw-r--r--fs/ceph/auth_none.h28
-rw-r--r--fs/ceph/buffer.c61
-rw-r--r--fs/ceph/buffer.h37
-rw-r--r--fs/ceph/caps.c2863
-rw-r--r--fs/ceph/ceph_debug.h37
-rw-r--r--fs/ceph/ceph_frag.c21
-rw-r--r--fs/ceph/ceph_frag.h109
-rw-r--r--fs/ceph/ceph_fs.c74
-rw-r--r--fs/ceph/ceph_fs.h648
-rw-r--r--fs/ceph/ceph_hash.c118
-rw-r--r--fs/ceph/ceph_hash.h13
-rw-r--r--fs/ceph/ceph_strings.c176
-rw-r--r--fs/ceph/crush/crush.c151
-rw-r--r--fs/ceph/crush/crush.h180
-rw-r--r--fs/ceph/crush/hash.c149
-rw-r--r--fs/ceph/crush/hash.h17
-rw-r--r--fs/ceph/crush/mapper.c596
-rw-r--r--fs/ceph/crush/mapper.h20
-rw-r--r--fs/ceph/debugfs.c450
-rw-r--r--fs/ceph/decode.h159
-rw-r--r--fs/ceph/dir.c1222
-rw-r--r--fs/ceph/export.c223
-rw-r--r--fs/ceph/file.c904
-rw-r--r--fs/ceph/inode.c1627
-rw-r--r--fs/ceph/ioctl.c160
-rw-r--r--fs/ceph/ioctl.h40
-rw-r--r--fs/ceph/mds_client.c2975
-rw-r--r--fs/ceph/mds_client.h332
-rw-r--r--fs/ceph/mdsmap.c170
-rw-r--r--fs/ceph/mdsmap.h54
-rw-r--r--fs/ceph/messenger.c2092
-rw-r--r--fs/ceph/messenger.h256
-rw-r--r--fs/ceph/mon_client.c751
-rw-r--r--fs/ceph/mon_client.h115
-rw-r--r--fs/ceph/msgpool.c181
-rw-r--r--fs/ceph/msgpool.h27
-rw-r--r--fs/ceph/msgr.h167
-rw-r--r--fs/ceph/osd_client.c1363
-rw-r--r--fs/ceph/osd_client.h155
-rw-r--r--fs/ceph/osdmap.c916
-rw-r--r--fs/ceph/osdmap.h124
-rw-r--r--fs/ceph/rados.h370
-rw-r--r--fs/ceph/snap.c887
-rw-r--r--fs/ceph/super.c984
-rw-r--r--fs/ceph/super.h895
-rw-r--r--fs/ceph/types.h29
-rw-r--r--fs/ceph/xattr.c844
-rw-r--r--fs/cifs/CHANGES4
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h1
-rw-r--r--fs/cifs/connect.c30
-rw-r--r--fs/compat.c5
-rw-r--r--fs/dlm/member.c2
-rw-r--r--fs/ecryptfs/file.c14
-rw-r--r--fs/ecryptfs/inode.c118
-rw-r--r--fs/exec.c4
-rw-r--r--fs/exofs/inode.c17
-rw-r--r--fs/exofs/pnfs.h10
-rw-r--r--fs/ext3/file.c4
-rw-r--r--fs/ext3/inode.c18
-rw-r--r--fs/ext3/xattr.c14
-rw-r--r--fs/fat/namei_vfat.c21
-rw-r--r--fs/fs-writeback.c31
-rw-r--r--fs/gfs2/aops.c4
-rw-r--r--fs/gfs2/file.c38
-rw-r--r--fs/gfs2/glock.c75
-rw-r--r--fs/gfs2/glock.h7
-rw-r--r--fs/gfs2/glops.c16
-rw-r--r--fs/gfs2/incore.h5
-rw-r--r--fs/gfs2/inode.c6
-rw-r--r--fs/gfs2/lock_dlm.c5
-rw-r--r--fs/gfs2/main.c28
-rw-r--r--fs/gfs2/meta_io.c46
-rw-r--r--fs/gfs2/meta_io.h12
-rw-r--r--fs/gfs2/ops_fstype.c4
-rw-r--r--fs/gfs2/ops_inode.c9
-rw-r--r--fs/gfs2/super.c27
-rw-r--r--fs/gfs2/sys.c2
-rw-r--r--fs/gfs2/util.c1
-rw-r--r--fs/gfs2/util.h1
-rw-r--r--fs/gfs2/xattr.c21
-rw-r--r--fs/inode.c8
-rw-r--r--fs/locks.c2
-rw-r--r--fs/logfs/Kconfig17
-rw-r--r--fs/logfs/Makefile13
-rw-r--r--fs/logfs/compr.c95
-rw-r--r--fs/logfs/dev_bdev.c263
-rw-r--r--fs/logfs/dev_mtd.c253
-rw-r--r--fs/logfs/dir.c827
-rw-r--r--fs/logfs/file.c263
-rw-r--r--fs/logfs/gc.c730
-rw-r--r--fs/logfs/inode.c417
-rw-r--r--fs/logfs/journal.c879
-rw-r--r--fs/logfs/logfs.h722
-rw-r--r--fs/logfs/logfs_abi.h627
-rw-r--r--fs/logfs/readwrite.c2246
-rw-r--r--fs/logfs/segment.c924
-rw-r--r--fs/logfs/super.c634
-rw-r--r--fs/mpage.c2
-rw-r--r--fs/namei.c2
-rw-r--r--fs/namespace.c5
-rw-r--r--fs/nfs/client.c1
-rw-r--r--fs/nfs/dir.c1
-rw-r--r--fs/nfs/file.c1
-rw-r--r--fs/nfs/inode.c16
-rw-r--r--fs/nfs/internal.h3
-rw-r--r--fs/nfs/super.c2
-rw-r--r--fs/nfs/write.c39
-rw-r--r--fs/nfsd/nfs4xdr.c2
-rw-r--r--fs/nfsd/vfs.c56
-rw-r--r--fs/notify/Kconfig1
-rw-r--r--fs/notify/Makefile4
-rw-r--r--fs/notify/dnotify/dnotify.c198
-rw-r--r--fs/notify/fanotify/Kconfig26
-rw-r--r--fs/notify/fanotify/Makefile1
-rw-r--r--fs/notify/fanotify/fanotify.c255
-rw-r--r--fs/notify/fanotify/fanotify_user.c776
-rw-r--r--fs/notify/fsnotify.c140
-rw-r--r--fs/notify/fsnotify.h33
-rw-r--r--fs/notify/group.c178
-rw-r--r--fs/notify/inode_mark.c309
-rw-r--r--fs/notify/inotify/Kconfig15
-rw-r--r--fs/notify/inotify/Makefile1
-rw-r--r--fs/notify/inotify/inotify.c933
-rw-r--r--fs/notify/inotify/inotify.h7
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c116
-rw-r--r--fs/notify/inotify/inotify_user.c307
-rw-r--r--fs/notify/mark.c325
-rw-r--r--fs/notify/notification.c176
-rw-r--r--fs/notify/vfsmount_mark.c176
-rw-r--r--fs/open.c3
-rw-r--r--fs/partitions/check.c7
-rw-r--r--fs/proc/array.c89
-rw-r--r--fs/proc/base.c101
-rw-r--r--fs/proc/internal.h4
-rw-r--r--fs/proc/meminfo.c15
-rw-r--r--fs/proc/mmu.c8
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/qnx4/inode.c3
-rw-r--r--fs/quota/dquot.c50
-rw-r--r--fs/read_write.c8
-rw-r--r--fs/reiserfs/inode.c19
-rw-r--r--fs/reiserfs/ioctl.c3
-rw-r--r--fs/reiserfs/xattr.c18
-rw-r--r--fs/reiserfs/xattr_acl.c2
-rw-r--r--fs/udf/inode.c34
-rw-r--r--fs/xfs/linux-2.6/xfs_acl.c3
-rw-r--r--fs/xfs/linux-2.6/xfs_trace.h1145
-rw-r--r--fs/xfs/xfs_alloc.c44
-rw-r--r--fs/xfs/xfs_inode.c10
-rw-r--r--fs/xfs/xfs_vnodeops.c93
-rw-r--r--include/acpi/apei.h13
-rw-r--r--include/acpi/atomicio.h10
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/asm-generic/fcntl.h8
-rw-r--r--include/asm-generic/kmap_types.h3
-rw-r--r--include/asm-generic/local.h19
-rw-r--r--include/asm-generic/percpu.h18
-rw-r--r--include/asm-generic/unistd.h6
-rw-r--r--include/crypto/pcrypt.h51
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/drm/drm_crtc_helper.h2
-rw-r--r--include/drm/drm_fb_helper.h4
-rw-r--r--include/drm/drm_mode.h1
-rw-r--r--include/drm/i915_drm.h54
-rw-r--r--include/linux/Kbuild5
-rw-r--r--include/linux/backing-dev.h9
-rw-r--r--include/linux/blkdev.h24
-rw-r--r--include/linux/btree-128.h109
-rw-r--r--include/linux/btree-type.h147
-rw-r--r--include/linux/btree.h243
-rw-r--r--include/linux/can/dev.h15
-rw-r--r--include/linux/can/netlink.h1
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/davinci_emac.h (renamed from arch/arm/mach-davinci/include/mach/emac.h)7
-rw-r--r--include/linux/decompress/unlzo.h10
-rw-r--r--include/linux/device.h17
-rw-r--r--include/linux/dm9000.h2
-rw-r--r--include/linux/drbd.h2
-rw-r--r--include/linux/drbd_nl.h1
-rw-r--r--include/linux/edac_mce.h31
-rw-r--r--include/linux/ext3_fs.h33
-rw-r--r--include/linux/ext3_fs_i.h2
-rw-r--r--include/linux/fanotify.h105
-rw-r--r--include/linux/firewire.h11
-rw-r--r--include/linux/fs.h22
-rw-r--r--include/linux/fsnotify.h153
-rw-r--r--include/linux/fsnotify_backend.h162
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/gameport.h1
-rw-r--r--include/linux/genhd.h6
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/gfs2_ondisk.h30
-rw-r--r--include/linux/highmem.h2
-rw-r--r--include/linux/i2c-pnx.h19
-rw-r--r--include/linux/i2c/adp5588.h12
-rw-r--r--include/linux/ieee80211.h106
-rw-r--r--include/linux/in.h2
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/inotify.h174
-rw-r--r--include/linux/input.h52
-rw-r--r--include/linux/iocontext.h27
-rw-r--r--include/linux/ioq.h414
-rw-r--r--include/linux/kdb.h118
-rw-r--r--include/linux/kernel.h85
-rw-r--r--include/linux/kgdb.h74
-rw-r--r--include/linux/kmemcheck.h110
-rw-r--r--include/linux/kmsg_dump.h1
-rw-r--r--include/linux/kvm.h2
-rw-r--r--include/linux/kvm_host.h66
-rw-r--r--include/linux/llc.h7
-rw-r--r--include/linux/mfd/88pm8607.h217
-rw-r--r--include/linux/mfd/88pm860x.h381
-rw-r--r--include/linux/mfd/ab3100.h3
-rw-r--r--include/linux/mfd/max8925.h215
-rw-r--r--include/linux/mfd/tmio.h39
-rw-r--r--include/linux/mfd/wm8350/core.h47
-rw-r--r--include/linux/mfd/wm8350/gpio.h1
-rw-r--r--include/linux/mfd/wm8350/pmic.h28
-rw-r--r--include/linux/mfd/wm8350/rtc.h1
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/mm_types.h2
-rw-r--r--include/linux/mmzone.h14
-rw-r--r--include/linux/module.h37
-rw-r--r--include/linux/mount.h6
-rw-r--r--include/linux/nl80211.h94
-rw-r--r--include/linux/padata.h88
-rw-r--r--include/linux/pci.h38
-rw-r--r--include/linux/pci_hotplug.h41
-rw-r--r--include/linux/pci_ids.h19
-rw-r--r--include/linux/percpu-defs.h40
-rw-r--r--include/linux/percpu.h44
-rw-r--r--include/linux/perf_event.h14
-rw-r--r--include/linux/phy.h1
-rw-r--r--include/linux/pm.h3
-rw-r--r--include/linux/pm_qos_params.h3
-rw-r--r--include/linux/poison.h16
-rw-r--r--include/linux/posix-timers.h2
-rw-r--r--include/linux/regulator/consumer.h4
-rw-r--r--include/linux/regulator/driver.h6
-rw-r--r--include/linux/resource.h2
-rw-r--r--include/linux/resume-trace.h7
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/security.h11
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serio.h20
-rw-r--r--include/linux/shm_signal.h189
-rw-r--r--include/linux/slub_def.h27
-rw-r--r--include/linux/stmmac.h53
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/syscalls.h13
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/uaccess.h4
-rw-r--r--include/linux/vbus_driver.h83
-rw-r--r--include/linux/vbus_pci.h145
-rw-r--r--include/linux/venet.h133
-rw-r--r--include/linux/videodev2.h1
-rw-r--r--include/linux/virtio_balloon.h15
-rw-r--r--include/linux/virtio_config.h5
-rw-r--r--include/linux/vmstat.h8
-rw-r--r--include/linux/writeback.h5
-rw-r--r--include/math-emu/op-common.h2
-rw-r--r--include/media/ir-common.h4
-rw-r--r--include/media/ir-core.h42
-rw-r--r--include/media/ir-kbd-i2c.h2
-rw-r--r--include/net/cfg80211.h112
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/inet_sock.h4
-rw-r--r--include/net/ip.h16
-rw-r--r--include/net/llc.h39
-rw-r--r--include/net/llc_conn.h2
-rw-r--r--include/net/mac80211.h233
-rw-r--r--include/net/phonet/pep.h3
-rw-r--r--include/net/tcp.h19
-rw-r--r--include/pcmcia/ss.h19
-rw-r--r--include/sound/cs46xx_dsp_spos.h6
-rw-r--r--include/sound/pcm.h48
-rw-r--r--include/sound/pcm_oss.h2
-rw-r--r--include/sound/sb.h1
-rw-r--r--include/sound/soc.h3
-rw-r--r--include/sound/tpa6130a2-plat.h6
-rw-r--r--include/sound/version.h2
-rw-r--r--include/sound/wm8904.h57
-rw-r--r--include/sound/wm8955.h26
-rw-r--r--include/trace/ftrace.h12
-rw-r--r--include/trace/syscall.h4
-rw-r--r--init/Kconfig53
-rw-r--r--init/main.c6
-rw-r--r--kernel/Makefile8
-rw-r--r--kernel/audit.c3
-rw-r--r--kernel/audit.h26
-rw-r--r--kernel/audit_tree.c232
-rw-r--r--kernel/audit_watch.c295
-rw-r--r--kernel/auditfilter.c39
-rw-r--r--kernel/auditsc.c10
-rw-r--r--kernel/capability.c4
-rw-r--r--kernel/cgroup.c1
-rw-r--r--kernel/compat.c98
-rw-r--r--kernel/debug/Makefile8
-rw-r--r--kernel/debug/debug_core.c986
-rw-r--r--kernel/debug/debug_core.h82
-rw-r--r--kernel/debug/gdbstub.c (renamed from kernel/kgdb.c)1092
-rw-r--r--kernel/debug/kdb/.gitignore1
-rw-r--r--kernel/debug/kdb/Makefile24
-rw-r--r--kernel/debug/kdb/kdb_bp.c567
-rw-r--r--kernel/debug/kdb/kdb_bt.c217
-rw-r--r--kernel/debug/kdb/kdb_cmds32
-rw-r--r--kernel/debug/kdb/kdb_debugger.c167
-rw-r--r--kernel/debug/kdb/kdb_io.c825
-rw-r--r--kernel/debug/kdb/kdb_main.c2857
-rw-r--r--kernel/debug/kdb/kdb_private.h399
-rw-r--r--kernel/debug/kdb/kdb_support.c1007
-rw-r--r--kernel/debug/kms_hooks.c62
-rw-r--r--kernel/futex_compat.c6
-rw-r--r--kernel/kallsyms.c21
-rw-r--r--kernel/kexec.c4
-rw-r--r--kernel/kmod.c12
-rw-r--r--kernel/module.c50
-rw-r--r--kernel/padata.c690
-rw-r--r--kernel/panic.c10
-rw-r--r--kernel/perf_event.c31
-rw-r--r--kernel/pm_qos_params.c32
-rw-r--r--kernel/posix-cpu-timers.c10
-rw-r--r--kernel/power/Kconfig14
-rw-r--r--kernel/power/main.c31
-rw-r--r--kernel/printk.c26
-rw-r--r--kernel/rcutorture.c8
-rw-r--r--kernel/sched.c15
-rw-r--r--kernel/sched_cpupri.c2
-rw-r--r--kernel/signal.c45
-rw-r--r--kernel/softlockup.c16
-rw-r--r--kernel/sys.c151
-rw-r--r--kernel/sys_ni.c4
-rw-r--r--kernel/trace/Makefile7
-rw-r--r--kernel/trace/ring_buffer.c5
-rw-r--r--kernel/trace/ring_buffer_benchmark.c1
-rw-r--r--kernel/trace/trace.c54
-rw-r--r--kernel/trace/trace.h17
-rw-r--r--kernel/trace/trace_events_filter.c4
-rw-r--r--kernel/trace/trace_functions_graph.c4
-rw-r--r--kernel/trace/trace_kdb.c116
-rw-r--r--kernel/trace/trace_kprobe.c14
-rw-r--r--kernel/trace/trace_syscalls.c5
-rw-r--r--lib/Kconfig28
-rw-r--r--lib/Kconfig.kgdb22
-rw-r--r--lib/Makefile4
-rw-r--r--lib/btree.c797
-rw-r--r--lib/decompress.c5
-rw-r--r--lib/decompress_unlzo.c209
-rw-r--r--lib/dma-debug.c7
-rw-r--r--lib/hweight.c7
-rw-r--r--lib/ioq.c304
-rw-r--r--lib/lzo/lzo1x_decompress.c9
-rw-r--r--lib/rational.c1
-rw-r--r--lib/shm_signal.c196
-rw-r--r--lib/vsprintf.c16
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/backing-dev.c6
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/maccess.c11
-rw-r--r--mm/nommu.c17
-rw-r--r--mm/page-writeback.c30
-rw-r--r--mm/page_alloc.c202
-rw-r--r--mm/percpu.c22
-rw-r--r--mm/slab.c6
-rw-r--r--mm/slub.c305
-rw-r--r--mm/swapfile.c10
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/vmstat.c15
-rw-r--r--net/8021q/vlan_core.c2
-rw-r--r--net/8021q/vlan_dev.c7
-rw-r--r--net/bridge/netfilter/ebtables.c6
-rw-r--r--net/can/af_can.c118
-rw-r--r--net/can/af_can.h4
-rw-r--r--net/can/proc.c93
-rw-r--r--net/core/dev.c29
-rw-r--r--net/core/sock.c4
-rw-r--r--net/ethernet/eth.c6
-rw-r--r--net/ipv4/arp.c52
-rw-r--r--net/ipv4/devinet.c1
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c14
-rw-r--r--net/ipv4/route.c7
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/tcp.c30
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_output.c22
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/proc.c2
-rw-r--r--net/ipv6/syncookies.c3
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/irda/ircomm/ircomm_tty.c6
-rw-r--r--net/llc/af_llc.c64
-rw-r--r--net/llc/llc_conn.c143
-rw-r--r--net/llc/llc_core.c53
-rw-r--r--net/llc/llc_output.c45
-rw-r--r--net/llc/llc_proc.c69
-rw-r--r--net/llc/llc_sap.c111
-rw-r--r--net/mac80211/Kconfig12
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/agg-rx.c15
-rw-r--r--net/mac80211/agg-tx.c30
-rw-r--r--net/mac80211/cfg.c156
-rw-r--r--net/mac80211/debugfs.c94
-rw-r--r--net/mac80211/debugfs_key.c2
-rw-r--r--net/mac80211/debugfs_netdev.c212
-rw-r--r--net/mac80211/debugfs_netdev.h9
-rw-r--r--net/mac80211/debugfs_sta.c64
-rw-r--r--net/mac80211/driver-ops.h115
-rw-r--r--net/mac80211/driver-trace.h86
-rw-r--r--net/mac80211/ht.c53
-rw-r--r--net/mac80211/ibss.c81
-rw-r--r--net/mac80211/ieee80211_i.h191
-rw-r--r--net/mac80211/iface.c149
-rw-r--r--net/mac80211/key.c10
-rw-r--r--net/mac80211/key.h8
-rw-r--r--net/mac80211/main.c68
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c20
-rw-r--r--net/mac80211/mesh_pathtbl.c6
-rw-r--r--net/mac80211/mesh_plink.c6
-rw-r--r--net/mac80211/mlme.c1209
-rw-r--r--net/mac80211/offchannel.c168
-rw-r--r--net/mac80211/pm.c10
-rw-r--r--net/mac80211/rate.c88
-rw-r--r--net/mac80211/rate.h5
-rw-r--r--net/mac80211/rx.c322
-rw-r--r--net/mac80211/scan.c230
-rw-r--r--net/mac80211/spectmgmt.c4
-rw-r--r--net/mac80211/sta_info.c60
-rw-r--r--net/mac80211/sta_info.h32
-rw-r--r--net/mac80211/status.c48
-rw-r--r--net/mac80211/tkip.c38
-rw-r--r--net/mac80211/tx.c322
-rw-r--r--net/mac80211/util.c308
-rw-r--r--net/mac80211/wme.c96
-rw-r--r--net/mac80211/wme.h8
-rw-r--r--net/mac80211/work.c1098
-rw-r--r--net/netfilter/ipvs/Kconfig3
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c15
-rw-r--r--net/netfilter/nf_conntrack_ftp.c18
-rw-r--r--net/packet/af_packet.c19
-rw-r--r--net/phonet/datagram.c6
-rw-r--r--net/phonet/pep-gprs.c4
-rw-r--r--net/phonet/pep.c29
-rw-r--r--net/rose/rose_loopback.c2
-rw-r--r--net/sctp/socket.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c17
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c4
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c2
-rw-r--r--net/sunrpc/svc_xprt.c3
-rw-r--r--net/tipc/Kconfig8
-rw-r--r--net/tipc/core.c10
-rw-r--r--net/wireless/.gitignore1
-rw-r--r--net/wireless/Kconfig13
-rw-r--r--net/wireless/Makefile6
-rw-r--r--net/wireless/chan.c41
-rw-r--r--net/wireless/core.c5
-rw-r--r--net/wireless/core.h8
-rw-r--r--net/wireless/db.txt17
-rw-r--r--net/wireless/genregdb.awk118
-rw-r--r--net/wireless/mlme.c48
-rw-r--r--net/wireless/nl80211.c412
-rw-r--r--net/wireless/nl80211.h15
-rw-r--r--net/wireless/reg.c474
-rw-r--r--net/wireless/regdb.h7
-rw-r--r--net/wireless/scan.c120
-rw-r--r--net/wireless/util.c132
-rw-r--r--net/wireless/wext-compat.c39
-rw-r--r--scripts/.gitignore1
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.lib14
-rw-r--r--scripts/binoffset.c163
-rwxr-xr-xscripts/decodecode48
-rwxr-xr-xscripts/extract-ikconfig127
-rw-r--r--scripts/genksyms/genksyms.c4
-rwxr-xr-xscripts/get_maintainer.pl84
-rw-r--r--scripts/kconfig/Makefile16
-rw-r--r--scripts/kconfig/expr.c27
-rw-r--r--scripts/kconfig/lkc.h7
-rw-r--r--scripts/kconfig/lkc_proto.h3
-rw-r--r--scripts/kconfig/mconf.c14
-rw-r--r--scripts/kconfig/menu.c16
-rw-r--r--scripts/kconfig/nconf.c1568
-rw-r--r--scripts/kconfig/nconf.gui.c617
-rw-r--r--scripts/kconfig/nconf.h95
-rw-r--r--scripts/kconfig/util.c2
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped2
-rw-r--r--scripts/kconfig/zconf.y2
-rwxr-xr-xscripts/kernel-doc16
-rw-r--r--security/capability.c3
-rw-r--r--security/security.c51
-rw-r--r--security/selinux/hooks.c10
-rw-r--r--security/selinux/ss/mls.c4
-rw-r--r--security/selinux/ss/services.c4
-rw-r--r--security/tomoyo/common.c204
-rw-r--r--security/tomoyo/common.h47
-rw-r--r--security/tomoyo/domain.c137
-rw-r--r--security/tomoyo/file.c217
-rw-r--r--security/tomoyo/realpath.c136
-rw-r--r--security/tomoyo/realpath.h7
-rw-r--r--security/tomoyo/tomoyo.c81
-rw-r--r--security/tomoyo/tomoyo.h12
-rw-r--r--sound/core/oss/pcm_oss.c32
-rw-r--r--sound/core/pcm.c4
-rw-r--r--sound/core/pcm_lib.c438
-rw-r--r--sound/core/pcm_memory.c55
-rw-r--r--sound/core/pcm_native.c8
-rw-r--r--sound/drivers/vx/vx_pcm.c59
-rw-r--r--sound/isa/Kconfig37
-rw-r--r--sound/isa/Makefile2
-rw-r--r--sound/isa/als100.c121
-rw-r--r--sound/isa/dt019x.c321
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c93
-rw-r--r--sound/isa/sb/Makefile2
-rw-r--r--sound/isa/sb/emu8000.c11
-rw-r--r--sound/isa/sb/jazz16.c404
-rw-r--r--sound/isa/sb/sb8_main.c118
-rw-r--r--sound/isa/sb/sb_common.c3
-rw-r--r--sound/isa/sb/sb_mixer.c333
-rw-r--r--sound/isa/wss/wss_lib.c80
-rw-r--r--sound/mips/sgio2audio.c31
-rw-r--r--sound/oss/au1550_ac97.c16
-rw-r--r--sound/oss/dev_table.c16
-rw-r--r--sound/oss/sound_config.h2
-rw-r--r--sound/oss/soundcard.c39
-rw-r--r--sound/pci/ac97/ac97_codec.c10
-rw-r--r--sound/pci/ac97/ac97_id.h2
-rw-r--r--sound/pci/ac97/ac97_patch.c18
-rw-r--r--sound/pci/atiixp.c1
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c2
-rw-r--r--sound/pci/cs46xx/dsp_spos.c42
-rw-r--r--sound/pci/cs46xx/dsp_spos.h4
-rw-r--r--sound/pci/cs46xx/dsp_spos_scb_lib.c33
-rw-r--r--sound/pci/hda/hda_codec.c75
-rw-r--r--sound/pci/hda/hda_codec.h2
-rw-r--r--sound/pci/hda/hda_generic.c3
-rw-r--r--sound/pci/hda/hda_hwdep.c7
-rw-r--r--sound/pci/hda/hda_intel.c30
-rw-r--r--sound/pci/hda/hda_local.h14
-rw-r--r--sound/pci/hda/hda_proc.c31
-rw-r--r--sound/pci/hda/patch_analog.c104
-rw-r--r--sound/pci/hda/patch_cirrus.c14
-rw-r--r--sound/pci/hda/patch_cmedia.c11
-rw-r--r--sound/pci/hda/patch_conexant.c383
-rw-r--r--sound/pci/hda/patch_realtek.c196
-rw-r--r--sound/pci/hda/patch_si3054.c1
-rw-r--r--sound/pci/hda/patch_sigmatel.c120
-rw-r--r--sound/pci/hda/patch_via.c274
-rw-r--r--sound/pci/riptide/riptide.c2
-rw-r--r--sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c51
-rw-r--r--sound/soc/au1x/Kconfig10
-rw-r--r--sound/soc/au1x/Makefile4
-rw-r--r--sound/soc/au1x/db1200.c141
-rw-r--r--sound/soc/au1x/dbdma2.c14
-rw-r--r--sound/soc/au1x/sample-ac97.c144
-rw-r--r--sound/soc/codecs/Kconfig12
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/ad1938.c64
-rw-r--r--sound/soc/codecs/cs4270.c43
-rw-r--r--sound/soc/codecs/da7210.c589
-rw-r--r--sound/soc/codecs/da7210.h24
-rw-r--r--sound/soc/codecs/tlv320aic3x.c75
-rw-r--r--sound/soc/codecs/tlv320dac33.c288
-rw-r--r--sound/soc/codecs/tpa6130a2.c115
-rw-r--r--sound/soc/codecs/wm8350.c10
-rw-r--r--sound/soc/codecs/wm8727.c66
-rw-r--r--sound/soc/codecs/wm8731.c3
-rw-r--r--sound/soc/codecs/wm8753.c8
-rw-r--r--sound/soc/codecs/wm8776.c2
-rw-r--r--sound/soc/codecs/wm8904.c2556
-rw-r--r--sound/soc/codecs/wm8904.h1681
-rw-r--r--sound/soc/codecs/wm8955.c1151
-rw-r--r--sound/soc/codecs/wm8955.h489
-rw-r--r--sound/soc/codecs/wm8961.c3
-rw-r--r--sound/soc/codecs/wm8990.c8
-rw-r--r--sound/soc/codecs/wm8993.c67
-rw-r--r--sound/soc/davinci/davinci-mcasp.c18
-rw-r--r--sound/soc/davinci/davinci-mcasp.h1
-rw-r--r--sound/soc/davinci/davinci-pcm.c2
-rw-r--r--sound/soc/s3c24xx/s3c64xx-i2s.c120
-rw-r--r--sound/soc/sh/Kconfig8
-rw-r--r--sound/soc/sh/Makefile2
-rw-r--r--sound/soc/sh/fsi-da7210.c83
-rw-r--r--sound/soc/sh/fsi.c164
-rw-r--r--sound/soc/soc-cache.c2
-rw-r--r--sound/soc/soc-core.c11
-rw-r--r--sound/soc/soc-dapm.c37
-rw-r--r--sound/usb/Kconfig12
-rw-r--r--sound/usb/Makefile2
-rw-r--r--sound/usb/ua101.c1419
-rw-r--r--sound/usb/usbaudio.c256
-rw-r--r--sound/usb/usbaudio.h16
-rw-r--r--sound/usb/usbmixer.c75
-rw-r--r--sound/usb/usbquirks.h145
-rw-r--r--tools/perf/Makefile3
-rw-r--r--tools/perf/builtin-annotate.c8
-rw-r--r--tools/perf/builtin-diff.c14
-rw-r--r--tools/perf/builtin-help.c3
-rw-r--r--tools/perf/builtin-kmem.c39
-rw-r--r--tools/perf/builtin-report.c32
-rw-r--r--tools/perf/builtin-sched.c25
-rw-r--r--tools/perf/builtin-timechart.c25
-rw-r--r--tools/perf/builtin-trace.c20
-rw-r--r--tools/perf/perf.c2
-rw-r--r--tools/perf/util/data_map.c252
-rw-r--r--tools/perf/util/debug.c1
-rw-r--r--tools/perf/util/debugfs.c17
-rw-r--r--tools/perf/util/debugfs.h2
-rw-r--r--tools/perf/util/event.h65
-rw-r--r--tools/perf/util/header.c82
-rw-r--r--tools/perf/util/map.h73
-rw-r--r--tools/perf/util/probe-finder.h2
-rw-r--r--tools/perf/util/session.c253
-rw-r--r--tools/perf/util/session.h24
-rw-r--r--tools/perf/util/symbol.c17
-rw-r--r--tools/perf/util/symbol.h5
-rw-r--r--tools/perf/util/trace-event-info.c62
-rw-r--r--tools/perf/util/util.c69
-rw-r--r--tools/perf/util/util.h3
-rw-r--r--usr/Kconfig26
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/assigned-dev.c8
-rw-r--r--virt/kvm/coalesced_mmio.c39
-rw-r--r--virt/kvm/coalesced_mmio.h15
-rw-r--r--virt/kvm/eventfd.c18
-rw-r--r--virt/kvm/ioapic.c23
-rw-r--r--virt/kvm/ioapic.h1
-rw-r--r--virt/kvm/iommu.c27
-rw-r--r--virt/kvm/kvm_main.c357
2394 files changed, 196831 insertions, 34206 deletions
diff --git a/.gitignore b/.gitignore
index fb2190c61af0..de6344e15706 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,7 @@ modules.builtin
tags
TAGS
vmlinux
+vmlinuz
System.map
Module.markers
Module.symvers
diff --git a/Documentation/acpi/apei/einj.txt b/Documentation/acpi/apei/einj.txt
new file mode 100644
index 000000000000..438ef33c8fd0
--- /dev/null
+++ b/Documentation/acpi/apei/einj.txt
@@ -0,0 +1,49 @@
+ APEI Error INJection
+ ~~~~~~~~~~~~~~~~~~~~
+
+EINJ provides a hardware error injection mechanism, it is very useful
+for debugging and testing of other APEI and RAS features.
+
+To use EINJ, make the following is enabled in your kernel
+configuration:
+
+CONFIG_DEBUG_FS
+CONFIG_ACPI_APEI
+CONFIG_ACPI_APEI_EINJ
+
+The user interface of EINJ is in debug file system, under the
+directory apei/einj. The following files are provided.
+
+- available_error_type
+ Read this file will return the error injection capability of the
+ platform, that is, which error types are supported. The error type
+ definition is as follow, the left field is the error type value, the
+ right field is error description.
+
+ 0x00000001 Processor Correctable
+ 0x00000002 Processor Uncorrectable non-fatal
+ 0x00000004 Processor Uncorrectable fatal
+ 0x00000008 Memory Correctable
+ 0x00000010 Memory Uncorrectable non-fatal
+ 0x00000020 Memory Uncorrectable fatal
+ 0x00000040 PCI Express Correctable
+ 0x00000080 PCI Express Uncorrectable fatal
+ 0x00000100 PCI Express Uncorrectable non-fatal
+ 0x00000200 Platform Correctable
+ 0x00000400 Platform Uncorrectable non-fatal
+ 0x00000800 Platform Uncorrectable fatal
+
+ The format of file contents are as above, except there are only the
+ available error type lines.
+
+- error_type
+ This file is used to set the error type value. The error type value
+ is defined in "available_error_type" description.
+
+- error_inject
+ Write any integer to this file to trigger the error
+ injection. Before this, please specify all necessary error
+ parameters.
+
+For more information about EINJ, please refer to ACPI specification
+version 4.0, section 17.5.
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index aed082f49d09..737988fca64d 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -145,8 +145,8 @@ show_sampling_rate_max: THIS INTERFACE IS DEPRECATED, DON'T USE IT.
up_threshold: defines what the average CPU usage between the samplings
of 'sampling_rate' needs to be for the kernel to make a decision on
whether it should increase the frequency. For example when it is set
-to its default value of '80' it means that between the checking
-intervals the CPU needs to be on average more than 80% in use to then
+to its default value of '95' it means that between the checking
+intervals the CPU needs to be on average more than 95% in use to then
decide that the CPU frequency needs to be increased.
ignore_nice_load: this parameter takes a value of '0' or '1'. When
diff --git a/Documentation/cpu-freq/pcc-cpufreq.txt b/Documentation/cpu-freq/pcc-cpufreq.txt
new file mode 100644
index 000000000000..9e3c3b33514c
--- /dev/null
+++ b/Documentation/cpu-freq/pcc-cpufreq.txt
@@ -0,0 +1,207 @@
+/*
+ * pcc-cpufreq.txt - PCC interface documentation
+ *
+ * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com>
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
+ * INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+
+ Processor Clocking Control Driver
+ ---------------------------------
+
+Contents:
+---------
+1. Introduction
+1.1 PCC interface
+1.1.1 Get Average Frequency
+1.1.2 Set Desired Frequency
+1.2 Platforms affected
+2. Driver and /sys details
+2.1 scaling_available_frequencies
+2.2 cpuinfo_transition_latency
+2.3 cpuinfo_cur_freq
+2.4 related_cpus
+3. Caveats
+
+1. Introduction:
+----------------
+Processor Clocking Control (PCC) is an interface between the platform
+firmware and OSPM. It is a mechanism for coordinating processor
+performance (ie: frequency) between the platform firmware and the OS.
+
+The PCC driver (pcc-cpufreq) allows OSPM to take advantage of the PCC
+interface.
+
+OS utilizes the PCC interface to inform platform firmware what frequency the
+OS wants for a logical processor. The platform firmware attempts to achieve
+the requested frequency. If the request for the target frequency could not be
+satisfied by platform firmware, then it usually means that power budget
+conditions are in place, and "power capping" is taking place.
+
+1.1 PCC interface:
+------------------
+The complete PCC specification is available here:
+http://www.acpica.org/download/Processor-Clocking-Control-v1p0.pdf
+
+PCC relies on a shared memory region that provides a channel for communication
+between the OS and platform firmware. PCC also implements a "doorbell" that
+is used by the OS to inform the platform firmware that a command has been
+sent.
+
+The ACPI PCCH() method is used to discover the location of the PCC shared
+memory region. The shared memory region header contains the "command" and
+"status" interface. PCCH() also contains details on how to access the platform
+doorbell.
+
+The following commands are supported by the PCC interface:
+* Get Average Frequency
+* Set Desired Frequency
+
+The ACPI PCCP() method is implemented for each logical processor and is
+used to discover the offsets for the input and output buffers in the shared
+memory region.
+
+When PCC mode is enabled, the platform will not expose processor performance
+or throttle states (_PSS, _TSS and related ACPI objects) to OSPM. Therefore,
+the native P-state driver (such as acpi-cpufreq for Intel, powernow-k8 for
+AMD) will not load.
+
+However, OSPM remains in control of policy. The governor (eg: "ondemand")
+computes the required performance for each processor based on server workload.
+The PCC driver fills in the command interface, and the input buffer and
+communicates the request to the platform firmware. The platform firmware is
+responsible for delivering the requested performance.
+
+Each PCC command is "global" in scope and can affect all the logical CPUs in
+the system. Therefore, PCC is capable of performing "group" updates. With PCC
+the OS is capable of getting/setting the frequency of all the logical CPUs in
+the system with a single call to the BIOS.
+
+1.1.1 Get Average Frequency:
+----------------------------
+This command is used by the OSPM to query the running frequency of the
+processor since the last time this command was completed. The output buffer
+indicates the average unhalted frequency of the logical processor expressed as
+a percentage of the nominal (ie: maximum) CPU frequency. The output buffer
+also signifies if the CPU frequency is limited by a power budget condition.
+
+1.1.2 Set Desired Frequency:
+----------------------------
+This command is used by the OSPM to communicate to the platform firmware the
+desired frequency for a logical processor. The output buffer is currently
+ignored by OSPM. The next invocation of "Get Average Frequency" will inform
+OSPM if the desired frequency was achieved or not.
+
+1.2 Platforms affected:
+-----------------------
+The PCC driver will load on any system where the platform firmware:
+* supports the PCC interface, and the associated PCCH() and PCCP() methods
+* assumes responsibility for managing the hardware clocking controls in order
+to deliver the requested processor performance
+
+Currently, certain HP ProLiant platforms implement the PCC interface. On those
+platforms PCC is the "default" choice.
+
+However, it is possible to disable this interface via a BIOS setting. In
+such an instance, as is also the case on platforms where the PCC interface
+is not implemented, the PCC driver will fail to load silently.
+
+2. Driver and /sys details:
+---------------------------
+When the driver loads, it merely prints the lowest and the highest CPU
+frequencies supported by the platform firmware.
+
+The PCC driver loads with a message such as:
+pcc-cpufreq: (v1.00.00) driver loaded with frequency limits: 1600 MHz, 2933
+MHz
+
+This means that the OPSM can request the CPU to run at any frequency in
+between the limits (1600 MHz, and 2933 MHz) specified in the message.
+
+Internally, there is no need for the driver to convert the "target" frequency
+to a corresponding P-state.
+
+The VERSION number for the driver will be of the format v.xy.ab.
+eg: 1.00.02
+ ----- --
+ | |
+ | -- this will increase with bug fixes/enhancements to the driver
+ |-- this is the version of the PCC specification the driver adheres to
+
+
+The following is a brief discussion on some of the fields exported via the
+/sys filesystem and how their values are affected by the PCC driver:
+
+2.1 scaling_available_frequencies:
+----------------------------------
+scaling_available_frequencies is not created in /sys. No intermediate
+frequencies need to be listed because the BIOS will try to achieve any
+frequency, within limits, requested by the governor. A frequency does not have
+to be strictly associated with a P-state.
+
+2.2 cpuinfo_transition_latency:
+-------------------------------
+The cpuinfo_transition_latency field is 0. The PCC specification does
+not include a field to expose this value currently.
+
+2.3 cpuinfo_cur_freq:
+---------------------
+A) Often cpuinfo_cur_freq will show a value different than what is declared
+in the scaling_available_frequencies or scaling_cur_freq, or scaling_max_freq.
+This is due to "turbo boost" available on recent Intel processors. If certain
+conditions are met the BIOS can achieve a slightly higher speed than requested
+by OSPM. An example:
+
+scaling_cur_freq : 2933000
+cpuinfo_cur_freq : 3196000
+
+B) There is a round-off error associated with the cpuinfo_cur_freq value.
+Since the driver obtains the current frequency as a "percentage" (%) of the
+nominal frequency from the BIOS, sometimes, the values displayed by
+scaling_cur_freq and cpuinfo_cur_freq may not match. An example:
+
+scaling_cur_freq : 1600000
+cpuinfo_cur_freq : 1583000
+
+In this example, the nominal frequency is 2933 MHz. The driver obtains the
+current frequency, cpuinfo_cur_freq, as 54% of the nominal frequency:
+
+ 54% of 2933 MHz = 1583 MHz
+
+Nominal frequency is the maximum frequency of the processor, and it usually
+corresponds to the frequency of the P0 P-state.
+
+2.4 related_cpus:
+-----------------
+The related_cpus field is identical to affected_cpus.
+
+affected_cpus : 4
+related_cpus : 4
+
+Currently, the PCC driver does not evaluate _PSD. The platforms that support
+PCC do not implement SW_ALL. So OSPM doesn't need to perform any coordination
+to ensure that the same frequency is requested of all dependent CPUs.
+
+3. Caveats:
+-----------
+The "cpufreq_stats" module in its present form cannot be loaded and
+expected to work with the PCC driver. Since the "cpufreq_stats" module
+provides information wrt each P-state, it is not applicable to the PCC driver.
diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index 3ad6acead949..d9bcffd59433 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -69,7 +69,6 @@ av_permissions.h
bbootsect
bin2c
binkernel.spec
-binoffset
bootsect
bounds.h
bsetup
diff --git a/Documentation/driver-model/platform.txt b/Documentation/driver-model/platform.txt
index 2e2c2ea90ceb..41f41632ee55 100644
--- a/Documentation/driver-model/platform.txt
+++ b/Documentation/driver-model/platform.txt
@@ -192,7 +192,7 @@ command line. This will execute all matching early_param() callbacks.
User specified early platform devices will be registered at this point.
For the early serial console case the user can specify port on the
kernel command line as "earlyprintk=serial.0" where "earlyprintk" is
-the class string, "serial" is the name of the platfrom driver and
+the class string, "serial" is the name of the platform driver and
0 is the platform device id. If the id is -1 then the dot and the
id can be omitted.
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
index 79c533223762..55d7ae746a4f 100644
--- a/Documentation/edac.txt
+++ b/Documentation/edac.txt
@@ -6,6 +6,8 @@ Written by Doug Thompson <dougthompson@xmission.com>
7 Dec 2005
17 Jul 2007 Updated
+(c) Mauro Carvalho Chehab <mchehab@redhat.com>
+05 Aug 2009 Nehalem interface
EDAC is maintained and written by:
@@ -717,3 +719,154 @@ unique drivers for their hardware systems.
The 'test_device_edac' sample driver is located at the
bluesmoke.sourceforge.net project site for EDAC.
+=======================================================================
+NEHALEM USAGE OF EDAC APIs
+
+This chapter documents some EXPERIMENTAL mappings for EDAC API to handle
+Nehalem EDAC driver. They will likely be changed on future versions
+of the driver.
+
+Due to the way Nehalem exports Memory Controller data, some adjustments
+were done at i7core_edac driver. This chapter will cover those differences
+
+1) On Nehalem, there are one Memory Controller per Quick Patch Interconnect
+ (QPI). At the driver, the term "socket" means one QPI. This is
+ associated with a physical CPU socket.
+
+ Each MC have 3 physical read channels, 3 physical write channels and
+ 3 logic channels. The driver currenty sees it as just 3 channels.
+ Each channel can have up to 3 DIMMs.
+
+ The minimum known unity is DIMMs. There are no information about csrows.
+ As EDAC API maps the minimum unity is csrows, the driver sequencially
+ maps channel/dimm into different csrows.
+
+ For example, suposing the following layout:
+ Ch0 phy rd0, wr0 (0x063f4031): 2 ranks, UDIMMs
+ dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ dimm 1 1024 Mb offset: 4, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ Ch1 phy rd1, wr1 (0x063f4031): 2 ranks, UDIMMs
+ dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ Ch2 phy rd3, wr3 (0x063f4031): 2 ranks, UDIMMs
+ dimm 0 1024 Mb offset: 0, bank: 8, rank: 1, row: 0x4000, col: 0x400
+ The driver will map it as:
+ csrow0: channel 0, dimm0
+ csrow1: channel 0, dimm1
+ csrow2: channel 1, dimm0
+ csrow3: channel 2, dimm0
+
+exports one
+ DIMM per csrow.
+
+ Each QPI is exported as a different memory controller.
+
+2) Nehalem MC has the hability to generate errors. The driver implements this
+ functionality via some error injection nodes:
+
+ For injecting a memory error, there are some sysfs nodes, under
+ /sys/devices/system/edac/mc/mc?/:
+
+ inject_addrmatch/*:
+ Controls the error injection mask register. It is possible to specify
+ several characteristics of the address to match an error code:
+ dimm = the affected dimm. Numbers are relative to a channel;
+ rank = the memory rank;
+ channel = the channel that will generate an error;
+ bank = the affected bank;
+ page = the page address;
+ column (or col) = the address column.
+ each of the above values can be set to "any" to match any valid value.
+
+ At driver init, all values are set to any.
+
+ For example, to generate an error at rank 1 of dimm 2, for any channel,
+ any bank, any page, any column:
+ echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
+ echo 1 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
+
+ To return to the default behaviour of matching any, you can do:
+ echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/dimm
+ echo any >/sys/devices/system/edac/mc/mc0/inject_addrmatch/rank
+
+ inject_eccmask:
+ specifies what bits will have troubles,
+
+ inject_section:
+ specifies what ECC cache section will get the error:
+ 3 for both
+ 2 for the highest
+ 1 for the lowest
+
+ inject_type:
+ specifies the type of error, being a combination of the following bits:
+ bit 0 - repeat
+ bit 1 - ecc
+ bit 2 - parity
+
+ inject_enable starts the error generation when something different
+ than 0 is written.
+
+ All inject vars can be read. root permission is needed for write.
+
+ Datasheet states that the error will only be generated after a write on an
+ address that matches inject_addrmatch. It seems, however, that reading will
+ also produce an error.
+
+ For example, the following code will generate an error for any write access
+ at socket 0, on any DIMM/address on channel 2:
+
+ echo 2 >/sys/devices/system/edac/mc/mc0/inject_addrmatch/channel
+ echo 2 >/sys/devices/system/edac/mc/mc0/inject_type
+ echo 64 >/sys/devices/system/edac/mc/mc0/inject_eccmask
+ echo 3 >/sys/devices/system/edac/mc/mc0/inject_section
+ echo 1 >/sys/devices/system/edac/mc/mc0/inject_enable
+ dd if=/dev/mem of=/dev/null seek=16k bs=4k count=1 >& /dev/null
+
+ For socket 1, it is needed to replace "mc0" by "mc1" at the above
+ commands.
+
+ The generated error message will look like:
+
+ EDAC MC0: UE row 0, channel-a= 0 channel-b= 0 labels "-": NON_FATAL (addr = 0x0075b980, socket=0, Dimm=0, Channel=2, syndrome=0x00000040, count=1, Err=8c0000400001009f:4000080482 (read error: read ECC error))
+
+3) Nehalem specific Corrected Error memory counters
+
+ Nehalem have some registers to count memory errors. The driver uses those
+ registers to report Corrected Errors on devices with Registered Dimms.
+
+ However, those counters don't work with Unregistered Dimms. As the chipset
+ offers some counters that also work with UDIMMS (but with a worse level of
+ granularity than the default ones), the driver exposes those registers for
+ UDIMM memories.
+
+ They can be read by looking at the contents of all_channel_counts/
+
+ $ for i in /sys/devices/system/edac/mc/mc0/all_channel_counts/*; do echo $i; cat $i; done
+ /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm0
+ 0
+ /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm1
+ 0
+ /sys/devices/system/edac/mc/mc0/all_channel_counts/udimm2
+ 0
+
+ What happens here is that errors on different csrows, but at the same
+ dimm number will increment the same counter.
+ So, in this memory mapping:
+ csrow0: channel 0, dimm0
+ csrow1: channel 0, dimm1
+ csrow2: channel 1, dimm0
+ csrow3: channel 2, dimm0
+ The hardware will increment udimm0 for an error at the first dimm at either
+ csrow0, csrow2 or csrow3;
+ The hardware will increment udimm1 for an error at the second dimm at either
+ csrow0, csrow2 or csrow3;
+ The hardware will increment udimm2 for an error at the third dimm at either
+ csrow0, csrow2 or csrow3;
+
+4) Standard error counters
+
+ The standard error counters are generated when an mcelog error is received
+ by the driver. Since, with udimm, this is counted by software, it is
+ possible that some errors could be lost. With rdimm's, they displays the
+ contents of the registers
+
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 870d190fe617..b26ba93553d3 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -88,27 +88,6 @@ Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------
-What: CONFIG_WIRELESS_OLD_REGULATORY - old static regulatory information
-When: March 2010 / desktop catchup
-
-Why: The old regulatory infrastructure has been replaced with a new one
- which does not require statically defined regulatory domains. We do
- not want to keep static regulatory domains in the kernel due to the
- the dynamic nature of regulatory law and localization. We kept around
- the old static definitions for the regulatory domains of:
-
- * US
- * JP
- * EU
-
- and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
- set. We will remove this option once the standard Linux desktop catches
- up with the new userspace APIs we have implemented.
-
-Who: Luis R. Rodriguez <lrodriguez@atheros.com>
-
----------------------------
-
What: dev->power.power_state
When: July 2007
Why: Broken design for runtime control over driver power states, confusing
@@ -396,14 +375,6 @@ When: 2.6.33
Why: Should be implemented in userspace, policy daemon.
Who: Johannes Berg <johannes@sipsolutions.net>
----------------------------
-
-What: CONFIG_INOTIFY
-When: 2.6.33
-Why: last user (audit) will be converted to the newer more generic
- and more easily maintained fsnotify subsystem
-Who: Eric Paris <eparis@redhat.com>
-
----------------------------
What: lock_policy_rwsem_* and unlock_policy_rwsem_* will not be
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 875d49696b6e..5139b8c9d5af 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -62,6 +62,8 @@ jfs.txt
- info and mount options for the JFS filesystem.
locks.txt
- info on file locking implementations, flock() vs. fcntl(), etc.
+logfs.txt
+ - info on the LogFS flash filesystem.
mandatory-locking.txt
- info on the Linux implementation of Sys V mandatory file locking.
ncpfs.txt
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
new file mode 100644
index 000000000000..6e03917316bd
--- /dev/null
+++ b/Documentation/filesystems/ceph.txt
@@ -0,0 +1,139 @@
+Ceph Distributed File System
+============================
+
+Ceph is a distributed network file system designed to provide good
+performance, reliability, and scalability.
+
+Basic features include:
+
+ * POSIX semantics
+ * Seamless scaling from 1 to many thousands of nodes
+ * High availability and reliability. No single points of failure.
+ * N-way replication of data across storage nodes
+ * Fast recovery from node failures
+ * Automatic rebalancing of data on node addition/removal
+ * Easy deployment: most FS components are userspace daemons
+
+Also,
+ * Flexible snapshots (on any directory)
+ * Recursive accounting (nested files, directories, bytes)
+
+In contrast to cluster filesystems like GFS, OCFS2, and GPFS that rely
+on symmetric access by all clients to shared block devices, Ceph
+separates data and metadata management into independent server
+clusters, similar to Lustre. Unlike Lustre, however, metadata and
+storage nodes run entirely as user space daemons. Storage nodes
+utilize btrfs to store data objects, leveraging its advanced features
+(checksumming, metadata replication, etc.). File data is striped
+across storage nodes in large chunks to distribute workload and
+facilitate high throughputs. When storage nodes fail, data is
+re-replicated in a distributed fashion by the storage nodes themselves
+(with some minimal coordination from a cluster monitor), making the
+system extremely efficient and scalable.
+
+Metadata servers effectively form a large, consistent, distributed
+in-memory cache above the file namespace that is extremely scalable,
+dynamically redistributes metadata in response to workload changes,
+and can tolerate arbitrary (well, non-Byzantine) node failures. The
+metadata server takes a somewhat unconventional approach to metadata
+storage to significantly improve performance for common workloads. In
+particular, inodes with only a single link are embedded in
+directories, allowing entire directories of dentries and inodes to be
+loaded into its cache with a single I/O operation. The contents of
+extremely large directories can be fragmented and managed by
+independent metadata servers, allowing scalable concurrent access.
+
+The system offers automatic data rebalancing/migration when scaling
+from a small cluster of just a few nodes to many hundreds, without
+requiring an administrator carve the data set into static volumes or
+go through the tedious process of migrating data between servers.
+When the file system approaches full, new nodes can be easily added
+and things will "just work."
+
+Ceph includes flexible snapshot mechanism that allows a user to create
+a snapshot on any subdirectory (and its nested contents) in the
+system. Snapshot creation and deletion are as simple as 'mkdir
+.snap/foo' and 'rmdir .snap/foo'.
+
+Ceph also provides some recursive accounting on directories for nested
+files and bytes. That is, a 'getfattr -d foo' on any directory in the
+system will reveal the total number of nested regular files and
+subdirectories, and a summation of all nested file sizes. This makes
+the identification of large disk space consumers relatively quick, as
+no 'du' or similar recursive scan of the file system is required.
+
+
+Mount Syntax
+============
+
+The basic mount syntax is:
+
+ # mount -t ceph monip[:port][,monip2[:port]...]:/[subdir] mnt
+
+You only need to specify a single monitor, as the client will get the
+full list when it connects. (However, if the monitor you specify
+happens to be down, the mount won't succeed.) The port can be left
+off if the monitor is using the default. So if the monitor is at
+1.2.3.4,
+
+ # mount -t ceph 1.2.3.4:/ /mnt/ceph
+
+is sufficient. If /sbin/mount.ceph is installed, a hostname can be
+used instead of an IP address.
+
+
+
+Mount Options
+=============
+
+ ip=A.B.C.D[:N]
+ Specify the IP and/or port the client should bind to locally.
+ There is normally not much reason to do this. If the IP is not
+ specified, the client's IP address is determined by looking at the
+ address it's connection to the monitor originates from.
+
+ wsize=X
+ Specify the maximum write size in bytes. By default there is no
+ maximu. Ceph will normally size writes based on the file stripe
+ size.
+
+ rsize=X
+ Specify the maximum readahead.
+
+ mount_timeout=X
+ Specify the timeout value for mount (in seconds), in the case
+ of a non-responsive Ceph file system. The default is 30
+ seconds.
+
+ rbytes
+ When stat() is called on a directory, set st_size to 'rbytes',
+ the summation of file sizes over all files nested beneath that
+ directory. This is the default.
+
+ norbytes
+ When stat() is called on a directory, set st_size to the
+ number of entries in that directory.
+
+ nocrc
+ Disable CRC32C calculation for data writes. If set, the OSD
+ must rely on TCP's error correction to detect data corruption
+ in the data payload.
+
+ noasyncreaddir
+ Disable client's use its local cache to satisfy readdir
+ requests. (This does not change correctness; the client uses
+ cached metadata only when a lease or capability ensures it is
+ valid.)
+
+
+More Information
+================
+
+For more information on Ceph, see the home page at
+ http://ceph.newdream.net/
+
+The Linux kernel client source tree is available at
+ git://ceph.newdream.net/linux-ceph-client.git
+
+and the source for the full system is at
+ git://ceph.newdream.net/ceph.git
diff --git a/Documentation/filesystems/logfs.txt b/Documentation/filesystems/logfs.txt
new file mode 100644
index 000000000000..e64c94ba401a
--- /dev/null
+++ b/Documentation/filesystems/logfs.txt
@@ -0,0 +1,241 @@
+
+The LogFS Flash Filesystem
+==========================
+
+Specification
+=============
+
+Superblocks
+-----------
+
+Two superblocks exist at the beginning and end of the filesystem.
+Each superblock is 256 Bytes large, with another 3840 Bytes reserved
+for future purposes, making a total of 4096 Bytes.
+
+Superblock locations may differ for MTD and block devices. On MTD the
+first non-bad block contains a superblock in the first 4096 Bytes and
+the last non-bad block contains a superblock in the last 4096 Bytes.
+On block devices, the first 4096 Bytes of the device contain the first
+superblock and the last aligned 4096 Byte-block contains the second
+superblock.
+
+For the most part, the superblocks can be considered read-only. They
+are written only to correct errors detected within the superblocks,
+move the journal and change the filesystem parameters through tunefs.
+As a result, the superblock does not contain any fields that require
+constant updates, like the amount of free space, etc.
+
+Segments
+--------
+
+The space in the device is split up into equal-sized segments.
+Segments are the primary write unit of LogFS. Within each segments,
+writes happen from front (low addresses) to back (high addresses. If
+only a partial segment has been written, the segment number, the
+current position within and optionally a write buffer are stored in
+the journal.
+
+Segments are erased as a whole. Therefore Garbage Collection may be
+required to completely free a segment before doing so.
+
+Journal
+--------
+
+The journal contains all global information about the filesystem that
+is subject to frequent change. At mount time, it has to be scanned
+for the most recent commit entry, which contains a list of pointers to
+all currently valid entries.
+
+Object Store
+------------
+
+All space except for the superblocks and journal is part of the object
+store. Each segment contains a segment header and a number of
+objects, each consisting of the object header and the payload.
+Objects are either inodes, directory entries (dentries), file data
+blocks or indirect blocks.
+
+Levels
+------
+
+Garbage collection (GC) may fail if all data is written
+indiscriminately. One requirement of GC is that data is seperated
+roughly according to the distance between the tree root and the data.
+Effectively that means all file data is on level 0, indirect blocks
+are on levels 1, 2, 3 4 or 5 for 1x, 2x, 3x, 4x or 5x indirect blocks,
+respectively. Inode file data is on level 6 for the inodes and 7-11
+for indirect blocks.
+
+Each segment contains objects of a single level only. As a result,
+each level requires its own seperate segment to be open for writing.
+
+Inode File
+----------
+
+All inodes are stored in a special file, the inode file. Single
+exception is the inode file's inode (master inode) which for obvious
+reasons is stored in the journal instead. Instead of data blocks, the
+leaf nodes of the inode files are inodes.
+
+Aliases
+-------
+
+Writes in LogFS are done by means of a wandering tree. A naïve
+implementation would require that for each write or a block, all
+parent blocks are written as well, since the block pointers have
+changed. Such an implementation would not be very efficient.
+
+In LogFS, the block pointer changes are cached in the journal by means
+of alias entries. Each alias consists of its logical address - inode
+number, block index, level and child number (index into block) - and
+the changed data. Any 8-byte word can be changes in this manner.
+
+Currently aliases are used for block pointers, file size, file used
+bytes and the height of an inodes indirect tree.
+
+Segment Aliases
+---------------
+
+Related to regular aliases, these are used to handle bad blocks.
+Initially, bad blocks are handled by moving the affected segment
+content to a spare segment and noting this move in the journal with a
+segment alias, a simple (to, from) tupel. GC will later empty this
+segment and the alias can be removed again. This is used on MTD only.
+
+Vim
+---
+
+By cleverly predicting the life time of data, it is possible to
+seperate long-living data from short-living data and thereby reduce
+the GC overhead later. Each type of distinc life expectency (vim) can
+have a seperate segment open for writing. Each (level, vim) tupel can
+be open just once. If an open segment with unknown vim is encountered
+at mount time, it is closed and ignored henceforth.
+
+Indirect Tree
+-------------
+
+Inodes in LogFS are similar to FFS-style filesystems with direct and
+indirect block pointers. One difference is that LogFS uses a single
+indirect pointer that can be either a 1x, 2x, etc. indirect pointer.
+A height field in the inode defines the height of the indirect tree
+and thereby the indirection of the pointer.
+
+Another difference is the addressing of indirect blocks. In LogFS,
+the first 16 pointers in the first indirect block are left empty,
+corresponding to the 16 direct pointers in the inode. In ext2 (maybe
+others as well) the first pointer in the first indirect block
+corresponds to logical block 12, skipping the 12 direct pointers.
+So where ext2 is using arithmetic to better utilize space, LogFS keeps
+arithmetic simple and uses compression to save space.
+
+Compression
+-----------
+
+Both file data and metadata can be compressed. Compression for file
+data can be enabled with chattr +c and disabled with chattr -c. Doing
+so has no effect on existing data, but new data will be stored
+accordingly. New inodes will inherit the compression flag of the
+parent directory.
+
+Metadata is always compressed. However, the space accounting ignores
+this and charges for the uncompressed size. Failing to do so could
+result in GC failures when, after moving some data, indirect blocks
+compress worse than previously. Even on a 100% full medium, GC may
+not consume any extra space, so the compression gains are lost space
+to the user.
+
+However, they are not lost space to the filesystem internals. By
+cheating the user for those bytes, the filesystem gained some slack
+space and GC will run less often and faster.
+
+Garbage Collection and Wear Leveling
+------------------------------------
+
+Garbage collection is invoked whenever the number of free segments
+falls below a threshold. The best (known) candidate is picked based
+on the least amount of valid data contained in the segment. All
+remaining valid data is copied elsewhere, thereby invalidating it.
+
+The GC code also checks for aliases and writes then back if their
+number gets too large.
+
+Wear leveling is done by occasionally picking a suboptimal segment for
+garbage collection. If a stale segments erase count is significantly
+lower than the active segments' erase counts, it will be picked. Wear
+leveling is rate limited, so it will never monopolize the device for
+more than one segment worth at a time.
+
+Values for "occasionally", "significantly lower" are compile time
+constants.
+
+Hashed directories
+------------------
+
+To satisfy efficient lookup(), directory entries are hashed and
+located based on the hash. In order to both support large directories
+and not be overly inefficient for small directories, several hash
+tables of increasing size are used. For each table, the hash value
+modulo the table size gives the table index.
+
+Tables sizes are chosen to limit the number of indirect blocks with a
+fully populated table to 0, 1, 2 or 3 respectively. So the first
+table contains 16 entries, the second 512-16, etc.
+
+The last table is special in several ways. First its size depends on
+the effective 32bit limit on telldir/seekdir cookies. Since logfs
+uses the upper half of the address space for indirect blocks, the size
+is limited to 2^31. Secondly the table contains hash buckets with 16
+entries each.
+
+Using single-entry buckets would result in birthday "attacks". At
+just 2^16 used entries, hash collisions would be likely (P >= 0.5).
+My math skills are insufficient to do the combinatorics for the 17x
+collisions necessary to overflow a bucket, but testing showed that in
+10,000 runs the lowest directory fill before a bucket overflow was
+188,057,130 entries with an average of 315,149,915 entries. So for
+directory sizes of up to a million, bucket overflows should be
+virtually impossible under normal circumstances.
+
+With carefully chosen filenames, it is obviously possible to cause an
+overflow with just 21 entries (4 higher tables + 16 entries + 1). So
+there may be a security concern if a malicious user has write access
+to a directory.
+
+Open For Discussion
+===================
+
+Device Address Space
+--------------------
+
+A device address space is used for caching. Both block devices and
+MTD provide functions to either read a single page or write a segment.
+Partial segments may be written for data integrity, but where possible
+complete segments are written for performance on simple block device
+flash media.
+
+Meta Inodes
+-----------
+
+Inodes are stored in the inode file, which is just a regular file for
+most purposes. At umount time, however, the inode file needs to
+remain open until all dirty inodes are written. So
+generic_shutdown_super() may not close this inode, but shouldn't
+complain about remaining inodes due to the inode file either. Same
+goes for mapping inode of the device address space.
+
+Currently logfs uses a hack that essentially copies part of fs/inode.c
+code over. A general solution would be preferred.
+
+Indirect block mapping
+----------------------
+
+With compression, the block device (or mapping inode) cannot be used
+to cache indirect blocks. Some other place is required. Currently
+logfs uses the top half of each inode's address space. The low 8TB
+(on 32bit) are filled with file data, the high 8TB are used for
+indirect blocks.
+
+One problem is that 16TB files created on 64bit systems actually have
+data in the top 8TB. But files >16TB would cause problems anyway, so
+only the limit has changed.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 220cc6376ef8..0d07513a67a6 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -177,7 +177,6 @@ read the file /proc/PID/status:
CapBnd: ffffffffffffffff
voluntary_ctxt_switches: 0
nonvoluntary_ctxt_switches: 1
- Stack usage: 12 kB
This shows you nearly the same information you would get if you viewed it with
the ps command. In fact, ps uses the proc file system to obtain its
@@ -231,7 +230,6 @@ Table 1-2: Contents of the statm files (as of 2.6.30-rc7)
Mems_allowed_list Same as previous, but in "list format"
voluntary_ctxt_switches number of voluntary context switches
nonvoluntary_ctxt_switches number of non voluntary context switches
- Stack usage: stack usage high water mark (round up to page size)
..............................................................................
Table 1-3: Contents of the statm files (as of 2.6.8-rc3)
diff --git a/Documentation/hwlat_detector.txt b/Documentation/hwlat_detector.txt
new file mode 100644
index 000000000000..cb61516483d3
--- /dev/null
+++ b/Documentation/hwlat_detector.txt
@@ -0,0 +1,64 @@
+Introduction:
+-------------
+
+The module hwlat_detector is a special purpose kernel module that is used to
+detect large system latencies induced by the behavior of certain underlying
+hardware or firmware, independent of Linux itself. The code was developed
+originally to detect SMIs (System Management Interrupts) on x86 systems,
+however there is nothing x86 specific about this patchset. It was
+originally written for use by the "RT" patch since the Real Time
+kernel is highly latency sensitive.
+
+SMIs are usually not serviced by the Linux kernel, which typically does not
+even know that they are occuring. SMIs are instead are set up by BIOS code
+and are serviced by BIOS code, usually for "critical" events such as
+management of thermal sensors and fans. Sometimes though, SMIs are used for
+other tasks and those tasks can spend an inordinate amount of time in the
+handler (sometimes measured in milliseconds). Obviously this is a problem if
+you are trying to keep event service latencies down in the microsecond range.
+
+The hardware latency detector works by hogging all of the cpus for configurable
+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
+for some period, then looking for gaps in the TSC data. Any gap indicates a
+time when the polling was interrupted and since the machine is stopped and
+interrupts turned off the only thing that could do that would be an SMI.
+
+Note that the SMI detector should *NEVER* be used in a production environment.
+It is intended to be run manually to determine if the hardware platform has a
+problem with long system firmware service routines.
+
+Usage:
+------
+
+Loading the module hwlat_detector passing the parameter "enabled=1" (or by
+setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only
+step required to start the hwlat_detector. It is possible to redefine the
+threshold in microseconds (us) above which latency spikes will be taken
+into account (parameter "threshold=").
+
+Example:
+
+ # modprobe hwlat_detector enabled=1 threshold=100
+
+After the module is loaded, it creates a directory named "hwlat_detector" under
+the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary
+to have debugfs mounted, which might be on /sys/debug on your system.
+
+The /debug/hwlat_detector interface contains the following files:
+
+count - number of latency spikes observed since last reset
+enable - a global enable/disable toggle (0/1), resets count
+max - maximum hardware latency actually observed (usecs)
+sample - a pipe from which to read current raw sample data
+ in the format <timestamp> <latency observed usecs>
+ (can be opened O_NONBLOCK for a single sample)
+threshold - minimum latency value to be considered (usecs)
+width - time period to sample with CPUs held (usecs)
+ must be less than the total window size (enforced)
+window - total period of sampling, width being inside (usecs)
+
+By default we will set width to 500,000 and window to 1,000,000, meaning that
+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
+observe any latencies that exceed the threshold (initially 100 usecs),
+then we write to a global sample ring buffer of 8K samples, which is
+consumed by reading from the "sample" (pipe) debugfs file interface.
diff --git a/Documentation/hwmon/amc6821 b/Documentation/hwmon/amc6821
new file mode 100644
index 000000000000..ced8359c50f8
--- /dev/null
+++ b/Documentation/hwmon/amc6821
@@ -0,0 +1,102 @@
+Kernel driver amc6821
+=====================
+
+Supported chips:
+ Texas Instruments AMC6821
+ Prefix: 'amc6821'
+ Addresses scanned: 0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e, 0x4c, 0x4d, 0x4e
+ Datasheet: http://focus.ti.com/docs/prod/folders/print/amc6821.html
+
+Authors:
+ Tomaz Mertelj <tomaz.mertelj@guest.arnes.si>
+
+
+Description
+-----------
+
+This driver implements support for the Texas Instruments amc6821 chip.
+The chip has one on-chip and one remote temperature sensor and one pwm fan
+regulator.
+The pwm can be controlled either from software or automatically.
+
+The driver provides the following sensor accesses in sysfs:
+
+temp1_input ro on-chip temperature
+temp1_min rw "
+temp1_max rw "
+temp1_crit rw "
+temp1_min_alarm ro "
+temp1_max_alarm ro "
+temp1_crit_alarm ro "
+
+temp2_input ro remote temperature
+temp2_min rw "
+temp2_max rw "
+temp2_crit rw "
+temp2_min_alarm ro "
+temp2_max_alarm ro "
+temp2_crit_alarm ro "
+temp2_fault ro "
+
+fan1_input ro tachometer speed
+fan1_min rw "
+fan1_max rw "
+fan1_fault ro "
+fan1_div rw Fan divisor can be either 2 or 4.
+
+pwm1 rw pwm1
+pwm1_enable rw regulator mode, 1=open loop, 2=fan controlled
+ by remote temperature, 3=fan controlled by
+ combination of the on-chip temperature and
+ remote-sensor temperature,
+pwm1_auto_channels_temp ro 1 if pwm_enable==2, 3 if pwm_enable==3
+pwm1_auto_point1_pwm ro Hardwired to 0, shared for both
+ temperature channels.
+pwm1_auto_point2_pwm rw This value is shared for both temperature
+ channels.
+pwm1_auto_point3_pwm rw Hardwired to 255, shared for both
+ temperature channels.
+
+temp1_auto_point1_temp ro Hardwired to temp2_auto_point1_temp
+ which is rw. Below this temperature fan stops.
+temp1_auto_point2_temp rw The low-temperature limit of the proportional
+ range. Below this temperature
+ pwm1 = pwm1_auto_point2_pwm. It can go from
+ 0 degree C to 124 degree C in steps of
+ 4 degree C. Read it out after writing to get
+ the actual value.
+temp1_auto_point3_temp rw Above this temperature fan runs at maximum
+ speed. It can go from temp1_auto_point2_temp.
+ It can only have certain discrete values
+ which depend on temp1_auto_point2_temp and
+ pwm1_auto_point2_pwm. Read it out after
+ writing to get the actual value.
+
+temp2_auto_point1_temp rw Must be between 0 degree C and 63 degree C and
+ it defines the passive cooling temperature.
+ Below this temperature the fan stops in
+ the closed loop mode.
+temp2_auto_point2_temp rw The low-temperature limit of the proportional
+ range. Below this temperature
+ pwm1 = pwm1_auto_point2_pwm. It can go from
+ 0 degree C to 124 degree C in steps
+ of 4 degree C.
+
+temp2_auto_point3_temp rw Above this temperature fan runs at maximum
+ speed. It can only have certain discrete
+ values which depend on temp2_auto_point2_temp
+ and pwm1_auto_point2_pwm. Read it out after
+ writing to get actual value.
+
+
+Module parameters
+-----------------
+
+If your board has a BIOS that initializes the amc6821 correctly, you should
+load the module with: init=0.
+
+If your board BIOS doesn't initialize the chip, or you want
+different settings, you can set the following parameters:
+init=1,
+pwminv: 0 default pwm output, 1 inverts pwm output.
+
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index a7a18d453a51..6526eee525a6 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -3,8 +3,8 @@ Kernel driver k10temp
Supported chips:
* AMD Family 10h processors:
- Socket F: Quad-Core/Six-Core/Embedded Opteron
- Socket AM2+: Opteron, Phenom (II) X3/X4
+ Socket F: Quad-Core/Six-Core/Embedded Opteron (but see below)
+ Socket AM2+: Quad-Core Opteron, Phenom (II) X3/X4, Athlon X2 (but see below)
Socket AM3: Quad-Core Opteron, Athlon/Phenom II X2/X3/X4, Sempron II
Socket S1G3: Athlon II, Sempron, Turion II
* AMD Family 11h processors:
@@ -36,10 +36,15 @@ Description
This driver permits reading of the internal temperature sensor of AMD
Family 10h and 11h processors.
-All these processors have a sensor, but on older revisions of Family 10h
-processors, the sensor may return inconsistent values (erratum 319). The
-driver will refuse to load on these revisions unless you specify the
-"force=1" module parameter.
+All these processors have a sensor, but on those for Socket F or AM2+,
+the sensor may return inconsistent values (erratum 319). The driver
+will refuse to load on these revisions unless you specify the "force=1"
+module parameter.
+
+Due to technical reasons, the driver can detect only the mainboard's
+socket type, not the processor's actual capabilities. Therefore, if you
+are using an AM3 processor on an AM2+ mainboard, you can safely use the
+"force=1" parameter.
There is one temperature measurement value, available as temp1_input in
sysfs. It is measured in degrees Celsius with a resolution of 1/8th degree.
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 947374977ca5..e07e5b5346ea 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -56,10 +56,11 @@ Following this convention is good because:
(5) When following the convention, the driver code can use generic
code to copy the parameters between user and kernel space.
-This table lists ioctls visible from user land for Linux/i386. It contains
-most drivers up to 2.3.14, but I know I am missing some.
+This table lists ioctls visible from user land for Linux/x86. It contains
+most drivers up to 2.6.31, but I know I am missing some. There has been
+no attempt to list non-X86 architectures or ioctls from drivers/staging/.
-Code Seq# Include File Comments
+Code Seq#(hex) Include File Comments
========================================================
0x00 00-1F linux/fs.h conflict!
0x00 00-1F scsi/scsi_ioctl.h conflict!
@@ -69,119 +70,229 @@ Code Seq# Include File Comments
0x03 all linux/hdreg.h
0x04 D2-DC linux/umsdos_fs.h Dead since 2.6.11, but don't reuse these.
0x06 all linux/lp.h
-0x09 all linux/md.h
+0x09 all linux/raid/md_u.h
+0x10 00-0F drivers/char/s390/vmcp.h
0x12 all linux/fs.h
linux/blkpg.h
0x1b all InfiniBand Subsystem <http://www.openib.org/>
0x20 all drivers/cdrom/cm206.h
0x22 all scsi/sg.h
'#' 00-3F IEEE 1394 Subsystem Block for the entire subsystem
+'$' 00-0F linux/perf_counter.h, linux/perf_event.h
'1' 00-1F <linux/timepps.h> PPS kit from Ulrich Windl
<ftp://ftp.de.kernel.org/pub/linux/daemons/ntp/PPS/>
+'2' 01-04 linux/i2o.h
+'3' 00-0F drivers/s390/char/raw3270.h conflict!
+'3' 00-1F linux/suspend_ioctls.h conflict!
+ and kernel/power/user.c
'8' all SNP8023 advanced NIC card
<mailto:mcr@solidum.com>
-'A' 00-1F linux/apm_bios.h
+'@' 00-0F linux/radeonfb.h conflict!
+'@' 00-0F drivers/video/aty/aty128fb.c conflict!
+'A' 00-1F linux/apm_bios.h conflict!
+'A' 00-0F linux/agpgart.h conflict!
+ and drivers/char/agp/compat_ioctl.h
+'A' 00-7F sound/asound.h conflict!
+'B' 00-1F linux/cciss_ioctl.h conflict!
+'B' 00-0F include/linux/pmu.h conflict!
'B' C0-FF advanced bbus
<mailto:maassen@uni-freiburg.de>
-'C' all linux/soundcard.h
+'C' all linux/soundcard.h conflict!
+'C' 01-2F linux/capi.h conflict!
+'C' F0-FF drivers/net/wan/cosa.h conflict!
'D' all arch/s390/include/asm/dasd.h
-'E' all linux/input.h
-'F' all linux/fb.h
-'H' all linux/hiddev.h
-'I' all linux/isdn.h
+'D' 40-5F drivers/scsi/dpt/dtpi_ioctl.h
+'D' 05 drivers/scsi/pmcraid.h
+'E' all linux/input.h conflict!
+'E' 00-0F xen/evtchn.h conflict!
+'F' all linux/fb.h conflict!
+'F' 01-02 drivers/scsi/pmcraid.h conflict!
+'F' 20 drivers/video/fsl-diu-fb.h conflict!
+'F' 20 drivers/video/intelfb/intelfb.h conflict!
+'F' 20 linux/ivtvfb.h conflict!
+'F' 20 linux/matroxfb.h conflict!
+'F' 20 drivers/video/aty/atyfb_base.c conflict!
+'F' 00-0F video/da8xx-fb.h conflict!
+'F' 80-8F linux/arcfb.h conflict!
+'F' DD video/sstfb.h conflict!
+'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict!
+'G' 00-0F linux/gigaset_dev.h conflict!
+'H' 00-7F linux/hiddev.h conflict!
+'H' 00-0F linux/hidraw.h conflict!
+'H' 00-0F sound/asound.h conflict!
+'H' 20-40 sound/asound_fm.h conflict!
+'H' 80-8F sound/sfnt_info.h conflict!
+'H' 10-8F sound/emu10k1.h conflict!
+'H' 10-1F sound/sb16_csp.h conflict!
+'H' 10-1F sound/hda_hwdep.h conflict!
+'H' 40-4F sound/hdspm.h conflict!
+'H' 40-4F sound/hdsp.h conflict!
+'H' 90 sound/usb/usx2y/usb_stream.h
+'H' C0-F0 net/bluetooth/hci.h conflict!
+'H' C0-DF net/bluetooth/hidp/hidp.h conflict!
+'H' C0-DF net/bluetooth/cmtp/cmtp.h conflict!
+'H' C0-DF net/bluetooth/bnep/bnep.h conflict!
+'I' all linux/isdn.h conflict!
+'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict!
+'I' 40-4F linux/mISDNif.h conflict!
'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
-'L' 00-1F linux/loop.h
-'L' 20-2F driver/usb/misc/vstusb.h
+'L' 00-1F linux/loop.h conflict!
+'L' 10-1F drivers/scsi/mpt2sas/mpt2sas_ctl.h conflict!
+'L' 20-2F linux/usb/vstusb.h
'L' E0-FF linux/ppdd.h encrypted disk device driver
<http://linux01.gwdg.de/~alatham/ppdd.html>
-'M' all linux/soundcard.h
+'M' all linux/soundcard.h conflict!
+'M' 01-16 mtd/mtd-abi.h conflict!
+ and drivers/mtd/mtdchar.c
+'M' 01-03 drivers/scsi/megaraid/megaraid_sas.h
+'M' 00-0F drivers/video/fsl-diu-fb.h conflict!
'N' 00-1F drivers/usb/scanner.h
-'O' 00-02 include/mtd/ubi-user.h UBI
-'P' all linux/soundcard.h
+'O' 00-06 mtd/ubi-user.h UBI
+'P' all linux/soundcard.h conflict!
+'P' 60-6F sound/sscape_ioctl.h conflict!
+'P' 00-0F drivers/usb/class/usblp.c conflict!
'Q' all linux/soundcard.h
-'R' 00-1F linux/random.h
+'R' 00-1F linux/random.h conflict!
+'R' 01 linux/rfkill.h conflict!
+'R' 01-0F media/rds.h conflict!
+'R' C0-DF net/bluetooth/rfcomm.h
'S' all linux/cdrom.h conflict!
'S' 80-81 scsi/scsi_ioctl.h conflict!
'S' 82-FF scsi/scsi.h conflict!
+'S' 00-7F sound/asequencer.h conflict!
'T' all linux/soundcard.h conflict!
+'T' 00-AF sound/asound.h conflict!
'T' all arch/x86/include/asm/ioctls.h conflict!
-'U' 00-EF linux/drivers/usb/usb.h
-'V' all linux/vt.h
+'T' C0-DF linux/if_tun.h conflict!
+'U' all sound/asound.h conflict!
+'U' 00-0F drivers/media/video/uvc/uvcvideo.h conflict!
+'U' 00-CF linux/uinput.h conflict!
+'U' 00-EF linux/usbdevice_fs.h
+'U' C0-CF drivers/bluetooth/hci_uart.h
+'V' all linux/vt.h conflict!
+'V' all linux/videodev2.h conflict!
+'V' C0 linux/ivtvfb.h conflict!
+'V' C0 linux/ivtv.h conflict!
+'V' C0 media/davinci/vpfe_capture.h conflict!
+'V' C0 media/si4713.h conflict!
+'V' C0-CF drivers/media/video/mxb.h conflict!
'W' 00-1F linux/watchdog.h conflict!
'W' 00-1F linux/wanrouter.h conflict!
-'X' all linux/xfs_fs.h
+'W' 00-3F sound/asound.h conflict!
+'X' all fs/xfs/xfs_fs.h conflict!
+ and fs/xfs/linux-2.6/xfs_ioctl32.h
+ and include/linux/falloc.h
+ and linux/fs.h
+'X' all fs/ocfs2/ocfs_fs.h conflict!
+'X' 01 linux/pktcdvd.h conflict!
'Y' all linux/cyclades.h
-'[' 00-07 linux/usb/usbtmc.h USB Test and Measurement Devices
+'Z' 14-15 drivers/message/fusion/mptctl.h
+'[' 00-07 linux/usb/tmc.h USB Test and Measurement Devices
<mailto:gregkh@suse.de>
-'a' all ATM on linux
+'a' all linux/atm*.h, linux/sonet.h ATM on linux
<http://lrcwww.epfl.ch/linux-atm/magic.html>
-'b' 00-FF bit3 vme host bridge
+'b' 00-FF conflict! bit3 vme host bridge
<mailto:natalia@nikhefk.nikhef.nl>
+'b' 00-0F media/bt819.h conflict!
+'c' all linux/cm4000_cs.h conflict!
'c' 00-7F linux/comstats.h conflict!
'c' 00-7F linux/coda.h conflict!
-'c' 80-9F arch/s390/include/asm/chsc.h
-'c' A0-AF arch/x86/include/asm/msr.h
+'c' 00-1F linux/chio.h conflict!
+'c' 80-9F arch/s390/include/asm/chsc.h conflict!
+'c' A0-AF arch/x86/include/asm/msr.h conflict!
'd' 00-FF linux/char/drm/drm/h conflict!
+'d' 02-40 pcmcia/ds.h conflict!
+'d' 10-3F drivers/media/video/dabusb.h conflict!
+'d' C0-CF drivers/media/video/saa7191.h conflict!
'd' F0-FF linux/digi1.h
'e' all linux/digi1.h conflict!
-'e' 00-1F net/irda/irtty.h conflict!
-'f' 00-1F linux/ext2_fs.h
-'h' 00-7F Charon filesystem
+'e' 00-1F drivers/net/irda/irtty-sir.h conflict!
+'f' 00-1F linux/ext2_fs.h conflict!
+'f' 00-1F linux/ext3_fs.h conflict!
+'f' 00-0F fs/jfs/jfs_dinode.h conflict!
+'f' 00-0F fs/ext4/ext4.h conflict!
+'f' 00-0F linux/fs.h conflict!
+'f' 00-0F fs/ocfs2/ocfs2_fs.h conflict!
+'g' 00-0F linux/usb/gadgetfs.h
+'g' 20-2F linux/usb/g_printer.h
+'h' 00-7F conflict! Charon filesystem
<mailto:zapman@interlan.net>
-'i' 00-3F linux/i2o.h
+'h' 00-1F linux/hpet.h conflict!
+'i' 00-3F linux/i2o-dev.h conflict!
+'i' 0B-1F linux/ipmi.h conflict!
+'i' 80-8F linux/i8k.h
'j' 00-3F linux/joystick.h
+'k' 00-0F linux/spi/spidev.h conflict!
+'k' 00-05 video/kyro.h conflict!
'l' 00-3F linux/tcfs_fs.h transparent cryptographic file system
<http://mikonos.dia.unisa.it/tcfs>
'l' 40-7F linux/udf_fs_i.h in development:
<http://sourceforge.net/projects/linux-udf/>
-'m' 00-09 linux/mmtimer.h
+'m' 00-09 linux/mmtimer.h conflict!
'm' all linux/mtio.h conflict!
'm' all linux/soundcard.h conflict!
'm' all linux/synclink.h conflict!
+'m' 00-19 drivers/message/fusion/mptctl.h conflict!
+'m' 00 drivers/scsi/megaraid/megaraid_ioctl.h conflict!
'm' 00-1F net/irda/irmod.h conflict!
-'n' 00-7F linux/ncp_fs.h
+'n' 00-7F linux/ncp_fs.h and fs/ncpfs/ioctl.c
'n' 80-8F linux/nilfs2_fs.h NILFS2
-'n' E0-FF video/matrox.h matroxfb
+'n' E0-FF linux/matroxfb.h matroxfb
'o' 00-1F fs/ocfs2/ocfs2_fs.h OCFS2
-'o' 00-03 include/mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps)
-'o' 40-41 include/mtd/ubi-user.h UBI
-'o' 01-A1 include/linux/dvb/*.h DVB
+'o' 00-03 mtd/ubi-user.h conflict! (OCFS2 and UBI overlaps)
+'o' 40-41 mtd/ubi-user.h UBI
+'o' 01-A1 linux/dvb/*.h DVB
'p' 00-0F linux/phantom.h conflict! (OpenHaptics needs this)
+'p' 00-1F linux/rtc.h conflict!
'p' 00-3F linux/mc146818rtc.h conflict!
'p' 40-7F linux/nvram.h
-'p' 80-9F user-space parport
+'p' 80-9F linux/ppdev.h user-space parport
<mailto:tim@cyberelk.net>
-'p' a1-a4 linux/pps.h LinuxPPS
+'p' A1-A4 linux/pps.h LinuxPPS
<mailto:giometti@linux.it>
'q' 00-1F linux/serio.h
-'q' 80-FF Internet PhoneJACK, Internet LineJACK
- <http://www.quicknet.net>
-'r' 00-1F linux/msdos_fs.h
+'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK
+ linux/ixjuser.h <http://www.quicknet.net>
+'r' 00-1F linux/msdos_fs.h and fs/fat/dir.c
's' all linux/cdk.h
't' 00-7F linux/if_ppp.h
't' 80-8F linux/isdn_ppp.h
+'t' 90 linux/toshiba.h
'u' 00-1F linux/smb_fs.h
-'v' 00-1F linux/ext2_fs.h conflict!
'v' all linux/videodev.h conflict!
+'v' 00-1F linux/ext2_fs.h conflict!
+'v' 00-1F linux/fs.h conflict!
+'v' 00-0F linux/sonypi.h conflict!
+'v' C0-CF drivers/media/video/ov511.h conflict!
+'v' C0-DF media/pwc-ioctl.h conflict!
+'v' C0-FF linux/meye.h conflict!
+'v' C0-CF drivers/media/video/zoran/zoran.h conflict!
+'v' D0-DF drivers/media/video/cpia2/cpia2dev.h conflict!
'w' all CERN SCI driver
'y' 00-1F packet based user level communications
<mailto:zapman@interlan.net>
-'z' 00-3F CAN bus card
+'z' 00-3F CAN bus card conflict!
<mailto:hdstich@connectu.ulm.circular.de>
-'z' 40-7F CAN bus card
+'z' 40-7F CAN bus card conflict!
<mailto:oe@port.de>
+'z' 10-4F drivers/s390/crypto/zcrypt_api.h conflict!
0x80 00-1F linux/fb.h
0x81 00-1F linux/videotext.h
+0x88 00-3F media/ovcamchip.h
0x89 00-06 arch/x86/include/asm/sockios.h
0x89 0B-DF linux/sockios.h
0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
+0x89 E0-EF linux/dn.h PROTOPRIVATE range
0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range
0x8B all linux/wireless.h
0x8C 00-3F WiNRADiO driver
<http://www.proximity.com.au/~brian/winradio/>
0x90 00 drivers/cdrom/sbpcd.h
+0x92 00-0F drivers/usb/mon/mon_bin.c
0x93 60-7F linux/auto_fs.h
+0x94 all fs/btrfs/ioctl.h
+0x97 00-7F fs/ceph/ioctl.h Ceph file system
0x99 00-0F 537-Addinboard driver
<mailto:buk@buks.ipn.de>
0xA0 all linux/sdp/sdp.h Industrial Device Project
@@ -192,17 +303,22 @@ Code Seq# Include File Comments
0xAB 00-1F linux/nbd.h
0xAC 00-1F linux/raw.h
0xAD 00 Netfilter device in development:
- <mailto:rusty@rustcorp.com.au>
+ <mailto:rusty@rustcorp.com.au>
0xAE all linux/kvm.h Kernel-based Virtual Machine
<mailto:kvm@vger.kernel.org>
0xB0 all RATIO devices in development:
<mailto:vgo@ratio.de>
0xB1 00-1F PPPoX <mailto:mostrows@styx.uwaterloo.ca>
+0xC0 00-0F linux/usb/iowarrior.h
0xCB 00-1F CBM serial IEC bus in development:
<mailto:michael.klein@puffin.lb.shuttle.de>
+0xCD 01 linux/reiserfs_fs.h
+0xCF 02 fs/cifs/ioctl.c
+0xDB 00-0F drivers/char/mwave/mwavepub.h
0xDD 00-3F ZFCP device driver see drivers/s390/scsi/
<mailto:aherrman@de.ibm.com>
-0xF3 00-3F video/sisfb.h sisfb (in development)
+0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development)
<mailto:thomas@winischhofer.net>
0xF4 00-1F video/mbxfb.h mbxfb
<mailto:raph@8d.com>
+0xFD all linux/dm-ioctl.h
diff --git a/Documentation/kernel-doc-nano-HOWTO.txt b/Documentation/kernel-doc-nano-HOWTO.txt
index 348b9e5e28fc..27a52b35d55b 100644
--- a/Documentation/kernel-doc-nano-HOWTO.txt
+++ b/Documentation/kernel-doc-nano-HOWTO.txt
@@ -214,11 +214,13 @@ The format of the block comment is like this:
* (section header: (section description)? )*
(*)?*/
-The short function description ***cannot be multiline***, but the other
-descriptions can be (and they can contain blank lines). If you continue
-that initial short description onto a second line, that second line will
-appear further down at the beginning of the description section, which is
-almost certainly not what you had in mind.
+All "description" text can span multiple lines, although the
+function_name & its short description are traditionally on a single line.
+Description text may also contain blank lines (i.e., lines that contain
+only a "*").
+
+"section header:" names must be unique per function (or struct,
+union, typedef, enum).
Avoid putting a spurious blank line after the function name, or else the
description will be repeated!
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d45602886..03e3d09b0f2e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -341,6 +341,11 @@ and is between 256 and 4096 characters. It is defined in the file
not play well with APC CPU idle - disable it if you have
APC and your system crashes randomly.
+ apei.hest_disable= [ACPI]
+ Disable Hardware Error Source Table (HEST) support,
+ corresponding firmware-first mode error processing
+ logic will be disabled.
+
apic= [APIC,X86-32] Advanced Programmable Interrupt Controller
Change the output verbosity whilst booting
Format: { quiet (default) | verbose | debug }
@@ -1103,9 +1108,11 @@ and is between 256 and 4096 characters. It is defined in the file
zone if it does not.
kgdboc= [HW] kgdb over consoles.
- Requires a tty driver that supports console polling.
- (only serial supported for now)
- Format: <serial_device>[,baud]
+ Requires a tty driver that supports console polling,
+ or a supported polling keyboard driver (non-usb).
+ Serial only format: <serial_device>[,baud]
+ keyboard only format: kbd
+ keyboard and serial format: kbd,<serial_device>[,baud]
kmac= [MIPS] korina ethernet MAC address.
Configure the RouterBoard 532 series on-chip
diff --git a/Documentation/kvm/api.txt b/Documentation/kvm/api.txt
index 2811e452f756..c6416a398163 100644
--- a/Documentation/kvm/api.txt
+++ b/Documentation/kvm/api.txt
@@ -23,12 +23,12 @@ of a virtual machine. The ioctls belong to three classes
Only run vcpu ioctls from the same thread that was used to create the
vcpu.
-2. File descritpors
+2. File descriptors
The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
-handle will create a VM file descripror which can be used to issue VM
+handle will create a VM file descriptor which can be used to issue VM
ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
and return a file descriptor pointing to it. Finally, ioctls on a vcpu
fd can be used to control the vcpu, including the important task of
@@ -643,7 +643,7 @@ Type: vm ioctl
Parameters: struct kvm_clock_data (in)
Returns: 0 on success, -1 on error
-Sets the current timestamp of kvmclock to the valued specific in its parameter.
+Sets the current timestamp of kvmclock to the value specified in its parameter.
In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios
such as migration.
@@ -795,11 +795,11 @@ Unused.
__u64 data_offset; /* relative to kvm_run start */
} io;
-If exit_reason is KVM_EXIT_IO_IN or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_IO, then the vcpu has
executed a port I/O instruction which could not be satisfied by kvm.
data_offset describes where the data is located (KVM_EXIT_IO_OUT) or
where kvm expects application code to place the data for the next
-KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a patcked array.
+KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array.
struct {
struct kvm_debug_exit_arch arch;
@@ -815,7 +815,7 @@ Unused.
__u8 is_write;
} mmio;
-If exit_reason is KVM_EXIT_MMIO or KVM_EXIT_IO_OUT, then the vcpu has
+If exit_reason is KVM_EXIT_MMIO, then the vcpu has
executed a memory-mapped I/O instruction which could not be satisfied
by kvm. The 'data' member contains the written data if 'is_write' is
true, and should be filled by application code otherwise.
diff --git a/Documentation/lguest/lguest.c b/Documentation/lguest/lguest.c
index 42208511b5c0..3119f5db75bd 100644
--- a/Documentation/lguest/lguest.c
+++ b/Documentation/lguest/lguest.c
@@ -34,7 +34,6 @@
#include <sys/uio.h>
#include <termios.h>
#include <getopt.h>
-#include <zlib.h>
#include <assert.h>
#include <sched.h>
#include <limits.h>
diff --git a/Documentation/networking/3c509.txt b/Documentation/networking/3c509.txt
index 0643e3b7168c..3c45d5dcd63b 100644
--- a/Documentation/networking/3c509.txt
+++ b/Documentation/networking/3c509.txt
@@ -48,11 +48,11 @@ for LILO parameters for doing this:
This configures the first found 3c509 card for IRQ 10, base I/O 0x310, and
transceiver type 3 (10base2). The flag "0x3c509" must be set to avoid conflicts
with other card types when overriding the I/O address. When the driver is
-loaded as a module, only the IRQ and transceiver setting may be overridden.
-For example, setting two cards to 10base2/IRQ10 and AUI/IRQ11 is done by using
-the xcvr and irq module options:
+loaded as a module, only the IRQ may be overridden. For example,
+setting two cards to IRQ10 and IRQ11 is done by using the irq module
+option:
- options 3c509 xcvr=3,1 irq=10,11
+ options 3c509 irq=10,11
(2) Full-duplex mode
@@ -77,6 +77,8 @@ operation.
itself full-duplex capable. This is almost certainly one of two things: a full-
duplex-capable Ethernet switch (*not* a hub), or a full-duplex-capable NIC on
another system that's connected directly to the 3c509B via a crossover cable.
+
+Full-duplex mode can be enabled using 'ethtool'.
/////Extremely important caution concerning full-duplex mode/////
Understand that the 3c509B's hardware's full-duplex support is much more
@@ -113,6 +115,8 @@ This insured that merely upgrading the driver from an earlier version would
never automatically enable full-duplex mode in an existing installation;
it must always be explicitly enabled via one of these code in order to be
activated.
+
+The transceiver type can be changed using 'ethtool'.
(4a) Interpretation of error messages and common problems
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 006b39dec87d..c532884f4fec 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -692,6 +692,25 @@ proxy_arp - BOOLEAN
conf/{all,interface}/proxy_arp is set to TRUE,
it will be disabled otherwise
+proxy_arp_pvlan - BOOLEAN
+ Private VLAN proxy arp.
+ Basically allow proxy arp replies back to the same interface
+ (from which the ARP request/solicitation was received).
+
+ This is done to support (ethernet) switch features, like RFC
+ 3069, where the individual ports are NOT allowed to
+ communicate with each other, but they are allowed to talk to
+ the upstream router. As described in RFC 3069, it is possible
+ to allow these hosts to communicate through the upstream
+ router by proxy_arp'ing. Don't need to be used together with
+ proxy_arp.
+
+ This technology is known by different names:
+ In RFC 3069 it is called VLAN Aggregation.
+ Cisco and Allied Telesyn call it Private VLAN.
+ Hewlett-Packard call it Source-Port filtering or port-isolation.
+ Ericsson call it MAC-Forced Forwarding (RFC Draft).
+
shared_media - BOOLEAN
Send(router) or accept(host) RFC1620 shared media redirects.
Overrides ip_secure_redirects.
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
new file mode 100755
index 000000000000..19015de6725f
--- /dev/null
+++ b/Documentation/networking/ixgbevf.txt
@@ -0,0 +1,90 @@
+Linux* Base Driver for Intel(R) Network Connection
+==================================================
+
+November 24, 2009
+
+Contents
+========
+
+- In This Release
+- Identifying Your Adapter
+- Known Issues/Troubleshooting
+- Support
+
+In This Release
+===============
+
+This file describes the ixgbevf Linux* Base Driver for Intel Network
+Connection.
+
+The ixgbevf driver supports 82599-based virtual function devices that can only
+be activated on kernels with CONFIG_PCI_IOV enabled.
+
+The ixgbevf driver supports virtual functions generated by the ixgbe driver
+with a max_vfs value of 1 or greater.
+
+The guest OS loading the ixgbevf driver must support MSI-X interrupts.
+
+VLANs: There is a limit of a total of 32 shared VLANs to 1 or more VFs.
+
+Identifying Your Adapter
+========================
+
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
+
+ http://support.intel.com/support/network/sb/CS-008441.htm
+
+Known Issues/Troubleshooting
+============================
+
+ Unloading Physical Function (PF) Driver Causes System Reboots When VM is
+ Running and VF is Loaded on the VM
+ ------------------------------------------------------------------------
+ Do not unload the PF driver (ixgbe) while VFs are assigned to guests.
+
+Support
+=======
+
+For general information, go to the Intel support website at:
+
+ http://support.intel.com
+
+or the Intel Wired Networking project hosted by Sourceforge at:
+
+ http://sourceforge.net/projects/e1000
+
+If an issue is identified with the released source code on the supported
+kernel with a supported adapter, email the specific information related
+to the issue to e1000-devel@lists.sf.net
+
+License
+=======
+
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2009 Intel Corporation.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms and conditions of the GNU General Public License,
+version 2, as published by the Free Software Foundation.
+
+This program is distributed in the hope it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+The full GNU General Public License is included in this distribution in
+the file called "COPYING".
+
+Trademarks
+==========
+
+Intel, Itanium, and Pentium are trademarks or registered trademarks of
+Intel Corporation or its subsidiaries in the United States and other
+countries.
+
+* Other names and brands may be claimed as the property of others.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
index ee31369e9e5b..9551622d0a7b 100644
--- a/Documentation/networking/regulatory.txt
+++ b/Documentation/networking/regulatory.txt
@@ -188,3 +188,27 @@ Then in some part of your code after your wiphy has been registered:
&mydriver_jp_regdom.reg_rules[i],
sizeof(struct ieee80211_reg_rule));
regulatory_struct_hint(rd);
+
+Statically compiled regulatory database
+---------------------------------------
+
+In most situations the userland solution using CRDA as described
+above is the preferred solution. However in some cases a set of
+rules built into the kernel itself may be desirable. To account
+for this situation, a configuration option has been provided
+(i.e. CONFIG_CFG80211_INTERNAL_REGDB). With this option enabled,
+the wireless database information contained in net/wireless/db.txt is
+used to generate a data structure encoded in net/wireless/regdb.c.
+That option also enables code in net/wireless/reg.c which queries
+the data in regdb.c as an alternative to using CRDA.
+
+The file net/wireless/db.txt should be kept up-to-date with the db.txt
+file available in the git repository here:
+
+ git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
+
+Again, most users in most situations should be using the CRDA package
+provided with their distribution, and in most other situations users
+should be building and using CRDA on their own rather than using
+this option. If you are not absolutely sure that you should be using
+CONFIG_CFG80211_INTERNAL_REGDB then _DO_NOT_USE_IT_.
diff --git a/Documentation/networking/timestamping/timestamping.c b/Documentation/networking/timestamping/timestamping.c
index a7936fe8444a..bab619a48214 100644
--- a/Documentation/networking/timestamping/timestamping.c
+++ b/Documentation/networking/timestamping/timestamping.c
@@ -370,7 +370,7 @@ int main(int argc, char **argv)
}
sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP);
- if (socket < 0)
+ if (sock < 0)
bail("socket");
memset(&device, 0, sizeof(device));
diff --git a/Documentation/powerpc/dts-bindings/fsl/can.txt b/Documentation/powerpc/dts-bindings/fsl/can.txt
new file mode 100644
index 000000000000..2fa4fcd38fd6
--- /dev/null
+++ b/Documentation/powerpc/dts-bindings/fsl/can.txt
@@ -0,0 +1,53 @@
+CAN Device Tree Bindings
+------------------------
+
+(c) 2006-2009 Secret Lab Technologies Ltd
+Grant Likely <grant.likely@secretlab.ca>
+
+fsl,mpc5200-mscan nodes
+-----------------------
+In addition to the required compatible-, reg- and interrupt-properties, you can
+also specify which clock source shall be used for the controller:
+
+- fsl,mscan-clock-source : a string describing the clock source. Valid values
+ are: "ip" for ip bus clock
+ "ref" for reference clock (XTAL)
+ "ref" is default in case this property is not
+ present.
+
+fsl,mpc5121-mscan nodes
+-----------------------
+In addition to the required compatible-, reg- and interrupt-properties, you can
+also specify which clock source and divider shall be used for the controller:
+
+- fsl,mscan-clock-source : a string describing the clock source. Valid values
+ are: "ip" for ip bus clock
+ "ref" for reference clock
+ "sys" for system clock
+ If this property is not present, an optimal CAN
+ clock source and frequency based on the system
+ clock will be selected. If this is not possible,
+ the reference clock will be used.
+
+- fsl,mscan-clock-divider: for the reference and system clock, an additional
+ clock divider can be specified. By default, a
+ value of 1 is used.
+
+Note that the MPC5121 Rev. 1 processor is not supported.
+
+Examples:
+ can@1300 {
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <12 0x8>;
+ interrupt-parent = <&ipic>;
+ reg = <0x1300 0x80>;
+ };
+
+ can@1380 {
+ compatible = "fsl,mpc5121-mscan";
+ interrupts = <13 0x8>;
+ interrupt-parent = <&ipic>;
+ reg = <0x1380 0x80>;
+ fsl,mscan-clock-source = "ref";
+ fsl,mscan-clock-divider = <3>;
+ };
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
index 5c6602dbfdc2..4ccb2cd5df94 100644
--- a/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/mpc5200.txt
@@ -195,11 +195,4 @@ External interrupts:
fsl,mpc5200-mscan nodes
-----------------------
-In addition to the required compatible-, reg- and interrupt-properites, you can
-also specify which clock source shall be used for the controller:
-
-- fsl,mscan-clock-source- a string describing the clock source. Valid values
- are: "ip" for ip bus clock
- "ref" for reference clock (XTAL)
- "ref" is default in case this property is not
- present.
+See file can.txt in this directory.
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index 8923597bd2bd..c540637eb16a 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -1123,6 +1123,21 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
This module supports multiple cards, autoprobe and ISA PnP.
+ Module snd-jazz16
+ -------------------
+
+ Module for Media Vision Jazz16 chipset. The chipset consists of 3 chips:
+ MVD1216 + MVA416 + MVA514.
+
+ port - port # for SB DSP chip (0x210,0x220,0x230,0x240,0x250,0x260)
+ irq - IRQ # for SB DSP chip (3,5,7,9,10,15)
+ dma8 - DMA # for SB DSP chip (1,3)
+ dma16 - DMA # for SB DSP chip (5,7)
+ mpu_port - MPU-401 port # (0x300,0x310,0x320,0x330)
+ mpu_irq - MPU-401 irq # (2,3,5,7)
+
+ This module supports multiple cards.
+
Module snd-korg1212
-------------------
@@ -1791,6 +1806,13 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
The power-management is supported.
+ Module snd-ua101
+ ----------------
+
+ Module for the Edirol UA-101 audio/MIDI interface.
+
+ This module supports multiple devices, autoprobe and hotplugging.
+
Module snd-usb-audio
--------------------
diff --git a/Documentation/trace/ring-buffer-design.txt b/Documentation/trace/ring-buffer-design.txt
index 5b1d23d604c5..d299ff31df57 100644
--- a/Documentation/trace/ring-buffer-design.txt
+++ b/Documentation/trace/ring-buffer-design.txt
@@ -33,9 +33,9 @@ head_page - a pointer to the page that the reader will use next
tail_page - a pointer to the page that will be written to next
-commit_page - a pointer to the page with the last finished non nested write.
+commit_page - a pointer to the page with the last finished non-nested write.
-cmpxchg - hardware assisted atomic transaction that performs the following:
+cmpxchg - hardware-assisted atomic transaction that performs the following:
A = B iff previous A == C
@@ -52,15 +52,15 @@ The Generic Ring Buffer
The ring buffer can be used in either an overwrite mode or in
producer/consumer mode.
-Producer/consumer mode is where the producer were to fill up the
+Producer/consumer mode is where if the producer were to fill up the
buffer before the consumer could free up anything, the producer
will stop writing to the buffer. This will lose most recent events.
-Overwrite mode is where the produce were to fill up the buffer
+Overwrite mode is where if the producer were to fill up the buffer
before the consumer could free up anything, the producer will
overwrite the older data. This will lose the oldest events.
-No two writers can write at the same time (on the same per cpu buffer),
+No two writers can write at the same time (on the same per-cpu buffer),
but a writer may interrupt another writer, but it must finish writing
before the previous writer may continue. This is very important to the
algorithm. The writers act like a "stack". The way interrupts works
@@ -79,16 +79,16 @@ the interrupt doing a write as well.
Readers can happen at any time. But no two readers may run at the
same time, nor can a reader preempt/interrupt another reader. A reader
-can not preempt/interrupt a writer, but it may read/consume from the
+cannot preempt/interrupt a writer, but it may read/consume from the
buffer at the same time as a writer is writing, but the reader must be
on another processor to do so. A reader may read on its own processor
and can be preempted by a writer.
-A writer can preempt a reader, but a reader can not preempt a writer.
+A writer can preempt a reader, but a reader cannot preempt a writer.
But a reader can read the buffer at the same time (on another processor)
as a writer.
-The ring buffer is made up of a list of pages held together by a link list.
+The ring buffer is made up of a list of pages held together by a linked list.
At initialization a reader page is allocated for the reader that is not
part of the ring buffer.
@@ -102,7 +102,7 @@ the head page.
The reader has its own page to use. At start up time, this page is
allocated but is not attached to the list. When the reader wants
-to read from the buffer, if its page is empty (like it is on start up)
+to read from the buffer, if its page is empty (like it is on start-up),
it will swap its page with the head_page. The old reader page will
become part of the ring buffer and the head_page will be removed.
The page after the inserted page (old reader_page) will become the
@@ -206,7 +206,7 @@ The main pointers:
commit page - the page that last finished a write.
-The commit page only is updated by the outer most writer in the
+The commit page only is updated by the outermost writer in the
writer stack. A writer that preempts another writer will not move the
commit page.
@@ -281,7 +281,7 @@ with the previous write.
The commit pointer points to the last write location that was
committed without preempting another write. When a write that
preempted another write is committed, it only becomes a pending commit
-and will not be a full commit till all writes have been committed.
+and will not be a full commit until all writes have been committed.
The commit page points to the page that has the last full commit.
The tail page points to the page with the last write (before
@@ -292,7 +292,7 @@ be several pages ahead. If the tail page catches up to the commit
page then no more writes may take place (regardless of the mode
of the ring buffer: overwrite and produce/consumer).
-The order of pages are:
+The order of pages is:
head page
commit page
@@ -311,7 +311,7 @@ Possible scenario:
There is a special case that the head page is after either the commit page
and possibly the tail page. That is when the commit (and tail) page has been
swapped with the reader page. This is because the head page is always
-part of the ring buffer, but the reader page is not. When ever there
+part of the ring buffer, but the reader page is not. Whenever there
has been less than a full page that has been committed inside the ring buffer,
and a reader swaps out a page, it will be swapping out the commit page.
@@ -338,7 +338,7 @@ and a reader swaps out a page, it will be swapping out the commit page.
In this case, the head page will not move when the tail and commit
move back into the ring buffer.
-The reader can not swap a page into the ring buffer if the commit page
+The reader cannot swap a page into the ring buffer if the commit page
is still on that page. If the read meets the last commit (real commit
not pending or reserved), then there is nothing more to read.
The buffer is considered empty until another full commit finishes.
@@ -395,7 +395,7 @@ The main idea behind the lockless algorithm is to combine the moving
of the head_page pointer with the swapping of pages with the reader.
State flags are placed inside the pointer to the page. To do this,
each page must be aligned in memory by 4 bytes. This will allow the 2
-least significant bits of the address to be used as flags. Since
+least significant bits of the address to be used as flags, since
they will always be zero for the address. To get the address,
simply mask out the flags.
@@ -460,7 +460,7 @@ When the reader tries to swap the page with the ring buffer, it
will also use cmpxchg. If the flag bit in the pointer to the
head page does not have the HEADER flag set, the compare will fail
and the reader will need to look for the new head page and try again.
-Note, the flag UPDATE and HEADER are never set at the same time.
+Note, the flags UPDATE and HEADER are never set at the same time.
The reader swaps the reader page as follows:
@@ -539,7 +539,7 @@ updated to the reader page.
| +-----------------------------+ |
+------------------------------------+
-Another important point. The page that the reader page points back to
+Another important point: The page that the reader page points back to
by its previous pointer (the one that now points to the new head page)
never points back to the reader page. That is because the reader page is
not part of the ring buffer. Traversing the ring buffer via the next pointers
@@ -572,7 +572,7 @@ not be able to swap the head page from the buffer, nor will it be able to
move the head page, until the writer is finished with the move.
This eliminates any races that the reader can have on the writer. The reader
-must spin, and this is why the reader can not preempt the writer.
+must spin, and this is why the reader cannot preempt the writer.
tail page
|
@@ -659,9 +659,9 @@ before pushing the head page. If it is, then it can be assumed that the
tail page wrapped the buffer, and we must drop new writes.
This is not a race condition, because the commit page can only be moved
-by the outter most writer (the writer that was preempted).
+by the outermost writer (the writer that was preempted).
This means that the commit will not move while a writer is moving the
-tail page. The reader can not swap the reader page if it is also being
+tail page. The reader cannot swap the reader page if it is also being
used as the commit page. The reader can simply check that the commit
is off the reader page. Once the commit page leaves the reader page
it will never go back on it unless a reader does another swap with the
@@ -733,7 +733,7 @@ The write converts the head page pointer to UPDATE.
--->| |<---| |<---| |<---| |<---
+---+ +---+ +---+ +---+
-But if a nested writer preempts here. It will see that the next
+But if a nested writer preempts here, it will see that the next
page is a head page, but it is also nested. It will detect that
it is nested and will save that information. The detection is the
fact that it sees the UPDATE flag instead of a HEADER or NORMAL
@@ -761,7 +761,7 @@ to NORMAL.
--->| |<---| |<---| |<---| |<---
+---+ +---+ +---+ +---+
-After the nested writer finishes, the outer most writer will convert
+After the nested writer finishes, the outermost writer will convert
the UPDATE pointer to NORMAL.
@@ -812,7 +812,7 @@ head page.
+---+ +---+ +---+ +---+
The nested writer moves the tail page forward. But does not set the old
-update page to NORMAL because it is not the outer most writer.
+update page to NORMAL because it is not the outermost writer.
tail page
|
@@ -892,7 +892,7 @@ It will return to the first writer.
--->| |<---| |<---| |<---| |<---
+---+ +---+ +---+ +---+
-The first writer can not know atomically test if the tail page moved
+The first writer cannot know atomically if the tail page moved
while it updates the HEAD page. It will then update the head page to
what it thinks is the new head page.
@@ -923,9 +923,9 @@ if the tail page is either where it use to be or on the next page:
--->| |<---| |<---| |<---| |<---
+---+ +---+ +---+ +---+
-If tail page != A and tail page does not equal B, then it must reset the
-pointer back to NORMAL. The fact that it only needs to worry about
-nested writers, it only needs to check this after setting the HEAD page.
+If tail page != A and tail page != B, then it must reset the pointer
+back to NORMAL. The fact that it only needs to worry about nested
+writers means that it only needs to check this after setting the HEAD page.
(first writer)
@@ -939,7 +939,7 @@ nested writers, it only needs to check this after setting the HEAD page.
+---+ +---+ +---+ +---+
Now the writer can update the head page. This is also why the head page must
-remain in UPDATE and only reset by the outer most writer. This prevents
+remain in UPDATE and only reset by the outermost writer. This prevents
the reader from seeing the incorrect head page.
diff --git a/Documentation/video4linux/gspca.txt b/Documentation/video4linux/gspca.txt
index 1800a62cf135..98ee599b4eb8 100644
--- a/Documentation/video4linux/gspca.txt
+++ b/Documentation/video4linux/gspca.txt
@@ -142,6 +142,7 @@ sunplus 04fc:5360 Sunplus Generic
spca500 04fc:7333 PalmPixDC85
sunplus 04fc:ffff Pure DigitalDakota
spca501 0506:00df 3Com HomeConnect Lite
+sunplus 052b:1507 Megapixel 5 Pretec DC-1007
sunplus 052b:1513 Megapix V4
sunplus 052b:1803 MegaImage VI
tv8532 0545:808b Veo Stingray
diff --git a/Documentation/video4linux/videobuf b/Documentation/video4linux/videobuf
new file mode 100644
index 000000000000..4e21ea7e36a8
--- /dev/null
+++ b/Documentation/video4linux/videobuf
@@ -0,0 +1,341 @@
+An introduction to the videobuf layer
+Jonathan Corbet <corbet@lwn.net>
+Current as of 2.6.32
+
+The videobuf layer functions as a sort of glue layer between a V4L2 driver
+and user space. It handles the allocation and management of buffers for
+the storage of video frames. There is a set of functions which can be used
+to implement many of the standard POSIX I/O system calls, including read(),
+poll(), and, happily, mmap(). Another set of functions can be used to
+implement the bulk of the V4L2 ioctl() calls related to streaming I/O,
+including buffer allocation, queueing and dequeueing, and streaming
+control. Using videobuf imposes a few design decisions on the driver
+author, but the payback comes in the form of reduced code in the driver and
+a consistent implementation of the V4L2 user-space API.
+
+Buffer types
+
+Not all video devices use the same kind of buffers. In fact, there are (at
+least) three common variations:
+
+ - Buffers which are scattered in both the physical and (kernel) virtual
+ address spaces. All user-space buffers are like this, but it makes
+ great sense to allocate kernel-space buffers this way as well when it is
+ possible. Unfortunately, it is not always possible; working with this
+ kind of buffer normally requires hardware which can do scatter/gather
+ DMA operations.
+
+ - Buffers which are physically scattered, but which are virtually
+ contiguous; buffers allocated with vmalloc(), in other words. These
+ buffers are just as hard to use for DMA operations, but they can be
+ useful in situations where DMA is not available but virtually-contiguous
+ buffers are convenient.
+
+ - Buffers which are physically contiguous. Allocation of this kind of
+ buffer can be unreliable on fragmented systems, but simpler DMA
+ controllers cannot deal with anything else.
+
+Videobuf can work with all three types of buffers, but the driver author
+must pick one at the outset and design the driver around that decision.
+
+Data structures, callbacks, and initialization
+
+Depending on which type of buffers are being used, the driver should
+include one of the following files:
+
+ <media/videobuf-dma-sg.h> /* Physically scattered */
+ <media/videobuf-vmalloc.h> /* vmalloc() buffers */
+ <media/videobuf-dma-contig.h> /* Physically contiguous */
+
+The driver's data structure describing a V4L2 device should include a
+struct videobuf_queue instance for the management of the buffer queue,
+along with a list_head for the queue of available buffers. There will also
+need to be an interrupt-safe spinlock which is used to protect (at least)
+the queue.
+
+The next step is to write four simple callbacks to help videobuf deal with
+the management of buffers:
+
+ struct videobuf_queue_ops {
+ int (*buf_setup)(struct videobuf_queue *q,
+ unsigned int *count, unsigned int *size);
+ int (*buf_prepare)(struct videobuf_queue *q,
+ struct videobuf_buffer *vb,
+ enum v4l2_field field);
+ void (*buf_queue)(struct videobuf_queue *q,
+ struct videobuf_buffer *vb);
+ void (*buf_release)(struct videobuf_queue *q,
+ struct videobuf_buffer *vb);
+ };
+
+buf_setup() is called early in the I/O process, when streaming is being
+initiated; its purpose is to tell videobuf about the I/O stream. The count
+parameter will be a suggested number of buffers to use; the driver should
+check it for rationality and adjust it if need be. As a practical rule, a
+minimum of two buffers are needed for proper streaming, and there is
+usually a maximum (which cannot exceed 32) which makes sense for each
+device. The size parameter should be set to the expected (maximum) size
+for each frame of data.
+
+Each buffer (in the form of a struct videobuf_buffer pointer) will be
+passed to buf_prepare(), which should set the buffer's size, width, height,
+and field fields properly. If the buffer's state field is
+VIDEOBUF_NEEDS_INIT, the driver should pass it to:
+
+ int videobuf_iolock(struct videobuf_queue* q, struct videobuf_buffer *vb,
+ struct v4l2_framebuffer *fbuf);
+
+Among other things, this call will usually allocate memory for the buffer.
+Finally, the buf_setup() function should set the buffer's state to
+VIDEOBUF_PREPARED.
+
+When a buffer is queued for I/O, it is passed to buf_queue(), which should
+put it onto the driver's list of available buffers and set its state to
+VIDEOBUF_QUEUED. Note that this function is called with the queue spinlock
+held; if it tries to acquire it as well things will come to a screeching
+halt. Yes, this is the voice of experience. Note also that videobuf may
+wait on the first buffer in the queue; placing other buffers in front of it
+could again gum up the works. So use list_add_tail() to enqueue buffers.
+
+Finally, buf_release() is called when a buffer is no longer intended to be
+used. The driver should ensure that there is no I/O active on the buffer,
+then pass it to the appropriate free routine(s):
+
+ /* Scatter/gather drivers */
+ int videobuf_dma_unmap(struct videobuf_queue *q,
+ struct videobuf_dmabuf *dma);
+ int videobuf_dma_free(struct videobuf_dmabuf *dma);
+
+ /* vmalloc drivers */
+ void videobuf_vmalloc_free (struct videobuf_buffer *buf);
+
+ /* Contiguous drivers */
+ void videobuf_dma_contig_free(struct videobuf_queue *q,
+ struct videobuf_buffer *buf);
+
+One way to ensure that a buffer is no longer under I/O is to pass it to:
+
+ int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr);
+
+Here, vb is the buffer, non_blocking indicates whether non-blocking I/O
+should be used (it should be zero in the buf_release() case), and intr
+controls whether an interruptible wait is used.
+
+File operations
+
+At this point, much of the work is done; much of the rest is slipping
+videobuf calls into the implementation of the other driver callbacks. The
+first step is in the open() function, which must initialize the
+videobuf queue. The function to use depends on the type of buffer used:
+
+ void videobuf_queue_sg_init(struct videobuf_queue *q,
+ struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv);
+
+ void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
+ struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv);
+
+ void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
+ struct videobuf_queue_ops *ops,
+ struct device *dev,
+ spinlock_t *irqlock,
+ enum v4l2_buf_type type,
+ enum v4l2_field field,
+ unsigned int msize,
+ void *priv);
+
+In each case, the parameters are the same: q is the queue structure for the
+device, ops is the set of callbacks as described above, dev is the device
+structure for this video device, irqlock is an interrupt-safe spinlock to
+protect access to the data structures, type is the buffer type used by the
+device (cameras will use V4L2_BUF_TYPE_VIDEO_CAPTURE, for example), field
+describes which field is being captured (often V4L2_FIELD_NONE for
+progressive devices), msize is the size of any containing structure used
+around struct videobuf_buffer, and priv is a private data pointer which
+shows up in the priv_data field of struct videobuf_queue. Note that these
+are void functions which, evidently, are immune to failure.
+
+V4L2 capture drivers can be written to support either of two APIs: the
+read() system call and the rather more complicated streaming mechanism. As
+a general rule, it is necessary to support both to ensure that all
+applications have a chance of working with the device. Videobuf makes it
+easy to do that with the same code. To implement read(), the driver need
+only make a call to one of:
+
+ ssize_t videobuf_read_one(struct videobuf_queue *q,
+ char __user *data, size_t count,
+ loff_t *ppos, int nonblocking);
+
+ ssize_t videobuf_read_stream(struct videobuf_queue *q,
+ char __user *data, size_t count,
+ loff_t *ppos, int vbihack, int nonblocking);
+
+Either one of these functions will read frame data into data, returning the
+amount actually read; the difference is that videobuf_read_one() will only
+read a single frame, while videobuf_read_stream() will read multiple frames
+if they are needed to satisfy the count requested by the application. A
+typical driver read() implementation will start the capture engine, call
+one of the above functions, then stop the engine before returning (though a
+smarter implementation might leave the engine running for a little while in
+anticipation of another read() call happening in the near future).
+
+The poll() function can usually be implemented with a direct call to:
+
+ unsigned int videobuf_poll_stream(struct file *file,
+ struct videobuf_queue *q,
+ poll_table *wait);
+
+Note that the actual wait queue eventually used will be the one associated
+with the first available buffer.
+
+When streaming I/O is done to kernel-space buffers, the driver must support
+the mmap() system call to enable user space to access the data. In many
+V4L2 drivers, the often-complex mmap() implementation simplifies to a
+single call to:
+
+ int videobuf_mmap_mapper(struct videobuf_queue *q,
+ struct vm_area_struct *vma);
+
+Everything else is handled by the videobuf code.
+
+The release() function requires two separate videobuf calls:
+
+ void videobuf_stop(struct videobuf_queue *q);
+ int videobuf_mmap_free(struct videobuf_queue *q);
+
+The call to videobuf_stop() terminates any I/O in progress - though it is
+still up to the driver to stop the capture engine. The call to
+videobuf_mmap_free() will ensure that all buffers have been unmapped; if
+so, they will all be passed to the buf_release() callback. If buffers
+remain mapped, videobuf_mmap_free() returns an error code instead. The
+purpose is clearly to cause the closing of the file descriptor to fail if
+buffers are still mapped, but every driver in the 2.6.32 kernel cheerfully
+ignores its return value.
+
+ioctl() operations
+
+The V4L2 API includes a very long list of driver callbacks to respond to
+the many ioctl() commands made available to user space. A number of these
+- those associated with streaming I/O - turn almost directly into videobuf
+calls. The relevant helper functions are:
+
+ int videobuf_reqbufs(struct videobuf_queue *q,
+ struct v4l2_requestbuffers *req);
+ int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
+ int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b);
+ int videobuf_dqbuf(struct videobuf_queue *q, struct v4l2_buffer *b,
+ int nonblocking);
+ int videobuf_streamon(struct videobuf_queue *q);
+ int videobuf_streamoff(struct videobuf_queue *q);
+ int videobuf_cgmbuf(struct videobuf_queue *q, struct video_mbuf *mbuf,
+ int count);
+
+So, for example, a VIDIOC_REQBUFS call turns into a call to the driver's
+vidioc_reqbufs() callback which, in turn, usually only needs to locate the
+proper struct videobuf_queue pointer and pass it to videobuf_reqbufs().
+These support functions can replace a great deal of buffer management
+boilerplate in a lot of V4L2 drivers.
+
+The vidioc_streamon() and vidioc_streamoff() functions will be a bit more
+complex, of course, since they will also need to deal with starting and
+stopping the capture engine. videobuf_cgmbuf(), called from the driver's
+vidiocgmbuf() function, only exists if the V4L1 compatibility module has
+been selected with CONFIG_VIDEO_V4L1_COMPAT, so its use must be surrounded
+with #ifdef directives.
+
+Buffer allocation
+
+Thus far, we have talked about buffers, but have not looked at how they are
+allocated. The scatter/gather case is the most complex on this front. For
+allocation, the driver can leave buffer allocation entirely up to the
+videobuf layer; in this case, buffers will be allocated with vmalloc_32()
+and will be very scattered indeed. If the application is using user-space
+buffers, no allocation is needed; the videobuf layer will take care of
+calling get_user_pages() and filling in the scatterlist array.
+
+If the driver needs to do its own memory allocation, it should be done in
+the vidioc_reqbufs() function, *after* calling videobuf_reqbufs(). The
+first step is a call to:
+
+ struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
+
+The returned videobuf_dmabuf structure (defined in
+<media/videobuf-dma-sg.h>) includes a couple of relevant fields:
+
+ struct scatterlist *sglist;
+ int sglen;
+
+The driver must allocate an appropriately-sized scatterlist array and
+populate it with pointers to the pieces of the allocated buffer; sglen
+should be set to the length of the array.
+
+Drivers using the vmalloc() method need not (and cannot) concern themselves
+with buffer allocation at all; videobuf will handle those details. The
+same is true of contiguous-DMA drivers; videobuf will allocate the buffers
+(with dma_alloc_coherent()) when it sees fit. That means that these
+drivers may be trying to do high-order allocations at any time, an
+operation which is not always guaranteed to work. Some drivers play tricks
+by allocating DMA space at system boot time; videobuf does not currently
+play well with those drivers.
+
+Filling the buffers
+
+The final part of a videobuf implementation has no direct callback - its
+the portion of the code which actually puts frame data into the buffers,
+usually in response to interrupts from the device. For all types of
+drivers, this process works approximately as follows:
+
+ - Obtain the next available buffer and make sure that somebody is actually
+ waiting for it.
+
+ - Get a pointer to the memory and put video data there.
+
+ - Mark the buffer as done and wake up the process waiting for it.
+
+Step (1) above is done by looking at the driver-managed list_head structure
+- the one which is filled in the buf_queue() callback. Because starting
+the engine and enqueueing buffers are done in separate steps, it's possible
+for the engine to be running without any buffers available - in the
+vmalloc() case especially. So the driver should be prepared for the list
+to be empty. It is equally possible that nobody is yet interested in the
+buffer; the driver should not remove it from the list or fill it until a
+process is waiting on it. That test can be done by examining the buffer's
+done field (a wait_queue_head_t structure) with waitqueue_active().
+
+For scatter/gather drivers, the needed memory pointers will be found in the
+scatterlist structure described above. Drivers using the vmalloc() method
+can get a memory pointer with:
+
+ void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
+
+For contiguous DMA drivers, the function to use is:
+
+ dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
+
+The contiguous DMA API goes out of its way to hide the kernel-space address
+of the DMA buffer from drivers.
+
+The final step is to set the size field of the relevant videobuf_buffer
+structure to the actual size of the captured image, set state to
+VIDEOBUF_DONE, then call wake_up() on the done queue. At this point, the
+buffer is owned by the videobuf layer and the driver should not touch it
+again.
+
+Developers who are interested in more information can go into the relevant
+header files; there are a few low-level functions declared there which have
+not been talked about here. Also worthwhile is the vivi driver
+(drivers/media/video/vivi.c), which is maintained as an example of how V4L2
+drivers should be written. Vivi only uses the vmalloc() API, but it's good
+enough to get started with. Note also that all of these calls are exported
+GPL-only, so they will not be available to non-GPL kernel modules.
diff --git a/MAINTAINERS b/MAINTAINERS
index 6914588eef89..fd74390913f0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1396,6 +1396,15 @@ F: arch/powerpc/include/asm/spu*.h
F: arch/powerpc/oprofile/*cell*
F: arch/powerpc/platforms/cell/
+CEPH DISTRIBUTED FILE SYSTEM CLIENT
+M: Sage Weil <sage@newdream.net>
+L: ceph-devel@lists.sourceforge.net
+W: http://ceph.newdream.net/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+S: Supported
+F: Documentation/filesystems/ceph.txt
+F: fs/ceph
+
CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
M: David Vrabel <david.vrabel@csr.com>
L: linux-usb@vger.kernel.org
@@ -2384,6 +2393,15 @@ W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
S: Maintained
F: drivers/hwmon/hdaps.c
+HARDWARE LATENCY DETECTOR
+P: Jon Masters
+M: jcm@jonmasters.org
+W: http://www.kernel.org/pub/linux/kernel/people/jcm/hwlat_detector/
+S: Supported
+L: linux-kernel@vger.kernel.org
+F: Documentation/hwlat_detector.txt
+F: drivers/misc/hwlat_detector.c
+
HWPOISON MEMORY FAILURE HANDLING
M: Andi Kleen <andi@firstfloor.org>
L: linux-mm@kvack.org
@@ -2903,6 +2921,12 @@ L: linux-mips@linux-mips.org
S: Maintained
F: drivers/serial/ioc3_serial.c
+IOQ LIBRARY
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/ioq.h
+F: lib/ioq.c
+
IP MASQUERADING
M: Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
S: Maintained
@@ -3138,7 +3162,7 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
-M: Hollis Blanchard <hollisb@us.ibm.com>
+M: Alexander Graf <agraf@suse.de>
L: kvm-ppc@vger.kernel.org
W: http://kvm.qumranet.com
S: Supported
@@ -3174,15 +3198,17 @@ S: Maintained
F: include/linux/kexec.h
F: kernel/kexec.c
-KGDB
+KGDB / KDB / debug_core
M: Jason Wessel <jason.wessel@windriver.com>
L: kgdb-bugreport@lists.sourceforge.net
S: Maintained
F: Documentation/DocBook/kgdb.tmpl
+F: drivers/char/kdb_keyboard.*
F: drivers/misc/kgdbts.c
F: drivers/serial/kgdboc.c
F: include/linux/kgdb.h
-F: kernel/kgdb.c
+F: include/linux/kdb.h
+F: kernel/debug/
KMEMCHECK
M: Vegard Nossum <vegardno@ifi.uio.no>
@@ -3397,6 +3423,13 @@ S: Maintained
F: Documentation/ldm.txt
F: fs/partitions/ldm.*
+LogFS
+M: Joern Engel <joern@logfs.org>
+L: logfs@logfs.org
+W: logfs.org
+S: Maintained
+F: fs/logfs/
+
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Eric Moore <Eric.Moore@lsi.com>
M: support@lsi.com
@@ -3940,29 +3973,20 @@ S: Maintained
F: sound/soc/omap/
OMAP FRAMEBUFFER SUPPORT
-M: Imre Deak <imre.deak@nokia.com>
+M: Tomi Valkeinen <tomi.valkeinen@nokia.com>
L: linux-fbdev@vger.kernel.org
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/video/omap/
-OMAP DISPLAY SUBSYSTEM SUPPORT (DSS2)
+OMAP DISPLAY SUBSYSTEM and FRAMEBUFFER SUPPORT (DSS2)
M: Tomi Valkeinen <tomi.valkeinen@nokia.com>
L: linux-omap@vger.kernel.org
-L: linux-fbdev@vger.kernel.org (moderated for non-subscribers)
+L: linux-fbdev@vger.kernel.org
S: Maintained
-F: drivers/video/omap2/dss/
-F: drivers/video/omap2/vrfb.c
-F: drivers/video/omap2/vram.c
+F: drivers/video/omap2/
F: Documentation/arm/OMAP/DSS
-OMAP FRAMEBUFFER SUPPORT (FOR DSS2)
-M: Tomi Valkeinen <tomi.valkeinen@nokia.com>
-L: linux-omap@vger.kernel.org
-L: linux-fbdev@vger.kernel.org (moderated for non-subscribers)
-S: Maintained
-F: drivers/video/omap2/omapfb/
-
OMAP MMC SUPPORT
M: Jarkko Lavinen <jarkko.lavinen@nokia.com>
L: linux-omap@vger.kernel.org
@@ -4889,6 +4913,12 @@ F: drivers/serial/serial_lh7a40x.c
F: drivers/usb/gadget/lh7a40*
F: drivers/usb/host/ohci-lh7a40*
+SHM-SIGNAL LIBRARY
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/shm_signal.h
+F: lib/shm_signal.c
+
SIMPLE FIRMWARE INTERFACE (SFI)
M: Len Brown <lenb@kernel.org>
L: sfi-devel@simplefirmware.org
@@ -5799,6 +5829,19 @@ S: Maintained
F: Documentation/fb/uvesafb.txt
F: drivers/video/uvesafb.*
+VBUS
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+F: include/linux/vbus*
+F: drivers/vbus/*
+
+VBUS ETHERNET DRIVER
+M: Gregory Haskins <ghaskins@novell.com>
+S: Maintained
+W: http://developer.novell.com/wiki/index.php/AlacrityVM
+F: include/linux/venet.h
+F: drivers/net/vbus-enet.c
+
VFAT/FAT/MSDOS FILESYSTEM
M: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
S: Maintained
diff --git a/Makefile b/Makefile
index 12310947156e..e13503fc1a1f 100644
--- a/Makefile
+++ b/Makefile
@@ -184,11 +184,14 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
# CROSS_COMPILE can be set on the command line
# make CROSS_COMPILE=ia64-linux-
# Alternatively CROSS_COMPILE can be set in the environment.
+# A third alternative is to store a setting in .config so that plain
+# "make" in the configured kernel build directory always uses that.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
export KBUILD_BUILDHOST := $(SUBARCH)
ARCH ?= $(SUBARCH)
CROSS_COMPILE ?=
+CROSS_COMPILE ?= $(CONFIG_CROSS_COMPILE:"%"=%)
# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index 6ad3ea696421..b9e3e3318371 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -98,21 +98,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ALPHA_LOCAL_H */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 233a222752c0..2e5784e1ae42 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -18,6 +18,8 @@ config ARM
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HAVE_GENERIC_DMA_COHERENT
+ select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZO
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
@@ -50,6 +52,9 @@ config HAVE_TCM
bool
select GENERIC_ALLOCATOR
+config HAVE_PROC_CPU
+ bool
+
config NO_IOPORT
bool
@@ -552,6 +557,7 @@ config ARCH_PNX4008
bool "Philips Nexperia PNX4008 Mobile"
select CPU_ARM926T
select HAVE_CLK
+ select COMMON_CLKDEV
help
This enables support for Philips PNX4008 mobile platform.
@@ -573,14 +579,14 @@ config ARCH_PXA
config ARCH_MSM
bool "Qualcomm MSM"
- select CPU_V6
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
help
- Support for Qualcomm MSM7K based systems. This runs on the ARM11
- apps processor of the MSM7K and depends on a shared memory
- interface to the ARM9 modem processor which runs the baseband stack
- and controls some vital subsystems (clock and power control, etc).
+ Support for Qualcomm MSM/QSD based systems. This runs on the
+ apps processor of the MSM/QSD and depends on a shared memory
+ interface to the modem processor which runs the baseband
+ stack and controls some vital subsystems
+ (clock and power control, etc).
config ARCH_RPC
bool "RiscPC"
@@ -688,6 +694,7 @@ config ARCH_DAVINCI
select HAVE_IDE
select COMMON_CLKDEV
select GENERIC_ALLOCATOR
+ select ARCH_HAS_HOLES_MEMORYMODEL
help
Support for TI's DaVinci platform.
@@ -699,6 +706,7 @@ config ARCH_OMAP
select ARCH_HAS_CPUFREQ
select GENERIC_TIME
select GENERIC_CLOCKEVENTS
+ select ARCH_HAS_HOLES_MEMORYMODEL
help
Support for TI's OMAP platform (OMAP1 and OMAP2).
@@ -1226,6 +1234,7 @@ config ALIGNMENT_TRAP
bool
depends on CPU_CP15_MMU
default y if !ARCH_EBSA110
+ select HAVE_PROC_CPU if PROC_FS
help
ARM processors cannot fetch/store information which is not
naturally aligned on the bus, i.e., a 4 byte fetch must start at an
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index ce39dc540085..2d4d88ba73bf 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -63,8 +63,12 @@ endif
SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
-targets := vmlinux vmlinux.lds piggy.gz piggy.o font.o font.c \
- head.o misc.o $(OBJS)
+suffix_$(CONFIG_KERNEL_GZIP) = gzip
+suffix_$(CONFIG_KERNEL_LZO) = lzo
+
+targets := vmlinux vmlinux.lds \
+ piggy.$(suffix_y) piggy.$(suffix_y).o \
+ font.o font.c head.o misc.o $(OBJS)
ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -87,22 +91,34 @@ endif
ifneq ($(PARAMS_PHYS),)
LDFLAGS_vmlinux += --defsym params_phys=$(PARAMS_PHYS)
endif
-LDFLAGS_vmlinux += -p --no-undefined -X \
- $(shell $(CC) $(KBUILD_CFLAGS) --print-libgcc-file-name) -T
+# ?
+LDFLAGS_vmlinux += -p
+# Report unresolved symbol references
+LDFLAGS_vmlinux += --no-undefined
+# Delete all temporary local symbols
+LDFLAGS_vmlinux += -X
+# Next argument is a linker script
+LDFLAGS_vmlinux += -T
+
+# For __aeabi_uidivmod
+lib1funcs = $(obj)/lib1funcs.o
+
+$(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S FORCE
+ $(call cmd,shipped)
# Don't allow any static data in misc.o, which
# would otherwise mess up our GOT table
CFLAGS_misc.o := -Dstatic=
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.o \
- $(addprefix $(obj)/, $(OBJS)) FORCE
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \
+ $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE
$(call if_changed,ld)
@:
-$(obj)/piggy.gz: $(obj)/../Image FORCE
- $(call if_changed,gzip)
+$(obj)/piggy.$(suffix_y): $(obj)/../Image FORCE
+ $(call if_changed,$(suffix_y))
-$(obj)/piggy.o: $(obj)/piggy.gz FORCE
+$(obj)/piggy.$(suffix_y).o: $(obj)/piggy.$(suffix_y) FORCE
CFLAGS_font.o := -Dstatic=
diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index 17153b54613b..7e0fe4d42c7b 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -18,10 +18,15 @@
unsigned int __machine_arch_type;
+#define _LINUX_STRING_H_
+
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
#include <asm/string.h>
+#include <linux/linkage.h>
+
+#include <asm/unaligned.h>
#ifdef STANDALONE_DEBUG
#define putstr printf
@@ -188,34 +193,8 @@ static inline __ptr_t memcpy(__ptr_t __dest, __const __ptr_t __src,
/*
* gzip delarations
*/
-#define OF(args) args
#define STATIC static
-typedef unsigned char uch;
-typedef unsigned short ush;
-typedef unsigned long ulg;
-
-#define WSIZE 0x8000 /* Window size must be at least 32k, */
- /* and a power of two */
-
-static uch *inbuf; /* input buffer */
-static uch window[WSIZE]; /* Sliding window buffer */
-
-static unsigned insize; /* valid bytes in inbuf */
-static unsigned inptr; /* index of next byte to be processed in inbuf */
-static unsigned outcnt; /* bytes in output buffer */
-
-/* gzip flag byte */
-#define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */
-#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gzip file */
-#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
-#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
-#define COMMENT 0x10 /* bit 4 set: file comment present */
-#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
-#define RESERVED 0xC0 /* bit 6,7: reserved */
-
-#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
-
/* Diagnostic functions */
#ifdef DEBUG
# define Assert(cond,msg) {if(!(cond)) error(msg);}
@@ -233,24 +212,20 @@ static unsigned outcnt; /* bytes in output buffer */
# define Tracecv(c,x)
#endif
-static int fill_inbuf(void);
-static void flush_window(void);
static void error(char *m);
extern char input_data[];
extern char input_data_end[];
-static uch *output_data;
-static ulg output_ptr;
-static ulg bytes_out;
+static unsigned char *output_data;
+static unsigned long output_ptr;
static void error(char *m);
static void putstr(const char *);
-extern int end;
-static ulg free_mem_ptr;
-static ulg free_mem_end_ptr;
+static unsigned long free_mem_ptr;
+static unsigned long free_mem_end_ptr;
#ifdef STANDALONE_DEBUG
#define NO_INFLATE_MALLOC
@@ -258,46 +233,13 @@ static ulg free_mem_end_ptr;
#define ARCH_HAS_DECOMP_WDOG
-#include "../../../../lib/inflate.c"
-
-/* ===========================================================================
- * Fill the input buffer. This is called only when the buffer is empty
- * and at least one byte is really needed.
- */
-int fill_inbuf(void)
-{
- if (insize != 0)
- error("ran out of input data");
-
- inbuf = input_data;
- insize = &input_data_end[0] - &input_data[0];
-
- inptr = 1;
- return inbuf[0];
-}
+#ifdef CONFIG_KERNEL_GZIP
+#include "../../../../lib/decompress_inflate.c"
+#endif
-/* ===========================================================================
- * Write the output window window[0..outcnt-1] and update crc and bytes_out.
- * (Used for the decompressed data only.)
- */
-void flush_window(void)
-{
- ulg c = crc;
- unsigned n;
- uch *in, *out, ch;
-
- in = window;
- out = &output_data[output_ptr];
- for (n = 0; n < outcnt; n++) {
- ch = *out++ = *in++;
- c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
- }
- crc = c;
- bytes_out += (ulg)outcnt;
- output_ptr += (ulg)outcnt;
- outcnt = 0;
- putstr(".");
-}
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
#ifndef arch_error
#define arch_error(x)
@@ -314,22 +256,33 @@ static void error(char *x)
while(1); /* Halt */
}
+asmlinkage void __div0(void)
+{
+ error("Attempting division by 0!");
+}
+
#ifndef STANDALONE_DEBUG
-ulg
-decompress_kernel(ulg output_start, ulg free_mem_ptr_p, ulg free_mem_ptr_end_p,
- int arch_id)
+unsigned long
+decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
+ unsigned long free_mem_ptr_end_p,
+ int arch_id)
{
- output_data = (uch *)output_start; /* Points to kernel start */
+ unsigned char *tmp;
+
+ output_data = (unsigned char *)output_start;
free_mem_ptr = free_mem_ptr_p;
free_mem_end_ptr = free_mem_ptr_end_p;
__machine_arch_type = arch_id;
arch_decomp_setup();
- makecrc();
+ tmp = (unsigned char *) (((unsigned long)input_data_end) - 4);
+ output_ptr = get_unaligned_le32(tmp);
+
putstr("Uncompressing Linux...");
- gunzip();
+ decompress(input_data, input_data_end - input_data,
+ NULL, NULL, output_data, NULL, error);
putstr(" done, booting the kernel.\n");
return output_ptr;
}
@@ -341,11 +294,10 @@ int main()
{
output_data = output_buffer;
- makecrc();
putstr("Uncompressing Linux...");
- gunzip();
+ decompress(input_data, input_data_end - input_data,
+ NULL, NULL, output_data, NULL, error);
putstr("done.\n");
return 0;
}
#endif
-
diff --git a/arch/arm/boot/compressed/piggy.gzip.S b/arch/arm/boot/compressed/piggy.gzip.S
new file mode 100644
index 000000000000..a68adf91a165
--- /dev/null
+++ b/arch/arm/boot/compressed/piggy.gzip.S
@@ -0,0 +1,6 @@
+ .section .piggydata,#alloc
+ .globl input_data
+input_data:
+ .incbin "arch/arm/boot/compressed/piggy.gzip"
+ .globl input_data_end
+input_data_end:
diff --git a/arch/arm/boot/compressed/piggy.S b/arch/arm/boot/compressed/piggy.lzo.S
index 54c951800ebd..a425ad95959a 100644
--- a/arch/arm/boot/compressed/piggy.S
+++ b/arch/arm/boot/compressed/piggy.lzo.S
@@ -1,6 +1,6 @@
.section .piggydata,#alloc
.globl input_data
input_data:
- .incbin "arch/arm/boot/compressed/piggy.gz"
+ .incbin "arch/arm/boot/compressed/piggy.lzo"
.globl input_data_end
input_data_end:
diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c
index aae5bc01acc8..446b696196e3 100644
--- a/arch/arm/common/clkdev.c
+++ b/arch/arm/common/clkdev.c
@@ -99,6 +99,16 @@ void clkdev_add(struct clk_lookup *cl)
}
EXPORT_SYMBOL(clkdev_add);
+void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
+{
+ mutex_lock(&clocks_mutex);
+ while (num--) {
+ list_add_tail(&cl->node, &clocks);
+ cl++;
+ }
+ mutex_unlock(&clocks_mutex);
+}
+
#define MAX_DEV_ID 20
#define MAX_CON_ID 16
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cc32c1e54a59..cc0a932bbea9 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -277,7 +277,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
* We don't need to sync the DMA buffer since
* it was allocated via the coherent allocators.
*/
- dma_cache_maint(ptr, size, dir);
+ __dma_single_cpu_to_dev(ptr, size, dir);
}
return dma_addr;
@@ -315,6 +315,8 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
__cpuc_flush_dcache_area(ptr, size);
}
free_safe_buffer(dev->archdata.dmabounce, buf);
+ } else {
+ __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
}
}
diff --git a/arch/arm/configs/raumfeld_defconfig b/arch/arm/configs/raumfeld_defconfig
new file mode 100644
index 000000000000..acb1a8f30e31
--- /dev/null
+++ b/arch/arm/configs/raumfeld_defconfig
@@ -0,0 +1,1898 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.32-rc5
+# Sun Nov 1 21:57:32 2009
+#
+CONFIG_ARM=y
+CONFIG_HAVE_PWM=y
+CONFIG_SYS_SUPPORTS_APM_EMULATION=y
+CONFIG_GENERIC_GPIO=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_LATENCYTOP_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_ARCH_HAS_CPUFREQ=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_ARCH_MTD_XIP=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_VECTORS_BASE=0xffff0000
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+# CONFIG_SYSVIPC is not set
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=17
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
+# CONFIG_RELAY is not set
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+# CONFIG_EMBEDDED is not set
+CONFIG_UID16=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_COMPAT_BRK=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_CLK=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_SLOW_WORK=y
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_BLOCK=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_FREEZER=y
+
+#
+# System Type
+#
+CONFIG_MMU=y
+# CONFIG_ARCH_AAEC2000 is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_AT91 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_GEMINI is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_EP93XX is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_MXC is not set
+# CONFIG_ARCH_STMP3XXX is not set
+# CONFIG_ARCH_NETX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_NOMADIK is not set
+# CONFIG_ARCH_IOP13XX is not set
+# CONFIG_ARCH_IOP32X is not set
+# CONFIG_ARCH_IOP33X is not set
+# CONFIG_ARCH_IXP23XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_KIRKWOOD is not set
+# CONFIG_ARCH_LOKI is not set
+# CONFIG_ARCH_MV78XX0 is not set
+# CONFIG_ARCH_ORION5X is not set
+# CONFIG_ARCH_MMP is not set
+# CONFIG_ARCH_KS8695 is not set
+# CONFIG_ARCH_NS9XXX is not set
+# CONFIG_ARCH_W90X900 is not set
+# CONFIG_ARCH_PNX4008 is not set
+CONFIG_ARCH_PXA=y
+# CONFIG_ARCH_MSM is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_S3C64XX is not set
+# CONFIG_ARCH_S5PC1XX is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_U300 is not set
+# CONFIG_ARCH_DAVINCI is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_BCMRING is not set
+
+#
+# Intel PXA2xx/PXA3xx Implementations
+#
+
+#
+# Supported PXA3xx Processor Variants
+#
+CONFIG_CPU_PXA300=y
+# CONFIG_CPU_PXA310 is not set
+CONFIG_CPU_PXA320=y
+# CONFIG_CPU_PXA930 is not set
+# CONFIG_CPU_PXA935 is not set
+# CONFIG_CPU_PXA950 is not set
+
+#
+# Intel/Marvell Dev Platforms (sorted by hardware release time)
+#
+# CONFIG_ARCH_LUBBOCK is not set
+# CONFIG_MACH_MAINSTONE is not set
+# CONFIG_MACH_ZYLONITE is not set
+# CONFIG_MACH_LITTLETON is not set
+# CONFIG_MACH_TAVOREVB is not set
+# CONFIG_MACH_SAAR is not set
+
+#
+# Third Party Dev Platforms (sorted by vendor name)
+#
+# CONFIG_ARCH_PXA_IDP is not set
+# CONFIG_ARCH_VIPER is not set
+# CONFIG_MACH_BALLOON3 is not set
+# CONFIG_MACH_CSB726 is not set
+# CONFIG_MACH_ARMCORE is not set
+# CONFIG_MACH_EM_X270 is not set
+# CONFIG_MACH_EXEDA is not set
+# CONFIG_MACH_CM_X300 is not set
+# CONFIG_ARCH_GUMSTIX is not set
+# CONFIG_MACH_INTELMOTE2 is not set
+# CONFIG_MACH_STARGATE2 is not set
+# CONFIG_MACH_XCEP is not set
+# CONFIG_TRIZEPS_PXA is not set
+# CONFIG_MACH_LOGICPD_PXA270 is not set
+# CONFIG_MACH_PCM027 is not set
+# CONFIG_MACH_COLIBRI is not set
+# CONFIG_MACH_COLIBRI300 is not set
+# CONFIG_MACH_COLIBRI320 is not set
+
+#
+# End-user Products (sorted by vendor name)
+#
+# CONFIG_MACH_H4700 is not set
+# CONFIG_MACH_H5000 is not set
+# CONFIG_MACH_HIMALAYA is not set
+# CONFIG_MACH_MAGICIAN is not set
+# CONFIG_MACH_MIOA701 is not set
+# CONFIG_PXA_EZX is not set
+# CONFIG_MACH_MP900C is not set
+# CONFIG_ARCH_PXA_PALM is not set
+CONFIG_MACH_RAUMFELD_RC=y
+CONFIG_MACH_RAUMFELD_CONNECTOR=y
+CONFIG_MACH_RAUMFELD_PROTO=y
+CONFIG_MACH_RAUMFELD_SPEAKER=y
+# CONFIG_PXA_SHARPSL is not set
+# CONFIG_ARCH_PXA_ESERIES is not set
+CONFIG_PXA3xx=y
+CONFIG_PXA_SSP=y
+CONFIG_PLAT_PXA=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_XSC3=y
+CONFIG_CPU_32v5=y
+CONFIG_CPU_ABRT_EV5T=y
+CONFIG_CPU_PABRT_LEGACY=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_TLB_V4WBI=y
+CONFIG_CPU_CP15=y
+CONFIG_CPU_CP15_MMU=y
+CONFIG_IO_36=y
+
+#
+# Processor Features
+#
+CONFIG_ARM_THUMB=y
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_BPREDICT_DISABLE is not set
+CONFIG_OUTER_CACHE=y
+CONFIG_CACHE_XSC3L2=y
+CONFIG_ARM_L1_CACHE_SHIFT=5
+CONFIG_IWMMXT=y
+CONFIG_COMMON_CLKDEV=y
+
+#
+# Bus support
+#
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_VMSPLIT_3G=y
+# CONFIG_VMSPLIT_2G is not set
+# CONFIG_VMSPLIT_1G is not set
+CONFIG_PAGE_OFFSET=0xC0000000
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_HZ=100
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set
+# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set
+# CONFIG_HIGHMEM is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ALIGNMENT_TRAP=y
+# CONFIG_UACCESS_WITH_MEMCPY is not set
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0
+CONFIG_ZBOOT_ROM_BSS=0
+CONFIG_CMDLINE="console=ttyS0,115200 rw"
+# CONFIG_XIP_KERNEL is not set
+# CONFIG_KEXEC is not set
+
+#
+# CPU Power Management
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TABLE=y
+# CONFIG_CPU_FREQ_DEBUG is not set
+CONFIG_CPU_FREQ_STAT=y
+# CONFIG_CPU_FREQ_STAT_DETAILS is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_LADDER=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_HAVE_AOUT=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+
+#
+# Power management options
+#
+CONFIG_PM=y
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_APM_EMULATION=y
+# CONFIG_PM_RUNTIME is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_MMAP=y
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+# CONFIG_XFRM_USER is not set
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_PRIVACY is not set
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+# CONFIG_INET6_AH is not set
+# CONFIG_INET6_ESP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+# CONFIG_IPV6_TUNNEL is not set
+# CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
+# CONFIG_NET_SCHED is not set
+# CONFIG_DCB is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+CONFIG_CFG80211_REG_DEBUG=y
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEFAULT_PS_VALUE=1
+CONFIG_WIRELESS_OLD_REGULATORY=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WIRELESS_EXT_SYSFS=y
+CONFIG_LIB80211=y
+# CONFIG_LIB80211_DEBUG is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_RC_MINSTREL=y
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel"
+# CONFIG_MAC80211_MESH is not set
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+CONFIG_NFTL=y
+CONFIG_NFTL_RW=y
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+CONFIG_MTD_BLOCK2MTD=y
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_NAND=y
+# CONFIG_MTD_NAND_VERIFY_WRITE is not set
+# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
+# CONFIG_MTD_NAND_H1900 is not set
+# CONFIG_MTD_NAND_GPIO is not set
+CONFIG_MTD_NAND_IDS=y
+# CONFIG_MTD_NAND_DISKONCHIP is not set
+# CONFIG_MTD_NAND_SHARPSL is not set
+CONFIG_MTD_NAND_PXA3xx=y
+# CONFIG_MTD_NAND_PXA3xx_BUILTIN is not set
+# CONFIG_MTD_NAND_NANDSIM is not set
+# CONFIG_MTD_NAND_PLATFORM is not set
+# CONFIG_MTD_ALAUDA is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+
+#
+# UBI - Unsorted block images
+#
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_RESERVE=1
+# CONFIG_MTD_UBI_GLUEBI is not set
+
+#
+# UBI debugging options
+#
+# CONFIG_MTD_UBI_DEBUG is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+# CONFIG_BLK_DEV_RAM is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MG_DISK is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_ISL29003=y
+CONFIG_TI_DAC7512=y
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_LEGACY is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_SMC911X is not set
+CONFIG_SMSC911X=y
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_PRE80211 is not set
+CONFIG_WLAN_80211=y
+CONFIG_LIBERTAS=y
+# CONFIG_LIBERTAS_USB is not set
+CONFIG_LIBERTAS_SDIO=m
+# CONFIG_LIBERTAS_SPI is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AT76C50X_USB is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_ATH_COMMON is not set
+# CONFIG_HOSTAP is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_WL12XX is not set
+# CONFIG_IWM is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+CONFIG_USB_NET_CDCETHER=y
+# CONFIG_USB_NET_CDC_EEM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_PLUSB is not set
+CONFIG_USB_NET_MCS7830=y
+# CONFIG_USB_NET_RNDIS_HOST is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+CONFIG_INPUT_POLLDEV=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_QT2160 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_PXA27x is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879_I2C is not set
+# CONFIG_TOUCHSCREEN_AD7879_SPI is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+CONFIG_TOUCHSCREEN_EETI=m
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_W90X900 is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_ATI_REMOTE is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_VT_HW_CONSOLE_BINDING is not set
+CONFIG_DEVKMEM=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MAX3100 is not set
+CONFIG_SERIAL_PXA=y
+CONFIG_SERIAL_PXA_CONSOLE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_HELPER_AUTO=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_DESIGNWARE is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PXA=y
+# CONFIG_I2C_PXA_SLAVE is not set
+# CONFIG_I2C_SIMTEC is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+CONFIG_SPI_DEBUG=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_BITBANG=y
+CONFIG_SPI_GPIO=y
+# CONFIG_SPI_PXA2XX is not set
+
+#
+# SPI Protocol Masters
+#
+CONFIG_SPI_SPIDEV=y
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_REQUIRE_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_DEBUG_GPIO=y
+# CONFIG_GPIO_SYSFS is not set
+
+#
+# Memory mapped GPIO expanders:
+#
+
+#
+# I2C GPIO expanders:
+#
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
+
+#
+# PCI GPIO expanders:
+#
+
+#
+# SPI GPIO expanders:
+#
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+CONFIG_W1=m
+
+#
+# 1-wire Bus Masters
+#
+# CONFIG_W1_MASTER_DS2490 is not set
+# CONFIG_W1_MASTER_DS2482 is not set
+# CONFIG_W1_MASTER_DS1WM is not set
+CONFIG_W1_MASTER_GPIO=m
+
+#
+# 1-wire Slaves
+#
+# CONFIG_W1_SLAVE_THERM is not set
+# CONFIG_W1_SLAVE_SMEM is not set
+# CONFIG_W1_SLAVE_DS2431 is not set
+# CONFIG_W1_SLAVE_DS2433 is not set
+CONFIG_W1_SLAVE_DS2760=m
+# CONFIG_W1_SLAVE_BQ27000 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_PDA_POWER=y
+# CONFIG_APM_POWER is not set
+CONFIG_BATTERY_DS2760=m
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_BATTERY_MAX17040 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_SENSORS_LIS3_SPI=y
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_T7L66XB is not set
+# CONFIG_MFD_TC6387XB is not set
+# CONFIG_MFD_TC6393XB is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_DEBUG=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_BQ24022 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+CONFIG_REGULATOR_MAX8660=y
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
+# CONFIG_FB_S1D13XXX is not set
+CONFIG_FB_PXA=y
+# CONFIG_FB_PXA_OVERLAY is not set
+# CONFIG_FB_PXA_SMARTPANEL is not set
+# CONFIG_FB_PXA_PARAMETERS is not set
+CONFIG_PXA3XX_GCU=y
+# CONFIG_FB_MBX is not set
+# CONFIG_FB_W100 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_BACKLIGHT_PWM=y
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_LOGO_RAUMFELD_CLUT224=y
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_ARM=y
+CONFIG_SND_PXA2XX_LIB=y
+# CONFIG_SND_PXA2XX_AC97 is not set
+CONFIG_SND_SPI=y
+# CONFIG_SND_USB is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_PXA2XX_SOC=y
+CONFIG_SND_PXA_SOC_SSP=y
+CONFIG_SND_SOC_RAUMFELD=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_AK4104=y
+CONFIG_SND_SOC_CS4270=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HIDRAW is not set
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+# CONFIG_USB_HIDDEV is not set
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+# CONFIG_DRAGONRISE_FF is not set
+CONFIG_HID_EZKEY=y
+CONFIG_HID_KYE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=y
+# CONFIG_SMARTJOYPLUS_FF is not set
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_ZEROPLUS=y
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+# CONFIG_USB_DEVICEFS is not set
+CONFIG_USB_DEVICE_CLASS=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+CONFIG_USB_MON=y
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+CONFIG_USB_STORAGE_FREECOM=y
+CONFIG_USB_STORAGE_ISD200=y
+CONFIG_USB_STORAGE_USBAT=y
+CONFIG_USB_STORAGE_SDDR09=y
+CONFIG_USB_STORAGE_SDDR55=y
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_UNSAFE_RESUME is not set
+
+#
+# MMC/SD/SDIO Card Drivers
+#
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_BOUNCE=y
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+CONFIG_MMC_PXA=m
+# CONFIG_MMC_SDHCI is not set
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_GPIO_PLATFORM=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2802 is not set
+CONFIG_LEDS_LT3593=y
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
+
+#
+# SPI RTC drivers
+#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_SA1100 is not set
+CONFIG_RTC_DRV_PXA=y
+CONFIG_DMADEVICES=y
+
+#
+# DMA Devices
+#
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=y
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_SMX is not set
+# CONFIG_UIO_SERCOS3 is not set
+
+#
+# TI VLYNQ
+#
+# CONFIG_STAGING is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT2_FS_XIP=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4_FS is not set
+CONFIG_FS_XIP=y
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# Caches
+#
+CONFIG_FSCACHE=y
+CONFIG_FSCACHE_STATS=y
+# CONFIG_FSCACHE_HISTOGRAM is not set
+# CONFIG_FSCACHE_DEBUG is not set
+CONFIG_CACHEFILES=y
+# CONFIG_CACHEFILES_DEBUG is not set
+# CONFIG_CACHEFILES_HISTOGRAM is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_UBIFS_FS=y
+# CONFIG_UBIFS_FS_XATTR is not set
+# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set
+CONFIG_UBIFS_FS_LZO=y
+CONFIG_UBIFS_FS_ZLIB=y
+# CONFIG_UBIFS_FS_DEBUG is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_SQUASHFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFS_FSCACHE=y
+# CONFIG_NFSD is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_BOOT_TRACER is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_KMEMTRACE is not set
+# CONFIG_WORKQUEUE_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARM_UNWIND=y
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_ERRORS=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+# CONFIG_CRYPTO_CBC is not set
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_GHASH is not set
+# CONFIG_CRYPTO_MD4 is not set
+# CONFIG_CRYPTO_MD5 is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_DES is not set
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+CONFIG_CRYPTO_LZO=y
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 610ac3c47b0f..9155196e623b 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc5
-# Sat Oct 17 23:32:24 2009
+# Linux kernel version: 2.6.33-rc2
+# Wed Jan 6 00:01:36 2010
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -46,6 +46,7 @@ CONFIG_SYSVIPC_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -119,14 +120,41 @@ CONFIG_BLOCK=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -155,6 +183,7 @@ CONFIG_MMU=y
# CONFIG_ARCH_IXP2000 is not set
# CONFIG_ARCH_IXP4XX is not set
# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_DOVE is not set
# CONFIG_ARCH_KIRKWOOD is not set
# CONFIG_ARCH_LOKI is not set
# CONFIG_ARCH_MV78XX0 is not set
@@ -177,6 +206,7 @@ CONFIG_ARCH_U300=y
# CONFIG_ARCH_DAVINCI is not set
# CONFIG_ARCH_OMAP is not set
# CONFIG_ARCH_BCMRING is not set
+# CONFIG_ARCH_U8500 is not set
#
# ST-Ericsson AB U300/U330/U335/U365 Platform
@@ -265,12 +295,10 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_PAGEFLAGS_EXTENDED=y
-CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_SPLIT_PTLOCK_CPUS=999999
# CONFIG_PHYS_ADDR_T_64BIT is not set
CONFIG_ZONE_DMA_FLAG=0
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_ALIGNMENT_TRAP=y
@@ -499,14 +527,21 @@ CONFIG_MTD_NAND_IDS=y
CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_RAM is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
#
@@ -517,6 +552,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_EEPROM_LEGACY is not set
# CONFIG_EEPROM_MAX6875 is not set
# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_IWMC3200TOP is not set
CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
@@ -539,6 +575,7 @@ CONFIG_HAVE_IDE=y
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -645,7 +682,6 @@ CONFIG_I2C_STU300=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_DS1682 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -661,6 +697,8 @@ CONFIG_SPI_MASTER=y
# CONFIG_SPI_BITBANG is not set
# CONFIG_SPI_GPIO is not set
CONFIG_SPI_PL022=y
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
#
# SPI Protocol Masters
@@ -708,6 +746,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_T7L66XB is not set
# CONFIG_MFD_TC6387XB is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
@@ -716,6 +755,8 @@ CONFIG_SSB_POSSIBLE=y
CONFIG_AB3100_CORE=y
CONFIG_AB3100_OTP=y
# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_88PM8607 is not set
+# CONFIG_AB4500_CORE is not set
CONFIG_REGULATOR=y
# CONFIG_REGULATOR_DEBUG is not set
# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
@@ -723,6 +764,7 @@ CONFIG_REGULATOR=y
# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
# CONFIG_REGULATOR_BQ24022 is not set
# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
# CONFIG_REGULATOR_LP3971 is not set
CONFIG_REGULATOR_AB3100=y
# CONFIG_REGULATOR_TPS65023 is not set
@@ -840,7 +882,9 @@ CONFIG_LEDS_CLASS=y
# CONFIG_LEDS_LP3944 is not set
# CONFIG_LEDS_PCA955X is not set
# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_REGULATOR is not set
# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
#
# LED Triggers
@@ -882,6 +926,7 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
@@ -911,7 +956,9 @@ CONFIG_RTC_INTF_DEV=y
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
# CONFIG_RTC_DRV_V3020 is not set
CONFIG_RTC_DRV_AB3100=y
@@ -926,6 +973,15 @@ CONFIG_DMADEVICES=y
#
# DMA Devices
#
+CONFIG_COH901318=y
+CONFIG_DMA_ENGINE=y
+
+#
+# DMA Clients
+#
+# CONFIG_NET_DMA is not set
+# CONFIG_ASYNC_TX_DMA is not set
+# CONFIG_DMATEST is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
@@ -1018,7 +1074,7 @@ CONFIG_MISC_FILESYSTEMS=y
CONFIG_MSDOS_PARTITION=y
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-# CONFIG_NLS_CODEPAGE_437 is not set
+CONFIG_NLS_CODEPAGE_437=y
# CONFIG_NLS_CODEPAGE_737 is not set
# CONFIG_NLS_CODEPAGE_775 is not set
# CONFIG_NLS_CODEPAGE_850 is not set
@@ -1135,6 +1191,7 @@ CONFIG_ARM_UNWIND=y
# CONFIG_DEBUG_ERRORS is not set
# CONFIG_DEBUG_STACK_USAGE is not set
# CONFIG_DEBUG_LL is not set
+# CONFIG_OC_ETM is not set
#
# Security options
@@ -1142,7 +1199,11 @@ CONFIG_ARM_UNWIND=y
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
# CONFIG_BINARY_PRINTF is not set
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 730aefcfbee3..be8b4d79cf41 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -182,21 +182,6 @@
* DMA Cache Coherency
* ===================
*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- * - start - virtual start address
- * - end - virtual end address
- *
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- * - start - virtual start address
- * - end - virtual end address
- *
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -213,8 +198,9 @@ struct cpu_cache_fns {
void (*coherent_user_range)(unsigned long, unsigned long);
void (*flush_kern_dcache_area)(void *, size_t);
- void (*dma_inv_range)(const void *, const void *);
- void (*dma_clean_range)(const void *, const void *);
+ void (*dma_map_area)(const void *, size_t, int);
+ void (*dma_unmap_area)(const void *, size_t, int);
+
void (*dma_flush_range)(const void *, const void *);
};
@@ -244,8 +230,8 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_inv_range cpu_cache.dma_inv_range
-#define dmac_clean_range cpu_cache.dma_clean_range
+#define dmac_map_area cpu_cache.dma_map_area
+#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -270,12 +256,12 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_inv_range __glue(_CACHE,_dma_inv_range)
-#define dmac_clean_range __glue(_CACHE,_dma_clean_range)
+#define dmac_map_area __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
-extern void dmac_inv_range(const void *, const void *);
-extern void dmac_clean_range(const void *, const void *);
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -316,12 +302,8 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
* processes address space. Really, we want to allow our "user
* space" model to handle this.
*/
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
- do { \
- memcpy(dst, src, len); \
- flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
- } while (0)
-
+extern void copy_to_user_page(struct vm_area_struct *, struct page *,
+ unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
@@ -355,17 +337,6 @@ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
}
}
-static inline void
-vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len, int write)
-{
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- unsigned long addr = (unsigned long)kaddr;
- __cpuc_coherent_kern_range(addr, addr + len);
- }
-}
-
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
vivt_flush_cache_mm(mm)
@@ -373,15 +344,10 @@ vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
vivt_flush_cache_page(vma,addr,pfn)
-#define flush_ptrace_access(vma,page,ua,ka,len,write) \
- vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
-extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index b6ec7c627b39..7a0690da5e63 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -27,4 +27,7 @@ struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
+void clkdev_add_table(struct clk_lookup *, size_t);
+int clk_add_alias(const char *, const char *, char *, struct device *);
+
#endif
diff --git a/arch/arm/include/asm/cpu.h b/arch/arm/include/asm/cpu.h
index 634b2d7c612a..793968173bef 100644
--- a/arch/arm/include/asm/cpu.h
+++ b/arch/arm/include/asm/cpu.h
@@ -11,6 +11,7 @@
#define __ASM_ARM_CPU_H
#include <linux/percpu.h>
+#include <linux/cpu.h>
struct cpuinfo_arm {
struct cpu cpu;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index a96300bf83fd..256ee1c9f51a 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -57,18 +57,58 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
#endif
/*
- * DMA-consistent mapping functions. These allocate/free a region of
- * uncached, unwrite-buffered mapped memory space for use with DMA
- * devices. This is the "generic" version. The PCI specific version
- * is in pci.h
+ * The DMA API is built upon the notion of "buffer ownership". A buffer
+ * is either exclusively owned by the CPU (and therefore may be accessed
+ * by it) or exclusively owned by the DMA device. These helper functions
+ * represent the transitions between these two ownership states.
*
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ * Note, however, that on later ARMs, this notion does not work due to
+ * speculative prefetches. We model our approach on the assumption that
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+ * Private support functions: these are not part of the API and are
+ * liable to change. Drivers must not use these.
*/
-extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
-extern void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int rw);
+static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ extern void ___dma_single_cpu_to_dev(const void *, size_t,
+ enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_cpu_to_dev(kaddr, size, dir);
+}
+
+static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
+{
+ extern void ___dma_single_dev_to_cpu(const void *, size_t,
+ enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_single_dev_to_cpu(kaddr, size, dir);
+}
+
+static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_page_cpu_to_dev(page, off, size, dir);
+}
+
+static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+ if (!arch_is_coherent())
+ ___dma_page_dev_to_cpu(page, off, size, dir);
+}
/*
* Return whether the given device DMA address mask can be supported
@@ -304,8 +344,7 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
{
BUG_ON(!valid_dma_direction(dir));
- if (!arch_is_coherent())
- dma_cache_maint(cpu_addr, size, dir);
+ __dma_single_cpu_to_dev(cpu_addr, size, dir);
return virt_to_dma(dev, cpu_addr);
}
@@ -329,8 +368,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
{
BUG_ON(!valid_dma_direction(dir));
- if (!arch_is_coherent())
- dma_cache_maint_page(page, offset, size, dir);
+ __dma_page_cpu_to_dev(page, offset, size, dir);
return page_to_dma(dev, page) + offset;
}
@@ -352,7 +390,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- /* nothing to do */
+ __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}
/**
@@ -372,7 +410,8 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
- /* nothing to do */
+ __dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
+ size, dir);
}
#endif /* CONFIG_DMABOUNCE */
@@ -400,7 +439,10 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
- dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
+ if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+ return;
+
+ __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_range_for_device(struct device *dev,
@@ -412,8 +454,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
return;
- if (!arch_is_coherent())
- dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
+ __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}
static inline void dma_sync_single_for_cpu(struct device *dev,
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 7edf3536df24..ca51143f97f1 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -138,12 +138,12 @@ extern int get_dma_residue(unsigned int chan);
#define NO_DMA 255
#endif
+#endif /* CONFIG_ISA_DMA_API */
+
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
-#endif /* CONFIG_ISA_DMA_API */
-
#endif /* __ASM_ARM_DMA_H */
diff --git a/arch/arm/include/asm/entry-macro-vic2.S b/arch/arm/include/asm/entry-macro-vic2.S
new file mode 100644
index 000000000000..3ceb85e43850
--- /dev/null
+++ b/arch/arm/include/asm/entry-macro-vic2.S
@@ -0,0 +1,57 @@
+/* arch/arm/include/asm/entry-macro-vic2.S
+ *
+ * Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * http://armlinux.simtec.co.uk/
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Low-level IRQ helper macros for a device with two VICs
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+*/
+
+/* This should be included from <mach/entry-macro.S> with the necessary
+ * defines for virtual addresses and IRQ bases for the two vics.
+ *
+ * The code needs the following defined:
+ * IRQ_VIC0_BASE IRQ number of VIC0's first IRQ
+ * IRQ_VIC1_BASE IRQ number of VIC1's first IRQ
+ * VA_VIC0 Virtual address of VIC0
+ * VA_VIC1 Virtual address of VIC1
+ *
+ * Note, code assumes VIC0's virtual address is an ARM immediate constant
+ * away from VIC1.
+*/
+
+#include <asm/hardware/vic.h>
+
+ .macro disable_fiq
+ .endm
+
+ .macro get_irqnr_preamble, base, tmp
+ ldr \base, =VA_VIC0
+ .endm
+
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+
+ .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
+
+ @ check the vic0
+ mov \irqnr, #IRQ_VIC0_BASE + 31
+ ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
+ teq \irqstat, #0
+
+ @ otherwise try vic1
+ addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
+ addeq \irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
+ ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
+ teqeq \irqstat, #0
+
+ clzne \irqstat, \irqstat
+ subne \irqnr, \irqnr, \irqstat
+ .endm
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index d2a59cfc30ce..3082d5b70e3b 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -69,9 +69,16 @@ extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
/*
* __arm_ioremap takes CPU physical address.
* __arm_ioremap_pfn takes a Page Frame Number and an offset into that page
+ * The _caller variety takes a __builtin_return_address(0) value for
+ * /proc/vmalloc to use - and should only be used in non-inline functions.
*/
-extern void __iomem * __arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem * __arm_ioremap(unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
+ size_t, unsigned int, void *);
+extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+ void *);
+
+extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
+extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
extern void __iounmap(volatile void __iomem *addr);
/*
@@ -240,10 +247,14 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define ioread8(p) ({ unsigned int __v = __raw_readb(p); __v; })
#define ioread16(p) ({ unsigned int __v = le16_to_cpu((__force __le16)__raw_readw(p)); __v; })
#define ioread32(p) ({ unsigned int __v = le32_to_cpu((__force __le32)__raw_readl(p)); __v; })
+#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __v; })
+#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __v; })
#define iowrite8(v,p) __raw_writeb(v, p)
#define iowrite16(v,p) __raw_writew((__force __u16)cpu_to_le16(v), p)
#define iowrite32(v,p) __raw_writel((__force __u32)cpu_to_le32(v), p)
+#define iowrite16be(v,p) __raw_writew((__force __u16)cpu_to_be16(v), p)
+#define iowrite32be(v,p) __raw_writel((__force __u32)cpu_to_be32(v), p)
#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 67af4b841984..17c69ae4c309 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
#define __ARM_KGDB_H__
#include <linux/ptrace.h>
+#include <linux/notifier.h>
/*
* GDB assumes that we're a user process being debugged, so
@@ -95,6 +96,16 @@ extern int kgdb_fault_expected;
#define _PC 15
#define _CPSR (GDB_MAX_REGS - 1)
+#ifdef CONFIG_KGDB
+int kgdb_die_hook(int cmd, const char *str, struct pt_regs *regs, int err);
+#else
+static inline int kgdb_die_hook(int cmd, const char *str,
+ struct pt_regs *regs, int err)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
/*
* So that we can denote the end of a frame for tracing,
* in the simple case:
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index c019949a5189..3a9fb574fd8d 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -19,6 +19,7 @@ enum km_type {
KM_SOFTIRQ0,
KM_SOFTIRQ1,
KM_L2_CACHE,
+ KM_KDB,
KM_TYPE_NR
};
diff --git a/arch/arm/include/asm/mach/time.h b/arch/arm/include/asm/mach/time.h
index b2cc1fcd0400..8bffc3ff3acf 100644
--- a/arch/arm/include/asm/mach/time.h
+++ b/arch/arm/include/asm/mach/time.h
@@ -46,12 +46,4 @@ struct sys_timer {
extern struct sys_timer *system_timer;
extern void timer_tick(void);
-/*
- * Kernel time keeping support.
- */
-struct timespec;
-extern int (*set_rtc)(void);
-extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
-extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
-
#endif
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 3a32af4cce30..a485ac3c8696 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -117,11 +117,12 @@
#endif
struct page;
+struct vm_area_struct;
struct cpu_user_fns {
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
- unsigned long vaddr);
+ unsigned long vaddr, struct vm_area_struct *vma);
};
#ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr);
+ unsigned long vaddr, struct vm_area_struct *vma);
#endif
#define clear_user_highpage(page,vaddr) \
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma) \
- __cpu_copy_user_highpage(to, from, vaddr)
+ __cpu_copy_user_highpage(to, from, vaddr, vma)
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index bbecccda76d0..eec6e897ceb2 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -97,9 +97,15 @@
* stack during a system call. Note that sizeof(struct pt_regs)
* has to be a multiple of 8.
*/
+#ifndef __KERNEL__
struct pt_regs {
long uregs[18];
};
+#else /* __KERNEL__ */
+struct pt_regs {
+ unsigned long uregs[18];
+};
+#endif /* __KERNEL__ */
#define ARM_cpsr uregs[16]
#define ARM_pc uregs[15]
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index 5ccce0a9b03c..f392fb4437af 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -223,18 +223,6 @@ extern struct meminfo meminfo;
#define bank_phys_end(bank) ((bank)->start + (bank)->size)
#define bank_phys_size(bank) (bank)->size
-/*
- * Early command line parameters.
- */
-struct early_params {
- const char *arg;
- void (*fn)(char **p);
-};
-
-#define __early_param(name,fn) \
-static struct early_params __early_##fn __used \
-__attribute__((__section__(".early_param.init"))) = { name, fn }
-
#endif /* __KERNEL__ */
#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 59303e200845..e6215305544a 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -13,4 +13,9 @@ static inline int tlb_ops_need_broadcast(void)
return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
}
+static inline int cache_ops_need_broadcast(void)
+{
+ return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 1;
+}
+
#endif
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 4e506d09e5f9..cf9cdaa2d4d4 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -391,6 +391,7 @@
#define __NR_pwritev (__NR_SYSCALL_BASE+362)
#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE+363)
#define __NR_perf_event_open (__NR_SYSCALL_BASE+364)
+#define __NR_recvmmsg (__NR_SYSCALL_BASE+365)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index dd00f747e2ad..eb0284cd8661 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,6 +17,7 @@ obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \
process.o ptrace.o return_address.o setup.o signal.o \
sys_arm.o stacktrace.o time.o traps.o
+obj-$(CONFIG_LEDS) += leds.o
obj-$(CONFIG_OC_ETM) += etm.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 4a881258bb17..883511522fca 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
*/
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/dma-mapping.h>
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
@@ -112,5 +113,9 @@ int main(void)
#ifdef MULTI_PABORT
DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort));
#endif
+ BLANK();
+ DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL);
+ DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
+ DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
return 0;
}
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d2903e3bc861..6c5cf369183b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -957,9 +957,7 @@ kuser_cmpxchg_fixup:
#else
-#ifdef CONFIG_SMP
- mcr p15, 0, r0, c7, c10, 5 @ dmb
-#endif
+ smp_dmb
1: ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index ba8ccfede964..3c0e5e533e95 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -10,6 +10,7 @@
* Deepak Saxena <dsaxena@plexity.net>
*/
#include <linux/kgdb.h>
+#include <linux/notifier.h>
#include <asm/traps.h>
/* Make a local copy of the registers passed into the handler (bletch) */
@@ -97,6 +98,11 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->ARM_pc = pc;
+}
+
static int compiled_break;
int kgdb_arch_handle_exception(int exception_vector, int signo,
@@ -184,6 +190,13 @@ void kgdb_arch_exit(void)
unregister_undef_hook(&kgdb_compiled_brkpt_hook);
}
+int kgdb_die_hook(int cmd, const char *str, struct pt_regs *regs, int err)
+{
+ if (kgdb_handle_exception(1, err, cmd, regs))
+ return NOTIFY_DONE;
+ return NOTIFY_STOP;
+}
+
/*
* Register our undef instruction hooks with ARM undef core.
* We regsiter a hook specifically looking for the KGB break inst
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
new file mode 100644
index 000000000000..31a316c1777b
--- /dev/null
+++ b/arch/arm/kernel/leds.c
@@ -0,0 +1,115 @@
+/*
+ * LED support code, ripped out of arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1994-2001 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sysdev.h>
+
+#include <asm/leds.h>
+
+static void dummy_leds_event(led_event_t evt)
+{
+}
+
+void (*leds_event)(led_event_t) = dummy_leds_event;
+
+struct leds_evt_name {
+ const char name[8];
+ int on;
+ int off;
+};
+
+static const struct leds_evt_name evt_names[] = {
+ { "amber", led_amber_on, led_amber_off },
+ { "blue", led_blue_on, led_blue_off },
+ { "green", led_green_on, led_green_off },
+ { "red", led_red_on, led_red_off },
+};
+
+static ssize_t leds_store(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = -EINVAL, len = strcspn(buf, " ");
+
+ if (len > 0 && buf[len] == '\0')
+ len--;
+
+ if (strncmp(buf, "claim", len) == 0) {
+ leds_event(led_claim);
+ ret = size;
+ } else if (strncmp(buf, "release", len) == 0) {
+ leds_event(led_release);
+ ret = size;
+ } else {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
+ if (strlen(evt_names[i].name) != len ||
+ strncmp(buf, evt_names[i].name, len) != 0)
+ continue;
+ if (strncmp(buf+len, " on", 3) == 0) {
+ leds_event(evt_names[i].on);
+ ret = size;
+ } else if (strncmp(buf+len, " off", 4) == 0) {
+ leds_event(evt_names[i].off);
+ ret = size;
+ }
+ break;
+ }
+ }
+ return ret;
+}
+
+static SYSDEV_ATTR(event, 0200, NULL, leds_store);
+
+static int leds_suspend(struct sys_device *dev, pm_message_t state)
+{
+ leds_event(led_stop);
+ return 0;
+}
+
+static int leds_resume(struct sys_device *dev)
+{
+ leds_event(led_start);
+ return 0;
+}
+
+static int leds_shutdown(struct sys_device *dev)
+{
+ leds_event(led_halted);
+ return 0;
+}
+
+static struct sysdev_class leds_sysclass = {
+ .name = "leds",
+ .shutdown = leds_shutdown,
+ .suspend = leds_suspend,
+ .resume = leds_resume,
+};
+
+static struct sys_device leds_device = {
+ .id = 0,
+ .cls = &leds_sysclass,
+};
+
+static int __init leds_init(void)
+{
+ int ret;
+ ret = sysdev_class_register(&leds_sysclass);
+ if (ret == 0)
+ ret = sysdev_register(&leds_device);
+ if (ret == 0)
+ ret = sysdev_create_file(&leds_device, &attr_event);
+ return ret;
+}
+
+device_initcall(leds_init);
+
+EXPORT_SYMBOL(leds_event);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 67304138a2ca..ba2adefa53f7 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -212,7 +212,8 @@ void __show_regs(struct pt_regs *regs)
char buf[64];
printk("CPU: %d %s (%s %.*s)\n",
- smp_processor_id(), print_tainted(), init_utsname()->release,
+ raw_smp_processor_id(), print_tainted(),
+ init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
print_symbol("PC is at %s\n", instruction_pointer(regs));
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c6c57b640b6b..b01a56a03ed8 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -24,6 +24,7 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
+#include <linux/proc_fs.h>
#include <asm/unified.h>
#include <asm/cpu.h>
@@ -417,10 +418,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size)
* Pick out the memory size. We look for mem=size@start,
* where start and size are "size[KkMm]"
*/
-static void __init early_mem(char **p)
+static int __init early_mem(char *p)
{
static int usermem __initdata = 0;
unsigned long size, start;
+ char *endp;
/*
* If the user specifies memory size, we
@@ -433,52 +435,15 @@ static void __init early_mem(char **p)
}
start = PHYS_OFFSET;
- size = memparse(*p, p);
- if (**p == '@')
- start = memparse(*p + 1, p);
+ size = memparse(p, &endp);
+ if (*endp == '@')
+ start = memparse(endp + 1, NULL);
arm_add_memory(start, size);
-}
-__early_param("mem=", early_mem);
-/*
- * Initial parsing of the command line.
- */
-static void __init parse_cmdline(char **cmdline_p, char *from)
-{
- char c = ' ', *to = command_line;
- int len = 0;
-
- for (;;) {
- if (c == ' ') {
- extern struct early_params __early_begin, __early_end;
- struct early_params *p;
-
- for (p = &__early_begin; p < &__early_end; p++) {
- int arglen = strlen(p->arg);
-
- if (memcmp(from, p->arg, arglen) == 0) {
- if (to != command_line)
- to -= 1;
- from += arglen;
- p->fn(&from);
-
- while (*from != ' ' && *from != '\0')
- from++;
- break;
- }
- }
- }
- c = *from++;
- if (!c)
- break;
- if (COMMAND_LINE_SIZE <= ++len)
- break;
- *to++ = c;
- }
- *to = '\0';
- *cmdline_p = command_line;
+ return 0;
}
+early_param("mem", early_mem);
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
@@ -739,9 +704,15 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) _edata;
init_mm.brk = (unsigned long) _end;
- memcpy(boot_command_line, from, COMMAND_LINE_SIZE);
- boot_command_line[COMMAND_LINE_SIZE-1] = '\0';
- parse_cmdline(cmdline_p, from);
+ /* parse_early_param needs a boot_command_line */
+ strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
+
+ /* populate command_line too for later use, preserving boot_command_line */
+ strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ parse_early_param();
+
paging_init(mdesc);
request_standard_resources(&meminfo, mdesc);
@@ -782,9 +753,21 @@ static int __init topology_init(void)
return 0;
}
-
subsys_initcall(topology_init);
+#ifdef CONFIG_HAVE_PROC_CPU
+static int __init proc_cpu_init(void)
+{
+ struct proc_dir_entry *res;
+
+ res = proc_mkdir("cpu", NULL);
+ if (!res)
+ return -ENOMEM;
+ return 0;
+}
+fs_initcall(proc_cpu_init);
+#endif
+
static const char *hwcap_str[] = {
"swp",
"half",
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index d38cdf2c8276..28753805d2d1 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -10,11 +10,6 @@
*
* This file contains the ARM-specific time handling details:
* reading the RTC at bootup, etc...
- *
- * 1994-07-02 Alan Modra
- * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
- * 1998-12-20 Updated NTP code according to technical memorandum Jan '96
- * "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/module.h>
#include <linux/kernel.h>
@@ -77,11 +72,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-/*
- * hook for setting the RTC's idea of the current time.
- */
-int (*set_rtc)(void);
-
#ifndef CONFIG_GENERIC_TIME
static unsigned long dummy_gettimeoffset(void)
{
@@ -89,140 +79,6 @@ static unsigned long dummy_gettimeoffset(void)
}
#endif
-static unsigned long next_rtc_update;
-
-/*
- * If we have an externally synchronized linux clock, then update
- * CMOS clock accordingly every ~11 minutes. set_rtc() has to be
- * called as close as possible to 500 ms before the new second
- * starts.
- */
-static inline void do_set_rtc(void)
-{
- if (!ntp_synced() || set_rtc == NULL)
- return;
-
- if (next_rtc_update &&
- time_before((unsigned long)xtime.tv_sec, next_rtc_update))
- return;
-
- if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) &&
- xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1))
- return;
-
- if (set_rtc())
- /*
- * rtc update failed. Try again in 60s
- */
- next_rtc_update = xtime.tv_sec + 60;
- else
- next_rtc_update = xtime.tv_sec + 660;
-}
-
-#ifdef CONFIG_LEDS
-
-static void dummy_leds_event(led_event_t evt)
-{
-}
-
-void (*leds_event)(led_event_t) = dummy_leds_event;
-
-struct leds_evt_name {
- const char name[8];
- int on;
- int off;
-};
-
-static const struct leds_evt_name evt_names[] = {
- { "amber", led_amber_on, led_amber_off },
- { "blue", led_blue_on, led_blue_off },
- { "green", led_green_on, led_green_off },
- { "red", led_red_on, led_red_off },
-};
-
-static ssize_t leds_store(struct sys_device *dev,
- struct sysdev_attribute *attr,
- const char *buf, size_t size)
-{
- int ret = -EINVAL, len = strcspn(buf, " ");
-
- if (len > 0 && buf[len] == '\0')
- len--;
-
- if (strncmp(buf, "claim", len) == 0) {
- leds_event(led_claim);
- ret = size;
- } else if (strncmp(buf, "release", len) == 0) {
- leds_event(led_release);
- ret = size;
- } else {
- int i;
-
- for (i = 0; i < ARRAY_SIZE(evt_names); i++) {
- if (strlen(evt_names[i].name) != len ||
- strncmp(buf, evt_names[i].name, len) != 0)
- continue;
- if (strncmp(buf+len, " on", 3) == 0) {
- leds_event(evt_names[i].on);
- ret = size;
- } else if (strncmp(buf+len, " off", 4) == 0) {
- leds_event(evt_names[i].off);
- ret = size;
- }
- break;
- }
- }
- return ret;
-}
-
-static SYSDEV_ATTR(event, 0200, NULL, leds_store);
-
-static int leds_suspend(struct sys_device *dev, pm_message_t state)
-{
- leds_event(led_stop);
- return 0;
-}
-
-static int leds_resume(struct sys_device *dev)
-{
- leds_event(led_start);
- return 0;
-}
-
-static int leds_shutdown(struct sys_device *dev)
-{
- leds_event(led_halted);
- return 0;
-}
-
-static struct sysdev_class leds_sysclass = {
- .name = "leds",
- .shutdown = leds_shutdown,
- .suspend = leds_suspend,
- .resume = leds_resume,
-};
-
-static struct sys_device leds_device = {
- .id = 0,
- .cls = &leds_sysclass,
-};
-
-static int __init leds_init(void)
-{
- int ret;
- ret = sysdev_class_register(&leds_sysclass);
- if (ret == 0)
- ret = sysdev_register(&leds_device);
- if (ret == 0)
- ret = sysdev_create_file(&leds_device, &attr_event);
- return ret;
-}
-
-device_initcall(leds_init);
-
-EXPORT_SYMBOL(leds_event);
-#endif
-
#ifdef CONFIG_LEDS_TIMER
static inline void do_leds(void)
{
@@ -295,39 +151,6 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
#endif /* !CONFIG_GENERIC_TIME */
-/**
- * save_time_delta - Save the offset between system time and RTC time
- * @delta: pointer to timespec to store delta
- * @rtc: pointer to timespec for current RTC time
- *
- * Return a delta between the system time and the RTC time, such
- * that system time can be restored later with restore_time_delta()
- */
-void save_time_delta(struct timespec *delta, struct timespec *rtc)
-{
- set_normalized_timespec(delta,
- xtime.tv_sec - rtc->tv_sec,
- xtime.tv_nsec - rtc->tv_nsec);
-}
-EXPORT_SYMBOL(save_time_delta);
-
-/**
- * restore_time_delta - Restore the current system time
- * @delta: delta returned by save_time_delta()
- * @rtc: pointer to timespec for current RTC time
- */
-void restore_time_delta(struct timespec *delta, struct timespec *rtc)
-{
- struct timespec ts;
-
- set_normalized_timespec(&ts,
- delta->tv_sec + rtc->tv_sec,
- delta->tv_nsec + rtc->tv_nsec);
-
- do_settimeofday(&ts);
-}
-EXPORT_SYMBOL(restore_time_delta);
-
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
* Kernel system timer support.
@@ -336,7 +159,6 @@ void timer_tick(void)
{
profile_tick(CPU_PROFILING);
do_leds();
- do_set_rtc();
write_seqlock(&xtime_lock);
do_timer(1);
write_sequnlock(&xtime_lock);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3f361a783f43..707e82418885 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -21,6 +21,9 @@
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/uaccess.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
@@ -254,6 +257,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
{
struct thread_info *thread = current_thread_info();
+ kgdb_die_hook(DIE_OOPS, str, regs, err);
+
oops_enter();
spin_lock_irq(&die_lock);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 4957e13ef55b..b16c07914b55 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -43,10 +43,6 @@ SECTIONS
INIT_SETUP(16)
- __early_begin = .;
- *(.early_param.init)
- __early_end = .;
-
INIT_CALLS
CON_INITCALL
SECURITY_INITCALL
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index e590bbe0a7b4..72e405df0fb0 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -142,8 +142,7 @@ void __init bcmring_amba_init(void)
chipcHw_busInterfaceClockEnable(bus_clock);
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig
index 033bfede6b67..0ebe185610bf 100644
--- a/arch/arm/mach-davinci/Kconfig
+++ b/arch/arm/mach-davinci/Kconfig
@@ -91,10 +91,14 @@ config MACH_DAVINCI_DM6467_EVM
bool "TI DM6467 EVM"
default ARCH_DAVINCI_DM646x
depends on ARCH_DAVINCI_DM646x
+ select MACH_DAVINCI_DM6467TEVM
help
Configure this option to specify the whether the board used
for development is a DM6467 EVM
+config MACH_DAVINCI_DM6467TEVM
+ bool
+
config MACH_DAVINCI_DM365_EVM
bool "TI DM365 EVM"
default ARCH_DAVINCI_DM365
diff --git a/arch/arm/mach-davinci/Makefile b/arch/arm/mach-davinci/Makefile
index eeb9230d8844..6aac880eb794 100644
--- a/arch/arm/mach-davinci/Makefile
+++ b/arch/arm/mach-davinci/Makefile
@@ -26,7 +26,7 @@ obj-$(CONFIG_MACH_SFFSDR) += board-sffsdr.o
obj-$(CONFIG_MACH_NEUROS_OSD2) += board-neuros-osd2.o
obj-$(CONFIG_MACH_DAVINCI_DM355_EVM) += board-dm355-evm.o
obj-$(CONFIG_MACH_DM355_LEOPARD) += board-dm355-leopard.o
-obj-$(CONFIG_MACH_DAVINCI_DM6467_EVM) += board-dm646x-evm.o
+obj-$(CONFIG_MACH_DAVINCI_DM6467_EVM) += board-dm646x-evm.o cdce949.o
obj-$(CONFIG_MACH_DAVINCI_DM365_EVM) += board-dm365-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA830_EVM) += board-da830-evm.o
obj-$(CONFIG_MACH_DAVINCI_DA850_EVM) += board-da850-evm.o
@@ -34,3 +34,4 @@ obj-$(CONFIG_MACH_DAVINCI_DA850_EVM) += board-da850-evm.o
# Power Management
obj-$(CONFIG_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
+obj-$(CONFIG_SUSPEND) += pm.o sleep.o
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 31dc9901e556..dc19870b23cd 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -112,7 +112,7 @@ static __init void da830_evm_usb_init(void)
* Set up USB clock/mode in the CFGCHIP2 register.
* FYI: CFGCHIP2 is 0x0000ef00 initially.
*/
- cfgchip2 = __raw_readl(DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG));
+ cfgchip2 = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
/* USB2.0 PHY reference clock is 24 MHz */
cfgchip2 &= ~CFGCHIP2_REFFREQ;
@@ -139,7 +139,7 @@ static __init void da830_evm_usb_init(void)
cfgchip2 |= CFGCHIP2_SESENDEN | CFGCHIP2_VBDTCTEN;
#endif
- __raw_writel(cfgchip2, DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG));
+ __raw_writel(cfgchip2, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP2_REG));
/* USB_REFCLKIN is not used. */
ret = davinci_cfg_reg(DA830_USB0_DRVVBUS);
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 07de8db14581..411284d0b0fa 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -46,8 +46,20 @@
static struct mtd_partition da850_evm_norflash_partition[] = {
{
- .name = "NOR filesystem",
+ .name = "bootloaders + env",
.offset = 0,
+ .size = SZ_512K,
+ .mask_flags = MTD_WRITEABLE,
+ },
+ {
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_2M,
+ .mask_flags = 0,
+ },
+ {
+ .name = "filesystem",
+ .offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL,
.mask_flags = 0,
},
@@ -77,6 +89,18 @@ static struct platform_device da850_evm_norflash_device = {
.resource = da850_evm_norflash_resource,
};
+static struct davinci_pm_config da850_pm_pdata = {
+ .sleepcount = 128,
+};
+
+static struct platform_device da850_pm_device = {
+ .name = "pm-davinci",
+ .dev = {
+ .platform_data = &da850_pm_pdata,
+ },
+ .id = -1,
+};
+
/* DA850/OMAP-L138 EVM includes a 512 MByte large-page NAND flash
* (128K blocks). It may be used instead of the (default) SPI flash
* to boot, using TI's tools to install the secondary boot loader
@@ -119,6 +143,7 @@ static struct davinci_nand_pdata da850_evm_nandflash_data = {
.parts = da850_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(da850_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 4,
.options = NAND_USE_FLASH_BBT,
};
@@ -537,7 +562,7 @@ static int __init da850_evm_config_emac(void)
if (!machine_is_davinci_da850_evm())
return 0;
- cfg_chip3_base = DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP3_REG);
+ cfg_chip3_base = DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG);
val = __raw_readl(cfg_chip3_base);
@@ -696,6 +721,11 @@ static __init void da850_evm_init(void)
if (ret)
pr_warning("da850_evm_init: cpuidle registration failed: %d\n",
ret);
+
+ ret = da850_register_pm(&da850_pm_device);
+ if (ret)
+ pr_warning("da850_evm_init: suspend registration failed: %d\n",
+ ret);
}
#ifdef CONFIG_SERIAL_8250_CONSOLE
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index a9b650dcc172..077ecf4fecda 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -236,6 +236,7 @@ static struct vpfe_subdev_info vpfe_sub_devs[] = {
static struct vpfe_config vpfe_cfg = {
.num_subdevs = ARRAY_SIZE(vpfe_sub_devs),
+ .i2c_adapter_id = 1,
.sub_devs = vpfe_sub_devs,
.card_name = "DM355 EVM",
.ccdc = "DM355 CCDC",
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 289fe1b7d25a..b476395d2cd4 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -192,7 +192,11 @@ static struct davinci_i2c_platform_data i2c_pdata = {
.bus_delay = 0 /* usec */,
};
-#ifdef CONFIG_KEYBOARD_DAVINCI
+static int dm365evm_keyscan_enable(struct device *dev)
+{
+ return davinci_cfg_reg(DM365_KEYSCAN);
+}
+
static unsigned short dm365evm_keymap[] = {
KEY_KP2,
KEY_LEFT,
@@ -214,6 +218,7 @@ static unsigned short dm365evm_keymap[] = {
};
static struct davinci_ks_platform_data dm365evm_ks_data = {
+ .device_enable = dm365evm_keyscan_enable,
.keymap = dm365evm_keymap,
.keymapsize = ARRAY_SIZE(dm365evm_keymap),
.rep = 1,
@@ -222,7 +227,6 @@ static struct davinci_ks_platform_data dm365evm_ks_data = {
.interval = 0x2,
.matrix_type = DAVINCI_KEYSCAN_MATRIX_4X4,
};
-#endif
static int cpld_mmc_get_cd(int module)
{
@@ -511,10 +515,7 @@ static __init void dm365_evm_init(void)
dm365_init_asp(&dm365_evm_snd_data);
dm365_init_rtc();
-
-#ifdef CONFIG_KEYBOARD_DAVINCI
dm365_init_ks(&dm365evm_ks_data);
-#endif
}
static __init void dm365_evm_irq_init(void)
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index fd0398bc6db3..e9612cf727b7 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -247,6 +247,7 @@ static struct vpfe_subdev_info vpfe_sub_devs[] = {
static struct vpfe_config vpfe_cfg = {
.num_subdevs = ARRAY_SIZE(vpfe_sub_devs),
+ .i2c_adapter_id = 1,
.sub_devs = vpfe_sub_devs,
.card_name = "DM6446 EVM",
.ccdc = "DM6446 CCDC",
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index 8d0b0e01c59b..815067b2413e 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -30,6 +30,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -39,54 +40,13 @@
#include <mach/serial.h>
#include <mach/i2c.h>
#include <mach/nand.h>
+#include <mach/clock.h>
+#include <mach/cdce949.h>
-#if defined(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
- defined(CONFIG_BLK_DEV_PALMCHIP_BK3710_MODULE)
-#define HAS_ATA 1
-#else
-#define HAS_ATA 0
-#endif
-
-#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x20008000
-#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x42000000
+#include "clock.h"
#define NAND_BLOCK_SIZE SZ_128K
-/* CPLD Register 0 bits to control ATA */
-#define DM646X_EVM_ATA_RST BIT(0)
-#define DM646X_EVM_ATA_PWD BIT(1)
-
-#define DM646X_EVM_PHY_MASK (0x2)
-#define DM646X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
-
-#define VIDCLKCTL_OFFSET (DAVINCI_SYSTEM_MODULE_BASE + 0x38)
-#define VSCLKDIS_OFFSET (DAVINCI_SYSTEM_MODULE_BASE + 0x6c)
-#define VCH2CLK_MASK (BIT_MASK(10) | BIT_MASK(9) | BIT_MASK(8))
-#define VCH2CLK_SYSCLK8 (BIT(9))
-#define VCH2CLK_AUXCLK (BIT(9) | BIT(8))
-#define VCH3CLK_MASK (BIT_MASK(14) | BIT_MASK(13) | BIT_MASK(12))
-#define VCH3CLK_SYSCLK8 (BIT(13))
-#define VCH3CLK_AUXCLK (BIT(14) | BIT(13))
-
-#define VIDCH2CLK (BIT(10))
-#define VIDCH3CLK (BIT(11))
-#define VIDCH1CLK (BIT(4))
-#define TVP7002_INPUT (BIT(4))
-#define TVP5147_INPUT (~BIT(4))
-#define VPIF_INPUT_ONE_CHANNEL (BIT(5))
-#define VPIF_INPUT_TWO_CHANNEL (~BIT(5))
-#define TVP5147_CH0 "tvp514x-0"
-#define TVP5147_CH1 "tvp514x-1"
-
-static void __iomem *vpif_vidclkctl_reg;
-static void __iomem *vpif_vsclkdis_reg;
-/* spin lock for updating above registers */
-static spinlock_t vpif_reg_lock;
-
-static struct davinci_uart_config uart_config __initdata = {
- .enabled_uarts = (1 << 0),
-};
-
/* Note: We are setting first partition as 'bootloader' constituting UBL, U-Boot
* and U-Boot environment this avoids dependency on any particular combination
* of UBL, U-Boot or flashing tools etc.
@@ -120,6 +80,9 @@ static struct davinci_nand_pdata davinci_nand_data = {
.options = 0,
};
+#define DAVINCI_ASYNC_EMIF_CONTROL_BASE 0x20008000
+#define DAVINCI_ASYNC_EMIF_DATA_CE0_BASE 0x42000000
+
static struct resource davinci_nand_resources[] = {
{
.start = DAVINCI_ASYNC_EMIF_DATA_CE0_BASE,
@@ -144,6 +107,17 @@ static struct platform_device davinci_nand_device = {
},
};
+#if defined(CONFIG_BLK_DEV_PALMCHIP_BK3710) || \
+ defined(CONFIG_BLK_DEV_PALMCHIP_BK3710_MODULE)
+#define HAS_ATA 1
+#else
+#define HAS_ATA 0
+#endif
+
+/* CPLD Register 0 bits to control ATA */
+#define DM646X_EVM_ATA_RST BIT(0)
+#define DM646X_EVM_ATA_PWD BIT(1)
+
/* CPLD Register 0 Client: used for I/O Control */
static int cpld_reg0_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -417,6 +391,9 @@ static struct i2c_board_info __initdata i2c_info[] = {
{
I2C_BOARD_INFO("cpld_video", 0x3b),
},
+ {
+ I2C_BOARD_INFO("cdce949", 0x6c),
+ },
};
static struct davinci_i2c_platform_data i2c_pdata = {
@@ -424,6 +401,30 @@ static struct davinci_i2c_platform_data i2c_pdata = {
.bus_delay = 0 /* usec */,
};
+#define VIDCLKCTL_OFFSET (DAVINCI_SYSTEM_MODULE_BASE + 0x38)
+#define VSCLKDIS_OFFSET (DAVINCI_SYSTEM_MODULE_BASE + 0x6c)
+#define VCH2CLK_MASK (BIT_MASK(10) | BIT_MASK(9) | BIT_MASK(8))
+#define VCH2CLK_SYSCLK8 (BIT(9))
+#define VCH2CLK_AUXCLK (BIT(9) | BIT(8))
+#define VCH3CLK_MASK (BIT_MASK(14) | BIT_MASK(13) | BIT_MASK(12))
+#define VCH3CLK_SYSCLK8 (BIT(13))
+#define VCH3CLK_AUXCLK (BIT(14) | BIT(13))
+
+#define VIDCH2CLK (BIT(10))
+#define VIDCH3CLK (BIT(11))
+#define VIDCH1CLK (BIT(4))
+#define TVP7002_INPUT (BIT(4))
+#define TVP5147_INPUT (~BIT(4))
+#define VPIF_INPUT_ONE_CHANNEL (BIT(5))
+#define VPIF_INPUT_TWO_CHANNEL (~BIT(5))
+#define TVP5147_CH0 "tvp514x-0"
+#define TVP5147_CH1 "tvp514x-1"
+
+static void __iomem *vpif_vidclkctl_reg;
+static void __iomem *vpif_vsclkdis_reg;
+/* spin lock for updating above registers */
+static spinlock_t vpif_reg_lock;
+
static int set_vpif_clock(int mux_mode, int hd)
{
unsigned long flags;
@@ -685,11 +686,44 @@ static void __init evm_init_i2c(void)
evm_init_video();
}
+#define CDCE949_XIN_RATE 27000000
+
+/* CDCE949 support - "lpsc" field is overridden to work as clock number */
+static struct clk cdce_clk_in = {
+ .name = "cdce_xin",
+ .rate = CDCE949_XIN_RATE,
+};
+
+static struct davinci_clk cdce_clks[] = {
+ CLK(NULL, "xin", &cdce_clk_in),
+ CLK(NULL, NULL, NULL),
+};
+
+static void __init cdce_clk_init(void)
+{
+ struct davinci_clk *c;
+ struct clk *clk;
+
+ for (c = cdce_clks; c->lk.clk; c++) {
+ clk = c->lk.clk;
+ clkdev_add(&c->lk);
+ clk_register(clk);
+ }
+}
+
static void __init davinci_map_io(void)
{
dm646x_init();
+ cdce_clk_init();
}
+static struct davinci_uart_config uart_config __initdata = {
+ .enabled_uarts = (1 << 0),
+};
+
+#define DM646X_EVM_PHY_MASK (0x2)
+#define DM646X_EVM_MDIO_FREQUENCY (2200000) /* PHY bus frequency */
+
static __init void evm_init(void)
{
struct davinci_soc_info *soc_info = &davinci_soc_info;
@@ -713,6 +747,17 @@ static __init void davinci_dm646x_evm_irq_init(void)
davinci_irq_init();
}
+#define DM646X_EVM_REF_FREQ 27000000
+#define DM6467T_EVM_REF_FREQ 33000000
+
+void __init dm646x_board_setup_refclk(struct clk *clk)
+{
+ if (machine_is_davinci_dm6467tevm())
+ clk->rate = DM6467T_EVM_REF_FREQ;
+ else
+ clk->rate = DM646X_EVM_REF_FREQ;
+}
+
MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.phys_io = IO_PHYS,
.io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
@@ -723,3 +768,13 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
.init_machine = evm_init,
MACHINE_END
+MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
+ .phys_io = IO_PHYS,
+ .io_pg_offst = (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
+ .boot_params = (0x80000100),
+ .map_io = davinci_map_io,
+ .init_irq = davinci_dm646x_evm_irq_init,
+ .timer = &davinci_timer,
+ .init_machine = evm_init,
+MACHINE_END
+
diff --git a/arch/arm/mach-davinci/cdce949.c b/arch/arm/mach-davinci/cdce949.c
new file mode 100644
index 000000000000..6af3289e0527
--- /dev/null
+++ b/arch/arm/mach-davinci/cdce949.c
@@ -0,0 +1,289 @@
+/*
+ * TI CDCE949 clock synthesizer driver
+ *
+ * Note: This implementation assumes an input of 27MHz to the CDCE.
+ * This is by no means constrained by CDCE hardware although the datasheet
+ * does use this as an example for all illustrations and more importantly:
+ * that is the crystal input on boards it is currently used on.
+ *
+ * Copyright (C) 2009 Texas Instruments Incorporated. http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+
+#include <mach/clock.h>
+
+#include "clock.h"
+
+static struct i2c_client *cdce_i2c_client;
+
+/* CDCE register descriptor */
+struct cdce_reg {
+ u8 addr;
+ u8 val;
+};
+
+/* Per-Output (Y1, Y2 etc.) frequency descriptor */
+struct cdce_freq {
+ /* Frequency in KHz */
+ unsigned long frequency;
+ /*
+ * List of registers to program to obtain a particular frequency.
+ * 0x0 in register address and value is the end of list marker.
+ */
+ struct cdce_reg *reglist;
+};
+
+#define CDCE_FREQ_TABLE_ENTRY(line, out) \
+{ \
+ .reglist = cdce_y ##line## _ ##out, \
+ .frequency = out, \
+}
+
+/* List of CDCE outputs */
+struct cdce_output {
+ /* List of frequencies on this output */
+ struct cdce_freq *freq_table;
+ /* Number of possible frequencies */
+ int size;
+};
+
+/*
+ * Finding out the values to program into CDCE949 registers for a particular
+ * frequency output is not a simple calculation. Have a look at the datasheet
+ * for the details. There is desktop software available to help users with
+ * the calculations. Here, we just depend on the output of that software
+ * (or hand calculations) instead trying to runtime calculate the register
+ * values and inflicting misery on ourselves.
+ */
+static struct cdce_reg cdce_y1_148500[] = {
+ { 0x13, 0x00 },
+ /* program PLL1_0 multiplier */
+ { 0x18, 0xaf },
+ { 0x19, 0x50 },
+ { 0x1a, 0x02 },
+ { 0x1b, 0xc9 },
+ /* program PLL1_11 multiplier */
+ { 0x1c, 0x00 },
+ { 0x1d, 0x40 },
+ { 0x1e, 0x02 },
+ { 0x1f, 0xc9 },
+ /* output state selection */
+ { 0x15, 0x00 },
+ { 0x14, 0xef },
+ /* switch MUX to PLL1 output */
+ { 0x14, 0x6f },
+ { 0x16, 0x06 },
+ /* set P2DIV divider, P3DIV and input crystal */
+ { 0x17, 0x06 },
+ { 0x01, 0x00 },
+ { 0x05, 0x48 },
+ { 0x02, 0x80 },
+ /* enable and disable PLL */
+ { 0x02, 0xbc },
+ { 0x03, 0x01 },
+ { },
+};
+
+static struct cdce_reg cdce_y1_74250[] = {
+ { 0x13, 0x00 },
+ { 0x18, 0xaf },
+ { 0x19, 0x50 },
+ { 0x1a, 0x02 },
+ { 0x1b, 0xc9 },
+ { 0x1c, 0x00 },
+ { 0x1d, 0x40 },
+ { 0x1e, 0x02 },
+ { 0x1f, 0xc9 },
+ /* output state selection */
+ { 0x15, 0x00 },
+ { 0x14, 0xef },
+ /* switch MUX to PLL1 output */
+ { 0x14, 0x6f },
+ { 0x16, 0x06 },
+ /* set P2DIV divider, P3DIV and input crystal */
+ { 0x17, 0x06 },
+ { 0x01, 0x00 },
+ { 0x05, 0x48 },
+ { 0x02, 0x80 },
+ /* enable and disable PLL */
+ { 0x02, 0xbc },
+ { 0x03, 0x02 },
+ { },
+};
+
+static struct cdce_reg cdce_y1_27000[] = {
+ { 0x13, 0x00 },
+ { 0x18, 0x00 },
+ { 0x19, 0x40 },
+ { 0x1a, 0x02 },
+ { 0x1b, 0x08 },
+ { 0x1c, 0x00 },
+ { 0x1d, 0x40 },
+ { 0x1e, 0x02 },
+ { 0x1f, 0x08 },
+ { 0x15, 0x02 },
+ { 0x14, 0xed },
+ { 0x16, 0x01 },
+ { 0x17, 0x01 },
+ { 0x01, 0x00 },
+ { 0x05, 0x50 },
+ { 0x02, 0xb4 },
+ { 0x03, 0x01 },
+ { },
+};
+
+static struct cdce_freq cdce_y1_freqs[] = {
+ CDCE_FREQ_TABLE_ENTRY(1, 148500),
+ CDCE_FREQ_TABLE_ENTRY(1, 74250),
+ CDCE_FREQ_TABLE_ENTRY(1, 27000),
+};
+
+static struct cdce_reg cdce_y5_13500[] = {
+ { 0x27, 0x08 },
+ { 0x28, 0x00 },
+ { 0x29, 0x40 },
+ { 0x2a, 0x02 },
+ { 0x2b, 0x08 },
+ { 0x24, 0x6f },
+ { },
+};
+
+static struct cdce_reg cdce_y5_16875[] = {
+ { 0x27, 0x08 },
+ { 0x28, 0x9f },
+ { 0x29, 0xb0 },
+ { 0x2a, 0x02 },
+ { 0x2b, 0x89 },
+ { 0x24, 0x6f },
+ { },
+};
+
+static struct cdce_reg cdce_y5_27000[] = {
+ { 0x27, 0x04 },
+ { 0x28, 0x00 },
+ { 0x29, 0x40 },
+ { 0x2a, 0x02 },
+ { 0x2b, 0x08 },
+ { 0x24, 0x6f },
+ { },
+};
+static struct cdce_reg cdce_y5_54000[] = {
+ { 0x27, 0x04 },
+ { 0x28, 0xff },
+ { 0x29, 0x80 },
+ { 0x2a, 0x02 },
+ { 0x2b, 0x07 },
+ { 0x24, 0x6f },
+ { },
+};
+
+static struct cdce_reg cdce_y5_81000[] = {
+ { 0x27, 0x02 },
+ { 0x28, 0xbf },
+ { 0x29, 0xa0 },
+ { 0x2a, 0x03 },
+ { 0x2b, 0x0a },
+ { 0x24, 0x6f },
+ { },
+};
+
+static struct cdce_freq cdce_y5_freqs[] = {
+ CDCE_FREQ_TABLE_ENTRY(5, 13500),
+ CDCE_FREQ_TABLE_ENTRY(5, 16875),
+ CDCE_FREQ_TABLE_ENTRY(5, 27000),
+ CDCE_FREQ_TABLE_ENTRY(5, 54000),
+ CDCE_FREQ_TABLE_ENTRY(5, 81000),
+};
+
+
+static struct cdce_output output_list[] = {
+ [1] = { cdce_y1_freqs, ARRAY_SIZE(cdce_y1_freqs) },
+ [5] = { cdce_y5_freqs, ARRAY_SIZE(cdce_y5_freqs) },
+};
+
+int cdce_set_rate(struct clk *clk, unsigned long rate)
+{
+ int i, ret = 0;
+ struct cdce_freq *freq_table = output_list[clk->lpsc].freq_table;
+ struct cdce_reg *regs = NULL;
+
+ if (!cdce_i2c_client)
+ return -ENODEV;
+
+ if (!freq_table)
+ return -EINVAL;
+
+ for (i = 0; i < output_list[clk->lpsc].size; i++) {
+ if (freq_table[i].frequency == rate / 1000) {
+ regs = freq_table[i].reglist;
+ break;
+ }
+ }
+
+ if (!regs)
+ return -EINVAL;
+
+ for (i = 0; regs[i].addr; i++) {
+ ret = i2c_smbus_write_byte_data(cdce_i2c_client,
+ regs[i].addr | 0x80, regs[i].val);
+ if (ret)
+ return ret;
+ }
+
+ clk->rate = rate;
+
+ return 0;
+}
+
+static int cdce_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ cdce_i2c_client = client;
+ return 0;
+}
+
+static int __devexit cdce_remove(struct i2c_client *client)
+{
+ cdce_i2c_client = NULL;
+ return 0;
+}
+
+static const struct i2c_device_id cdce_id[] = {
+ {"cdce949", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, cdce_id);
+
+static struct i2c_driver cdce_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "cdce949",
+ },
+ .probe = cdce_probe,
+ .remove = __devexit_p(cdce_remove),
+ .id_table = cdce_id,
+};
+
+static int __init cdce_init(void)
+{
+ return i2c_add_driver(&cdce_driver);
+}
+subsys_initcall(cdce_init);
+
+static void __exit cdce_exit(void)
+{
+ i2c_del_driver(&cdce_driver);
+}
+module_exit(cdce_exit);
+
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("CDCE949 clock synthesizer driver");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index baece65cb9c0..123839332d50 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -49,7 +49,8 @@ static void __clk_disable(struct clk *clk)
{
if (WARN_ON(clk->usecount == 0))
return;
- if (--clk->usecount == 0 && !(clk->flags & CLK_PLL))
+ if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
+ (clk->flags & CLK_PSC))
davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 0);
if (clk->parent)
__clk_disable(clk->parent);
@@ -376,7 +377,7 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
locktime = ((2000 * prediv) / 100);
prediv = (prediv - 1) | PLLDIV_EN;
} else {
- locktime = 20;
+ locktime = PLL_LOCK_TIME;
}
if (postdiv)
postdiv = (postdiv - 1) | PLLDIV_EN;
@@ -389,12 +390,7 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
__raw_writel(ctrl, pll->base + PLLCTL);
- /*
- * Wait for 4 OSCIN/CLKIN cycles to ensure that the PLLC has switched
- * to bypass mode. Delay of 1us ensures we are good for all > 4MHz
- * OSCIN/CLKIN inputs. Typically the input is ~25MHz.
- */
- udelay(1);
+ udelay(PLL_BYPASS_TIME);
/* Reset and enable PLL */
ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
@@ -408,11 +404,7 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
if (pll->flags & PLL_HAS_POSTDIV)
__raw_writel(postdiv, pll->base + POSTDIV);
- /*
- * Wait for PLL to reset properly, OMAP-L138 datasheet says
- * 'min' time = 125ns
- */
- udelay(1);
+ udelay(PLL_RESET_TIME);
/* Bring PLL out of reset */
ctrl |= PLLCTL_PLLRST;
@@ -468,24 +460,10 @@ int __init davinci_clk_init(struct davinci_clk *clocks)
return 0;
}
-#ifdef CONFIG_PROC_FS
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
-static void *davinci_ck_start(struct seq_file *m, loff_t *pos)
-{
- return *pos < 1 ? (void *)1 : NULL;
-}
+#ifdef CONFIG_DEBUG_FS
-static void *davinci_ck_next(struct seq_file *m, void *v, loff_t *pos)
-{
- ++*pos;
- return NULL;
-}
-
-static void davinci_ck_stop(struct seq_file *m, void *v)
-{
-}
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#define CLKNAME_MAX 10 /* longest clock name */
#define NEST_DELTA 2
@@ -525,41 +503,38 @@ dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
static int davinci_ck_show(struct seq_file *m, void *v)
{
- /* Show clock tree; we know the main oscillator is first.
- * We trust nonzero usecounts equate to PSC enables...
+ struct clk *clk;
+
+ /*
+ * Show clock tree; We trust nonzero usecounts equate to PSC enables...
*/
mutex_lock(&clocks_mutex);
- if (!list_empty(&clocks))
- dump_clock(m, 0, list_first_entry(&clocks, struct clk, node));
+ list_for_each_entry(clk, &clocks, node)
+ if (!clk->parent)
+ dump_clock(m, 0, clk);
mutex_unlock(&clocks_mutex);
return 0;
}
-static const struct seq_operations davinci_ck_op = {
- .start = davinci_ck_start,
- .next = davinci_ck_next,
- .stop = davinci_ck_stop,
- .show = davinci_ck_show
-};
-
static int davinci_ck_open(struct inode *inode, struct file *file)
{
- return seq_open(file, &davinci_ck_op);
+ return single_open(file, davinci_ck_show, NULL);
}
-static const struct file_operations proc_davinci_ck_operations = {
+static const struct file_operations davinci_ck_operations = {
.open = davinci_ck_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = single_release,
};
-static int __init davinci_ck_proc_init(void)
+static int __init davinci_clk_debugfs_init(void)
{
- proc_create("davinci_clocks", 0, NULL, &proc_davinci_ck_operations);
+ debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
+ &davinci_ck_operations);
return 0;
}
-__initcall(davinci_ck_proc_init);
-#endif /* CONFIG_DEBUG_PROC_FS */
+device_initcall(davinci_clk_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/mach-davinci/clock.h b/arch/arm/mach-davinci/clock.h
index c92d77a3008d..31fb6eac712c 100644
--- a/arch/arm/mach-davinci/clock.h
+++ b/arch/arm/mach-davinci/clock.h
@@ -12,9 +12,6 @@
#ifndef __ARCH_ARM_DAVINCI_CLOCK_H
#define __ARCH_ARM_DAVINCI_CLOCK_H
-#include <linux/list.h>
-#include <asm/clkdev.h>
-
#define DAVINCI_PLL1_BASE 0x01c40800
#define DAVINCI_PLL2_BASE 0x01c40c00
#define MAX_PLL 2
@@ -53,6 +50,26 @@
#define PLLDIV_EN BIT(15)
#define PLLDIV_RATIO_MASK 0x1f
+/*
+ * OMAP-L138 system reference guide recommends a wait for 4 OSCIN/CLKIN
+ * cycles to ensure that the PLLC has switched to bypass mode. Delay of 1us
+ * ensures we are good for all > 4MHz OSCIN/CLKIN inputs. Typically the input
+ * is ~25MHz. Units are micro seconds.
+ */
+#define PLL_BYPASS_TIME 1
+/* From OMAP-L138 datasheet table 6-4. Units are micro seconds */
+#define PLL_RESET_TIME 1
+/*
+ * From OMAP-L138 datasheet table 6-4; assuming prediv = 1, sqrt(pllm) = 4
+ * Units are micro seconds.
+ */
+#define PLL_LOCK_TIME 20
+
+#ifndef __ASSEMBLER__
+
+#include <linux/list.h>
+#include <asm/clkdev.h>
+
struct pll_data {
u32 phys_base;
void __iomem *base;
@@ -109,3 +126,5 @@ int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
extern struct platform_device davinci_wdt_device;
#endif
+
+#endif
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index c2de94cde56a..94f27cbcd55a 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -11,13 +11,13 @@
#include <linux/module.h>
#include <linux/io.h>
#include <linux/etherdevice.h>
+#include <linux/davinci_emac.h>
#include <asm/tlb.h>
#include <asm/mach/map.h>
#include <mach/common.h>
#include <mach/cputype.h>
-#include <mach/emac.h>
#include "clock.h"
diff --git a/arch/arm/mach-davinci/cp_intc.c b/arch/arm/mach-davinci/cp_intc.c
index 52b287cf3a42..37311d1830eb 100644
--- a/arch/arm/mach-davinci/cp_intc.c
+++ b/arch/arm/mach-davinci/cp_intc.c
@@ -81,12 +81,23 @@ static int cp_intc_set_irq_type(unsigned int irq, unsigned int flow_type)
return 0;
}
+/*
+ * Faking this allows us to to work with suspend functions of
+ * generic drivers which call {enable|disable}_irq_wake for
+ * wake up interrupt sources (eg RTC on DA850).
+ */
+static int cp_intc_set_wake(unsigned int irq, unsigned int on)
+{
+ return 0;
+}
+
static struct irq_chip cp_intc_irq_chip = {
.name = "cp_intc",
.ack = cp_intc_ack_irq,
.mask = cp_intc_mask_irq,
.unmask = cp_intc_unmask_irq,
.set_type = cp_intc_set_irq_type,
+ .set_wake = cp_intc_set_wake,
};
void __init cp_intc_init(void __iomem *base, unsigned short num_irq,
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 97a90f36fc92..bd59f31b8a95 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -19,6 +19,7 @@
#include <asm/proc-fns.h>
#include <mach/cpuidle.h>
+#include <mach/memory.h>
#define DAVINCI_CPUIDLE_MAX_STATES 2
@@ -39,10 +40,6 @@ static struct cpuidle_driver davinci_idle_driver = {
static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device);
static void __iomem *ddr2_reg_base;
-#define DDR2_SDRCR_OFFSET 0xc
-#define DDR2_SRPD_BIT BIT(23)
-#define DDR2_LPMODEN_BIT BIT(31)
-
static void davinci_save_ddr_power(int enter, bool pdown)
{
u32 val;
@@ -109,8 +106,6 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
int ret;
struct cpuidle_device *device;
struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;
- struct resource *ddr2_regs;
- resource_size_t len;
device = &per_cpu(davinci_cpuidle_device, smp_processor_id());
@@ -119,28 +114,12 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
return -ENOENT;
}
- ddr2_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ddr2_regs) {
- dev_err(&pdev->dev, "cannot get DDR2 controller register base");
- return -ENODEV;
- }
-
- len = resource_size(ddr2_regs);
-
- ddr2_regs = request_mem_region(ddr2_regs->start, len, ddr2_regs->name);
- if (!ddr2_regs)
- return -EBUSY;
-
- ddr2_reg_base = ioremap(ddr2_regs->start, len);
- if (!ddr2_reg_base) {
- ret = -ENOMEM;
- goto ioremap_fail;
- }
+ ddr2_reg_base = pdata->ddr2_ctlr_base;
ret = cpuidle_register_driver(&davinci_idle_driver);
if (ret) {
dev_err(&pdev->dev, "failed to register driver\n");
- goto driver_register_fail;
+ return ret;
}
/* Wait for interrupt state */
@@ -167,18 +146,11 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)
ret = cpuidle_register_device(device);
if (ret) {
dev_err(&pdev->dev, "failed to register device\n");
- goto device_register_fail;
+ cpuidle_unregister_driver(&davinci_idle_driver);
+ return ret;
}
return 0;
-
-device_register_fail:
- cpuidle_unregister_driver(&davinci_idle_driver);
-driver_register_fail:
- iounmap(ddr2_reg_base);
-ioremap_fail:
- release_mem_region(ddr2_regs->start, len);
- return ret;
}
static struct platform_driver davinci_cpuidle_driver = {
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c
index b22b5cf04250..54796050a2ff 100644
--- a/arch/arm/mach-davinci/da830.c
+++ b/arch/arm/mach-davinci/da830.c
@@ -1208,13 +1208,13 @@ static struct davinci_soc_info davinci_soc_info_da830 = {
void __init da830_init(void)
{
- da8xx_syscfg_base = ioremap(DA8XX_SYSCFG_BASE, SZ_4K);
- if (WARN(!da8xx_syscfg_base, "Unable to map syscfg module"))
+ da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
+ if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"))
return;
davinci_soc_info_da830.jtag_id_base =
- DA8XX_SYSCFG_VIRT(DA8XX_JTAG_ID_REG);
- davinci_soc_info_da830.pinmux_base = DA8XX_SYSCFG_VIRT(0x120);
+ DA8XX_SYSCFG0_VIRT(DA8XX_JTAG_ID_REG);
+ davinci_soc_info_da830.pinmux_base = DA8XX_SYSCFG0_VIRT(0x120);
davinci_common_init(&davinci_soc_info_da830);
}
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c
index 717806c6cef9..b9a7b3bc36b2 100644
--- a/arch/arm/mach-davinci/da850.c
+++ b/arch/arm/mach-davinci/da850.c
@@ -26,6 +26,7 @@
#include <mach/time.h>
#include <mach/da8xx.h>
#include <mach/cpufreq.h>
+#include <mach/pm.h>
#include "clock.h"
#include "mux.h"
@@ -40,6 +41,7 @@
#define DA850_REF_FREQ 24000000
#define CFGCHIP3_ASYNC3_CLKSRC BIT(4)
+#define CFGCHIP3_PLL1_MASTER_LOCK BIT(5)
#define CFGCHIP0_PLL_MASTER_LOCK BIT(4)
static int da850_set_armrate(struct clk *clk, unsigned long rate);
@@ -535,6 +537,7 @@ static const struct mux_config da850_pins[] = {
MUX_CFG(DA850, GPIO2_15, 5, 0, 15, 8, false)
MUX_CFG(DA850, GPIO4_0, 10, 28, 15, 8, false)
MUX_CFG(DA850, GPIO4_1, 10, 24, 15, 8, false)
+ MUX_CFG(DA850, RTC_ALARM, 0, 28, 15, 2, false)
#endif
};
@@ -770,6 +773,12 @@ static struct map_desc da850_io_desc[] = {
.length = DA8XX_CP_INTC_SIZE,
.type = MT_DEVICE
},
+ {
+ .virtual = SRAM_VIRT,
+ .pfn = __phys_to_pfn(DA8XX_ARM_RAM_BASE),
+ .length = SZ_8K,
+ .type = MT_DEVICE
+ },
};
static void __iomem *da850_psc_bases[] = {
@@ -838,12 +847,12 @@ static void da850_set_async3_src(int pllnum)
}
}
- v = __raw_readl(DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP3_REG));
+ v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
if (pllnum)
v |= CFGCHIP3_ASYNC3_CLKSRC;
else
v &= ~CFGCHIP3_ASYNC3_CLKSRC;
- __raw_writel(v, DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP3_REG));
+ __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
}
#ifdef CONFIG_CPU_FREQ
@@ -987,7 +996,6 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index)
unsigned int prediv, mult, postdiv;
struct da850_opp *opp;
struct pll_data *pll = clk->pll_data;
- unsigned int v;
int ret;
opp = (struct da850_opp *) da850_freq_table[index].index;
@@ -995,11 +1003,6 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index)
mult = opp->mult;
postdiv = opp->postdiv;
- /* Unlock writing to PLL registers */
- v = __raw_readl(DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP0_REG));
- v &= ~CFGCHIP0_PLL_MASTER_LOCK;
- __raw_writel(v, DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP0_REG));
-
ret = davinci_set_pllrate(pll, prediv, mult, postdiv);
if (WARN_ON(ret))
return ret;
@@ -1028,6 +1031,43 @@ static int da850_round_armrate(struct clk *clk, unsigned long rate)
}
#endif
+int da850_register_pm(struct platform_device *pdev)
+{
+ int ret;
+ struct davinci_pm_config *pdata = pdev->dev.platform_data;
+
+ ret = davinci_cfg_reg(DA850_RTC_ALARM);
+ if (ret)
+ return ret;
+
+ pdata->ddr2_ctlr_base = da8xx_get_mem_ctlr();
+ pdata->deepsleep_reg = DA8XX_SYSCFG1_VIRT(DA8XX_DEEPSLEEP_REG);
+ pdata->ddrpsc_num = DA8XX_LPSC1_EMIF3C;
+
+ pdata->cpupll_reg_base = ioremap(DA8XX_PLL0_BASE, SZ_4K);
+ if (!pdata->cpupll_reg_base)
+ return -ENOMEM;
+
+ pdata->ddrpll_reg_base = ioremap(DA8XX_PLL1_BASE, SZ_4K);
+ if (!pdata->ddrpll_reg_base) {
+ ret = -ENOMEM;
+ goto no_ddrpll_mem;
+ }
+
+ pdata->ddrpsc_reg_base = ioremap(DA8XX_PSC1_BASE, SZ_4K);
+ if (!pdata->ddrpsc_reg_base) {
+ ret = -ENOMEM;
+ goto no_ddrpsc_mem;
+ }
+
+ return platform_device_register(pdev);
+
+no_ddrpsc_mem:
+ iounmap(pdata->ddrpll_reg_base);
+no_ddrpll_mem:
+ iounmap(pdata->cpupll_reg_base);
+ return ret;
+}
static struct davinci_soc_info davinci_soc_info_da850 = {
.io_desc = da850_io_desc,
@@ -1049,17 +1089,25 @@ static struct davinci_soc_info davinci_soc_info_da850 = {
.gpio_irq = IRQ_DA8XX_GPIO0,
.serial_dev = &da8xx_serial_device,
.emac_pdata = &da8xx_emac_pdata,
+ .sram_dma = DA8XX_ARM_RAM_BASE,
+ .sram_len = SZ_8K,
};
void __init da850_init(void)
{
- da8xx_syscfg_base = ioremap(DA8XX_SYSCFG_BASE, SZ_4K);
- if (WARN(!da8xx_syscfg_base, "Unable to map syscfg module"))
+ unsigned int v;
+
+ da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K);
+ if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module"))
+ return;
+
+ da8xx_syscfg1_base = ioremap(DA8XX_SYSCFG1_BASE, SZ_4K);
+ if (WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module"))
return;
davinci_soc_info_da850.jtag_id_base =
- DA8XX_SYSCFG_VIRT(DA8XX_JTAG_ID_REG);
- davinci_soc_info_da850.pinmux_base = DA8XX_SYSCFG_VIRT(0x120);
+ DA8XX_SYSCFG0_VIRT(DA8XX_JTAG_ID_REG);
+ davinci_soc_info_da850.pinmux_base = DA8XX_SYSCFG0_VIRT(0x120);
davinci_common_init(&davinci_soc_info_da850);
@@ -1071,4 +1119,14 @@ void __init da850_init(void)
* be any noticible change even in non-DVFS use cases.
*/
da850_set_async3_src(1);
+
+ /* Unlock writing to PLL0 registers */
+ v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG));
+ v &= ~CFGCHIP0_PLL_MASTER_LOCK;
+ __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG));
+
+ /* Unlock writing to PLL1 registers */
+ v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
+ v &= ~CFGCHIP3_PLL1_MASTER_LOCK;
+ __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
}
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index dd2d32c4ce86..0a96791d3b0f 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -24,8 +24,10 @@
#include "clock.h"
#define DA8XX_TPCC_BASE 0x01c00000
+#define DA850_TPCC1_BASE 0x01e30000
#define DA8XX_TPTC0_BASE 0x01c08000
#define DA8XX_TPTC1_BASE 0x01c08400
+#define DA850_TPTC2_BASE 0x01e38000
#define DA8XX_WDOG_BASE 0x01c21000 /* DA8XX_TIMER64P1_BASE */
#define DA8XX_I2C0_BASE 0x01c22000
#define DA8XX_RTC_BASE 0x01C23000
@@ -42,7 +44,8 @@
#define DA8XX_MDIO_REG_OFFSET 0x4000
#define DA8XX_EMAC_CTRL_RAM_SIZE SZ_8K
-void __iomem *da8xx_syscfg_base;
+void __iomem *da8xx_syscfg0_base;
+void __iomem *da8xx_syscfg1_base;
static struct plat_serial8250_port da8xx_serial_pdata[] = {
{
@@ -82,11 +85,6 @@ struct platform_device da8xx_serial_device = {
},
};
-static const s8 da8xx_dma_chan_no_event[] = {
- 20, 21,
- -1
-};
-
static const s8 da8xx_queue_tc_mapping[][2] = {
/* {event queue no, TC no} */
{0, 0},
@@ -101,20 +99,52 @@ static const s8 da8xx_queue_priority_mapping[][2] = {
{-1, -1}
};
-static struct edma_soc_info da8xx_edma_info[] = {
+static const s8 da850_queue_tc_mapping[][2] = {
+ /* {event queue no, TC no} */
+ {0, 0},
+ {-1, -1}
+};
+
+static const s8 da850_queue_priority_mapping[][2] = {
+ /* {event queue no, Priority} */
+ {0, 3},
+ {-1, -1}
+};
+
+static struct edma_soc_info da830_edma_info[] = {
+ {
+ .n_channel = 32,
+ .n_region = 4,
+ .n_slot = 128,
+ .n_tc = 2,
+ .n_cc = 1,
+ .queue_tc_mapping = da8xx_queue_tc_mapping,
+ .queue_priority_mapping = da8xx_queue_priority_mapping,
+ },
+};
+
+static struct edma_soc_info da850_edma_info[] = {
{
.n_channel = 32,
.n_region = 4,
.n_slot = 128,
.n_tc = 2,
.n_cc = 1,
- .noevent = da8xx_dma_chan_no_event,
.queue_tc_mapping = da8xx_queue_tc_mapping,
.queue_priority_mapping = da8xx_queue_priority_mapping,
},
+ {
+ .n_channel = 32,
+ .n_region = 4,
+ .n_slot = 128,
+ .n_tc = 1,
+ .n_cc = 1,
+ .queue_tc_mapping = da850_queue_tc_mapping,
+ .queue_priority_mapping = da850_queue_priority_mapping,
+ },
};
-static struct resource da8xx_edma_resources[] = {
+static struct resource da830_edma_resources[] = {
{
.name = "edma_cc0",
.start = DA8XX_TPCC_BASE,
@@ -145,19 +175,91 @@ static struct resource da8xx_edma_resources[] = {
},
};
-static struct platform_device da8xx_edma_device = {
+static struct resource da850_edma_resources[] = {
+ {
+ .name = "edma_cc0",
+ .start = DA8XX_TPCC_BASE,
+ .end = DA8XX_TPCC_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "edma_tc0",
+ .start = DA8XX_TPTC0_BASE,
+ .end = DA8XX_TPTC0_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "edma_tc1",
+ .start = DA8XX_TPTC1_BASE,
+ .end = DA8XX_TPTC1_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "edma_cc1",
+ .start = DA850_TPCC1_BASE,
+ .end = DA850_TPCC1_BASE + SZ_32K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "edma_tc2",
+ .start = DA850_TPTC2_BASE,
+ .end = DA850_TPTC2_BASE + SZ_1K - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .name = "edma0",
+ .start = IRQ_DA8XX_CCINT0,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "edma0_err",
+ .start = IRQ_DA8XX_CCERRINT,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "edma1",
+ .start = IRQ_DA850_CCINT1,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "edma1_err",
+ .start = IRQ_DA850_CCERRINT1,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device da830_edma_device = {
.name = "edma",
.id = -1,
.dev = {
- .platform_data = da8xx_edma_info,
+ .platform_data = da830_edma_info,
},
- .num_resources = ARRAY_SIZE(da8xx_edma_resources),
- .resource = da8xx_edma_resources,
+ .num_resources = ARRAY_SIZE(da830_edma_resources),
+ .resource = da830_edma_resources,
+};
+
+static struct platform_device da850_edma_device = {
+ .name = "edma",
+ .id = -1,
+ .dev = {
+ .platform_data = da850_edma_info,
+ },
+ .num_resources = ARRAY_SIZE(da850_edma_resources),
+ .resource = da850_edma_resources,
};
int __init da8xx_register_edma(void)
{
- return platform_device_register(&da8xx_edma_device);
+ struct platform_device *pdev;
+
+ if (cpu_is_davinci_da830())
+ pdev = &da830_edma_device;
+ else if (cpu_is_davinci_da850())
+ pdev = &da850_edma_device;
+ else
+ return -ENODEV;
+
+ return platform_device_register(pdev);
}
static struct resource da8xx_i2c_resources0[] = {
@@ -481,11 +583,31 @@ static struct platform_device da8xx_rtc_device = {
int da8xx_register_rtc(void)
{
+ int ret;
+
/* Unlock the rtc's registers */
__raw_writel(0x83e70b13, IO_ADDRESS(DA8XX_RTC_BASE + 0x6c));
__raw_writel(0x95a4f1e0, IO_ADDRESS(DA8XX_RTC_BASE + 0x70));
- return platform_device_register(&da8xx_rtc_device);
+ ret = platform_device_register(&da8xx_rtc_device);
+ if (!ret)
+ /* Atleast on DA850, RTC is a wakeup source */
+ device_init_wakeup(&da8xx_rtc_device.dev, true);
+
+ return ret;
+}
+
+static void __iomem *da8xx_ddr2_ctlr_base;
+void __iomem * __init da8xx_get_mem_ctlr(void)
+{
+ if (da8xx_ddr2_ctlr_base)
+ return da8xx_ddr2_ctlr_base;
+
+ da8xx_ddr2_ctlr_base = ioremap(DA8XX_DDR2_CTL_BASE, SZ_32K);
+ if (!da8xx_ddr2_ctlr_base)
+ pr_warning("%s: Unable to map DDR2 controller", __func__);
+
+ return da8xx_ddr2_ctlr_base;
}
static struct resource da8xx_cpuidle_resources[] = {
@@ -513,6 +635,7 @@ static struct platform_device da8xx_cpuidle_device = {
int __init da8xx_register_cpuidle(void)
{
+ da8xx_cpuidle_pdata.ddr2_ctlr_base = da8xx_get_mem_ctlr();
+
return platform_device_register(&da8xx_cpuidle_device);
}
-
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index dedf4d4f3a27..b1185f82ffb3 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -564,13 +564,6 @@ static u8 dm355_default_priorities[DAVINCI_N_AINTC_IRQ] = {
/*----------------------------------------------------------------------*/
-static const s8 dma_chan_dm355_no_event[] = {
- 12, 13, 24, 56, 57,
- 58, 59, 60, 61, 62,
- 63,
- -1
-};
-
static const s8
queue_tc_mapping[][2] = {
/* {event queue no, TC no} */
@@ -594,7 +587,6 @@ static struct edma_soc_info dm355_edma_info[] = {
.n_slot = 128,
.n_tc = 2,
.n_cc = 1,
- .noevent = dma_chan_dm355_no_event,
.queue_tc_mapping = queue_tc_mapping,
.queue_priority_mapping = queue_priority_mapping,
},
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 2ec619ec1657..b4a00c4a6e78 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -754,7 +754,7 @@ static struct edma_soc_info dm365_edma_info[] = {
.n_cc = 1,
.queue_tc_mapping = dm365_queue_tc_mapping,
.queue_priority_mapping = dm365_queue_priority_mapping,
- .default_queue = EVENTQ_2,
+ .default_queue = EVENTQ_3,
},
};
@@ -993,7 +993,6 @@ void __init dm365_init_asp(struct snd_platform_data *pdata)
void __init dm365_init_ks(struct davinci_ks_platform_data *pdata)
{
- davinci_cfg_reg(DM365_KEYSCAN);
dm365_ks_device.dev.platform_data = pdata;
platform_device_register(&dm365_ks_device);
}
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 2cd008156dea..fc060e7aefdf 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -479,15 +479,6 @@ static u8 dm644x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
/*----------------------------------------------------------------------*/
-static const s8 dma_chan_dm644x_no_event[] = {
- 0, 1, 12, 13, 14,
- 15, 25, 30, 31, 45,
- 46, 47, 55, 56, 57,
- 58, 59, 60, 61, 62,
- 63,
- -1
-};
-
static const s8
queue_tc_mapping[][2] = {
/* {event queue no, TC no} */
@@ -511,7 +502,6 @@ static struct edma_soc_info dm644x_edma_info[] = {
.n_slot = 128,
.n_tc = 2,
.n_cc = 1,
- .noevent = dma_chan_dm644x_no_event,
.queue_tc_mapping = queue_tc_mapping,
.queue_priority_mapping = queue_priority_mapping,
},
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 829a44bcf799..7eb34e9253c6 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -42,7 +42,6 @@
/*
* Device specific clocks
*/
-#define DM646X_REF_FREQ 27000000
#define DM646X_AUX_FREQ 24000000
static struct pll_data pll1_data = {
@@ -57,7 +56,6 @@ static struct pll_data pll2_data = {
static struct clk ref_clk = {
.name = "ref_clk",
- .rate = DM646X_REF_FREQ,
};
static struct clk aux_clkin = {
@@ -513,14 +511,6 @@ static u8 dm646x_default_priorities[DAVINCI_N_AINTC_IRQ] = {
/*----------------------------------------------------------------------*/
-static const s8 dma_chan_dm646x_no_event[] = {
- 0, 1, 2, 3, 13,
- 14, 15, 24, 25, 26,
- 27, 30, 31, 54, 55,
- 56,
- -1
-};
-
/* Four Transfer Controllers on DM646x */
static const s8
dm646x_queue_tc_mapping[][2] = {
@@ -549,7 +539,6 @@ static struct edma_soc_info dm646x_edma_info[] = {
.n_slot = 512,
.n_tc = 4,
.n_cc = 1,
- .noevent = dma_chan_dm646x_no_event,
.queue_tc_mapping = dm646x_queue_tc_mapping,
.queue_priority_mapping = dm646x_queue_priority_mapping,
},
@@ -925,6 +914,7 @@ void dm646x_setup_vpif(struct vpif_display_config *display_config,
void __init dm646x_init(void)
{
+ dm646x_board_setup_refclk(&ref_clk);
davinci_common_init(&davinci_soc_info_dm646x);
}
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 648fbb760ae1..15dd886df04c 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -226,11 +226,11 @@ struct edma {
*/
DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);
- /* The edma_noevent bit for each channel is clear unless
- * it doesn't trigger DMA events on this platform. It uses a
- * bit of SOC-specific initialization code.
+ /* The edma_unused bit for each channel is clear unless
+ * it is not being used on this platform. It uses a bit
+ * of SOC-specific initialization code.
*/
- DECLARE_BITMAP(edma_noevent, EDMA_MAX_DMACH);
+ DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
unsigned irq_res_start;
unsigned irq_res_end;
@@ -243,6 +243,7 @@ struct edma {
};
static struct edma *edma_info[EDMA_MAX_CC];
+static int arch_num_cc;
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
@@ -555,8 +556,27 @@ static int reserve_contiguous_slots(int ctlr, unsigned int id,
return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}
+static int prepare_unused_channel_list(struct device *dev, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int i, ctlr;
+
+ for (i = 0; i < pdev->num_resources; i++) {
+ if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
+ (int)pdev->resource[i].start >= 0) {
+ ctlr = EDMA_CTLR(pdev->resource[i].start);
+ clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
+ edma_info[ctlr]->edma_unused);
+ }
+ }
+
+ return 0;
+}
+
/*-----------------------------------------------------------------------*/
+static bool unused_chan_list_done;
+
/* Resource alloc/free: dma channels, parameter RAM slots */
/**
@@ -594,7 +614,22 @@ int edma_alloc_channel(int channel,
void *data,
enum dma_event_q eventq_no)
{
- unsigned i, done, ctlr = 0;
+ unsigned i, done = 0, ctlr = 0;
+ int ret = 0;
+
+ if (!unused_chan_list_done) {
+ /*
+ * Scan all the platform devices to find out the EDMA channels
+ * used and clear them in the unused list, making the rest
+ * available for ARM usage.
+ */
+ ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
+ prepare_unused_channel_list);
+ if (ret < 0)
+ return ret;
+
+ unused_chan_list_done = true;
+ }
if (channel >= 0) {
ctlr = EDMA_CTLR(channel);
@@ -602,15 +637,15 @@ int edma_alloc_channel(int channel,
}
if (channel < 0) {
- for (i = 0; i < EDMA_MAX_CC; i++) {
+ for (i = 0; i < arch_num_cc; i++) {
channel = 0;
for (;;) {
channel = find_next_bit(edma_info[i]->
- edma_noevent,
+ edma_unused,
edma_info[i]->num_channels,
channel);
if (channel == edma_info[i]->num_channels)
- return -ENOMEM;
+ break;
if (!test_and_set_bit(channel,
edma_info[i]->edma_inuse)) {
done = 1;
@@ -622,6 +657,8 @@ int edma_alloc_channel(int channel,
if (done)
break;
}
+ if (!done)
+ return -ENOMEM;
} else if (channel >= edma_info[ctlr]->num_channels) {
return -EINVAL;
} else if (test_and_set_bit(channel, edma_info[ctlr]->edma_inuse)) {
@@ -642,7 +679,7 @@ int edma_alloc_channel(int channel,
map_dmach_queue(ctlr, channel, eventq_no);
- return channel;
+ return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
@@ -1219,7 +1256,7 @@ int edma_start(unsigned channel)
unsigned int mask = (1 << (channel & 0x1f));
/* EDMA channels without event association */
- if (test_bit(channel, edma_info[ctlr]->edma_noevent)) {
+ if (test_bit(channel, edma_info[ctlr]->edma_unused)) {
pr_debug("EDMA: ESR%d %08x\n", j,
edma_shadow0_read_array(ctlr, SH_ESR, j));
edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
@@ -1344,7 +1381,6 @@ static int __init edma_probe(struct platform_device *pdev)
const s8 (*queue_tc_mapping)[2];
int i, j, found = 0;
int status = -1;
- const s8 *noevent;
int irq[EDMA_MAX_CC] = {0, 0};
int err_irq[EDMA_MAX_CC] = {0, 0};
struct resource *r[EDMA_MAX_CC] = {NULL};
@@ -1407,11 +1443,9 @@ static int __init edma_probe(struct platform_device *pdev)
memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
&dummy_paramset, PARM_SIZE);
- noevent = info[j].noevent;
- if (noevent) {
- while (*noevent != -1)
- set_bit(*noevent++, edma_info[j]->edma_noevent);
- }
+ /* Mark all channels as unused */
+ memset(edma_info[j]->edma_unused, 0xff,
+ sizeof(edma_info[j]->edma_unused));
sprintf(irq_name, "edma%d", j);
irq[j] = platform_get_irq_byname(pdev, irq_name);
@@ -1467,6 +1501,7 @@ static int __init edma_probe(struct platform_device *pdev)
edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
edma_write_array(j, EDMA_QRAE, i, 0x0);
}
+ arch_num_cc++;
}
if (tc_errs_handled) {
diff --git a/arch/arm/mach-davinci/include/mach/cdce949.h b/arch/arm/mach-davinci/include/mach/cdce949.h
new file mode 100644
index 000000000000..c73331fae341
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/cdce949.h
@@ -0,0 +1,19 @@
+/*
+ * TI CDCE949 off-chip clock synthesizer support
+ *
+ * 2009 (C) Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#ifndef _MACH_DAVINCI_CDCE949_H
+#define _MACH_DAVINCI_CDCE949_H
+
+#include <linux/clk.h>
+
+#include <mach/clock.h>
+
+int cdce_set_rate(struct clk *clk, unsigned long rate);
+
+#endif
diff --git a/arch/arm/mach-davinci/include/mach/cpuidle.h b/arch/arm/mach-davinci/include/mach/cpuidle.h
index cbfc6a9c81b4..74f088b0edfb 100644
--- a/arch/arm/mach-davinci/include/mach/cpuidle.h
+++ b/arch/arm/mach-davinci/include/mach/cpuidle.h
@@ -12,6 +12,7 @@
struct davinci_cpuidle_config {
u32 ddr2_pdown;
+ void __iomem *ddr2_ctlr_base;
};
#endif
diff --git a/arch/arm/mach-davinci/include/mach/da8xx.h b/arch/arm/mach-davinci/include/mach/da8xx.h
index 90704910d343..cc9be7fee627 100644
--- a/arch/arm/mach-davinci/include/mach/da8xx.h
+++ b/arch/arm/mach-davinci/include/mach/da8xx.h
@@ -13,15 +13,17 @@
#include <video/da8xx-fb.h>
+#include <linux/davinci_emac.h>
#include <mach/serial.h>
#include <mach/edma.h>
#include <mach/i2c.h>
-#include <mach/emac.h>
#include <mach/asp.h>
#include <mach/mmc.h>
#include <mach/usb.h>
+#include <mach/pm.h>
-extern void __iomem *da8xx_syscfg_base;
+extern void __iomem *da8xx_syscfg0_base;
+extern void __iomem *da8xx_syscfg1_base;
/*
* The cp_intc interrupt controller for the da8xx isn't in the same
@@ -34,13 +36,17 @@ extern void __iomem *da8xx_syscfg_base;
#define DA8XX_CP_INTC_SIZE SZ_8K
#define DA8XX_CP_INTC_VIRT (IO_VIRT - DA8XX_CP_INTC_SIZE - SZ_4K)
-#define DA8XX_SYSCFG_BASE (IO_PHYS + 0x14000)
-#define DA8XX_SYSCFG_VIRT(x) (da8xx_syscfg_base + (x))
+#define DA8XX_SYSCFG0_BASE (IO_PHYS + 0x14000)
+#define DA8XX_SYSCFG0_VIRT(x) (da8xx_syscfg0_base + (x))
#define DA8XX_JTAG_ID_REG 0x18
#define DA8XX_CFGCHIP0_REG 0x17c
#define DA8XX_CFGCHIP2_REG 0x184
#define DA8XX_CFGCHIP3_REG 0x188
+#define DA8XX_SYSCFG1_BASE (IO_PHYS + 0x22C000)
+#define DA8XX_SYSCFG1_VIRT(x) (da8xx_syscfg1_base + (x))
+#define DA8XX_DEEPSLEEP_REG 0x8
+
#define DA8XX_PSC0_BASE 0x01c10000
#define DA8XX_PLL0_BASE 0x01c11000
#define DA8XX_TIMER64P0_BASE 0x01c20000
@@ -48,11 +54,13 @@ extern void __iomem *da8xx_syscfg_base;
#define DA8XX_GPIO_BASE 0x01e26000
#define DA8XX_PSC1_BASE 0x01e27000
#define DA8XX_LCD_CNTRL_BASE 0x01e13000
+#define DA8XX_PLL1_BASE 0x01e1a000
#define DA8XX_MMCSD0_BASE 0x01c40000
#define DA8XX_AEMIF_CS2_BASE 0x60000000
#define DA8XX_AEMIF_CS3_BASE 0x62000000
#define DA8XX_AEMIF_CTL_BASE 0x68000000
#define DA8XX_DDR2_CTL_BASE 0xb0000000
+#define DA8XX_ARM_RAM_BASE 0xffff0000
#define PINMUX0 0x00
#define PINMUX1 0x04
@@ -90,6 +98,8 @@ void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata);
int da8xx_register_rtc(void);
int da850_register_cpufreq(void);
int da8xx_register_cpuidle(void);
+void __iomem * __init da8xx_get_mem_ctlr(void);
+int da850_register_pm(struct platform_device *pdev);
extern struct platform_device da8xx_serial_device;
extern struct emac_platform_data da8xx_emac_pdata;
diff --git a/arch/arm/mach-davinci/include/mach/dm365.h b/arch/arm/mach-davinci/include/mach/dm365.h
index f1710a30e7ba..3c07a88b4249 100644
--- a/arch/arm/mach-davinci/include/mach/dm365.h
+++ b/arch/arm/mach-davinci/include/mach/dm365.h
@@ -14,8 +14,8 @@
#define __ASM_ARCH_DM665_H
#include <linux/platform_device.h>
+#include <linux/davinci_emac.h>
#include <mach/hardware.h>
-#include <mach/emac.h>
#include <mach/asp.h>
#include <mach/keyscan.h>
diff --git a/arch/arm/mach-davinci/include/mach/dm644x.h b/arch/arm/mach-davinci/include/mach/dm644x.h
index 44e8f0fae9ea..1a8b09ccc3c8 100644
--- a/arch/arm/mach-davinci/include/mach/dm644x.h
+++ b/arch/arm/mach-davinci/include/mach/dm644x.h
@@ -22,8 +22,8 @@
#ifndef __ASM_ARCH_DM644X_H
#define __ASM_ARCH_DM644X_H
+#include <linux/davinci_emac.h>
#include <mach/hardware.h>
-#include <mach/emac.h>
#include <mach/asp.h>
#include <media/davinci/vpfe_capture.h>
diff --git a/arch/arm/mach-davinci/include/mach/dm646x.h b/arch/arm/mach-davinci/include/mach/dm646x.h
index 8cec746ae9d2..846da98b619a 100644
--- a/arch/arm/mach-davinci/include/mach/dm646x.h
+++ b/arch/arm/mach-davinci/include/mach/dm646x.h
@@ -12,10 +12,11 @@
#define __ASM_ARCH_DM646X_H
#include <mach/hardware.h>
-#include <mach/emac.h>
#include <mach/asp.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
+#include <linux/clk.h>
+#include <linux/davinci_emac.h>
#define DM646X_EMAC_BASE (0x01C80000)
#define DM646X_EMAC_CNTRL_OFFSET (0x0000)
@@ -30,6 +31,7 @@ void __init dm646x_init(void);
void __init dm646x_init_ide(void);
void __init dm646x_init_mcasp0(struct snd_platform_data *pdata);
void __init dm646x_init_mcasp1(struct snd_platform_data *pdata);
+void __init dm646x_board_setup_refclk(struct clk *clk);
void dm646x_video_init(void);
diff --git a/arch/arm/mach-davinci/include/mach/edma.h b/arch/arm/mach-davinci/include/mach/edma.h
index eb8bfd7925e7..ced3092af5ba 100644
--- a/arch/arm/mach-davinci/include/mach/edma.h
+++ b/arch/arm/mach-davinci/include/mach/edma.h
@@ -280,8 +280,6 @@ struct edma_soc_info {
unsigned n_cc;
enum dma_event_q default_queue;
- /* list of channels with no even trigger; terminated by "-1" */
- const s8 *noevent;
const s8 (*queue_tc_mapping)[2];
const s8 (*queue_priority_mapping)[2];
};
diff --git a/arch/arm/mach-davinci/include/mach/i2c.h b/arch/arm/mach-davinci/include/mach/i2c.h
index c248e9b7e825..44bdea13cc8c 100644
--- a/arch/arm/mach-davinci/include/mach/i2c.h
+++ b/arch/arm/mach-davinci/include/mach/i2c.h
@@ -1,5 +1,5 @@
/*
- * DaVinci I2C controller platfrom_device info
+ * DaVinci I2C controller platform_device info
*
* Author: Vladimir Barinov, MontaVista Software, Inc. <source@mvista.com>
*
diff --git a/arch/arm/mach-davinci/include/mach/keyscan.h b/arch/arm/mach-davinci/include/mach/keyscan.h
index b4e21a2976d1..7a560e05bda8 100644
--- a/arch/arm/mach-davinci/include/mach/keyscan.h
+++ b/arch/arm/mach-davinci/include/mach/keyscan.h
@@ -29,6 +29,7 @@ enum davinci_matrix_types {
};
struct davinci_ks_platform_data {
+ int (*device_enable)(struct device *dev);
unsigned short *keymap;
u32 keymapsize;
u8 rep:1;
diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h
index 80309aed534a..a91edfb8beea 100644
--- a/arch/arm/mach-davinci/include/mach/memory.h
+++ b/arch/arm/mach-davinci/include/mach/memory.h
@@ -31,6 +31,11 @@
#define PHYS_OFFSET DAVINCI_DDR_BASE
#endif
+#define DDR2_SDRCR_OFFSET 0xc
+#define DDR2_SRPD_BIT BIT(23)
+#define DDR2_MCLKSTOPEN_BIT BIT(30)
+#define DDR2_LPMODEN_BIT BIT(31)
+
/*
* Increase size of DMA-consistent memory region
*/
diff --git a/arch/arm/mach-davinci/include/mach/mux.h b/arch/arm/mach-davinci/include/mach/mux.h
index b60c693985ff..137bfba51d1f 100644
--- a/arch/arm/mach-davinci/include/mach/mux.h
+++ b/arch/arm/mach-davinci/include/mach/mux.h
@@ -899,6 +899,7 @@ enum davinci_da850_index {
DA850_GPIO2_15,
DA850_GPIO4_0,
DA850_GPIO4_1,
+ DA850_RTC_ALARM,
};
#ifdef CONFIG_DAVINCI_MUX
diff --git a/arch/arm/mach-davinci/include/mach/pm.h b/arch/arm/mach-davinci/include/mach/pm.h
new file mode 100644
index 000000000000..37b19bf35a85
--- /dev/null
+++ b/arch/arm/mach-davinci/include/mach/pm.h
@@ -0,0 +1,54 @@
+/*
+ * TI DaVinci platform support for power management.
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MACH_DAVINCI_PM_H
+#define _MACH_DAVINCI_PM_H
+
+/*
+ * Caution: Assembly code in sleep.S makes assumtion on the order
+ * of the members of this structure.
+ */
+struct davinci_pm_config {
+ void __iomem *ddr2_ctlr_base;
+ void __iomem *ddrpsc_reg_base;
+ int ddrpsc_num;
+ void __iomem *ddrpll_reg_base;
+ void __iomem *deepsleep_reg;
+ void __iomem *cpupll_reg_base;
+ /*
+ * Note on SLEEPCOUNT:
+ * The SLEEPCOUNT feature is mainly intended for cases in which
+ * the internal oscillator is used. The internal oscillator is
+ * fully disabled in deep sleep mode. When you exist deep sleep
+ * mode, the oscillator will be turned on and will generate very
+ * small oscillations which will not be detected by the deep sleep
+ * counter. Eventually those oscillations will grow to an amplitude
+ * large enough to start incrementing the deep sleep counter.
+ * In this case recommendation from hardware engineers is that the
+ * SLEEPCOUNT be set to 4096. This means that 4096 valid clock cycles
+ * must be detected before the clock is passed to the rest of the
+ * system.
+ * In the case that the internal oscillator is not used and the
+ * clock is generated externally, the SLEEPCOUNT value can be very
+ * small since the clock input is assumed to be stable before SoC
+ * is taken out of deepsleep mode. A value of 128 would be more than
+ * adequate.
+ */
+ int sleepcount;
+};
+
+extern unsigned int davinci_cpu_suspend_sz;
+extern void davinci_cpu_suspend(struct davinci_pm_config *);
+
+#endif
diff --git a/arch/arm/mach-davinci/include/mach/psc.h b/arch/arm/mach-davinci/include/mach/psc.h
index 171173c1dbad..651f6d8158fa 100644
--- a/arch/arm/mach-davinci/include/mach/psc.h
+++ b/arch/arm/mach-davinci/include/mach/psc.h
@@ -180,8 +180,23 @@
#define DA8XX_LPSC1_CR_P3_SS 26
#define DA8XX_LPSC1_L3_CBA_RAM 31
+/* PSC register offsets */
+#define EPCPR 0x070
+#define PTCMD 0x120
+#define PTSTAT 0x128
+#define PDSTAT 0x200
+#define PDCTL1 0x304
+#define MDSTAT 0x800
+#define MDCTL 0xA00
+
+#define MDSTAT_STATE_MASK 0x1f
+
+#ifndef __ASSEMBLER__
+
extern int davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id);
extern void davinci_psc_config(unsigned int domain, unsigned int ctlr,
unsigned int id, char enable);
+#endif
+
#endif /* __ASM_ARCH_PSC_H */
diff --git a/arch/arm/mach-davinci/include/mach/timex.h b/arch/arm/mach-davinci/include/mach/timex.h
index 52827567841d..9b885298f106 100644
--- a/arch/arm/mach-davinci/include/mach/timex.h
+++ b/arch/arm/mach-davinci/include/mach/timex.h
@@ -11,7 +11,12 @@
#ifndef __ASM_ARCH_TIMEX_H
#define __ASM_ARCH_TIMEX_H
-/* The source frequency for the timers is the 27MHz clock */
+/*
+ * Alert: Not all timers of the DaVinci family run at a frequency of 27MHz,
+ * but we should be fine as long as CLOCK_TICK_RATE or LATCH (see include/
+ * linux/jiffies.h) are not used directly in code. Currently none of the
+ * code relevant to DaVinci platform depends on these values directly.
+ */
#define CLOCK_TICK_RATE 27000000
#endif /* __ASM_ARCH_TIMEX_H__ */
diff --git a/arch/arm/mach-davinci/io.c b/arch/arm/mach-davinci/io.c
index 49912b48b1b0..a1c0b6b99edf 100644
--- a/arch/arm/mach-davinci/io.c
+++ b/arch/arm/mach-davinci/io.c
@@ -24,7 +24,7 @@ void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
if (BETWEEN(p, IO_PHYS, IO_SIZE))
return XLATE(p, IO_PHYS, IO_VIRT);
- return __arm_ioremap(p, size, type);
+ return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
}
EXPORT_SYMBOL(davinci_ioremap);
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
new file mode 100644
index 000000000000..fab953b43dea
--- /dev/null
+++ b/arch/arm/mach-davinci/pm.c
@@ -0,0 +1,158 @@
+/*
+ * DaVinci Power Management Routines
+ *
+ * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/spinlock.h>
+
+#include <asm/cacheflush.h>
+#include <asm/delay.h>
+
+#include <mach/da8xx.h>
+#include <mach/sram.h>
+#include <mach/pm.h>
+
+#include "clock.h"
+
+#define DEEPSLEEP_SLEEPCOUNT_MASK 0xFFFF
+
+static void (*davinci_sram_suspend) (struct davinci_pm_config *);
+static struct davinci_pm_config *pdata;
+
+static void davinci_sram_push(void *dest, void *src, unsigned int size)
+{
+ memcpy(dest, src, size);
+ flush_icache_range((unsigned long)dest, (unsigned long)(dest + size));
+}
+
+static void davinci_pm_suspend(void)
+{
+ unsigned val;
+
+ if (pdata->cpupll_reg_base != pdata->ddrpll_reg_base) {
+
+ /* Switch CPU PLL to bypass mode */
+ val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+ val &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
+ __raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+
+ udelay(PLL_BYPASS_TIME);
+
+ /* Powerdown CPU PLL */
+ val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+ val |= PLLCTL_PLLPWRDN;
+ __raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+ }
+
+ /* Configure sleep count in deep sleep register */
+ val = __raw_readl(pdata->deepsleep_reg);
+ val &= ~DEEPSLEEP_SLEEPCOUNT_MASK,
+ val |= pdata->sleepcount;
+ __raw_writel(val, pdata->deepsleep_reg);
+
+ /* System goes to sleep in this call */
+ davinci_sram_suspend(pdata);
+
+ if (pdata->cpupll_reg_base != pdata->ddrpll_reg_base) {
+
+ /* put CPU PLL in reset */
+ val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+ val &= ~PLLCTL_PLLRST;
+ __raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+
+ /* put CPU PLL in power down */
+ val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+ val &= ~PLLCTL_PLLPWRDN;
+ __raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+
+ /* wait for CPU PLL reset */
+ udelay(PLL_RESET_TIME);
+
+ /* bring CPU PLL out of reset */
+ val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+ val |= PLLCTL_PLLRST;
+ __raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+
+ /* Wait for CPU PLL to lock */
+ udelay(PLL_LOCK_TIME);
+
+ /* Remove CPU PLL from bypass mode */
+ val = __raw_readl(pdata->cpupll_reg_base + PLLCTL);
+ val &= ~PLLCTL_PLLENSRC;
+ val |= PLLCTL_PLLEN;
+ __raw_writel(val, pdata->cpupll_reg_base + PLLCTL);
+ }
+}
+
+static int davinci_pm_enter(suspend_state_t state)
+{
+ int ret = 0;
+
+ switch (state) {
+ case PM_SUSPEND_STANDBY:
+ case PM_SUSPEND_MEM:
+ davinci_pm_suspend();
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static struct platform_suspend_ops davinci_pm_ops = {
+ .enter = davinci_pm_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+static int __init davinci_pm_probe(struct platform_device *pdev)
+{
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "cannot get platform data\n");
+ return -ENOENT;
+ }
+
+ davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
+ if (!davinci_sram_suspend) {
+ dev_err(&pdev->dev, "cannot allocate SRAM memory\n");
+ return -ENOMEM;
+ }
+
+ davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
+ davinci_cpu_suspend_sz);
+
+ suspend_set_ops(&davinci_pm_ops);
+
+ return 0;
+}
+
+static int __exit davinci_pm_remove(struct platform_device *pdev)
+{
+ sram_free(davinci_sram_suspend, davinci_cpu_suspend_sz);
+ return 0;
+}
+
+static struct platform_driver davinci_pm_driver = {
+ .driver = {
+ .name = "pm-davinci",
+ .owner = THIS_MODULE,
+ },
+ .remove = __exit_p(davinci_pm_remove),
+};
+
+static int __init davinci_pm_init(void)
+{
+ return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
+}
+late_initcall(davinci_pm_init);
diff --git a/arch/arm/mach-davinci/psc.c b/arch/arm/mach-davinci/psc.c
index 04a3cb72c5ab..adf6b5c7f1e5 100644
--- a/arch/arm/mach-davinci/psc.c
+++ b/arch/arm/mach-davinci/psc.c
@@ -25,17 +25,6 @@
#include <mach/cputype.h>
#include <mach/psc.h>
-/* PSC register offsets */
-#define EPCPR 0x070
-#define PTCMD 0x120
-#define PTSTAT 0x128
-#define PDSTAT 0x200
-#define PDCTL1 0x304
-#define MDSTAT 0x800
-#define MDCTL 0xA00
-
-#define MDSTAT_STATE_MASK 0x1f
-
/* Return nonzero iff the domain's clock is active */
int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
{
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
new file mode 100644
index 000000000000..fb5e72b532b0
--- /dev/null
+++ b/arch/arm/mach-davinci/sleep.S
@@ -0,0 +1,224 @@
+/*
+ * (C) Copyright 2009, Texas Instruments, Inc. http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR /PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+/* replicated define because linux/bitops.h cannot be included in assembly */
+#define BIT(nr) (1 << (nr))
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <mach/psc.h>
+#include <mach/memory.h>
+
+#include "clock.h"
+
+/* Arbitrary, hardware currently does not update PHYRDY correctly */
+#define PHYRDY_CYCLES 0x1000
+
+/* Assume 25 MHz speed for the cycle conversions since PLLs are bypassed */
+#define PLL_BYPASS_CYCLES (PLL_BYPASS_TIME * 25)
+#define PLL_RESET_CYCLES (PLL_RESET_TIME * 25)
+#define PLL_LOCK_CYCLES (PLL_LOCK_TIME * 25)
+
+#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
+
+ .text
+/*
+ * Move DaVinci into deep sleep state
+ *
+ * Note: This code is copied to internal SRAM by PM code. When the DaVinci
+ * wakes up it continues execution at the point it went to sleep.
+ * Register Usage:
+ * r0: contains virtual base for DDR2 controller
+ * r1: contains virtual base for DDR2 Power and Sleep controller (PSC)
+ * r2: contains PSC number for DDR2
+ * r3: contains virtual base DDR2 PLL controller
+ * r4: contains virtual address of the DEEPSLEEP register
+ */
+ENTRY(davinci_cpu_suspend)
+ stmfd sp!, {r0-r12, lr} @ save registers on stack
+
+ ldr ip, CACHE_FLUSH
+ blx ip
+
+ ldmia r0, {r0-r4}
+
+ /*
+ * Switch DDR to self-refresh mode.
+ */
+
+ /* calculate SDRCR address */
+ ldr ip, [r0, #DDR2_SDRCR_OFFSET]
+ bic ip, ip, #DDR2_SRPD_BIT
+ orr ip, ip, #DDR2_LPMODEN_BIT
+ str ip, [r0, #DDR2_SDRCR_OFFSET]
+
+ ldr ip, [r0, #DDR2_SDRCR_OFFSET]
+ orr ip, ip, #DDR2_MCLKSTOPEN_BIT
+ str ip, [r0, #DDR2_SDRCR_OFFSET]
+
+ mov ip, #PHYRDY_CYCLES
+1: subs ip, ip, #0x1
+ bne 1b
+
+ /* Disable DDR2 LPSC */
+ mov r7, r0
+ mov r0, #0x2
+ bl davinci_ddr_psc_config
+ mov r0, r7
+
+ /* Disable clock to DDR PHY */
+ ldr ip, [r3, #PLLDIV1]
+ bic ip, ip, #PLLDIV_EN
+ str ip, [r3, #PLLDIV1]
+
+ /* Put the DDR PLL in bypass and power down */
+ ldr ip, [r3, #PLLCTL]
+ bic ip, ip, #PLLCTL_PLLENSRC
+ bic ip, ip, #PLLCTL_PLLEN
+ str ip, [r3, #PLLCTL]
+
+ /* Wait for PLL to switch to bypass */
+ mov ip, #PLL_BYPASS_CYCLES
+2: subs ip, ip, #0x1
+ bne 2b
+
+ /* Power down the PLL */
+ ldr ip, [r3, #PLLCTL]
+ orr ip, ip, #PLLCTL_PLLPWRDN
+ str ip, [r3, #PLLCTL]
+
+ /* Go to deep sleep */
+ ldr ip, [r4]
+ orr ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
+ /* System goes to sleep beyond after this instruction */
+ str ip, [r4]
+
+ /* Wake up from sleep */
+
+ /* Clear sleep enable */
+ ldr ip, [r4]
+ bic ip, ip, #DEEPSLEEP_SLEEPENABLE_BIT
+ str ip, [r4]
+
+ /* initialize the DDR PLL controller */
+
+ /* Put PLL in reset */
+ ldr ip, [r3, #PLLCTL]
+ bic ip, ip, #PLLCTL_PLLRST
+ str ip, [r3, #PLLCTL]
+
+ /* Clear PLL power down */
+ ldr ip, [r3, #PLLCTL]
+ bic ip, ip, #PLLCTL_PLLPWRDN
+ str ip, [r3, #PLLCTL]
+
+ mov ip, #PLL_RESET_CYCLES
+3: subs ip, ip, #0x1
+ bne 3b
+
+ /* Bring PLL out of reset */
+ ldr ip, [r3, #PLLCTL]
+ orr ip, ip, #PLLCTL_PLLRST
+ str ip, [r3, #PLLCTL]
+
+ /* Wait for PLL to lock (assume prediv = 1, 25MHz OSCIN) */
+ mov ip, #PLL_LOCK_CYCLES
+4: subs ip, ip, #0x1
+ bne 4b
+
+ /* Remove PLL from bypass mode */
+ ldr ip, [r3, #PLLCTL]
+ bic ip, ip, #PLLCTL_PLLENSRC
+ orr ip, ip, #PLLCTL_PLLEN
+ str ip, [r3, #PLLCTL]
+
+ /* Start 2x clock to DDR2 */
+
+ ldr ip, [r3, #PLLDIV1]
+ orr ip, ip, #PLLDIV_EN
+ str ip, [r3, #PLLDIV1]
+
+ /* Enable VCLK */
+
+ /* Enable DDR2 LPSC */
+ mov r7, r0
+ mov r0, #0x3
+ bl davinci_ddr_psc_config
+ mov r0, r7
+
+ /* clear MCLKSTOPEN */
+
+ ldr ip, [r0, #DDR2_SDRCR_OFFSET]
+ bic ip, ip, #DDR2_MCLKSTOPEN_BIT
+ str ip, [r0, #DDR2_SDRCR_OFFSET]
+
+ ldr ip, [r0, #DDR2_SDRCR_OFFSET]
+ bic ip, ip, #DDR2_LPMODEN_BIT
+ str ip, [r0, #DDR2_SDRCR_OFFSET]
+
+ /* Restore registers and return */
+ ldmfd sp!, {r0-r12, pc}
+
+ENDPROC(davinci_cpu_suspend)
+
+/*
+ * Disables or Enables DDR2 LPSC
+ * Register Usage:
+ * r0: Enable or Disable LPSC r0 = 0x3 => Enable, r0 = 0x2 => Disable LPSC
+ * r1: contains virtual base for DDR2 Power and Sleep controller (PSC)
+ * r2: contains PSC number for DDR2
+ */
+ENTRY(davinci_ddr_psc_config)
+ /* Set next state in mdctl for DDR2 */
+ mov r6, #MDCTL
+ add r6, r6, r2, lsl #2
+ ldr ip, [r1, r6]
+ bic ip, ip, #MDSTAT_STATE_MASK
+ orr ip, ip, r0
+ str ip, [r1, r6]
+
+ /* Enable the Power Domain Transition Command */
+ ldr ip, [r1, #PTCMD]
+ orr ip, ip, #0x1
+ str ip, [r1, #PTCMD]
+
+ /* Check for Transition Complete (PTSTAT) */
+ptstat_done:
+ ldr ip, [r1, #PTSTAT]
+ and ip, ip, #0x1
+ cmp ip, #0x0
+ bne ptstat_done
+
+ /* Check for DDR2 clock disable completion; */
+ mov r6, #MDSTAT
+ add r6, r6, r2, lsl #2
+ddr2clk_stop_done:
+ ldr ip, [r1, r6]
+ and ip, ip, #MDSTAT_STATE_MASK
+ cmp ip, r0
+ bne ddr2clk_stop_done
+
+ mov pc, lr
+ENDPROC(davinci_ddr_psc_config)
+
+CACHE_FLUSH:
+ .word arm926_flush_kern_cache_all
+
+ENTRY(davinci_cpu_suspend_sz)
+ .word . - davinci_cpu_suspend
+ENDPROC(davinci_cpu_suspend_sz)
diff --git a/arch/arm/mach-ep93xx/clock.c b/arch/arm/mach-ep93xx/clock.c
index 1d0f9d8aff2e..e894ee0d603d 100644
--- a/arch/arm/mach-ep93xx/clock.c
+++ b/arch/arm/mach-ep93xx/clock.c
@@ -10,6 +10,8 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -445,37 +447,39 @@ static void __init ep93xx_dma_clock_init(void)
static int __init ep93xx_clock_init(void)
{
u32 value;
- int i;
- value = __raw_readl(EP93XX_SYSCON_CLOCK_SET1);
- if (!(value & 0x00800000)) { /* PLL1 bypassed? */
+ /* Determine the bootloader configured pll1 rate */
+ value = __raw_readl(EP93XX_SYSCON_CLKSET1);
+ if (!(value & EP93XX_SYSCON_CLKSET1_NBYP1))
clk_pll1.rate = clk_xtali.rate;
- } else {
+ else
clk_pll1.rate = calc_pll_rate(value);
- }
+
+ /* Initialize the pll1 derived clocks */
clk_f.rate = clk_pll1.rate / fclk_divisors[(value >> 25) & 0x7];
clk_h.rate = clk_pll1.rate / hclk_divisors[(value >> 20) & 0x7];
clk_p.rate = clk_h.rate / pclk_divisors[(value >> 18) & 0x3];
ep93xx_dma_clock_init();
+ /* Determine the bootloader configured pll2 rate */
value = __raw_readl(EP93XX_SYSCON_CLOCK_SET2);
- if (!(value & 0x00080000)) { /* PLL2 bypassed? */
+ if (!(value & EP93XX_SYSCON_CLKSET2_NBYP2))
clk_pll2.rate = clk_xtali.rate;
- } else if (value & 0x00040000) { /* PLL2 enabled? */
+ else if (value & EP93XX_SYSCON_CLKSET2_PLL2_EN)
clk_pll2.rate = calc_pll_rate(value);
- } else {
+ else
clk_pll2.rate = 0;
- }
+
+ /* Initialize the pll2 derived clocks */
clk_usb_host.rate = clk_pll2.rate / (((value >> 28) & 0xf) + 1);
- printk(KERN_INFO "ep93xx: PLL1 running at %ld MHz, PLL2 at %ld MHz\n",
+ pr_info("PLL1 running at %ld MHz, PLL2 at %ld MHz\n",
clk_pll1.rate / 1000000, clk_pll2.rate / 1000000);
- printk(KERN_INFO "ep93xx: FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n",
+ pr_info("FCLK %ld MHz, HCLK %ld MHz, PCLK %ld MHz\n",
clk_f.rate / 1000000, clk_h.rate / 1000000,
clk_p.rate / 1000000);
- for (i = 0; i < ARRAY_SIZE(clocks); i++)
- clkdev_add(&clocks[i]);
+ clkdev_add_table(clocks, ARRAY_SIZE(clocks));
return 0;
}
arch_initcall(ep93xx_clock_init);
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 1f0d66561bbe..41064bd63e38 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -14,6 +14,8 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
@@ -318,8 +320,7 @@ static int ep93xx_gpio_irq_type(unsigned int irq, unsigned int type)
desc->handle_irq = handle_edge_irq;
break;
default:
- pr_err("ep93xx: failed to set irq type %d for gpio %d\n",
- type, gpio);
+ pr_err("failed to set irq type %d for gpio %d\n", type, gpio);
return -EINVAL;
}
@@ -572,9 +573,9 @@ void __init ep93xx_register_i2c(struct i2c_gpio_platform_data *data,
* CMOS driver.
*/
if (data->sda_is_open_drain && data->sda_pin != EP93XX_GPIO_LINE_EEDAT)
- pr_warning("ep93xx: sda != EEDAT, open drain has no effect\n");
+ pr_warning("sda != EEDAT, open drain has no effect\n");
if (data->scl_is_open_drain && data->scl_pin != EP93XX_GPIO_LINE_EECLK)
- pr_warning("ep93xx: scl != EECLK, open drain has no effect\n");
+ pr_warning("scl != EECLK, open drain has no effect\n");
__raw_writel((data->sda_is_open_drain << 1) |
(data->scl_is_open_drain << 0),
diff --git a/arch/arm/mach-ep93xx/dma-m2p.c b/arch/arm/mach-ep93xx/dma-m2p.c
index dbcac9c40a28..8904ca4e2e24 100644
--- a/arch/arm/mach-ep93xx/dma-m2p.c
+++ b/arch/arm/mach-ep93xx/dma-m2p.c
@@ -28,6 +28,8 @@
* with this implementation.
*/
+#define pr_fmt(fmt) "ep93xx " KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
@@ -173,7 +175,7 @@ static irqreturn_t m2p_irq(int irq, void *dev_id)
switch (m2p_channel_state(ch)) {
case STATE_IDLE:
- pr_crit("m2p_irq: dma interrupt without a dma buffer\n");
+ pr_crit("dma interrupt without a dma buffer\n");
BUG();
break;
@@ -197,7 +199,7 @@ static irqreturn_t m2p_irq(int irq, void *dev_id)
break;
case STATE_NEXT:
- pr_crit("m2p_irq: dma interrupt while next\n");
+ pr_crit("dma interrupt while next\n");
BUG();
break;
}
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index a4a7be308000..d22d67ac8b99 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -118,12 +118,33 @@ static void __init edb93xx_register_i2c(void)
}
}
+
+/*************************************************************************
+ * EDB93xx pwm
+ *************************************************************************/
+static void __init edb93xx_register_pwm(void)
+{
+ if (machine_is_edb9301() ||
+ machine_is_edb9302() || machine_is_edb9302a()) {
+ /* EP9301 and EP9302 only have pwm.1 (EGPIO14) */
+ ep93xx_register_pwm(0, 1);
+ } else if (machine_is_edb9307() || machine_is_edb9307a()) {
+ /* EP9307 only has pwm.0 (PWMOUT) */
+ ep93xx_register_pwm(1, 0);
+ } else {
+ /* EP9312 and EP9315 have both */
+ ep93xx_register_pwm(1, 1);
+ }
+}
+
+
static void __init edb93xx_init_machine(void)
{
ep93xx_init_devices();
edb93xx_register_flash();
ep93xx_register_eth(&edb93xx_eth_data, 1);
edb93xx_register_i2c();
+ edb93xx_register_pwm();
}
diff --git a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
index d55194a4c093..cd359120c1f5 100644
--- a/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
+++ b/arch/arm/mach-ep93xx/include/mach/ep93xx-regs.h
@@ -167,8 +167,11 @@
#define EP93XX_SYSCON_PWRCNT_DMA_M2P1 (1<<16)
#define EP93XX_SYSCON_HALT EP93XX_SYSCON_REG(0x08)
#define EP93XX_SYSCON_STANDBY EP93XX_SYSCON_REG(0x0c)
-#define EP93XX_SYSCON_CLOCK_SET1 EP93XX_SYSCON_REG(0x20)
-#define EP93XX_SYSCON_CLOCK_SET2 EP93XX_SYSCON_REG(0x24)
+#define EP93XX_SYSCON_CLKSET1 EP93XX_SYSCON_REG(0x20)
+#define EP93XX_SYSCON_CLKSET1_NBYP1 (1<<23)
+#define EP93XX_SYSCON_CLKSET2 EP93XX_SYSCON_REG(0x24)
+#define EP93XX_SYSCON_CLKSET2_NBYP2 (1<<19)
+#define EP93XX_SYSCON_CLKSET2_PLL2_EN (1<<18)
#define EP93XX_SYSCON_DEVCFG EP93XX_SYSCON_REG(0x80)
#define EP93XX_SYSCON_DEVCFG_SWRST (1<<31)
#define EP93XX_SYSCON_DEVCFG_D1ONG (1<<30)
diff --git a/arch/arm/mach-ep93xx/include/mach/ts72xx.h b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
index 3bd934e9a7f1..61c0e132c63e 100644
--- a/arch/arm/mach-ep93xx/include/mach/ts72xx.h
+++ b/arch/arm/mach-ep93xx/include/mach/ts72xx.h
@@ -9,9 +9,6 @@
* febff000 22000000 4K model number register
* febfe000 22400000 4K options register
* febfd000 22800000 4K options register #2
- * febfc000 [67]0000000 4K NAND data register
- * febfb000 [67]0400000 4K NAND control register
- * febfa000 [67]0800000 4K NAND busy register
* febf9000 10800000 4K TS-5620 RTC index register
* febf8000 11700000 4K TS-5620 RTC data register
*/
@@ -41,22 +38,6 @@
#define TS72XX_OPTIONS2_TS9420_BOOT 0x02
-#define TS72XX_NAND1_DATA_PHYS_BASE 0x60000000
-#define TS72XX_NAND2_DATA_PHYS_BASE 0x70000000
-#define TS72XX_NAND_DATA_VIRT_BASE 0xfebfc000
-#define TS72XX_NAND_DATA_SIZE 0x00001000
-
-#define TS72XX_NAND1_CONTROL_PHYS_BASE 0x60400000
-#define TS72XX_NAND2_CONTROL_PHYS_BASE 0x70400000
-#define TS72XX_NAND_CONTROL_VIRT_BASE 0xfebfb000
-#define TS72XX_NAND_CONTROL_SIZE 0x00001000
-
-#define TS72XX_NAND1_BUSY_PHYS_BASE 0x60800000
-#define TS72XX_NAND2_BUSY_PHYS_BASE 0x70800000
-#define TS72XX_NAND_BUSY_VIRT_BASE 0xfebfa000
-#define TS72XX_NAND_BUSY_SIZE 0x00001000
-
-
#define TS72XX_RTC_INDEX_VIRT_BASE 0xfebf9000
#define TS72XX_RTC_INDEX_PHYS_BASE 0x10800000
#define TS72XX_RTC_INDEX_SIZE 0x00001000
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 259f7822ba52..47a86f07831d 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -10,12 +10,16 @@
* your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/m48t86.h>
#include <linux/mtd/physmap.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
#include <mach/hardware.h>
#include <mach/ts72xx.h>
@@ -54,92 +58,162 @@ static struct map_desc ts72xx_io_desc[] __initdata = {
}
};
-static struct map_desc ts72xx_nand_io_desc[] __initdata = {
- {
- .virtual = TS72XX_NAND_DATA_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_DATA_PHYS_BASE),
- .length = TS72XX_NAND_DATA_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_CONTROL_PHYS_BASE),
- .length = TS72XX_NAND_CONTROL_SIZE,
- .type = MT_DEVICE,
- }, {
- .virtual = TS72XX_NAND_BUSY_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND1_BUSY_PHYS_BASE),
- .length = TS72XX_NAND_BUSY_SIZE,
- .type = MT_DEVICE,
+static void __init ts72xx_map_io(void)
+{
+ ep93xx_map_io();
+ iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
+}
+
+
+/*************************************************************************
+ * NAND flash
+ *************************************************************************/
+#define TS72XX_NAND_CONTROL_ADDR_LINE 22 /* 0xN0400000 */
+#define TS72XX_NAND_BUSY_ADDR_LINE 23 /* 0xN0800000 */
+
+static void ts72xx_nand_hwcontrol(struct mtd_info *mtd,
+ int cmd, unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (ctrl & NAND_CTRL_CHANGE) {
+ void __iomem *addr = chip->IO_ADDR_R;
+ unsigned char bits;
+
+ addr += (1 << TS72XX_NAND_CONTROL_ADDR_LINE);
+
+ bits = __raw_readb(addr) & ~0x07;
+ bits |= (ctrl & NAND_NCE) << 2; /* bit 0 -> bit 2 */
+ bits |= (ctrl & NAND_CLE); /* bit 1 -> bit 1 */
+ bits |= (ctrl & NAND_ALE) >> 2; /* bit 2 -> bit 0 */
+
+ __raw_writeb(bits, addr);
}
-};
-static struct map_desc ts72xx_alternate_nand_io_desc[] __initdata = {
+ if (cmd != NAND_CMD_NONE)
+ __raw_writeb(cmd, chip->IO_ADDR_W);
+}
+
+static int ts72xx_nand_device_ready(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd->priv;
+ void __iomem *addr = chip->IO_ADDR_R;
+
+ addr += (1 << TS72XX_NAND_BUSY_ADDR_LINE);
+
+ return !!(__raw_readb(addr) & 0x20);
+}
+
+static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
+
+#define TS72XX_BOOTROM_PART_SIZE (SZ_16K)
+#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M)
+
+static struct mtd_partition ts72xx_nand_parts[] = {
{
- .virtual = TS72XX_NAND_DATA_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_DATA_PHYS_BASE),
- .length = TS72XX_NAND_DATA_SIZE,
- .type = MT_DEVICE,
+ .name = "TS-BOOTROM",
+ .offset = 0,
+ .size = TS72XX_BOOTROM_PART_SIZE,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
- .virtual = TS72XX_NAND_CONTROL_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_CONTROL_PHYS_BASE),
- .length = TS72XX_NAND_CONTROL_SIZE,
- .type = MT_DEVICE,
+ .name = "Linux",
+ .offset = MTDPART_OFS_APPEND,
+ .size = 0, /* filled in later */
}, {
- .virtual = TS72XX_NAND_BUSY_VIRT_BASE,
- .pfn = __phys_to_pfn(TS72XX_NAND2_BUSY_PHYS_BASE),
- .length = TS72XX_NAND_BUSY_SIZE,
- .type = MT_DEVICE,
- }
+ .name = "RedBoot",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
};
-static void __init ts72xx_map_io(void)
+static void ts72xx_nand_set_parts(uint64_t size,
+ struct platform_nand_chip *chip)
{
- ep93xx_map_io();
- iotable_init(ts72xx_io_desc, ARRAY_SIZE(ts72xx_io_desc));
+ /* Factory TS-72xx boards only come with 32MiB or 128MiB NAND options */
+ if (size == SZ_32M || size == SZ_128M) {
+ /* Set the "Linux" partition size */
+ ts72xx_nand_parts[1].size = size - TS72XX_REDBOOT_PART_SIZE;
- /*
- * The TS-7200 has NOR flash, the other models have NAND flash.
- */
- if (!board_is_ts7200()) {
- if (is_ts9420_installed()) {
- iotable_init(ts72xx_alternate_nand_io_desc,
- ARRAY_SIZE(ts72xx_alternate_nand_io_desc));
- } else {
- iotable_init(ts72xx_nand_io_desc,
- ARRAY_SIZE(ts72xx_nand_io_desc));
- }
+ chip->partitions = ts72xx_nand_parts;
+ chip->nr_partitions = ARRAY_SIZE(ts72xx_nand_parts);
+ } else {
+ pr_warning("Unknown nand disk size:%lluMiB\n", size >> 20);
}
}
+static struct platform_nand_data ts72xx_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .chip_delay = 15,
+ .part_probe_types = ts72xx_nand_part_probes,
+ .set_parts = ts72xx_nand_set_parts,
+ },
+ .ctrl = {
+ .cmd_ctrl = ts72xx_nand_hwcontrol,
+ .dev_ready = ts72xx_nand_device_ready,
+ },
+};
+
+static struct resource ts72xx_nand_resource[] = {
+ {
+ .start = 0, /* filled in later */
+ .end = 0, /* filled in later */
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device ts72xx_nand_flash = {
+ .name = "gen_nand",
+ .id = -1,
+ .dev.platform_data = &ts72xx_nand_data,
+ .resource = ts72xx_nand_resource,
+ .num_resources = ARRAY_SIZE(ts72xx_nand_resource),
+};
+
+
/*************************************************************************
* NOR flash (TS-7200 only)
*************************************************************************/
-static struct physmap_flash_data ts72xx_flash_data = {
+static struct physmap_flash_data ts72xx_nor_data = {
.width = 2,
};
-static struct resource ts72xx_flash_resource = {
+static struct resource ts72xx_nor_resource = {
.start = EP93XX_CS6_PHYS_BASE,
.end = EP93XX_CS6_PHYS_BASE + SZ_16M - 1,
.flags = IORESOURCE_MEM,
};
-static struct platform_device ts72xx_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = {
- .platform_data = &ts72xx_flash_data,
- },
- .num_resources = 1,
- .resource = &ts72xx_flash_resource,
+static struct platform_device ts72xx_nor_flash = {
+ .name = "physmap-flash",
+ .id = 0,
+ .dev.platform_data = &ts72xx_nor_data,
+ .resource = &ts72xx_nor_resource,
+ .num_resources = 1,
};
static void __init ts72xx_register_flash(void)
{
- if (board_is_ts7200())
- platform_device_register(&ts72xx_flash);
+ if (board_is_ts7200()) {
+ platform_device_register(&ts72xx_nor_flash);
+ } else {
+ resource_size_t start;
+
+ if (is_ts9420_installed())
+ start = EP93XX_CS7_PHYS_BASE;
+ else
+ start = EP93XX_CS6_PHYS_BASE;
+
+ ts72xx_nand_resource[0].start = start;
+ ts72xx_nand_resource[0].end = start + SZ_16M - 1;
+
+ platform_device_register(&ts72xx_nand_flash);
+ }
}
+
static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c
index 41febc796b1c..e3bc3f6f6b10 100644
--- a/arch/arm/mach-footbridge/common.c
+++ b/arch/arm/mach-footbridge/common.c
@@ -32,12 +32,13 @@ unsigned int mem_fclk_21285 = 50000000;
EXPORT_SYMBOL(mem_fclk_21285);
-static void __init early_fclk(char **arg)
+static int __init early_fclk(char *arg)
{
- mem_fclk_21285 = simple_strtoul(*arg, arg, 0);
+ mem_fclk_21285 = simple_strtoul(arg, NULL, 0);
+ return 0;
}
-__early_param("mem_fclk_21285=", early_fclk);
+early_param("mem_fclk_21285", early_fclk);
static int __init parse_tag_memclk(const struct tag *tag)
{
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c
index a0f60e55da6a..8b390e36ba69 100644
--- a/arch/arm/mach-integrator/core.c
+++ b/arch/arm/mach-integrator/core.c
@@ -144,8 +144,7 @@ static int __init integrator_init(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
struct amba_device *d = amba_devs[i];
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index 3f35293d457a..66ef86d6d9e3 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -558,9 +558,7 @@ static void __init intcp_init(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(cp_lookups); i++)
- clkdev_add(&cp_lookups[i]);
-
+ clkdev_add_table(cp_lookups, ARRAY_SIZE(cp_lookups));
platform_add_devices(intcp_devs, ARRAY_SIZE(intcp_devs));
for (i = 0; i < ARRAY_SIZE(amba_devs); i++) {
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c
index 529580997814..48642e66c566 100644
--- a/arch/arm/mach-iop13xx/io.c
+++ b/arch/arm/mach-iop13xx/io.c
@@ -61,9 +61,9 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
(cookie - IOP13XX_PCIE_LOWER_MEM_RA));
break;
case IOP13XX_PBI_LOWER_MEM_RA ... IOP13XX_PBI_UPPER_MEM_RA:
- retval = __arm_ioremap(IOP13XX_PBI_LOWER_MEM_PA +
+ retval = __arm_ioremap_caller(IOP13XX_PBI_LOWER_MEM_PA +
(cookie - IOP13XX_PBI_LOWER_MEM_RA),
- size, mtype);
+ size, mtype, __builtin_return_address(0));
break;
case IOP13XX_PCIE_LOWER_IO_PA ... IOP13XX_PCIE_UPPER_IO_PA:
retval = (void *) IOP13XX_PCIE_IO_PHYS_TO_VIRT(cookie);
@@ -75,7 +75,8 @@ void * __iomem __iop13xx_ioremap(unsigned long cookie, size_t size,
retval = (void *) IOP13XX_PMMR_PHYS_TO_VIRT(cookie);
break;
default:
- retval = __arm_ioremap(cookie, size, mtype);
+ retval = __arm_ioremap_caller(cookie, size, mtype,
+ __builtin_return_address(0));
}
return retval;
diff --git a/arch/arm/mach-lh7a40x/clocks.c b/arch/arm/mach-lh7a40x/clocks.c
index fcaf876f19b6..0651f96653f9 100644
--- a/arch/arm/mach-lh7a40x/clocks.c
+++ b/arch/arm/mach-lh7a40x/clocks.c
@@ -10,6 +10,8 @@
#include <mach/hardware.h>
#include <mach/clocks.h>
#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/string.h>
struct module;
diff --git a/arch/arm/mach-mmp/clock.c b/arch/arm/mach-mmp/clock.c
index 2a46ed5cc2a2..886e05648f08 100644
--- a/arch/arm/mach-mmp/clock.c
+++ b/arch/arm/mach-mmp/clock.c
@@ -88,11 +88,3 @@ unsigned long clk_get_rate(struct clk *clk)
return rate;
}
EXPORT_SYMBOL(clk_get_rate);
-
-void clks_register(struct clk_lookup *clks, size_t num)
-{
- int i;
-
- for (i = 0; i < num; i++)
- clkdev_add(&clks[i]);
-}
diff --git a/arch/arm/mach-mmp/clock.h b/arch/arm/mach-mmp/clock.h
index eefffbe683b0..016ae94691c0 100644
--- a/arch/arm/mach-mmp/clock.h
+++ b/arch/arm/mach-mmp/clock.h
@@ -68,5 +68,3 @@ struct clk clk_##_name = { \
extern struct clk clk_pxa168_gpio;
extern struct clk clk_pxa168_timers;
-
-extern void clks_register(struct clk_lookup *, size_t);
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 37dbdde17fac..1873c821df90 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -94,7 +94,7 @@ static int __init pxa168_init(void)
mfp_init_base(MFPR_VIRT_BASE);
mfp_init_addr(pxa168_mfp_addr_map);
pxa_init_dma(IRQ_PXA168_DMA_INT0, 32);
- clks_register(ARRAY_AND_SIZE(pxa168_clkregs));
+ clkdev_add_table(ARRAY_AND_SIZE(pxa168_clkregs));
}
return 0;
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index d4049508a4df..46f2d69bef3c 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -131,7 +131,7 @@ static int __init pxa910_init(void)
mfp_init_base(MFPR_VIRT_BASE);
mfp_init_addr(pxa910_mfp_addr_map);
pxa_init_dma(IRQ_PXA910_DMA_INT0, 32);
- clks_register(ARRAY_AND_SIZE(pxa910_clkregs));
+ clkdev_add_table(ARRAY_AND_SIZE(pxa910_clkregs));
}
return 0;
diff --git a/arch/arm/mach-mmp/ttc_dkb.c b/arch/arm/mach-mmp/ttc_dkb.c
index 8f49b2b12608..b22dec4abf78 100644
--- a/arch/arm/mach-mmp/ttc_dkb.c
+++ b/arch/arm/mach-mmp/ttc_dkb.c
@@ -24,8 +24,6 @@
#include "common.h"
-#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
-
static unsigned long ttc_dkb_pin_config[] __initdata = {
/* UART2 */
GPIO47_UART2_RXD,
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index f780086befd7..57012e82e3cd 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -1,7 +1,189 @@
if ARCH_MSM
-comment "MSM Board Type"
- depends on ARCH_MSM
+choice
+ prompt "MSM SoC Type"
+ default ARCH_MSM7X00A
+
+config ARCH_MSM7X01A
+ bool "MSM7x00A / MSM7x01A"
+ select ARCH_MSM_ARM11
+ select CPU_V6
+
+config ARCH_MSM7X27
+ bool "MSM7x27"
+ select ARCH_MSM_ARM11
+ select CPU_V6
+
+config ARCH_MSM7X30
+ bool "MSM7x30"
+ select ARCH_MSM_SCORPION
+ select CPU_V7
+
+config ARCH_QSD8X50
+ bool "QSD8X50"
+ select ARCH_MSM_SCORPION
+ select VERIFY_PERMISSION_FAULT
+ select CPU_V7
+
+endchoice
+
+config ARCH_MSM_ARM11
+ bool
+config ARCH_MSM_SCORPION
+ bool
+
+
+menu "MSM Board Selection"
+
+config MACH_HALIBUT
+ select CPU_V6
+ depends on ARCH_MSM7X01A
+ depends on MSM_STACKED_MEMORY
+ default y
+ bool "Halibut Board (QCT SURF7201A)"
+ help
+ Support for the Qualcomm SURF7201A eval board.
+
+config MACH_MSM7201A_SURF
+ depends on ARCH_MSM7X01A
+ depends on MSM_STACKED_MEMORY
+ default y
+ bool "MSM7201A SURF"
+ help
+ Support for the Qualcomm MSM7201A SURF eval board.
+
+config MACH_MSM7201A_FFA
+ depends on ARCH_MSM7X01A
+ depends on MSM_STACKED_MEMORY
+ default y
+ bool "MSM7201A FFA"
+ help
+ Support for the Qualcomm MSM7201A FFA eval board.
+
+config MACH_TROUT
+ select CPU_V6
+ depends on ARCH_MSM7X01A
+ depends on MSM_STACKED_MEMORY
+ default y
+ bool "HTC Dream (aka trout)"
+ help
+ Support for the HTC Dream, T-Mobile G1, Android ADP1 devices.
+
+config MACH_MSM7X27_SURF
+ depends on ARCH_MSM7X27
+ depends on !MSM_STACKED_MEMORY
+ default y
+ bool "MSM7x27 SURF"
+ help
+ Support for the Qualcomm MSM7x27 SURF eval board.
+
+config MACH_MSM7X27_FFA
+ depends on ARCH_MSM7X27
+ depends on !MSM_STACKED_MEMORY
+ default y
+ bool "MSM7x27 FFA"
+ help
+ Support for the Qualcomm MSM7x27 FFA eval board.
+
+config MACH_MSM7X30_SURF
+ depends on ARCH_MSM7X30
+ depends on !MSM_STACKED_MEMORY
+ default y
+ bool "MSM7x30 SURF"
+ help
+ Support for the Qualcomm MSM7x30 SURF eval board.
+
+config MACH_MSM7X30_FFA
+ depends on ARCH_MSM7X30
+ depends on !MSM_STACKED_MEMORY
+ default y
+ bool "MSM7x30 FFA"
+ help
+ Support for the Qualcomm MSM7x30 FFA eval board.
+
+config MACH_MSM7X30_FLUID
+ depends on ARCH_MSM7X30
+ depends on !MSM_STACKED_MEMORY
+ default y
+ bool "MSM7x30 FLUID"
+ help
+ Support for the Qualcomm MSM7x30 FLUID eval board.
+
+config MACH_SAPPHIRE
+ depends on ARCH_MSM7X01A
+ default n
+ bool "Sapphire"
+
+config MACH_QSD8X50_SURF
+ depends on ARCH_QSD8X50
+ depends on MSM_STACKED_MEMORY
+ default y
+ bool "QSD8x50 SURF"
+ help
+ Support for the Qualcomm QSD8x50 SURF eval board.
+
+config MACH_QSD8X50_FFA
+ depends on ARCH_QSD8X50
+ depends on MSM_STACKED_MEMORY
+ default y
+ bool "QSD8x50 FFA"
+ help
+ Support for the Qualcomm QSD8x50 FFA eval board.
+
+config MACH_QSD8X50_COMET
+ depends on ARCH_QSD8X50
+ depends on MSM_STACKED_MEMORY
+ default n
+ bool "QSD8x50 Comet"
+ help
+ Support for the Qualcomm Comet eval board.
+
+config MACH_QSD8X50_GRAPEFRUIT
+ depends on ARCH_QSD8X50
+ depends on MSM_STACKED_MEMORY
+ default n
+ bool "QSD8x50 Grapefruit"
+ help
+ Support for the Qualcomm Grapefruit eval board.
+
+config MACH_QSD8X50_ST1
+ depends on ARCH_QSD8X50
+ depends on MSM_STACKED_MEMORY
+ default n
+ bool "QSD8x50 ST1"
+ help
+ Support for the Qualcomm ST1.
+
+endmenu
+
+config MSM_STACKED_MEMORY
+ bool "Stacked Memory"
+ default y
+ help
+ This option is used to indicate the presence of on-die stacked
+ memory. When present this memory bank is used for a high speed
+ shared memory interface. When not present regular RAM is used.
+
+config MSM_AMSS_VERSION
+ int
+ default 6210 if MSM_AMSS_VERSION_6210
+ default 6220 if MSM_AMSS_VERSION_6220
+ default 6225 if MSM_AMSS_VERSION_6225
+
+choice
+ prompt "AMSS modem firmware version"
+
+ default MSM_AMSS_VERSION_6225
+
+ config MSM_AMSS_VERSION_6210
+ bool "6.2.10"
+
+ config MSM_AMSS_VERSION_6220
+ bool "6.2.20"
+
+ config MSM_AMSS_VERSION_6225
+ bool "6.2.20 + New ADSP"
+endchoice
config MSM_DEBUG_UART
int
@@ -27,17 +209,488 @@ choice
bool "UART3"
endchoice
-config MACH_HALIBUT
- depends on ARCH_MSM
+choice
+ prompt "Default Timer"
+ default MSM7X00A_USE_GP_TIMER
+
+ config MSM7X00A_USE_GP_TIMER
+ bool "GP Timer"
+ help
+ Low resolution timer that allows power collapse from idle.
+
+ config MSM7X00A_USE_DG_TIMER
+ bool "DG Timer"
+ help
+ High resolution timer.
+endchoice
+
+choice
+ prompt "Suspend sleep mode"
+ default MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND
+ help
+ Allows overriding the sleep mode used. Leave at power
+ collapse suspend unless the arm9 image has problems.
+
+ config MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND
+ bool "Power collapse suspend"
+ help
+ Lowest sleep state. Returns through reset vector.
+
+ config MSM7X00A_SLEEP_MODE_POWER_COLLAPSE
+ bool "Power collapse"
+ help
+ Sleep state that returns through reset vector.
+
+ config MSM7X00A_SLEEP_MODE_APPS_SLEEP
+ bool "Apps Sleep"
+
+ config MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT
+ bool "Ramp down cpu clock and wait for interrupt"
+
+ config MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT
+ bool "Wait for interrupt"
+endchoice
+
+config MSM7X00A_SLEEP_MODE
+ int
+ default 0 if MSM7X00A_SLEEP_MODE_POWER_COLLAPSE_SUSPEND
+ default 1 if MSM7X00A_SLEEP_MODE_POWER_COLLAPSE
+ default 2 if MSM7X00A_SLEEP_MODE_APPS_SLEEP
+ default 3 if MSM7X00A_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT
+ default 4 if MSM7X00A_SLEEP_WAIT_FOR_INTERRUPT
+
+choice
+ prompt "Idle sleep mode"
+ default MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE
+ help
+ Allows overriding the sleep mode used from idle. Leave at power
+ collapse suspend unless the arm9 image has problems.
+
+ config MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND
+ bool "Power collapse suspend"
+ help
+ Lowest sleep state. Returns through reset vector.
+
+ config MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE
+ bool "Power collapse"
+ help
+ Sleep state that returns through reset vector.
+
+ config MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP
+ bool "Apps Sleep"
+
+ config MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT
+ bool "Ramp down cpu clock and wait for interrupt"
+
+ config MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT
+ bool "Wait for interrupt"
+endchoice
+
+config MSM7X00A_IDLE_SLEEP_MODE
+ int
+ default 0 if MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE_SUSPEND
+ default 1 if MSM7X00A_IDLE_SLEEP_MODE_POWER_COLLAPSE
+ default 2 if MSM7X00A_IDLE_SLEEP_MODE_APPS_SLEEP
+ default 3 if MSM7X00A_IDLE_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT
+ default 4 if MSM7X00A_IDLE_SLEEP_WAIT_FOR_INTERRUPT
+
+config MSM7X00A_IDLE_SLEEP_MIN_TIME
+ int "Minimum idle time before sleep"
+ default 20000000
+ help
+ Minimum idle time in nanoseconds before entering low power mode.
+
+config MSM7X00A_IDLE_SPIN_TIME
+ int "Idle spin time before cpu ramp down"
+ default 80000
+ help
+ Spin time in nanoseconds before ramping down cpu clock and entering
+ any low power state.
+
+menuconfig MSM_IDLE_STATS
+ bool "Collect idle statistics"
default y
- bool "Halibut Board (QCT SURF7201A)"
help
- Support for the Qualcomm SURF7201A eval board.
+ Collect idle statistics and export them in proc/msm_pm_stats.
-config MACH_TROUT
+if MSM_IDLE_STATS
+
+config MSM_IDLE_STATS_FIRST_BUCKET
+ int "First bucket time"
+ default 62500
+ help
+ Upper time limit in nanoseconds of first bucket.
+
+config MSM_IDLE_STATS_BUCKET_SHIFT
+ int "Bucket shift"
+ default 2
+
+config MSM_IDLE_STATS_BUCKET_COUNT
+ int "Bucket count"
+ default 10
+
+config MSM_SUSPEND_STATS_FIRST_BUCKET
+ int "First bucket time for suspend"
+ default 1000000000
+ help
+ Upper time limit in nanoseconds of first bucket of the
+ histogram. This is for collecting statistics on suspend.
+
+endif # MSM_IDLE_STATS
+
+config MSM_JTAG_V7
+ depends on CPU_V7
+ default y if DEBUG_KERNEL
+ bool "JTAG debug support"
+ help
+ Add additional support for JTAG kernel debugging.
+
+config HTC_HEADSET
+ tristate "HTC 2 Wire detection driver"
+ default n
+ help
+ Provides support for detecting HTC 2 wire devices, such as wired
+ headset, on the trout platform. Can be used with the msm serial
+ debugger, but not with serial console.
+
+config TROUT_BATTCHG
+ depends on MACH_TROUT && POWER_SUPPLY
default y
- bool "HTC Dream (aka trout)"
+ bool "Trout battery / charger driver"
+
+config HTC_PWRSINK
+ depends on MSM_SMD
+ default n
+ bool "HTC Power Sink Driver"
+
+config QSD_SVS
+ bool "QSD Static Voltage Scaling"
+ depends on (MACH_QSD8X50_SURF || MACH_QSD8X50_FFA || MACH_QSD8X50_COMET)
+ default y
+ select TPS65023
help
- Support for the HTC Dream, T-Mobile G1, Android ADP1 devices.
+ Enables static voltage scaling using the TPS65023 PMIC.
+
+config QSD_PMIC_DEFAULT_DCDC1
+ int "PMIC default output voltage"
+ depends on (MACH_QSD8X50_SURF || MACH_QSD8X50_FFA || MACH_QSD8X50_COMET)
+ default 1250
+ help
+ This is the PMIC voltage at Linux kernel boot.
+
+config MSM_FIQ_SUPPORT
+ default y
+ bool "Enable installation of an FIQ handler."
+
+config MSM_SMD
+ default y
+ bool "MSM Shared Memory Driver (SMD)"
+ help
+ Support for the shared memory interface between the apps
+ processor and the baseband processor. Provides access to
+ the "shared heap", as well as virtual serial channels
+ used to communicate with various services on the baseband
+ processor.
+
+choice
+ prompt "MSM Shared memory interface version"
+ depends on MSM_SMD
+ default MSM_SMD_PKG3 if ARCH_MSM_ARM11
+ default MSM_SMD_PKG4 if ARCH_MSM_SCORPION
+
+ config MSM_SMD_PKG3
+ bool "Package 3"
+
+ config MSM_SMD_PKG4
+ bool "Package 4"
+endchoice
+
+config MSM_SMD_DEBUG
+ depends on MSM_SMD
+ default y
+ bool "MSM SMD debug support"
+ help
+ Support for debugging the SMD for communication
+ between the ARM9 and ARM11
+
+config MSM_N_WAY_SMD
+ depends on (MSM_SMD && (ARCH_MSM_SCORPION || ARCH_MSM7X27))
+ default y
+ bool "MSM N-WAY SMD support"
+ help
+ Supports APPS-QDSP SMD communication along with
+ normal APPS-MODEM SMD communication.
+
+config MSM_N_WAY_SMSM
+ depends on (MSM_SMD && (ARCH_MSM_SCORPION || ARCH_MSM7X27))
+ default y
+ bool "MSM N-WAY SMSM support"
+ help
+ Supports APPS-QDSP SMSM communication along with
+ normal APPS-MODEM SMSM communication.
+
+config MSM_RESET_MODEM
+ tristate "Reset Modem Driver"
+ depends on MSM_SMD
+ default m
+ help
+ Allows the user to reset the modem through a device node.
+
+config MSM_SMD_LOGGING
+ depends on MSM_SMD
+ default y
+ bool "MSM Shared Memory Logger"
+ help
+ This option exposes the shared memory logger at /dev/smem_log
+ and a debugfs node named smem_log.
+
+ If in doubt, say yes.
+
+config MSM_SMD_NMEA
+ bool "NMEA GPS Driver"
+ depends on MSM_SMD
+ default y
+ help
+ Enable this to support the NMEA GPS device.
+
+ If in doubt, say yes.
+
+config MSM_SMD_TTY
+ bool "SMD TTY Driver"
+ depends on MSM_SMD
+ default y
+ help
+ Provides TTY interfaces to interact with the modem.
+
+ If in doubt, say yes.
+
+config MSM_SMD_QMI
+ bool "SMD QMI Driver"
+ depends on MSM_SMD
+ default y
+ help
+ Manages network data connections.
+
+ If in doubt, say yes.
+
+config MSM_SMD_CTL
+ bool "SMD Control Driver"
+ depends on MSM_SMD
+ default y
+ help
+ Provides a binary SMD non-muxed control port interface.
+
+ If in doubt, say yes.
+
+config MSM_ONCRPCROUTER
+ depends on MSM_SMD
+ default y
+ bool "MSM ONCRPC router support"
+ help
+ Support for the MSM ONCRPC router for communication between
+ the ARM9 and ARM11
+
+config MSM_ONCRPCROUTER_DEBUG
+ depends on MSM_ONCRPCROUTER
+ default y
+ bool "MSM debug ONCRPC router support"
+ help
+ Support for debugging the ONCRPC router for communication
+ between the ARM9 and ARM11
+
+config MSM_RPCSERVERS
+ depends on MSM_ONCRPCROUTER
+ select RTC_HCTOSYS
+ default y
+ bool "Kernel side RPC server bundle"
+ help
+ none
+
+config MSM_RPC_PING
+ depends on MSM_ONCRPCROUTER && DEBUG_FS
+ default m
+ bool "MSM rpc ping"
+ help
+ Implements MSM rpc ping test module.
+
+config MSM_RPC_PROC_COMM_TEST
+ depends on DEBUG_FS
+ default m
+ bool "MSM rpc proc comm test"
+ help
+ Implements MSM rpc proc comm test module.
+
+config MSM_RPC_OEM_RAPI
+ depends on MSM_ONCRPCROUTER
+ default m
+ bool "MSM oem rapi"
+ help
+ Implements MSM oem rapi client module.
+
+config MSM_RPCSERVER_HANDSET
+ depends on MSM_ONCRPCROUTER
+ default y
+ bool "Handset events RPC server"
+ help
+ Support for receiving handset events like headset detect,
+ headset switch and clamshell state.
+
+config MSM_DALRPC
+ bool "DAL RPC support"
+ depends on ARCH_MSM_SCORPION
+ default y
+ help
+ Supports RPC calls to DAL devices on remote processor cores.
+
+config MSM_DALRPC_TEST
+ tristate "DAL RPC test module"
+ depends on (MSM_DALRPC && DEBUG_FS)
+ default m
+ help
+ Exercises DAL RPC calls to QDSP6.
+
+config MSM_CPU_FREQ_SCREEN
+ depends on (HAS_EARLYSUSPEND && !CPU_FREQ_MSM)
+ default n
+ bool "Enable simple cpu frequency scaling"
+ help
+ Simple cpufreq scaling based on screen ON/OFF.
+
+if MSM_CPU_FREQ_SCREEN
+
+config MSM_CPU_FREQ_SCREEN_OFF
+ int "Screen off cpu frequency"
+ default 245760
+
+config MSM_CPU_FREQ_SCREEN_ON
+ int "Screen on cpu frequency"
+ default 384000
+
+endif # MSM_CPU_FREQ_SCREEN
+
+if CPU_FREQ_MSM
+
+config MSM_CPU_FREQ_SET_MIN_MAX
+ bool "Set Min/Max CPU frequencies."
+ default n
+ help
+ Allow setting min and max CPU frequencies. Sysfs can be used
+ to override these values.
+
+config MSM_CPU_FREQ_MAX
+ int "Max CPU Frequency"
+ depends on MSM_CPU_FREQ_SET_MIN_MAX
+ default 384000
+
+config MSM_CPU_FREQ_MIN
+ int "Min CPU Frequency"
+ depends on MSM_CPU_FREQ_SET_MIN_MAX
+ default 245760
+
+endif # CPU_FREQ_MSM
+
+config MSM_CPU_AVS
+ bool "Enable Adaptive Voltage Scaling (AVS)"
+ depends on (ARCH_MSM_SCORPION && QSD_SVS)
+ depends on ARCH_QSD8X50
+ default y
+ help
+ This enables the Adaptive Voltage Scaling feature of
+ Qualcomm ARMv7 CPUs. It adjusts the voltage for each frequency
+ based on feedback from three ring oscillators in the CPU.
+
+config MSM_VREG_SWITCH_INVERTED
+ bool "Reverse vreg switch polarity"
+ default n
+ help
+ Reverses the enable and disable for vreg switch.
+
+config MSM_DMA_TEST
+ tristate "MSM DMA test module"
+ default m
+ help
+ Intended to be compiled as a module. Provides a device node
+ and ioctls for testing the MSM dma system.
+
+config WIFI_CONTROL_FUNC
+ bool "Enable WiFi control function abstraction"
+ help
+ Enables Power/Reset/Carddetect function abstraction
+
+config WIFI_MEM_PREALLOC
+ depends on WIFI_CONTROL_FUNC
+ bool "Preallocate memory for WiFi buffers"
+ help
+ Preallocates memory buffers for WiFi driver
+
+config QSD_AUDIO
+ bool "QSD audio"
+ depends on (ARCH_MSM_SCORPION && MSM_DALRPC)
+ default y
+ help
+ Provides PCM, MP3, and AAC audio playback.
+
+config AUDIO_AAC_PLUS
+ depends on (MSM_ADSP || QSD_AUDIO)
+ bool "AAC+ Audio"
+ default y
+ help
+ Provides AAC+ decoding
+
+config AUDIO_ENHANCED_AAC_PLUS
+ depends on AUDIO_AAC_PLUS
+ bool "Enhanced AAC+ Audio"
+ default y
+ help
+ Provides Enhanced AAC+ decoding
+
+config SURF_FFA_GPIO_KEYPAD
+ bool "MSM SURF/FFA GPIO keypad"
+ depends on INPUT_GPIO = "y"
+ default y
+ help
+ Select if the GPIO keypad is attached.
+
+config CLOCK_BASED_SLEEP_LIMIT
+ default y
+ bool "Set sleep limitation based on clock usage"
+ help
+ The application processor checks for enabled clocks and
+ decides accordingly the sleep limitation which it informs
+ the modem to use.
+
+config MSM_ADM_OFF_AT_POWER_COLLAPSE
+ bool "Turn off ADM clock during power collapse"
+ default n
+ help
+ The application processor turns off the ADM clock before
+ entering power collapse and turns it back on after exiting
+ power collapse.
+
+config MSM_SLEEP_TIME_OVERRIDE
+ bool "Allow overriding suspend/sleep time with PM module parameter"
+ default y
+ help
+ Enable the module parameter sleep_time_override. Specified
+ in units of seconds, it overwrites the normal sleep time of
+ suspend. The feature is required for automated power management
+ testing.
+
+choice
+ prompt "Power management timeout action"
+ default MSM_PM_TIMEOUT_HALT
+ help
+ Selects the Application Processor's action when Power Management
+ times out waiting for Modem's handshake.
+
+ config MSM_PM_TIMEOUT_HALT
+ bool "Halt the Application Processor"
+
+ config MSM_PM_TIMEOUT_RESET_MODEM
+ bool "Reset the Modem Processor"
+
+ config MSM_PM_TIMEOUT_RESET_CHIP
+ bool "Reset the entire chip"
+endchoice
endif
diff --git a/arch/arm/mach-msm/Makefile b/arch/arm/mach-msm/Makefile
index 91e6f5c95dc1..6d7937fa50ce 100644
--- a/arch/arm/mach-msm/Makefile
+++ b/arch/arm/mach-msm/Makefile
@@ -1,9 +1,78 @@
-obj-y += io.o idle.o irq.o timer.o dma.o
+obj-y += io.o irq.o timer.o dma.o
obj-y += devices.o
obj-y += proc_comm.o
+obj-y += vreg.o mpp.o
obj-y += vreg.o
-obj-y += clock.o clock-7x01a.o
+obj-y += clock.o clock-pcom.o
+obj-y += gpio.o generic_gpio.o
+obj-y += nand_partitions.o
+obj-y += remote_spinlock.o modem_notifier.o
+obj-y += rpc_hsusb.o
+obj-y += socinfo.o
+obj-y += cpufreq.o
+obj-y += nohlt.o
+obj-y += pmic.o
+obj-y += internal_power_rail.o
-obj-$(CONFIG_MACH_HALIBUT) += board-halibut.o
+obj-$(CONFIG_ARCH_MSM_ARM11) += acpuclock.o
+obj-$(CONFIG_ARCH_MSM_SCORPION) += acpuclock-8x50.o
+obj-$(CONFIG_MSM_CPU_AVS) += avs.o avs_hw.o
+obj-$(CONFIG_CPU_V6) += idle-v6.o
+obj-$(CONFIG_CPU_V7) += idle-v7.o
+obj-$(CONFIG_MSM_JTAG_V7) += jtag-v7.o
+
+obj-$(CONFIG_ARCH_QSD8X50) += sirc.o
+obj-$(CONFIG_MSM_FIQ_SUPPORT) += fiq_glue.o
+obj-$(CONFIG_MACH_TROUT) += board-trout-rfkill.o
+obj-$(CONFIG_MSM_SMD) += smd.o
+obj-$(CONFIG_MSM_SMD_LOGGING) += smem_log.o
+obj-$(CONFIG_MSM_SMD_TTY) += smd_tty.o
+obj-$(CONFIG_MSM_SMD_QMI) += smd_qmi.o
+obj-$(CONFIG_MSM_SMD_CTL) += smd_ctl2.o
+obj-$(CONFIG_MSM_SMD_NMEA) += smd_nmea.o
+obj-$(CONFIG_DEBUG_FS) += pmic_debugfs.o
+obj-$(CONFIG_MSM_RESET_MODEM) += reset_modem.o
+obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter.o
+obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter_device.o
+obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter_servers.o
+obj-$(CONFIG_MSM_ONCRPCROUTER) += smd_rpcrouter_clients.o
+obj-$(CONFIG_MSM_RPC_PING) += ping_mdm_rpc_client.o
+obj-$(CONFIG_MSM_RPC_PROC_COMM_TEST) += proc_comm_test.o
+obj-$(CONFIG_MSM_RPC_OEM_RAPI) += oem_rapi_client.o
+obj-$(CONFIG_MSM_RPCSERVERS) += rpc_server_dog_keepalive.o
+obj-$(CONFIG_MSM_DALRPC) += dal.o
+obj-$(CONFIG_MSM_DALRPC_TEST) += dal_remotetest.o
+obj-$(CONFIG_MSM_RPCSERVER_HANDSET) += rpc_server_handset.o
+ifdef CONFIG_MSM_N_WAY_SMSM
+ obj-$(CONFIG_PM) += pm2.o
+else
+ obj-$(CONFIG_PM) += pm.o
+endif
+obj-$(CONFIG_MSM_DMA_TEST) += dma_test.o
+obj-$(CONFIG_SURF_FFA_GPIO_KEYPAD) += keypad-surf-ffa.o
obj-$(CONFIG_MACH_TROUT) += board-dream.o
+obj-$(CONFIG_ARCH_MSM7X01A) += board-halibut.o
+obj-$(CONFIG_MACH_TROUT) += board-trout.o board-trout-gpio.o
+obj-$(CONFIG_MACH_TROUT) += board-trout-keypad.o board-trout-panel.o
+obj-$(CONFIG_MACH_TROUT) += htc_akm_cal.o htc_wifi_nvs.o htc_acoustic.o
+obj-$(CONFIG_MACH_TROUT) += board-trout-mmc.o board-trout-wifi.o
+obj-$(CONFIG_MACH_QSD8X50_SURF) += board-qsd8x50.o
+obj-$(CONFIG_MACH_QSD8X50_FFA) += board-qsd8x50.o
+obj-$(CONFIG_MACH_QSD8X50_COMET) += board-comet.o
+obj-$(CONFIG_TROUT_H2W) += board-trout-h2w.o
+obj-$(CONFIG_TROUT_BATTCHG) += htc_battery.o
+obj-$(CONFIG_TROUT_PWRSINK) += htc_pwrsink.o
+obj-$(CONFIG_ARCH_MSM7X27) += board-msm7x27.o
+obj-$(CONFIG_ARCH_MSM7X30) += board-msm7x30.o clock-7x30.o
+
+obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire.o board-sapphire-gpio.o
+obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-keypad.o board-sapphire-panel.o
+obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-mmc.o board-sapphire-wifi.o
+obj-$(CONFIG_MACH_SAPPHIRE) += board-sapphire-rfkill.o msm_vibrator.o
+
+obj-$(CONFIG_TROUT_BATTCHG) += htc_battery.o
+
+obj-$(CONFIG_HTC_PWRSINK) += htc_pwrsink.o
+obj-$(CONFIG_HTC_HEADSET) += htc_headset.o
+obj-$(CONFIG_PMIC8058) += pmic8058-gpio.o pmic8058-mpp.o
diff --git a/arch/arm/mach-msm/Makefile.boot b/arch/arm/mach-msm/Makefile.boot
index 24dfbf8c07c4..9acaeef05ea9 100644
--- a/arch/arm/mach-msm/Makefile.boot
+++ b/arch/arm/mach-msm/Makefile.boot
@@ -1,3 +1,21 @@
+ifeq ($(CONFIG_ARCH_MSM_SCORPION),y)
+ifeq ($(CONFIG_MSM_STACKED_MEMORY), y)
+ zreladdr-y := 0x20008000
+params_phys-y := 0x20000100
+initrd_phys-y := 0x24000000
+else # !CONFIG_MSM_STACKED_MEMORY
+ zreladdr-y := 0x00208000
+params_phys-y := 0x00200100
+initrd_phys-y := 0x01200000
+endif # CONFIG_MSM_STACKED_MEMORY
+else # !CONFIG_ARCH_MSM_SCORPION
+ifeq ($(CONFIG_MSM_STACKED_MEMORY), y)
zreladdr-y := 0x10008000
params_phys-y := 0x10000100
initrd_phys-y := 0x10800000
+else # !CONFIG_MSM_STACKED_MEMORY
+ zreladdr-y := 0x00208000
+params_phys-y := 0x00200100
+initrd_phys-y := 0x0A000000
+endif # CONFIG_MSM_STACKED_MEMORY
+endif # CONFIG_ARCH_MSM_SCORPION
diff --git a/arch/arm/mach-msm/acpuclock-8x50.c b/arch/arm/mach-msm/acpuclock-8x50.c
new file mode 100644
index 000000000000..dde6900b0f0a
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock-8x50.c
@@ -0,0 +1,723 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/cpufreq.h>
+
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+
+#include "acpuclock.h"
+#include "avs.h"
+#include "clock.h"
+
+#define SHOT_SWITCH 4
+#define HOP_SWITCH 5
+#define SIMPLE_SLEW 6
+#define COMPLEX_SLEW 7
+
+#define SPSS_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
+#define SPSS_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
+
+/* Scorpion PLL registers */
+#define SCPLL_CTL_ADDR (MSM_SCPLL_BASE + 0x4)
+#define SCPLL_STATUS_ADDR (MSM_SCPLL_BASE + 0x18)
+#define SCPLL_FSM_CTL_EXT_ADDR (MSM_SCPLL_BASE + 0x10)
+
+enum {
+ ACPU_PLL_TCXO = -1,
+ ACPU_PLL_0 = 0,
+ ACPU_PLL_1,
+ ACPU_PLL_2,
+ ACPU_PLL_3,
+ ACPU_PLL_END,
+};
+
+struct clkctl_acpu_speed {
+ unsigned int use_for_scaling;
+ unsigned int acpuclk_khz;
+ int pll;
+ unsigned int acpuclk_src_sel;
+ unsigned int acpuclk_src_div;
+ unsigned int ahbclk_khz;
+ unsigned int ahbclk_div;
+ unsigned int axiclk_khz;
+ unsigned int sc_core_src_sel_mask;
+ unsigned int sc_l_value;
+ int vdd;
+ unsigned long lpj; /* loops_per_jiffy */
+};
+
+struct clkctl_acpu_speed acpu_freq_tbl[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 0, 0, 14000, 0, 0, 1000},
+ { 0, 128000, ACPU_PLL_1, 1, 5, 0, 0, 14000, 2, 0, 1000},
+ { 1, 245760, ACPU_PLL_0, 4, 0, 0, 0, 29000, 0, 0, 1000},
+ { 1, 384000, ACPU_PLL_3, 0, 0, 0, 0, 58000, 1, 0xA, 1000},
+ { 0, 422400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xB, 1000},
+ { 0, 460800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xC, 1000},
+ { 0, 499200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xD, 1025},
+ { 0, 537600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xE, 1050},
+ { 1, 576000, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0xF, 1050},
+ { 0, 614400, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x10, 1075},
+ { 0, 652800, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x11, 1100},
+ { 0, 691200, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x12, 1125},
+ { 0, 729600, ACPU_PLL_3, 0, 0, 0, 0, 117000, 1, 0x13, 1150},
+ { 1, 768000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x14, 1150},
+ { 0, 806400, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x15, 1175},
+ { 0, 844800, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x16, 1200},
+ { 0, 883200, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x17, 1225},
+ { 0, 921600, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x18, 1250},
+ { 0, 960000, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x19, 1250},
+ { 1, 998400, ACPU_PLL_3, 0, 0, 0, 0, 128000, 1, 0x1A, 1250},
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
+
+#ifdef CONFIG_CPU_FREQ_MSM
+static struct cpufreq_frequency_table freq_table[20];
+
+static void __init cpufreq_table_init(void)
+{
+ unsigned int i;
+ unsigned int freq_cnt = 0;
+
+ /* Construct the freq_table table from acpu_freq_tbl since the
+ * freq_table values need to match frequencies specified in
+ * acpu_freq_tbl and acpu_freq_tbl needs to be fixed up during init.
+ */
+ for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0
+ && freq_cnt < ARRAY_SIZE(freq_table)-1; i++) {
+ if (acpu_freq_tbl[i].use_for_scaling) {
+ freq_table[freq_cnt].index = freq_cnt;
+ freq_table[freq_cnt].frequency
+ = acpu_freq_tbl[i].acpuclk_khz;
+ freq_cnt++;
+ }
+ }
+
+ /* freq_table not big enough to store all usable freqs. */
+ BUG_ON(acpu_freq_tbl[i].acpuclk_khz != 0);
+
+ freq_table[freq_cnt].index = freq_cnt;
+ freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;
+
+ pr_info("%d scaling frequencies supported.\n", freq_cnt);
+}
+#endif
+
+struct clock_state {
+ struct clkctl_acpu_speed *current_speed;
+ struct mutex lock;
+ uint32_t acpu_switch_time_us;
+ uint32_t max_speed_delta_khz;
+ uint32_t vdd_switch_time_us;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
+ unsigned int max_vdd;
+ int (*acpu_set_vdd) (int mvolts);
+};
+
+static struct clock_state drv_state = { 0 };
+
+unsigned long clk_get_max_axi_khz(void)
+{
+ return 128000;
+}
+EXPORT_SYMBOL(clk_get_max_axi_khz);
+
+static void scpll_set_freq(uint32_t lval, unsigned freq_switch)
+{
+ uint32_t regval;
+
+ if (lval > 33)
+ lval = 33;
+ if (lval < 10)
+ lval = 10;
+
+ /* wait for any calibrations or frequency switches to finish */
+ while (readl(SCPLL_STATUS_ADDR) & 0x3)
+ ;
+
+ /* write the new L val and switch mode */
+ regval = readl(SCPLL_FSM_CTL_EXT_ADDR);
+ regval &= ~(0x3f << 3);
+ regval |= (lval << 3);
+ if (freq_switch == SIMPLE_SLEW)
+ regval |= (0x1 << 9);
+
+ regval &= ~(0x3 << 0);
+ regval |= (freq_switch << 0);
+ writel(regval, SCPLL_FSM_CTL_EXT_ADDR);
+
+ dmb();
+
+ /* put in normal mode */
+ regval = readl(SCPLL_CTL_ADDR);
+ regval |= 0x7;
+ writel(regval, SCPLL_CTL_ADDR);
+
+ dmb();
+
+ /* wait for frequency switch to finish */
+ while (readl(SCPLL_STATUS_ADDR) & 0x1)
+ ;
+
+ /* status bit seems to clear early, using
+ * 100us to handle the worst case. */
+ udelay(100);
+}
+
+static void scpll_apps_enable(bool state)
+{
+ uint32_t regval;
+
+ /* Wait for any frequency switches to finish. */
+ while (readl(SCPLL_STATUS_ADDR) & 0x1)
+ ;
+
+ /* put the pll in standby mode */
+ regval = readl(SCPLL_CTL_ADDR);
+ regval &= ~(0x7);
+ regval |= (0x2);
+ writel(regval, SCPLL_CTL_ADDR);
+
+ dmb();
+
+ if (state) {
+ /* put the pll in normal mode */
+ regval = readl(SCPLL_CTL_ADDR);
+ regval |= (0x7);
+ writel(regval, SCPLL_CTL_ADDR);
+ udelay(200);
+ } else {
+ /* put the pll in power down mode */
+ regval = readl(SCPLL_CTL_ADDR);
+ regval &= ~(0x7);
+ writel(regval, SCPLL_CTL_ADDR);
+ }
+ udelay(drv_state.vdd_switch_time_us);
+}
+
+static void scpll_init(void)
+{
+ uint32_t regval;
+#define L_VAL_384MHZ 0xA
+#define L_VAL_768MHZ 0x14
+
+ /* power down scpll */
+ writel(0x0, SCPLL_CTL_ADDR);
+
+ dmb();
+
+ /* set bypassnl, put into standby */
+ writel(0x00400002, SCPLL_CTL_ADDR);
+
+ /* set bypassnl, reset_n, full calibration */
+ writel(0x00600004, SCPLL_CTL_ADDR);
+
+ /* Ensure register write to initiate calibration has taken
+ effect before reading status flag */
+ dmb();
+
+ /* wait for cal_all_done */
+ while (readl(SCPLL_STATUS_ADDR) & 0x2)
+ ;
+
+ /* Start: Set of experimentally derived steps
+ * to work around a h/w bug. */
+
+ /* Put the pll in normal mode */
+ scpll_apps_enable(1);
+
+ /* SHOT switch to 384 MHz */
+ regval = readl(SCPLL_FSM_CTL_EXT_ADDR);
+ regval &= ~(0x3f << 3);
+ regval |= (L_VAL_384MHZ << 3);
+ writel(regval, SCPLL_FSM_CTL_EXT_ADDR);
+
+ regval &= ~0x7;
+ regval |= SHOT_SWITCH;
+ writel(regval, SCPLL_FSM_CTL_EXT_ADDR);
+
+ /* Wait for frequency switch to finish */
+ while (readl(SCPLL_STATUS_ADDR) & 0x1)
+ ;
+
+ /* Status bit seems to clear early, using
+ * 800 microseconds for the worst case. */
+ udelay(800);
+
+ /* HOP switch to 768 MHz. */
+ regval = readl(SCPLL_FSM_CTL_EXT_ADDR);
+ regval &= ~(0x3f << 3);
+ regval |= (L_VAL_768MHZ << 3);
+ writel(regval, SCPLL_FSM_CTL_EXT_ADDR);
+
+ regval &= ~0x7;
+ regval |= HOP_SWITCH;
+ writel(regval, SCPLL_FSM_CTL_EXT_ADDR);
+
+ /* Wait for frequency switch to finish */
+ while (readl(SCPLL_STATUS_ADDR) & 0x1)
+ ;
+
+ /* Status bit seems to clear early, using
+ * 100 microseconds for the worst case. */
+ udelay(100);
+
+ /* End: Work around for h/w bug */
+
+ /* Power down scpll */
+ scpll_apps_enable(0);
+}
+
+static void config_pll(struct clkctl_acpu_speed *s)
+{
+ uint32_t regval;
+
+ if (s->pll == ACPU_PLL_3)
+ scpll_set_freq(s->sc_l_value, HOP_SWITCH);
+ /* Configure the PLL divider mux if we plan to use it. */
+ else if (s->sc_core_src_sel_mask == 0) {
+ /* get the current clock source selection */
+ regval = readl(SPSS_CLK_SEL_ADDR) & 0x1;
+
+ /* configure the other clock source, then switch to it,
+ * using the glitch free mux */
+ switch (regval) {
+ case 0x0:
+ regval = readl(SPSS_CLK_CNTL_ADDR);
+ regval &= ~(0x7 << 4 | 0xf);
+ regval |= (s->acpuclk_src_sel << 4);
+ regval |= (s->acpuclk_src_div << 0);
+ writel(regval, SPSS_CLK_CNTL_ADDR);
+
+ regval = readl(SPSS_CLK_SEL_ADDR);
+ regval |= 0x1;
+ writel(regval, SPSS_CLK_SEL_ADDR);
+ break;
+
+ case 0x1:
+ regval = readl(SPSS_CLK_CNTL_ADDR);
+ regval &= ~(0x7 << 12 | 0xf << 8);
+ regval |= (s->acpuclk_src_sel << 12);
+ regval |= (s->acpuclk_src_div << 8);
+ writel(regval, SPSS_CLK_CNTL_ADDR);
+
+ regval = readl(SPSS_CLK_SEL_ADDR);
+ regval &= ~0x1;
+ writel(regval, SPSS_CLK_SEL_ADDR);
+ break;
+ }
+ dmb();
+ }
+
+ regval = readl(SPSS_CLK_SEL_ADDR);
+ regval &= ~(0x3 << 1);
+ regval |= (s->sc_core_src_sel_mask << 1);
+ writel(regval, SPSS_CLK_SEL_ADDR);
+}
+
+void config_switching_pll(void)
+{
+ uint32_t regval;
+
+ /* Use AXI clock temporarily when we're changing
+ * scpll. PLL0 is faster, but it may not be available during
+ * early modem initialization, and we will only be using this
+ * a very short time (while scpll is reconfigured).
+ */
+
+ regval = readl(SPSS_CLK_SEL_ADDR);
+ regval &= ~(0x3 << 1);
+ regval |= (0x2 << 1);
+ writel(regval, SPSS_CLK_SEL_ADDR);
+}
+
+static int acpuclk_set_vdd_level(int vdd)
+{
+ if (drv_state.acpu_set_vdd)
+ return drv_state.acpu_set_vdd(vdd);
+ else {
+ /* Assume that the PMIC supports scaling the processor
+ * to its maximum frequency at its default voltage.
+ */
+ return 0;
+ }
+}
+
+int acpuclk_set_rate(unsigned long rate, enum setrate_reason reason)
+{
+ struct clkctl_acpu_speed *tgt_s, *strt_s;
+ int rc = 0;
+ int freq_index = 0;
+
+ if (reason == SETRATE_CPUFREQ)
+ mutex_lock(&drv_state.lock);
+
+ strt_s = drv_state.current_speed;
+
+ if (rate == (strt_s->acpuclk_khz * 1000))
+ goto out;
+
+ for (tgt_s = acpu_freq_tbl; tgt_s->acpuclk_khz != 0; tgt_s++) {
+ if (tgt_s->acpuclk_khz == (rate / 1000))
+ break;
+ freq_index++;
+ }
+
+ if (tgt_s->acpuclk_khz == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (reason == SETRATE_CPUFREQ) {
+#ifdef CONFIG_MSM_CPU_AVS
+ /* Notify avs before changing frequency */
+ rc = avs_adjust_freq(freq_index, 1);
+ if (rc) {
+ printk(KERN_ERR
+ "acpuclock: Unable to increase ACPU "
+ "vdd.\n");
+ goto out;
+ }
+#endif
+ /* Increase VDD if needed. */
+ if (tgt_s->vdd > strt_s->vdd) {
+ rc = acpuclk_set_vdd_level(tgt_s->vdd);
+ if (rc) {
+ printk(KERN_ERR
+ "acpuclock: Unable to increase ACPU "
+ "vdd.\n");
+ goto out;
+ }
+ }
+ }
+
+ if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
+ config_pll(tgt_s);
+ } else if (strt_s->pll != ACPU_PLL_3 && tgt_s->pll == ACPU_PLL_3) {
+ scpll_apps_enable(1);
+ config_pll(tgt_s);
+ } else if (strt_s->pll == ACPU_PLL_3 && tgt_s->pll != ACPU_PLL_3) {
+ config_pll(tgt_s);
+ scpll_apps_enable(0);
+ } else {
+ config_switching_pll();
+ config_pll(tgt_s);
+ }
+
+ /* Update the driver state with the new clock freq */
+ drv_state.current_speed = tgt_s;
+
+ /* Re-adjust lpj for the new clock speed. */
+ loops_per_jiffy = tgt_s->lpj;
+
+ /* Nothing else to do for SWFI. */
+ if (reason == SETRATE_SWFI)
+ goto out;
+
+ if (strt_s->axiclk_khz != tgt_s->axiclk_khz) {
+ rc = ebi1_clk_set_min_rate(CLKVOTE_ACPUCLK,
+ tgt_s->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+ }
+
+ /* Nothing else to do for power collapse */
+ if (reason == SETRATE_PC)
+ goto out;
+
+#ifdef CONFIG_MSM_CPU_AVS
+ /* notify avs after changing frequency */
+ rc = avs_adjust_freq(freq_index, 0);
+ if (rc)
+ printk(KERN_ERR
+ "acpuclock: Unable to drop ACPU vdd.\n");
+#endif
+
+ /* Drop VDD level if we can. */
+ if (tgt_s->vdd < strt_s->vdd) {
+ rc = acpuclk_set_vdd_level(tgt_s->vdd);
+ if (rc)
+ printk(KERN_ERR
+ "acpuclock: Unable to drop ACPU vdd.\n");
+ }
+out:
+ if (reason == SETRATE_CPUFREQ)
+ mutex_unlock(&drv_state.lock);
+ return rc;
+}
+
+static void __init acpuclk_init(void)
+{
+ struct clkctl_acpu_speed *speed;
+ uint32_t div, sel, regval;
+ int rc;
+
+ /* Determine the source of the Scorpion clock. */
+ regval = readl(SPSS_CLK_SEL_ADDR);
+ switch ((regval & 0x6) >> 1) {
+ case 0: /* raw source clock */
+ case 3: /* low jitter PLL1 (768Mhz) */
+ if (regval & 0x1) {
+ sel = ((readl(SPSS_CLK_CNTL_ADDR) >> 4) & 0x7);
+ div = ((readl(SPSS_CLK_CNTL_ADDR) >> 0) & 0xf);
+ } else {
+ sel = ((readl(SPSS_CLK_CNTL_ADDR) >> 12) & 0x7);
+ div = ((readl(SPSS_CLK_CNTL_ADDR) >> 8) & 0xf);
+ }
+
+ /* Find the matching clock rate. */
+ for (speed = acpu_freq_tbl; speed->acpuclk_khz != 0; speed++) {
+ if (speed->acpuclk_src_sel == sel &&
+ speed->acpuclk_src_div == div)
+ break;
+ }
+ break;
+
+ case 1: /* unbuffered scorpion pll (384Mhz to 998.4Mhz) */
+ sel = ((readl(SCPLL_FSM_CTL_EXT_ADDR) >> 3) & 0x3f);
+
+ /* Find the matching clock rate. */
+ for (speed = acpu_freq_tbl; speed->acpuclk_khz != 0; speed++) {
+ if (speed->sc_l_value == sel &&
+ speed->sc_core_src_sel_mask == 1)
+ break;
+ }
+ break;
+
+ case 2: /* AXI bus clock (128Mhz) */
+ default:
+ speed = &acpu_freq_tbl[4];
+ }
+
+ /* Initialize scpll only if it wasn't already initialized by the boot
+ * loader. If the CPU is already running on scpll, then the scpll was
+ * initialized by the boot loader. */
+ if (speed->pll != ACPU_PLL_3)
+ scpll_init();
+
+ if (speed->acpuclk_khz == 0) {
+ printk(KERN_WARNING "Warning - ACPU clock reports invalid "
+ "speed\n");
+ return;
+ }
+
+ drv_state.current_speed = speed;
+ rc = ebi1_clk_set_min_rate(CLKVOTE_ACPUCLK, speed->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+
+ printk(KERN_INFO "ACPU running at %d KHz\n", speed->acpuclk_khz);
+}
+
+unsigned long acpuclk_get_rate(void)
+{
+ return drv_state.current_speed->acpuclk_khz;
+}
+
+uint32_t acpuclk_get_switch_time(void)
+{
+ return drv_state.acpu_switch_time_us;
+}
+
+unsigned long acpuclk_power_collapse(void)
+{
+ int ret = acpuclk_get_rate();
+ acpuclk_set_rate(drv_state.power_collapse_khz, SETRATE_PC);
+ return ret * 1000;
+}
+
+unsigned long acpuclk_wait_for_irq(void)
+{
+ int ret = acpuclk_get_rate();
+ acpuclk_set_rate(drv_state.wait_for_irq_khz, SETRATE_SWFI);
+ return ret * 1000;
+}
+
+/* Spare register populated with efuse data on max ACPU freq. */
+#define CT_CSR_PHYS 0xA8700000
+#define TCSR_SPARE2_ADDR (ct_csr_base + 0x60)
+
+#define PLL0_M_VAL_ADDR (MSM_CLK_CTL_BASE + 0x308)
+
+static void __init acpu_freq_tbl_fixup(void)
+{
+ void __iomem *ct_csr_base;
+ uint32_t tcsr_spare2, pll0_m_val;
+ unsigned int max_acpu_khz, pll0_fixup;
+ unsigned int i;
+
+ ct_csr_base = ioremap(CT_CSR_PHYS, PAGE_SIZE);
+ BUG_ON(ct_csr_base == NULL);
+
+ tcsr_spare2 = readl(TCSR_SPARE2_ADDR);
+
+ /* Check if the register is supported and meaningful. */
+ if ((tcsr_spare2 & 0xF000) != 0xA000) {
+ pr_info("Efuse data on Max ACPU freq not present.\n");
+ goto skip_efuse_fixup;
+ }
+
+ switch (tcsr_spare2 & 0xF0) {
+ case 0x70:
+ max_acpu_khz = 768000;
+ break;
+ case 0x30:
+ case 0x00:
+ max_acpu_khz = 998400;
+ break;
+ case 0x10:
+ max_acpu_khz = 1267200;
+ break;
+ default:
+ pr_warning("Invalid efuse data (%x) on Max ACPU freq!\n",
+ tcsr_spare2);
+ goto skip_efuse_fixup;
+ }
+
+ pr_info("Max ACPU freq from efuse data is %d KHz\n", max_acpu_khz);
+
+ for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0; i++) {
+ if (acpu_freq_tbl[i].acpuclk_khz > max_acpu_khz) {
+ acpu_freq_tbl[i].acpuclk_khz = 0;
+ break;
+ }
+ }
+
+skip_efuse_fixup:
+ iounmap(ct_csr_base);
+ BUG_ON(drv_state.max_vdd == 0);
+
+ /* pll0_m_val will be 36 when PLL0 is run at 235MHz
+ * instead of the usual 245MHz. */
+ pll0_m_val = readl(PLL0_M_VAL_ADDR) & 0x7FFFF;
+ pll0_fixup = (pll0_m_val == 36);
+
+ for (i = 0; acpu_freq_tbl[i].acpuclk_khz != 0; i++) {
+ if (acpu_freq_tbl[i].pll == ACPU_PLL_0
+ && acpu_freq_tbl[i].acpuclk_khz == 245760
+ && pll0_fixup) {
+ acpu_freq_tbl[i].acpuclk_khz = 235930;
+ }
+ if (acpu_freq_tbl[i].vdd > drv_state.max_vdd) {
+ acpu_freq_tbl[i].acpuclk_khz = 0;
+ break;
+ }
+ }
+}
+
+/* Initalize the lpj field in the acpu_freq_tbl. */
+static void __init lpj_init(void)
+{
+ int i;
+ const struct clkctl_acpu_speed *base_clk = drv_state.current_speed;
+ for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) {
+ acpu_freq_tbl[i].lpj = cpufreq_scale(loops_per_jiffy,
+ base_clk->acpuclk_khz,
+ acpu_freq_tbl[i].acpuclk_khz);
+ }
+}
+
+#ifdef CONFIG_MSM_CPU_AVS
+static int __init acpu_avs_init(int (*set_vdd) (int), int khz)
+{
+ int i;
+ int freq_count = 0;
+ int freq_index = -1;
+
+ for (i = 0; acpu_freq_tbl[i].acpuclk_khz; i++) {
+ freq_count++;
+ if (acpu_freq_tbl[i].acpuclk_khz == khz)
+ freq_index = i;
+ }
+
+ return avs_init(set_vdd, freq_count, freq_index);
+}
+#endif
+
+void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata)
+{
+ mutex_init(&drv_state.lock);
+ drv_state.acpu_switch_time_us = clkdata->acpu_switch_time_us;
+ drv_state.max_speed_delta_khz = clkdata->max_speed_delta_khz;
+ drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us;
+ drv_state.power_collapse_khz = clkdata->power_collapse_khz;
+ drv_state.wait_for_irq_khz = clkdata->wait_for_irq_khz;
+ drv_state.max_vdd = clkdata->max_vdd;
+ drv_state.acpu_set_vdd = clkdata->acpu_set_vdd;
+
+ acpu_freq_tbl_fixup();
+ acpuclk_init();
+ lpj_init();
+#ifdef CONFIG_CPU_FREQ_MSM
+ cpufreq_table_init();
+ cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
+#endif
+#ifdef CONFIG_MSM_CPU_AVS
+ if (!acpu_avs_init(drv_state.acpu_set_vdd,
+ drv_state.current_speed->acpuclk_khz)) {
+ /* avs init successful. avs will handle voltage changes */
+ drv_state.acpu_set_vdd = NULL;
+ }
+#endif
+}
diff --git a/arch/arm/mach-msm/acpuclock.c b/arch/arm/mach-msm/acpuclock.c
new file mode 100644
index 000000000000..bcc8491ab342
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock.c
@@ -0,0 +1,915 @@
+/* arch/arm/mach-msm/acpuclock.c
+ *
+ * MSM architecture clock driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/sort.h>
+#include <linux/remote_spinlock.h>
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+#include <asm/mach-types.h>
+
+#include "proc_comm.h"
+#include "smd_private.h"
+#include "clock.h"
+#include "acpuclock.h"
+#include "socinfo.h"
+
+#define PERF_SWITCH_DEBUG 0
+#define PERF_SWITCH_STEP_DEBUG 0
+
+#define A11S_CLK_CNTL_ADDR (MSM_CSR_BASE + 0x100)
+#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
+#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
+#define PLLn_MODE(n) (MSM_CLK_CTL_BASE + 0x300 + 28 * (n))
+#define PLLn_L_VAL(n) (MSM_CLK_CTL_BASE + 0x304 + 28 * (n))
+
+enum {
+ ACPU_PLL_TCXO = -1,
+ ACPU_PLL_0 = 0,
+ ACPU_PLL_1,
+ ACPU_PLL_2,
+ ACPU_PLL_3,
+ ACPU_PLL_END,
+};
+
+struct clock_state
+{
+ struct clkctl_acpu_speed *current_speed;
+ struct mutex lock;
+ uint32_t acpu_switch_time_us;
+ uint32_t max_speed_delta_khz;
+ uint32_t vdd_switch_time_us;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
+ unsigned long max_axi_khz;
+};
+
+#define PLL_BASE 7
+
+struct shared_pll_control {
+ uint32_t version;
+ struct {
+ /* Denotes if the PLL is ON. Technically, this can be read
+ * directly from the PLL registers, but this feild is here,
+ * so let's use it.
+ */
+ uint32_t on;
+ /* One bit for each processor core. The application processor
+ * is allocated bit position 1. All other bits should be
+ * considered as votes from other processors.
+ */
+ uint32_t votes;
+ } pll[PLL_BASE + ACPU_PLL_END];
+};
+
+struct clkctl_acpu_speed {
+ unsigned int use_for_scaling;
+ unsigned int a11clk_khz;
+ int pll;
+ unsigned int a11clk_src_sel;
+ unsigned int a11clk_src_div;
+ unsigned int ahbclk_khz;
+ unsigned int ahbclk_div;
+ int vdd;
+ unsigned int axiclk_khz;
+ unsigned long lpj; /* loops_per_jiffy */
+/* Pointers in acpu_freq_tbl[] for max up/down steppings. */
+ struct clkctl_acpu_speed *down[3];
+ struct clkctl_acpu_speed *up[3];
+};
+
+static remote_spinlock_t pll_lock;
+static struct shared_pll_control *pll_control;
+static struct clock_state drv_state = { 0 };
+static struct clkctl_acpu_speed *acpu_freq_tbl;
+
+static void __init acpuclk_init(void);
+
+/*
+ * ACPU freq tables used for different PLLs frequency combinations. The
+ * correct table is selected during init.
+ *
+ * Table stepping up/down entries are calculated during boot to choose the
+ * largest frequency jump that's less than max_speed_delta_khz on each PLL.
+ */
+
+/* 7x01/7x25 normal with GSM capable modem */
+static struct clkctl_acpu_speed pll0_245_pll1_768_pll2_1056[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 30720 },
+ { 1, 122880, ACPU_PLL_0, 4, 1, 61440, 1, 3, 61440 },
+ { 0, 128000, ACPU_PLL_1, 1, 5, 64000, 1, 3, 61440 },
+ { 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 3, 61440 },
+ { 1, 245760, ACPU_PLL_0, 4, 0, 81920, 2, 4, 61440 },
+ { 1, 256000, ACPU_PLL_1, 1, 2, 128000, 1, 5, 128000 },
+ { 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 5, 128000 },
+ { 1, 384000, ACPU_PLL_1, 1, 1, 128000, 2, 6, 128000 },
+ { 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 7, 128000 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+/* 7x01/7x25 normal with CDMA-only modem */
+static struct clkctl_acpu_speed pll0_196_pll1_768_pll2_1056[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 24576 },
+ { 1, 98304, ACPU_PLL_0, 4, 1, 49152, 1, 3, 24576 },
+ { 0, 128000, ACPU_PLL_1, 1, 5, 64000, 1, 3, 24576 },
+ { 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 3, 24576 },
+ { 1, 196608, ACPU_PLL_0, 4, 0, 65536, 2, 4, 24576 },
+ { 1, 256000, ACPU_PLL_1, 1, 2, 128000, 1, 5, 128000 },
+ { 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 5, 128000 },
+ { 1, 384000, ACPU_PLL_1, 1, 1, 128000, 2, 6, 128000 },
+ { 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 7, 128000 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+/* 7x01/7x25 turbo with GSM capable modem */
+static struct clkctl_acpu_speed pll0_245_pll1_960_pll2_1056[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 30720 },
+ { 0, 120000, ACPU_PLL_1, 1, 7, 60000, 1, 3, 61440 },
+ { 1, 122880, ACPU_PLL_0, 4, 1, 61440, 1, 3, 61440 },
+ { 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 3, 61440 },
+ { 1, 245760, ACPU_PLL_0, 4, 0, 81920, 2, 4, 61440 },
+ { 1, 320000, ACPU_PLL_1, 1, 2, 107000, 2, 5, 120000 },
+ { 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 5, 120000 },
+ { 1, 480000, ACPU_PLL_1, 1, 1, 120000, 3, 6, 120000 },
+ { 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 7, 122880 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+/* 7x01/7x25 turbo with CDMA-only modem */
+static struct clkctl_acpu_speed pll0_196_pll1_960_pll2_1056[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 24576 },
+ { 1, 98304, ACPU_PLL_0, 4, 1, 49152, 1, 3, 24576 },
+ { 0, 120000, ACPU_PLL_1, 1, 7, 60000, 1, 3, 24576 },
+ { 0, 176000, ACPU_PLL_2, 2, 5, 88000, 1, 3, 24576 },
+ { 1, 196608, ACPU_PLL_0, 4, 0, 65536, 2, 4, 24576 },
+ { 1, 320000, ACPU_PLL_1, 1, 2, 107000, 2, 5, 120000 },
+ { 0, 352000, ACPU_PLL_2, 2, 2, 88000, 3, 5, 120000 },
+ { 1, 480000, ACPU_PLL_1, 1, 1, 120000, 3, 6, 120000 },
+ { 1, 528000, ACPU_PLL_2, 2, 1, 132000, 3, 7, 120000 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+/* 7x27 normal with GSM capable modem */
+static struct clkctl_acpu_speed pll0_245_pll1_960_pll2_1200[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 30720 },
+ { 0, 120000, ACPU_PLL_1, 1, 7, 60000, 1, 3, 61440 },
+ { 1, 122880, ACPU_PLL_0, 4, 1, 61440, 1, 3, 61440 },
+ { 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 4, 61440 },
+ { 1, 245760, ACPU_PLL_0, 4, 0, 122880, 1, 4, 61440 },
+ { 1, 320000, ACPU_PLL_1, 1, 2, 160000, 1, 5, 122880 },
+ { 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 122880 },
+ { 1, 480000, ACPU_PLL_1, 1, 1, 160000, 2, 6, 122880 },
+ { 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 122880 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+/* 7x27 normal with CDMA-only modem */
+static struct clkctl_acpu_speed pll0_196_pll1_960_pll2_1200[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 24576 },
+ { 1, 98304, ACPU_PLL_0, 4, 1, 98304, 0, 3, 49152 },
+ { 0, 120000, ACPU_PLL_1, 1, 7, 60000, 1, 3, 49152 },
+ { 1, 196608, ACPU_PLL_0, 4, 0, 65536, 2, 4, 98304 },
+ { 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 4, 98304 },
+ { 1, 320000, ACPU_PLL_1, 1, 2, 160000, 1, 5, 120000 },
+ { 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 120000 },
+ { 1, 480000, ACPU_PLL_1, 1, 1, 160000, 2, 6, 120000 },
+ { 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 120000 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+/* 7x27 normal with GSM capable modem - PLL0 and PLL1 swapped */
+static struct clkctl_acpu_speed pll0_960_pll1_245_pll2_1200[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 30720 },
+ { 0, 120000, ACPU_PLL_0, 4, 7, 60000, 1, 3, 61440 },
+ { 1, 122880, ACPU_PLL_1, 1, 1, 61440, 1, 3, 61440 },
+ { 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 4, 61440 },
+ { 1, 245760, ACPU_PLL_1, 1, 0, 122880, 1, 4, 61440 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 160000, 1, 5, 122880 },
+ { 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 122880 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 122880 },
+ { 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 122880 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+/* 7x27 normal with CDMA-only modem - PLL0 and PLL1 swapped */
+static struct clkctl_acpu_speed pll0_960_pll1_196_pll2_1200[] = {
+ { 0, 19200, ACPU_PLL_TCXO, 0, 0, 19200, 0, 0, 24576 },
+ { 1, 98304, ACPU_PLL_1, 1, 1, 98304, 0, 3, 49152 },
+ { 0, 120000, ACPU_PLL_0, 4, 7, 60000, 1, 3, 49152 },
+ { 1, 196608, ACPU_PLL_1, 1, 0, 65536, 2, 4, 98304 },
+ { 0, 200000, ACPU_PLL_2, 2, 5, 66667, 2, 4, 98304 },
+ { 1, 320000, ACPU_PLL_0, 4, 2, 160000, 1, 5, 120000 },
+ { 0, 400000, ACPU_PLL_2, 2, 2, 133333, 2, 5, 120000 },
+ { 1, 480000, ACPU_PLL_0, 4, 1, 160000, 2, 6, 120000 },
+ { 1, 600000, ACPU_PLL_2, 2, 1, 200000, 2, 7, 120000 },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0}, {0, 0, 0} }
+};
+
+#define PLL_196_MHZ 10
+#define PLL_245_MHZ 12
+#define PLL_491_MHZ 25
+#define PLL_768_MHZ 40
+#define PLL_960_MHZ 50
+#define PLL_1056_MHZ 55
+#define PLL_1200_MHZ 62
+
+#define PLL_CONFIG(m0, m1, m2) { \
+ PLL_##m0##_MHZ, PLL_##m1##_MHZ, PLL_##m2##_MHZ, \
+ pll0_##m0##_pll1_##m1##_pll2_##m2 \
+}
+
+struct pll_freq_tbl_map {
+ unsigned int pll0_l;
+ unsigned int pll1_l;
+ unsigned int pll2_l;
+ struct clkctl_acpu_speed *tbl;
+};
+
+static struct pll_freq_tbl_map acpu_freq_tbl_list[] = {
+ PLL_CONFIG(196, 768, 1056),
+ PLL_CONFIG(245, 768, 1056),
+ PLL_CONFIG(196, 960, 1056),
+ PLL_CONFIG(245, 960, 1056),
+ PLL_CONFIG(196, 960, 1200),
+ PLL_CONFIG(245, 960, 1200),
+ PLL_CONFIG(960, 196, 1200),
+ PLL_CONFIG(960, 245, 1200),
+ { 0, 0, 0, 0 }
+};
+
+#ifdef CONFIG_CPU_FREQ_MSM
+static struct cpufreq_frequency_table freq_table[20];
+
+static void __init cpufreq_table_init(void)
+{
+ unsigned int i;
+ unsigned int freq_cnt = 0;
+
+ /* Construct the freq_table table from acpu_freq_tbl since the
+ * freq_table values need to match frequencies specified in
+ * acpu_freq_tbl and acpu_freq_tbl needs to be fixed up during init.
+ */
+ for (i = 0; acpu_freq_tbl[i].a11clk_khz != 0
+ && freq_cnt < ARRAY_SIZE(freq_table)-1; i++) {
+ if (acpu_freq_tbl[i].use_for_scaling) {
+ freq_table[freq_cnt].index = freq_cnt;
+ freq_table[freq_cnt].frequency
+ = acpu_freq_tbl[i].a11clk_khz;
+ freq_cnt++;
+ }
+ }
+
+ /* freq_table not big enough to store all usable freqs. */
+ BUG_ON(acpu_freq_tbl[i].a11clk_khz != 0);
+
+ freq_table[freq_cnt].index = freq_cnt;
+ freq_table[freq_cnt].frequency = CPUFREQ_TABLE_END;
+
+ pr_info("%d scaling frequencies supported.\n", freq_cnt);
+}
+#endif
+
+unsigned long clk_get_max_axi_khz(void)
+{
+ return drv_state.max_axi_khz;
+}
+EXPORT_SYMBOL(clk_get_max_axi_khz);
+
+static int pc_pll_request(unsigned id, unsigned on)
+{
+ int res = 0;
+ on = !!on;
+
+#if PERF_SWITCH_DEBUG
+ if (on)
+ printk(KERN_DEBUG "Enabling PLL %d\n", id);
+ else
+ printk(KERN_DEBUG "Disabling PLL %d\n", id);
+#endif
+
+ if (id >= ACPU_PLL_END)
+ return -EINVAL;
+
+ if (pll_control) {
+ remote_spin_lock(&pll_lock);
+ if (on) {
+ pll_control->pll[PLL_BASE + id].votes |= 2;
+ if (!pll_control->pll[PLL_BASE + id].on) {
+ writel(6, PLLn_MODE(id));
+ udelay(50);
+ writel(7, PLLn_MODE(id));
+ pll_control->pll[PLL_BASE + id].on = 1;
+ }
+ } else {
+ pll_control->pll[PLL_BASE + id].votes &= ~2;
+ if (pll_control->pll[PLL_BASE + id].on
+ && !pll_control->pll[PLL_BASE + id].votes) {
+ writel(0, PLLn_MODE(id));
+ pll_control->pll[PLL_BASE + id].on = 0;
+ }
+ }
+ remote_spin_unlock(&pll_lock);
+ } else {
+ res = msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on);
+ if (res < 0)
+ return res;
+ else if ((int) id < 0)
+ return -EINVAL;
+ }
+
+#if PERF_SWITCH_DEBUG
+ if (on)
+ printk(KERN_DEBUG "PLL enabled\n");
+ else
+ printk(KERN_DEBUG "PLL disabled\n");
+#endif
+ return res;
+}
+
+
+/*----------------------------------------------------------------------------
+ * ARM11 'owned' clock control
+ *---------------------------------------------------------------------------*/
+
+unsigned long acpuclk_power_collapse(void) {
+ int ret = acpuclk_get_rate();
+ acpuclk_set_rate(drv_state.power_collapse_khz, SETRATE_PC);
+ return ret * 1000;
+}
+
+unsigned long acpuclk_wait_for_irq(void) {
+ int ret = acpuclk_get_rate();
+ acpuclk_set_rate(drv_state.wait_for_irq_khz, SETRATE_SWFI);
+ return ret * 1000;
+}
+
+static int acpuclk_set_vdd_level(int vdd)
+{
+ uint32_t current_vdd;
+
+ current_vdd = readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x07;
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "acpuclock: Switching VDD from %u -> %d\n",
+ current_vdd, vdd);
+#endif
+ writel((1 << 7) | (vdd << 3), A11S_VDD_SVS_PLEVEL_ADDR);
+ udelay(drv_state.vdd_switch_time_us);
+ if ((readl(A11S_VDD_SVS_PLEVEL_ADDR) & 0x7) != vdd) {
+#if PERF_SWITCH_DEBUG
+ printk(KERN_ERR "acpuclock: VDD set failed\n");
+#endif
+ return -EIO;
+ }
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "acpuclock: VDD switched\n");
+#endif
+ return 0;
+}
+
+/* Set proper dividers for the given clock speed. */
+static void acpuclk_set_div(const struct clkctl_acpu_speed *hunt_s) {
+ uint32_t reg_clkctl, reg_clksel, clk_div, src_sel;
+
+ reg_clksel = readl(A11S_CLK_SEL_ADDR);
+
+ /* AHB_CLK_DIV */
+ clk_div = (reg_clksel >> 1) & 0x03;
+ /* CLK_SEL_SRC1NO */
+ src_sel = reg_clksel & 1;
+
+ /*
+ * If the new clock divider is higher than the previous, then
+ * program the divider before switching the clock
+ */
+ if (hunt_s->ahbclk_div > clk_div) {
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+
+ /* Program clock source and divider */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl &= ~(0xFF << (8 * src_sel));
+ reg_clkctl |= hunt_s->a11clk_src_sel << (4 + 8 * src_sel);
+ reg_clkctl |= hunt_s->a11clk_src_div << (0 + 8 * src_sel);
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+ /* Program clock source selection */
+ reg_clksel ^= 1;
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+
+ /*
+ * If the new clock divider is lower than the previous, then
+ * program the divider after switching the clock
+ */
+ if (hunt_s->ahbclk_div < clk_div) {
+ reg_clksel &= ~(0x3 << 1);
+ reg_clksel |= (hunt_s->ahbclk_div << 1);
+ writel(reg_clksel, A11S_CLK_SEL_ADDR);
+ }
+}
+
+int acpuclk_set_rate(unsigned long rate, enum setrate_reason reason)
+{
+ uint32_t reg_clkctl;
+ struct clkctl_acpu_speed *cur_s, *tgt_s, *strt_s;
+ int rc = 0;
+ unsigned int plls_enabled = 0, pll;
+
+ if (reason == SETRATE_CPUFREQ)
+ mutex_lock(&drv_state.lock);
+
+ strt_s = cur_s = drv_state.current_speed;
+
+ WARN_ONCE(cur_s == NULL, "acpuclk_set_rate: not initialized\n");
+ if (cur_s == NULL) {
+ rc = -ENOENT;
+ goto out;
+ }
+
+ if (rate == (cur_s->a11clk_khz * 1000))
+ goto out;
+
+ for (tgt_s = acpu_freq_tbl; tgt_s->a11clk_khz != 0; tgt_s++) {
+ if (tgt_s->a11clk_khz == (rate / 1000))
+ break;
+ }
+
+ if (tgt_s->a11clk_khz == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* Choose the highest speed at or below 'rate' with same PLL. */
+ if (reason != SETRATE_CPUFREQ
+ && tgt_s->a11clk_khz < cur_s->a11clk_khz) {
+ while (tgt_s->pll != ACPU_PLL_TCXO && tgt_s->pll != cur_s->pll)
+ tgt_s--;
+ }
+
+ if (strt_s->pll != ACPU_PLL_TCXO)
+ plls_enabled |= 1 << strt_s->pll;
+
+ if (reason == SETRATE_CPUFREQ) {
+ if (strt_s->pll != tgt_s->pll && tgt_s->pll != ACPU_PLL_TCXO) {
+ rc = pc_pll_request(tgt_s->pll, 1);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ tgt_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << tgt_s->pll;
+ }
+ }
+ /* Need to do this when coming out of power collapse since some modem
+ * firmwares reset the VDD when the application processor enters power
+ * collapse. */
+ if (reason == SETRATE_CPUFREQ || reason == SETRATE_PC) {
+ /* Increase VDD if needed. */
+ if (tgt_s->vdd > cur_s->vdd) {
+ if ((rc = acpuclk_set_vdd_level(tgt_s->vdd)) < 0) {
+ printk(KERN_ERR "Unable to switch ACPU vdd\n");
+ goto out;
+ }
+ }
+ }
+
+ /* Set wait states for CPU inbetween frequency changes */
+ reg_clkctl = readl(A11S_CLK_CNTL_ADDR);
+ reg_clkctl |= (100 << 16); /* set WT_ST_CNT */
+ writel(reg_clkctl, A11S_CLK_CNTL_ADDR);
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_INFO "acpuclock: Switching from ACPU rate %u -> %u\n",
+ strt_s->a11clk_khz * 1000, tgt_s->a11clk_khz * 1000);
+#endif
+
+ while (cur_s != tgt_s) {
+ /*
+ * Always jump to target freq if within 256mhz, regulardless of
+ * PLL. If differnece is greater, use the predefinied
+ * steppings in the table.
+ */
+ int d = abs((int)(cur_s->a11clk_khz - tgt_s->a11clk_khz));
+ if (d > drv_state.max_speed_delta_khz) {
+
+ if (tgt_s->a11clk_khz > cur_s->a11clk_khz) {
+ /* Step up: jump to target PLL as early as
+ * possible so indexing using TCXO (up[-1])
+ * never occurs. */
+ if (likely(cur_s->up[tgt_s->pll]))
+ cur_s = cur_s->up[tgt_s->pll];
+ else
+ cur_s = cur_s->up[cur_s->pll];
+ } else {
+ /* Step down: stay on current PLL as long as
+ * possible so indexing using TCXO (down[-1])
+ * never occurs. */
+ if (likely(cur_s->down[cur_s->pll]))
+ cur_s = cur_s->down[cur_s->pll];
+ else
+ cur_s = cur_s->down[tgt_s->pll];
+ }
+
+ if (cur_s == NULL) { /* This should not happen. */
+ pr_err("No stepping frequencies found. "
+ "strt_s:%u tgt_s:%u\n",
+ strt_s->a11clk_khz, tgt_s->a11clk_khz);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ } else {
+ cur_s = tgt_s;
+ }
+#if PERF_SWITCH_STEP_DEBUG
+ printk(KERN_DEBUG "%s: STEP khz = %u, pll = %d\n",
+ __FUNCTION__, cur_s->a11clk_khz, cur_s->pll);
+#endif
+ if (cur_s->pll != ACPU_PLL_TCXO
+ && !(plls_enabled & (1 << cur_s->pll))) {
+ rc = pc_pll_request(cur_s->pll, 1);
+ if (rc < 0) {
+ pr_err("PLL%d enable failed (%d)\n",
+ cur_s->pll, rc);
+ goto out;
+ }
+ plls_enabled |= 1 << cur_s->pll;
+ }
+
+ acpuclk_set_div(cur_s);
+ drv_state.current_speed = cur_s;
+ /* Re-adjust lpj for the new clock speed. */
+ loops_per_jiffy = cur_s->lpj;
+ udelay(drv_state.acpu_switch_time_us);
+ }
+
+ /* Nothing else to do for SWFI. */
+ if (reason == SETRATE_SWFI)
+ goto out;
+
+ /* Change the AXI bus frequency if we can. */
+ if (strt_s->axiclk_khz != tgt_s->axiclk_khz) {
+ rc = ebi1_clk_set_min_rate(CLKVOTE_ACPUCLK,
+ tgt_s->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+ }
+
+ /* Nothing else to do for power collapse if not 7x27. */
+ if (reason == SETRATE_PC && !cpu_is_msm7x27())
+ goto out;
+
+ /* Disable PLLs we are not using anymore. */
+ if (tgt_s->pll != ACPU_PLL_TCXO)
+ plls_enabled &= ~(1 << tgt_s->pll);
+ for (pll = ACPU_PLL_0; pll <= ACPU_PLL_2; pll++)
+ if (plls_enabled & (1 << pll)) {
+ rc = pc_pll_request(pll, 0);
+ if (rc < 0) {
+ pr_err("PLL%d disable failed (%d)\n", pll, rc);
+ goto out;
+ }
+ }
+
+ /* Nothing else to do for power collapse. */
+ if (reason == SETRATE_PC)
+ goto out;
+
+ /* Drop VDD level if we can. */
+ if (tgt_s->vdd < strt_s->vdd) {
+ if (acpuclk_set_vdd_level(tgt_s->vdd) < 0)
+ printk(KERN_ERR "acpuclock: Unable to drop ACPU vdd\n");
+ }
+
+#if PERF_SWITCH_DEBUG
+ printk(KERN_DEBUG "%s: ACPU speed change complete\n", __FUNCTION__);
+#endif
+out:
+ if (reason == SETRATE_CPUFREQ)
+ mutex_unlock(&drv_state.lock);
+ return rc;
+}
+
+static void __init acpuclk_init(void)
+{
+ struct clkctl_acpu_speed *speed;
+ uint32_t div, sel;
+ int rc;
+
+ /*
+ * Determine the rate of ACPU clock
+ */
+
+ if (!(readl(A11S_CLK_SEL_ADDR) & 0x01)) { /* CLK_SEL_SRC1N0 */
+ /* CLK_SRC0_SEL */
+ sel = (readl(A11S_CLK_CNTL_ADDR) >> 12) & 0x7;
+ /* CLK_SRC0_DIV */
+ div = (readl(A11S_CLK_CNTL_ADDR) >> 8) & 0x0f;
+ } else {
+ /* CLK_SRC1_SEL */
+ sel = (readl(A11S_CLK_CNTL_ADDR) >> 4) & 0x07;
+ /* CLK_SRC1_DIV */
+ div = readl(A11S_CLK_CNTL_ADDR) & 0x0f;
+ }
+
+ for (speed = acpu_freq_tbl; speed->a11clk_khz != 0; speed++) {
+ if (speed->a11clk_src_sel == sel
+ && (speed->a11clk_src_div == div))
+ break;
+ }
+ if (speed->a11clk_khz == 0) {
+ printk(KERN_WARNING "Warning - ACPU clock reports invalid speed\n");
+ return;
+ }
+
+ drv_state.current_speed = speed;
+
+ rc = ebi1_clk_set_min_rate(CLKVOTE_ACPUCLK, speed->axiclk_khz * 1000);
+ if (rc < 0)
+ pr_err("Setting AXI min rate failed!\n");
+
+ printk(KERN_INFO "ACPU running at %d KHz\n", speed->a11clk_khz);
+}
+
+unsigned long acpuclk_get_rate(void)
+{
+ WARN_ONCE(drv_state.current_speed == NULL,
+ "acpuclk_get_rate: not initialized\n");
+ if (drv_state.current_speed)
+ return drv_state.current_speed->a11clk_khz;
+ else
+ return 0;
+}
+
+uint32_t acpuclk_get_switch_time(void)
+{
+ return drv_state.acpu_switch_time_us;
+}
+
+/*----------------------------------------------------------------------------
+ * Clock driver initialization
+ *---------------------------------------------------------------------------*/
+
+#define DIV2REG(n) ((n)-1)
+#define REG2DIV(n) ((n)+1)
+#define SLOWER_BY(div, factor) div = DIV2REG(REG2DIV(div) * factor)
+
+static void __init acpu_freq_tbl_fixup(void)
+{
+ unsigned long pll0_l, pll1_l, pll2_l;
+ int axi_160mhz = 0, axi_200mhz = 0;
+ struct pll_freq_tbl_map *lst;
+ struct clkctl_acpu_speed *t;
+ unsigned int pll0_needs_fixup = 0;
+
+ /* Wait for the PLLs to be initialized and then read their frequency.
+ */
+ do {
+ pll0_l = readl(PLLn_L_VAL(0)) & 0x3f;
+ cpu_relax();
+ udelay(50);
+ } while (pll0_l == 0);
+ do {
+ pll1_l = readl(PLLn_L_VAL(1)) & 0x3f;
+ cpu_relax();
+ udelay(50);
+ } while (pll1_l == 0);
+ do {
+ pll2_l = readl(PLLn_L_VAL(2)) & 0x3f;
+ cpu_relax();
+ udelay(50);
+ } while (pll2_l == 0);
+
+ printk(KERN_INFO "L val: PLL0: %d, PLL1: %d, PLL2: %d\n",
+ (int)pll0_l, (int)pll1_l, (int)pll2_l);
+
+ /* Some configurations run PLL0 twice as fast. Instead of having
+ * separate tables for this case, we simply fix up the ACPU clock
+ * source divider since it's a simple fix up.
+ */
+ if (pll0_l == PLL_491_MHZ) {
+ pll0_l = PLL_245_MHZ;
+ pll0_needs_fixup = 1;
+ }
+
+ /* Select the right table to use. */
+ for (lst = acpu_freq_tbl_list; lst->tbl != 0; lst++) {
+ if (lst->pll0_l == pll0_l && lst->pll1_l == pll1_l
+ && lst->pll2_l == pll2_l) {
+ acpu_freq_tbl = lst->tbl;
+ break;
+ }
+ }
+
+ if (acpu_freq_tbl == NULL) {
+ pr_crit("Unknown PLL configuration!\n");
+ BUG();
+ }
+
+ /* Fix up PLL0 source divider if necessary. Also, fix up the AXI to
+ * the max that's supported by the board (RAM used in board).
+ */
+ axi_160mhz = (pll0_l == PLL_960_MHZ || pll1_l == PLL_960_MHZ);
+ axi_200mhz = (pll2_l == PLL_1200_MHZ);
+ for (t = &acpu_freq_tbl[0]; t->a11clk_khz != 0; t++) {
+
+ if (pll0_needs_fixup && t->pll == ACPU_PLL_0)
+ SLOWER_BY(t->a11clk_src_div, 2);
+ if (axi_160mhz && drv_state.max_axi_khz >= 160000
+ && t->ahbclk_khz > 128000)
+ t->axiclk_khz = 160000;
+ if (axi_200mhz && drv_state.max_axi_khz >= 200000
+ && t->ahbclk_khz > 160000)
+ t->axiclk_khz = 200000;
+ }
+
+ t--;
+ drv_state.max_axi_khz = t->axiclk_khz;
+
+ /* The default 7x27 ACPU clock plan supports running the AXI bus at
+ * 200 MHz. So we don't classify it as Turbo mode.
+ */
+ if (cpu_is_msm7x27())
+ return;
+
+ if (!axi_160mhz)
+ pr_info("Turbo mode not supported.\n");
+ else if (t->axiclk_khz == 160000)
+ pr_info("Turbo mode supported and enabled.\n");
+ else
+ pr_info("Turbo mode supported but not enabled.\n");
+}
+
+/* Initalize the lpj field in the acpu_freq_tbl. */
+static void __init lpj_init(void)
+{
+ int i;
+ const struct clkctl_acpu_speed *base_clk = drv_state.current_speed;
+ for (i = 0; acpu_freq_tbl[i].a11clk_khz; i++) {
+ acpu_freq_tbl[i].lpj = cpufreq_scale(loops_per_jiffy,
+ base_clk->a11clk_khz,
+ acpu_freq_tbl[i].a11clk_khz);
+ }
+}
+
+static void __init precompute_stepping(void)
+{
+ int i, step_idx;
+
+#define cur_freq acpu_freq_tbl[i].a11clk_khz
+#define step_freq acpu_freq_tbl[step_idx].a11clk_khz
+#define cur_pll acpu_freq_tbl[i].pll
+#define step_pll acpu_freq_tbl[step_idx].pll
+
+ for (i = 0; acpu_freq_tbl[i].a11clk_khz; i++) {
+
+ /* Calculate max "up" step for each destination PLL */
+ step_idx = i + 1;
+ while (step_freq && (step_freq - cur_freq)
+ <= drv_state.max_speed_delta_khz) {
+ acpu_freq_tbl[i].up[step_pll] =
+ &acpu_freq_tbl[step_idx];
+ step_idx++;
+ }
+ if (step_idx == (i + 1) && step_freq) {
+ pr_crit("Delta between freqs %u KHz and %u KHz is"
+ " too high!\n", cur_freq, step_freq);
+ BUG();
+ }
+
+ /* Calculate max "down" step for each destination PLL */
+ step_idx = i - 1;
+ while (step_idx >= 0 && (cur_freq - step_freq)
+ <= drv_state.max_speed_delta_khz) {
+ acpu_freq_tbl[i].down[step_pll] =
+ &acpu_freq_tbl[step_idx];
+ step_idx--;
+ }
+ if (step_idx == (i - 1) && i > 0) {
+ pr_crit("Delta between freqs %u KHz and %u KHz is"
+ " too high!\n", cur_freq, step_freq);
+ BUG();
+ }
+ }
+}
+
+static void __init print_acpu_freq_tbl(void)
+{
+ struct clkctl_acpu_speed *t;
+ short down_idx[3];
+ short up_idx[3];
+ int i, j;
+
+#define FREQ_IDX(freq_ptr) (freq_ptr - acpu_freq_tbl)
+ pr_info("Id CPU-KHz PLL DIV AHB-KHz ADIV AXI-KHz "
+ "D0 D1 D2 U0 U1 U2\n");
+
+ t = &acpu_freq_tbl[0];
+ for (i = 0; t->a11clk_khz != 0; i++) {
+
+ for (j = 0; j < 3; j++) {
+ down_idx[j] = t->down[j] ? FREQ_IDX(t->down[j]) : -1;
+ up_idx[j] = t->up[j] ? FREQ_IDX(t->up[j]) : -1;
+ }
+
+ pr_info("%2d %7d %3d %3d %7d %4d %7d "
+ "%2d %2d %2d %2d %2d %2d\n",
+ i, t->a11clk_khz, t->pll, t->a11clk_src_div + 1,
+ t->ahbclk_khz, t->ahbclk_div + 1, t->axiclk_khz,
+ down_idx[0], down_idx[1], down_idx[2],
+ up_idx[0], up_idx[1], up_idx[2]);
+
+ t++;
+ }
+}
+
+static void msm7x25_acpu_pll_hw_bug_fix(void)
+{
+ unsigned int n;
+
+ /* The 7625 has a hardware bug and in order to select PLL2 we
+ * must program PLL3. Use the same table, and just fix up the
+ * numbers on this target. */
+ for (n = 0; acpu_freq_tbl[n].a11clk_khz != 0; n++)
+ if (acpu_freq_tbl[n].pll == ACPU_PLL_2)
+ acpu_freq_tbl[n].a11clk_src_sel = 3;
+}
+
+static void shared_pll_control_init(void)
+{
+#define PLL_REMOTE_SPINLOCK_ID 7
+ unsigned smem_size;
+ remote_spin_lock_init(&pll_lock, PLL_REMOTE_SPINLOCK_ID);
+ pll_control = smem_get_entry(SMEM_CLKREGIM_SOURCES, &smem_size);
+
+ if (!pll_control)
+ pr_err("Unable to find shared PLL control data structure!\n");
+ /* There might be more PLLs than what the application processor knows
+ * about. But the index used for each PLL is guaranteed to remain the
+ * same. */
+ else if (smem_size < sizeof(struct shared_pll_control))
+ pr_err("Shared PLL control data structure too small!\n");
+ else if (pll_control->version != 0xCCEE0001)
+ pr_err("Shared PLL control version mismatch!\n");
+ else {
+ pr_info("Shared PLL control available.\n");
+ return;
+ }
+
+ pll_control = NULL;
+ pr_err("Falling back to proc_comm PLL control.\n");
+}
+
+void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata)
+{
+ pr_info("acpu_clock_init()\n");
+
+ mutex_init(&drv_state.lock);
+ drv_state.acpu_switch_time_us = clkdata->acpu_switch_time_us;
+ drv_state.max_speed_delta_khz = clkdata->max_speed_delta_khz;
+ drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us;
+ drv_state.power_collapse_khz = clkdata->power_collapse_khz;
+ drv_state.wait_for_irq_khz = clkdata->wait_for_irq_khz;
+ drv_state.max_axi_khz = clkdata->max_axi_khz;
+ acpu_freq_tbl_fixup();
+ precompute_stepping();
+ acpuclk_init();
+ lpj_init();
+ print_acpu_freq_tbl();
+ if (cpu_is_msm7x25())
+ msm7x25_acpu_pll_hw_bug_fix();
+ if (cpu_is_msm7x27())
+ shared_pll_control_init();
+#ifdef CONFIG_CPU_FREQ_MSM
+ cpufreq_table_init();
+ cpufreq_frequency_table_get_attr(freq_table, smp_processor_id());
+#endif
+}
diff --git a/arch/arm/mach-msm/acpuclock.h b/arch/arm/mach-msm/acpuclock.h
new file mode 100644
index 000000000000..ad7b4cd2399d
--- /dev/null
+++ b/arch/arm/mach-msm/acpuclock.h
@@ -0,0 +1,39 @@
+/* arch/arm/mach-msm/acpuclock.h
+ *
+ * MSM architecture clock driver header
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_ACPUCLOCK_H
+#define __ARCH_ARM_MACH_MSM_ACPUCLOCK_H
+
+#include <linux/list.h>
+
+enum setrate_reason {
+ SETRATE_CPUFREQ = 0,
+ SETRATE_SWFI,
+ SETRATE_PC,
+};
+
+int acpuclk_set_rate(unsigned long rate, enum setrate_reason reason);
+unsigned long acpuclk_get_rate(void);
+uint32_t acpuclk_get_switch_time(void);
+unsigned long acpuclk_wait_for_irq(void);
+unsigned long acpuclk_power_collapse(void);
+
+
+#endif
+
diff --git a/arch/arm/mach-msm/avs.c b/arch/arm/mach-msm/avs.c
new file mode 100644
index 000000000000..297d3e1a4d52
--- /dev/null
+++ b/arch/arm/mach-msm/avs.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/kernel_stat.h>
+#include <linux/workqueue.h>
+
+#include "avs.h"
+
+#define AVSDSCR_INPUT 0x01004860 /* magic # from circuit designer */
+#define TSCSR_INPUT 0x00000001 /* enable temperature sense */
+
+#define TEMPRS 16 /* total number of temperature regions */
+#define GET_TEMPR() (avs_get_tscsr() >> 28) /* scale TSCSR[CTEMP] to regions */
+
+struct mutex avs_lock;
+
+static struct avs_state_s
+{
+ u32 freq_cnt; /* Frequencies supported list */
+ short *avs_v; /* Dyanmically allocated storage for
+ * 2D table of voltages over temp &
+ * freq. Used as a set of 1D tables.
+ * Each table is for a single temp.
+ * For usage see avs_get_voltage
+ */
+ int (*set_vdd) (int); /* Function Ptr for setting voltage */
+ int changing; /* Clock frequency is changing */
+ u32 freq_idx; /* Current frequency index */
+ int vdd; /* Current ACPU voltage */
+} avs_state;
+
+/*
+ * Update the AVS voltage vs frequency table, for current temperature
+ * Adjust based on the AVS delay circuit hardware status
+ */
+static void avs_update_voltage_table(short *vdd_table)
+{
+ u32 avscsr;
+ int cpu;
+ int vu;
+ int l2;
+ int i;
+ u32 cur_freq_idx;
+ short cur_voltage;
+
+ cur_freq_idx = avs_state.freq_idx;
+ cur_voltage = avs_state.vdd;
+
+ avscsr = avs_test_delays();
+ AVSDEBUG("avscsr=%x, avsdscr=%x\n", avscsr, avs_get_avsdscr());
+
+ /*
+ * Read the results for the various unit's AVS delay circuits
+ * 2=> up, 1=>down, 0=>no-change
+ */
+ cpu = ((avscsr >> 23) & 2) + ((avscsr >> 16) & 1);
+ vu = ((avscsr >> 28) & 2) + ((avscsr >> 21) & 1);
+ l2 = ((avscsr >> 29) & 2) + ((avscsr >> 22) & 1);
+
+ if ((cpu == 3) || (vu == 3) || (l2 == 3)) {
+ printk(KERN_ERR "AVS: Dly Synth O/P error\n");
+ } else if ((cpu == 2) || (l2 == 2) || (vu == 2)) {
+ /*
+ * even if one oscillator asks for up, increase the voltage,
+ * as its an indication we are running outside the
+ * critical acceptable range of v-f combination.
+ */
+ AVSDEBUG("cpu=%d l2=%d vu=%d\n", cpu, l2, vu);
+ AVSDEBUG("Voltage up at %d\n", cur_freq_idx);
+
+ if (cur_voltage >= VOLTAGE_MAX)
+ printk(KERN_ERR
+ "AVS: Voltage can not get high enough!\n");
+
+ /* Raise the voltage for all frequencies */
+ for (i = 0; i < avs_state.freq_cnt; i++) {
+ vdd_table[i] = cur_voltage + VOLTAGE_STEP;
+ if (vdd_table[i] > VOLTAGE_MAX)
+ vdd_table[i] = VOLTAGE_MAX;
+ }
+ } else if ((cpu == 1) && (l2 == 1) && (vu == 1)) {
+ if ((cur_voltage - VOLTAGE_STEP >= VOLTAGE_MIN) &&
+ (cur_voltage <= vdd_table[cur_freq_idx])) {
+ vdd_table[cur_freq_idx] = cur_voltage - VOLTAGE_STEP;
+ AVSDEBUG("Voltage down for %d and lower levels\n",
+ cur_freq_idx);
+
+ /* clamp to this voltage for all lower levels */
+ for (i = 0; i < cur_freq_idx; i++) {
+ if (vdd_table[i] > vdd_table[cur_freq_idx])
+ vdd_table[i] = vdd_table[cur_freq_idx];
+ }
+ }
+ }
+}
+
+/*
+ * Return the voltage for the target performance freq_idx and optionally
+ * use AVS hardware to check the present voltage freq_idx
+ */
+static short avs_get_target_voltage(int freq_idx, bool update_table)
+{
+ unsigned cur_tempr = GET_TEMPR();
+ unsigned temp_index = cur_tempr*avs_state.freq_cnt;
+
+ /* Table of voltages vs frequencies for this temp */
+ short *vdd_table = avs_state.avs_v + temp_index;
+
+ if (update_table)
+ avs_update_voltage_table(vdd_table);
+
+ return vdd_table[freq_idx];
+}
+
+
+/*
+ * Set the voltage for the freq_idx and optionally
+ * use AVS hardware to update the voltage
+ */
+static int avs_set_target_voltage(int freq_idx, bool update_table)
+{
+ int rc = 0;
+ int new_voltage = avs_get_target_voltage(freq_idx, update_table);
+ if (avs_state.vdd != new_voltage) {
+ AVSDEBUG("AVS setting V to %d mV @%d\n",
+ new_voltage, freq_idx);
+ rc = avs_state.set_vdd(new_voltage);
+ if (rc)
+ return rc;
+ avs_state.vdd = new_voltage;
+ }
+ return rc;
+}
+
+/*
+ * Notify avs of clk frquency transition begin & end
+ */
+int avs_adjust_freq(u32 freq_idx, int begin)
+{
+ int rc = 0;
+
+ if (!avs_state.set_vdd) {
+ /* AVS not initialized */
+ return 0;
+ }
+
+ if (freq_idx >= avs_state.freq_cnt) {
+ AVSDEBUG("Out of range :%d\n", freq_idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&avs_lock);
+ if ((begin && (freq_idx > avs_state.freq_idx)) ||
+ (!begin && (freq_idx < avs_state.freq_idx))) {
+ /* Update voltage before increasing frequency &
+ * after decreasing frequency
+ */
+ rc = avs_set_target_voltage(freq_idx, 0);
+ if (rc)
+ goto aaf_out;
+
+ avs_state.freq_idx = freq_idx;
+ }
+ avs_state.changing = begin;
+aaf_out:
+ mutex_unlock(&avs_lock);
+
+ return rc;
+}
+
+
+static struct delayed_work avs_work;
+static struct workqueue_struct *kavs_wq;
+#define AVS_DELAY ((CONFIG_HZ * 50 + 999) / 1000)
+
+static void do_avs_timer(struct work_struct *work)
+{
+ int cur_freq_idx;
+
+ mutex_lock(&avs_lock);
+ if (!avs_state.changing) {
+ /* Only adjust the voltage if clk is stable */
+ cur_freq_idx = avs_state.freq_idx;
+ avs_set_target_voltage(cur_freq_idx, 1);
+ }
+ mutex_unlock(&avs_lock);
+ queue_delayed_work_on(0, kavs_wq, &avs_work, AVS_DELAY);
+}
+
+
+static void __init avs_timer_init(void)
+{
+ INIT_DELAYED_WORK_DEFERRABLE(&avs_work, do_avs_timer);
+ queue_delayed_work_on(0, kavs_wq, &avs_work, AVS_DELAY);
+}
+
+static void __exit avs_timer_exit(void)
+{
+ cancel_delayed_work(&avs_work);
+}
+
+static int __init avs_work_init(void)
+{
+ kavs_wq = create_workqueue("avs");
+ if (!kavs_wq) {
+ printk(KERN_ERR "AVS initialization failed\n");
+ return -EFAULT;
+ }
+ avs_timer_init();
+
+ return 1;
+}
+
+static void __exit avs_work_exit(void)
+{
+ avs_timer_exit();
+ destroy_workqueue(kavs_wq);
+}
+
+int __init avs_init(int (*set_vdd)(int), u32 freq_cnt, u32 freq_idx)
+{
+ int i;
+
+ mutex_init(&avs_lock);
+
+ if (freq_cnt == 0)
+ return -EINVAL;
+
+ avs_state.freq_cnt = freq_cnt;
+
+ if (freq_idx >= avs_state.freq_cnt)
+ return -EINVAL;
+
+ avs_state.avs_v = kmalloc(TEMPRS * avs_state.freq_cnt *
+ sizeof(avs_state.avs_v[0]), GFP_KERNEL);
+
+ if (avs_state.avs_v == 0)
+ return -ENOMEM;
+
+ for (i = 0; i < TEMPRS*avs_state.freq_cnt; i++)
+ avs_state.avs_v[i] = VOLTAGE_MAX;
+
+ avs_reset_delays(AVSDSCR_INPUT);
+ avs_set_tscsr(TSCSR_INPUT);
+
+ avs_state.set_vdd = set_vdd;
+ avs_state.changing = 0;
+ avs_state.freq_idx = -1;
+ avs_state.vdd = -1;
+ avs_adjust_freq(freq_idx, 0);
+
+ avs_work_init();
+
+ return 0;
+}
+
+void __exit avs_exit()
+{
+ avs_work_exit();
+
+ kfree(avs_state.avs_v);
+}
+
+
diff --git a/arch/arm/mach-msm/avs.h b/arch/arm/mach-msm/avs.h
new file mode 100644
index 000000000000..daae78938297
--- /dev/null
+++ b/arch/arm/mach-msm/avs.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AVS_H
+#define AVS_H
+
+#define VOLTAGE_MIN 1000 /* mV */
+#define VOLTAGE_MAX 1250
+#define VOLTAGE_STEP 25
+
+int __init avs_init(int (*set_vdd)(int), u32 freq_cnt, u32 freq_idx);
+void __exit avs_exit(void);
+
+int avs_adjust_freq(u32 freq_index, int begin);
+
+/* Routines exported from avs_hw.S */
+u32 avs_test_delays(void);
+u32 avs_reset_delays(u32 avsdscr);
+u32 avs_get_avscsr(void);
+u32 avs_get_avsdscr(void);
+u32 avs_get_tscsr(void);
+void avs_set_tscsr(u32 to_tscsr);
+
+/*#define AVSDEBUG(x...) pr_info("AVS: " x);*/
+#define AVSDEBUG(...)
+
+#endif /* AVS_H */
diff --git a/arch/arm/mach-msm/avs_hw.S b/arch/arm/mach-msm/avs_hw.S
new file mode 100644
index 000000000000..57521b60d8f0
--- /dev/null
+++ b/arch/arm/mach-msm/avs_hw.S
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+ .text
+
+ .global avs_test_delays
+avs_test_delays:
+
+/* Read r1=CPMR and enable Never Sleep for VSLPDLY */
+ mrc p15, 7, r1, c15, c0, 5
+ orr r12, r1, #3, 24
+ mcr p15, 7, r12, c15, c0, 5
+
+/* Read r2=CPACR and enable full access to CP10 and CP11 space */
+ mrc p15, 0, r2, c1, c0, 2
+ orr r12, r2, #(0xf << 20)
+ mcr p15, 0, r12, c1, c0, 2
+ isb
+
+/* Read r3=FPEXC and or in FP enable, VFP/ASE enable = FPEXC[30]; */
+ fmrx r3, fpexc
+ orr r12, r3, #1, 2
+ fmxr fpexc, r12
+
+/*
+ * Do floating-point operations to prime the VFP pipeline. Use
+ * fcpyd d0, d0 as a floating point nop. This avoids changing VFP
+ * state.
+ */
+ fcpyd d0, d0
+ fcpyd d0, d0
+ fcpyd d0, d0
+
+/* Read r0=AVSCSR to get status from CPU, VFP, and L2 ring oscillators */
+ mrc p15, 7, r0, c15, c1, 7
+
+/* Restore FPEXC */
+ fmxr fpexc, r3
+
+/* Restore CPACR */
+ MCR p15, 0, r2, c1, c0, 2
+
+/* Restore CPMR */
+ mcr p15, 7, r1, c15, c0, 5
+ isb
+
+ bx lr
+
+
+
+
+ .global avs_get_avscsr
+/* Read r0=AVSCSR to get status from CPU, VFP, and L2 ring oscillators */
+
+avs_get_avscsr:
+ mrc p15, 7, r0, c15, c1, 7
+ bx lr
+
+ .global avs_get_avsdscr
+/* Read r0=AVSDSCR to get the AVS Delay Synthesizer control settings */
+
+avs_get_avsdscr:
+ mrc p15, 7, r0, c15, c0, 6
+ bx lr
+
+
+
+
+ .global avs_get_tscsr
+/* Read r0=TSCSR to get temperature sensor control and status */
+
+avs_get_tscsr:
+ mrc p15, 7, r0, c15, c1, 0
+ bx lr
+
+ .global avs_set_tscsr
+/* Write TSCSR=r0 to set temperature sensor control and status */
+
+avs_set_tscsr:
+ mcr p15, 7, r0, c15, c1, 0
+ bx lr
+
+
+
+
+
+ .global avs_reset_delays
+avs_reset_delays:
+
+/* AVSCSR(0x61) to enable CPU, V and L2 AVS module */
+ mov r3, #0x61
+ mcr p15, 7, r3, c15, c1, 7
+
+/* AVSDSCR(dly) to program delay */
+ mcr p15, 7, r0, c15, c0, 6
+
+/* Read r0=AVSDSCR */
+ mrc p15, 7, r0, c15, c0, 6
+
+ bx lr
+
+ .end
+
+
diff --git a/arch/arm/mach-msm/board-comet.c b/arch/arm/mach-msm/board-comet.c
new file mode 100644
index 000000000000..9d8e290e8930
--- /dev/null
+++ b/arch/arm/mach-msm/board-comet.c
@@ -0,0 +1,488 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/bootmem.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <asm/mach/mmc.h>
+#include <mach/vreg.h>
+#include <mach/board.h>
+#include <mach/sirc.h>
+#include <linux/spi/spi.h>
+
+#include "devices.h"
+#include "timer.h"
+#include "pm.h"
+
+#define TOUCHPAD_SUSPEND 34
+#define TOUCHPAD_IRQ 42
+
+#define MSM_PMEM_MDP_SIZE 0x800000
+#define MSM_FB_SIZE 0x500000
+#define MSM_AUDIO_SIZE 0x200000
+
+#define MSM_SMI_BASE 0x2b00000
+#define MSM_SMI_SIZE 0x1500000
+
+#define MSM_FB_BASE MSM_SMI_BASE
+#define MSM_PMEM_GPU0_BASE (MSM_FB_BASE + MSM_FB_SIZE)
+#define MSM_PMEM_GPU0_SIZE (MSM_SMI_SIZE - MSM_FB_SIZE)
+
+#define COMET_CPLD_START 0x70004000
+#define COMET_CPLD_PER_ENABLE 0x00000010
+#define COMET_CPLD_PER_RESET 0x00000018
+#define COMET_CPLD_STATUS 0x00000028
+#define COMET_CPLD_EXT_PER_ENABLE 0x00000030
+#define COMET_CPLD_I2C_ENABLE 0x00000038
+#define COMET_CPLD_EXT_PER_RESET 0x00000048
+#define COMET_CPLD_VERSION 0x00000058
+
+#define COMET_CPLD_SIZE 0x00000060
+#define COMET_CPLD_STATUS_WVGA 0x0004
+#define COMET_CPLD_VERSION_MAJOR 0xFF00
+#define COMET_CPLD_PER_ENABLE_WVGA 0x0400
+#define COMET_CPLD_PER_ENABLE_LVDS 0x0200
+#define COMET_CPLD_PER_ENABLE_WXGA 0x0040
+#define COMET_CPLD_EXT_PER_ENABLE_WXGA 0x0080
+
+static unsigned long vreg_sts, gpio_sts;
+static struct vreg *vreg_mmc;
+static int gp6_enabled;
+
+static int cpld_version;
+static bool wvga_present;
+static bool wxga_present;
+static struct comet_cpld_t {
+ u16 per_reset_all_reset;
+ u16 ext_per_reset_all_reset;
+ u16 i2c_enable;
+ u16 per_enable_all;
+ u16 ext_per_enable_all;
+ u16 bt_reset_reg;
+ u16 bt_reset_mask;
+} comet_cpld[] = {
+ [0] = {
+ .per_reset_all_reset = 0x00FF,
+ /* enable all peripherals except microphones and */
+ /* reset line for i2c touchpad */
+ .per_enable_all = 0xFFD8,
+ .bt_reset_reg = 0x0018,
+ .bt_reset_mask = 0x0001,
+ },
+ [1] = {
+ .per_reset_all_reset = 0x00BF,
+ .ext_per_reset_all_reset = 0x0007,
+ .i2c_enable = 0x07F7,
+ /* enable all peripherals except microphones and */
+ /* displays */
+ .per_enable_all = 0xF9B8,
+ .ext_per_enable_all = 0x007D,
+ .bt_reset_reg = 0x0048,
+ .bt_reset_mask = 0x0004,
+ },
+};
+static struct comet_cpld_t *cpld_info;
+
+static struct resource smc911x_resources[] = {
+ [0] = {
+ .start = 0x84000000,
+ .end = 0x84000100,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MSM_GPIO_TO_INT(156),
+ .end = 156,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc911x_device = {
+ .name = "smc911x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc911x_resources),
+ .resource = smc911x_resources,
+};
+
+static void __iomem *comet_cpld_base(void)
+{
+ static void __iomem *comet_cpld_base_addr;
+
+ if (!comet_cpld_base_addr) {
+ if (!request_mem_region(COMET_CPLD_START, COMET_CPLD_SIZE,
+ "cpld")) {
+ printk(KERN_ERR
+ "%s: request_mem_region for comet cpld failed\n",
+ __func__);
+ goto cpld_base_exit;
+ }
+ comet_cpld_base_addr = ioremap(COMET_CPLD_START,
+ COMET_CPLD_SIZE);
+ if (!comet_cpld_base_addr) {
+ release_mem_region(COMET_CPLD_START,
+ COMET_CPLD_SIZE);
+ printk(KERN_ERR "%s: Could not map comet cpld\n",
+ __func__);
+ }
+ }
+cpld_base_exit:
+ return comet_cpld_base_addr;
+}
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_smd,
+ &msm_device_dmov,
+ &smc911x_device,
+ &msm_device_nand,
+};
+
+
+#define KBD_RST 35
+#define KBD_IRQ 144
+
+static void kbd_gpio_release(void)
+{
+ gpio_free(KBD_IRQ);
+ gpio_free(KBD_RST);
+}
+
+static int kbd_gpio_setup(void)
+{
+ int rc;
+ int respin = KBD_RST;
+ int irqpin = KBD_IRQ;
+ unsigned rescfg =
+ GPIO_CFG(respin, 0, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA);
+ unsigned irqcfg =
+ GPIO_CFG(irqpin, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA);
+
+ rc = gpio_request(irqpin, "gpio_keybd_irq");
+ if (rc) {
+ pr_err("gpio_request failed on pin %d (rc=%d)\n",
+ irqpin, rc);
+ goto err_gpioconfig;
+ }
+ rc = gpio_request(respin, "gpio_keybd_reset");
+ if (rc) {
+ pr_err("gpio_request failed on pin %d (rc=%d)\n",
+ respin, rc);
+ goto err_gpioconfig;
+ }
+ rc = gpio_tlmm_config(rescfg, GPIO_ENABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config failed on pin %d (rc=%d)\n",
+ respin, rc);
+ goto err_gpioconfig;
+ }
+ rc = gpio_tlmm_config(irqcfg, GPIO_ENABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config failed on pin %d (rc=%d)\n",
+ irqpin, rc);
+ goto err_gpioconfig;
+ }
+ return rc;
+
+err_gpioconfig:
+ kbd_gpio_release();
+ return rc;
+}
+
+static void __init comet_init_irq(void)
+{
+ msm_init_irq();
+ msm_init_sirc();
+}
+
+static void sdcc_gpio_init(void)
+{
+ /* SDC1 GPIOs */
+ if (gpio_request(51, "sdc1_data_3"))
+ pr_err("failed to request gpio sdc1_data_3\n");
+ if (gpio_request(52, "sdc1_data_2"))
+ pr_err("failed to request gpio sdc1_data_2\n");
+ if (gpio_request(53, "sdc1_data_1"))
+ pr_err("failed to request gpio sdc1_data_1\n");
+ if (gpio_request(54, "sdc1_data_0"))
+ pr_err("failed to request gpio sdc1_data_0\n");
+ if (gpio_request(55, "sdc1_cmd"))
+ pr_err("failed to request gpio sdc1_cmd\n");
+ if (gpio_request(56, "sdc1_clk"))
+ pr_err("failed to request gpio sdc1_clk\n");
+
+ /* SDC2 GPIOs */
+ if (gpio_request(62, "sdc2_clk"))
+ pr_err("failed to request gpio sdc2_clk\n");
+ if (gpio_request(63, "sdc2_cmd"))
+ pr_err("failed to request gpio sdc2_cmd\n");
+ if (gpio_request(64, "sdc2_data_3"))
+ pr_err("failed to request gpio sdc2_data_3\n");
+ if (gpio_request(65, "sdc2_data_2"))
+ pr_err("failed to request gpio sdc2_data_2\n");
+ if (gpio_request(66, "sdc2_data_1"))
+ pr_err("failed to request gpio sdc2_data_1\n");
+ if (gpio_request(67, "sdc2_data_0"))
+ pr_err("failed to request gpio sdc2_data_0\n");
+
+ /* SDC3 GPIOs */
+ if (gpio_request(88, "sdc3_clk"))
+ pr_err("failed to request gpio sdc3_clk\n");
+ if (gpio_request(89, "sdc3_cmd"))
+ pr_err("failed to request gpio sdc3_cmd\n");
+ if (gpio_request(90, "sdc3_data_3"))
+ pr_err("failed to request gpio sdc3_data_3\n");
+ if (gpio_request(91, "sdc3_data_2"))
+ pr_err("failed to request gpio sdc3_data_2\n");
+ if (gpio_request(92, "sdc3_data_1"))
+ pr_err("failed to request gpio sdc3_data_1\n");
+ if (gpio_request(93, "sdc3_data_0"))
+ pr_err("failed to request gpio sdc3_data_0\n");
+
+}
+
+static unsigned sdcc_cfg_data[][6] = {
+ /* SDC1 configs */
+ {
+ GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ },
+ /* SDC2 configs */
+ {
+ GPIO_CFG(62, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(63, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(64, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(65, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(66, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(67, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ },
+ /* SDC3 configs */
+ {
+ GPIO_CFG(88, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(89, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(90, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(91, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(92, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(93, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ },
+};
+
+static void msm_sdcc_setup_gpio(int dev_id, unsigned int enable)
+{
+ int i, rc;
+
+ if (!(test_bit(dev_id, &gpio_sts)^enable))
+ return;
+
+ if (enable)
+ set_bit(dev_id, &gpio_sts);
+ else
+ clear_bit(dev_id, &gpio_sts);
+
+ for (i = 0; i < ARRAY_SIZE(sdcc_cfg_data[dev_id - 1]); i++) {
+ rc = gpio_tlmm_config(sdcc_cfg_data[dev_id - 1][i],
+ enable ? GPIO_ENABLE : GPIO_DISABLE);
+ if (rc)
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, sdcc_cfg_data[dev_id - 1][i], rc);
+ }
+}
+
+static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
+{
+ int rc = 0;
+ struct platform_device *pdev;
+
+ pdev = container_of(dv, struct platform_device, dev);
+ msm_sdcc_setup_gpio(pdev->id, !!vdd);
+
+ if (vdd == 0) {
+ if (!vreg_sts)
+ return 0;
+
+ clear_bit(pdev->id, &vreg_sts);
+
+ if (!vreg_sts && !gp6_enabled) {
+ rc = vreg_disable(vreg_mmc);
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ return 0;
+ }
+
+ if (!vreg_sts && !gp6_enabled) {
+ rc = vreg_set_level(vreg_mmc, 2850);
+ if (!rc)
+ rc = vreg_enable(vreg_mmc);
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ set_bit(pdev->id, &vreg_sts);
+ return 0;
+}
+
+static struct mmc_platform_data comet_sdcc_data = {
+ .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
+ .translate_vdd = msm_sdcc_setup_power,
+};
+
+static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].supported = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 8594,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].residency = 23740,
+
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].supported = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 4594,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].residency = 23740,
+
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].supported = 1,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].suspend_enabled
+ = 1,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].idle_enabled = 0,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 443,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].residency = 1098,
+
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].supported = 1,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].latency = 2,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].residency = 0,
+};
+
+static void __init comet_init(void)
+{
+ char __iomem *cpld_base;
+ int per_enable;
+ int ext_per_enable;
+
+ cpld_base = comet_cpld_base();
+
+ if (!cpld_base)
+ return;
+
+ cpld_version = (readw(cpld_base + COMET_CPLD_VERSION) &
+ COMET_CPLD_VERSION_MAJOR) >> 8;
+ if (cpld_version >= 2) {
+ cpld_info = &comet_cpld[1];
+ per_enable = cpld_info->per_enable_all;
+ wvga_present = (readw(cpld_base + COMET_CPLD_STATUS)
+ & COMET_CPLD_STATUS_WVGA) != 0;
+ wxga_present = !wvga_present;
+ ext_per_enable = cpld_info->ext_per_enable_all;
+ if (wvga_present)
+ per_enable |= COMET_CPLD_PER_ENABLE_WVGA;
+ else {
+ per_enable |= COMET_CPLD_PER_ENABLE_LVDS |
+ COMET_CPLD_PER_ENABLE_WXGA;
+ ext_per_enable |= COMET_CPLD_EXT_PER_ENABLE_WXGA;
+ }
+ writew(ext_per_enable,
+ cpld_base + COMET_CPLD_EXT_PER_ENABLE);
+ writew(cpld_info->i2c_enable,
+ cpld_base + COMET_CPLD_I2C_ENABLE);
+ writew(cpld_info->ext_per_reset_all_reset,
+ cpld_base + COMET_CPLD_EXT_PER_RESET);
+ } else {
+ cpld_info = &comet_cpld[0];
+ wvga_present = 1;
+ wxga_present = 0;
+ per_enable = cpld_info->per_enable_all;
+ smc911x_resources[0].start = 0x90000000;
+ smc911x_resources[0].end = 0x90000100;
+ }
+
+ writew(per_enable,
+ cpld_base + COMET_CPLD_PER_ENABLE);
+ writew(cpld_info->per_reset_all_reset,
+ cpld_base + COMET_CPLD_PER_RESET);
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ msm_pm_set_platform_data(msm_pm_data);
+}
+
+static void __init comet_map_io(void)
+{
+ msm_map_comet_io();
+ msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50);
+}
+
+MACHINE_START(QSD8X50_COMET, "QCT QSD8x50 Comet")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x0,
+ .map_io = comet_map_io,
+ .init_irq = comet_init_irq,
+ .init_machine = comet_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-halibut-keypad.c b/arch/arm/mach-msm/board-halibut-keypad.c
new file mode 100644
index 000000000000..49c1075627d3
--- /dev/null
+++ b/arch/arm/mach-msm/board-halibut-keypad.c
@@ -0,0 +1,177 @@
+/* linux/arch/arm/mach-msm/board-halibut-keypad.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach-types.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_event.h>
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "board_halibut."
+static int halibut_ffa;
+module_param_named(ffa, halibut_ffa, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define SCAN_FUNCTION_KEYS 0 /* don't turn this on without updating the ffa support */
+
+static unsigned int halibut_row_gpios[] = {
+ 31, 32, 33, 34, 35, 41
+#if SCAN_FUNCTION_KEYS
+ , 42
+#endif
+};
+
+static unsigned int halibut_col_gpios[] = { 36, 37, 38, 39, 40 };
+
+/* FFA:
+ 36: KEYSENSE_N(0)
+ 37: KEYSENSE_N(1)
+ 38: KEYSENSE_N(2)
+ 39: KEYSENSE_N(3)
+ 40: KEYSENSE_N(4)
+
+ 31: KYPD_17
+ 32: KYPD_15
+ 33: KYPD_13
+ 34: KYPD_11
+ 35: KYPD_9
+ 41: KYPD_MEMO
+*/
+
+#define KEYMAP_INDEX(row, col) ((row)*ARRAY_SIZE(halibut_col_gpios) + (col))
+
+static const unsigned short halibut_keymap[ARRAY_SIZE(halibut_col_gpios) * ARRAY_SIZE(halibut_row_gpios)] = {
+ [KEYMAP_INDEX(0, 0)] = KEY_5,
+ [KEYMAP_INDEX(0, 1)] = KEY_9,
+ [KEYMAP_INDEX(0, 2)] = 229, /* SOFT1 */
+ [KEYMAP_INDEX(0, 3)] = KEY_6,
+ [KEYMAP_INDEX(0, 4)] = KEY_LEFT,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_0,
+ [KEYMAP_INDEX(1, 1)] = KEY_RIGHT,
+ [KEYMAP_INDEX(1, 2)] = KEY_1,
+ [KEYMAP_INDEX(1, 3)] = 228, /* KEY_SHARP */
+ [KEYMAP_INDEX(1, 4)] = KEY_SEND,
+
+ [KEYMAP_INDEX(2, 0)] = KEY_VOLUMEUP,
+ [KEYMAP_INDEX(2, 1)] = KEY_HOME, /* FA */
+ [KEYMAP_INDEX(2, 2)] = KEY_F8, /* QCHT */
+ [KEYMAP_INDEX(2, 3)] = KEY_F6, /* R+ */
+ [KEYMAP_INDEX(2, 4)] = KEY_F7, /* R- */
+
+ [KEYMAP_INDEX(3, 0)] = KEY_UP,
+ [KEYMAP_INDEX(3, 1)] = KEY_CLEAR,
+ [KEYMAP_INDEX(3, 2)] = KEY_4,
+ [KEYMAP_INDEX(3, 3)] = KEY_MUTE, /* SPKR */
+ [KEYMAP_INDEX(3, 4)] = KEY_2,
+
+ [KEYMAP_INDEX(4, 0)] = 230, /* SOFT2 */
+ [KEYMAP_INDEX(4, 1)] = 232, /* KEY_CENTER */
+ [KEYMAP_INDEX(4, 2)] = KEY_DOWN,
+ [KEYMAP_INDEX(4, 3)] = KEY_BACK, /* FB */
+ [KEYMAP_INDEX(4, 4)] = KEY_8,
+
+ [KEYMAP_INDEX(5, 0)] = KEY_VOLUMEDOWN,
+ [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
+ [KEYMAP_INDEX(5, 2)] = KEY_MAIL, /* MESG */
+ [KEYMAP_INDEX(5, 3)] = KEY_3,
+ [KEYMAP_INDEX(5, 4)] = KEY_7,
+
+#if SCAN_FUNCTION_KEYS
+ [KEYMAP_INDEX(6, 0)] = KEY_F5,
+ [KEYMAP_INDEX(6, 1)] = KEY_F4,
+ [KEYMAP_INDEX(6, 2)] = KEY_F3,
+ [KEYMAP_INDEX(6, 3)] = KEY_F2,
+ [KEYMAP_INDEX(6, 4)] = KEY_F1
+#endif
+};
+
+static const unsigned short halibut_keymap_ffa[ARRAY_SIZE(halibut_col_gpios) * ARRAY_SIZE(halibut_row_gpios)] = {
+ /*[KEYMAP_INDEX(0, 0)] = ,*/
+ /*[KEYMAP_INDEX(0, 1)] = ,*/
+ [KEYMAP_INDEX(0, 2)] = KEY_1,
+ [KEYMAP_INDEX(0, 3)] = KEY_SEND,
+ [KEYMAP_INDEX(0, 4)] = KEY_LEFT,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_3,
+ [KEYMAP_INDEX(1, 1)] = KEY_RIGHT,
+ [KEYMAP_INDEX(1, 2)] = KEY_VOLUMEUP,
+ /*[KEYMAP_INDEX(1, 3)] = ,*/
+ [KEYMAP_INDEX(1, 4)] = KEY_6,
+
+ [KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */
+ [KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */
+ [KEYMAP_INDEX(2, 2)] = KEY_0,
+ [KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */
+ [KEYMAP_INDEX(2, 4)] = KEY_9,
+
+ [KEYMAP_INDEX(3, 0)] = KEY_UP,
+ [KEYMAP_INDEX(3, 1)] = 232, /* KEY_CENTER */ /* i */
+ [KEYMAP_INDEX(3, 2)] = KEY_4,
+ /*[KEYMAP_INDEX(3, 3)] = ,*/
+ [KEYMAP_INDEX(3, 4)] = KEY_2,
+
+ [KEYMAP_INDEX(4, 0)] = KEY_VOLUMEDOWN,
+ [KEYMAP_INDEX(4, 1)] = KEY_SOUND,
+ [KEYMAP_INDEX(4, 2)] = KEY_DOWN,
+ [KEYMAP_INDEX(4, 3)] = KEY_8,
+ [KEYMAP_INDEX(4, 4)] = KEY_5,
+
+ /*[KEYMAP_INDEX(5, 0)] = ,*/
+ [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
+ [KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */
+ [KEYMAP_INDEX(5, 3)] = KEY_MENU, /* 1 */
+ [KEYMAP_INDEX(5, 4)] = KEY_7,
+};
+
+static struct gpio_event_matrix_info halibut_matrix_info = {
+ .info.func = gpio_event_matrix_func,
+ .keymap = halibut_keymap,
+ .output_gpios = halibut_row_gpios,
+ .input_gpios = halibut_col_gpios,
+ .noutputs = ARRAY_SIZE(halibut_row_gpios),
+ .ninputs = ARRAY_SIZE(halibut_col_gpios),
+ .settle_time.tv.nsec = 0,
+ .poll_time.tv.nsec = 20 * NSEC_PER_MSEC,
+ .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE | GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/
+};
+
+struct gpio_event_info *halibut_keypad_info[] = {
+ &halibut_matrix_info.info
+};
+
+static struct gpio_event_platform_data halibut_keypad_data = {
+ .name = "halibut_keypad",
+ .info = halibut_keypad_info,
+ .info_count = ARRAY_SIZE(halibut_keypad_info)
+};
+
+static struct platform_device halibut_keypad_device = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &halibut_keypad_data,
+ },
+};
+
+static int __init halibut_init_keypad(void)
+{
+ if (!machine_is_halibut())
+ return 0;
+ if (halibut_ffa)
+ halibut_matrix_info.keymap = halibut_keymap_ffa;
+ return platform_device_register(&halibut_keypad_device);
+}
+
+device_initcall(halibut_init_keypad);
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index e61967dde9a1..537516723f16 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -1,6 +1,7 @@
/* linux/arch/arm/mach-msm/board-halibut.c
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -13,33 +14,64 @@
* GNU General Public License for more details.
*
*/
-
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/i2c.h>
+#include <linux/android_pmem.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
#include <mach/hardware.h>
+#include <mach/irqs.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
+#include <asm/setup.h>
+#include <asm/mach/mmc.h>
+#include <mach/vreg.h>
+#include <mach/mpp.h>
+#include <mach/gpio.h>
#include <mach/irqs.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
-
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
+#include <mach/msm_serial_hs.h>
+#include <mach/msm_hsusb.h>
+#include <mach/vreg.h>
+#include <mach/msm_rpcrouter.h>
+#include <mach/memory.h>
+#include <mach/camera.h>
#include "devices.h"
+#include "socinfo.h"
+#include "clock.h"
+#include "msm-keypad-devices.h"
+#include "pm.h"
+
+#ifdef CONFIG_MSM_STACKED_MEMORY
+#define MSM_SMI_BASE 0x100000
+#define MSM_SMI_SIZE 0x800000
+
+#define MSM_PMEM_GPU0_BASE MSM_SMI_BASE
+#define MSM_PMEM_GPU0_SIZE 0x800000
+#endif
+
+#define MSM_PMEM_MDP_SIZE 0x800000
+#define MSM_PMEM_CAMERA_SIZE 0xa00000
+#define MSM_PMEM_ADSP_SIZE 0x800000
+#define MSM_PMEM_GPU1_SIZE 0x800000
+#define MSM_FB_SIZE 0x200000
static struct resource smc91x_resources[] = {
[0] = {
.start = 0x9C004300,
- .end = 0x9C004400,
+ .end = 0x9C0043ff,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -49,6 +81,132 @@ static struct resource smc91x_resources[] = {
},
};
+#ifdef CONFIG_USB_FUNCTION
+static struct usb_mass_storage_platform_data usb_mass_storage_pdata = {
+ .nluns = 0x02,
+ .buf_size = 16384,
+ .vendor = "GOOGLE",
+ .product = "Mass storage",
+ .release = 0xffff,
+};
+
+static struct platform_device mass_storage_device = {
+ .name = "usb_mass_storage",
+ .id = -1,
+ .dev = {
+ .platform_data = &usb_mass_storage_pdata,
+ },
+};
+#endif
+
+#ifdef CONFIG_USB_ANDROID
+static struct android_usb_platform_data android_usb_pdata = {
+ .vendor_id = 0x05C6,
+ .product_id = 0xF000,
+ .adb_product_id = 0x9015,
+ .version = 0x0100,
+ .product_name = "Qualcomm HSUSB Device",
+ .manufacturer_name = "Qualcomm Incorporated",
+ .nluns = 1,
+};
+
+static struct platform_device android_usb_device = {
+ .name = "android_usb",
+ .id = -1,
+ .dev = {
+ .platform_data = &android_usb_pdata,
+ },
+};
+#endif
+
+#ifdef CONFIG_USB_FUNCTION
+static void hsusb_gpio_init(void)
+{
+ if (gpio_request(111, "ulpi_data_0"))
+ pr_err("failed to request gpio ulpi_data_0\n");
+ if (gpio_request(112, "ulpi_data_1"))
+ pr_err("failed to request gpio ulpi_data_1\n");
+ if (gpio_request(113, "ulpi_data_2"))
+ pr_err("failed to request gpio ulpi_data_2\n");
+ if (gpio_request(114, "ulpi_data_3"))
+ pr_err("failed to request gpio ulpi_data_3\n");
+ if (gpio_request(115, "ulpi_data_4"))
+ pr_err("failed to request gpio ulpi_data_4\n");
+ if (gpio_request(116, "ulpi_data_5"))
+ pr_err("failed to request gpio ulpi_data_5\n");
+ if (gpio_request(117, "ulpi_data_6"))
+ pr_err("failed to request gpio ulpi_data_6\n");
+ if (gpio_request(118, "ulpi_data_7"))
+ pr_err("failed to request gpio ulpi_data_7\n");
+ if (gpio_request(119, "ulpi_dir"))
+ pr_err("failed to request gpio ulpi_dir\n");
+ if (gpio_request(120, "ulpi_next"))
+ pr_err("failed to request gpio ulpi_next\n");
+ if (gpio_request(121, "ulpi_stop"))
+ pr_err("failed to request gpio ulpi_stop\n");
+}
+
+static unsigned usb_gpio_lpm_config[] = {
+ GPIO_CFG(111, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 0 */
+ GPIO_CFG(112, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 1 */
+ GPIO_CFG(113, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 2 */
+ GPIO_CFG(114, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 3 */
+ GPIO_CFG(115, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 4 */
+ GPIO_CFG(116, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 5 */
+ GPIO_CFG(117, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 6 */
+ GPIO_CFG(118, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DATA 7 */
+ GPIO_CFG(119, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* DIR */
+ GPIO_CFG(120, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* NEXT */
+ GPIO_CFG(121, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* STOP */
+};
+
+static unsigned usb_gpio_lpm_unconfig[] = {
+ GPIO_CFG(111, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 0 */
+ GPIO_CFG(112, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 1 */
+ GPIO_CFG(113, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 2 */
+ GPIO_CFG(114, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 3 */
+ GPIO_CFG(115, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 4 */
+ GPIO_CFG(116, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 5 */
+ GPIO_CFG(117, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 6 */
+ GPIO_CFG(118, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DATA 7 */
+ GPIO_CFG(119, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DIR */
+ GPIO_CFG(120, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* NEXT */
+ GPIO_CFG(121, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_2MA), /* STOP */
+};
+
+static int usb_config_gpio(int config)
+{
+ int pin, rc;
+
+ if (config) {
+ for (pin = 0; pin < ARRAY_SIZE(usb_gpio_lpm_config); pin++) {
+ rc = gpio_tlmm_config(usb_gpio_lpm_config[pin],
+ GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, usb_gpio_lpm_config[pin], rc);
+ return -EIO;
+ }
+ }
+ } else {
+ for (pin = 0; pin < ARRAY_SIZE(usb_gpio_lpm_unconfig); pin++) {
+ rc = gpio_tlmm_config(usb_gpio_lpm_unconfig[pin],
+ GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, usb_gpio_lpm_config[pin], rc);
+ return -EIO;
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
+
+
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
@@ -57,33 +215,565 @@ static struct platform_device smc91x_device = {
};
static struct platform_device *devices[] __initdata = {
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER)
&msm_device_uart3,
+#endif
+ &msm_device_uart_dm1,
&msm_device_smd,
+ &msm_device_dmov,
&msm_device_nand,
- &msm_device_hsusb,
&msm_device_i2c,
&smc91x_device,
+ &msm_device_tssc,
+ &android_pmem_camera_device,
+ &android_pmem_device,
+ &android_pmem_adsp_device,
+#ifdef CONFIG_MSM_STACKED_MEMORY
+ &android_pmem_gpu0_device,
+#endif
+ &android_pmem_gpu1_device,
+ &msm_device_hsusb_otg,
+ &msm_device_hsusb_host,
+#if defined(CONFIG_USB_FUNCTION) || defined(CONFIG_USB_ANDROID)
+ &msm_device_hsusb_peripheral,
+#endif
+#ifdef CONFIG_USB_FUNCTION
+ &mass_storage_device,
+#endif
+#ifdef CONFIG_USB_ANDROID
+ &android_usb_device,
+#endif
+
+#ifdef CONFIG_BT
+ &msm_bt_power_device,
+#endif
+ &halibut_snd,
+ &msm_bluesleep_device,
+ &msm_fb_device,
+ &mddi_toshiba_device,
+ &mddi_sharp_device,
};
extern struct sys_timer msm_timer;
+static struct i2c_board_info i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("mt9d112", 0x78 >> 1),
+ },
+ {
+ I2C_BOARD_INFO("s5k3e2fx", 0x20 >> 1),
+ },
+ {
+ I2C_BOARD_INFO("mt9p012", 0x6C >> 1),
+ },
+ {
+ I2C_BOARD_INFO("mt9t013", 0x6C),
+ },
+};
+
+static uint32_t camera_off_gpio_table[] = {
+ /* parallel CAMERA interfaces */
+ GPIO_CFG(0, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */
+ GPIO_CFG(1, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */
+ GPIO_CFG(2, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */
+ GPIO_CFG(3, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */
+ GPIO_CFG(4, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */
+ GPIO_CFG(5, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */
+ GPIO_CFG(6, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */
+ GPIO_CFG(7, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */
+ GPIO_CFG(8, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */
+ GPIO_CFG(9, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */
+ GPIO_CFG(10, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */
+ GPIO_CFG(11, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */
+ GPIO_CFG(12, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* PCLK */
+ GPIO_CFG(13, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */
+ GPIO_CFG(14, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */
+ GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* MCLK */
+};
+
+static uint32_t camera_on_gpio_table[] = {
+ /* parallel CAMERA interfaces */
+ GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */
+ GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */
+ GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */
+ GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */
+ GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */
+ GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */
+ GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */
+ GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */
+ GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */
+ GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */
+ GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */
+ GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */
+ GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */
+ GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */
+ GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */
+ GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */
+};
+
+static void config_gpio_table(uint32_t *table, int len)
+{
+ int n, rc;
+ for (n = 0; n < len; n++) {
+ rc = gpio_tlmm_config(table[n], GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, table[n], rc);
+ break;
+ }
+ }
+}
+
+static void config_camera_on_gpios(void)
+{
+ config_gpio_table(camera_on_gpio_table,
+ ARRAY_SIZE(camera_on_gpio_table));
+}
+
+static void config_camera_off_gpios(void)
+{
+ config_gpio_table(camera_off_gpio_table,
+ ARRAY_SIZE(camera_off_gpio_table));
+}
+
+#define MSM_PROBE_INIT(name) name##_probe_init
+static struct msm_camera_sensor_info msm_camera_sensor[] = {
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 0,
+ .sensor_name = "mt9d112",
+ .flash_type = MSM_CAMERA_FLASH_NONE,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(mt9d112),
+#endif
+ },
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 0,
+ .sensor_name = "s5k3e2fx",
+ .flash_type = MSM_CAMERA_FLASH_NONE,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(s5k3e2fx),
+#endif
+ },
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 88,
+ .sensor_name = "mt9p012",
+ .flash_type = MSM_CAMERA_FLASH_LED,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(mt9p012),
+#endif
+ },
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 0,
+ .sensor_name = "mt9t013",
+ .flash_type = MSM_CAMERA_FLASH_NONE,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(mt9t013),
+#endif
+ },
+};
+#undef MSM_PROBE_INIT
+
+static struct msm_camera_device_platform_data msm_camera_device_data = {
+ .camera_gpio_on = config_camera_on_gpios,
+ .camera_gpio_off = config_camera_off_gpios,
+ .snum = ARRAY_SIZE(msm_camera_sensor),
+ .sinfo = &msm_camera_sensor[0],
+ .ioext.mdcphy = MSM_MDC_PHYS,
+ .ioext.mdcsz = MSM_MDC_SIZE,
+ .ioext.appphy = MSM_CLK_CTL_PHYS,
+ .ioext.appsz = MSM_CLK_CTL_SIZE,
+};
+
+static void __init msm_camera_add_device(void)
+{
+ msm_camera_register_device(NULL, 0, &msm_camera_device_data);
+ config_camera_off_gpios();
+}
+
static void __init halibut_init_irq(void)
{
msm_init_irq();
}
+static struct msm_acpu_clock_platform_data halibut_clock_data = {
+ .acpu_switch_time_us = 50,
+ .max_speed_delta_khz = 256000,
+ .vdd_switch_time_us = 62,
+ .power_collapse_khz = 19200000,
+ .wait_for_irq_khz = 128000000,
+ .max_axi_khz = 128000,
+};
+
+void msm_serial_debug_init(unsigned int base, int irq,
+ struct device *clk_device, int signal_irq);
+static void sdcc_gpio_init(void)
+{
+#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
+ int rc = 0;
+ if (gpio_request(49, "sdc1_status_irq"))
+ pr_err("failed to request gpio sdc1_status_irq\n");
+ rc = gpio_tlmm_config(GPIO_CFG(49, 0, GPIO_INPUT, GPIO_PULL_UP,
+ GPIO_2MA), GPIO_ENABLE);
+ if (rc)
+ printk(KERN_ERR "%s: Failed to configure GPIO %d\n",
+ __func__, rc);
+#endif
+ /* SDC1 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
+ if (gpio_request(51, "sdc1_data_3"))
+ pr_err("failed to request gpio sdc1_data_3\n");
+ if (gpio_request(52, "sdc1_data_2"))
+ pr_err("failed to request gpio sdc1_data_2\n");
+ if (gpio_request(53, "sdc1_data_1"))
+ pr_err("failed to request gpio sdc1_data_1\n");
+ if (gpio_request(54, "sdc1_data_0"))
+ pr_err("failed to request gpio sdc1_data_0\n");
+ if (gpio_request(55, "sdc1_cmd"))
+ pr_err("failed to request gpio sdc1_cmd\n");
+ if (gpio_request(56, "sdc1_clk"))
+ pr_err("failed to request gpio sdc1_clk\n");
+#endif
+
+ if (machine_is_msm7201a_ffa())
+ return;
+
+ /* SDC2 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+ if (gpio_request(62, "sdc2_clk"))
+ pr_err("failed to request gpio sdc2_clk\n");
+ if (gpio_request(63, "sdc2_cmd"))
+ pr_err("failed to request gpio sdc2_cmd\n");
+ if (gpio_request(64, "sdc2_data_3"))
+ pr_err("failed to request gpio sdc2_data_3\n");
+ if (gpio_request(65, "sdc2_data_2"))
+ pr_err("failed to request gpio sdc2_data_2\n");
+ if (gpio_request(66, "sdc2_data_1"))
+ pr_err("failed to request gpio sdc2_data_1\n");
+ if (gpio_request(67, "sdc2_data_0"))
+ pr_err("failed to request gpio sdc2_data_0\n");
+#endif
+
+ /* SDC4 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
+ if (gpio_request(19, "sdc4_data_3"))
+ pr_err("failed to request gpio sdc4_data_3\n");
+ if (gpio_request(20, "sdc4_data_2"))
+ pr_err("failed to request gpio sdc4_data_2\n");
+ if (gpio_request(21, "sdc4_data_1"))
+ pr_err("failed to request gpio sdc4_data_1\n");
+ if (gpio_request(107, "sdc4_cmd"))
+ pr_err("failed to request gpio sdc4_cmd\n");
+ if (gpio_request(108, "sdc4_data_0"))
+ pr_err("failed to request gpio sdc4_data_0\n");
+ if (gpio_request(109, "sdc4_clk"))
+ pr_err("failed to request gpio sdc4_clk\n");
+#endif
+}
+
+static unsigned sdcc_cfg_data[][6] = {
+ /* SDC1 configs */
+ {
+ GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ },
+ /* SDC2 configs */
+ {
+ GPIO_CFG(62, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(63, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(64, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(65, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(66, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(67, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ },
+ {
+ /* SDC3 configs */
+ },
+ /* SDC4 configs */
+ {
+ GPIO_CFG(19, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(20, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(21, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(107, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(108, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(109, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ }
+};
+
+static unsigned long vreg_sts, gpio_sts;
+static struct mpp *mpp_mmc;
+static struct vreg *vreg_mmc;
+
+static void msm_sdcc_setup_gpio(int dev_id, unsigned int enable)
+{
+ int i, rc;
+
+ if (!(test_bit(dev_id, &gpio_sts)^enable))
+ return;
+
+ if (enable)
+ set_bit(dev_id, &gpio_sts);
+ else
+ clear_bit(dev_id, &gpio_sts);
+
+ for (i = 0; i < ARRAY_SIZE(sdcc_cfg_data[dev_id - 1]); i++) {
+ rc = gpio_tlmm_config(sdcc_cfg_data[dev_id - 1][i],
+ enable ? GPIO_ENABLE : GPIO_DISABLE);
+ if (rc) {
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, sdcc_cfg_data[dev_id - 1][i], rc);
+ }
+ }
+}
+
+static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
+{
+ int rc = 0;
+ struct platform_device *pdev;
+
+ pdev = container_of(dv, struct platform_device, dev);
+ msm_sdcc_setup_gpio(pdev->id, !!vdd);
+
+ if (vdd == 0) {
+ if (!vreg_sts)
+ return 0;
+
+ clear_bit(pdev->id, &vreg_sts);
+
+ if (!vreg_sts) {
+ if (machine_is_msm7201a_ffa())
+ rc = mpp_config_digital_out(mpp_mmc,
+ MPP_CFG(MPP_DLOGIC_LVL_MSMP,
+ MPP_DLOGIC_OUT_CTRL_LOW));
+ else
+ rc = vreg_disable(vreg_mmc);
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ return 0;
+ }
+
+ if (!vreg_sts) {
+ if (machine_is_msm7201a_ffa())
+ rc = mpp_config_digital_out(mpp_mmc,
+ MPP_CFG(MPP_DLOGIC_LVL_MSMP,
+ MPP_DLOGIC_OUT_CTRL_HIGH));
+ else {
+ rc = vreg_set_level(vreg_mmc, 2850);
+ if (!rc)
+ rc = vreg_enable(vreg_mmc);
+ }
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ set_bit(pdev->id, &vreg_sts);
+ return 0;
+}
+
+#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
+static unsigned int halibut_sdcc_slot_status(struct device *dev)
+{
+ return (unsinged int) gpio_get_value(49);
+}
+#endif
+
+static struct mmc_platform_data halibut_sdcc_data = {
+ .ocr_mask = MMC_VDD_28_29,
+ .translate_vdd = msm_sdcc_setup_power,
+#ifdef CONFIG_MMC_MSM_CARD_HW_DETECTION
+ .status = halibut_sdcc_slot_status,
+ .status_irq = MSM_GPIO_TO_INT(49),
+ .irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+#endif
+};
+
+static void __init halibut_init_mmc(void)
+{
+ if (machine_is_msm7201a_ffa()) {
+ mpp_mmc = mpp_get(NULL, "mpp3");
+ if (!mpp_mmc) {
+ printk(KERN_ERR "%s: mpp get failed (%ld)\n",
+ __func__, PTR_ERR(vreg_mmc));
+ return;
+ }
+ } else {
+ vreg_mmc = vreg_get(NULL, "mmc");
+ if (IS_ERR(vreg_mmc)) {
+ printk(KERN_ERR "%s: vreg get failed (%ld)\n",
+ __func__, PTR_ERR(vreg_mmc));
+ return;
+ }
+ }
+
+ sdcc_gpio_init();
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
+ msm_add_sdcc(1, &halibut_sdcc_data);
+#endif
+
+ if (machine_is_msm7201a_surf()) {
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+ msm_add_sdcc(2, &halibut_sdcc_data);
+#endif
+#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
+ msm_add_sdcc(4, &halibut_sdcc_data);
+#endif
+ }
+}
+
+static struct msm_panel_common_pdata mdp_pdata = {
+ .gpio = 97,
+};
+
+static void __init msm_fb_add_devices(void)
+{
+ msm_fb_register_device("mdp", &mdp_pdata);
+ msm_fb_register_device("ebi2", 0);
+ msm_fb_register_device("pmdh", &mddi_pdata);
+ msm_fb_register_device("emdh", 0);
+ msm_fb_register_device("tvenc", &tvenc_pdata);
+}
+
+static struct msm_i2c_platform_data msm_i2c_pdata = {
+ .clk_freq = 100000,
+};
+
+static void __init msm_device_i2c_init(void)
+{
+ msm_device_i2c.dev.platform_data = &msm_i2c_pdata;
+}
+
+static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 16000,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 12000,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 2000,
+};
+
static void __init halibut_init(void)
{
+ if (socinfo_init() < 0)
+ BUG();
+
+ if (machine_is_msm7201a_ffa()) {
+ smc91x_resources[0].start = 0x98000300;
+ smc91x_resources[0].end = 0x980003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(85);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(85);
+ }
+
+ /* All 7x01 2.0 based boards are expected to have RAM chips capable
+ * of 160 MHz. */
+ if (cpu_is_msm7x01()
+ && SOCINFO_VERSION_MAJOR(socinfo_get_version()) == 2)
+ halibut_clock_data.max_axi_khz = 160000;
+
+ msm_acpu_clock_init(&halibut_clock_data);
+
+#if defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3,
+ &msm_device_uart3.dev, 1);
+#endif
+ msm_hsusb_pdata.soc_version = socinfo_get_version();
+ msm_acpu_clock_init(&halibut_clock_data);
+ msm_device_hsusb_peripheral.dev.platform_data = &msm_hsusb_pdata,
+ msm_device_hsusb_host.dev.platform_data = &msm_hsusb_pdata,
platform_add_devices(devices, ARRAY_SIZE(devices));
+ halibut_init_mmc();
+ msm_pm_set_platform_data(msm_pm_data);
+}
+
+static void __init msm_halibut_allocate_memory_regions(void)
+{
+ void *addr;
+ unsigned long size;
+
+ size = MSM_PMEM_MDP_SIZE;
+ addr = alloc_bootmem(size);
+ android_pmem_pdata.start = __pa(addr);
+ android_pmem_pdata.size = size;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical)"
+ "for pmem\n", size, addr, __pa(addr));
+
+ size = MSM_PMEM_CAMERA_SIZE;
+ addr = alloc_bootmem(size);
+ android_pmem_camera_pdata.start = __pa(addr);
+ android_pmem_camera_pdata.size = size;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical)"
+ "for camera pmem\n", size, addr, __pa(addr));
+
+ size = MSM_PMEM_ADSP_SIZE;
+ addr = alloc_bootmem(size);
+ android_pmem_adsp_pdata.start = __pa(addr);
+ android_pmem_adsp_pdata.size = size;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical)"
+ "for adsp pmem\n", size, addr, __pa(addr));
+
+ size = MSM_PMEM_GPU1_SIZE;
+ addr = alloc_bootmem_aligned(size, 0x100000);
+ android_pmem_gpu1_pdata.start = __pa(addr);
+ android_pmem_gpu1_pdata.size = size;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical)"
+ "for gpu1 pmem\n", size, addr, __pa(addr));
+
+ size = MSM_FB_SIZE;
+ addr = alloc_bootmem(size);
+ msm_fb_resources[0].start = __pa(addr);
+ msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical) for fb\n",
+ size, addr, __pa(addr));
+
}
static void __init halibut_map_io(void)
{
+ msm_shared_ram_phys = 0x01F00000;
+
msm_map_common_io();
- msm_clock_init();
+ msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
+ msm_halibut_allocate_memory_regions();
}
MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x10000100,
+ .map_io = halibut_map_io,
+ .init_irq = halibut_init_irq,
+ .init_machine = halibut_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7201A_FFA, "QCT FFA7201A Board")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x10000100,
+ .map_io = halibut_map_io,
+ .init_irq = halibut_init_irq,
+ .init_machine = halibut_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7201A_SURF, "QCT SURF7201A Board")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
.boot_params = 0x10000100,
.map_io = halibut_map_io,
.init_irq = halibut_init_irq,
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
new file mode 100644
index 000000000000..32cdf4cabc72
--- /dev/null
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -0,0 +1,1195 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/bootmem.h>
+#include <linux/usb/mass_storage_function.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+#include <asm/setup.h>
+#ifdef CONFIG_CACHE_L2X0
+#include <asm/hardware/cache-l2x0.h>
+#endif
+
+#include <asm/mach/mmc.h>
+#include <mach/vreg.h>
+#include <mach/mpp.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/msm_iomap.h>
+#include <mach/msm_rpcrouter.h>
+#include <mach/msm_hsusb.h>
+#include <mach/msm_serial_hs.h>
+#include <mach/memory.h>
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/i2c.h>
+#include <linux/android_pmem.h>
+#include <mach/camera.h>
+
+#include "devices.h"
+#include "socinfo.h"
+#include "clock.h"
+#include "msm-keypad-devices.h"
+#include "pm.h"
+
+#define MSM_PMEM_MDP_SIZE 0x800000
+#define MSM_PMEM_ADSP_SIZE 0x800000
+#define MSM_PMEM_GPU1_SIZE 0x800000
+#define MSM_FB_SIZE 0x200000
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = 0x9C004300,
+ .end = 0x9C0043ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MSM_GPIO_TO_INT(132),
+ .end = MSM_GPIO_TO_INT(132),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct usb_mass_storage_platform_data usb_mass_storage_pdata = {
+ .nluns = 0x02,
+ .buf_size = 16384,
+ .vendor = "GOOGLE",
+ .product = "Mass storage",
+ .release = 0xffff,
+};
+
+static struct platform_device mass_storage_device = {
+ .name = "usb_mass_storage",
+ .id = -1,
+ .dev = {
+ .platform_data = &usb_mass_storage_pdata,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static struct usb_function_map usb_functions_map[] = {
+ {"diag", 0},
+ {"adb", 1},
+ {"modem", 2},
+ {"nmea", 3},
+ {"mass_storage", 4},
+ {"ethernet", 5},
+};
+
+/* dynamic composition */
+static struct usb_composition usb_func_composition[] = {
+ {
+ .product_id = 0x9012,
+ .functions = 0x5, /* 0101 */
+ },
+
+ {
+ .product_id = 0x9013,
+ .functions = 0x15, /* 10101 */
+ },
+
+ {
+ .product_id = 0x9014,
+ .functions = 0x30, /* 110000 */
+ },
+
+ {
+ .product_id = 0x9016,
+ .functions = 0xD, /* 01101 */
+ },
+
+ {
+ .product_id = 0x9017,
+ .functions = 0x1D, /* 11101 */
+ },
+
+ {
+ .product_id = 0xF000,
+ .functions = 0x10, /* 10000 */
+ },
+
+ {
+ .product_id = 0xF009,
+ .functions = 0x20, /* 100000 */
+ },
+
+ {
+ .product_id = 0x9018,
+ .functions = 0x1F, /* 011111 */
+ },
+
+};
+
+static struct msm_hsusb_platform_data msm_hsusb_pdata = {
+ .version = 0x0100,
+ .phy_info = (USB_PHY_INTEGRATED | USB_PHY_MODEL_65NM),
+ .vendor_id = 0x5c6,
+ .product_name = "Qualcomm HSUSB Device",
+ .serial_number = "1234567890ABCDEF",
+ .manufacturer_name = "Qualcomm Incorporated",
+ .compositions = usb_func_composition,
+ .num_compositions = ARRAY_SIZE(usb_func_composition),
+ .function_map = usb_functions_map,
+ .num_functions = ARRAY_SIZE(usb_functions_map),
+};
+
+#define SND(desc, num) { .name = #desc, .id = num }
+static struct snd_endpoint snd_endpoints_list[] = {
+ SND(HANDSET, 0),
+ SND(MONO_HEADSET, 2),
+ SND(HEADSET, 3),
+ SND(SPEAKER, 6),
+ SND(TTY_HEADSET, 8),
+ SND(TTY_VCO, 9),
+ SND(TTY_HCO, 10),
+ SND(BT, 12),
+ SND(IN_S_SADC_OUT_HANDSET, 16),
+ SND(IN_S_SADC_OUT_SPEAKER_PHONE, 25),
+ SND(CURRENT, 27),
+};
+#undef SND
+
+static struct msm_snd_endpoints msm_device_snd_endpoints = {
+ .endpoints = snd_endpoints_list,
+ .num = sizeof(snd_endpoints_list) / sizeof(struct snd_endpoint)
+};
+
+static struct platform_device msm_device_snd = {
+ .name = "msm_snd",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm_device_snd_endpoints
+ },
+};
+
+#define DEC0_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \
+ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \
+ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \
+ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \
+ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \
+ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP))
+#define DEC1_FORMAT ((1<<MSM_ADSP_CODEC_WAV)|(1<<MSM_ADSP_CODEC_ADPCM)| \
+ (1<<MSM_ADSP_CODEC_YADPCM)|(1<<MSM_ADSP_CODEC_QCELP)| \
+ (1<<MSM_ADSP_CODEC_MP3))
+#define DEC2_FORMAT ((1<<MSM_ADSP_CODEC_WAV)|(1<<MSM_ADSP_CODEC_ADPCM)| \
+ (1<<MSM_ADSP_CODEC_YADPCM)|(1<<MSM_ADSP_CODEC_QCELP)| \
+ (1<<MSM_ADSP_CODEC_MP3))
+#define DEC3_FORMAT ((1<<MSM_ADSP_CODEC_WAV)|(1<<MSM_ADSP_CODEC_ADPCM)| \
+ (1<<MSM_ADSP_CODEC_YADPCM)|(1<<MSM_ADSP_CODEC_QCELP))
+#define DEC4_FORMAT (1<<MSM_ADSP_CODEC_MIDI)
+
+static unsigned int dec_concurrency_table[] = {
+ /* Audio LP */
+ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DMA)), 0,
+ 0, 0, 0,
+
+ /* Concurrency 1 */
+ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC4_FORMAT),
+
+ /* Concurrency 2 */
+ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC4_FORMAT),
+
+ /* Concurrency 3 */
+ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC4_FORMAT),
+
+ /* Concurrency 4 */
+ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC4_FORMAT),
+
+ /* Concurrency 5 */
+ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC4_FORMAT),
+
+ /* Concurrency 6 */
+ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)),
+ (DEC4_FORMAT),
+};
+
+#define DEC_INFO(name, queueid, decid, nr_codec) { .module_name = name, \
+ .module_queueid = queueid, .module_decid = decid, \
+ .nr_codec_support = nr_codec}
+
+static struct msm_adspdec_info dec_info_list[] = {
+ DEC_INFO("AUDPLAY0TASK", 13, 0, 11), /* AudPlay0BitStreamCtrlQueue */
+ DEC_INFO("AUDPLAY1TASK", 14, 1, 5), /* AudPlay1BitStreamCtrlQueue */
+ DEC_INFO("AUDPLAY2TASK", 15, 2, 5), /* AudPlay2BitStreamCtrlQueue */
+ DEC_INFO("AUDPLAY3TASK", 16, 3, 4), /* AudPlay3BitStreamCtrlQueue */
+ DEC_INFO("AUDPLAY4TASK", 17, 4, 1), /* AudPlay4BitStreamCtrlQueue */
+};
+
+static struct msm_adspdec_database msm_device_adspdec_database = {
+ .num_dec = ARRAY_SIZE(dec_info_list),
+ .num_concurrency_support = (ARRAY_SIZE(dec_concurrency_table) / \
+ ARRAY_SIZE(dec_info_list)),
+ .dec_concurrency_table = dec_concurrency_table,
+ .dec_info_list = dec_info_list,
+};
+
+static struct platform_device msm_device_adspdec = {
+ .name = "msm_adspdec",
+ .id = -1,
+ .dev = {
+ .platform_data = &msm_device_adspdec_database
+ },
+};
+
+static struct android_pmem_platform_data android_pmem_pdata = {
+ .name = "pmem",
+ .no_allocator = 0,
+ .cached = 1,
+};
+
+static struct android_pmem_platform_data android_pmem_adsp_pdata = {
+ .name = "pmem_adsp",
+ .no_allocator = 0,
+ .cached = 0,
+};
+
+static struct android_pmem_platform_data android_pmem_gpu1_pdata = {
+ .name = "pmem_gpu1",
+ .no_allocator = 1,
+ .cached = 0,
+};
+
+static struct platform_device android_pmem_device = {
+ .name = "android_pmem",
+ .id = 0,
+ .dev = { .platform_data = &android_pmem_pdata },
+};
+
+static struct platform_device android_pmem_adsp_device = {
+ .name = "android_pmem",
+ .id = 1,
+ .dev = { .platform_data = &android_pmem_adsp_pdata },
+};
+
+static struct platform_device android_pmem_gpu1_device = {
+ .name = "android_pmem",
+ .id = 3,
+ .dev = { .platform_data = &android_pmem_gpu1_pdata },
+};
+
+#define LCDC_CONFIG_PROC 21
+#define LCDC_UN_CONFIG_PROC 22
+#define LCDC_API_PROG 0x30000066
+#define LCDC_API_VERS 0x00010001
+
+#define GPIO_OUT_132 132
+#define GPIO_OUT_131 131
+#define GPIO_OUT_103 103
+#define GPIO_OUT_102 102
+#define GPIO_OUT_88 88
+
+static struct msm_rpc_endpoint *lcdc_ep;
+
+static int msm_fb_lcdc_config(int on)
+{
+ int rc = 0;
+ struct rpc_request_hdr hdr;
+
+ if (on)
+ printk(KERN_INFO "lcdc config\n");
+ else
+ printk(KERN_INFO "lcdc un-config\n");
+
+ lcdc_ep = msm_rpc_connect_compatible(LCDC_API_PROG, LCDC_API_VERS, 0);
+ if (IS_ERR(lcdc_ep)) {
+ printk(KERN_ERR "%s: msm_rpc_connect failed! rc = %ld\n",
+ __func__, PTR_ERR(lcdc_ep));
+ return -EINVAL;
+ }
+
+ rc = msm_rpc_call(lcdc_ep,
+ (on) ? LCDC_CONFIG_PROC : LCDC_UN_CONFIG_PROC,
+ &hdr, sizeof(hdr),
+ 5 * HZ);
+ if (rc)
+ printk(KERN_ERR
+ "%s: msm_rpc_call failed! rc = %d\n", __func__, rc);
+
+ msm_rpc_close(lcdc_ep);
+ return rc;
+}
+
+static int gpio_array_num[] = {
+ GPIO_OUT_132, /* spi_clk */
+ GPIO_OUT_131, /* spi_cs */
+ GPIO_OUT_103, /* spi_sdi */
+ GPIO_OUT_102, /* spi_sdoi */
+ GPIO_OUT_88
+ };
+
+static void lcdc_gordon_gpio_init(void)
+{
+ if (gpio_request(GPIO_OUT_132, "spi_clk"))
+ pr_err("failed to request gpio spi_clk\n");
+ if (gpio_request(GPIO_OUT_131, "spi_cs"))
+ pr_err("failed to request gpio spi_cs\n");
+ if (gpio_request(GPIO_OUT_103, "spi_sdi"))
+ pr_err("failed to request gpio spi_sdi\n");
+ if (gpio_request(GPIO_OUT_102, "spi_sdoi"))
+ pr_err("failed to request gpio spi_sdoi\n");
+ if (gpio_request(GPIO_OUT_88, "gpio_dac"))
+ pr_err("failed to request gpio_dac\n");
+}
+
+static uint32_t lcdc_gpio_table[] = {
+ GPIO_CFG(GPIO_OUT_132, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
+ GPIO_CFG(GPIO_OUT_131, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
+ GPIO_CFG(GPIO_OUT_103, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
+ GPIO_CFG(GPIO_OUT_102, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
+ GPIO_CFG(GPIO_OUT_88, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
+};
+
+static void config_lcdc_gpio_table(uint32_t *table, int len, unsigned enable)
+{
+ int n, rc;
+ for (n = 0; n < len; n++) {
+ rc = gpio_tlmm_config(table[n],
+ enable ? GPIO_ENABLE : GPIO_DISABLE);
+ if (rc) {
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, table[n], rc);
+ break;
+ }
+ }
+}
+
+static void lcdc_gordon_config_gpios(int enable)
+{
+ config_lcdc_gpio_table(lcdc_gpio_table,
+ ARRAY_SIZE(lcdc_gpio_table), enable);
+}
+
+static struct lcdc_platform_data lcdc_pdata = {
+ .lcdc_gpio_config = msm_fb_lcdc_config
+};
+
+static struct msm_panel_common_pdata lcdc_gordon_panel_data = {
+ .panel_config_gpio = lcdc_gordon_config_gpios,
+ .gpio_num = gpio_array_num,
+};
+
+static struct platform_device lcdc_gordon_panel_device = {
+ .name = "lcdc_gordon_vga",
+ .id = 0,
+ .dev = {
+ .platform_data = &lcdc_gordon_panel_data,
+ }
+};
+
+static struct resource msm_fb_resources[] = {
+ {
+ .flags = IORESOURCE_DMA,
+ }
+};
+
+static int msm_fb_detect_panel(const char *name)
+{
+ int ret = -EPERM;
+
+ if (machine_is_msm7x27_ffa() || machine_is_msm7x27_ffa()) {
+ if (!strcmp(name, "lcdc_gordon_vga"))
+ ret = 0;
+ else
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static struct msm_fb_platform_data msm_fb_pdata = {
+ .detect_client = msm_fb_detect_panel,
+};
+
+static struct platform_device msm_fb_device = {
+ .name = "msm_fb",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(msm_fb_resources),
+ .resource = msm_fb_resources,
+ .dev = {
+ .platform_data = &msm_fb_pdata,
+ }
+};
+
+#ifdef CONFIG_BT
+static struct platform_device msm_bt_power_device = {
+ .name = "bt_power",
+};
+
+enum {
+ BT_WAKE,
+ BT_RFR,
+ BT_CTS,
+ BT_RX,
+ BT_TX,
+ BT_PCM_DOUT,
+ BT_PCM_DIN,
+ BT_PCM_SYNC,
+ BT_PCM_CLK,
+ BT_HOST_WAKE,
+};
+
+static unsigned bt_config_power_on[] = {
+ GPIO_CFG(42, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* WAKE */
+ GPIO_CFG(43, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* RFR */
+ GPIO_CFG(44, 2, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* CTS */
+ GPIO_CFG(45, 2, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* Rx */
+ GPIO_CFG(46, 3, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* Tx */
+ GPIO_CFG(68, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* PCM_DOUT */
+ GPIO_CFG(69, 1, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* PCM_DIN */
+ GPIO_CFG(70, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* PCM_SYNC */
+ GPIO_CFG(71, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* PCM_CLK */
+ GPIO_CFG(83, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA), /* HOST_WAKE */
+};
+static unsigned bt_config_power_off[] = {
+ GPIO_CFG(42, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* WAKE */
+ GPIO_CFG(43, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* RFR */
+ GPIO_CFG(44, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* CTS */
+ GPIO_CFG(45, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* Rx */
+ GPIO_CFG(46, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* Tx */
+ GPIO_CFG(68, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* PCM_DOUT */
+ GPIO_CFG(69, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* PCM_DIN */
+ GPIO_CFG(70, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* PCM_SYNC */
+ GPIO_CFG(71, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* PCM_CLK */
+ GPIO_CFG(83, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HOST_WAKE */
+};
+
+static int bluetooth_power(int on)
+{
+ struct vreg *vreg_bt;
+ int pin, rc;
+
+ printk(KERN_DEBUG "%s\n", __func__);
+
+ /* do not have vreg bt defined, gp6 is the same */
+ /* vreg_get parameter 1 (struct device *) is ignored */
+ vreg_bt = vreg_get(NULL, "gp6");
+
+ if (IS_ERR(vreg_bt)) {
+ printk(KERN_ERR "%s: vreg get failed (%ld)\n",
+ __func__, PTR_ERR(vreg_bt));
+ return PTR_ERR(vreg_bt);
+ }
+
+ if (on) {
+ for (pin = 0; pin < ARRAY_SIZE(bt_config_power_on); pin++) {
+ rc = gpio_tlmm_config(bt_config_power_on[pin],
+ GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, bt_config_power_on[pin], rc);
+ return -EIO;
+ }
+ }
+
+ /* units of mV, steps of 50 mV */
+ rc = vreg_set_level(vreg_bt, 2600);
+ if (rc) {
+ printk(KERN_ERR "%s: vreg set level failed (%d)\n",
+ __func__, rc);
+ return -EIO;
+ }
+ rc = vreg_enable(vreg_bt);
+ if (rc) {
+ printk(KERN_ERR "%s: vreg enable failed (%d)\n",
+ __func__, rc);
+ return -EIO;
+ }
+ } else {
+ rc = vreg_disable(vreg_bt);
+ if (rc) {
+ printk(KERN_ERR "%s: vreg disable failed (%d)\n",
+ __func__, rc);
+ return -EIO;
+ }
+ for (pin = 0; pin < ARRAY_SIZE(bt_config_power_off); pin++) {
+ rc = gpio_tlmm_config(bt_config_power_off[pin],
+ GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, bt_config_power_off[pin], rc);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+}
+
+static void __init bt_power_init(void)
+{
+ msm_bt_power_device.dev.platform_data = &bluetooth_power;
+}
+#else
+#define bt_power_init(x) do {} while (0)
+#endif
+
+static struct resource bluesleep_resources[] = {
+ {
+ .name = "gpio_host_wake",
+ .start = 83,
+ .end = 83,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "gpio_ext_wake",
+ .start = 42,
+ .end = 42,
+ .flags = IORESOURCE_IO,
+ },
+ {
+ .name = "host_wake",
+ .start = MSM_GPIO_TO_INT(83),
+ .end = MSM_GPIO_TO_INT(83),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device msm_bluesleep_device = {
+ .name = "bluesleep",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(bluesleep_resources),
+ .resource = bluesleep_resources,
+};
+
+static struct i2c_board_info i2c_devices[] = {
+ {
+ I2C_BOARD_INFO("mt9d112", 0x78 >> 1),
+ },
+ {
+ I2C_BOARD_INFO("s5k3e2fx", 0x20 >> 1),
+ },
+ {
+ I2C_BOARD_INFO("mt9p012", 0x6C >> 1),
+ },
+ {
+ I2C_BOARD_INFO("mt9t013", 0x6C),
+ },
+};
+
+static uint32_t camera_off_gpio_table[] = {
+ /* parallel CAMERA interfaces */
+ GPIO_CFG(0, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */
+ GPIO_CFG(1, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */
+ GPIO_CFG(2, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */
+ GPIO_CFG(3, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */
+ GPIO_CFG(4, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */
+ GPIO_CFG(5, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */
+ GPIO_CFG(6, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */
+ GPIO_CFG(7, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */
+ GPIO_CFG(8, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */
+ GPIO_CFG(9, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */
+ GPIO_CFG(10, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */
+ GPIO_CFG(11, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */
+ GPIO_CFG(12, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* PCLK */
+ GPIO_CFG(13, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */
+ GPIO_CFG(14, 0, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */
+ GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA), /* MCLK */
+};
+
+static uint32_t camera_on_gpio_table[] = {
+ /* parallel CAMERA interfaces */
+ GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */
+ GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */
+ GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */
+ GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */
+ GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */
+ GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */
+ GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */
+ GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */
+ GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */
+ GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */
+ GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */
+ GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */
+ GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */
+ GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */
+ GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */
+ GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */
+};
+
+static void config_gpio_table(uint32_t *table, int len)
+{
+ int n, rc;
+ for (n = 0; n < len; n++) {
+ rc = gpio_tlmm_config(table[n], GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, table[n], rc);
+ break;
+ }
+ }
+}
+
+static void config_camera_on_gpios(void)
+{
+ config_gpio_table(camera_on_gpio_table,
+ ARRAY_SIZE(camera_on_gpio_table));
+}
+
+static void config_camera_off_gpios(void)
+{
+ config_gpio_table(camera_off_gpio_table,
+ ARRAY_SIZE(camera_off_gpio_table));
+}
+
+#define MSM_PROBE_INIT(name) name##_probe_init
+static struct msm_camera_sensor_info msm_camera_sensor[] = {
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 0,
+ .sensor_name = "mt9d112",
+ .flash_type = MSM_CAMERA_FLASH_NONE,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(mt9d112),
+#endif
+ },
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 0,
+ .sensor_name = "s5k3e2fx",
+ .flash_type = MSM_CAMERA_FLASH_NONE,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(s5k3e2fx),
+#endif
+ },
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 88,
+ .sensor_name = "mt9p012",
+ .flash_type = MSM_CAMERA_FLASH_LED,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(mt9p012),
+#endif
+ },
+ {
+ .sensor_reset = 89,
+ .sensor_pwd = 85,
+ .vcm_pwd = 0,
+ .sensor_name = "mt9t013",
+ .flash_type = MSM_CAMERA_FLASH_NONE,
+#ifdef CONFIG_MSM_CAMERA
+ .sensor_probe = MSM_PROBE_INIT(mt9t013),
+#endif
+ },
+};
+#undef MSM_PROBE_INIT
+
+static struct msm_camera_device_platform_data msm_camera_device_data = {
+ .camera_gpio_on = config_camera_on_gpios,
+ .camera_gpio_off = config_camera_off_gpios,
+ .snum = ARRAY_SIZE(msm_camera_sensor),
+ .sinfo = &msm_camera_sensor[0],
+ .ioext.mdcphy = MSM_MDC_PHYS,
+ .ioext.mdcsz = MSM_MDC_SIZE,
+ .ioext.appphy = MSM_CLK_CTL_PHYS,
+ .ioext.appsz = MSM_CLK_CTL_SIZE,
+};
+
+static void __init msm_camera_add_device(void)
+{
+ msm_camera_register_device(NULL, 0, &msm_camera_device_data);
+ config_camera_off_gpios();
+}
+
+static struct platform_device *devices[] __initdata = {
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ &msm_device_uart3,
+#endif
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+ &msm_device_hsusb_otg,
+ &msm_device_hsusb_host,
+ &msm_device_hsusb_peripheral,
+ &mass_storage_device,
+ &msm_device_i2c,
+ &smc91x_device,
+ &msm_device_tssc,
+ &android_pmem_device,
+ &android_pmem_adsp_device,
+ &android_pmem_gpu1_device,
+ &msm_fb_device,
+ &lcdc_gordon_panel_device,
+ &msm_device_uart_dm1,
+#ifdef CONFIG_BT
+ &msm_bt_power_device,
+#endif
+ &msm_device_snd,
+ &msm_device_adspdec,
+ &msm_bluesleep_device,
+};
+
+static struct msm_panel_common_pdata mdp_pdata = {
+ .gpio = 97,
+};
+
+static void __init msm_fb_add_devices(void)
+{
+ msm_fb_register_device("mdp", &mdp_pdata);
+ msm_fb_register_device("pmdh", 0);
+ msm_fb_register_device("lcdc", &lcdc_pdata);
+}
+
+extern struct sys_timer msm_timer;
+
+static void __init msm7x27_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static struct msm_acpu_clock_platform_data msm7x27_clock_data = {
+ .acpu_switch_time_us = 50,
+ .max_speed_delta_khz = 256000,
+ .vdd_switch_time_us = 62,
+ .power_collapse_khz = 19200000,
+ .wait_for_irq_khz = 128000000,
+ .max_axi_khz = 128000,
+};
+
+void msm_serial_debug_init(unsigned int base, int irq,
+ struct device *clk_device, int signal_irq);
+
+static void sdcc_gpio_init(void)
+{
+ /* SDC1 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
+ if (gpio_request(51, "sdc1_data_3"))
+ pr_err("failed to request gpio sdc1_data_3\n");
+ if (gpio_request(52, "sdc1_data_2"))
+ pr_err("failed to request gpio sdc1_data_2\n");
+ if (gpio_request(53, "sdc1_data_1"))
+ pr_err("failed to request gpio sdc1_data_1\n");
+ if (gpio_request(54, "sdc1_data_0"))
+ pr_err("failed to request gpio sdc1_data_0\n");
+ if (gpio_request(55, "sdc1_cmd"))
+ pr_err("failed to request gpio sdc1_cmd\n");
+ if (gpio_request(56, "sdc1_clk"))
+ pr_err("failed to request gpio sdc1_clk\n");
+#endif
+
+ if (machine_is_msm7x27_ffa() || machine_is_msm7x27_ffa())
+ return;
+
+ /* SDC2 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+ if (gpio_request(62, "sdc2_clk"))
+ pr_err("failed to request gpio sdc2_clk\n");
+ if (gpio_request(63, "sdc2_cmd"))
+ pr_err("failed to request gpio sdc2_cmd\n");
+ if (gpio_request(64, "sdc2_data_3"))
+ pr_err("failed to request gpio sdc2_data_3\n");
+ if (gpio_request(65, "sdc2_data_2"))
+ pr_err("failed to request gpio sdc2_data_2\n");
+ if (gpio_request(66, "sdc2_data_1"))
+ pr_err("failed to request gpio sdc2_data_1\n");
+ if (gpio_request(67, "sdc2_data_0"))
+ pr_err("failed to request gpio sdc2_data_0\n");
+#endif
+
+ /* SDC3 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
+ if (gpio_request(88, "sdc3_clk"))
+ pr_err("failed to request gpio sdc3_clk\n");
+ if (gpio_request(89, "sdc3_cmd"))
+ pr_err("failed to request gpio sdc3_cmd\n");
+ if (gpio_request(90, "sdc3_data_3"))
+ pr_err("failed to request gpio sdc3_data_3\n");
+ if (gpio_request(91, "sdc3_data_2"))
+ pr_err("failed to request gpio sdc3_data_2\n");
+ if (gpio_request(92, "sdc3_data_1"))
+ pr_err("failed to request gpio sdc3_data_1\n");
+ if (gpio_request(93, "sdc3_data_0"))
+ pr_err("failed to request gpio sdc3_data_0\n");
+#endif
+
+ /* SDC4 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
+ if (gpio_request(19, "sdc4_data_3"))
+ pr_err("failed to request gpio sdc4_data_3\n");
+ if (gpio_request(20, "sdc4_data_2"))
+ pr_err("failed to request gpio sdc4_data_2\n");
+ if (gpio_request(21, "sdc4_data_1"))
+ pr_err("failed to request gpio sdc4_data_1\n");
+ if (gpio_request(107, "sdc4_cmd"))
+ pr_err("failed to request gpio sdc4_cmd\n");
+ if (gpio_request(108, "sdc4_data_0"))
+ pr_err("failed to request gpio sdc4_data_0\n");
+ if (gpio_request(109, "sdc4_clk"))
+ pr_err("failed to request gpio sdc4_clk\n");
+#endif
+}
+
+static unsigned sdcc_cfg_data[][6] = {
+ /* SDC1 configs */
+ {
+ GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ },
+ /* SDC2 configs */
+ {
+ GPIO_CFG(62, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(63, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(64, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(65, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(66, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(67, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ },
+ /* SDC3 configs */
+ {
+ GPIO_CFG(88, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(89, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(90, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(91, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(92, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(93, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ },
+ /* SDC4 configs */
+ {
+ GPIO_CFG(19, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(20, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(21, 4, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(107, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(108, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(109, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ }
+};
+
+static unsigned long vreg_sts, gpio_sts;
+static struct mpp *mpp_mmc;
+static struct vreg *vreg_mmc;
+
+static void msm_sdcc_setup_gpio(int dev_id, unsigned int enable)
+{
+ int i, rc;
+
+ if (!(test_bit(dev_id, &gpio_sts)^enable))
+ return;
+
+ if (enable)
+ set_bit(dev_id, &gpio_sts);
+ else
+ clear_bit(dev_id, &gpio_sts);
+
+ for (i = 0; i < ARRAY_SIZE(sdcc_cfg_data[dev_id - 1]); i++) {
+ rc = gpio_tlmm_config(sdcc_cfg_data[dev_id - 1][i],
+ enable ? GPIO_ENABLE : GPIO_DISABLE);
+ if (rc)
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, sdcc_cfg_data[dev_id - 1][i], rc);
+ }
+}
+
+static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
+{
+ int rc = 0;
+ struct platform_device *pdev;
+
+ pdev = container_of(dv, struct platform_device, dev);
+ msm_sdcc_setup_gpio(pdev->id, !!vdd);
+
+ if (vdd == 0) {
+ if (!vreg_sts)
+ return 0;
+
+ clear_bit(pdev->id, &vreg_sts);
+
+ if (!vreg_sts) {
+ if (machine_is_msm7x27_ffa()) {
+ rc = mpp_config_digital_out(mpp_mmc,
+ MPP_CFG(MPP_DLOGIC_LVL_MSMP,
+ MPP_DLOGIC_OUT_CTRL_LOW));
+ } else
+ rc = vreg_disable(vreg_mmc);
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ return 0;
+ }
+
+ if (!vreg_sts) {
+ if (machine_is_msm7x27_ffa()) {
+ rc = mpp_config_digital_out(mpp_mmc,
+ MPP_CFG(MPP_DLOGIC_LVL_MSMP,
+ MPP_DLOGIC_OUT_CTRL_HIGH));
+ } else {
+ rc = vreg_set_level(vreg_mmc, 2850);
+ if (!rc)
+ rc = vreg_enable(vreg_mmc);
+ }
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ set_bit(pdev->id, &vreg_sts);
+ return 0;
+}
+
+static struct mmc_platform_data msm7x27_sdcc_data = {
+ .ocr_mask = MMC_VDD_28_29,
+ .translate_vdd = msm_sdcc_setup_power,
+};
+
+static void __init msm7x27_init_mmc(void)
+{
+ if (machine_is_msm7x27_ffa()) {
+ mpp_mmc = mpp_get(NULL, "mpp3");
+ if (!mpp_mmc) {
+ printk(KERN_ERR "%s: mpp get failed (%ld)\n",
+ __func__, PTR_ERR(vreg_mmc));
+ return;
+ }
+ } else {
+ vreg_mmc = vreg_get(NULL, "mmc");
+ if (IS_ERR(vreg_mmc)) {
+ printk(KERN_ERR "%s: vreg get failed (%ld)\n",
+ __func__, PTR_ERR(vreg_mmc));
+ return;
+ }
+ }
+
+ sdcc_gpio_init();
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
+ msm_add_sdcc(1, &msm7x27_sdcc_data);
+#endif
+
+ if (machine_is_msm7x27_surf()) {
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+ msm_add_sdcc(2, &msm7x27_sdcc_data);
+#endif
+#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
+ msm_add_sdcc(3, &msm7x27_sdcc_data);
+#endif
+#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
+ msm_add_sdcc(4, &msm7x27_sdcc_data);
+#endif
+ }
+}
+
+static struct msm_i2c_platform_data msm_i2c_pdata = {
+ .clk_freq = 100000,
+};
+
+static void __init msm_device_i2c_init(void)
+{
+ msm_device_i2c.dev.platform_data = &msm_i2c_pdata;
+}
+
+static struct msm_pm_platform_data msm7x27_pm_data[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].supported = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 16000,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].residency = 20000,
+
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].supported = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 12000,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].residency = 20000,
+
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].supported = 1,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].suspend_enabled
+ = 1,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 2000,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].residency = 0,
+};
+
+static void __init msm7x27_init(void)
+{
+ if (socinfo_init() < 0)
+ BUG();
+
+#if defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3,
+ &msm_device_uart3.dev, 1);
+#endif
+ if (machine_is_msm7x27_ffa()) {
+ smc91x_resources[0].start = 0x98000300;
+ smc91x_resources[0].end = 0x980003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(85);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(85);
+ if (gpio_tlmm_config(GPIO_CFG(85, 0,
+ GPIO_INPUT,
+ GPIO_PULL_DOWN,
+ GPIO_2MA),
+ GPIO_ENABLE)) {
+ printk(KERN_ERR
+ "%s: Err: Config GPIO-85 INT\n",
+ __func__);
+ }
+
+ msm7x27_clock_data.max_axi_khz = 160000;
+ }
+
+ if (cpu_is_msm7x27())
+ msm7x27_clock_data.max_axi_khz = 200000;
+
+ msm_acpu_clock_init(&msm7x27_clock_data);
+ msm_device_hsusb_peripheral.dev.platform_data = &msm_hsusb_pdata;
+ msm_device_hsusb_host.dev.platform_data = &msm_hsusb_pdata;
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ msm_camera_add_device();
+ msm_device_i2c_init();
+ i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices));
+
+#ifdef CONFIG_SURF_FFA_GPIO_KEYPAD
+ if (machine_is_msm7x27_ffa())
+ platform_device_register(&keypad_device_7k_ffa);
+ else
+ platform_device_register(&keypad_device_surf);
+#endif
+ lcdc_gordon_gpio_init();
+ msm_fb_add_devices();
+ msm7x27_init_mmc();
+ bt_power_init();
+
+ if (cpu_is_msm7x27())
+ msm_pm_set_platform_data(msm7x27_pm_data);
+ else
+ msm_pm_set_platform_data(msm7x27_pm_data);
+}
+
+static void __init msm_msm7x27_allocate_memory_regions(void)
+{
+ void *addr;
+ unsigned long size;
+
+ size = MSM_PMEM_MDP_SIZE;
+ addr = alloc_bootmem(size);
+ android_pmem_pdata.start = __pa(addr);
+ android_pmem_pdata.size = size;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical)"
+ "for pmem\n", size, addr, __pa(addr));
+
+ size = MSM_PMEM_ADSP_SIZE;
+ addr = alloc_bootmem(size);
+ android_pmem_adsp_pdata.start = __pa(addr);
+ android_pmem_adsp_pdata.size = size;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical)"
+ "for adsp pmem\n", size, addr, __pa(addr));
+
+ size = MSM_PMEM_GPU1_SIZE;
+ addr = alloc_bootmem_aligned(size, 0x100000);
+ android_pmem_gpu1_pdata.start = __pa(addr);
+ android_pmem_gpu1_pdata.size = size;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical)"
+ "for gpu1 pmem\n", size, addr, __pa(addr));
+
+ size = MSM_FB_SIZE;
+ addr = alloc_bootmem(size);
+ msm_fb_resources[0].start = __pa(addr);
+ msm_fb_resources[0].end = msm_fb_resources[0].start + size - 1;
+ printk(KERN_INFO "allocating %lu bytes at %p (%lx physical) for fb\n",
+ size, addr, __pa(addr));
+}
+
+static void __init msm7x27_map_io(void)
+{
+ msm_map_common_io();
+ /* Technically dependent on the SoC but using machine_is
+ * macros since socinfo is not available this early and there
+ * are plans to restructure the code which will eliminate the
+ * need for socinfo.
+ */
+ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa())
+ msm_clock_init(msm_clocks_7x27, msm_num_clocks_7x27);
+ else
+ msm_clock_init(msm_clocks_7x27, msm_num_clocks_7x27);
+ msm_msm7x27_allocate_memory_regions();
+
+#ifdef CONFIG_CACHE_L2X0
+ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) {
+ /* 7x27 has 256KB L2 cache:
+ 64Kb/Way and 4-Way Associativity;
+ R/W latency: 3 cycles;
+ evmon/parity/share disabled. */
+ l2x0_init(MSM_L2CC_BASE, 0x00068012, 0xfe000000);
+ }
+#endif
+}
+
+MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x00200100,
+ .map_io = msm7x27_map_io,
+ .init_irq = msm7x27_init_irq,
+ .init_machine = msm7x27_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x00200100,
+ .map_io = msm7x27_map_io,
+ .init_irq = msm7x27_init_irq,
+ .init_machine = msm7x27_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
new file mode 100644
index 000000000000..3dc0fc54322a
--- /dev/null
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -0,0 +1,209 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/memory.h>
+#include <mach/msm_iomap.h>
+
+#include "devices.h"
+#include "timer.h"
+#include "socinfo.h"
+#include "pm.h"
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .start = 0x8A000300,
+ .end = 0x8A0003ff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = MSM_GPIO_TO_INT(156),
+ .end = MSM_GPIO_TO_INT(156),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_smd,
+ &msm_device_dmov,
+ &smc91x_device,
+ &msm_device_nand,
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ &msm_device_uart2,
+#endif
+};
+
+static void __init msm7x30_init_irq(void)
+{
+ msm_init_irq();
+}
+
+static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].supported = 0,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].suspend_enabled = 0,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 16000,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].residency = 20000,
+
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].supported = 0,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].suspend_enabled = 0,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].idle_enabled = 0,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 12000,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].residency = 20000,
+
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].supported = 0,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].suspend_enabled
+ = 0,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].idle_enabled = 0,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 2000,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].residency = 10000,
+
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].supported = 0,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].suspend_enabled = 0,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].idle_enabled = 0,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].latency = 500,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].residency = 0,
+};
+
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+static struct msm_gpio uart2_config_data[] = {
+ { GPIO_CFG(49, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_RFR"},
+ { GPIO_CFG(50, 2, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_CTS"},
+ { GPIO_CFG(51, 2, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_Rx"},
+ { GPIO_CFG(52, 2, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_2MA), "UART2_Tx"},
+};
+
+static void msm7x30_init_uart2(void)
+{
+ msm_gpios_request_enable(uart2_config_data,
+ ARRAY_SIZE(uart2_config_data));
+
+}
+#endif
+
+static void __init msm7x30_init(void)
+{
+ if (socinfo_init() < 0)
+ printk(KERN_ERR "%s: socinfo_init() failed!\n",
+ __func__);
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ msm_pm_set_platform_data(msm_pm_data);
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ msm7x30_init_uart2();
+#endif
+}
+
+static void __init msm7x30_map_io(void)
+{
+ msm_shared_ram_phys = 0x00100000;
+ msm_map_msm7x30_io();
+ msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
+}
+
+MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x00200100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x00200100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x00200100,
+ .map_io = msm7x30_map_io,
+ .init_irq = msm7x30_init_irq,
+ .init_machine = msm7x30_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
new file mode 100644
index 000000000000..75e59be7fd4b
--- /dev/null
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -0,0 +1,538 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/bootmem.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <asm/mach/mmc.h>
+#include <mach/vreg.h>
+#include <mach/mpp.h>
+#include <mach/gpio.h>
+#include <mach/board.h>
+#include <mach/sirc.h>
+#include <mach/msm_touchpad.h>
+#include <mach/msm_i2ckbd.h>
+#include <mach/pmic.h>
+#include <mach/memory.h>
+
+#include "devices.h"
+#include "timer.h"
+#include "socinfo.h"
+#include "msm-keypad-devices.h"
+#include "pm.h"
+
+#define TOUCHPAD_SUSPEND 34
+#define TOUCHPAD_IRQ 38
+
+#define MSM_PMEM_MDP_SIZE 0x800000
+#define MSM_PMEM_CAMERA_SIZE 0xa00000
+#define MSM_PMEM_ADSP_SIZE 0x1100000
+#define MSM_PMEM_GPU1_SIZE 0x800000
+#define MSM_FB_SIZE 0x500000
+#define MSM_AUDIO_SIZE 0x200000
+#define MSM_GPU_PHYS_SIZE SZ_2M
+
+#define MSM_SMI_BASE 0x2b00000
+#define MSM_SMI_SIZE 0x1500000
+
+#define MSM_FB_BASE MSM_SMI_BASE
+#define MSM_GPU_PHYS_BASE (MSM_FB_BASE + MSM_FB_SIZE)
+#define MSM_PMEM_GPU0_BASE (MSM_GPU_PHYS_BASE + MSM_GPU_PHYS_SIZE)
+#define MSM_PMEM_GPU0_SIZE (MSM_SMI_SIZE - MSM_FB_SIZE - MSM_GPU_PHYS_SIZE)
+
+static struct resource smc91x_resources[] = {
+ [0] = {
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smc91x",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+};
+
+
+static struct platform_device *devices[] __initdata = {
+ &smc91x_device,
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ &msm_device_uart3,
+#endif
+};
+
+#define KBD_RST 35
+#define KBD_IRQ 36
+
+static void kbd_gpio_release(void)
+{
+ gpio_free(KBD_IRQ);
+ gpio_free(KBD_RST);
+}
+
+static int kbd_gpio_setup(void)
+{
+ int rc;
+ int respin = KBD_RST;
+ int irqpin = KBD_IRQ;
+ unsigned rescfg =
+ GPIO_CFG(respin, 0, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA);
+ unsigned irqcfg =
+ GPIO_CFG(irqpin, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA);
+
+ rc = gpio_request(irqpin, "gpio_keybd_irq");
+ if (rc) {
+ pr_err("gpio_request failed on pin %d (rc=%d)\n",
+ irqpin, rc);
+ goto err_gpioconfig;
+ }
+ rc = gpio_request(respin, "gpio_keybd_reset");
+ if (rc) {
+ pr_err("gpio_request failed on pin %d (rc=%d)\n",
+ respin, rc);
+ goto err_gpioconfig;
+ }
+ rc = gpio_tlmm_config(rescfg, GPIO_ENABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config failed on pin %d (rc=%d)\n",
+ respin, rc);
+ goto err_gpioconfig;
+ }
+ rc = gpio_tlmm_config(irqcfg, GPIO_ENABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config failed on pin %d (rc=%d)\n",
+ irqpin, rc);
+ goto err_gpioconfig;
+ }
+ return rc;
+
+err_gpioconfig:
+ kbd_gpio_release();
+ return rc;
+}
+
+static struct msm_i2ckbd_platform_data msm_kybd_data = {
+ .hwrepeat = 0,
+ .scanset1 = 1,
+ .gpioreset = KBD_RST,
+ .gpioirq = KBD_IRQ,
+ .gpio_setup = kbd_gpio_setup,
+ .gpio_shutdown = kbd_gpio_release,
+};
+static void config_gpio_table(uint32_t *table, int len)
+{
+ int n, rc;
+ for (n = 0; n < len; n++) {
+ rc = gpio_tlmm_config(table[n], GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, table[n], rc);
+ break;
+ }
+ }
+}
+
+static void __init qsd8x50_init_irq(void)
+{
+ msm_init_irq();
+ msm_init_sirc();
+}
+
+static void sdcc_gpio_init(void)
+{
+ /* SDC1 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC1_SUPPORT
+ if (gpio_request(51, "sdc1_data_3"))
+ pr_err("failed to request gpio sdc1_data_3\n");
+ if (gpio_request(52, "sdc1_data_2"))
+ pr_err("failed to request gpio sdc1_data_2\n");
+ if (gpio_request(53, "sdc1_data_1"))
+ pr_err("failed to request gpio sdc1_data_1\n");
+ if (gpio_request(54, "sdc1_data_0"))
+ pr_err("failed to request gpio sdc1_data_0\n");
+ if (gpio_request(55, "sdc1_cmd"))
+ pr_err("failed to request gpio sdc1_cmd\n");
+ if (gpio_request(56, "sdc1_clk"))
+ pr_err("failed to request gpio sdc1_clk\n");
+#endif
+
+ if (machine_is_qsd8x50_ffa())
+ return;
+
+ /* SDC2 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC2_SUPPORT
+ if (gpio_request(62, "sdc2_clk"))
+ pr_err("failed to request gpio sdc2_clk\n");
+ if (gpio_request(63, "sdc2_cmd"))
+ pr_err("failed to request gpio sdc2_cmd\n");
+ if (gpio_request(64, "sdc2_data_3"))
+ pr_err("failed to request gpio sdc2_data_3\n");
+ if (gpio_request(65, "sdc2_data_2"))
+ pr_err("failed to request gpio sdc2_data_2\n");
+ if (gpio_request(66, "sdc2_data_1"))
+ pr_err("failed to request gpio sdc2_data_1\n");
+ if (gpio_request(67, "sdc2_data_0"))
+ pr_err("failed to request gpio sdc2_data_0\n");
+#endif
+
+ /* SDC3 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC3_SUPPORT
+ if (gpio_request(88, "sdc3_clk"))
+ pr_err("failed to request gpio sdc3_clk\n");
+ if (gpio_request(89, "sdc3_cmd"))
+ pr_err("failed to request gpio sdc3_cmd\n");
+ if (gpio_request(90, "sdc3_data_3"))
+ pr_err("failed to request gpio sdc3_data_3\n");
+ if (gpio_request(91, "sdc3_data_2"))
+ pr_err("failed to request gpio sdc3_data_2\n");
+ if (gpio_request(92, "sdc3_data_1"))
+ pr_err("failed to request gpio sdc3_data_1\n");
+ if (gpio_request(93, "sdc3_data_0"))
+ pr_err("failed to request gpio sdc3_data_0\n");
+#endif
+
+ /* SDC4 GPIOs */
+#ifdef CONFIG_MMC_MSM_SDC4_SUPPORT
+ if (gpio_request(142, "sdc4_clk"))
+ pr_err("failed to request gpio sdc4_clk\n");
+ if (gpio_request(143, "sdc4_cmd"))
+ pr_err("failed to request gpio sdc4_cmd\n");
+ if (gpio_request(144, "sdc4_data_0"))
+ pr_err("failed to request gpio sdc4_data_0\n");
+ if (gpio_request(145, "sdc4_data_1"))
+ pr_err("failed to request gpio sdc4_data_1\n");
+ if (gpio_request(146, "sdc4_data_2"))
+ pr_err("failed to request gpio sdc4_data_2\n");
+ if (gpio_request(147, "sdc4_data_3"))
+ pr_err("failed to request gpio sdc4_data_3\n");
+#endif
+}
+
+static unsigned sdcc_cfg_data[][6] = {
+ /* SDC1 configs */
+ {
+ GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ },
+ /* SDC2 configs */
+ {
+ GPIO_CFG(62, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(63, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(64, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(65, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(66, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(67, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ },
+ /* SDC3 configs */
+ {
+ GPIO_CFG(88, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(89, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(90, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(91, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(92, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(93, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ },
+ /* SDC4 configs */
+ {
+ GPIO_CFG(142, 3, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA),
+ GPIO_CFG(143, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(144, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(145, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(146, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ GPIO_CFG(147, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ }
+};
+
+static unsigned long vreg_sts, gpio_sts;
+static struct vreg *vreg_mmc;
+
+static void msm_sdcc_setup_gpio(int dev_id, unsigned int enable)
+{
+ int i, rc;
+
+ if (!(test_bit(dev_id, &gpio_sts)^enable))
+ return;
+
+ if (enable)
+ set_bit(dev_id, &gpio_sts);
+ else
+ clear_bit(dev_id, &gpio_sts);
+
+ for (i = 0; i < ARRAY_SIZE(sdcc_cfg_data[dev_id - 1]); i++) {
+ rc = gpio_tlmm_config(sdcc_cfg_data[dev_id - 1][i],
+ enable ? GPIO_ENABLE : GPIO_DISABLE);
+ if (rc)
+ printk(KERN_ERR "%s: gpio_tlmm_config(%#x)=%d\n",
+ __func__, sdcc_cfg_data[dev_id - 1][i], rc);
+ }
+}
+
+static uint32_t msm_sdcc_setup_power(struct device *dv, unsigned int vdd)
+{
+ int rc = 0;
+ struct platform_device *pdev;
+
+ pdev = container_of(dv, struct platform_device, dev);
+ msm_sdcc_setup_gpio(pdev->id, !!vdd);
+
+ if (vdd == 0) {
+ if (!vreg_sts)
+ return 0;
+
+ clear_bit(pdev->id, &vreg_sts);
+
+ if (!vreg_sts && !machine_is_qsd8x50_ffa()) {
+ rc = vreg_disable(vreg_mmc);
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ return 0;
+ }
+
+ if (!vreg_sts && !machine_is_qsd8x50_ffa()) {
+ rc = vreg_set_level(vreg_mmc, 2850);
+ if (!rc)
+ rc = vreg_enable(vreg_mmc);
+ if (rc)
+ printk(KERN_ERR "%s: return val: %d \n",
+ __func__, rc);
+ }
+ set_bit(pdev->id, &vreg_sts);
+ return 0;
+}
+
+static struct mmc_platform_data qsd8x50_sdcc_data = {
+ .ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29,
+ .translate_vdd = msm_sdcc_setup_power,
+};
+
+static void __init qsd8x50_cfg_smc91x(void)
+{
+ int rc = 0;
+
+ if (machine_is_qsd8x50_surf()) {
+ smc91x_resources[0].start = 0x70000300;
+ smc91x_resources[0].end = 0x700003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(156);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(156);
+ } else if (machine_is_qsd8x50_ffa()) {
+ smc91x_resources[0].start = 0x84000300;
+ smc91x_resources[0].end = 0x840003ff;
+ smc91x_resources[1].start = MSM_GPIO_TO_INT(87);
+ smc91x_resources[1].end = MSM_GPIO_TO_INT(87);
+
+ rc = gpio_tlmm_config(GPIO_CFG(87, 0, GPIO_INPUT,
+ GPIO_PULL_DOWN, GPIO_2MA),
+ GPIO_ENABLE);
+ if (rc) {
+ printk(KERN_ERR "%s: gpio_tlmm_config=%d\n",
+ __func__, rc);
+ }
+ } else
+ printk(KERN_ERR "%s: invalid machine type\n", __func__);
+}
+
+static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].supported = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 8594,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].residency = 23740,
+
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].supported = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 4594,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].residency = 23740,
+
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].supported = 1,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].suspend_enabled
+ = 1,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].idle_enabled = 0,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 443,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].residency = 1098,
+
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].supported = 1,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].suspend_enabled = 1,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].idle_enabled = 1,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].latency = 2,
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT].residency = 0,
+};
+
+static void __init gp6_init(void)
+{
+ struct vreg *vreg;
+ int rc;
+
+ vreg = vreg_get(NULL, "gp6");
+ if (IS_ERR(vreg)) {
+ printk(KERN_ERR "%s: vreg get failed (%ld)\n",
+ __func__, PTR_ERR(vreg));
+ return;
+ }
+
+ /* units of mV, steps of 50 mV */
+ rc = vreg_set_level(vreg, 2850);
+ if (rc) {
+ printk(KERN_ERR "%s: vreg set level failed (%d)\n",
+ __func__, rc);
+ return;
+ }
+
+ rc = vreg_enable(vreg);
+ if (rc) {
+ printk(KERN_ERR "%s: vreg enable failed (%d)\n",
+ __func__, rc);
+ return;
+ }
+
+ if (machine_is_qsd8x50_ffa())
+ vreg_mmc = vreg;
+}
+
+static void __init qsd8x50_init(void)
+{
+ if (socinfo_init() < 0)
+ printk(KERN_ERR "%s: socinfo_init() failed!\n",
+ __func__);
+ qsd8x50_cfg_smc91x();
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ gp6_init();
+ msm_pm_set_platform_data(msm_pm_data);
+
+#ifdef CONFIG_SURF_FFA_GPIO_KEYPAD
+ if (machine_is_qsd8x50_ffa())
+ platform_device_register(&keypad_device_8k_ffa);
+ else
+ platform_device_register(&keypad_device_surf);
+#endif
+}
+
+
+static void __init qsd8x50_map_io(void)
+{
+ msm_map_qsd8x50_io();
+ msm_clock_init(msm_clocks_8x50, msm_num_clocks_8x50);
+}
+
+MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x20000100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(QSD8X50_GRAPEFRUIT, "QCT QSD8X50 GRAPEFRUIT")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x20000100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(QSD8X50_FFA, "QCT QSD8X50 FFA")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x20000100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
+
+MACHINE_START(QSD8X50_ST1, "QCT QSD8X50 ST1")
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x20000100,
+ .map_io = qsd8x50_map_io,
+ .init_irq = qsd8x50_init_irq,
+ .init_machine = qsd8x50_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire-gpio.c b/arch/arm/mach-msm/board-sapphire-gpio.c
new file mode 100644
index 000000000000..2f2df97956cf
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-gpio.c
@@ -0,0 +1,326 @@
+/* arch/arm/mach-msm/board-sapphire-gpio.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <asm/mach-types.h>
+
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+
+#ifdef DEBUG_SAPPHIRE_GPIO
+#define DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n", __func__, ## arg)
+#else
+#define DBG(fmt, arg...) do {} while (0)
+#endif
+
+#define SAPPHIRE_CPLD_INT_STATUS (SAPPHIRE_CPLD_BASE + 0x0E)
+#define SAPPHIRE_CPLD_INT_LEVEL (SAPPHIRE_CPLD_BASE + 0x08)
+#define SAPPHIRE_CPLD_INT_MASK (SAPPHIRE_CPLD_BASE + 0x0C)
+
+/*CPLD misc reg offset*/
+static const int _g_CPLD_MISCn_Offset[] = { 0x0A, /*misc1 reg*/
+ 0x00, /*misc2 reg*/
+ 0x02, /*misc3 reg*/
+ 0x04, /*misc4 reg*/
+ 0x06}; /*misc5 reg*/
+/*CPLD INT Bank*/
+/*BANK0: int1 status, int2 level, int3 mask*/
+static const int _g_INT_BANK_Offset[][3] = {{0x0E, 0x08, 0x0C} };
+
+static uint8_t sapphire_cpld_initdata[4] = {
+ [0] = 0x80, /* for serial debug UART3, low current misc2*/
+ [1] = 0x34, /* jog & tp enable, I2C pull misc3*/
+ [3] = 0x04, /* mmdi 32k en misc5*/
+};
+
+/*save current working int mask, so the value can be restored after resume.
+Sapphire has only bank0.*/
+static uint8_t sapphire_int_mask[] = {
+ [0] = 0xfb, /* enable all interrupts, bit 2 is not used */
+};
+
+/*Sleep have to prepare the wake up source in advance.
+default to disable all wakeup sources when suspend.*/
+static uint8_t sapphire_sleep_int_mask[] = {
+ [0] = 0x00, /* bit2 is not used */
+};
+
+static int sapphire_suspended;
+
+static int sapphire_gpio_read(struct gpio_chip *chip, unsigned n)
+{
+ if (n < SAPPHIRE_GPIO_INT_B0_BASE) /*MISCn*/
+ return !!(readb(CPLD_GPIO_REG(n)) & CPLD_GPIO_BIT_POS_MASK(n));
+ else if (n <= SAPPHIRE_GPIO_END) /*gpio n is INT pin*/
+ return !!(readb(CPLD_INT_LEVEL_REG_G(n)) &
+ CPLD_GPIO_BIT_POS_MASK(n));
+ return 0;
+}
+
+/*CPLD Write only register :MISC2, MISC3, MISC4, MISC5 => reg=0,2,4,6
+Reading from write-only registers is undefined, so the writing value
+should be kept in shadow for later usage.*/
+int sapphire_gpio_write(struct gpio_chip *chip, unsigned n, unsigned on)
+{
+ unsigned long flags;
+ uint8_t reg_val;
+ if (n > SAPPHIRE_GPIO_END)
+ return -1;
+
+ local_irq_save(flags);
+ reg_val = readb(CPLD_GPIO_REG(n));
+ if (on)
+ reg_val |= CPLD_GPIO_BIT_POS_MASK(n);
+ else
+ reg_val &= ~CPLD_GPIO_BIT_POS_MASK(n);
+ writeb(reg_val, CPLD_GPIO_REG(n));
+
+ DBG("gpio=%d, l=0x%x\r\n", n, readb(SAPPHIRE_CPLD_INT_LEVEL));
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+static int sapphire_gpio_configure(struct gpio_chip *chip, unsigned int gpio,
+ unsigned long flags)
+{
+ if (flags & (GPIOF_OUTPUT_LOW | GPIOF_OUTPUT_HIGH))
+ sapphire_gpio_write(chip, gpio, flags & GPIOF_OUTPUT_HIGH);
+
+ DBG("gpio=%d, l=0x%x\r\n", gpio, readb(SAPPHIRE_CPLD_INT_LEVEL));
+
+ return 0;
+}
+
+static int sapphire_gpio_get_irq_num(struct gpio_chip *chip, unsigned int gpio,
+ unsigned int *irqp, unsigned long *irqnumflagsp)
+{
+ DBG("gpio=%d, l=0x%x\r\n", gpio, readb(SAPPHIRE_CPLD_INT_LEVEL));
+ DBG("SAPPHIRE_GPIO_INT_B0_BASE=%d, SAPPHIRE_GPIO_LAST_INT=%d\r\n",
+ SAPPHIRE_GPIO_INT_B0_BASE, SAPPHIRE_GPIO_LAST_INT);
+ if ((gpio < SAPPHIRE_GPIO_INT_B0_BASE) ||
+ (gpio > SAPPHIRE_GPIO_LAST_INT))
+ return -ENOENT;
+ *irqp = SAPPHIRE_GPIO_TO_INT(gpio);
+ DBG("*irqp=%d\r\n", *irqp);
+ if (irqnumflagsp)
+ *irqnumflagsp = 0;
+ return 0;
+}
+
+/*write 1 to clear INT status bit.*/
+static void sapphire_gpio_irq_ack(unsigned int irq)
+{
+ /*write 1 to clear*/
+ writeb(SAPPHIRE_INT_BIT_MASK(irq), CPLD_INT_STATUS_REG(irq));
+}
+
+/*unmask/enable the INT
+static void sapphire_gpio_irq_unmask(unsigned int irq)*/
+static void sapphire_gpio_irq_enable(unsigned int irq)
+{
+ unsigned long flags;
+ uint8_t reg_val;
+
+ local_irq_save(flags); /*disabling all interrupts*/
+
+ reg_val = readb(CPLD_INT_MASK_REG(irq)) | SAPPHIRE_INT_BIT_MASK(irq);
+ DBG("(irq=%d,0x%x, 0x%x)\r\n", irq, CPLD_INT_MASK_REG(irq),
+ SAPPHIRE_INT_BIT_MASK(irq));
+ DBG("sapphire_suspended=%d\r\n", sapphire_suspended);
+ /*printk(KERN_INFO "sapphire_gpio_irq_mask irq %d => %d:%02x\n",
+ irq, bank, reg_val);*/
+ if (!sapphire_suspended)
+ writeb(reg_val, CPLD_INT_MASK_REG(irq));
+
+ reg_val = readb(CPLD_INT_MASK_REG(irq));
+ DBG("reg_val= 0x%x\r\n", reg_val);
+ DBG("l=0x%x\r\n", readb(SAPPHIRE_CPLD_INT_LEVEL));
+
+ local_irq_restore(flags); /*restore the interrupts*/
+}
+
+/*mask/disable INT
+static void sapphire_gpio_irq_mask(unsigned int irq)*/
+static void sapphire_gpio_irq_disable(unsigned int irq)
+{
+ unsigned long flags;
+ uint8_t reg_val;
+
+ local_irq_save(flags);
+ reg_val = readb(CPLD_INT_MASK_REG(irq)) & ~SAPPHIRE_INT_BIT_MASK(irq);
+ /*CPLD INT MASK is r/w now.*/
+
+ /*printk(KERN_INFO "sapphire_gpio_irq_unmask irq %d => %d:%02x\n",
+ irq, bank, reg_val);*/
+ DBG("(%d,0x%x, 0x%x, 0x%x)\r\n", irq, reg_val, CPLD_INT_MASK_REG(irq),
+ SAPPHIRE_INT_BIT_MASK(irq));
+ DBG("sapphire_suspended=%d\r\n", sapphire_suspended);
+ if (!sapphire_suspended)
+ writeb(reg_val, CPLD_INT_MASK_REG(irq));
+
+ reg_val = readb(CPLD_INT_MASK_REG(irq));
+ DBG("reg_val= 0x%x\r\n", reg_val);
+ DBG("l=0x%x\r\n", readb(SAPPHIRE_CPLD_INT_LEVEL));
+
+ local_irq_restore(flags);
+}
+
+/*preparing enable/disable wake source before sleep*/
+int sapphire_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+{
+ unsigned long flags;
+ uint8_t mask = SAPPHIRE_INT_BIT_MASK(irq);
+
+ local_irq_save(flags);
+
+ if (on) /*wake on -> mask the bit*/
+ sapphire_sleep_int_mask[CPLD_INT_TO_BANK(irq)] |= mask;
+ else /*no wake -> unmask the bit*/
+ sapphire_sleep_int_mask[CPLD_INT_TO_BANK(irq)] &= ~mask;
+ local_irq_restore(flags);
+ return 0;
+}
+
+/*Sapphire has only one INT Bank.*/
+static void sapphire_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ int j;
+ unsigned v;
+ int int_base = SAPPHIRE_INT_START;
+
+ v = readb(SAPPHIRE_CPLD_INT_STATUS); /*INT1 status reg, BANK0*/
+
+ for (j = 0; j < 8 ; j++) { /*8 bit per bank*/
+ if (v & (1U << j)) { /*got the INT Bit*/
+ DBG("generic_handle_irq j=0x%x\r\n", j);
+ generic_handle_irq(int_base + j);
+ }
+ }
+
+ desc->chip->ack(irq); /*clear CPLD INT in SOC side.*/
+ DBG("irq=%d, l=0x%x\r\n", irq, readb(SAPPHIRE_CPLD_INT_LEVEL));
+}
+
+/*Save current working sources before sleep, so we can restore it after
+ * resume.*/
+static int sapphire_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+{
+ sapphire_suspended = 1;
+ /*save current masking*/
+ sapphire_int_mask[0] = readb(SAPPHIRE_CPLD_BASE +
+ SAPPHIRE_GPIO_INT_B0_MASK_REG);
+
+ /*set waking source before sleep.*/
+ writeb(sapphire_sleep_int_mask[0],
+ SAPPHIRE_CPLD_BASE + SAPPHIRE_GPIO_INT_B0_MASK_REG);
+
+ return 0;
+}
+
+/*All the registers will be kept till a power loss...*/
+int sapphire_sysdev_resume(struct sys_device *dev)
+{
+ /*restore the working mask saved before sleep*/
+ writeb(sapphire_int_mask[0], SAPPHIRE_CPLD_BASE +
+ SAPPHIRE_GPIO_INT_B0_MASK_REG);
+ sapphire_suspended = 0;
+ return 0;
+}
+
+/**
+ * linux/irq.h :: struct irq_chip
+ * @enable: enable the interrupt (defaults to chip->unmask if NULL)
+ * @disable: disable the interrupt (defaults to chip->mask if NULL)
+ * @ack: start of a new interrupt
+ * @mask: mask an interrupt source
+ * @mask_ack: ack and mask an interrupt source
+ * @unmask: unmask an interrupt source
+ */
+static struct irq_chip sapphire_gpio_irq_chip = {
+ .name = "sapphiregpio",
+ .ack = sapphire_gpio_irq_ack,
+ .mask = sapphire_gpio_irq_disable, /*sapphire_gpio_irq_mask,*/
+ .unmask = sapphire_gpio_irq_enable, /*sapphire_gpio_irq_unmask,*/
+ .set_wake = sapphire_gpio_irq_set_wake,
+ /*.set_type = sapphire_gpio_irq_set_type,*/
+};
+
+/*Thomas:For CPLD*/
+static struct gpio_chip sapphire_gpio_chip = {
+ .start = SAPPHIRE_GPIO_START,
+ .end = SAPPHIRE_GPIO_END,
+ .configure = sapphire_gpio_configure,
+ .get_irq_num = sapphire_gpio_get_irq_num,
+ .read = sapphire_gpio_read,
+ .write = sapphire_gpio_write,
+/* .read_detect_status = sapphire_gpio_read_detect_status,
+ .clear_detect_status = sapphire_gpio_clear_detect_status */
+};
+
+struct sysdev_class sapphire_sysdev_class = {
+ .name = "sapphiregpio_irq",
+ .suspend = sapphire_sysdev_suspend,
+ .resume = sapphire_sysdev_resume,
+};
+
+static struct sys_device sapphire_irq_device = {
+ .cls = &sapphire_sysdev_class,
+};
+
+int sapphire_init_gpio(void)
+{
+ int i;
+ if (!machine_is_sapphire())
+ return 0;
+
+ DBG("%d,%d\r\n", SAPPHIRE_INT_START, SAPPHIRE_INT_END);
+ DBG("NR_MSM_IRQS=%d, NR_GPIO_IRQS=%d\r\n", NR_MSM_IRQS, NR_GPIO_IRQS);
+ for (i = SAPPHIRE_INT_START; i <= SAPPHIRE_INT_END; i++) {
+ set_irq_chip(i, &sapphire_gpio_irq_chip);
+ set_irq_handler(i, handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ register_gpio_chip(&sapphire_gpio_chip);
+
+ /*setup CPLD INT connecting to SOC's gpio 17 */
+ set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH);
+ set_irq_chained_handler(MSM_GPIO_TO_INT(17), sapphire_gpio_irq_handler);
+ set_irq_wake(MSM_GPIO_TO_INT(17), 1);
+
+ if (sysdev_class_register(&sapphire_sysdev_class) == 0)
+ sysdev_register(&sapphire_irq_device);
+
+ return 0;
+}
+
+int sapphire_init_cpld(unsigned int sys_rev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sapphire_cpld_initdata); i++)
+ writeb(sapphire_cpld_initdata[i], SAPPHIRE_CPLD_BASE + i * 2);
+ return 0;
+}
+
+arch_initcall(sapphire_init_gpio);
diff --git a/arch/arm/mach-msm/board-sapphire-h2w.c b/arch/arm/mach-msm/board-sapphire-h2w.c
new file mode 100644
index 000000000000..aa83e216974d
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-h2w.c
@@ -0,0 +1,545 @@
+/*
+ * H2W device detection driver.
+ *
+ * Copyright (C) 2008 HTC Corporation.
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Authors:
+ * Laurence Chen <Laurence_Chen@htc.com>
+ * Nick Pelly <npelly@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* For detecting HTC 2 Wire devices, such as wired headset.
+
+ Logically, the H2W driver is always present, and H2W state (hi->state)
+ indicates what is currently plugged into the H2W interface.
+
+ When the headset is plugged in, CABLE_IN1 is pulled low. When the headset
+ button is pressed, CABLE_IN2 is pulled low. These two lines are shared with
+ the TX and RX (respectively) of UART3 - used for serial debugging.
+
+ This headset driver keeps the CPLD configured as UART3 for as long as
+ possible, so that we can do serial FIQ debugging even when the kernel is
+ locked and this driver no longer runs. So it only configures the CPLD to
+ GPIO while the headset is plugged in, and for 10ms during detection work.
+
+ Unfortunately we can't leave the CPLD as UART3 while a headset is plugged
+ in, UART3 is pullup on TX but the headset is pull-down, causing a 55 mA
+ drain on sapphire.
+
+ The headset detection work involves setting CPLD to GPIO, and then pulling
+ CABLE_IN1 high with a stronger pullup than usual. A H2W headset will still
+ pull this line low, whereas other attachments such as a serial console
+ would get pulled up by this stronger pullup.
+
+ Headset insertion/removal causes UEvent's to be sent, and
+ /sys/class/switch/h2w/state to be updated.
+
+ Button presses are interpreted as input event (KEY_MEDIA). Button presses
+ are ignored if the headset is plugged in, so the buttons on 11 pin -> 3.5mm
+ jack adapters do not work until a headset is plugged into the adapter. This
+ is to avoid serial RX traffic causing spurious button press events.
+
+ We tend to check the status of CABLE_IN1 a few more times than strictly
+ necessary during headset detection, to avoid spurious headset insertion
+ events caused by serial debugger TX traffic.
+*/
+
+
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/switch.h>
+#include <linux/input.h>
+#include <linux/debugfs.h>
+#include <linux/gpio.h>
+#include <asm/atomic.h>
+#include <mach/board.h>
+#include <mach/vreg.h>
+#include <asm/mach-types.h>
+#include "board-sapphire.h"
+
+#ifdef CONFIG_DEBUG_SAPPHIRE_H2W
+#define H2W_DBG(fmt, arg...) printk(KERN_INFO "[H2W] %s " fmt "\n", __FUNCTION__, ## arg)
+#else
+#define H2W_DBG(fmt, arg...) do {} while (0)
+#endif
+
+static struct workqueue_struct *g_detection_work_queue;
+static void detection_work(struct work_struct *work);
+static DECLARE_WORK(g_detection_work, detection_work);
+enum {
+ NO_DEVICE = 0,
+ HTC_HEADSET = 1,
+};
+
+enum {
+ UART3 = 0,
+ GPIO = 1,
+};
+
+struct h2w_info {
+ struct switch_dev sdev;
+ struct input_dev *input;
+
+ atomic_t btn_state;
+ int ignore_btn;
+
+ unsigned int irq;
+ unsigned int irq_btn;
+
+ struct hrtimer timer;
+ ktime_t debounce_time;
+
+ struct hrtimer btn_timer;
+ ktime_t btn_debounce_time;
+};
+static struct h2w_info *hi;
+
+static ssize_t sapphire_h2w_print_name(struct switch_dev *sdev, char *buf)
+{
+ switch (switch_get_state(&hi->sdev)) {
+ case NO_DEVICE:
+ return sprintf(buf, "No Device\n");
+ case HTC_HEADSET:
+ return sprintf(buf, "Headset\n");
+ }
+ return -EINVAL;
+}
+
+static void configure_cpld(int route)
+{
+ H2W_DBG(" route = %s", route == UART3 ? "UART3" : "GPIO");
+ switch (route) {
+ case UART3:
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1);
+ break;
+ case GPIO:
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 0);
+ break;
+ }
+}
+
+static void button_pressed(void)
+{
+ H2W_DBG("");
+ atomic_set(&hi->btn_state, 1);
+ input_report_key(hi->input, KEY_MEDIA, 1);
+ input_sync(hi->input);
+}
+
+static void button_released(void)
+{
+ H2W_DBG("");
+ atomic_set(&hi->btn_state, 0);
+ input_report_key(hi->input, KEY_MEDIA, 0);
+ input_sync(hi->input);
+}
+
+#ifdef CONFIG_MSM_SERIAL_DEBUGGER
+extern void msm_serial_debug_enable(int);
+#endif
+
+static void insert_headset(void)
+{
+ unsigned long irq_flags;
+
+ H2W_DBG("");
+
+ switch_set_state(&hi->sdev, HTC_HEADSET);
+ configure_cpld(GPIO);
+
+#ifdef CONFIG_MSM_SERIAL_DEBUGGER
+ msm_serial_debug_enable(false);
+#endif
+
+
+ /* On some non-standard headset adapters (usually those without a
+ * button) the btn line is pulled down at the same time as the detect
+ * line. We can check here by sampling the button line, if it is
+ * low then it is probably a bad adapter so ignore the button.
+ * If the button is released then we stop ignoring the button, so that
+ * the user can recover from the situation where a headset is plugged
+ * in with button held down.
+ */
+ hi->ignore_btn = !gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2);
+
+ /* Enable button irq */
+ local_irq_save(irq_flags);
+ enable_irq(hi->irq_btn);
+ local_irq_restore(irq_flags);
+
+ hi->debounce_time = ktime_set(0, 20000000); /* 20 ms */
+}
+
+static void remove_headset(void)
+{
+ unsigned long irq_flags;
+
+ H2W_DBG("");
+
+ switch_set_state(&hi->sdev, NO_DEVICE);
+ configure_cpld(UART3);
+
+ /* Disable button */
+ local_irq_save(irq_flags);
+ disable_irq(hi->irq_btn);
+ local_irq_restore(irq_flags);
+
+ if (atomic_read(&hi->btn_state))
+ button_released();
+
+ hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */
+}
+
+static void detection_work(struct work_struct *work)
+{
+ unsigned long irq_flags;
+ int clk, cable_in1;
+
+ H2W_DBG("");
+
+ if (gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1) != 0) {
+ /* Headset not plugged in */
+ if (switch_get_state(&hi->sdev) == HTC_HEADSET)
+ remove_headset();
+ return;
+ }
+
+ /* Something plugged in, lets make sure its a headset */
+
+ /* Switch CPLD to GPIO to do detection */
+ configure_cpld(GPIO);
+ /* Disable headset interrupt while detecting.*/
+ local_irq_save(irq_flags);
+ disable_irq(hi->irq);
+ local_irq_restore(irq_flags);
+
+ /* Set GPIO_CABLE_IN1 as output high */
+ gpio_direction_output(SAPPHIRE_GPIO_CABLE_IN1, 1);
+ /* Delay 10ms for pin stable. */
+ msleep(10);
+ /* Save H2W_CLK */
+ clk = gpio_get_value(SAPPHIRE_GPIO_H2W_CLK_GPI);
+ /* Set GPIO_CABLE_IN1 as input */
+ gpio_direction_input(SAPPHIRE_GPIO_CABLE_IN1);
+
+ /* Restore IRQs */
+ local_irq_save(irq_flags);
+ enable_irq(hi->irq);
+ local_irq_restore(irq_flags);
+
+ cable_in1 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1);
+
+ if (cable_in1 == 0 && clk == 0) {
+ if (switch_get_state(&hi->sdev) == NO_DEVICE)
+ insert_headset();
+ } else {
+ configure_cpld(UART3);
+ H2W_DBG("CABLE_IN1 was low, but not a headset "
+ "(recent cable_in1 = %d, clk = %d)", cable_in1, clk);
+ }
+}
+
+static enum hrtimer_restart button_event_timer_func(struct hrtimer *data)
+{
+ H2W_DBG("");
+
+ if (switch_get_state(&hi->sdev) == HTC_HEADSET) {
+ if (gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2)) {
+ if (hi->ignore_btn)
+ hi->ignore_btn = 0;
+ else if (atomic_read(&hi->btn_state))
+ button_released();
+ } else {
+ if (!hi->ignore_btn && !atomic_read(&hi->btn_state))
+ button_pressed();
+ }
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart detect_event_timer_func(struct hrtimer *data)
+{
+ H2W_DBG("");
+
+ queue_work(g_detection_work_queue, &g_detection_work);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t detect_irq_handler(int irq, void *dev_id)
+{
+ int value1, value2;
+ int retry_limit = 10;
+
+ H2W_DBG("");
+ do {
+ value1 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1);
+ set_irq_type(hi->irq, value1 ?
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH);
+ value2 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN1);
+ } while (value1 != value2 && retry_limit-- > 0);
+
+ H2W_DBG("value2 = %d (%d retries)", value2, (10-retry_limit));
+
+ if ((switch_get_state(&hi->sdev) == NO_DEVICE) ^ value2) {
+ if (switch_get_state(&hi->sdev) == HTC_HEADSET)
+ hi->ignore_btn = 1;
+ /* Do the rest of the work in timer context */
+ hrtimer_start(&hi->timer, hi->debounce_time, HRTIMER_MODE_REL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t button_irq_handler(int irq, void *dev_id)
+{
+ int value1, value2;
+ int retry_limit = 10;
+
+ H2W_DBG("");
+ do {
+ value1 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2);
+ set_irq_type(hi->irq_btn, value1 ?
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH);
+ value2 = gpio_get_value(SAPPHIRE_GPIO_CABLE_IN2);
+ } while (value1 != value2 && retry_limit-- > 0);
+
+ H2W_DBG("value2 = %d (%d retries)", value2, (10-retry_limit));
+
+ hrtimer_start(&hi->btn_timer, hi->btn_debounce_time, HRTIMER_MODE_REL);
+
+ return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static void h2w_debug_set(void *data, u64 val)
+{
+ switch_set_state(&hi->sdev, (int)val);
+}
+
+static u64 h2w_debug_get(void *data)
+{
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(h2w_debug_fops, h2w_debug_get, h2w_debug_set, "%llu\n");
+static int __init h2w_debug_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("h2w", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debugfs_create_file("state", 0644, dent, NULL, &h2w_debug_fops);
+
+ return 0;
+}
+
+device_initcall(h2w_debug_init);
+#endif
+
+static int sapphire_h2w_probe(struct platform_device *pdev)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ printk(KERN_INFO "H2W: Registering H2W (headset) driver\n");
+ hi = kzalloc(sizeof(struct h2w_info), GFP_KERNEL);
+ if (!hi)
+ return -ENOMEM;
+
+ atomic_set(&hi->btn_state, 0);
+ hi->ignore_btn = 0;
+
+ hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */
+ hi->btn_debounce_time = ktime_set(0, 10000000); /* 10 ms */
+ hi->sdev.name = "h2w";
+ hi->sdev.print_name = sapphire_h2w_print_name;
+
+ ret = switch_dev_register(&hi->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ g_detection_work_queue = create_workqueue("detection");
+ if (g_detection_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err_create_work_queue;
+ }
+
+ ret = gpio_request(SAPPHIRE_GPIO_CABLE_IN1, "h2w_detect");
+ if (ret < 0)
+ goto err_request_detect_gpio;
+
+ ret = gpio_request(SAPPHIRE_GPIO_CABLE_IN2, "h2w_button");
+ if (ret < 0)
+ goto err_request_button_gpio;
+
+ ret = gpio_direction_input(SAPPHIRE_GPIO_CABLE_IN1);
+ if (ret < 0)
+ goto err_set_detect_gpio;
+
+ ret = gpio_direction_input(SAPPHIRE_GPIO_CABLE_IN2);
+ if (ret < 0)
+ goto err_set_button_gpio;
+
+ hi->irq = gpio_to_irq(SAPPHIRE_GPIO_CABLE_IN1);
+ if (hi->irq < 0) {
+ ret = hi->irq;
+ goto err_get_h2w_detect_irq_num_failed;
+ }
+
+ hi->irq_btn = gpio_to_irq(SAPPHIRE_GPIO_CABLE_IN2);
+ if (hi->irq_btn < 0) {
+ ret = hi->irq_btn;
+ goto err_get_button_irq_num_failed;
+ }
+
+ /* Set CPLD MUX to H2W <-> CPLD GPIO */
+ configure_cpld(UART3);
+ /* Set the CPLD connected H2W GPIO's to input */
+ gpio_set_value(SAPPHIRE_GPIO_H2W_CLK_DIR, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_DAT_DIR, 0);
+
+ hrtimer_init(&hi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hi->timer.function = detect_event_timer_func;
+ hrtimer_init(&hi->btn_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hi->btn_timer.function = button_event_timer_func;
+
+ ret = request_irq(hi->irq, detect_irq_handler,
+ IRQF_TRIGGER_LOW, "h2w_detect", NULL);
+ if (ret < 0)
+ goto err_request_detect_irq;
+
+ /* Disable button until plugged in */
+ set_irq_flags(hi->irq_btn, IRQF_VALID | IRQF_NOAUTOEN);
+ ret = request_irq(hi->irq_btn, button_irq_handler,
+ IRQF_TRIGGER_LOW, "h2w_button", NULL);
+ if (ret < 0)
+ goto err_request_h2w_headset_button_irq;
+
+ ret = set_irq_wake(hi->irq, 1);
+ if (ret < 0)
+ goto err_request_input_dev;
+ ret = set_irq_wake(hi->irq_btn, 1);
+ if (ret < 0)
+ goto err_request_input_dev;
+
+ hi->input = input_allocate_device();
+ if (!hi->input) {
+ ret = -ENOMEM;
+ goto err_request_input_dev;
+ }
+
+ hi->input->name = "h2w headset";
+ hi->input->evbit[0] = BIT_MASK(EV_KEY);
+ hi->input->keybit[BIT_WORD(KEY_MEDIA)] = BIT_MASK(KEY_MEDIA);
+
+ ret = input_register_device(hi->input);
+ if (ret < 0)
+ goto err_register_input_dev;
+
+ return 0;
+
+err_register_input_dev:
+ input_free_device(hi->input);
+err_request_input_dev:
+ free_irq(hi->irq_btn, 0);
+err_request_h2w_headset_button_irq:
+ free_irq(hi->irq, 0);
+err_request_detect_irq:
+err_get_button_irq_num_failed:
+err_get_h2w_detect_irq_num_failed:
+err_set_button_gpio:
+err_set_detect_gpio:
+ gpio_free(SAPPHIRE_GPIO_CABLE_IN2);
+err_request_button_gpio:
+ gpio_free(SAPPHIRE_GPIO_CABLE_IN1);
+err_request_detect_gpio:
+ destroy_workqueue(g_detection_work_queue);
+err_create_work_queue:
+ switch_dev_unregister(&hi->sdev);
+err_switch_dev_register:
+ printk(KERN_ERR "H2W: Failed to register driver\n");
+
+ return ret;
+}
+
+static int sapphire_h2w_remove(struct platform_device *pdev)
+{
+ H2W_DBG("");
+ if (switch_get_state(&hi->sdev))
+ remove_headset();
+ input_unregister_device(hi->input);
+ gpio_free(SAPPHIRE_GPIO_CABLE_IN2);
+ gpio_free(SAPPHIRE_GPIO_CABLE_IN1);
+ free_irq(hi->irq_btn, 0);
+ free_irq(hi->irq, 0);
+ destroy_workqueue(g_detection_work_queue);
+ switch_dev_unregister(&hi->sdev);
+
+ return 0;
+}
+
+static struct platform_device sapphire_h2w_device = {
+ .name = "sapphire-h2w",
+};
+
+static struct platform_driver sapphire_h2w_driver = {
+ .probe = sapphire_h2w_probe,
+ .remove = sapphire_h2w_remove,
+ .driver = {
+ .name = "sapphire-h2w",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sapphire_h2w_init(void)
+{
+ if (!machine_is_sapphire())
+ return 0;
+ int ret;
+ H2W_DBG("");
+ ret = platform_driver_register(&sapphire_h2w_driver);
+ if (ret)
+ return ret;
+ return platform_device_register(&sapphire_h2w_device);
+}
+
+static void __exit sapphire_h2w_exit(void)
+{
+ platform_device_unregister(&sapphire_h2w_device);
+ platform_driver_unregister(&sapphire_h2w_driver);
+}
+
+module_init(sapphire_h2w_init);
+module_exit(sapphire_h2w_exit);
+
+MODULE_AUTHOR("Laurence Chen <Laurence_Chen@htc.com>");
+MODULE_DESCRIPTION("HTC 2 Wire detection driver for sapphire");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/board-sapphire-keypad.c b/arch/arm/mach-msm/board-sapphire-keypad.c
new file mode 100644
index 000000000000..14f12e5e865c
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-keypad.c
@@ -0,0 +1,122 @@
+/* arch/arm/mach-msm/board-sapphire-keypad.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/gpio_event.h>
+#include <asm/mach-types.h>
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+static char *keycaps = "--qwerty";
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "board_sapphire."
+module_param_named(keycaps, keycaps, charp, 0);
+
+
+static unsigned int sapphire_col_gpios[] = { 35, 34 };
+
+/* KP_MKIN2 (GPIO40) is not used? */
+static unsigned int sapphire_row_gpios[] = { 42, 41 };
+
+#define KEYMAP_INDEX(col, row) ((col)*ARRAY_SIZE(sapphire_row_gpios) + (row))
+
+/*scan matrix key*/
+/* HOME(up) + MENU (down)*/
+static const unsigned short sapphire_keymap1[ARRAY_SIZE(sapphire_col_gpios) *
+ ARRAY_SIZE(sapphire_row_gpios)] = {
+ [KEYMAP_INDEX(0, 0)] = KEY_BACK,
+ [KEYMAP_INDEX(0, 1)] = KEY_MENU,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_HOME,
+ [KEYMAP_INDEX(1, 1)] = KEY_SEND,
+};
+
+/* MENU(up) + HOME (down)*/
+static const unsigned short sapphire_keymap0[ARRAY_SIZE(sapphire_col_gpios) *
+ ARRAY_SIZE(sapphire_row_gpios)] = {
+ [KEYMAP_INDEX(0, 0)] = KEY_BACK,
+ [KEYMAP_INDEX(0, 1)] = KEY_HOME,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_MENU,
+ [KEYMAP_INDEX(1, 1)] = KEY_SEND,
+};
+
+static struct gpio_event_matrix_info sapphire_keypad_matrix_info = {
+ .info.func = gpio_event_matrix_func,
+ .keymap = sapphire_keymap1,
+ .output_gpios = sapphire_col_gpios,
+ .input_gpios = sapphire_row_gpios,
+ .noutputs = ARRAY_SIZE(sapphire_col_gpios),
+ .ninputs = ARRAY_SIZE(sapphire_row_gpios),
+ .settle_time.tv.nsec = 40 * NSEC_PER_USEC,
+ .poll_time.tv.nsec = 20 * NSEC_PER_MSEC,
+ .debounce_delay.tv.nsec = 50 * NSEC_PER_MSEC,
+ .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ |
+ GPIOKPF_REMOVE_PHANTOM_KEYS |
+ GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/
+};
+
+static struct gpio_event_direct_entry sapphire_keypad_nav_map[] = {
+ { SAPPHIRE_POWER_KEY, KEY_END },
+ { SAPPHIRE_VOLUME_UP, KEY_VOLUMEUP },
+ { SAPPHIRE_VOLUME_DOWN, KEY_VOLUMEDOWN },
+};
+
+static struct gpio_event_input_info sapphire_keypad_nav_info = {
+ .info.func = gpio_event_input_func,
+ .flags = 0,
+ .type = EV_KEY,
+ .keymap = sapphire_keypad_nav_map,
+ .debounce_time.tv.nsec = 20 * NSEC_PER_MSEC,
+ .keymap_size = ARRAY_SIZE(sapphire_keypad_nav_map)
+};
+
+static struct gpio_event_info *sapphire_keypad_info[] = {
+ &sapphire_keypad_matrix_info.info,
+ &sapphire_keypad_nav_info.info,
+};
+
+static struct gpio_event_platform_data sapphire_keypad_data = {
+ .name = "sapphire-keypad",
+ .info = sapphire_keypad_info,
+ .info_count = ARRAY_SIZE(sapphire_keypad_info)
+};
+
+static struct platform_device sapphire_keypad_device = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = 0,
+ .dev = {
+ .platform_data = &sapphire_keypad_data,
+ },
+};
+
+static int __init sapphire_init_keypad(void)
+{
+ if (!machine_is_sapphire())
+ return 0;
+
+ switch (sapphire_get_hwid()) {
+ case 0:
+ sapphire_keypad_matrix_info.keymap = sapphire_keymap0;
+ break;
+ default:
+ sapphire_keypad_matrix_info.keymap = sapphire_keymap1;
+ break;
+ }
+ return platform_device_register(&sapphire_keypad_device);
+}
+
+device_initcall(sapphire_init_keypad);
+
diff --git a/arch/arm/mach-msm/board-sapphire-mmc.c b/arch/arm/mach-msm/board-sapphire-mmc.c
new file mode 100644
index 000000000000..ff2a5fdc6c14
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-mmc.c
@@ -0,0 +1,479 @@
+/* linux/arch/arm/mach-msm/board-sapphire-mmc.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <asm/mach-types.h>
+
+#include <mach/vreg.h>
+#include <mach/htc_pwrsink.h>
+
+#include <asm/mach/mmc.h>
+
+#include "devices.h"
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+#include "proc_comm.h"
+
+#define DEBUG_SDSLOT_VDD 1
+
+extern int msm_add_sdcc(unsigned int controller,
+ struct mmc_platform_data *plat);
+
+/* ---- COMMON ---- */
+static void config_gpio_table(uint32_t *table, int len)
+{
+ int n;
+ unsigned id;
+ for (n = 0; n < len; n++) {
+ id = table[n];
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0);
+ }
+}
+
+/* ---- SDCARD ---- */
+
+static uint32_t sdcard_on_gpio_table[] = {
+ PCOM_GPIO_CFG(62, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */
+ PCOM_GPIO_CFG(63, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */
+ PCOM_GPIO_CFG(64, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT3 */
+ PCOM_GPIO_CFG(65, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT2 */
+ PCOM_GPIO_CFG(66, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(67, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */
+};
+
+static uint32_t sdcard_off_gpio_table[] = {
+ PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */
+ PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */
+ PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */
+};
+
+static uint opt_disable_sdcard;
+
+static int __init sapphire_disablesdcard_setup(char *str)
+{
+ int cal = simple_strtol(str, NULL, 0);
+
+ opt_disable_sdcard = cal;
+ return 1;
+}
+
+__setup("board_sapphire.disable_sdcard=", sapphire_disablesdcard_setup);
+
+static struct vreg *vreg_sdslot; /* SD slot power */
+
+struct mmc_vdd_xlat {
+ int mask;
+ int level;
+};
+
+static struct mmc_vdd_xlat mmc_vdd_table[] = {
+ { MMC_VDD_165_195, 1800 },
+ { MMC_VDD_20_21, 2050 },
+ { MMC_VDD_21_22, 2150 },
+ { MMC_VDD_22_23, 2250 },
+ { MMC_VDD_23_24, 2350 },
+ { MMC_VDD_24_25, 2450 },
+ { MMC_VDD_25_26, 2550 },
+ { MMC_VDD_26_27, 2650 },
+ { MMC_VDD_27_28, 2750 },
+ { MMC_VDD_28_29, 2850 },
+ { MMC_VDD_29_30, 2950 },
+};
+
+static unsigned int sdslot_vdd = 0xffffffff;
+static unsigned int sdslot_vreg_enabled;
+
+static uint32_t sapphire_sdslot_switchvdd(struct device *dev, unsigned int vdd)
+{
+ int i, rc;
+
+ BUG_ON(!vreg_sdslot);
+
+ if (vdd == sdslot_vdd)
+ return 0;
+
+ sdslot_vdd = vdd;
+
+ if (vdd == 0) {
+#if DEBUG_SDSLOT_VDD
+ printk(KERN_DEBUG "%s: Disabling SD slot power\n", __func__);
+#endif
+ config_gpio_table(sdcard_off_gpio_table,
+ ARRAY_SIZE(sdcard_off_gpio_table));
+ vreg_disable(vreg_sdslot);
+ sdslot_vreg_enabled = 0;
+ return 0;
+ }
+
+ if (!sdslot_vreg_enabled) {
+ rc = vreg_enable(vreg_sdslot);
+ if (rc) {
+ printk(KERN_ERR "%s: Error enabling vreg (%d)\n",
+ __func__, rc);
+ }
+ config_gpio_table(sdcard_on_gpio_table,
+ ARRAY_SIZE(sdcard_on_gpio_table));
+ sdslot_vreg_enabled = 1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) {
+ if (mmc_vdd_table[i].mask == (1 << vdd)) {
+#if DEBUG_SDSLOT_VDD
+ printk(KERN_DEBUG "%s: Setting level to %u\n",
+ __func__, mmc_vdd_table[i].level);
+#endif
+ rc = vreg_set_level(vreg_sdslot,
+ mmc_vdd_table[i].level);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: Error setting vreg level (%d)\n",
+ __func__, rc);
+ }
+ return 0;
+ }
+ }
+
+ printk(KERN_ERR "%s: Invalid VDD %d specified\n", __func__, vdd);
+ return 0;
+}
+
+static unsigned int sapphire_sdslot_status(struct device *dev)
+{
+ unsigned int status;
+
+ status = (unsigned int) gpio_get_value(SAPPHIRE_GPIO_SDMC_CD_N);
+ return !status;
+}
+
+#define SAPPHIRE_MMC_VDD (MMC_VDD_165_195 | MMC_VDD_20_21 | MMC_VDD_21_22 \
+ | MMC_VDD_22_23 | MMC_VDD_23_24 | MMC_VDD_24_25 \
+ | MMC_VDD_25_26 | MMC_VDD_26_27 | MMC_VDD_27_28 \
+ | MMC_VDD_28_29 | MMC_VDD_29_30)
+
+static struct mmc_platform_data sapphire_sdslot_data = {
+ .ocr_mask = SAPPHIRE_MMC_VDD,
+ .status_irq = SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_SDMC_CD_N),
+ .status = sapphire_sdslot_status,
+ .translate_vdd = sapphire_sdslot_switchvdd,
+};
+
+/* ---- WIFI ---- */
+
+static uint32_t wifi_on_gpio_table[] = {
+ PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */
+ PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */
+ PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */
+ PCOM_GPIO_CFG(29, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */
+};
+
+static uint32_t wifi_off_gpio_table[] = {
+ PCOM_GPIO_CFG(51, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(52, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(53, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(54, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */
+ PCOM_GPIO_CFG(55, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */
+ PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */
+ PCOM_GPIO_CFG(29, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */
+};
+
+static struct vreg *vreg_wifi_osc; /* WIFI 32khz oscilator */
+static int sapphire_wifi_cd = 0; /* WIFI virtual 'card detect' status */
+
+static struct sdio_embedded_func wifi_func = {
+ .f_class = SDIO_CLASS_WLAN,
+ .f_maxblksize = 512,
+};
+
+static struct embedded_sdio_data sapphire_wifi_emb_data = {
+ .cis = {
+ .vendor = 0x104c,
+ .device = 0x9066,
+ .blksize = 512,
+ .max_dtr = 20000000,
+ },
+ .cccr = {
+ .multi_block = 0,
+ .low_speed = 0,
+ .wide_bus = 1,
+ .high_power = 0,
+ .high_speed = 0,
+ },
+ .funcs = &wifi_func,
+ .num_funcs = 1,
+};
+
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+
+static int sapphire_wifi_status_register(void (*callback)(int card_present,
+ void *dev_id),
+ void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = callback;
+ wifi_status_cb_devid = dev_id;
+ return 0;
+}
+
+static unsigned int sapphire_wifi_status(struct device *dev)
+{
+ return sapphire_wifi_cd;
+}
+
+int sapphire_wifi_set_carddetect(int val)
+{
+ printk(KERN_DEBUG "%s: %d\n", __func__, val);
+ sapphire_wifi_cd = val;
+ if (wifi_status_cb)
+ wifi_status_cb(val, wifi_status_cb_devid);
+ else
+ printk(KERN_WARNING "%s: Nobody to notify\n", __func__);
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sapphire_wifi_set_carddetect);
+#endif
+
+static int sapphire_wifi_power_state;
+static int sapphire_bt_power_state;
+
+int sapphire_wifi_power(int on)
+{
+ int rc;
+
+ printk(KERN_DEBUG "%s: %d\n", __func__, on);
+
+ if (on) {
+ config_gpio_table(wifi_on_gpio_table,
+ ARRAY_SIZE(wifi_on_gpio_table));
+ rc = vreg_enable(vreg_wifi_osc);
+ if (rc)
+ return rc;
+ htc_pwrsink_set(PWRSINK_WIFI, 70);
+ } else {
+ config_gpio_table(wifi_off_gpio_table,
+ ARRAY_SIZE(wifi_off_gpio_table));
+ htc_pwrsink_set(PWRSINK_WIFI, 0);
+ }
+ gpio_set_value(SAPPHIRE_GPIO_MAC_32K_EN, on);
+ mdelay(100);
+ gpio_set_value(SAPPHIRE_GPIO_WIFI_EN, on);
+ mdelay(100);
+ if (!on)
+ vreg_disable(vreg_wifi_osc);
+ sapphire_wifi_power_state = on;
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sapphire_wifi_power);
+#endif
+
+/* Eenable VREG_MMC pin to turn on fastclock oscillator : colin */
+int sapphire_bt_fastclock_power(int on)
+{
+ int rc;
+
+ printk(KERN_DEBUG "sapphire_bt_fastclock_power on = %d\n", on);
+ if (vreg_wifi_osc) {
+ if (on) {
+ rc = vreg_enable(vreg_wifi_osc);
+ printk(KERN_DEBUG "BT vreg_enable vreg_mmc, rc=%d\n",
+ rc);
+ if (rc) {
+ printk("Error turn sapphire_bt_fastclock_power rc=%d\n", rc);
+ return rc;
+ }
+ } else {
+ if (!sapphire_wifi_power_state) {
+ vreg_disable(vreg_wifi_osc);
+ printk(KERN_DEBUG "BT disable vreg_wifi_osc.\n");
+ } else
+ printk(KERN_DEBUG "BT shouldn't disable vreg_wifi_osc. WiFi is using it!!\n");
+ }
+ }
+ sapphire_bt_power_state = on;
+ return 0;
+}
+EXPORT_SYMBOL(sapphire_bt_fastclock_power);
+
+static int sapphire_wifi_reset_state;
+void sapphire_wifi_reset(int on)
+{
+ printk(KERN_DEBUG "%s: %d\n", __func__, on);
+ gpio_set_value(SAPPHIRE_GPIO_WIFI_PA_RESETX, !on);
+ sapphire_wifi_reset_state = on;
+ mdelay(50);
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(sapphire_wifi_reset);
+#endif
+
+static struct mmc_platform_data sapphire_wifi_data = {
+ .ocr_mask = MMC_VDD_28_29,
+ .status = sapphire_wifi_status,
+ .register_status_notify = sapphire_wifi_status_register,
+ .embedded_sdio = &sapphire_wifi_emb_data,
+};
+
+int __init sapphire_init_mmc(unsigned int sys_rev)
+{
+ wifi_status_cb = NULL;
+
+ sdslot_vreg_enabled = 0;
+
+ vreg_sdslot = vreg_get(0, "gp6");
+ if (IS_ERR(vreg_sdslot))
+ return PTR_ERR(vreg_sdslot);
+ vreg_wifi_osc = vreg_get(0, "mmc");
+ if (IS_ERR(vreg_wifi_osc))
+ return PTR_ERR(vreg_wifi_osc);
+
+ set_irq_wake(SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_SDMC_CD_N), 1);
+
+ msm_add_sdcc(1, &sapphire_wifi_data);
+
+ if (!opt_disable_sdcard)
+ msm_add_sdcc(2, &sapphire_sdslot_data);
+ else
+ printk(KERN_INFO "sapphire: SD-Card interface disabled\n");
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int sapphiremmc_dbg_wifi_reset_set(void *data, u64 val)
+{
+ sapphire_wifi_reset((int) val);
+ return 0;
+}
+
+static int sapphiremmc_dbg_wifi_reset_get(void *data, u64 *val)
+{
+ *val = sapphire_wifi_reset_state;
+ return 0;
+}
+
+static int sapphiremmc_dbg_wifi_cd_set(void *data, u64 val)
+{
+ sapphire_wifi_set_carddetect((int) val);
+ return 0;
+}
+
+static int sapphiremmc_dbg_wifi_cd_get(void *data, u64 *val)
+{
+ *val = sapphire_wifi_cd;
+ return 0;
+}
+
+static int sapphiremmc_dbg_wifi_pwr_set(void *data, u64 val)
+{
+ sapphire_wifi_power((int) val);
+ return 0;
+}
+
+static int sapphiremmc_dbg_wifi_pwr_get(void *data, u64 *val)
+{
+
+ *val = sapphire_wifi_power_state;
+ return 0;
+}
+
+static int sapphiremmc_dbg_sd_pwr_set(void *data, u64 val)
+{
+ sapphire_sdslot_switchvdd(NULL, (unsigned int) val);
+ return 0;
+}
+
+static int sapphiremmc_dbg_sd_pwr_get(void *data, u64 *val)
+{
+ *val = sdslot_vdd;
+ return 0;
+}
+
+static int sapphiremmc_dbg_sd_cd_set(void *data, u64 val)
+{
+ return -ENOSYS;
+}
+
+static int sapphiremmc_dbg_sd_cd_get(void *data, u64 *val)
+{
+ *val = sapphire_sdslot_status(NULL);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_wifi_reset_fops,
+ sapphiremmc_dbg_wifi_reset_get,
+ sapphiremmc_dbg_wifi_reset_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_wifi_cd_fops,
+ sapphiremmc_dbg_wifi_cd_get,
+ sapphiremmc_dbg_wifi_cd_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_wifi_pwr_fops,
+ sapphiremmc_dbg_wifi_pwr_get,
+ sapphiremmc_dbg_wifi_pwr_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_sd_pwr_fops,
+ sapphiremmc_dbg_sd_pwr_get,
+ sapphiremmc_dbg_sd_pwr_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(sapphiremmc_dbg_sd_cd_fops,
+ sapphiremmc_dbg_sd_cd_get,
+ sapphiremmc_dbg_sd_cd_set, "%llu\n");
+
+static int __init sapphiremmc_dbg_init(void)
+{
+ struct dentry *dent;
+
+ if (!machine_is_sapphire())
+ return 0;
+
+ dent = debugfs_create_dir("sapphiremmc_dbg", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debugfs_create_file("wifi_reset", 0644, dent, NULL,
+ &sapphiremmc_dbg_wifi_reset_fops);
+ debugfs_create_file("wifi_cd", 0644, dent, NULL,
+ &sapphiremmc_dbg_wifi_cd_fops);
+ debugfs_create_file("wifi_pwr", 0644, dent, NULL,
+ &sapphiremmc_dbg_wifi_pwr_fops);
+
+ debugfs_create_file("sd_pwr", 0644, dent, NULL,
+ &sapphiremmc_dbg_sd_pwr_fops);
+ debugfs_create_file("sd_cd", 0644, dent, NULL,
+ &sapphiremmc_dbg_sd_cd_fops);
+
+ return 0;
+}
+
+device_initcall(sapphiremmc_dbg_init);
+
+#endif
diff --git a/arch/arm/mach-msm/board-sapphire-panel.c b/arch/arm/mach-msm/board-sapphire-panel.c
new file mode 100644
index 000000000000..9129f4cdcb5f
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-panel.c
@@ -0,0 +1,656 @@
+/* linux/arch/arm/mach-msm/board-sapphire-panel.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/leds.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <asm/mach-types.h>
+
+#include <mach/msm_fb.h>
+#include <mach/vreg.h>
+#include <mach/htc_pwrsink.h>
+
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+#include "proc_comm.h"
+#include "devices.h"
+
+enum sapphire_panel_type {
+ SAPPHIRE_PANEL_SHARP = 0,
+ SAPPHIRE_PANEL_TOPPOLY,
+ NUM_OF_SAPPHIRE_PANELS,
+};
+static int g_panel_id = -1 ;
+
+#define SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS 132
+
+static int sapphire_backlight_off;
+static int sapphire_backlight_brightness =
+ SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS;
+
+static uint8_t sapphire_backlight_last_level = 33;
+static DEFINE_MUTEX(sapphire_backlight_lock);
+
+/* Divide dimming level into 12 sections, and restrict maximum level to 27 */
+#define DIMMING_STEPS 12
+static unsigned dimming_levels[NUM_OF_SAPPHIRE_PANELS][DIMMING_STEPS] = {
+ {0, 1, 2, 3, 6, 9, 11, 13, 16, 19, 22, 25}, /* Sharp */
+ {0, 1, 2, 4, 7, 10, 13, 15, 18, 21, 24, 27}, /* Toppolly */
+};
+static unsigned pwrsink_percents[] = {0, 6, 8, 15, 26, 34, 46, 54, 65, 77, 87,
+ 100};
+
+static void sapphire_set_backlight_level(uint8_t level)
+{
+ unsigned dimming_factor = 255/DIMMING_STEPS + 1;
+ int index = (level + dimming_factor - 1) / dimming_factor;
+ unsigned percent;
+ unsigned long flags;
+ int i = 0;
+
+ printk(KERN_INFO "level=%d, new level=dimming_levels[%d]=%d\n",
+ level, index, dimming_levels[g_panel_id][index]);
+ percent = pwrsink_percents[index];
+ level = dimming_levels[g_panel_id][index];
+
+ if (sapphire_backlight_last_level == level)
+ return;
+
+ if (level == 0) {
+ gpio_set_value(27, 0);
+ msleep(2);
+ } else {
+ local_irq_save(flags);
+ if (sapphire_backlight_last_level == 0) {
+ gpio_set_value(27, 1);
+ udelay(40);
+ sapphire_backlight_last_level = 33;
+ }
+ i = (sapphire_backlight_last_level - level + 33) % 33;
+ while (i-- > 0) {
+ gpio_set_value(27, 0);
+ udelay(1);
+ gpio_set_value(27, 1);
+ udelay(1);
+ }
+ local_irq_restore(flags);
+ }
+ sapphire_backlight_last_level = level;
+ htc_pwrsink_set(PWRSINK_BACKLIGHT, percent);
+}
+
+#define MDDI_CLIENT_CORE_BASE 0x108000
+#define LCD_CONTROL_BLOCK_BASE 0x110000
+#define SPI_BLOCK_BASE 0x120000
+#define I2C_BLOCK_BASE 0x130000
+#define PWM_BLOCK_BASE 0x140000
+#define GPIO_BLOCK_BASE 0x150000
+#define SYSTEM_BLOCK1_BASE 0x160000
+#define SYSTEM_BLOCK2_BASE 0x170000
+
+
+#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
+#define SYSCLKENA (MDDI_CLIENT_CORE_BASE|0x2C)
+#define PWM0OFF (PWM_BLOCK_BASE|0x1C)
+
+#define V_VDDE2E_VDD2_GPIO 0
+#define V_VDDE2E_VDD2_GPIO_5M 89
+#define MDDI_RST_N 82
+
+#define MDDICAP0 (MDDI_CLIENT_CORE_BASE|0x00)
+#define MDDICAP1 (MDDI_CLIENT_CORE_BASE|0x04)
+#define MDDICAP2 (MDDI_CLIENT_CORE_BASE|0x08)
+#define MDDICAP3 (MDDI_CLIENT_CORE_BASE|0x0C)
+#define MDCAPCHG (MDDI_CLIENT_CORE_BASE|0x10)
+#define MDCRCERC (MDDI_CLIENT_CORE_BASE|0x14)
+#define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18)
+#define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C)
+#define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20)
+#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
+#define DPRUN (MDDI_CLIENT_CORE_BASE|0x28)
+#define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C)
+#define TESTMODE (MDDI_CLIENT_CORE_BASE|0x30)
+#define FIFOMONI (MDDI_CLIENT_CORE_BASE|0x34)
+#define INTMONI (MDDI_CLIENT_CORE_BASE|0x38)
+#define MDIOBIST (MDDI_CLIENT_CORE_BASE|0x3C)
+#define MDIOPSET (MDDI_CLIENT_CORE_BASE|0x40)
+#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44)
+#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48)
+#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C)
+#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50)
+#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54)
+
+#define SRST (LCD_CONTROL_BLOCK_BASE|0x00)
+#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04)
+#define START (LCD_CONTROL_BLOCK_BASE|0x08)
+#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C)
+#define CMN (LCD_CONTROL_BLOCK_BASE|0x10)
+#define GAMMA (LCD_CONTROL_BLOCK_BASE|0x14)
+#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18)
+#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C)
+#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20)
+#define HDE_LEFT (LCD_CONTROL_BLOCK_BASE|0x24)
+#define VDE_TOP (LCD_CONTROL_BLOCK_BASE|0x28)
+#define PXL (LCD_CONTROL_BLOCK_BASE|0x30)
+#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34)
+#define HSW (LCD_CONTROL_BLOCK_BASE|0x38)
+#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C)
+#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40)
+#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44)
+#define VSW (LCD_CONTROL_BLOCK_BASE|0x48)
+#define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C)
+#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50)
+#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54)
+#define WSYN_DLY (LCD_CONTROL_BLOCK_BASE|0x58)
+#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C)
+#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60)
+#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64)
+#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68)
+#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C)
+#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70)
+#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74)
+#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78)
+#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C)
+#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80)
+#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84)
+#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88)
+#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C)
+
+#define SSICTL (SPI_BLOCK_BASE|0x00)
+#define SSITIME (SPI_BLOCK_BASE|0x04)
+#define SSITX (SPI_BLOCK_BASE|0x08)
+#define SSIRX (SPI_BLOCK_BASE|0x0C)
+#define SSIINTC (SPI_BLOCK_BASE|0x10)
+#define SSIINTS (SPI_BLOCK_BASE|0x14)
+#define SSIDBG1 (SPI_BLOCK_BASE|0x18)
+#define SSIDBG2 (SPI_BLOCK_BASE|0x1C)
+#define SSIID (SPI_BLOCK_BASE|0x20)
+
+#define WKREQ (SYSTEM_BLOCK1_BASE|0x00)
+#define CLKENB (SYSTEM_BLOCK1_BASE|0x04)
+#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08)
+#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C)
+#define GPIOSEL (SYSTEM_BLOCK2_BASE|0x00)
+
+#define GPIODATA (GPIO_BLOCK_BASE|0x00)
+#define GPIODIR (GPIO_BLOCK_BASE|0x04)
+#define GPIOIS (GPIO_BLOCK_BASE|0x08)
+#define GPIOIBE (GPIO_BLOCK_BASE|0x0C)
+#define GPIOIEV (GPIO_BLOCK_BASE|0x10)
+#define GPIOIE (GPIO_BLOCK_BASE|0x14)
+#define GPIORIS (GPIO_BLOCK_BASE|0x18)
+#define GPIOMIS (GPIO_BLOCK_BASE|0x1C)
+#define GPIOIC (GPIO_BLOCK_BASE|0x20)
+#define GPIOOMS (GPIO_BLOCK_BASE|0x24)
+#define GPIOPC (GPIO_BLOCK_BASE|0x28)
+#define GPIOID (GPIO_BLOCK_BASE|0x30)
+
+#define SPI_WRITE(reg, val) \
+ { SSITX, 0x00010000 | (((reg) & 0xff) << 8) | ((val) & 0xff) }, \
+ { 0, 5 },
+
+#define SPI_WRITE1(reg) \
+ { SSITX, (reg) & 0xff }, \
+ { 0, 5 },
+
+struct mddi_table {
+ uint32_t reg;
+ uint32_t value;
+};
+static struct mddi_table mddi_toshiba_init_table[] = {
+ { DPSET0, 0x09e90046 },
+ { DPSET1, 0x00000118 },
+ { DPSUS, 0x00000000 },
+ { DPRUN, 0x00000001 },
+ { 1, 14 }, /* msleep 14 */
+ { SYSCKENA, 0x00000001 },
+ /*{ CLKENB, 0x000000EF } */
+ { CLKENB, 0x0000A1EF }, /* # SYS.CLKENB # Enable clocks for each module (without DCLK , i2cCLK) */
+ /*{ CLKENB, 0x000025CB }, Clock enable register */
+
+ { GPIODATA, 0x02000200 }, /* # GPI .GPIODATA # GPIO2(RESET_LCD_N) set to 0 , GPIO3(eDRAM_Power) set to 0 */
+ { GPIODIR, 0x000030D }, /* 24D # GPI .GPIODIR # Select direction of GPIO port (0,2,3,6,9 output) */
+ { GPIOSEL, 0/*0x00000173*/}, /* # SYS.GPIOSEL # GPIO port multiplexing control */
+ { GPIOPC, 0x03C300C0 }, /* # GPI .GPIOPC # GPIO2,3 PD cut */
+ { WKREQ, 0x00000000 }, /* # SYS.WKREQ # Wake-up request event is VSYNC alignment */
+
+ { GPIOIBE, 0x000003FF },
+ { GPIOIS, 0x00000000 },
+ { GPIOIC, 0x000003FF },
+ { GPIOIE, 0x00000000 },
+
+ { GPIODATA, 0x00040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
+ { 1, 1 }, /* msleep 1 */
+ { GPIODATA, 0x02040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
+ { DRAMPWR, 0x00000001 }, /* eDRAM power */
+};
+
+static struct mddi_table mddi_toshiba_panel_init_table[] = {
+ { SRST, 0x00000003 }, /* FIFO/LCDC not reset */
+ { PORT_ENB, 0x00000001 }, /* Enable sync. Port */
+ { START, 0x00000000 }, /* To stop operation */
+ /*{ START, 0x00000001 }, To start operation */
+ { PORT, 0x00000004 }, /* Polarity of VS/HS/DE. */
+ { CMN, 0x00000000 },
+ { GAMMA, 0x00000000 }, /* No Gamma correction */
+ { INTFLG, 0x00000000 }, /* VSYNC interrupt flag clear/status */
+ { INTMSK, 0x00000000 }, /* VSYNC interrupt mask is off. */
+ { MPLFBUF, 0x00000000 }, /* Select frame buffer's base address. */
+ { HDE_LEFT, 0x00000000 }, /* The value of HDE_LEFT. */
+ { VDE_TOP, 0x00000000 }, /* The value of VDE_TPO. */
+ { PXL, 0x00000001 }, /* 1. RGB666 */
+ /* 2. Data is valid from 1st frame of beginning. */
+ { HDE_START, 0x00000006 }, /* HDE_START= 14 PCLK */
+ { HDE_SIZE, 0x0000009F }, /* HDE_SIZE=320 PCLK */
+ { HSW, 0x00000004 }, /* HSW= 10 PCLK */
+ { VSW, 0x00000001 }, /* VSW=2 HCYCLE */
+ { VDE_START, 0x00000003 }, /* VDE_START=4 HCYCLE */
+ { VDE_SIZE, 0x000001DF }, /* VDE_SIZE=480 HCYCLE */
+ { WAKEUP, 0x000001e2 }, /* Wakeup position in VSYNC mode. */
+ { WSYN_DLY, 0x00000000 }, /* Wakeup position in VSIN mode. */
+ { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
+ { CLKENB, 0x000025CB }, /* Clock enable register */
+
+ { SSICTL, 0x00000170 }, /* SSI control register */
+ { SSITIME, 0x00000250 }, /* SSI timing control register */
+ { SSICTL, 0x00000172 }, /* SSI control register */
+};
+
+
+static struct mddi_table mddi_sharp_init_table[] = {
+ { VCYCLE, 0x000001eb },
+ { HCYCLE, 0x000000ae },
+ { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
+ { GPIODATA, 0x00040000 }, /* GPIO2 low */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { 1, 1 }, /* msleep 1 */
+ { GPIODATA, 0x00040004 }, /* GPIO2 high */
+ { 1, 10 }, /* msleep 10 */
+ SPI_WRITE(0x5f, 0x01)
+ SPI_WRITE1(0x11)
+ { 1, 200 }, /* msleep 200 */
+ SPI_WRITE1(0x29)
+ SPI_WRITE1(0xde)
+ { START, 0x00000001 }, /* To start operation */
+};
+
+static struct mddi_table mddi_sharp_deinit_table[] = {
+ { 1, 200 }, /* msleep 200 */
+ SPI_WRITE(0x10, 0x1)
+ { 1, 100 }, /* msleep 100 */
+ { GPIODATA, 0x00040004 }, /* GPIO2 high */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { GPIODATA, 0x00040000 }, /* GPIO2 low */
+ { 1, 10 }, /* msleep 10 */
+};
+
+static struct mddi_table mddi_tpo_init_table[] = {
+ { VCYCLE, 0x000001e5 },
+ { HCYCLE, 0x000000ac },
+ { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
+ { 0, 20 }, /* udelay 20 */
+ { GPIODATA, 0x00000004 }, /* GPIO2 high */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { 0, 20 }, /* udelay 20 */
+
+ SPI_WRITE(0x08, 0x01)
+ { 0, 500 }, /* udelay 500 */
+ SPI_WRITE(0x08, 0x00)
+ SPI_WRITE(0x02, 0x00)
+ SPI_WRITE(0x03, 0x04)
+ SPI_WRITE(0x04, 0x0e)
+ SPI_WRITE(0x09, 0x02)
+ SPI_WRITE(0x0b, 0x08)
+ SPI_WRITE(0x0c, 0x53)
+ SPI_WRITE(0x0d, 0x01)
+ SPI_WRITE(0x0e, 0xe0)
+ SPI_WRITE(0x0f, 0x01)
+ SPI_WRITE(0x10, 0x58)
+ SPI_WRITE(0x20, 0x1e)
+ SPI_WRITE(0x21, 0x0a)
+ SPI_WRITE(0x22, 0x0a)
+ SPI_WRITE(0x23, 0x1e)
+ SPI_WRITE(0x25, 0x32)
+ SPI_WRITE(0x26, 0x00)
+ SPI_WRITE(0x27, 0xac)
+ SPI_WRITE(0x29, 0x06)
+ SPI_WRITE(0x2a, 0xa4)
+ SPI_WRITE(0x2b, 0x45)
+ SPI_WRITE(0x2c, 0x45)
+ SPI_WRITE(0x2d, 0x15)
+ SPI_WRITE(0x2e, 0x5a)
+ SPI_WRITE(0x2f, 0xff)
+ SPI_WRITE(0x30, 0x6b)
+ SPI_WRITE(0x31, 0x0d)
+ SPI_WRITE(0x32, 0x48)
+ SPI_WRITE(0x33, 0x82)
+ SPI_WRITE(0x34, 0xbd)
+ SPI_WRITE(0x35, 0xe7)
+ SPI_WRITE(0x36, 0x18)
+ SPI_WRITE(0x37, 0x94)
+ SPI_WRITE(0x38, 0x01)
+ SPI_WRITE(0x39, 0x5d)
+ SPI_WRITE(0x3a, 0xae)
+ SPI_WRITE(0x3b, 0xff)
+ SPI_WRITE(0x07, 0x09)
+ { 0, 10 }, /* udelay 10 */
+ { START, 0x00000001 }, /* To start operation */
+};
+
+static struct mddi_table mddi_tpo_deinit_table[] = {
+ SPI_WRITE(0x07, 0x19)
+ { START, 0x00000000 }, /* To stop operation */
+ { GPIODATA, 0x00040004 }, /* GPIO2 high */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { GPIODATA, 0x00040000 }, /* GPIO2 low */
+ { 0, 5 }, /* usleep 5 */
+};
+
+
+#define GPIOSEL_VWAKEINT (1U << 0)
+#define INTMASK_VWAKEOUT (1U << 0)
+
+static void sapphire_process_mddi_table(
+ struct msm_mddi_client_data *client_data,
+ struct mddi_table *table, size_t count)
+{
+ int i;
+ for (i = 0; i < count; i++) {
+ uint32_t reg = table[i].reg;
+ uint32_t value = table[i].value;
+
+ if (reg == 0)
+ udelay(value);
+ else if (reg == 1)
+ msleep(value);
+ else
+ client_data->remote_write(client_data, value, reg);
+ }
+}
+
+static struct vreg *vreg_lcm_2v85;
+
+static void sapphire_mddi_power_client(struct msm_mddi_client_data *client_data,
+ int on)
+{
+ unsigned id, on_off;
+ printk(KERN_INFO "sapphire_mddi_client_power:%d\r\n", on);
+ if (on) {
+ on_off = 0;
+ id = PM_VREG_PDOWN_MDDI_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+
+ gpio_set_value(SAPPHIRE_MDDI_1V5_EN, 1);
+ mdelay(5); /* delay time >5ms and <10ms */
+
+ if (is_12pin_camera())
+ gpio_set_value(V_VDDE2E_VDD2_GPIO_5M, 1);
+ else
+ gpio_set_value(V_VDDE2E_VDD2_GPIO, 1);
+
+ gpio_set_value(SAPPHIRE_GPIO_MDDI_32K_EN, 1);
+ msleep(3);
+ id = PM_VREG_PDOWN_AUX_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+ vreg_enable(vreg_lcm_2v85);
+ msleep(3);
+ gpio_set_value(MDDI_RST_N, 1);
+ msleep(10);
+ } else {
+ gpio_set_value(SAPPHIRE_GPIO_MDDI_32K_EN, 0);
+ gpio_set_value(MDDI_RST_N, 0);
+ msleep(10);
+ vreg_disable(vreg_lcm_2v85);
+ on_off = 1;
+ id = PM_VREG_PDOWN_AUX_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+ msleep(5);
+ if (is_12pin_camera())
+ gpio_set_value(V_VDDE2E_VDD2_GPIO_5M, 0);
+ else
+ gpio_set_value(V_VDDE2E_VDD2_GPIO, 0);
+
+ msleep(200);
+ gpio_set_value(SAPPHIRE_MDDI_1V5_EN, 0);
+ id = PM_VREG_PDOWN_MDDI_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+ }
+}
+
+static int sapphire_mddi_toshiba_client_init(
+ struct msm_mddi_bridge_platform_data *bridge_data,
+ struct msm_mddi_client_data *client_data)
+{
+ int panel_id;
+
+ client_data->auto_hibernate(client_data, 0);
+ sapphire_process_mddi_table(client_data, mddi_toshiba_init_table,
+ ARRAY_SIZE(mddi_toshiba_init_table));
+ client_data->auto_hibernate(client_data, 1);
+ g_panel_id = panel_id =
+ (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
+ if (panel_id > 1) {
+ printk(KERN_ERR "unknown panel id at mddi_enable\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int sapphire_mddi_toshiba_client_uninit(
+ struct msm_mddi_bridge_platform_data *bridge_data,
+ struct msm_mddi_client_data *client_data)
+{
+ return 0;
+}
+
+static int sapphire_mddi_panel_unblank(
+ struct msm_mddi_bridge_platform_data *bridge_data,
+ struct msm_mddi_client_data *client_data)
+{
+ int panel_id, ret = 0;
+
+ sapphire_set_backlight_level(0);
+ client_data->auto_hibernate(client_data, 0);
+ sapphire_process_mddi_table(client_data, mddi_toshiba_panel_init_table,
+ ARRAY_SIZE(mddi_toshiba_panel_init_table));
+ panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
+ switch (panel_id) {
+ case 0:
+ printk(KERN_DEBUG "init sharp panel\n");
+ sapphire_process_mddi_table(client_data,
+ mddi_sharp_init_table,
+ ARRAY_SIZE(mddi_sharp_init_table));
+ break;
+ case 1:
+ printk(KERN_DEBUG "init tpo panel\n");
+ sapphire_process_mddi_table(client_data,
+ mddi_tpo_init_table,
+ ARRAY_SIZE(mddi_tpo_init_table));
+ break;
+ default:
+ printk(KERN_DEBUG "unknown panel_id: %d\n", panel_id);
+ ret = -1;
+ };
+ mutex_lock(&sapphire_backlight_lock);
+ sapphire_set_backlight_level(sapphire_backlight_brightness);
+ sapphire_backlight_off = 0;
+ mutex_unlock(&sapphire_backlight_lock);
+ client_data->auto_hibernate(client_data, 1);
+ /* reenable vsync */
+ client_data->remote_write(client_data, GPIOSEL_VWAKEINT,
+ GPIOSEL);
+ client_data->remote_write(client_data, INTMASK_VWAKEOUT,
+ INTMASK);
+ return ret;
+
+}
+
+static int sapphire_mddi_panel_blank(
+ struct msm_mddi_bridge_platform_data *bridge_data,
+ struct msm_mddi_client_data *client_data)
+{
+ int panel_id, ret = 0;
+
+ panel_id = (client_data->remote_read(client_data, GPIODATA) >> 4) & 3;
+ client_data->auto_hibernate(client_data, 0);
+ switch (panel_id) {
+ case 0:
+ printk(KERN_DEBUG "deinit sharp panel\n");
+ sapphire_process_mddi_table(client_data,
+ mddi_sharp_deinit_table,
+ ARRAY_SIZE(mddi_sharp_deinit_table));
+ break;
+ case 1:
+ printk(KERN_DEBUG "deinit tpo panel\n");
+ sapphire_process_mddi_table(client_data,
+ mddi_tpo_deinit_table,
+ ARRAY_SIZE(mddi_tpo_deinit_table));
+ break;
+ default:
+ printk(KERN_DEBUG "unknown panel_id: %d\n", panel_id);
+ ret = -1;
+ };
+ client_data->auto_hibernate(client_data, 1);
+ mutex_lock(&sapphire_backlight_lock);
+ sapphire_set_backlight_level(0);
+ sapphire_backlight_off = 1;
+ mutex_unlock(&sapphire_backlight_lock);
+ client_data->remote_write(client_data, 0, SYSCLKENA);
+ client_data->remote_write(client_data, 1, DPSUS);
+
+ return ret;
+}
+
+static void sapphire_brightness_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ mutex_lock(&sapphire_backlight_lock);
+ sapphire_backlight_brightness = value;
+ if (!sapphire_backlight_off)
+ sapphire_set_backlight_level(sapphire_backlight_brightness);
+ mutex_unlock(&sapphire_backlight_lock);
+}
+
+static struct led_classdev sapphire_backlight_led = {
+ .name = "lcd-backlight",
+ .brightness = SAPPHIRE_DEFAULT_BACKLIGHT_BRIGHTNESS,
+ .brightness_set = sapphire_brightness_set,
+};
+
+static int sapphire_backlight_probe(struct platform_device *pdev)
+{
+ led_classdev_register(&pdev->dev, &sapphire_backlight_led);
+ return 0;
+}
+
+static int sapphire_backlight_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&sapphire_backlight_led);
+ return 0;
+}
+
+static struct platform_driver sapphire_backlight_driver = {
+ .probe = sapphire_backlight_probe,
+ .remove = sapphire_backlight_remove,
+ .driver = {
+ .name = "sapphire-backlight",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct resource resources_msm_fb[] = {
+ {
+ .start = SMI64_MSM_FB_BASE,
+ .end = SMI64_MSM_FB_BASE + SMI64_MSM_FB_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct msm_mddi_bridge_platform_data toshiba_client_data = {
+ .init = sapphire_mddi_toshiba_client_init,
+ .uninit = sapphire_mddi_toshiba_client_uninit,
+ .blank = sapphire_mddi_panel_blank,
+ .unblank = sapphire_mddi_panel_unblank,
+ .fb_data = {
+ .xres = 320,
+ .yres = 480,
+ .width = 45,
+ .height = 67,
+ .output_format = 0,
+ },
+};
+
+static struct msm_mddi_platform_data mddi_pdata = {
+ .clk_rate = 122880000,
+ .power_client = sapphire_mddi_power_client,
+ .fb_resource = resources_msm_fb,
+ .num_clients = 1,
+ .client_platform_data = {
+ {
+ .product_id = (0xd263 << 16 | 0),
+ .name = "mddi_c_d263_0000",
+ .id = 0,
+ .client_data = &toshiba_client_data,
+ .clk_rate = 0,
+ },
+ },
+};
+
+static struct platform_device sapphire_backlight = {
+ .name = "sapphire-backlight",
+};
+
+int __init sapphire_init_panel(void)
+{
+ int rc = -1;
+ uint32_t config = PCOM_GPIO_CFG(27, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA); /* GPIO27 */
+
+ if (!machine_is_sapphire())
+ return 0;
+
+ /* checking board as soon as possible */
+ printk("sapphire_init_panel:machine_is_sapphire=%d, machine_arch_type=%d, MACH_TYPE_SAPPHIRE=%d\r\n", machine_is_sapphire(), machine_arch_type, MACH_TYPE_SAPPHIRE);
+ if (!machine_is_sapphire())
+ return 0;
+
+ vreg_lcm_2v85 = vreg_get(0, "gp4");
+ if (IS_ERR(vreg_lcm_2v85))
+ return PTR_ERR(vreg_lcm_2v85);
+
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
+
+ /* setup FB by SMI size */
+ if (sapphire_get_smi_size() == 32) {
+ resources_msm_fb[0].start = SMI32_MSM_FB_BASE;
+ resources_msm_fb[0].end = SMI32_MSM_FB_BASE + SMI32_MSM_FB_SIZE - 1;
+ }
+
+ rc = platform_device_register(&msm_device_mdp);
+ if (rc)
+ return rc;
+ msm_device_mddi0.dev.platform_data = &mddi_pdata;
+ rc = platform_device_register(&msm_device_mddi0);
+ if (rc)
+ return rc;
+ platform_device_register(&sapphire_backlight);
+ return platform_driver_register(&sapphire_backlight_driver);
+}
+
+device_initcall(sapphire_init_panel);
diff --git a/arch/arm/mach-msm/board-sapphire-rfkill.c b/arch/arm/mach-msm/board-sapphire-rfkill.c
new file mode 100644
index 000000000000..135ffe82c328
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-rfkill.c
@@ -0,0 +1,99 @@
+/* linux/arch/arm/mach-msm/board-sapphire-rfkill.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+/* Control bluetooth power for sapphire platform */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/rfkill.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <asm/mach-types.h>
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+
+void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
+
+static struct rfkill *bt_rfk;
+static const char bt_name[] = "brf6300";
+
+extern int sapphire_bt_fastclock_power(int on);
+
+static int bluetooth_set_power(void *data, enum rfkill_state state)
+{
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ sapphire_bt_fastclock_power(1);
+ gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 1);
+ udelay(10);
+ gpio_configure(101, GPIOF_DRIVE_OUTPUT | GPIOF_OUTPUT_HIGH);
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ gpio_configure(101, GPIOF_DRIVE_OUTPUT | GPIOF_OUTPUT_LOW);
+ gpio_set_value(SAPPHIRE_GPIO_BT_32K_EN, 0);
+ sapphire_bt_fastclock_power(0);
+ break;
+ default:
+ printk(KERN_ERR "bad bluetooth rfkill state %d\n", state);
+ }
+ return 0;
+}
+
+static int __init sapphire_rfkill_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ /* default to bluetooth off */
+ rfkill_switch_all(RFKILL_TYPE_BLUETOOTH, RFKILL_STATE_SOFT_BLOCKED);
+ bluetooth_set_power(NULL, RFKILL_STATE_SOFT_BLOCKED);
+
+ bt_rfk = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH);
+ if (!bt_rfk)
+ return -ENOMEM;
+
+ bt_rfk->name = bt_name;
+ bt_rfk->state = RFKILL_STATE_SOFT_BLOCKED;
+ /* userspace cannot take exclusive control */
+ bt_rfk->user_claim_unsupported = 1;
+ bt_rfk->user_claim = 0;
+ bt_rfk->data = NULL; /* user data */
+ bt_rfk->toggle_radio = bluetooth_set_power;
+
+ rc = rfkill_register(bt_rfk);
+
+ if (rc)
+ rfkill_free(bt_rfk);
+ return rc;
+}
+
+static struct platform_driver sapphire_rfkill_driver = {
+ .probe = sapphire_rfkill_probe,
+ .driver = {
+ .name = "sapphire_rfkill",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sapphire_rfkill_init(void)
+{
+ if (!machine_is_sapphire())
+ return 0;
+ return platform_driver_register(&sapphire_rfkill_driver);
+}
+
+module_init(sapphire_rfkill_init);
+MODULE_DESCRIPTION("sapphire rfkill");
+MODULE_AUTHOR("Nick Pelly <npelly@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/board-sapphire-wifi.c b/arch/arm/mach-msm/board-sapphire-wifi.c
new file mode 100644
index 000000000000..43f827c60f13
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire-wifi.c
@@ -0,0 +1,74 @@
+/* arch/arm/mach-msm/board-sapphire-wifi.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Dmitry Shmidt <dimitrysh@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <linux/wifi_tiwlan.h>
+
+extern int sapphire_wifi_set_carddetect(int val);
+extern int sapphire_wifi_power(int on);
+extern int sapphire_wifi_reset(int on);
+
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+typedef struct wifi_mem_prealloc_struct {
+ void *mem_ptr;
+ unsigned long size;
+} wifi_mem_prealloc_t;
+
+static wifi_mem_prealloc_t wifi_mem_array[WMPA_NUMBER_OF_SECTIONS] = {
+ { NULL, (WMPA_SECTION_SIZE_0 + WMPA_SECTION_HEADER) },
+ { NULL, (WMPA_SECTION_SIZE_1 + WMPA_SECTION_HEADER) },
+ { NULL, (WMPA_SECTION_SIZE_2 + WMPA_SECTION_HEADER) }
+};
+
+static void *sapphire_wifi_mem_prealloc(int section, unsigned long size)
+{
+ if ((section < 0) || (section >= WMPA_NUMBER_OF_SECTIONS))
+ return NULL;
+ if (wifi_mem_array[section].size < size)
+ return NULL;
+ return wifi_mem_array[section].mem_ptr;
+}
+
+int __init sapphire_init_wifi_mem (void)
+{
+ int i;
+
+ for (i = 0; (i < WMPA_NUMBER_OF_SECTIONS); i++) {
+ wifi_mem_array[i].mem_ptr = vmalloc(wifi_mem_array[i].size);
+ if (wifi_mem_array[i].mem_ptr == NULL)
+ return -ENOMEM;
+ }
+ return 0;
+}
+#endif
+
+struct wifi_platform_data sapphire_wifi_control = {
+ .set_power = sapphire_wifi_power,
+ .set_reset = sapphire_wifi_reset,
+ .set_carddetect = sapphire_wifi_set_carddetect,
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+ .mem_prealloc = sapphire_wifi_mem_prealloc,
+#else
+ .mem_prealloc = NULL,
+#endif
+};
+
+#endif
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
new file mode 100644
index 000000000000..8de7a45ea649
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -0,0 +1,1175 @@
+/* linux/arch/arm/mach-msm/board-sapphire.c
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/keyreset.h>
+#include <linux/leds.h>
+#include <linux/switch.h>
+#include <linux/synaptics_i2c_rmi.h>
+#include <linux/elan_i2c.h>
+#include <linux/akm8976.h>
+#include <mach/htc_headset.h>
+#include <linux/sysdev.h>
+#include <linux/android_pmem.h>
+
+#include <linux/delay.h>
+
+#include <asm/gpio.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+#include <asm/system.h>
+#include <mach/system.h>
+#include <mach/vreg.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/setup.h>
+
+#include <linux/gpio_event.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach/mmc.h>
+#include <linux/mmc/sdio_ids.h>
+
+
+#include "gpio_chip.h"
+#include "board-sapphire.h"
+#include "pm.h"
+
+#include <mach/board.h>
+#include <mach/board_htc.h>
+#include <mach/msm_serial_hs.h>
+#include <mach/htc_pwrsink.h>
+
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+extern int sapphire_init_wifi_mem(void);
+#endif
+extern struct wifi_platform_data sapphire_wifi_control;
+#endif
+
+#include "proc_comm.h"
+#include "devices.h"
+
+void msm_init_irq(void);
+void msm_init_gpio(void);
+void msm_init_pmic_vibrator(void);
+
+extern int sapphire_init_mmc(unsigned int);
+
+struct sapphire_axis_info {
+ struct gpio_event_axis_info info;
+ uint16_t in_state;
+ uint16_t out_state;
+ uint16_t temp_state;
+ uint16_t threshold;
+};
+static bool nav_just_on;
+static int nav_on_jiffies;
+static int smi_sz = 64;
+static unsigned int hwid = 0;
+static unsigned int skuid = 0;
+static unsigned engineerid = (0x01 << 1); /* default is 3M sensor */
+
+uint16_t sapphire_axis_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+ struct sapphire_axis_info *ai = container_of(info, struct sapphire_axis_info, info);
+ uint16_t out = ai->out_state;
+
+ if (nav_just_on) {
+ if (jiffies == nav_on_jiffies || jiffies == nav_on_jiffies + 1)
+ goto ignore;
+ nav_just_on = 0;
+ }
+ if ((ai->in_state ^ in) & 1)
+ out--;
+ if ((ai->in_state ^ in) & 2)
+ out++;
+ ai->out_state = out;
+ignore:
+ ai->in_state = in;
+ if (ai->out_state - ai->temp_state == ai->threshold) {
+ ai->temp_state++;
+ ai->out_state = ai->temp_state;
+ } else if (ai->temp_state - ai->out_state == ai->threshold) {
+ ai->temp_state--;
+ ai->out_state = ai->temp_state;
+ } else if (abs(ai->out_state - ai->temp_state) > ai->threshold)
+ ai->temp_state = ai->out_state;
+
+ return ai->temp_state;
+}
+
+int sapphire_nav_power(const struct gpio_event_platform_data *pdata, bool on)
+{
+ gpio_set_value(SAPPHIRE_GPIO_JOG_EN, on);
+ if (on) {
+ nav_just_on = 1;
+ nav_on_jiffies = jiffies;
+ }
+ return 0;
+}
+
+static uint32_t sapphire_x_axis_gpios[] = {
+ SAPPHIRE_BALL_LEFT_0, SAPPHIRE_BALL_RIGHT_0
+};
+
+static struct sapphire_axis_info sapphire_x_axis = {
+ .threshold = 2,
+ .info = {
+ .info.func = gpio_event_axis_func,
+ .count = ARRAY_SIZE(sapphire_x_axis_gpios),
+ .type = EV_REL,
+ .code = REL_X,
+ .decoded_size = 1U << ARRAY_SIZE(sapphire_x_axis_gpios),
+ .map = sapphire_axis_map,
+ .gpio = sapphire_x_axis_gpios,
+ .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */
+ }
+};
+
+static uint32_t sapphire_y_axis_gpios[] = {
+ SAPPHIRE_BALL_UP_0, SAPPHIRE_BALL_DOWN_0
+};
+
+static struct sapphire_axis_info sapphire_y_axis = {
+ .threshold = 2,
+ .info = {
+ .info.func = gpio_event_axis_func,
+ .count = ARRAY_SIZE(sapphire_y_axis_gpios),
+ .type = EV_REL,
+ .code = REL_Y,
+ .decoded_size = 1U << ARRAY_SIZE(sapphire_y_axis_gpios),
+ .map = sapphire_axis_map,
+ .gpio = sapphire_y_axis_gpios,
+ .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */
+ }
+};
+
+static struct gpio_event_direct_entry sapphire_nav_buttons[] = {
+ { SAPPHIRE_GPIO_NAVI_ACT_N, BTN_MOUSE },
+ { SAPPHIRE_GPIO_SEARCH_ACT_N, KEY_COMPOSE }, /* CPLD Key Search */
+};
+
+static struct gpio_event_input_info sapphire_nav_button_info = {
+ .info.func = gpio_event_input_func,
+ .flags = GPIOEDF_PRINT_KEYS | GPIOEDF_PRINT_KEY_DEBOUNCE,
+ .poll_time.tv.nsec = 40 * NSEC_PER_MSEC,
+ .type = EV_KEY,
+ .keymap = sapphire_nav_buttons,
+ .keymap_size = ARRAY_SIZE(sapphire_nav_buttons)
+};
+
+static struct gpio_event_info *sapphire_nav_info[] = {
+ &sapphire_x_axis.info.info,
+ &sapphire_y_axis.info.info,
+ &sapphire_nav_button_info.info
+};
+
+static struct gpio_event_platform_data sapphire_nav_data = {
+ .name = "sapphire-nav",
+ .info = sapphire_nav_info,
+ .info_count = ARRAY_SIZE(sapphire_nav_info),
+ .power = sapphire_nav_power,
+};
+
+static struct platform_device sapphire_nav_device = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = 2,
+ .dev = {
+ .platform_data = &sapphire_nav_data,
+ },
+};
+
+static int sapphire_reset_keys_up[] = {
+ BTN_MOUSE,
+ 0
+};
+
+static struct keyreset_platform_data sapphire_reset_keys_pdata = {
+ .keys_up = sapphire_reset_keys_up,
+ .keys_down = {
+ KEY_SEND,
+ KEY_MENU,
+ KEY_END,
+ 0
+ },
+};
+
+struct platform_device sapphire_reset_keys_device = {
+ .name = KEYRESET_NAME,
+ .dev.platform_data = &sapphire_reset_keys_pdata,
+};
+
+static int sapphire_ts_power(int on)
+{
+ int gpio_tp_ls_en = SAPPHIRE_TP_LS_EN;
+
+ if (is_12pin_camera())
+ gpio_tp_ls_en = SAPPHIRE20_TP_LS_EN;
+
+ if (on) {
+ sapphire_gpio_write(NULL, SAPPHIRE_GPIO_TP_EN, 1);
+ /* touchscreen must be powered before we enable i2c pullup */
+ msleep(2);
+ /* enable touch panel level shift */
+ gpio_direction_output(gpio_tp_ls_en, 1);
+ msleep(2);
+ } else {
+ gpio_direction_output(gpio_tp_ls_en, 0);
+ udelay(50);
+ sapphire_gpio_write(NULL, SAPPHIRE_GPIO_TP_EN, 0);
+ }
+
+ return 0;
+}
+
+static struct synaptics_i2c_rmi_platform_data sapphire_ts_data[] = {
+{
+ .version = 0x0101,
+ .power = sapphire_ts_power,
+ .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE,
+ .inactive_left = -50 * 0x10000 / 4334,
+ .inactive_right = -50 * 0x10000 / 4334,
+ .inactive_top = -40 * 0x10000 / 6696,
+ .inactive_bottom = -40 * 0x10000 / 6696,
+ .snap_left_on = 50 * 0x10000 / 4334,
+ .snap_left_off = 60 * 0x10000 / 4334,
+ .snap_right_on = 50 * 0x10000 / 4334,
+ .snap_right_off = 60 * 0x10000 / 4334,
+ .snap_top_on = 100 * 0x10000 / 6696,
+ .snap_top_off = 110 * 0x10000 / 6696,
+ .snap_bottom_on = 100 * 0x10000 / 6696,
+ .snap_bottom_off = 110 * 0x10000 / 6696,
+ },
+ {
+ .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE,
+ .inactive_left = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334,
+ .inactive_right = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334,
+ .inactive_top = ((6946 - 6696) / 2) * 0x10000 / 6696,
+ .inactive_bottom = ((6946 - 6696) / 2) * 0x10000 / 6696,
+ }
+};
+
+static struct akm8976_platform_data compass_platform_data = {
+ .reset = SAPPHIRE_GPIO_COMPASS_RST_N,
+ .clk_on = SAPPHIRE_GPIO_COMPASS_32K_EN,
+ .intr = SAPPHIRE_GPIO_COMPASS_IRQ,
+};
+
+static struct elan_i2c_platform_data elan_i2c_data[] = {
+ {
+ .version = 0x104,
+ .abs_x_min = 0,
+ .abs_y_min = 0,
+ .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N,
+ .power = sapphire_ts_power,
+ },
+ {
+ .version = 0x103,
+ .abs_x_min = 0,
+ .abs_x_max = 512 * 2,
+ .abs_y_min = 0,
+ .abs_y_max = 896 * 2,
+ .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N,
+ .power = sapphire_ts_power,
+ },
+ {
+ .version = 0x102,
+ .abs_x_min = 0,
+ .abs_x_max = 384,
+ .abs_y_min = 0,
+ .abs_y_max = 576,
+ .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N,
+ .power = sapphire_ts_power,
+ },
+ {
+ .version = 0x101,
+ .abs_x_min = 32 + 1,
+ .abs_x_max = 352 - 1,
+ .abs_y_min = 32 + 1,
+ .abs_y_max = 544 - 1,
+ .intr_gpio = SAPPHIRE_GPIO_TP_ATT_N,
+ .power = sapphire_ts_power,
+ }
+};
+
+static struct msm_camera_device_platform_data msm_camera_device_mt9t013 = {
+ .sensor_reset = 108,
+ .sensor_pwd = 85,
+ .vcm_pwd = SAPPHIRE_GPIO_VCM_PWDN,
+ .config_gpio_on = config_sapphire_camera_on_gpios,
+ .config_gpio_off = config_sapphire_camera_off_gpios,
+};
+
+static struct platform_device sapphire_camera = {
+ .name = "camera",
+ .dev = {
+ .platform_data = &msm_camera_device_mt9t013,
+ },
+};
+
+static struct i2c_board_info i2c_devices[] = {
+ {
+ I2C_BOARD_INFO(SYNAPTICS_I2C_RMI_NAME, 0x20),
+ .platform_data = sapphire_ts_data,
+ .irq = SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_TP_ATT_N)
+ },
+ {
+ I2C_BOARD_INFO(ELAN_8232_I2C_NAME, 0x10),
+ .platform_data = &elan_i2c_data,
+ .irq = SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_TP_ATT_N),
+ },
+ {
+ I2C_BOARD_INFO("akm8976", 0x1C),
+ .platform_data = &compass_platform_data,
+ .irq = SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_COMPASS_IRQ),
+ },
+ {
+ I2C_BOARD_INFO("mt9t013", 0x6C >> 1),
+ .platform_data = &msm_camera_device_mt9t013,
+ },
+};
+
+#ifdef CONFIG_LEDS_CPLD
+static struct resource cpldled_resources[] = {
+ {
+ .start = SAPPHIRE_CPLD_LED_BASE,
+ .end = SAPPHIRE_CPLD_LED_BASE + SAPPHIRE_CPLD_LED_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device android_CPLD_leds = {
+ .name = "leds-cpld",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(cpldled_resources),
+ .resource = cpldled_resources,
+};
+#endif
+
+static struct gpio_led android_led_list[] = {
+ {
+ .name = "button-backlight",
+ .gpio = SAPPHIRE_GPIO_APKEY_LED_EN,
+ },
+};
+
+static struct gpio_led_platform_data android_leds_data = {
+ .num_leds = ARRAY_SIZE(android_led_list),
+ .leds = android_led_list,
+};
+
+static struct platform_device android_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &android_leds_data,
+ },
+};
+
+#ifdef CONFIG_HTC_HEADSET
+/* RTS/CTS to GPO/GPI. */
+static uint32_t uart1_on_gpio_table[] = {
+ /* allenou, uart hs test, 2008/11/18 */
+ #ifdef CONFIG_SERIAL_MSM_HS
+ /* RTS */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_RTS, 2,
+ GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA),
+ /* CTS */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_CTS, 2,
+ GPIO_INPUT, GPIO_PULL_UP, GPIO_8MA),
+ #else
+ /* RTS */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_RTS, 1,
+ GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA),
+ /* CTS */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_CTS, 1,
+ GPIO_INPUT, GPIO_PULL_DOWN, GPIO_4MA),
+ #endif
+};
+
+/* RTS,CTS to BT. */
+static uint32_t uart1_off_gpio_table[] = {
+ /* RTS */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_RTS, 0,
+ GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
+ /* CTS */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART1_CTS, 0,
+ GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA),
+};
+
+/* Sapphire: Switch between UART3 and GPIO */
+static uint32_t uart3_on_gpio_table[] = {
+ /* RX */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART3_RX, 1,
+ GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA),
+ /* TX */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_UART3_TX, 1,
+ GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
+};
+
+/* set TX,RX to GPI */
+static uint32_t uart3_off_gpi_table[] = {
+ /* RX, H2W DATA */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_H2W_DATA, 0,
+ GPIO_INPUT, GPIO_NO_PULL, GPIO_2MA),
+ /* TX, H2W CLK */
+ PCOM_GPIO_CFG(SAPPHIRE_GPIO_H2W_CLK, 0,
+ GPIO_INPUT, GPIO_KEEPER, GPIO_2MA),
+};
+
+static int sapphire_h2w_path = H2W_GPIO;
+
+static void h2w_config_cpld(int route)
+{
+ switch (route) {
+ case H2W_UART1:
+ /* Make sure uart1 funtion pin opened. */
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_on_gpio_table+0, 0);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_on_gpio_table+1, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 1);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 0);
+ sapphire_h2w_path = H2W_UART1;
+ printk(KERN_INFO "H2W route = H2W-UART1, BT-X, UART3-X \n");
+ break;
+ case H2W_BT:
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 1);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1);
+ /* UART1 RTS/CTS to GPO/GPI. */
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_off_gpio_table+0, 0);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_off_gpio_table+1, 0);
+ sapphire_h2w_path = H2W_BT;
+ printk(KERN_INFO "H2W route = H2W-BT, UART1-X, UART3-X \n");
+ break;
+ case H2W_UART3:
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart3_on_gpio_table+0, 0);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart3_on_gpio_table+1, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1);
+ /* Make sure uart1 funtion pin opened. */
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_on_gpio_table+0, 0);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_on_gpio_table+1, 0);
+ sapphire_h2w_path = H2W_UART3;
+ printk(KERN_INFO "H2W route = H2W-UART3, BT-UART1 \n");
+ break;
+ case H2W_GPIO: /*H2W_UART3 TX,RX are changed to H2W_GPIO */
+ default:
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 0);
+ /* Set the CPLD connected H2W GPIO's to input */
+ gpio_set_value(SAPPHIRE_GPIO_H2W_CLK_DIR, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_DAT_DIR, 0);
+ /* TX,RX GPI first. */
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart3_off_gpi_table+0, 0);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart3_off_gpi_table+1, 0);
+ /* Make sure uart1 funtion pin opened. */
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_on_gpio_table+0, 0);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
+ uart1_on_gpio_table+1, 0);
+ sapphire_h2w_path = H2W_GPIO;
+ printk(KERN_INFO "H2W route = H2W-GPIO, BT-UART1 \n");
+ break;
+ }
+}
+
+static void h2w_init_cpld(void)
+{
+ h2w_config_cpld(H2W_GPIO);
+}
+
+static void set_h2w_dat(int n)
+{
+ gpio_set_value(SAPPHIRE_GPIO_H2W_DATA, n);
+}
+
+static void set_h2w_clk(int n)
+{
+ gpio_set_value(SAPPHIRE_GPIO_H2W_CLK, n);
+}
+
+static void set_h2w_dat_dir(int n)
+{
+ if (n == 0) /* input */
+ gpio_direction_input(SAPPHIRE_GPIO_H2W_DATA);
+ else
+ gpio_configure(SAPPHIRE_GPIO_H2W_DATA, GPIOF_DRIVE_OUTPUT);
+
+ gpio_set_value(SAPPHIRE_GPIO_H2W_DAT_DIR, n);
+
+}
+
+static void set_h2w_clk_dir(int n)
+{
+ if (n == 0) /* input */
+ gpio_direction_input(SAPPHIRE_GPIO_H2W_CLK);
+ else
+ gpio_configure(SAPPHIRE_GPIO_H2W_CLK, GPIOF_DRIVE_OUTPUT);
+
+ gpio_set_value(SAPPHIRE_GPIO_H2W_CLK_DIR, n);
+}
+
+static int get_h2w_dat(void)
+{
+ return gpio_get_value(SAPPHIRE_GPIO_H2W_DATA);
+}
+
+static int get_h2w_clk(void)
+{
+ return gpio_get_value(SAPPHIRE_GPIO_H2W_CLK);
+}
+
+static int set_h2w_path(const char *val, struct kernel_param *kp)
+{
+ int ret = -EINVAL;
+
+ ret = param_set_int(val, kp);
+ if (ret)
+ return ret;
+
+ switch (sapphire_h2w_path) {
+ case H2W_GPIO:
+ case H2W_UART1:
+ case H2W_UART3:
+ case H2W_BT:
+ break;
+ default:
+ sapphire_h2w_path = -1;
+ return -EINVAL;
+ }
+
+ h2w_config_cpld(sapphire_h2w_path);
+ return ret;
+}
+module_param_call(h2w_path, set_h2w_path, param_get_int,
+ &sapphire_h2w_path, S_IWUSR | S_IRUGO);
+
+
+static struct h2w_platform_data sapphire_h2w_data = {
+ .power_name = "wlan",
+ .cable_in1 = SAPPHIRE_GPIO_CABLE_IN1,
+ .cable_in2 = SAPPHIRE_GPIO_CABLE_IN2,
+ .h2w_clk = SAPPHIRE_GPIO_H2W_CLK,
+ .h2w_data = SAPPHIRE_GPIO_H2W_DATA,
+ .debug_uart = H2W_UART3,
+ .config_cpld = h2w_config_cpld,
+ .init_cpld = h2w_init_cpld,
+ .set_dat = set_h2w_dat,
+ .set_clk = set_h2w_clk,
+ .set_dat_dir = set_h2w_dat_dir,
+ .set_clk_dir = set_h2w_clk_dir,
+ .get_dat = get_h2w_dat,
+ .get_clk = get_h2w_clk,
+};
+
+static struct platform_device sapphire_h2w = {
+ .name = "h2w",
+ .id = -1,
+ .dev = {
+ .platform_data = &sapphire_h2w_data,
+ },
+};
+#endif
+
+static void sapphire_phy_reset(void)
+{
+ gpio_set_value(SAPPHIRE_GPIO_USB_PHY_RST_N, 0);
+ mdelay(10);
+ gpio_set_value(SAPPHIRE_GPIO_USB_PHY_RST_N, 1);
+ mdelay(10);
+}
+
+static struct pwr_sink sapphire_pwrsink_table[] = {
+ {
+ .id = PWRSINK_AUDIO,
+ .ua_max = 100000,
+ },
+ {
+ .id = PWRSINK_BACKLIGHT,
+ .ua_max = 125000,
+ },
+ {
+ .id = PWRSINK_LED_BUTTON,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_LED_KEYBOARD,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_GP_CLK,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_BLUETOOTH,
+ .ua_max = 15000,
+ },
+ {
+ .id = PWRSINK_CAMERA,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_SDCARD,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_VIDEO,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_WIFI,
+ .ua_max = 200000,
+ },
+ {
+ .id = PWRSINK_SYSTEM_LOAD,
+ .ua_max = 100000,
+ .percent_util = 38,
+ },
+};
+
+static struct pwr_sink_platform_data sapphire_pwrsink_data = {
+ .num_sinks = ARRAY_SIZE(sapphire_pwrsink_table),
+ .sinks = sapphire_pwrsink_table,
+ .suspend_late = NULL,
+ .resume_early = NULL,
+ .suspend_early = NULL,
+ .resume_late = NULL,
+};
+
+static struct platform_device sapphire_pwr_sink = {
+ .name = "htc_pwrsink",
+ .id = -1,
+ .dev = {
+ .platform_data = &sapphire_pwrsink_data,
+ },
+};
+
+static struct platform_device sapphire_rfkill = {
+ .name = "sapphire_rfkill",
+ .id = -1,
+};
+
+static struct msm_pmem_setting pmem_setting_32 = {
+ .pmem_start = SMI32_MSM_PMEM_MDP_BASE,
+ .pmem_size = SMI32_MSM_PMEM_MDP_SIZE,
+ .pmem_adsp_start = SMI32_MSM_PMEM_ADSP_BASE,
+ .pmem_adsp_size = SMI32_MSM_PMEM_ADSP_SIZE,
+ .pmem_gpu0_start = MSM_PMEM_GPU0_BASE,
+ .pmem_gpu0_size = MSM_PMEM_GPU0_SIZE,
+ .pmem_gpu1_start = MSM_PMEM_GPU1_BASE,
+ .pmem_gpu1_size = MSM_PMEM_GPU1_SIZE,
+ .pmem_camera_start = 0,
+ .pmem_camera_size = 0,
+ .ram_console_start = MSM_RAM_CONSOLE_BASE,
+ .ram_console_size = MSM_RAM_CONSOLE_SIZE,
+};
+
+static struct msm_pmem_setting pmem_setting_64 = {
+ .pmem_start = SMI64_MSM_PMEM_MDP_BASE,
+ .pmem_size = SMI64_MSM_PMEM_MDP_SIZE,
+ .pmem_adsp_start = SMI64_MSM_PMEM_ADSP_BASE,
+ .pmem_adsp_size = SMI64_MSM_PMEM_ADSP_SIZE,
+ .pmem_gpu0_start = MSM_PMEM_GPU0_BASE,
+ .pmem_gpu0_size = MSM_PMEM_GPU0_SIZE,
+ .pmem_gpu1_start = MSM_PMEM_GPU1_BASE,
+ .pmem_gpu1_size = MSM_PMEM_GPU1_SIZE,
+ .pmem_camera_start = SMI64_MSM_PMEM_CAMERA_BASE,
+ .pmem_camera_size = SMI64_MSM_PMEM_CAMERA_SIZE,
+ .ram_console_start = MSM_RAM_CONSOLE_BASE,
+ .ram_console_size = MSM_RAM_CONSOLE_SIZE,
+};
+
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+static struct platform_device sapphire_wifi = {
+ .name = "msm_wifi",
+ .id = 1,
+ .num_resources = 0,
+ .resource = NULL,
+ .dev = {
+ .platform_data = &sapphire_wifi_control,
+ },
+};
+#endif
+
+#define SND(num, desc) { .name = desc, .id = num }
+static struct snd_endpoint snd_endpoints_list[] = {
+ SND(0, "HANDSET"),
+ SND(1, "SPEAKER"),
+ SND(2, "HEADSET"),
+ SND(3, "BT"),
+ SND(44, "BT_EC_OFF"),
+ SND(10, "HEADSET_AND_SPEAKER"),
+ SND(256, "CURRENT"),
+
+ /* Bluetooth accessories. */
+
+ SND(12, "HTC BH S100"),
+ SND(13, "HTC BH M100"),
+ SND(14, "Motorola H500"),
+ SND(15, "Nokia HS-36W"),
+ SND(16, "PLT 510v.D"),
+ SND(17, "M2500 by Plantronics"),
+ SND(18, "Nokia HDW-3"),
+ SND(19, "HBH-608"),
+ SND(20, "HBH-DS970"),
+ SND(21, "i.Tech BlueBAND"),
+ SND(22, "Nokia BH-800"),
+ SND(23, "Motorola H700"),
+ SND(24, "HTC BH M200"),
+ SND(25, "Jabra JX10"),
+ SND(26, "320Plantronics"),
+ SND(27, "640Plantronics"),
+ SND(28, "Jabra BT500"),
+ SND(29, "Motorola HT820"),
+ SND(30, "HBH-IV840"),
+ SND(31, "6XXPlantronics"),
+ SND(32, "3XXPlantronics"),
+ SND(33, "HBH-PV710"),
+ SND(34, "Motorola H670"),
+ SND(35, "HBM-300"),
+ SND(36, "Nokia BH-208"),
+ SND(37, "Samsung WEP410"),
+ SND(38, "Jabra BT8010"),
+ SND(39, "Motorola S9"),
+ SND(40, "Jabra BT620s"),
+ SND(41, "Nokia BH-902"),
+ SND(42, "HBH-DS220"),
+ SND(43, "HBH-DS980"),
+};
+#undef SND
+
+static struct msm_snd_endpoints sapphire_snd_endpoints = {
+ .endpoints = snd_endpoints_list,
+ .num = ARRAY_SIZE(snd_endpoints_list),
+};
+
+static struct platform_device sapphire_snd = {
+ .name = "msm_snd",
+ .id = -1,
+ .dev = {
+ .platform_data = &sapphire_snd_endpoints,
+ },
+};
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+ &msm_device_i2c,
+ &msm_device_uart1,
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER) && !defined(CONFIG_TROUT_H2W)
+ &msm_device_uart3,
+#endif
+#ifdef CONFIG_SERIAL_MSM_HS
+ &msm_device_uart_dm1,
+#endif
+ &sapphire_nav_device,
+ &sapphire_reset_keys_device,
+ &android_leds,
+#ifdef CONFIG_LEDS_CPLD
+ &android_CPLD_leds,
+#endif
+#ifdef CONFIG_HTC_HEADSET
+ &sapphire_h2w,
+#endif
+ &sapphire_rfkill,
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ &sapphire_wifi,
+#endif
+
+#ifdef CONFIG_HTC_PWRSINK
+ &sapphire_pwr_sink,
+#endif
+ &sapphire_snd,
+ &sapphire_camera,
+};
+
+extern struct sys_timer msm_timer;
+
+static void __init sapphire_init_irq(void)
+{
+ printk(KERN_DEBUG "sapphire_init_irq()\n");
+ msm_init_irq();
+}
+
+static uint cpld_iset;
+static uint cpld_charger_en;
+static uint cpld_usb_h2w_sw;
+static uint opt_disable_uart3;
+
+module_param_named(iset, cpld_iset, uint, 0);
+module_param_named(charger_en, cpld_charger_en, uint, 0);
+module_param_named(usb_h2w_sw, cpld_usb_h2w_sw, uint, 0);
+module_param_named(disable_uart3, opt_disable_uart3, uint, 0);
+
+static void sapphire_reset(void)
+{
+ gpio_set_value(SAPPHIRE_GPIO_PS_HOLD, 0);
+}
+
+static uint32_t gpio_table[] = {
+ /* BLUETOOTH */
+#ifdef CONFIG_SERIAL_MSM_HS
+ PCOM_GPIO_CFG(43, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */
+ PCOM_GPIO_CFG(44, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */
+ PCOM_GPIO_CFG(45, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */
+ PCOM_GPIO_CFG(46, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */
+#else
+ PCOM_GPIO_CFG(43, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */
+ PCOM_GPIO_CFG(44, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */
+ PCOM_GPIO_CFG(45, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */
+ PCOM_GPIO_CFG(46, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */
+#endif
+};
+
+
+static uint32_t camera_off_gpio_table[] = {
+ /* CAMERA */
+ PCOM_GPIO_CFG(2, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(3, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(4, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT4 */
+ PCOM_GPIO_CFG(5, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT5 */
+ PCOM_GPIO_CFG(6, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT6 */
+ PCOM_GPIO_CFG(7, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT7 */
+ PCOM_GPIO_CFG(8, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT8 */
+ PCOM_GPIO_CFG(9, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT9 */
+ PCOM_GPIO_CFG(10, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT10 */
+ PCOM_GPIO_CFG(11, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT11 */
+ PCOM_GPIO_CFG(12, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* PCLK */
+ PCOM_GPIO_CFG(13, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* HSYNC_IN */
+ PCOM_GPIO_CFG(14, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* VSYNC_IN */
+ PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */
+};
+
+static uint32_t camera_on_gpio_table[] = {
+ /* CAMERA */
+ PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */
+ PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */
+ PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */
+ PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */
+ PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */
+ PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */
+ PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */
+ PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */
+ PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */
+ PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */
+ PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */
+ PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */
+ PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */
+ PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */
+};
+
+static uint32_t camera_off_gpio_12pins_table[] = {
+ /* CAMERA */
+ PCOM_GPIO_CFG(0, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */
+ PCOM_GPIO_CFG(1, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(2, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(3, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(4, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT4 */
+ PCOM_GPIO_CFG(5, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT5 */
+ PCOM_GPIO_CFG(6, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT6 */
+ PCOM_GPIO_CFG(7, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT7 */
+ PCOM_GPIO_CFG(8, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT8 */
+ PCOM_GPIO_CFG(9, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT9 */
+ PCOM_GPIO_CFG(10, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT10 */
+ PCOM_GPIO_CFG(11, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT11 */
+ PCOM_GPIO_CFG(12, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* PCLK */
+ PCOM_GPIO_CFG(13, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* HSYNC_IN */
+ PCOM_GPIO_CFG(14, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* VSYNC_IN */
+ PCOM_GPIO_CFG(15, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* MCLK */
+};
+
+static uint32_t camera_on_gpio_12pins_table[] = {
+ /* CAMERA */
+ PCOM_GPIO_CFG(0, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT0 */
+ PCOM_GPIO_CFG(1, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT1 */
+ PCOM_GPIO_CFG(2, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT2 */
+ PCOM_GPIO_CFG(3, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT3 */
+ PCOM_GPIO_CFG(4, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT4 */
+ PCOM_GPIO_CFG(5, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT5 */
+ PCOM_GPIO_CFG(6, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT6 */
+ PCOM_GPIO_CFG(7, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT7 */
+ PCOM_GPIO_CFG(8, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT8 */
+ PCOM_GPIO_CFG(9, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT9 */
+ PCOM_GPIO_CFG(10, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT10 */
+ PCOM_GPIO_CFG(11, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* DAT11 */
+ PCOM_GPIO_CFG(12, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_16MA), /* PCLK */
+ PCOM_GPIO_CFG(13, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* HSYNC_IN */
+ PCOM_GPIO_CFG(14, 1, GPIO_INPUT, GPIO_PULL_DOWN, GPIO_2MA), /* VSYNC_IN */
+ PCOM_GPIO_CFG(15, 1, GPIO_OUTPUT, GPIO_PULL_DOWN, GPIO_16MA), /* MCLK */
+};
+
+static void config_gpio_table(uint32_t *table, int len)
+{
+ int n;
+ unsigned id;
+ for (n = 0; n < len; n++) {
+ id = table[n];
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0);
+ }
+}
+
+void config_sapphire_camera_on_gpios(void)
+{
+ /*Add for judage it's 10 pins or 12 pins platform ----->*/
+ if (is_12pin_camera()) {
+ config_gpio_table(camera_on_gpio_12pins_table,
+ ARRAY_SIZE(camera_on_gpio_12pins_table));
+ } else {
+ config_gpio_table(camera_on_gpio_table,
+ ARRAY_SIZE(camera_on_gpio_table));
+ }
+ /*End Of Add for judage it's 10 pins or 12 pins platform*/
+}
+
+void config_sapphire_camera_off_gpios(void)
+{
+ /*Add for judage it's 10 pins or 12 pins platform ----->*/
+ if (is_12pin_camera()) {
+ config_gpio_table(camera_off_gpio_12pins_table,
+ ARRAY_SIZE(camera_off_gpio_12pins_table));
+ } else {
+ config_gpio_table(camera_off_gpio_table,
+ ARRAY_SIZE(camera_off_gpio_table));
+ }
+ /*End Of Add for judage it's 10 pins or 12 pins platform*/
+}
+
+static void __init config_gpios(void)
+{
+ config_gpio_table(gpio_table, ARRAY_SIZE(gpio_table));
+ config_sapphire_camera_off_gpios();
+}
+
+void msm_serial_debug_init(unsigned int base, int irq,
+ struct device *clk_device, int signal_irq);
+
+static struct msm_acpu_clock_platform_data sapphire_clock_data = {
+ .acpu_switch_time_us = 20,
+ .max_speed_delta_khz = 256000,
+ .vdd_switch_time_us = 62,
+ .power_collapse_khz = 19200000,
+ .wait_for_irq_khz = 128000000,
+};
+
+#ifdef CONFIG_SERIAL_MSM_HS
+static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = {
+ .wakeup_irq = MSM_GPIO_TO_INT(45),
+ .inject_rx_on_wakeup = 1,
+ .rx_to_inject = 0x32,
+};
+#endif
+
+static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 16000,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 12000,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 2000,
+};
+
+static void __init sapphire_init(void)
+{
+ int rc;
+ int i;
+ printk("sapphire_init() revision=%d\n", system_rev);
+
+ /*
+ * Setup common MSM GPIOS
+ */
+ config_gpios();
+
+ msm_hw_reset_hook = sapphire_reset;
+
+ msm_acpu_clock_init(&sapphire_clock_data);
+
+ /* adjust GPIOs based on bootloader request */
+ printk("sapphire_init: cpld_usb_hw2_sw = %d\n", cpld_usb_h2w_sw);
+ gpio_set_value(SAPPHIRE_GPIO_USB_H2W_SW, cpld_usb_h2w_sw);
+
+#if defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ if (!opt_disable_uart3)
+ msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3,
+ &msm_device_uart3.dev, 1);
+#endif
+
+ /* gpio_configure(108, IRQF_TRIGGER_LOW); */
+
+ /* H2W pins <-> UART3, Bluetooth <-> UART1 */
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL0, 0);
+ gpio_set_value(SAPPHIRE_GPIO_H2W_SEL1, 1);
+ /* put the AF VCM in powerdown mode to avoid noise */
+ if (sapphire_is_5M_camera())
+ sapphire_gpio_write(NULL, SAPPHIRE_GPIO_VCM_PWDN, 0);
+ else
+ sapphire_gpio_write(NULL, SAPPHIRE_GPIO_VCM_PWDN, 1);
+ mdelay(100);
+
+#ifdef CONFIG_SERIAL_MSM_HS
+ msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata;
+#endif
+ msm_add_usb_devices(sapphire_phy_reset);
+
+ if (32 == smi_sz)
+ msm_add_mem_devices(&pmem_setting_32);
+ else
+ msm_add_mem_devices(&pmem_setting_64);
+
+ rc = sapphire_init_mmc(system_rev);
+ if (rc)
+ printk(KERN_CRIT "%s: MMC init failure (%d)\n", __func__, rc);
+
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+ rc = sapphire_init_wifi_mem();
+ if (rc) {
+ printk(KERN_CRIT "%s: WiFi memory init failure (%d)\n",
+ __func__, rc);
+ }
+#endif
+ msm_init_pmic_vibrator();
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices));
+ msm_pm_set_platform_data(msm_pm_data);
+}
+
+static struct map_desc sapphire_io_desc[] __initdata = {
+ {
+ .virtual = SAPPHIRE_CPLD_BASE,
+ .pfn = __phys_to_pfn(SAPPHIRE_CPLD_START),
+ .length = SAPPHIRE_CPLD_SIZE,
+ .type = MT_DEVICE_NONSHARED
+ }
+};
+
+
+unsigned int sapphire_get_hwid(void)
+{
+ printk(KERN_DEBUG "sapphire_get_hwid=0x%x\r\n", hwid);
+
+ return hwid;
+}
+
+unsigned int sapphire_get_skuid(void)
+{
+ printk(KERN_DEBUG "sapphire_get_skuid=0x%x\r\n", skuid);
+
+ return skuid;
+}
+
+unsigned sapphire_engineerid(void)
+{
+ printk(KERN_DEBUG "sapphire_engineerid=0x%x\r\n", engineerid);
+
+ return engineerid;
+}
+
+int sapphire_is_5M_camera(void)
+{
+ int ret = 0;
+ if (sapphire_get_skuid() == 0x1FF00 && !(sapphire_engineerid() & 0x02))
+ ret = 1;
+ else if (sapphire_get_skuid() == 0x20100 && !(sapphire_engineerid() & 0x02))
+ ret = 1;
+ printk(KERN_DEBUG "sapphire_is_5M_camera=%d\n", ret);
+ return ret;
+}
+
+/* it can support 3M and 5M sensor */
+unsigned int is_12pin_camera(void)
+{
+ unsigned int ret = 0;
+
+ if (sapphire_get_skuid() == 0x1FF00 || sapphire_get_skuid() == 0x20100)
+ ret = 1;
+ else
+ ret = 0;
+ printk(KERN_DEBUG "is_12pin_camera=%d\r\n", ret);
+ return ret;
+}
+
+int sapphire_get_smi_size(void)
+{
+ printk(KERN_DEBUG "get_smi_size=%d\r\n", smi_sz);
+ return smi_sz;
+}
+
+static void __init sapphire_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ smi_sz = parse_tag_smi((const struct tag *)tags);
+ printk("sapphire_fixup:smisize=%d\n", smi_sz);
+ hwid = parse_tag_hwid((const struct tag *)tags);
+ printk("sapphire_fixup:hwid=0x%x\n", hwid);
+ skuid = parse_tag_skuid((const struct tag *)tags);
+ printk("sapphire_fixup:skuid=0x%x\n", skuid);
+ engineerid = parse_tag_engineerid((const struct tag *)tags);
+ printk("sapphire_fixup:engineerid=0x%x\n", engineerid);
+
+ mi->nr_banks = 1;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ if (smi_sz == 32) {
+ mi->bank[0].size = (84*1024*1024);
+ } else if (smi_sz == 64) {
+ mi->bank[0].size = (101*1024*1024);
+ } else {
+ printk(KERN_ERR "can not get smi size\n");
+
+ /*Give a default value when not get smi size*/
+ smi_sz = 64;
+ mi->bank[0].size = (101*1024*1024);
+ printk(KERN_ERR "use default : smisize=%d\n", smi_sz);
+ }
+}
+
+static void __init sapphire_map_io(void)
+{
+ msm_map_common_io();
+ iotable_init(sapphire_io_desc, ARRAY_SIZE(sapphire_io_desc));
+ msm_clock_init();
+}
+
+MACHINE_START(SAPPHIRE, "sapphire")
+/* Maintainer: Brian Swetland <swetland@google.com> */
+#ifdef CONFIG_MSM_DEBUG_UART
+ .phys_io = MSM_DEBUG_UART_PHYS,
+ .io_pg_offst = ((MSM_DEBUG_UART_BASE) >> 18) & 0xfffc,
+#endif
+ .boot_params = 0x10000100,
+ .fixup = sapphire_fixup,
+ .map_io = sapphire_map_io,
+ .init_irq = sapphire_init_irq,
+ .init_machine = sapphire_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.h b/arch/arm/mach-msm/board-sapphire.h
new file mode 100644
index 000000000000..3327dfeafde0
--- /dev/null
+++ b/arch/arm/mach-msm/board-sapphire.h
@@ -0,0 +1,219 @@
+/* linux/arch/arm/mach-msm/board-sapphire.h
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+#ifndef __ARCH_ARM_MACH_MSM_BOARD_SAPPHIRE_H
+#define __ARCH_ARM_MACH_MSM_BOARD_SAPPHIRE_H
+
+#include <mach/board.h>
+
+#define MSM_SMI_BASE 0x00000000
+#define MSM_SMI_SIZE 0x00800000
+
+#define MSM_EBI_BASE 0x10000000
+#define MSM_EBI_SIZE 0x06e00000
+
+#define MSM_PMEM_GPU0_BASE 0x00000000
+#define MSM_PMEM_GPU0_SIZE 0x00700000
+
+#define SMI64_MSM_PMEM_MDP_BASE 0x02000000
+#define SMI64_MSM_PMEM_MDP_SIZE 0x00800000
+
+#define SMI64_MSM_PMEM_ADSP_BASE 0x02800000
+#define SMI64_MSM_PMEM_ADSP_SIZE 0x00800000
+
+#define SMI64_MSM_PMEM_CAMERA_BASE 0x03000000
+#define SMI64_MSM_PMEM_CAMERA_SIZE 0x01000000
+
+#define SMI64_MSM_FB_BASE 0x00700000
+#define SMI64_MSM_FB_SIZE 0x00100000
+
+#define SMI64_MSM_LINUX_BASE MSM_EBI_BASE
+#define SMI64_MSM_LINUX_SIZE 0x06500000
+
+
+#define SMI32_MSM_LINUX_BASE MSM_EBI_BASE
+#define SMI32_MSM_LINUX_SIZE 0x5400000
+
+#define SMI32_MSM_PMEM_MDP_BASE SMI32_MSM_LINUX_BASE + SMI32_MSM_LINUX_SIZE
+#define SMI32_MSM_PMEM_MDP_SIZE 0x800000
+
+#define SMI32_MSM_PMEM_ADSP_BASE SMI32_MSM_PMEM_MDP_BASE + SMI32_MSM_PMEM_MDP_SIZE
+#define SMI32_MSM_PMEM_ADSP_SIZE 0x800000
+
+#define SMI32_MSM_FB_BASE SMI32_MSM_PMEM_ADSP_BASE + SMI32_MSM_PMEM_ADSP_SIZE
+#define SMI32_MSM_FB_SIZE 0x9b000
+
+
+#define MSM_PMEM_GPU1_SIZE 0x800000
+#define MSM_PMEM_GPU1_BASE MSM_RAM_CONSOLE_BASE - MSM_PMEM_GPU1_SIZE
+
+#define MSM_RAM_CONSOLE_BASE MSM_EBI_BASE + 0x6d00000
+#define MSM_RAM_CONSOLE_SIZE 128 * SZ_1K
+
+#if (SMI32_MSM_FB_BASE + SMI32_MSM_FB_SIZE) >= (MSM_PMEM_GPU1_BASE)
+#error invalid memory map
+#endif
+
+#if (SMI64_MSM_FB_BASE + SMI64_MSM_FB_SIZE) >= (MSM_PMEM_GPU1_BASE)
+#error invalid memory map
+#endif
+
+#define DECLARE_MSM_IOMAP
+#include <mach/msm_iomap.h>
+
+/*
+** SOC GPIO
+*/
+#define SAPPHIRE_BALL_UP_0 94
+#define SAPPHIRE_BALL_LEFT_0 18
+#define SAPPHIRE_BALL_DOWN_0 49
+#define SAPPHIRE_BALL_RIGHT_0 19
+
+#define SAPPHIRE_POWER_KEY 20
+#define SAPPHIRE_VOLUME_UP 36
+#define SAPPHIRE_VOLUME_DOWN 39
+
+#define SAPPHIRE_GPIO_PS_HOLD (25)
+#define SAPPHIRE_MDDI_1V5_EN (28)
+#define SAPPHIRE_BL_PWM (27)
+#define SAPPHIRE_TP_LS_EN (1)
+#define SAPPHIRE20_TP_LS_EN (88)
+
+/* H2W */
+#define SAPPHIRE_GPIO_CABLE_IN1 (83)
+#define SAPPHIRE_GPIO_CABLE_IN2 (37)
+#define SAPPHIRE_GPIO_UART3_RX (86)
+#define SAPPHIRE_GPIO_UART3_TX (87)
+#define SAPPHIRE_GPIO_H2W_DATA (86)
+#define SAPPHIRE_GPIO_H2W_CLK (87)
+
+#define SAPPHIRE_GPIO_UART1_RTS (43)
+#define SAPPHIRE_GPIO_UART1_CTS (44)
+
+/*
+** CPLD GPIO
+**
+** Sapphire Altera CPLD can keep the registers value and
+** doesn't need a shadow to backup.
+**/
+#define SAPPHIRE_CPLD_BASE 0xE8100000 /* VA */
+#define SAPPHIRE_CPLD_START 0x98000000 /* PA */
+#define SAPPHIRE_CPLD_SIZE SZ_4K
+
+#define SAPPHIRE_GPIO_START (128) /* Pseudo GPIO number */
+
+/* Sapphire has one INT BANK only. */
+#define SAPPHIRE_GPIO_INT_B0_MASK_REG (0x0c) /*INT3 MASK*/
+#define SAPPHIRE_GPIO_INT_B0_STAT_REG (0x0e) /*INT1 STATUS*/
+
+/* LED control register */
+#define SAPPHIRE_CPLD_LED_BASE (SAPPHIRE_CPLD_BASE + 0x10) /* VA */
+#define SAPPHIRE_CPLD_LED_START (SAPPHIRE_CPLD_START + 0x10) /* PA */
+#define SAPPHIRE_CPLD_LED_SIZE 0x08
+
+/* MISCn: GPO pin to Enable/Disable some functions. */
+#define SAPPHIRE_GPIO_MISC1_BASE (SAPPHIRE_GPIO_START + 0x00)
+#define SAPPHIRE_GPIO_MISC2_BASE (SAPPHIRE_GPIO_START + 0x08)
+#define SAPPHIRE_GPIO_MISC3_BASE (SAPPHIRE_GPIO_START + 0x10)
+#define SAPPHIRE_GPIO_MISC4_BASE (SAPPHIRE_GPIO_START + 0x18)
+#define SAPPHIRE_GPIO_MISC5_BASE (SAPPHIRE_GPIO_START + 0x20)
+
+/* INT BANK0: INT1: int status, INT2: int level, INT3: int Mask */
+#define SAPPHIRE_GPIO_INT_B0_BASE (SAPPHIRE_GPIO_START + 0x28)
+
+/* MISCn GPIO: */
+#define SAPPHIRE_GPIO_CPLD128_VER_0 (SAPPHIRE_GPIO_MISC1_BASE + 4)
+#define SAPPHIRE_GPIO_CPLD128_VER_1 (SAPPHIRE_GPIO_MISC1_BASE + 5)
+#define SAPPHIRE_GPIO_CPLD128_VER_2 (SAPPHIRE_GPIO_MISC1_BASE + 6)
+#define SAPPHIRE_GPIO_CPLD128_VER_3 (SAPPHIRE_GPIO_MISC1_BASE + 7)
+
+#define SAPPHIRE_GPIO_H2W_DAT_DIR (SAPPHIRE_GPIO_MISC2_BASE + 2)
+#define SAPPHIRE_GPIO_H2W_CLK_DIR (SAPPHIRE_GPIO_MISC2_BASE + 3)
+#define SAPPHIRE_GPIO_H2W_SEL0 (SAPPHIRE_GPIO_MISC2_BASE + 6)
+#define SAPPHIRE_GPIO_H2W_SEL1 (SAPPHIRE_GPIO_MISC2_BASE + 7)
+
+#define SAPPHIRE_GPIO_I2C_PULL (SAPPHIRE_GPIO_MISC3_BASE + 2)
+#define SAPPHIRE_GPIO_TP_EN (SAPPHIRE_GPIO_MISC3_BASE + 4)
+#define SAPPHIRE_GPIO_JOG_EN (SAPPHIRE_GPIO_MISC3_BASE + 5)
+#define SAPPHIRE_GPIO_JOG_LED_EN (SAPPHIRE_GPIO_MISC3_BASE + 6)
+#define SAPPHIRE_GPIO_APKEY_LED_EN (SAPPHIRE_GPIO_MISC3_BASE + 7)
+
+#define SAPPHIRE_GPIO_VCM_PWDN (SAPPHIRE_GPIO_MISC4_BASE + 0)
+#define SAPPHIRE_GPIO_USB_H2W_SW (SAPPHIRE_GPIO_MISC4_BASE + 1)
+#define SAPPHIRE_GPIO_COMPASS_RST_N (SAPPHIRE_GPIO_MISC4_BASE + 2)
+#define SAPPHIRE_GPIO_USB_PHY_RST_N (SAPPHIRE_GPIO_MISC4_BASE + 5)
+#define SAPPHIRE_GPIO_WIFI_PA_RESETX (SAPPHIRE_GPIO_MISC4_BASE + 6)
+#define SAPPHIRE_GPIO_WIFI_EN (SAPPHIRE_GPIO_MISC4_BASE + 7)
+
+#define SAPPHIRE_GPIO_BT_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 0)
+#define SAPPHIRE_GPIO_MAC_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 1)
+#define SAPPHIRE_GPIO_MDDI_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 2)
+#define SAPPHIRE_GPIO_COMPASS_32K_EN (SAPPHIRE_GPIO_MISC5_BASE + 3)
+
+/* INT STATUS/LEVEL/MASK : INT GPIO should be the last. */
+#define SAPPHIRE_GPIO_NAVI_ACT_N (SAPPHIRE_GPIO_INT_B0_BASE + 0)
+#define SAPPHIRE_GPIO_COMPASS_IRQ (SAPPHIRE_GPIO_INT_B0_BASE + 1)
+#define SAPPHIRE_GPIO_SEARCH_ACT_N (SAPPHIRE_GPIO_INT_B0_BASE + 2)
+#define SAPPHIRE_GPIO_AUD_HSMIC_DET_N (SAPPHIRE_GPIO_INT_B0_BASE + 3)
+#define SAPPHIRE_GPIO_SDMC_CD_N (SAPPHIRE_GPIO_INT_B0_BASE + 4)
+#define SAPPHIRE_GPIO_CAM_BTN_STEP1_N (SAPPHIRE_GPIO_INT_B0_BASE + 5)
+#define SAPPHIRE_GPIO_CAM_BTN_STEP2_N (SAPPHIRE_GPIO_INT_B0_BASE + 6)
+#define SAPPHIRE_GPIO_TP_ATT_N (SAPPHIRE_GPIO_INT_B0_BASE + 7)
+
+#define SAPPHIRE_GPIO_END SAPPHIRE_GPIO_TP_ATT_N
+#define SAPPHIRE_GPIO_LAST_INT (SAPPHIRE_GPIO_TP_ATT_N)
+
+/* Bit position in the CPLD MISCn by the CPLD GPIOn: only bit0-7 is used. */
+#define CPLD_GPIO_BIT_POS_MASK(n) (1U << ((n) & 7))
+#define CPLD_GPIO_REG_OFFSET(n) _g_CPLD_MISCn_Offset[((n)-SAPPHIRE_GPIO_START) >> 3]
+#define CPLD_GPIO_REG(n) (CPLD_GPIO_REG_OFFSET(n) + SAPPHIRE_CPLD_BASE)
+
+/*
+** CPLD INT Start
+*/
+#define SAPPHIRE_INT_START (NR_MSM_IRQS + NR_GPIO_IRQS) /* pseudo number for CPLD INT */
+/* Using INT status/Bank0 for GPIO to INT */
+#define SAPPHIRE_GPIO_TO_INT(n) ((n-SAPPHIRE_GPIO_INT_B0_BASE) + SAPPHIRE_INT_START)
+#define SAPPHIRE_INT_END (SAPPHIRE_GPIO_TO_INT(SAPPHIRE_GPIO_END))
+
+/* get the INT reg by GPIO number */
+#define CPLD_INT_GPIO_TO_BANK(n) (((n)-SAPPHIRE_GPIO_INT_B0_BASE) >> 3)
+#define CPLD_INT_STATUS_REG_OFFSET_G(n) _g_INT_BANK_Offset[CPLD_INT_GPIO_TO_BANK(n)][0]
+#define CPLD_INT_LEVEL_REG_OFFSET_G(n) _g_INT_BANK_Offset[CPLD_INT_GPIO_TO_BANK(n)][1]
+#define CPLD_INT_MASK_REG_OFFSET_G(n) _g_INT_BANK_Offset[CPLD_INT_GPIO_TO_BANK(n)][2]
+#define CPLD_INT_STATUS_REG_G(n) (SAPPHIRE_CPLD_BASE + CPLD_INT_STATUS_REG_OFFSET_G(n))
+#define CPLD_INT_LEVEL_REG_G(n) (SAPPHIRE_CPLD_BASE + CPLD_INT_LEVEL_REG_OFFSET_G(n))
+#define CPLD_INT_MASK_REG_G(n) (SAPPHIRE_CPLD_BASE + CPLD_INT_MASK_REG_OFFSET_G(n))
+
+/* get the INT reg by INT number */
+#define CPLD_INT_TO_BANK(i) ((i-SAPPHIRE_INT_START) >> 3)
+#define CPLD_INT_STATUS_REG_OFFSET(i) _g_INT_BANK_Offset[CPLD_INT_TO_BANK(i)][0]
+#define CPLD_INT_LEVEL_REG_OFFSET(i) _g_INT_BANK_Offset[CPLD_INT_TO_BANK(i)][1]
+#define CPLD_INT_MASK_REG_OFFSET(i) _g_INT_BANK_Offset[CPLD_INT_TO_BANK(i)][2]
+#define CPLD_INT_STATUS_REG(i) (SAPPHIRE_CPLD_BASE + CPLD_INT_STATUS_REG_OFFSET(i))
+#define CPLD_INT_LEVEL_REG(i) (SAPPHIRE_CPLD_BASE + CPLD_INT_LEVEL_REG_OFFSET(i))
+#define CPLD_INT_MASK_REG(i) (SAPPHIRE_CPLD_BASE + CPLD_INT_MASK_REG_OFFSET(i) )
+
+/* return the bit mask by INT number */
+#define SAPPHIRE_INT_BIT_MASK(i) (1U << ((i - SAPPHIRE_INT_START) & 7))
+
+void config_sapphire_camera_on_gpios(void);
+void config_sapphire_camera_off_gpios(void);
+int sapphire_get_smi_size(void);
+unsigned int sapphire_get_hwid(void);
+unsigned int sapphire_get_skuid(void);
+unsigned int is_12pin_camera(void);
+int sapphire_is_5M_camera(void);
+int sapphire_gpio_write(struct gpio_chip *chip, unsigned n, unsigned on);
+
+#endif /* GUARD */
diff --git a/arch/arm/mach-msm/board-trout-gpio.c b/arch/arm/mach-msm/board-trout-gpio.c
new file mode 100644
index 000000000000..527379ec3597
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout-gpio.c
@@ -0,0 +1,305 @@
+/* arch/arm/mach-msm/board-trout-gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/irq.h>
+#include <linux/pm.h>
+#include <linux/sysdev.h>
+
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <asm/mach-types.h>
+
+#include <mach/htc_pwrsink.h>
+
+#include "board-trout.h"
+#include "gpio_chip.h"
+
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "board_trout."
+
+static uint cpld_usb_h2w_sw;
+module_param_named(usb_h2w_sw, cpld_usb_h2w_sw, uint, 0);
+
+static uint8_t trout_cpld_shadow[4] = {
+#if defined(CONFIG_MSM_DEBUG_UART1)
+ /* H2W pins <-> UART1 */
+ [0] = 0x40, // for serial debug, low current
+#else
+ /* H2W pins <-> UART3, Bluetooth <-> UART1 */
+ [0] = 0x80, // for serial debug, low current
+#endif
+ [1] = 0x04, // I2C_PULL
+ [3] = 0x04, // mmdi 32k en
+};
+static uint8_t trout_int_mask[2] = {
+ [0] = 0xff, /* mask all interrupts */
+ [1] = 0xff,
+};
+static uint8_t trout_sleep_int_mask[] = {
+ [0] = 0xff,
+ [1] = 0xff,
+};
+static int trout_suspended;
+
+static int trout_gpio_read(struct gpio_chip *chip, unsigned n)
+{
+ uint8_t b;
+ int reg;
+ if (n >= TROUT_GPIO_VIRTUAL_BASE)
+ n += TROUT_GPIO_VIRTUAL_TO_REAL_OFFSET;
+ b = 1U << (n & 7);
+ reg = (n & 0x78) >> 2; // assumes base is 128
+ return !!(readb(TROUT_CPLD_BASE + reg) & b);
+}
+
+static void update_pwrsink(unsigned gpio, unsigned on)
+{
+ switch(gpio) {
+ case TROUT_GPIO_UI_LED_EN:
+ htc_pwrsink_set(PWRSINK_LED_BUTTON, on ? 100 : 0);
+ break;
+ case TROUT_GPIO_QTKEY_LED_EN:
+ htc_pwrsink_set(PWRSINK_LED_KEYBOARD, on ? 100 : 0);
+ break;
+ }
+}
+
+static uint8_t trout_gpio_write_shadow(unsigned n, unsigned on)
+{
+ uint8_t b = 1U << (n & 7);
+ int reg = (n & 0x78) >> 2; // assumes base is 128
+
+ if(on)
+ return trout_cpld_shadow[reg >> 1] |= b;
+ else
+ return trout_cpld_shadow[reg >> 1] &= ~b;
+}
+
+static int trout_gpio_write(struct gpio_chip *chip, unsigned n, unsigned on)
+{
+ int reg = (n & 0x78) >> 2; // assumes base is 128
+ unsigned long flags;
+ uint8_t reg_val;
+
+ if ((reg >> 1) >= ARRAY_SIZE(trout_cpld_shadow)) {
+ printk(KERN_ERR "trout_gpio_write called on input %d\n", n);
+ return -ENOTSUPP;
+ }
+
+ local_irq_save(flags);
+ update_pwrsink(n, on);
+ reg_val = trout_gpio_write_shadow(n, on);
+ writeb(reg_val, TROUT_CPLD_BASE + reg);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static int trout_gpio_configure(struct gpio_chip *chip, unsigned int gpio, unsigned long flags)
+{
+ if(flags & (GPIOF_OUTPUT_LOW | GPIOF_OUTPUT_HIGH))
+ trout_gpio_write(chip, gpio, flags & GPIOF_OUTPUT_HIGH);
+ return 0;
+}
+
+static int trout_gpio_get_irq_num(struct gpio_chip *chip, unsigned int gpio, unsigned int *irqp, unsigned long *irqnumflagsp)
+{
+ if ((gpio < TROUT_GPIO_BANK0_FIRST_INT_SOURCE ||
+ gpio > TROUT_GPIO_BANK0_LAST_INT_SOURCE) &&
+ (gpio < TROUT_GPIO_BANK1_FIRST_INT_SOURCE ||
+ gpio > TROUT_GPIO_BANK1_LAST_INT_SOURCE))
+ return -ENOENT;
+ *irqp = TROUT_GPIO_TO_INT(gpio);
+ if(irqnumflagsp)
+ *irqnumflagsp = 0;
+ return 0;
+}
+
+static void trout_gpio_irq_ack(unsigned int irq)
+{
+ int bank = TROUT_INT_TO_BANK(irq);
+ uint8_t mask = TROUT_INT_TO_MASK(irq);
+ int reg = TROUT_BANK_TO_STAT_REG(bank);
+ /*printk(KERN_INFO "trout_gpio_irq_ack irq %d\n", irq);*/
+ writeb(mask, TROUT_CPLD_BASE + reg);
+}
+
+static void trout_gpio_irq_mask(unsigned int irq)
+{
+ unsigned long flags;
+ uint8_t reg_val;
+ int bank = TROUT_INT_TO_BANK(irq);
+ uint8_t mask = TROUT_INT_TO_MASK(irq);
+ int reg = TROUT_BANK_TO_MASK_REG(bank);
+
+ local_irq_save(flags);
+ reg_val = trout_int_mask[bank] |= mask;
+ /*printk(KERN_INFO "trout_gpio_irq_mask irq %d => %d:%02x\n",
+ irq, bank, reg_val);*/
+ if (!trout_suspended)
+ writeb(reg_val, TROUT_CPLD_BASE + reg);
+ local_irq_restore(flags);
+}
+
+static void trout_gpio_irq_unmask(unsigned int irq)
+{
+ unsigned long flags;
+ uint8_t reg_val;
+ int bank = TROUT_INT_TO_BANK(irq);
+ uint8_t mask = TROUT_INT_TO_MASK(irq);
+ int reg = TROUT_BANK_TO_MASK_REG(bank);
+
+ local_irq_save(flags);
+ reg_val = trout_int_mask[bank] &= ~mask;
+ /*printk(KERN_INFO "trout_gpio_irq_unmask irq %d => %d:%02x\n",
+ irq, bank, reg_val);*/
+ if (!trout_suspended)
+ writeb(reg_val, TROUT_CPLD_BASE + reg);
+ local_irq_restore(flags);
+}
+
+int trout_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+{
+ unsigned long flags;
+ int bank = TROUT_INT_TO_BANK(irq);
+ uint8_t mask = TROUT_INT_TO_MASK(irq);
+
+ local_irq_save(flags);
+ if(on)
+ trout_sleep_int_mask[bank] &= ~mask;
+ else
+ trout_sleep_int_mask[bank] |= mask;
+ local_irq_restore(flags);
+ return 0;
+}
+
+static void trout_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ int j, m;
+ unsigned v;
+ int bank;
+ int stat_reg;
+ int int_base = TROUT_INT_START;
+ uint8_t int_mask;
+
+ for (bank = 0; bank < 2; bank++) {
+ stat_reg = TROUT_BANK_TO_STAT_REG(bank);
+ v = readb(TROUT_CPLD_BASE + stat_reg);
+ int_mask = trout_int_mask[bank];
+ if (v & int_mask) {
+ writeb(v & int_mask, TROUT_CPLD_BASE + stat_reg);
+ printk(KERN_ERR "trout_gpio_irq_handler: got masked "
+ "interrupt: %d:%02x\n", bank, v & int_mask);
+ }
+ v &= ~int_mask;
+ while (v) {
+ m = v & -v;
+ j = fls(m) - 1;
+ /*printk(KERN_INFO "msm_gpio_irq_handler %d:%02x %02x b"
+ "it %d irq %d\n", bank, v, m, j, int_base + j);*/
+ v &= ~m;
+ generic_handle_irq(int_base + j);
+ }
+ int_base += TROUT_INT_BANK0_COUNT;
+ }
+ desc->chip->ack(irq);
+}
+
+static int trout_sysdev_suspend(struct sys_device *dev, pm_message_t state)
+{
+ trout_suspended = 1;
+ writeb(trout_sleep_int_mask[0],
+ TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK0_REG);
+ writeb(trout_sleep_int_mask[1],
+ TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK1_REG);
+ writeb(trout_sleep_int_mask[0],
+ TROUT_CPLD_BASE + TROUT_GPIO_INT_STAT0_REG);
+ writeb(trout_sleep_int_mask[1],
+ TROUT_CPLD_BASE + TROUT_GPIO_INT_STAT1_REG);
+ return 0;
+}
+
+int trout_sysdev_resume(struct sys_device *dev)
+{
+ writeb(trout_int_mask[0], TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK0_REG);
+ writeb(trout_int_mask[1], TROUT_CPLD_BASE + TROUT_GPIO_INT_MASK1_REG);
+ trout_suspended = 0;
+ return 0;
+}
+
+static struct irq_chip trout_gpio_irq_chip = {
+ .name = "troutgpio",
+ .ack = trout_gpio_irq_ack,
+ .mask = trout_gpio_irq_mask,
+ .unmask = trout_gpio_irq_unmask,
+ .set_wake = trout_gpio_irq_set_wake,
+ //.set_type = trout_gpio_irq_set_type,
+};
+
+static struct gpio_chip trout_gpio_chip = {
+ .start = TROUT_GPIO_START,
+ .end = TROUT_GPIO_END,
+ .configure = trout_gpio_configure,
+ .get_irq_num = trout_gpio_get_irq_num,
+ .read = trout_gpio_read,
+ .write = trout_gpio_write,
+// .read_detect_status = trout_gpio_read_detect_status,
+// .clear_detect_status = trout_gpio_clear_detect_status
+};
+
+struct sysdev_class trout_sysdev_class = {
+ .name = "troutgpio_irq",
+ .suspend = trout_sysdev_suspend,
+ .resume = trout_sysdev_resume,
+};
+
+static struct sys_device trout_irq_device = {
+ .cls = &trout_sysdev_class,
+};
+
+static int __init trout_init_gpio(void)
+{
+ int i;
+
+ if (!machine_is_trout())
+ return 0;
+
+ /* adjust GPIOs based on bootloader request */
+ pr_info("trout_init_gpio: cpld_usb_hw2_sw = %d\n", cpld_usb_h2w_sw);
+ trout_gpio_write_shadow(TROUT_GPIO_USB_H2W_SW, cpld_usb_h2w_sw);
+
+ for(i = 0; i < ARRAY_SIZE(trout_cpld_shadow); i++)
+ writeb(trout_cpld_shadow[i], TROUT_CPLD_BASE + i * 2);
+
+ for(i = TROUT_INT_START; i <= TROUT_INT_END; i++) {
+ set_irq_chip(i, &trout_gpio_irq_chip);
+ set_irq_handler(i, handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ register_gpio_chip(&trout_gpio_chip);
+
+ set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH);
+ set_irq_chained_handler(MSM_GPIO_TO_INT(17), trout_gpio_irq_handler);
+ set_irq_wake(MSM_GPIO_TO_INT(17), 1);
+
+ if(sysdev_class_register(&trout_sysdev_class) == 0)
+ sysdev_register(&trout_irq_device);
+
+ return 0;
+}
+
+postcore_initcall(trout_init_gpio);
diff --git a/arch/arm/mach-msm/board-trout-keypad.c b/arch/arm/mach-msm/board-trout-keypad.c
new file mode 100644
index 000000000000..0299d0686de9
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout-keypad.c
@@ -0,0 +1,345 @@
+/* arch/arm/mach-msm/board-trout-keypad.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/gpio_event.h>
+#include <asm/mach-types.h>
+
+#include "board-trout.h"
+
+static char *keycaps = "--qwerty";
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "board_trout."
+module_param_named(keycaps, keycaps, charp, 0);
+
+
+static unsigned int trout_col_gpios[] = { 35, 34, 33, 32, 31, 23, 30, 78 };
+static unsigned int trout_row_gpios[] = { 42, 41, 40, 39, 38, 37, 36 };
+
+#define KEYMAP_INDEX(col, row) ((col)*ARRAY_SIZE(trout_row_gpios) + (row))
+
+static const unsigned short trout_keymap[ARRAY_SIZE(trout_col_gpios) * ARRAY_SIZE(trout_row_gpios)] = {
+ [KEYMAP_INDEX(0, 0)] = KEY_BACK,
+ [KEYMAP_INDEX(0, 1)] = KEY_HOME,
+// [KEYMAP_INDEX(0, 2)] = KEY_,
+ [KEYMAP_INDEX(0, 3)] = KEY_BACKSPACE,
+ [KEYMAP_INDEX(0, 4)] = KEY_ENTER,
+ [KEYMAP_INDEX(0, 5)] = KEY_RIGHTALT,
+ [KEYMAP_INDEX(0, 6)] = KEY_P,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_MENU,
+// [KEYMAP_INDEX(1, 0)] = 229, // SOFT1
+ [KEYMAP_INDEX(1, 1)] = KEY_SEND,
+ [KEYMAP_INDEX(1, 2)] = KEY_END,
+ [KEYMAP_INDEX(1, 3)] = KEY_LEFTALT,
+ [KEYMAP_INDEX(1, 4)] = KEY_A,
+ [KEYMAP_INDEX(1, 5)] = KEY_LEFTSHIFT,
+ [KEYMAP_INDEX(1, 6)] = KEY_Q,
+
+ [KEYMAP_INDEX(2, 0)] = KEY_U,
+ [KEYMAP_INDEX(2, 1)] = KEY_7,
+ [KEYMAP_INDEX(2, 2)] = KEY_K,
+ [KEYMAP_INDEX(2, 3)] = KEY_J,
+ [KEYMAP_INDEX(2, 4)] = KEY_M,
+ [KEYMAP_INDEX(2, 5)] = KEY_SLASH,
+ [KEYMAP_INDEX(2, 6)] = KEY_8,
+
+ [KEYMAP_INDEX(3, 0)] = KEY_5,
+ [KEYMAP_INDEX(3, 1)] = KEY_6,
+ [KEYMAP_INDEX(3, 2)] = KEY_B,
+ [KEYMAP_INDEX(3, 3)] = KEY_H,
+ [KEYMAP_INDEX(3, 4)] = KEY_N,
+ [KEYMAP_INDEX(3, 5)] = KEY_SPACE,
+ [KEYMAP_INDEX(3, 6)] = KEY_Y,
+
+ [KEYMAP_INDEX(4, 0)] = KEY_4,
+ [KEYMAP_INDEX(4, 1)] = KEY_R,
+ [KEYMAP_INDEX(4, 2)] = KEY_V,
+ [KEYMAP_INDEX(4, 3)] = KEY_G,
+ [KEYMAP_INDEX(4, 4)] = KEY_C,
+ //[KEYMAP_INDEX(4, 5)] = KEY_,
+ [KEYMAP_INDEX(4, 6)] = KEY_T,
+
+ [KEYMAP_INDEX(5, 0)] = KEY_2,
+ [KEYMAP_INDEX(5, 1)] = KEY_W,
+ [KEYMAP_INDEX(5, 2)] = KEY_COMPOSE,
+ [KEYMAP_INDEX(5, 3)] = KEY_VOLUMEUP,
+ [KEYMAP_INDEX(5, 4)] = KEY_S,
+ [KEYMAP_INDEX(5, 5)] = KEY_Z,
+ [KEYMAP_INDEX(5, 6)] = KEY_1,
+
+ [KEYMAP_INDEX(6, 0)] = KEY_I,
+ [KEYMAP_INDEX(6, 1)] = KEY_0,
+ [KEYMAP_INDEX(6, 2)] = KEY_O,
+ [KEYMAP_INDEX(6, 3)] = KEY_L,
+ [KEYMAP_INDEX(6, 4)] = KEY_DOT,
+ [KEYMAP_INDEX(6, 5)] = KEY_COMMA,
+ [KEYMAP_INDEX(6, 6)] = KEY_9,
+
+ [KEYMAP_INDEX(7, 0)] = KEY_3,
+ [KEYMAP_INDEX(7, 1)] = KEY_E,
+ [KEYMAP_INDEX(7, 2)] = KEY_EMAIL, // @
+ [KEYMAP_INDEX(7, 3)] = KEY_VOLUMEDOWN,
+ [KEYMAP_INDEX(7, 4)] = KEY_X,
+ [KEYMAP_INDEX(7, 5)] = KEY_F,
+ [KEYMAP_INDEX(7, 6)] = KEY_D
+};
+
+static unsigned int trout_col_gpios_evt2[] = { 35, 34, 33, 32, 31, 23, 30, 109 };
+static unsigned int trout_row_gpios_evt2[] = { 42, 41, 40, 39, 38, 37, 36 };
+
+static const unsigned short trout_keymap_evt2_1[ARRAY_SIZE(trout_col_gpios) * ARRAY_SIZE(trout_row_gpios)] = {
+ [KEYMAP_INDEX(0, 0)] = KEY_BACK,
+ [KEYMAP_INDEX(0, 1)] = KEY_HOME,
+// [KEYMAP_INDEX(0, 2)] = KEY_,
+ [KEYMAP_INDEX(0, 3)] = KEY_BACKSPACE,
+ [KEYMAP_INDEX(0, 4)] = KEY_ENTER,
+ [KEYMAP_INDEX(0, 5)] = KEY_RIGHTSHIFT,
+ [KEYMAP_INDEX(0, 6)] = KEY_P,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_MENU,
+ [KEYMAP_INDEX(1, 1)] = KEY_SEND,
+// [KEYMAP_INDEX(1, 2)] = KEY_,
+ [KEYMAP_INDEX(1, 3)] = KEY_LEFTSHIFT,
+ [KEYMAP_INDEX(1, 4)] = KEY_A,
+ [KEYMAP_INDEX(1, 5)] = KEY_COMPOSE,
+ [KEYMAP_INDEX(1, 6)] = KEY_Q,
+
+ [KEYMAP_INDEX(2, 0)] = KEY_U,
+ [KEYMAP_INDEX(2, 1)] = KEY_7,
+ [KEYMAP_INDEX(2, 2)] = KEY_K,
+ [KEYMAP_INDEX(2, 3)] = KEY_J,
+ [KEYMAP_INDEX(2, 4)] = KEY_M,
+ [KEYMAP_INDEX(2, 5)] = KEY_SLASH,
+ [KEYMAP_INDEX(2, 6)] = KEY_8,
+
+ [KEYMAP_INDEX(3, 0)] = KEY_5,
+ [KEYMAP_INDEX(3, 1)] = KEY_6,
+ [KEYMAP_INDEX(3, 2)] = KEY_B,
+ [KEYMAP_INDEX(3, 3)] = KEY_H,
+ [KEYMAP_INDEX(3, 4)] = KEY_N,
+ [KEYMAP_INDEX(3, 5)] = KEY_SPACE,
+ [KEYMAP_INDEX(3, 6)] = KEY_Y,
+
+ [KEYMAP_INDEX(4, 0)] = KEY_4,
+ [KEYMAP_INDEX(4, 1)] = KEY_R,
+ [KEYMAP_INDEX(4, 2)] = KEY_V,
+ [KEYMAP_INDEX(4, 3)] = KEY_G,
+ [KEYMAP_INDEX(4, 4)] = KEY_C,
+// [KEYMAP_INDEX(4, 5)] = KEY_,
+ [KEYMAP_INDEX(4, 6)] = KEY_T,
+
+ [KEYMAP_INDEX(5, 0)] = KEY_2,
+ [KEYMAP_INDEX(5, 1)] = KEY_W,
+ [KEYMAP_INDEX(5, 2)] = KEY_LEFTALT,
+ [KEYMAP_INDEX(5, 3)] = KEY_VOLUMEUP,
+ [KEYMAP_INDEX(5, 4)] = KEY_S,
+ [KEYMAP_INDEX(5, 5)] = KEY_Z,
+ [KEYMAP_INDEX(5, 6)] = KEY_1,
+
+ [KEYMAP_INDEX(6, 0)] = KEY_I,
+ [KEYMAP_INDEX(6, 1)] = KEY_0,
+ [KEYMAP_INDEX(6, 2)] = KEY_O,
+ [KEYMAP_INDEX(6, 3)] = KEY_L,
+ [KEYMAP_INDEX(6, 4)] = KEY_COMMA,
+ [KEYMAP_INDEX(6, 5)] = KEY_DOT,
+ [KEYMAP_INDEX(6, 6)] = KEY_9,
+
+ [KEYMAP_INDEX(7, 0)] = KEY_3,
+ [KEYMAP_INDEX(7, 1)] = KEY_E,
+ [KEYMAP_INDEX(7, 2)] = KEY_EMAIL, // @
+ [KEYMAP_INDEX(7, 3)] = KEY_VOLUMEDOWN,
+ [KEYMAP_INDEX(7, 4)] = KEY_X,
+ [KEYMAP_INDEX(7, 5)] = KEY_F,
+ [KEYMAP_INDEX(7, 6)] = KEY_D
+};
+
+static const unsigned short trout_keymap_evt2_2[ARRAY_SIZE(trout_col_gpios) * ARRAY_SIZE(trout_row_gpios)] = {
+ [KEYMAP_INDEX(0, 0)] = KEY_BACK,
+ [KEYMAP_INDEX(0, 1)] = KEY_HOME,
+// [KEYMAP_INDEX(0, 2)] = KEY_,
+ [KEYMAP_INDEX(0, 3)] = KEY_BACKSPACE,
+ [KEYMAP_INDEX(0, 4)] = KEY_ENTER,
+ [KEYMAP_INDEX(0, 5)] = KEY_RIGHTSHIFT,
+ [KEYMAP_INDEX(0, 6)] = KEY_P,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_MENU, /* external menu key */
+ [KEYMAP_INDEX(1, 1)] = KEY_SEND,
+// [KEYMAP_INDEX(1, 2)] = KEY_,
+ [KEYMAP_INDEX(1, 3)] = KEY_LEFTSHIFT,
+ [KEYMAP_INDEX(1, 4)] = KEY_A,
+ [KEYMAP_INDEX(1, 5)] = KEY_F1, /* qwerty menu key */
+ [KEYMAP_INDEX(1, 6)] = KEY_Q,
+
+ [KEYMAP_INDEX(2, 0)] = KEY_U,
+ [KEYMAP_INDEX(2, 1)] = KEY_7,
+ [KEYMAP_INDEX(2, 2)] = KEY_K,
+ [KEYMAP_INDEX(2, 3)] = KEY_J,
+ [KEYMAP_INDEX(2, 4)] = KEY_M,
+ [KEYMAP_INDEX(2, 5)] = KEY_DOT,
+ [KEYMAP_INDEX(2, 6)] = KEY_8,
+
+ [KEYMAP_INDEX(3, 0)] = KEY_5,
+ [KEYMAP_INDEX(3, 1)] = KEY_6,
+ [KEYMAP_INDEX(3, 2)] = KEY_B,
+ [KEYMAP_INDEX(3, 3)] = KEY_H,
+ [KEYMAP_INDEX(3, 4)] = KEY_N,
+ [KEYMAP_INDEX(3, 5)] = KEY_SPACE,
+ [KEYMAP_INDEX(3, 6)] = KEY_Y,
+
+ [KEYMAP_INDEX(4, 0)] = KEY_4,
+ [KEYMAP_INDEX(4, 1)] = KEY_R,
+ [KEYMAP_INDEX(4, 2)] = KEY_V,
+ [KEYMAP_INDEX(4, 3)] = KEY_G,
+ [KEYMAP_INDEX(4, 4)] = KEY_C,
+ [KEYMAP_INDEX(4, 5)] = KEY_EMAIL, // @
+ [KEYMAP_INDEX(4, 6)] = KEY_T,
+
+ [KEYMAP_INDEX(5, 0)] = KEY_2,
+ [KEYMAP_INDEX(5, 1)] = KEY_W,
+ [KEYMAP_INDEX(5, 2)] = KEY_LEFTALT,
+ [KEYMAP_INDEX(5, 3)] = KEY_VOLUMEUP,
+ [KEYMAP_INDEX(5, 4)] = KEY_S,
+ [KEYMAP_INDEX(5, 5)] = KEY_Z,
+ [KEYMAP_INDEX(5, 6)] = KEY_1,
+
+ [KEYMAP_INDEX(6, 0)] = KEY_I,
+ [KEYMAP_INDEX(6, 1)] = KEY_0,
+ [KEYMAP_INDEX(6, 2)] = KEY_O,
+ [KEYMAP_INDEX(6, 3)] = KEY_L,
+ [KEYMAP_INDEX(6, 4)] = KEY_COMMA,
+ [KEYMAP_INDEX(6, 5)] = KEY_RIGHTALT,
+ [KEYMAP_INDEX(6, 6)] = KEY_9,
+
+ [KEYMAP_INDEX(7, 0)] = KEY_3,
+ [KEYMAP_INDEX(7, 1)] = KEY_E,
+ [KEYMAP_INDEX(7, 2)] = KEY_COMPOSE,
+ [KEYMAP_INDEX(7, 3)] = KEY_VOLUMEDOWN,
+ [KEYMAP_INDEX(7, 4)] = KEY_X,
+ [KEYMAP_INDEX(7, 5)] = KEY_F,
+ [KEYMAP_INDEX(7, 6)] = KEY_D
+};
+
+static struct gpio_event_matrix_info trout_keypad_matrix_info = {
+ .info.func = gpio_event_matrix_func,
+ .keymap = trout_keymap,
+ .output_gpios = trout_col_gpios,
+ .input_gpios = trout_row_gpios,
+ .noutputs = ARRAY_SIZE(trout_col_gpios),
+ .ninputs = ARRAY_SIZE(trout_row_gpios),
+ .settle_time.tv.nsec = 40 * NSEC_PER_USEC,
+ .poll_time.tv.nsec = 20 * NSEC_PER_MSEC,
+ .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_REMOVE_PHANTOM_KEYS |GPIOKPF_PRINT_UNMAPPED_KEYS /*| GPIOKPF_PRINT_MAPPED_KEYS*/
+};
+
+static struct gpio_event_direct_entry trout_keypad_nav_map[] = {
+ { TROUT_POWER_KEY, KEY_POWER },
+ { TROUT_GPIO_CAM_BTN_STEP1_N, KEY_CAMERA-1 }, //steal KEY_HP
+ { TROUT_GPIO_CAM_BTN_STEP2_N, KEY_CAMERA },
+};
+
+static struct gpio_event_direct_entry trout_keypad_nav_map_evt2[] = {
+ { TROUT_POWER_KEY, KEY_END },
+ { TROUT_GPIO_CAM_BTN_STEP1_N, KEY_CAMERA-1 }, //steal KEY_HP
+ { TROUT_GPIO_CAM_BTN_STEP2_N, KEY_CAMERA },
+};
+
+static struct gpio_event_input_info trout_keypad_nav_info = {
+ .info.func = gpio_event_input_func,
+ .flags = 0,
+ .type = EV_KEY,
+ .keymap = trout_keypad_nav_map,
+ .keymap_size = ARRAY_SIZE(trout_keypad_nav_map)
+};
+
+static struct gpio_event_direct_entry trout_keypad_switch_map[] = {
+ { TROUT_GPIO_SLIDING_DET, SW_LID }
+};
+
+static struct gpio_event_input_info trout_keypad_switch_info = {
+ .info.func = gpio_event_input_func,
+ .flags = 0,
+ .type = EV_SW,
+ .keymap = trout_keypad_switch_map,
+ .keymap_size = ARRAY_SIZE(trout_keypad_switch_map)
+};
+
+static struct gpio_event_info *trout_keypad_info[] = {
+ &trout_keypad_matrix_info.info,
+ &trout_keypad_nav_info.info,
+ &trout_keypad_switch_info.info,
+};
+
+static struct gpio_event_platform_data trout_keypad_data = {
+ .name = "trout-keypad",
+ .info = trout_keypad_info,
+ .info_count = ARRAY_SIZE(trout_keypad_info)
+};
+
+static struct platform_device trout_keypad_device = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = 0,
+ .dev = {
+ .platform_data = &trout_keypad_data,
+ },
+};
+
+static int __init trout_init_keypad(void)
+{
+ if (!machine_is_trout())
+ return 0;
+
+ switch (system_rev) {
+ case 0:
+ /* legacy default keylayout */
+ break;
+ case 1:
+ /* v1 has a new keyboard layout */
+ trout_keypad_matrix_info.keymap = trout_keymap_evt2_1;
+ trout_keypad_matrix_info.output_gpios = trout_col_gpios_evt2;
+ trout_keypad_matrix_info.input_gpios = trout_row_gpios_evt2;
+
+ /* v1 has new direct keys */
+ trout_keypad_nav_info.keymap = trout_keypad_nav_map_evt2;
+ trout_keypad_nav_info.keymap_size = ARRAY_SIZE(trout_keypad_nav_map_evt2);
+
+ /* userspace needs to know about these changes as well */
+ trout_keypad_data.name = "trout-keypad-v2";
+ break;
+ default: /* 2, 3, 4 currently */
+ /* v2 has a new keyboard layout */
+ trout_keypad_matrix_info.keymap = trout_keymap_evt2_2;
+ trout_keypad_matrix_info.output_gpios = trout_col_gpios_evt2;
+ trout_keypad_matrix_info.input_gpios = trout_row_gpios_evt2;
+
+ /* v2 has new direct keys */
+ trout_keypad_nav_info.keymap = trout_keypad_nav_map_evt2;
+ trout_keypad_nav_info.keymap_size = ARRAY_SIZE(trout_keypad_nav_map_evt2);
+
+ /* userspace needs to know about these changes as well */
+ if (!strcmp(keycaps, "qwertz")) {
+ trout_keypad_data.name = "trout-keypad-qwertz";
+ } else {
+ trout_keypad_data.name = "trout-keypad-v3";
+ }
+ break;
+ }
+ return platform_device_register(&trout_keypad_device);
+}
+
+device_initcall(trout_init_keypad);
+
diff --git a/arch/arm/mach-msm/board-trout-mmc.c b/arch/arm/mach-msm/board-trout-mmc.c
new file mode 100644
index 000000000000..f417fa4f4152
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout-mmc.c
@@ -0,0 +1,437 @@
+/* linux/arch/arm/mach-msm/board-trout-mmc.c
+** Author: Brian Swetland <swetland@google.com>
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+#include <asm/gpio.h>
+#include <asm/io.h>
+
+#include <mach/vreg.h>
+#include <mach/htc_pwrsink.h>
+
+#include <asm/mach/mmc.h>
+
+#include "devices.h"
+
+#include "board-trout.h"
+
+#include "proc_comm.h"
+
+#define DEBUG_SDSLOT_VDD 1
+
+extern int msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat);
+
+/* ---- COMMON ---- */
+static void config_gpio_table(uint32_t *table, int len)
+{
+ int n;
+ unsigned id;
+ for(n = 0; n < len; n++) {
+ id = table[n];
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0);
+ }
+}
+
+/* ---- SDCARD ---- */
+
+static uint32_t sdcard_on_gpio_table[] = {
+ PCOM_GPIO_CFG(62, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */
+ PCOM_GPIO_CFG(63, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */
+ PCOM_GPIO_CFG(64, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT3 */
+ PCOM_GPIO_CFG(65, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT2 */
+ PCOM_GPIO_CFG(66, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(67, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */
+};
+
+static uint32_t sdcard_off_gpio_table[] = {
+ PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */
+ PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */
+ PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */
+};
+
+static uint opt_disable_sdcard;
+
+static int __init trout_disablesdcard_setup(char *str)
+{
+ int cal = simple_strtol(str, NULL, 0);
+
+ opt_disable_sdcard = cal;
+ return 1;
+}
+
+__setup("board_trout.disable_sdcard=", trout_disablesdcard_setup);
+
+static struct vreg *vreg_sdslot; /* SD slot power */
+
+struct mmc_vdd_xlat {
+ int mask;
+ int level;
+};
+
+static struct mmc_vdd_xlat mmc_vdd_table[] = {
+ { MMC_VDD_165_195, 1800 },
+ { MMC_VDD_20_21, 2050 },
+ { MMC_VDD_21_22, 2150 },
+ { MMC_VDD_22_23, 2250 },
+ { MMC_VDD_23_24, 2350 },
+ { MMC_VDD_24_25, 2450 },
+ { MMC_VDD_25_26, 2550 },
+ { MMC_VDD_26_27, 2650 },
+ { MMC_VDD_27_28, 2750 },
+ { MMC_VDD_28_29, 2850 },
+ { MMC_VDD_29_30, 2950 },
+};
+
+static unsigned int sdslot_vdd = 0xffffffff;
+static unsigned int sdslot_vreg_enabled;
+
+static uint32_t trout_sdslot_switchvdd(struct device *dev, unsigned int vdd)
+{
+ int i, rc;
+
+ BUG_ON(!vreg_sdslot);
+
+ if (vdd == sdslot_vdd)
+ return 0;
+
+ sdslot_vdd = vdd;
+
+ if (vdd == 0) {
+#if DEBUG_SDSLOT_VDD
+ printk("%s: Disabling SD slot power\n", __func__);
+#endif
+ config_gpio_table(sdcard_off_gpio_table,
+ ARRAY_SIZE(sdcard_off_gpio_table));
+ vreg_disable(vreg_sdslot);
+ sdslot_vreg_enabled = 0;
+ return 0;
+ }
+
+ if (!sdslot_vreg_enabled) {
+ rc = vreg_enable(vreg_sdslot);
+ if (rc) {
+ printk(KERN_ERR "%s: Error enabling vreg (%d)\n",
+ __func__, rc);
+ }
+ config_gpio_table(sdcard_on_gpio_table,
+ ARRAY_SIZE(sdcard_on_gpio_table));
+ sdslot_vreg_enabled = 1;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) {
+ if (mmc_vdd_table[i].mask == (1 << vdd)) {
+#if DEBUG_SDSLOT_VDD
+ printk("%s: Setting level to %u\n",
+ __func__, mmc_vdd_table[i].level);
+#endif
+ rc = vreg_set_level(vreg_sdslot,
+ mmc_vdd_table[i].level);
+ if (rc) {
+ printk(KERN_ERR
+ "%s: Error setting vreg level (%d)\n",
+ __func__, rc);
+ }
+ return 0;
+ }
+ }
+
+ printk(KERN_ERR "%s: Invalid VDD %d specified\n", __func__, vdd);
+ return 0;
+}
+
+static unsigned int trout_sdslot_status(struct device *dev)
+{
+ unsigned int status;
+
+ status = (unsigned int) gpio_get_value(TROUT_GPIO_SDMC_CD_N);
+ return (!status);
+}
+
+#define TROUT_MMC_VDD MMC_VDD_165_195 | MMC_VDD_20_21 | MMC_VDD_21_22 \
+ | MMC_VDD_22_23 | MMC_VDD_23_24 | MMC_VDD_24_25 \
+ | MMC_VDD_25_26 | MMC_VDD_26_27 | MMC_VDD_27_28 \
+ | MMC_VDD_28_29 | MMC_VDD_29_30
+
+static struct mmc_platform_data trout_sdslot_data = {
+ .ocr_mask = TROUT_MMC_VDD,
+ .status_irq = TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N),
+ .status = trout_sdslot_status,
+ .translate_vdd = trout_sdslot_switchvdd,
+};
+
+/* ---- WIFI ---- */
+
+static uint32_t wifi_on_gpio_table[] = {
+ PCOM_GPIO_CFG(51, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(52, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(53, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(54, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */
+ PCOM_GPIO_CFG(55, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */
+ PCOM_GPIO_CFG(56, 1, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */
+ PCOM_GPIO_CFG(29, 0, GPIO_INPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */
+};
+
+static uint32_t wifi_off_gpio_table[] = {
+ PCOM_GPIO_CFG(51, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */
+ PCOM_GPIO_CFG(52, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */
+ PCOM_GPIO_CFG(53, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */
+ PCOM_GPIO_CFG(54, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */
+ PCOM_GPIO_CFG(55, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */
+ PCOM_GPIO_CFG(56, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */
+ PCOM_GPIO_CFG(29, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* WLAN IRQ */
+};
+
+static struct vreg *vreg_wifi_osc; /* WIFI 32khz oscilator */
+static int trout_wifi_cd = 0; /* WIFI virtual 'card detect' status */
+
+static struct sdio_embedded_func wifi_func = {
+ .f_class = SDIO_CLASS_WLAN,
+ .f_maxblksize = 512,
+};
+
+static struct embedded_sdio_data trout_wifi_emb_data = {
+ .cis = {
+ .vendor = 0x104c,
+ .device = 0x9066,
+ .blksize = 512,
+ /*.max_dtr = 24000000, Max of chip - no worky on Trout */
+ .max_dtr = 20000000,
+ },
+ .cccr = {
+ .multi_block = 0,
+ .low_speed = 0,
+ .wide_bus = 1,
+ .high_power = 0,
+ .high_speed = 0,
+ },
+ .funcs = &wifi_func,
+ .num_funcs = 1,
+};
+
+static void (*wifi_status_cb)(int card_present, void *dev_id);
+static void *wifi_status_cb_devid;
+
+static int trout_wifi_status_register(void (*callback)(int card_present, void *dev_id), void *dev_id)
+{
+ if (wifi_status_cb)
+ return -EAGAIN;
+ wifi_status_cb = callback;
+ wifi_status_cb_devid = dev_id;
+ return 0;
+}
+
+static unsigned int trout_wifi_status(struct device *dev)
+{
+ return trout_wifi_cd;
+}
+
+int trout_wifi_set_carddetect(int val)
+{
+ printk("%s: %d\n", __func__, val);
+ trout_wifi_cd = val;
+ if (wifi_status_cb) {
+ wifi_status_cb(val, wifi_status_cb_devid);
+ } else
+ printk(KERN_WARNING "%s: Nobody to notify\n", __func__);
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(trout_wifi_set_carddetect);
+#endif
+
+static int trout_wifi_power_state;
+
+int trout_wifi_power(int on)
+{
+ int rc;
+
+ printk("%s: %d\n", __func__, on);
+
+ if (on) {
+ config_gpio_table(wifi_on_gpio_table,
+ ARRAY_SIZE(wifi_on_gpio_table));
+ rc = vreg_enable(vreg_wifi_osc);
+ if (rc)
+ return rc;
+ htc_pwrsink_set(PWRSINK_WIFI, 70);
+ } else {
+ config_gpio_table(wifi_off_gpio_table,
+ ARRAY_SIZE(wifi_off_gpio_table));
+ htc_pwrsink_set(PWRSINK_WIFI, 0);
+ }
+ gpio_set_value( TROUT_GPIO_MAC_32K_EN, on);
+ mdelay(100);
+ gpio_set_value( TROUT_GPIO_WIFI_EN, on);
+ mdelay(100);
+ if (!on) {
+ vreg_disable(vreg_wifi_osc);
+ }
+ trout_wifi_power_state = on;
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(trout_wifi_power);
+#endif
+
+static int trout_wifi_reset_state;
+int trout_wifi_reset(int on)
+{
+ printk("%s: %d\n", __func__, on);
+ gpio_set_value( TROUT_GPIO_WIFI_PA_RESETX, !on );
+ trout_wifi_reset_state = on;
+ mdelay(50);
+ return 0;
+}
+#ifndef CONFIG_WIFI_CONTROL_FUNC
+EXPORT_SYMBOL(trout_wifi_reset);
+#endif
+
+static struct mmc_platform_data trout_wifi_data = {
+ .ocr_mask = MMC_VDD_28_29,
+ .status = trout_wifi_status,
+ .register_status_notify = trout_wifi_status_register,
+ .embedded_sdio = &trout_wifi_emb_data,
+};
+
+int __init trout_init_mmc(unsigned int sys_rev)
+{
+ wifi_status_cb = NULL;
+
+ sdslot_vreg_enabled = 0;
+
+ vreg_sdslot = vreg_get(0, "gp6");
+ if (IS_ERR(vreg_sdslot))
+ return PTR_ERR(vreg_sdslot);
+ vreg_wifi_osc = vreg_get(0, "mmc");
+ if (IS_ERR(vreg_wifi_osc))
+ return PTR_ERR(vreg_wifi_osc);
+
+ set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1);
+
+ msm_add_sdcc(1, &trout_wifi_data);
+
+ if (!opt_disable_sdcard)
+ msm_add_sdcc(2, &trout_sdslot_data);
+ else
+ printk(KERN_INFO "trout: SD-Card interface disabled\n");
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int troutmmc_dbg_wifi_reset_set(void *data, u64 val)
+{
+ trout_wifi_reset((int) val);
+ return 0;
+}
+
+static int troutmmc_dbg_wifi_reset_get(void *data, u64 *val)
+{
+ *val = trout_wifi_reset_state;
+ return 0;
+}
+
+static int troutmmc_dbg_wifi_cd_set(void *data, u64 val)
+{
+ trout_wifi_set_carddetect((int) val);
+ return 0;
+}
+
+static int troutmmc_dbg_wifi_cd_get(void *data, u64 *val)
+{
+ *val = trout_wifi_cd;
+ return 0;
+}
+
+static int troutmmc_dbg_wifi_pwr_set(void *data, u64 val)
+{
+ trout_wifi_power((int) val);
+ return 0;
+}
+
+static int troutmmc_dbg_wifi_pwr_get(void *data, u64 *val)
+{
+
+ *val = trout_wifi_power_state;
+ return 0;
+}
+
+static int troutmmc_dbg_sd_pwr_set(void *data, u64 val)
+{
+ trout_sdslot_switchvdd(NULL, (unsigned int) val);
+ return 0;
+}
+
+static int troutmmc_dbg_sd_pwr_get(void *data, u64 *val)
+{
+ *val = sdslot_vdd;
+ return 0;
+}
+
+static int troutmmc_dbg_sd_cd_set(void *data, u64 val)
+{
+ return -ENOSYS;
+}
+
+static int troutmmc_dbg_sd_cd_get(void *data, u64 *val)
+{
+ *val = trout_sdslot_status(NULL);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_wifi_reset_fops,
+ troutmmc_dbg_wifi_reset_get,
+ troutmmc_dbg_wifi_reset_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_wifi_cd_fops,
+ troutmmc_dbg_wifi_cd_get,
+ troutmmc_dbg_wifi_cd_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_wifi_pwr_fops,
+ troutmmc_dbg_wifi_pwr_get,
+ troutmmc_dbg_wifi_pwr_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_sd_pwr_fops,
+ troutmmc_dbg_sd_pwr_get,
+ troutmmc_dbg_sd_pwr_set, "%llu\n");
+
+DEFINE_SIMPLE_ATTRIBUTE(troutmmc_dbg_sd_cd_fops,
+ troutmmc_dbg_sd_cd_get,
+ troutmmc_dbg_sd_cd_set, "%llu\n");
+
+static int __init troutmmc_dbg_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("troutmmc_dbg", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debugfs_create_file("wifi_reset", 0644, dent, NULL,
+ &troutmmc_dbg_wifi_reset_fops);
+ debugfs_create_file("wifi_cd", 0644, dent, NULL,
+ &troutmmc_dbg_wifi_cd_fops);
+ debugfs_create_file("wifi_pwr", 0644, dent, NULL,
+ &troutmmc_dbg_wifi_pwr_fops);
+
+ debugfs_create_file("sd_pwr", 0644, dent, NULL,
+ &troutmmc_dbg_sd_pwr_fops);
+ debugfs_create_file("sd_cd", 0644, dent, NULL,
+ &troutmmc_dbg_sd_cd_fops);
+
+ return 0;
+}
+
+device_initcall(troutmmc_dbg_init);
+
+#endif
diff --git a/arch/arm/mach-msm/board-trout-panel.c b/arch/arm/mach-msm/board-trout-panel.c
new file mode 100644
index 000000000000..900b8b1a6f76
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout-panel.c
@@ -0,0 +1,642 @@
+/* linux/arch/arm/mach-msm/board-trout-mddi.c
+** Author: Brian Swetland <swetland@google.com>
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/leds.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <asm/mach-types.h>
+
+#include <mach/msm_fb.h>
+#include <mach/vreg.h>
+#include <mach/htc_pwrsink.h>
+
+#include "board-trout.h"
+#include "proc_comm.h"
+#include "devices.h"
+
+#define TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS 255
+
+static struct clk *gp_clk;
+static int trout_backlight_off;
+static int trout_backlight_brightness = TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS;
+static int trout_new_backlight = 1;
+static uint8_t trout_backlight_last_level = 33;
+static DEFINE_MUTEX(trout_backlight_lock);
+
+static void trout_set_backlight_level(uint8_t level)
+{
+ unsigned percent = ((int)level * 100) / 255;
+
+ if (trout_new_backlight) {
+ unsigned long flags;
+ int i = 0;
+ level = (int)level * 34 / 256;
+
+ if (trout_backlight_last_level == level)
+ return;
+
+ if (level == 0) {
+ gpio_set_value(27, 0);
+ msleep(2);
+ } else {
+ local_irq_save(flags);
+ if (trout_backlight_last_level == 0) {
+ gpio_set_value(27, 1);
+ udelay(40);
+ trout_backlight_last_level = 33;
+ }
+ i = (trout_backlight_last_level - level + 33) % 33;
+ while (i-- > 0) {
+ gpio_set_value(27, 0);
+ udelay(1);
+ gpio_set_value(27, 1);
+ udelay(1);
+ }
+ local_irq_restore(flags);
+ }
+ trout_backlight_last_level = level;
+ }
+ else {
+ if(level) {
+ clk_enable(gp_clk);
+ writel((1U << 16) | (~level & 0xffff),
+ MSM_CLK_CTL_BASE + 0x58);
+ /* Going directly to a 100% duty cycle does not
+ * seem to work */
+ if(level == 255) {
+ writel((~127 << 16) | 0xb20,
+ MSM_CLK_CTL_BASE + 0x5c);
+ udelay(1);
+ }
+ writel((~127 << 16) | 0xb58, MSM_CLK_CTL_BASE + 0x5c);
+ }
+ else {
+ writel(0x0, MSM_CLK_CTL_BASE + 0x5c);
+ clk_disable(gp_clk);
+ }
+ }
+ htc_pwrsink_set(PWRSINK_BACKLIGHT, percent);
+}
+
+#define MDDI_CLIENT_CORE_BASE 0x108000
+#define LCD_CONTROL_BLOCK_BASE 0x110000
+#define SPI_BLOCK_BASE 0x120000
+#define I2C_BLOCK_BASE 0x130000
+#define PWM_BLOCK_BASE 0x140000
+#define GPIO_BLOCK_BASE 0x150000
+#define SYSTEM_BLOCK1_BASE 0x160000
+#define SYSTEM_BLOCK2_BASE 0x170000
+
+
+#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
+#define SYSCLKENA (MDDI_CLIENT_CORE_BASE|0x2C)
+#define PWM0OFF (PWM_BLOCK_BASE|0x1C)
+
+#define V_VDDE2E_VDD2_GPIO 0
+#define MDDI_RST_N 82
+
+#define MDDICAP0 (MDDI_CLIENT_CORE_BASE|0x00)
+#define MDDICAP1 (MDDI_CLIENT_CORE_BASE|0x04)
+#define MDDICAP2 (MDDI_CLIENT_CORE_BASE|0x08)
+#define MDDICAP3 (MDDI_CLIENT_CORE_BASE|0x0C)
+#define MDCAPCHG (MDDI_CLIENT_CORE_BASE|0x10)
+#define MDCRCERC (MDDI_CLIENT_CORE_BASE|0x14)
+#define TTBUSSEL (MDDI_CLIENT_CORE_BASE|0x18)
+#define DPSET0 (MDDI_CLIENT_CORE_BASE|0x1C)
+#define DPSET1 (MDDI_CLIENT_CORE_BASE|0x20)
+#define DPSUS (MDDI_CLIENT_CORE_BASE|0x24)
+#define DPRUN (MDDI_CLIENT_CORE_BASE|0x28)
+#define SYSCKENA (MDDI_CLIENT_CORE_BASE|0x2C)
+#define TESTMODE (MDDI_CLIENT_CORE_BASE|0x30)
+#define FIFOMONI (MDDI_CLIENT_CORE_BASE|0x34)
+#define INTMONI (MDDI_CLIENT_CORE_BASE|0x38)
+#define MDIOBIST (MDDI_CLIENT_CORE_BASE|0x3C)
+#define MDIOPSET (MDDI_CLIENT_CORE_BASE|0x40)
+#define BITMAP0 (MDDI_CLIENT_CORE_BASE|0x44)
+#define BITMAP1 (MDDI_CLIENT_CORE_BASE|0x48)
+#define BITMAP2 (MDDI_CLIENT_CORE_BASE|0x4C)
+#define BITMAP3 (MDDI_CLIENT_CORE_BASE|0x50)
+#define BITMAP4 (MDDI_CLIENT_CORE_BASE|0x54)
+
+#define SRST (LCD_CONTROL_BLOCK_BASE|0x00)
+#define PORT_ENB (LCD_CONTROL_BLOCK_BASE|0x04)
+#define START (LCD_CONTROL_BLOCK_BASE|0x08)
+#define PORT (LCD_CONTROL_BLOCK_BASE|0x0C)
+#define CMN (LCD_CONTROL_BLOCK_BASE|0x10)
+#define GAMMA (LCD_CONTROL_BLOCK_BASE|0x14)
+#define INTFLG (LCD_CONTROL_BLOCK_BASE|0x18)
+#define INTMSK (LCD_CONTROL_BLOCK_BASE|0x1C)
+#define MPLFBUF (LCD_CONTROL_BLOCK_BASE|0x20)
+#define HDE_LEFT (LCD_CONTROL_BLOCK_BASE|0x24)
+#define VDE_TOP (LCD_CONTROL_BLOCK_BASE|0x28)
+#define PXL (LCD_CONTROL_BLOCK_BASE|0x30)
+#define HCYCLE (LCD_CONTROL_BLOCK_BASE|0x34)
+#define HSW (LCD_CONTROL_BLOCK_BASE|0x38)
+#define HDE_START (LCD_CONTROL_BLOCK_BASE|0x3C)
+#define HDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x40)
+#define VCYCLE (LCD_CONTROL_BLOCK_BASE|0x44)
+#define VSW (LCD_CONTROL_BLOCK_BASE|0x48)
+#define VDE_START (LCD_CONTROL_BLOCK_BASE|0x4C)
+#define VDE_SIZE (LCD_CONTROL_BLOCK_BASE|0x50)
+#define WAKEUP (LCD_CONTROL_BLOCK_BASE|0x54)
+#define WSYN_DLY (LCD_CONTROL_BLOCK_BASE|0x58)
+#define REGENB (LCD_CONTROL_BLOCK_BASE|0x5C)
+#define VSYNIF (LCD_CONTROL_BLOCK_BASE|0x60)
+#define WRSTB (LCD_CONTROL_BLOCK_BASE|0x64)
+#define RDSTB (LCD_CONTROL_BLOCK_BASE|0x68)
+#define ASY_DATA (LCD_CONTROL_BLOCK_BASE|0x6C)
+#define ASY_DATB (LCD_CONTROL_BLOCK_BASE|0x70)
+#define ASY_DATC (LCD_CONTROL_BLOCK_BASE|0x74)
+#define ASY_DATD (LCD_CONTROL_BLOCK_BASE|0x78)
+#define ASY_DATE (LCD_CONTROL_BLOCK_BASE|0x7C)
+#define ASY_DATF (LCD_CONTROL_BLOCK_BASE|0x80)
+#define ASY_DATG (LCD_CONTROL_BLOCK_BASE|0x84)
+#define ASY_DATH (LCD_CONTROL_BLOCK_BASE|0x88)
+#define ASY_CMDSET (LCD_CONTROL_BLOCK_BASE|0x8C)
+
+#define SSICTL (SPI_BLOCK_BASE|0x00)
+#define SSITIME (SPI_BLOCK_BASE|0x04)
+#define SSITX (SPI_BLOCK_BASE|0x08)
+#define SSIRX (SPI_BLOCK_BASE|0x0C)
+#define SSIINTC (SPI_BLOCK_BASE|0x10)
+#define SSIINTS (SPI_BLOCK_BASE|0x14)
+#define SSIDBG1 (SPI_BLOCK_BASE|0x18)
+#define SSIDBG2 (SPI_BLOCK_BASE|0x1C)
+#define SSIID (SPI_BLOCK_BASE|0x20)
+
+#define WKREQ (SYSTEM_BLOCK1_BASE|0x00)
+#define CLKENB (SYSTEM_BLOCK1_BASE|0x04)
+#define DRAMPWR (SYSTEM_BLOCK1_BASE|0x08)
+#define INTMASK (SYSTEM_BLOCK1_BASE|0x0C)
+#define GPIOSEL (SYSTEM_BLOCK2_BASE|0x00)
+
+#define GPIODATA (GPIO_BLOCK_BASE|0x00)
+#define GPIODIR (GPIO_BLOCK_BASE|0x04)
+#define GPIOIS (GPIO_BLOCK_BASE|0x08)
+#define GPIOIBE (GPIO_BLOCK_BASE|0x0C)
+#define GPIOIEV (GPIO_BLOCK_BASE|0x10)
+#define GPIOIE (GPIO_BLOCK_BASE|0x14)
+#define GPIORIS (GPIO_BLOCK_BASE|0x18)
+#define GPIOMIS (GPIO_BLOCK_BASE|0x1C)
+#define GPIOIC (GPIO_BLOCK_BASE|0x20)
+#define GPIOOMS (GPIO_BLOCK_BASE|0x24)
+#define GPIOPC (GPIO_BLOCK_BASE|0x28)
+#define GPIOID (GPIO_BLOCK_BASE|0x30)
+
+#define SPI_WRITE(reg, val) \
+ { SSITX, 0x00010000 | (((reg) & 0xff) << 8) | ((val) & 0xff) }, \
+ { 0, 5 },
+
+#define SPI_WRITE1(reg) \
+ { SSITX, (reg) & 0xff }, \
+ { 0, 5 },
+
+struct mddi_table {
+ uint32_t reg;
+ uint32_t value;
+};
+static struct mddi_table mddi_toshiba_init_table[] = {
+ { DPSET0, 0x09e90046 },
+ { DPSET1, 0x00000118 },
+ { DPSUS, 0x00000000 },
+ { DPRUN, 0x00000001 },
+ { 1, 14 }, /* msleep 14 */
+ { SYSCKENA, 0x00000001 },
+ //{ CLKENB, 0x000000EF },
+ { CLKENB, 0x0000A1EF }, /* # SYS.CLKENB # Enable clocks for each module (without DCLK , i2cCLK) */
+ //{ CLKENB, 0x000025CB }, /* Clock enable register */
+
+ { GPIODATA, 0x02000200 }, /* # GPI .GPIODATA # GPIO2(RESET_LCD_N) set to 0 , GPIO3(eDRAM_Power) set to 0 */
+ { GPIODIR, 0x000030D }, /* 24D # GPI .GPIODIR # Select direction of GPIO port (0,2,3,6,9 output) */
+ { GPIOSEL, 0/*0x00000173*/}, /* # SYS.GPIOSEL # GPIO port multiplexing control */
+ { GPIOPC, 0x03C300C0 }, /* # GPI .GPIOPC # GPIO2,3 PD cut */
+ { WKREQ, 0x00000000 }, /* # SYS.WKREQ # Wake-up request event is VSYNC alignment */
+
+ { GPIOIBE, 0x000003FF },
+ { GPIOIS, 0x00000000 },
+ { GPIOIC, 0x000003FF },
+ { GPIOIE, 0x00000000 },
+
+ { GPIODATA, 0x00040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
+ { 1, 1 }, /* msleep 1 */
+ { GPIODATA, 0x02040004 }, /* # GPI .GPIODATA # eDRAM VD supply */
+ { DRAMPWR, 0x00000001 }, /* eDRAM power */
+};
+
+static struct mddi_table mddi_toshiba_panel_init_table[] = {
+ { SRST, 0x00000003 }, /* FIFO/LCDC not reset */
+ { PORT_ENB, 0x00000001 }, /* Enable sync. Port */
+ { START, 0x00000000 }, /* To stop operation */
+ //{ START, 0x00000001 }, /* To start operation */
+ { PORT, 0x00000004 }, /* Polarity of VS/HS/DE. */
+ { CMN, 0x00000000 },
+ { GAMMA, 0x00000000 }, /* No Gamma correction */
+ { INTFLG, 0x00000000 }, /* VSYNC interrupt flag clear/status */
+ { INTMSK, 0x00000000 }, /* VSYNC interrupt mask is off. */
+ { MPLFBUF, 0x00000000 }, /* Select frame buffer's base address. */
+ { HDE_LEFT, 0x00000000 }, /* The value of HDE_LEFT. */
+ { VDE_TOP, 0x00000000 }, /* The value of VDE_TPO. */
+ { PXL, 0x00000001 }, /* 1. RGB666 */
+ /* 2. Data is valid from 1st frame of beginning. */
+ { HDE_START, 0x00000006 }, /* HDE_START= 14 PCLK */
+ { HDE_SIZE, 0x0000009F }, /* HDE_SIZE=320 PCLK */
+ { HSW, 0x00000004 }, /* HSW= 10 PCLK */
+ { VSW, 0x00000001 }, /* VSW=2 HCYCLE */
+ { VDE_START, 0x00000003 }, /* VDE_START=4 HCYCLE */
+ { VDE_SIZE, 0x000001DF }, /* VDE_SIZE=480 HCYCLE */
+ { WAKEUP, 0x000001e2 }, /* Wakeup position in VSYNC mode. */
+ { WSYN_DLY, 0x00000000 }, /* Wakeup position in VSIN mode. */
+ { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
+ { CLKENB, 0x000025CB }, /* Clock enable register */
+
+ { SSICTL, 0x00000170 }, /* SSI control register */
+ { SSITIME, 0x00000250 }, /* SSI timing control register */
+ { SSICTL, 0x00000172 }, /* SSI control register */
+};
+
+
+static struct mddi_table mddi_sharp_init_table[] = {
+ { VCYCLE, 0x000001eb },
+ { HCYCLE, 0x000000ae },
+ { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
+ { GPIODATA, 0x00040000 }, /* GPIO2 low */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { 1, 1 }, /* msleep 1 */
+ { GPIODATA, 0x00040004 }, /* GPIO2 high */
+ { 1, 10 }, /* msleep 10 */
+ SPI_WRITE(0x5f, 0x01)
+ SPI_WRITE1(0x11)
+ { 1, 200 }, /* msleep 200 */
+ SPI_WRITE1(0x29)
+ SPI_WRITE1(0xde)
+ { START, 0x00000001 }, /* To start operation */
+};
+
+static struct mddi_table mddi_sharp_deinit_table[] = {
+ { 1, 200 }, /* msleep 200 */
+ SPI_WRITE(0x10, 0x1)
+ { 1, 100 }, /* msleep 100 */
+ { GPIODATA, 0x00040004 }, /* GPIO2 high */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { GPIODATA, 0x00040000 }, /* GPIO2 low */
+ { 1, 10 }, /* msleep 10 */
+};
+
+static struct mddi_table mddi_tpo_init_table[] = {
+ { VCYCLE, 0x000001e5 },
+ { HCYCLE, 0x000000ac },
+ { REGENB, 0x00000001 }, /* Set 1 to enable to change the value of registers. */
+ { 0, 20 }, /* udelay 20 */
+ { GPIODATA, 0x00000004 }, /* GPIO2 high */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { 0, 20 }, /* udelay 20 */
+
+ SPI_WRITE(0x08, 0x01)
+ { 0, 500 }, /* udelay 500 */
+ SPI_WRITE(0x08, 0x00)
+ SPI_WRITE(0x02, 0x00)
+ SPI_WRITE(0x03, 0x04)
+ SPI_WRITE(0x04, 0x0e)
+ SPI_WRITE(0x09, 0x02)
+ SPI_WRITE(0x0b, 0x08)
+ SPI_WRITE(0x0c, 0x53)
+ SPI_WRITE(0x0d, 0x01)
+ SPI_WRITE(0x0e, 0xe0)
+ SPI_WRITE(0x0f, 0x01)
+ SPI_WRITE(0x10, 0x58)
+ SPI_WRITE(0x20, 0x1e)
+ SPI_WRITE(0x21, 0x0a)
+ SPI_WRITE(0x22, 0x0a)
+ SPI_WRITE(0x23, 0x1e)
+ SPI_WRITE(0x25, 0x32)
+ SPI_WRITE(0x26, 0x00)
+ SPI_WRITE(0x27, 0xac)
+ SPI_WRITE(0x29, 0x06)
+ SPI_WRITE(0x2a, 0xa4)
+ SPI_WRITE(0x2b, 0x45)
+ SPI_WRITE(0x2c, 0x45)
+ SPI_WRITE(0x2d, 0x15)
+ SPI_WRITE(0x2e, 0x5a)
+ SPI_WRITE(0x2f, 0xff)
+ SPI_WRITE(0x30, 0x6b)
+ SPI_WRITE(0x31, 0x0d)
+ SPI_WRITE(0x32, 0x48)
+ SPI_WRITE(0x33, 0x82)
+ SPI_WRITE(0x34, 0xbd)
+ SPI_WRITE(0x35, 0xe7)
+ SPI_WRITE(0x36, 0x18)
+ SPI_WRITE(0x37, 0x94)
+ SPI_WRITE(0x38, 0x01)
+ SPI_WRITE(0x39, 0x5d)
+ SPI_WRITE(0x3a, 0xae)
+ SPI_WRITE(0x3b, 0xff)
+ SPI_WRITE(0x07, 0x09)
+ { 0, 10 }, /* udelay 10 */
+ { START, 0x00000001 }, /* To start operation */
+};
+
+static struct mddi_table mddi_tpo_deinit_table[] = {
+ SPI_WRITE(0x07, 0x19)
+ { START, 0x00000000 }, /* To stop operation */
+ { GPIODATA, 0x00040004 }, /* GPIO2 high */
+ { GPIODIR, 0x00000004 }, /* GPIO2 out */
+ { GPIODATA, 0x00040000 }, /* GPIO2 low */
+ { 0, 5 }, /* usleep 5 */
+};
+
+
+#define GPIOSEL_VWAKEINT (1U << 0)
+#define INTMASK_VWAKEOUT (1U << 0)
+
+static void trout_process_mddi_table(struct msm_mddi_client_data *cdata,
+ struct mddi_table *table, size_t count)
+{
+ int i;
+ for(i = 0; i < count; i++) {
+ uint32_t reg = table[i].reg;
+ uint32_t value = table[i].value;
+
+ if (reg == 0)
+ udelay(value);
+ else if (reg == 1)
+ msleep(value);
+ else
+ cdata->remote_write(cdata, value, reg);
+ }
+}
+
+static struct vreg *vreg_mddi_1v5;
+static struct vreg *vreg_lcm_2v85;
+
+static void trout_mddi_power_client(struct msm_mddi_client_data *cdata,
+ int on)
+{
+ unsigned id, on_off;
+ if(on) {
+ on_off = 0;
+ id = PM_VREG_PDOWN_MDDI_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+ vreg_enable(vreg_mddi_1v5);
+ mdelay(5); // delay time >5ms and <10ms
+ gpio_set_value(V_VDDE2E_VDD2_GPIO, 1);
+ gpio_set_value(TROUT_GPIO_MDDI_32K_EN, 1);
+ msleep(3);
+ id = PM_VREG_PDOWN_AUX_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+ vreg_enable(vreg_lcm_2v85);
+ msleep(3);
+ gpio_set_value(MDDI_RST_N, 1);
+ msleep(10);
+ } else {
+ gpio_set_value(TROUT_GPIO_MDDI_32K_EN, 0);
+ gpio_set_value(MDDI_RST_N, 0);
+ msleep(10);
+ vreg_disable(vreg_lcm_2v85);
+ on_off = 1;
+ id = PM_VREG_PDOWN_AUX_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+ msleep(5);
+ gpio_set_value(V_VDDE2E_VDD2_GPIO, 0);
+ msleep(200);
+ vreg_disable(vreg_mddi_1v5);
+ id = PM_VREG_PDOWN_MDDI_ID;
+ msm_proc_comm(PCOM_VREG_PULLDOWN, &on_off, &id);
+ }
+}
+
+static int trout_mddi_toshiba_client_init(struct msm_mddi_client_data *cdata)
+{
+ int panel_id;
+
+ cdata->auto_hibernate(cdata, 0);
+ trout_process_mddi_table(cdata, mddi_toshiba_init_table,
+ ARRAY_SIZE(mddi_toshiba_init_table));
+ cdata->auto_hibernate(cdata, 1);
+ panel_id = (cdata->remote_read(cdata, GPIODATA) >> 4) & 3;
+ if (panel_id > 1) {
+ printk("unknown panel id at mddi_enable\n");
+ return -1;
+ }
+ return 0;
+}
+
+static int trout_mddi_toshiba_client_uninit(struct msm_mddi_client_data *cdata)
+{
+ return 0;
+}
+
+static int trout_mddi_panel_unblank(struct msm_panel_data *panel_data)
+{
+ struct msm_mddi_panel_info *panel = container_of(panel_data,
+ struct msm_mddi_panel_info, panel_data);
+ struct msm_mddi_client_data *mddi_client = panel->client_data;
+
+ int panel_id, ret = 0;
+
+ trout_set_backlight_level(0);
+ mddi_client->auto_hibernate(mddi_client, 0);
+ trout_process_mddi_table(mddi_client, mddi_toshiba_panel_init_table,
+ ARRAY_SIZE(mddi_toshiba_panel_init_table));
+ panel_id = (mddi_client->remote_read(mddi_client, GPIODATA) >> 4) & 3;
+ switch(panel_id) {
+ case 0:
+ printk("init sharp panel\n");
+ trout_process_mddi_table(mddi_client,
+ mddi_sharp_init_table,
+ ARRAY_SIZE(mddi_sharp_init_table));
+ break;
+ case 1:
+ printk("init tpo panel\n");
+ trout_process_mddi_table(mddi_client,
+ mddi_tpo_init_table,
+ ARRAY_SIZE(mddi_tpo_init_table));
+ break;
+ default:
+ printk("unknown panel_id: %d\n", panel_id);
+ ret = -1;
+ };
+ mutex_lock(&trout_backlight_lock);
+ trout_set_backlight_level(trout_backlight_brightness);
+ trout_backlight_off = 0;
+ mutex_unlock(&trout_backlight_lock);
+ mddi_client->auto_hibernate(mddi_client, 1);
+ // reenable vsync
+ mddi_client->remote_write(mddi_client, GPIOSEL_VWAKEINT,
+ GPIOSEL);
+ mddi_client->remote_write(mddi_client, INTMASK_VWAKEOUT,
+ INTMASK);
+ return ret;
+
+}
+
+static int trout_mddi_panel_blank(struct msm_panel_data *panel_data)
+{
+ struct msm_mddi_panel_info *panel = container_of(panel_data,
+ struct msm_mddi_panel_info, panel_data);
+ struct msm_mddi_client_data *mddi_client = panel->client_data;
+ int panel_id, ret = 0;
+
+ panel_id = (mddi_client->remote_read(mddi_client, GPIODATA) >> 4) & 3;
+ mddi_client->auto_hibernate(mddi_client, 0);
+ switch(panel_id) {
+ case 0:
+ printk("deinit sharp panel\n");
+ trout_process_mddi_table(mddi_client,
+ mddi_sharp_deinit_table,
+ ARRAY_SIZE(mddi_sharp_deinit_table));
+ break;
+ case 1:
+ printk("deinit tpo panel\n");
+ trout_process_mddi_table(mddi_client,
+ mddi_tpo_deinit_table,
+ ARRAY_SIZE(mddi_tpo_deinit_table));
+ break;
+ default:
+ printk("unknown panel_id: %d\n", panel_id);
+ ret = -1;
+ };
+ mddi_client->auto_hibernate(mddi_client,1);
+ mutex_lock(&trout_backlight_lock);
+ trout_set_backlight_level(0);
+ trout_backlight_off = 1;
+ mutex_unlock(&trout_backlight_lock);
+ mddi_client->remote_write(mddi_client, 0, SYSCLKENA);
+ mddi_client->remote_write(mddi_client, 1, DPSUS);
+
+ return ret;
+}
+
+static void trout_brightness_set(struct led_classdev *led_cdev, enum led_brightness value)
+{
+ mutex_lock(&trout_backlight_lock);
+ trout_backlight_brightness = value;
+ if(!trout_backlight_off)
+ trout_set_backlight_level(trout_backlight_brightness);
+ mutex_unlock(&trout_backlight_lock);
+}
+
+static struct led_classdev trout_backlight_led = {
+ .name = "lcd-backlight",
+ .brightness = TROUT_DEFAULT_BACKLIGHT_BRIGHTNESS,
+ .brightness_set = trout_brightness_set,
+};
+
+static int trout_backlight_probe(struct platform_device *pdev)
+{
+ led_classdev_register(&pdev->dev, &trout_backlight_led);
+ return 0;
+}
+
+static int trout_backlight_remove(struct platform_device *pdev)
+{
+ led_classdev_unregister(&trout_backlight_led);
+ return 0;
+}
+
+static struct platform_driver trout_backlight_driver = {
+ .probe = trout_backlight_probe,
+ .remove = trout_backlight_remove,
+ .driver = {
+ .name = "trout-backlight",
+ .owner = THIS_MODULE,
+ },
+};
+
+static struct resource resources_msm_fb[] = {
+ {
+ .start = MSM_FB_BASE,
+ .end = MSM_FB_BASE + MSM_FB_SIZE,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct msm_mddi_toshiba_client_data toshiba_client_data = {
+ .init = trout_mddi_toshiba_client_init,
+ .uninit = trout_mddi_toshiba_client_uninit,
+ .blank = trout_mddi_panel_blank,
+ .unblank = trout_mddi_panel_unblank,
+ .fb_data = {
+ .xres = 320,
+ .yres = 480,
+ .width = 45,
+ .height = 67,
+ .output_format = 0,
+ },
+};
+
+struct msm_mddi_platform_data mddi_pdata = {
+ .clk_rate = 122880000,
+ .power_client = trout_mddi_power_client,
+ .fb_resource = resources_msm_fb,
+ .num_clients = 1,
+ .client_platform_data = {
+ {
+ .product_id = (0xd263 << 16 | 0),
+ .name = "mddi_c_d263_0000",
+ .id = 0,
+ .client_data = &toshiba_client_data,
+ .clk_rate = 0,
+ },
+ },
+};
+
+static struct platform_device trout_backlight = {
+ .name = "trout-backlight",
+};
+
+int __init trout_init_panel(void)
+{
+ int rc;
+
+ if (!machine_is_trout())
+ return 0;
+ vreg_mddi_1v5 = vreg_get(0, "gp2");
+ if (IS_ERR(vreg_mddi_1v5))
+ return PTR_ERR(vreg_mddi_1v5);
+ vreg_lcm_2v85 = vreg_get(0, "gp4");
+ if (IS_ERR(vreg_lcm_2v85))
+ return PTR_ERR(vreg_lcm_2v85);
+
+ trout_new_backlight = system_rev >= 5;
+ if (trout_new_backlight) {
+ uint32_t config = PCOM_GPIO_CFG(27, 0, GPIO_OUTPUT,
+ GPIO_NO_PULL, GPIO_8MA);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
+ }
+ else {
+ uint32_t config = PCOM_GPIO_CFG(27, 1, GPIO_OUTPUT,
+ GPIO_NO_PULL, GPIO_8MA);
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, 0);
+
+ gp_clk = clk_get(NULL, "gp_clk");
+ if (IS_ERR(gp_clk)) {
+ printk(KERN_ERR "trout_init_panel: could not get gp"
+ "clock\n");
+ gp_clk = NULL;
+ }
+ rc = clk_set_rate(gp_clk, 19200000);
+ if (rc)
+ printk(KERN_ERR "trout_init_panel: set clock rate "
+ "failed\n");
+ }
+
+ rc = platform_device_register(&msm_device_mdp);
+ if (rc)
+ return rc;
+ msm_device_mddi0.dev.platform_data = &mddi_pdata;
+ rc = platform_device_register(&msm_device_mddi0);
+ if (rc)
+ return rc;
+ platform_device_register(&trout_backlight);
+ return platform_driver_register(&trout_backlight_driver);
+}
+
+device_initcall(trout_init_panel);
diff --git a/arch/arm/mach-msm/board-trout-rfkill.c b/arch/arm/mach-msm/board-trout-rfkill.c
new file mode 100644
index 000000000000..5212431dda82
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout-rfkill.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Nick Pelly <npelly@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* Control bluetooth power for trout platform */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/rfkill.h>
+#include <linux/delay.h>
+#include <asm/gpio.h>
+
+#include "board-trout.h"
+
+void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state);
+
+static struct rfkill *bt_rfk;
+static const char bt_name[] = "brf6300";
+
+static int bluetooth_set_power(void *data, enum rfkill_state state)
+{
+ switch (state) {
+ case RFKILL_STATE_UNBLOCKED:
+ gpio_set_value(TROUT_GPIO_BT_32K_EN, 1);
+ udelay(10);
+ gpio_configure(101, GPIOF_DRIVE_OUTPUT | GPIOF_OUTPUT_HIGH);
+ break;
+ case RFKILL_STATE_SOFT_BLOCKED:
+ gpio_configure(101, GPIOF_DRIVE_OUTPUT | GPIOF_OUTPUT_LOW);
+ gpio_set_value(TROUT_GPIO_BT_32K_EN, 0);
+ break;
+ default:
+ printk(KERN_ERR "bad bluetooth rfkill state %d\n", state);
+ }
+ return 0;
+}
+
+static int __init trout_rfkill_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+
+ /* default to bluetooth off */
+ rfkill_switch_all(RFKILL_TYPE_BLUETOOTH, RFKILL_STATE_SOFT_BLOCKED);
+ bluetooth_set_power(NULL, RFKILL_STATE_SOFT_BLOCKED);
+
+ bt_rfk = rfkill_allocate(&pdev->dev, RFKILL_TYPE_BLUETOOTH);
+ if (!bt_rfk)
+ return -ENOMEM;
+
+ bt_rfk->name = bt_name;
+ bt_rfk->state = RFKILL_STATE_SOFT_BLOCKED;
+ /* userspace cannot take exclusive control */
+ bt_rfk->user_claim_unsupported = 1;
+ bt_rfk->user_claim = 0;
+ bt_rfk->data = NULL; // user data
+ bt_rfk->toggle_radio = bluetooth_set_power;
+
+ rc = rfkill_register(bt_rfk);
+
+ if (rc)
+ rfkill_free(bt_rfk);
+ return rc;
+}
+
+static struct platform_driver trout_rfkill_driver = {
+ .probe = trout_rfkill_probe,
+ .driver = {
+ .name = "trout_rfkill",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init trout_rfkill_init(void)
+{
+ return platform_driver_register(&trout_rfkill_driver);
+}
+
+module_init(trout_rfkill_init);
+MODULE_DESCRIPTION("trout rfkill");
+MODULE_AUTHOR("Nick Pelly <npelly@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/board-trout-wifi.c b/arch/arm/mach-msm/board-trout-wifi.c
new file mode 100644
index 000000000000..51b26a405369
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout-wifi.c
@@ -0,0 +1,74 @@
+/* arch/arm/mach-msm/board-trout-wifi.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Dmitry Shmidt <dimitrysh@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <linux/wifi_tiwlan.h>
+
+extern int trout_wifi_set_carddetect(int val);
+extern int trout_wifi_power(int on);
+extern int trout_wifi_reset(int on);
+
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+typedef struct wifi_mem_prealloc_struct {
+ void *mem_ptr;
+ unsigned long size;
+} wifi_mem_prealloc_t;
+
+static wifi_mem_prealloc_t wifi_mem_array[WMPA_NUMBER_OF_SECTIONS] = {
+ { NULL, (WMPA_SECTION_SIZE_0 + WMPA_SECTION_HEADER) },
+ { NULL, (WMPA_SECTION_SIZE_1 + WMPA_SECTION_HEADER) },
+ { NULL, (WMPA_SECTION_SIZE_2 + WMPA_SECTION_HEADER) }
+};
+
+static void *trout_wifi_mem_prealloc(int section, unsigned long size)
+{
+ if( (section < 0) || (section >= WMPA_NUMBER_OF_SECTIONS) )
+ return NULL;
+ if( wifi_mem_array[section].size < size )
+ return NULL;
+ return wifi_mem_array[section].mem_ptr;
+}
+
+int __init trout_init_wifi_mem( void )
+{
+ int i;
+
+ for(i=0;( i < WMPA_NUMBER_OF_SECTIONS );i++) {
+ wifi_mem_array[i].mem_ptr = vmalloc(wifi_mem_array[i].size);
+ if( wifi_mem_array[i].mem_ptr == NULL )
+ return -ENOMEM;
+ }
+ return 0;
+}
+#endif
+
+struct wifi_platform_data trout_wifi_control = {
+ .set_power = trout_wifi_power,
+ .set_reset = trout_wifi_reset,
+ .set_carddetect = trout_wifi_set_carddetect,
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+ .mem_prealloc = trout_wifi_mem_prealloc,
+#else
+ .mem_prealloc = NULL,
+#endif
+};
+
+#endif
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
new file mode 100644
index 000000000000..637253ec3f78
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout.c
@@ -0,0 +1,841 @@
+/* arch/arm/mach-msm/board-trout.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/keyreset.h>
+#include <linux/leds.h>
+#include <linux/switch.h>
+#include <linux/../../../drivers/staging/android/timed_gpio.h>
+#include <linux/synaptics_i2c_rmi.h>
+#include <linux/akm8976.h>
+#include <linux/sysdev.h>
+#include <linux/android_pmem.h>
+
+#include <linux/delay.h>
+
+#include <asm/gpio.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+#include <asm/system.h>
+#include <mach/system.h>
+#include <mach/vreg.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+#include <asm/setup.h>
+
+#include <linux/gpio_event.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach/mmc.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/msm_audio.h>
+
+#include "board-trout.h"
+
+#include "gpio_chip.h"
+#include "pm.h"
+
+#include <mach/board.h>
+#include <mach/board_htc.h>
+#include <mach/msm_serial_hs.h>
+#include <mach/htc_pwrsink.h>
+#ifdef CONFIG_HTC_HEADSET
+#include <mach/htc_headset.h>
+#endif
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#include <linux/wifi_tiwlan.h>
+#endif
+
+#include "proc_comm.h"
+#include "devices.h"
+
+void msm_init_irq(void);
+void msm_init_gpio(void);
+
+extern int trout_init_mmc(unsigned int);
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+extern int trout_init_wifi_mem(void);
+#endif
+extern struct wifi_platform_data trout_wifi_control;
+#endif
+
+struct trout_axis_info {
+ struct gpio_event_axis_info info;
+ uint16_t in_state;
+ uint16_t out_state;
+};
+static bool nav_just_on;
+static int nav_on_jiffies;
+
+uint16_t trout_axis_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+ struct trout_axis_info *ai = container_of(info, struct trout_axis_info, info);
+ uint16_t out = ai->out_state;
+
+ if (nav_just_on) {
+ if (jiffies == nav_on_jiffies || jiffies == nav_on_jiffies + 1)
+ goto ignore;
+ nav_just_on = 0;
+ }
+ if((ai->in_state ^ in) & 1)
+ out--;
+ if((ai->in_state ^ in) & 2)
+ out++;
+ ai->out_state = out;
+ignore:
+ ai->in_state = in;
+ return out;
+}
+
+int trout_nav_power(const struct gpio_event_platform_data *pdata, bool on)
+{
+ gpio_set_value(TROUT_GPIO_JOG_EN, on);
+ if (on) {
+ nav_just_on = 1;
+ nav_on_jiffies = jiffies;
+ }
+ return 0;
+}
+
+static uint32_t trout_4_x_axis_gpios[] = {
+ TROUT_4_BALL_LEFT_0, TROUT_4_BALL_RIGHT_0
+};
+static uint32_t trout_5_x_axis_gpios[] = {
+ TROUT_5_BALL_LEFT_0, TROUT_5_BALL_RIGHT_0
+};
+
+static struct trout_axis_info trout_x_axis = {
+ .info = {
+ .info.func = gpio_event_axis_func,
+ .count = ARRAY_SIZE(trout_5_x_axis_gpios),
+ .type = EV_REL,
+ .code = REL_X,
+ .decoded_size = 1U << ARRAY_SIZE(trout_5_x_axis_gpios),
+ .map = trout_axis_map,
+ .gpio = trout_5_x_axis_gpios,
+ .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */
+ }
+};
+
+static uint32_t trout_4_y_axis_gpios[] = {
+ TROUT_4_BALL_UP_0, TROUT_4_BALL_DOWN_0
+};
+static uint32_t trout_5_y_axis_gpios[] = {
+ TROUT_5_BALL_UP_0, TROUT_5_BALL_DOWN_0
+};
+
+static struct trout_axis_info trout_y_axis = {
+ .info = {
+ .info.func = gpio_event_axis_func,
+ .count = ARRAY_SIZE(trout_5_y_axis_gpios),
+ .type = EV_REL,
+ .code = REL_Y,
+ .decoded_size = 1U << ARRAY_SIZE(trout_5_y_axis_gpios),
+ .map = trout_axis_map,
+ .gpio = trout_5_y_axis_gpios,
+ .flags = GPIOEAF_PRINT_UNKNOWN_DIRECTION /*| GPIOEAF_PRINT_RAW | GPIOEAF_PRINT_EVENT */
+ }
+};
+
+static struct gpio_event_direct_entry trout_nav_buttons[] = {
+ { TROUT_GPIO_NAVI_ACT_N, BTN_MOUSE }
+};
+
+static struct gpio_event_input_info trout_nav_button_info = {
+ .info.func = gpio_event_input_func,
+ .flags = 0,
+ .type = EV_KEY,
+ .keymap = trout_nav_buttons,
+ .keymap_size = ARRAY_SIZE(trout_nav_buttons)
+};
+
+static struct gpio_event_info *trout_nav_info[] = {
+ &trout_x_axis.info.info,
+ &trout_y_axis.info.info,
+ &trout_nav_button_info.info
+};
+
+static struct gpio_event_platform_data trout_nav_data = {
+ .name = "trout-nav",
+ .info = trout_nav_info,
+ .info_count = ARRAY_SIZE(trout_nav_info),
+ .power = trout_nav_power,
+};
+
+static struct platform_device trout_nav_device = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = 2,
+ .dev = {
+ .platform_data = &trout_nav_data,
+ },
+};
+
+static int trout_reset_keys_up[] = {
+ BTN_MOUSE,
+ 0
+};
+
+static struct keyreset_platform_data trout_reset_keys_pdata = {
+ .keys_up = trout_reset_keys_up,
+ .keys_down = {
+ KEY_SEND,
+ KEY_MENU,
+ KEY_END,
+ 0
+ },
+};
+
+struct platform_device trout_reset_keys_device = {
+ .name = KEYRESET_NAME,
+ .dev.platform_data = &trout_reset_keys_pdata,
+};
+
+static int trout_ts_power(int on)
+{
+ int tp_ls_gpio = system_rev < 5 ? TROUT_4_TP_LS_EN : TROUT_5_TP_LS_EN;
+ if (on) {
+ gpio_set_value(TROUT_GPIO_TP_I2C_PULL, 1);
+ gpio_set_value(TROUT_GPIO_TP_EN, 1);
+ /* touchscreen must be powered before we enable i2c pullup */
+ msleep(2);
+ /* enable touch panel level shift */
+ gpio_set_value(tp_ls_gpio, 1);
+ msleep(2);
+ }
+ else {
+ gpio_set_value(tp_ls_gpio, 0);
+ udelay(50);
+ gpio_set_value(TROUT_GPIO_TP_EN, 0);
+ gpio_set_value(TROUT_GPIO_TP_I2C_PULL, 0);
+ }
+ return 0;
+}
+
+static struct synaptics_i2c_rmi_platform_data trout_ts_data[] = {
+ {
+ .version = 0x010c,
+ .power = trout_ts_power,
+ .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE,
+ .inactive_left = -100 * 0x10000 / 4334,
+ .inactive_right = -100 * 0x10000 / 4334,
+ .inactive_top = -40 * 0x10000 / 6696,
+ .inactive_bottom = -40 * 0x10000 / 6696,
+ .snap_left_on = 300 * 0x10000 / 4334,
+ .snap_left_off = 310 * 0x10000 / 4334,
+ .snap_right_on = 300 * 0x10000 / 4334,
+ .snap_right_off = 310 * 0x10000 / 4334,
+ .snap_top_on = 100 * 0x10000 / 6696,
+ .snap_top_off = 110 * 0x10000 / 6696,
+ .snap_bottom_on = 100 * 0x10000 / 6696,
+ .snap_bottom_off = 110 * 0x10000 / 6696,
+ },
+ {
+ .flags = SYNAPTICS_FLIP_Y | SYNAPTICS_SNAP_TO_INACTIVE_EDGE,
+ .inactive_left = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334,
+ .inactive_right = ((4674 - 4334) / 2 + 200) * 0x10000 / 4334,
+ .inactive_top = ((6946 - 6696) / 2) * 0x10000 / 6696,
+ .inactive_bottom = ((6946 - 6696) / 2) * 0x10000 / 6696,
+ }
+};
+
+static struct akm8976_platform_data compass_platform_data = {
+ .reset = TROUT_GPIO_COMPASS_RST_N,
+ .clk_on = TROUT_GPIO_COMPASS_32K_EN,
+ .intr = TROUT_GPIO_COMPASS_IRQ,
+};
+
+static struct i2c_board_info i2c_devices[] = {
+ {
+ I2C_BOARD_INFO(SYNAPTICS_I2C_RMI_NAME, 0x20),
+ .platform_data = trout_ts_data,
+ .irq = TROUT_GPIO_TO_INT(TROUT_GPIO_TP_ATT_N)
+ },
+ {
+ I2C_BOARD_INFO("akm8976", 0x1C),
+ .platform_data = &compass_platform_data,
+ .irq = TROUT_GPIO_TO_INT(TROUT_GPIO_COMPASS_IRQ),
+ },
+ {
+ I2C_BOARD_INFO("pca963x", 0x62),
+ },
+};
+
+static struct android_pmem_platform_data android_pmem_pdata = {
+ .name = "pmem",
+ .start = MSM_PMEM_MDP_BASE,
+ .size = MSM_PMEM_MDP_SIZE,
+ .no_allocator = 1,
+ .cached = 1,
+};
+
+static struct android_pmem_platform_data android_pmem_adsp_pdata = {
+ .name = "pmem_adsp",
+ .start = MSM_PMEM_ADSP_BASE,
+ .size = MSM_PMEM_ADSP_SIZE,
+ .no_allocator = 0,
+ .cached = 0,
+};
+
+static struct android_pmem_platform_data android_pmem_camera_pdata = {
+ .name = "pmem_camera",
+ .start = MSM_PMEM_CAMERA_BASE,
+ .size = MSM_PMEM_CAMERA_SIZE,
+ .no_allocator = 0,
+ .cached = 0,
+};
+
+static struct android_pmem_platform_data android_pmem_gpu0_pdata = {
+ .name = "pmem_gpu0",
+ .start = MSM_PMEM_GPU0_BASE,
+ .size = MSM_PMEM_GPU0_SIZE,
+ .no_allocator = 1,
+ .cached = 0,
+ .buffered = 1,
+};
+
+static struct android_pmem_platform_data android_pmem_gpu1_pdata = {
+ .name = "pmem_gpu1",
+ .start = MSM_PMEM_GPU1_BASE,
+ .size = MSM_PMEM_GPU1_SIZE,
+ .no_allocator = 1,
+ .cached = 0,
+ .buffered = 1,
+};
+
+static struct platform_device android_pmem_device = {
+ .name = "android_pmem",
+ .id = 0,
+ .dev = { .platform_data = &android_pmem_pdata },
+};
+
+static struct platform_device android_pmem_adsp_device = {
+ .name = "android_pmem",
+ .id = 1,
+ .dev = { .platform_data = &android_pmem_adsp_pdata },
+};
+
+static struct platform_device android_pmem_gpu0_device = {
+ .name = "android_pmem",
+ .id = 2,
+ .dev = { .platform_data = &android_pmem_gpu0_pdata },
+};
+
+static struct platform_device android_pmem_gpu1_device = {
+ .name = "android_pmem",
+ .id = 3,
+ .dev = { .platform_data = &android_pmem_gpu1_pdata },
+};
+
+static struct platform_device android_pmem_camera_device = {
+ .name = "android_pmem",
+ .id = 4,
+ .dev = { .platform_data = &android_pmem_camera_pdata },
+};
+
+static struct timed_gpio timed_gpios[] = {
+ {
+ .name = "vibrator",
+ .gpio = TROUT_GPIO_HAPTIC_PWM,
+ .max_timeout = 15000,
+ },
+ {
+ .name = "flash",
+ .gpio = TROUT_GPIO_FLASH_EN,
+ .max_timeout = 400,
+ },
+};
+
+static struct timed_gpio_platform_data timed_gpio_data = {
+ .num_gpios = ARRAY_SIZE(timed_gpios),
+ .gpios = timed_gpios,
+};
+
+static struct platform_device android_timed_gpios = {
+ .name = "timed-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &timed_gpio_data,
+ },
+};
+
+static struct gpio_led android_led_list[] = {
+ {
+ .name = "spotlight",
+ .gpio = TROUT_GPIO_SPOTLIGHT_EN,
+ },
+ {
+ .name = "keyboard-backlight",
+ .gpio = TROUT_GPIO_QTKEY_LED_EN,
+ },
+ {
+ .name = "button-backlight",
+ .gpio = TROUT_GPIO_UI_LED_EN,
+ },
+};
+
+static struct gpio_led_platform_data android_leds_data = {
+ .num_leds = ARRAY_SIZE(android_led_list),
+ .leds = android_led_list,
+};
+
+static struct platform_device android_leds = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &android_leds_data,
+ },
+};
+
+static struct gpio_switch_platform_data sd_door_switch_data = {
+ .name = "sd-door",
+ .gpio = TROUT_GPIO_SD_DOOR_N,
+ .state_on = "open",
+ .state_off = "closed",
+};
+
+static struct platform_device sd_door_switch = {
+ .name = "switch-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &sd_door_switch_data,
+ },
+};
+
+#ifdef CONFIG_HTC_HEADSET
+static void h2w_config_cpld(int route)
+{
+ switch (route) {
+ case H2W_UART3:
+ gpio_set_value(TROUT_GPIO_H2W_SEL0, 0);
+ gpio_set_value(TROUT_GPIO_H2W_SEL1, 1);
+ break;
+ case H2W_GPIO:
+ gpio_set_value(TROUT_GPIO_H2W_SEL0, 0);
+ gpio_set_value(TROUT_GPIO_H2W_SEL1, 0);
+ break;
+ }
+}
+
+static void h2w_init_cpld(void)
+{
+ h2w_config_cpld(H2W_UART3);
+ gpio_set_value(TROUT_GPIO_H2W_CLK_DIR, 0);
+ gpio_set_value(TROUT_GPIO_H2W_DAT_DIR, 0);
+}
+
+static struct h2w_platform_data trout_h2w_data = {
+ .cable_in1 = TROUT_GPIO_CABLE_IN1,
+ .cable_in2 = TROUT_GPIO_CABLE_IN2,
+ .h2w_clk = TROUT_GPIO_H2W_CLK_GPI,
+ .h2w_data = TROUT_GPIO_H2W_DAT_GPI,
+ .debug_uart = H2W_UART3,
+ .config_cpld = h2w_config_cpld,
+ .init_cpld = h2w_init_cpld,
+};
+
+static struct platform_device trout_h2w = {
+ .name = "h2w",
+ .id = -1,
+ .dev = {
+ .platform_data = &trout_h2w_data,
+ },
+};
+#endif
+
+static void trout_phy_reset(void)
+{
+ gpio_set_value(TROUT_GPIO_USB_PHY_RST_N, 0);
+ mdelay(10);
+ gpio_set_value(TROUT_GPIO_USB_PHY_RST_N, 1);
+ mdelay(10);
+}
+
+static struct resource trout_ram_console_resource[] = {
+ {
+ .start = MSM_RAM_CONSOLE_BASE,
+ .end = MSM_RAM_CONSOLE_BASE + MSM_RAM_CONSOLE_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device trout_ram_console_device = {
+ .name = "ram_console",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(trout_ram_console_resource),
+ .resource = trout_ram_console_resource,
+};
+
+static struct pwr_sink trout_pwrsink_table[] = {
+ {
+ .id = PWRSINK_AUDIO,
+ .ua_max = 90000,
+ },
+ {
+ .id = PWRSINK_BACKLIGHT,
+ .ua_max = 128000,
+ },
+ {
+ .id = PWRSINK_LED_BUTTON,
+ .ua_max = 17000,
+ },
+ {
+ .id = PWRSINK_LED_KEYBOARD,
+ .ua_max = 22000,
+ },
+ {
+ .id = PWRSINK_GP_CLK,
+ .ua_max = 30000,
+ },
+ {
+ .id = PWRSINK_BLUETOOTH,
+ .ua_max = 15000,
+ },
+ {
+ .id = PWRSINK_CAMERA,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_SDCARD,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_VIDEO,
+ .ua_max = 0,
+ },
+ {
+ .id = PWRSINK_WIFI,
+ .ua_max = 200000,
+ },
+ {
+ .id = PWRSINK_SYSTEM_LOAD,
+ .ua_max = 63000,
+ .percent_util = 100,
+ },
+};
+
+static struct pwr_sink_platform_data trout_pwrsink_data = {
+ .num_sinks = ARRAY_SIZE(trout_pwrsink_table),
+ .sinks = trout_pwrsink_table,
+ .suspend_late = NULL,
+ .resume_early = NULL,
+ .suspend_early = NULL,
+ .resume_late = NULL,
+};
+
+static struct platform_device trout_pwr_sink = {
+ .name = "htc_pwrsink",
+ .id = -1,
+ .dev = {
+ .platform_data = &trout_pwrsink_data,
+ },
+};
+
+static struct platform_device trout_rfkill = {
+ .name = "trout_rfkill",
+ .id = -1,
+};
+
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+static struct platform_device trout_wifi = {
+ .name = "msm_wifi",
+ .id = 1,
+ .num_resources = 0,
+ .resource = NULL,
+ .dev = {
+ .platform_data = &trout_wifi_control,
+ },
+};
+#endif
+
+#define SND(num, desc) { .name = desc, .id = num }
+static struct snd_endpoint snd_endpoints_list[] = {
+ SND(0, "HANDSET"),
+ SND(1, "SPEAKER"),
+ SND(2, "HEADSET"),
+ SND(3, "BT"),
+ SND(44, "BT_EC_OFF"),
+ SND(10, "HEADSET_AND_SPEAKER"),
+ SND(256, "CURRENT"),
+
+ /* Bluetooth accessories. */
+
+ SND(12, "HTC BH S100"),
+ SND(13, "HTC BH M100"),
+ SND(14, "Motorola H500"),
+ SND(15, "Nokia HS-36W"),
+ SND(16, "PLT 510v.D"),
+ SND(17, "M2500 by Plantronics"),
+ SND(18, "Nokia HDW-3"),
+ SND(19, "HBH-608"),
+ SND(20, "HBH-DS970"),
+ SND(21, "i.Tech BlueBAND"),
+ SND(22, "Nokia BH-800"),
+ SND(23, "Motorola H700"),
+ SND(24, "HTC BH M200"),
+ SND(25, "Jabra JX10"),
+ SND(26, "320Plantronics"),
+ SND(27, "640Plantronics"),
+ SND(28, "Jabra BT500"),
+ SND(29, "Motorola HT820"),
+ SND(30, "HBH-IV840"),
+ SND(31, "6XXPlantronics"),
+ SND(32, "3XXPlantronics"),
+ SND(33, "HBH-PV710"),
+ SND(34, "Motorola H670"),
+ SND(35, "HBM-300"),
+ SND(36, "Nokia BH-208"),
+ SND(37, "Samsung WEP410"),
+ SND(38, "Jabra BT8010"),
+ SND(39, "Motorola S9"),
+ SND(40, "Jabra BT620s"),
+ SND(41, "Nokia BH-902"),
+ SND(42, "HBH-DS220"),
+ SND(43, "HBH-DS980"),
+};
+#undef SND
+
+static struct msm_snd_endpoints trout_snd_endpoints = {
+ .endpoints = snd_endpoints_list,
+ .num = ARRAY_SIZE(snd_endpoints_list),
+};
+
+static struct platform_device trout_snd = {
+ .name = "msm_snd",
+ .id = -1,
+ .dev = {
+ .platform_data = &trout_snd_endpoints,
+ },
+};
+
+static struct platform_device *devices[] __initdata = {
+ &msm_device_smd,
+ &msm_device_dmov,
+ &msm_device_nand,
+ &msm_device_i2c,
+ &msm_device_uart1,
+#if !defined(CONFIG_MSM_SERIAL_DEBUGGER) && !defined(CONFIG_TROUT_H2W)
+ &msm_device_uart3,
+#endif
+#ifdef CONFIG_SERIAL_MSM_HS
+ &msm_device_uart_dm1,
+#endif
+ &trout_nav_device,
+ &trout_reset_keys_device,
+ &android_leds,
+ &sd_door_switch,
+ &android_timed_gpios,
+ &android_pmem_device,
+ &android_pmem_adsp_device,
+ &android_pmem_gpu0_device,
+ &android_pmem_gpu1_device,
+ &android_pmem_camera_device,
+ &trout_ram_console_device,
+ &trout_rfkill,
+#ifdef CONFIG_WIFI_CONTROL_FUNC
+ &trout_wifi,
+#endif
+#ifdef CONFIG_HTC_HEADSET
+ &trout_h2w,
+#endif
+#ifdef CONFIG_HTC_PWRSINK
+ &trout_pwr_sink,
+#endif
+ &trout_snd,
+};
+
+extern struct sys_timer msm_timer;
+
+static void __init trout_init_irq(void)
+{
+ printk("trout_init_irq()\n");
+ msm_init_irq();
+}
+
+static uint opt_disable_uart3;
+
+module_param_named(disable_uart3, opt_disable_uart3, uint, 0);
+
+static void trout_reset(void)
+{
+ gpio_set_value(TROUT_GPIO_PS_HOLD, 0);
+}
+
+static uint32_t gpio_table[] = {
+ /* BLUETOOTH */
+#ifdef CONFIG_SERIAL_MSM_HS
+ PCOM_GPIO_CFG(43, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */
+ PCOM_GPIO_CFG(44, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */
+ PCOM_GPIO_CFG(45, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */
+ PCOM_GPIO_CFG(46, 3, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */
+#else
+ PCOM_GPIO_CFG(43, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RTS */
+ PCOM_GPIO_CFG(44, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* CTS */
+ PCOM_GPIO_CFG(45, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* RX */
+ PCOM_GPIO_CFG(46, 1, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* TX */
+#endif
+};
+
+static void config_gpio_table(uint32_t *table, int len)
+{
+ int n;
+ unsigned id;
+ for(n = 0; n < len; n++) {
+ id = table[n];
+ msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0);
+ }
+}
+
+static void __init config_gpios(void)
+{
+ config_gpio_table(gpio_table, ARRAY_SIZE(gpio_table));
+}
+
+void msm_serial_debug_init(unsigned int base, int irq,
+ struct device *clk_device, int signal_irq);
+
+static struct msm_acpu_clock_platform_data trout_clock_data = {
+ .acpu_switch_time_us = 20,
+ .max_speed_delta_khz = 256000,
+ .vdd_switch_time_us = 62,
+ .power_collapse_khz = 19200000,
+ .wait_for_irq_khz = 128000000,
+};
+
+#ifdef CONFIG_SERIAL_MSM_HS
+static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = {
+ .wakeup_irq = MSM_GPIO_TO_INT(45),
+ .inject_rx_on_wakeup = 1,
+ .rx_to_inject = 0x32,
+};
+#endif
+
+static struct msm_pm_platform_data msm_pm_data[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE].latency = 16000,
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN].latency = 12000,
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT].latency = 2000,
+};
+
+static void __init trout_init(void)
+{
+ int rc;
+
+ printk("trout_init() revision=%d\n", system_rev);
+
+ /*
+ * Setup common MSM GPIOS
+ */
+ config_gpios();
+
+ msm_hw_reset_hook = trout_reset;
+
+ gpio_direction_output(system_rev < 5 ?
+ TROUT_4_TP_LS_EN : TROUT_5_TP_LS_EN, 0);
+
+ msm_acpu_clock_init(&trout_clock_data);
+
+#if defined(CONFIG_MSM_SERIAL_DEBUGGER)
+ if (!opt_disable_uart3)
+ msm_serial_debug_init(MSM_UART3_PHYS, INT_UART3,
+ &msm_device_uart3.dev, 1);
+#endif
+
+ /* gpio_configure(108, IRQF_TRIGGER_LOW); */
+
+ /* put the AF VCM in powerdown mode to avoid noise */
+ gpio_set_value(TROUT_GPIO_VCM_PWDN, 1);
+ mdelay(100);
+
+ if (system_rev < 5) {
+ trout_x_axis.info.gpio = trout_4_x_axis_gpios;
+ trout_y_axis.info.gpio = trout_4_y_axis_gpios;
+ }
+
+#ifdef CONFIG_SERIAL_MSM_HS
+ msm_device_uart_dm1.dev.platform_data = &msm_uart_dm1_pdata;
+#endif
+ msm_add_usb_devices(trout_phy_reset);
+
+ rc = trout_init_mmc(system_rev);
+ if (rc)
+ printk(KERN_CRIT "%s: MMC init failure (%d)\n", __func__, rc);
+
+#ifdef CONFIG_WIFI_MEM_PREALLOC
+ rc = trout_init_wifi_mem();
+ if (rc)
+ printk(KERN_CRIT "%s: WiFi Memory init failure (%d)\n", __func__, rc);
+#endif
+
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+ i2c_register_board_info(0, i2c_devices, ARRAY_SIZE(i2c_devices));
+ msm_pm_set_platform_data(msm_pm_data);
+
+ /* SD card door should wake the device */
+ set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SD_DOOR_N), 1);
+}
+
+static struct map_desc trout_io_desc[] __initdata = {
+ {
+ .virtual = TROUT_CPLD_BASE,
+ .pfn = __phys_to_pfn(TROUT_CPLD_START),
+ .length = TROUT_CPLD_SIZE,
+ .type = MT_DEVICE_NONSHARED
+ }
+};
+
+static void __init trout_fixup(struct machine_desc *desc, struct tag *tags,
+ char **cmdline, struct meminfo *mi)
+{
+ mi->nr_banks=1;
+ mi->bank[0].start = PHYS_OFFSET;
+ mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
+ mi->bank[0].size = (101*1024*1024);
+}
+
+static void __init trout_map_io(void)
+{
+ msm_shared_ram_phys = 0x01F00000;
+
+ msm_map_common_io();
+ iotable_init(trout_io_desc, ARRAY_SIZE(trout_io_desc));
+ msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
+}
+
+MACHINE_START(TROUT, "trout")
+/* Maintainer: Brian Swetland <swetland@google.com> */
+
+/* this is broken... can we just opt out of specifying something here? */
+ .phys_io = 0x80000000,
+ .io_pg_offst = ((0x80000000) >> 18) & 0xfffc,
+
+ .boot_params = 0x10000100,
+ .fixup = trout_fixup,
+ .map_io = trout_map_io,
+ .init_irq = trout_init_irq,
+ .init_machine = trout_init,
+ .timer = &msm_timer,
+MACHINE_END
diff --git a/arch/arm/mach-msm/board-trout.h b/arch/arm/mach-msm/board-trout.h
new file mode 100644
index 000000000000..308c4df90189
--- /dev/null
+++ b/arch/arm/mach-msm/board-trout.h
@@ -0,0 +1,162 @@
+/* linux/arch/arm/mach-msm/board-trout.h
+** Author: Brian Swetland <swetland@google.com>
+*/
+#ifndef __ARCH_ARM_MACH_MSM_BOARD_TROUT_H
+#define __ARCH_ARM_MACH_MSM_BOARD_TROUT_H
+
+#include <mach/board.h>
+
+#define MSM_SMI_BASE 0x00000000
+#define MSM_SMI_SIZE 0x00800000
+
+#define MSM_EBI_BASE 0x10000000
+#define MSM_EBI_SIZE 0x06e00000
+
+#define MSM_PMEM_GPU0_BASE 0x00000000
+#define MSM_PMEM_GPU0_SIZE 0x00700000
+
+#define MSM_PMEM_MDP_BASE 0x02000000
+#define MSM_PMEM_MDP_SIZE 0x00800000
+
+#define MSM_PMEM_ADSP_BASE 0x02800000
+#define MSM_PMEM_ADSP_SIZE 0x00800000
+
+#define MSM_PMEM_CAMERA_BASE 0x03000000
+#define MSM_PMEM_CAMERA_SIZE 0x00800000
+
+#define MSM_FB_BASE 0x03800000
+#define MSM_FB_SIZE 0x00100000
+
+#define MSM_LINUX_BASE MSM_EBI_BASE
+#define MSM_LINUX_SIZE 0x06500000
+
+#define MSM_PMEM_GPU1_SIZE 0x800000
+#define MSM_PMEM_GPU1_BASE MSM_RAM_CONSOLE_BASE - MSM_PMEM_GPU1_SIZE
+
+#define MSM_RAM_CONSOLE_BASE MSM_EBI_BASE + 0x6d00000
+#define MSM_RAM_CONSOLE_SIZE 128 * SZ_1K
+
+#if (MSM_FB_BASE + MSM_FB_SIZE) >= (MSM_PMEM_GPU1_BASE)
+#error invalid memory map
+#endif
+
+#define DECLARE_MSM_IOMAP
+#include <mach/msm_iomap.h>
+
+#define TROUT_4_BALL_UP_0 1
+#define TROUT_4_BALL_LEFT_0 18
+#define TROUT_4_BALL_DOWN_0 57
+#define TROUT_4_BALL_RIGHT_0 91
+
+#define TROUT_5_BALL_UP_0 94
+#define TROUT_5_BALL_LEFT_0 18
+#define TROUT_5_BALL_DOWN_0 90
+#define TROUT_5_BALL_RIGHT_0 19
+
+#define TROUT_POWER_KEY 20
+
+#define TROUT_4_TP_LS_EN 19
+#define TROUT_5_TP_LS_EN 1
+
+#define TROUT_CPLD_BASE 0xE8100000
+#define TROUT_CPLD_START 0x98000000
+#define TROUT_CPLD_SIZE SZ_4K
+
+#define TROUT_GPIO_CABLE_IN1 (83)
+#define TROUT_GPIO_CABLE_IN2 (49)
+
+#define TROUT_GPIO_START (128)
+
+#define TROUT_GPIO_INT_MASK0_REG (0x0c)
+#define TROUT_GPIO_INT_STAT0_REG (0x0e)
+#define TROUT_GPIO_INT_MASK1_REG (0x14)
+#define TROUT_GPIO_INT_STAT1_REG (0x10)
+
+#define TROUT_GPIO_HAPTIC_PWM (28)
+#define TROUT_GPIO_PS_HOLD (25)
+
+#define TROUT_GPIO_MISC2_BASE (TROUT_GPIO_START + 0x00)
+#define TROUT_GPIO_MISC3_BASE (TROUT_GPIO_START + 0x08)
+#define TROUT_GPIO_MISC4_BASE (TROUT_GPIO_START + 0x10)
+#define TROUT_GPIO_MISC5_BASE (TROUT_GPIO_START + 0x18)
+#define TROUT_GPIO_INT2_BASE (TROUT_GPIO_START + 0x20)
+#define TROUT_GPIO_MISC1_BASE (TROUT_GPIO_START + 0x28)
+#define TROUT_GPIO_VIRTUAL_BASE (TROUT_GPIO_START + 0x30)
+#define TROUT_GPIO_INT5_BASE (TROUT_GPIO_START + 0x48)
+
+#define TROUT_GPIO_CHARGER_EN (TROUT_GPIO_MISC2_BASE + 0)
+#define TROUT_GPIO_ISET (TROUT_GPIO_MISC2_BASE + 1)
+#define TROUT_GPIO_H2W_DAT_DIR (TROUT_GPIO_MISC2_BASE + 2)
+#define TROUT_GPIO_H2W_CLK_DIR (TROUT_GPIO_MISC2_BASE + 3)
+#define TROUT_GPIO_H2W_DAT_GPO (TROUT_GPIO_MISC2_BASE + 4)
+#define TROUT_GPIO_H2W_CLK_GPO (TROUT_GPIO_MISC2_BASE + 5)
+#define TROUT_GPIO_H2W_SEL0 (TROUT_GPIO_MISC2_BASE + 6)
+#define TROUT_GPIO_H2W_SEL1 (TROUT_GPIO_MISC2_BASE + 7)
+
+#define TROUT_GPIO_SPOTLIGHT_EN (TROUT_GPIO_MISC3_BASE + 0)
+#define TROUT_GPIO_FLASH_EN (TROUT_GPIO_MISC3_BASE + 1)
+#define TROUT_GPIO_I2C_PULL (TROUT_GPIO_MISC3_BASE + 2)
+#define TROUT_GPIO_TP_I2C_PULL (TROUT_GPIO_MISC3_BASE + 3)
+#define TROUT_GPIO_TP_EN (TROUT_GPIO_MISC3_BASE + 4)
+#define TROUT_GPIO_JOG_EN (TROUT_GPIO_MISC3_BASE + 5)
+#define TROUT_GPIO_UI_LED_EN (TROUT_GPIO_MISC3_BASE + 6)
+#define TROUT_GPIO_QTKEY_LED_EN (TROUT_GPIO_MISC3_BASE + 7)
+
+#define TROUT_GPIO_VCM_PWDN (TROUT_GPIO_MISC4_BASE + 0)
+#define TROUT_GPIO_USB_H2W_SW (TROUT_GPIO_MISC4_BASE + 1)
+#define TROUT_GPIO_COMPASS_RST_N (TROUT_GPIO_MISC4_BASE + 2)
+#define TROUT_GPIO_HAPTIC_EN_UP (TROUT_GPIO_MISC4_BASE + 3)
+#define TROUT_GPIO_HAPTIC_EN_MAIN (TROUT_GPIO_MISC4_BASE + 4)
+#define TROUT_GPIO_USB_PHY_RST_N (TROUT_GPIO_MISC4_BASE + 5)
+#define TROUT_GPIO_WIFI_PA_RESETX (TROUT_GPIO_MISC4_BASE + 6)
+#define TROUT_GPIO_WIFI_EN (TROUT_GPIO_MISC4_BASE + 7)
+
+#define TROUT_GPIO_BT_32K_EN (TROUT_GPIO_MISC5_BASE + 0)
+#define TROUT_GPIO_MAC_32K_EN (TROUT_GPIO_MISC5_BASE + 1)
+#define TROUT_GPIO_MDDI_32K_EN (TROUT_GPIO_MISC5_BASE + 2)
+#define TROUT_GPIO_COMPASS_32K_EN (TROUT_GPIO_MISC5_BASE + 3)
+
+#define TROUT_GPIO_NAVI_ACT_N (TROUT_GPIO_INT2_BASE + 0)
+#define TROUT_GPIO_COMPASS_IRQ (TROUT_GPIO_INT2_BASE + 1)
+#define TROUT_GPIO_SLIDING_DET (TROUT_GPIO_INT2_BASE + 2)
+#define TROUT_GPIO_AUD_HSMIC_DET_N (TROUT_GPIO_INT2_BASE + 3)
+#define TROUT_GPIO_SD_DOOR_N (TROUT_GPIO_INT2_BASE + 4)
+#define TROUT_GPIO_CAM_BTN_STEP1_N (TROUT_GPIO_INT2_BASE + 5)
+#define TROUT_GPIO_CAM_BTN_STEP2_N (TROUT_GPIO_INT2_BASE + 6)
+#define TROUT_GPIO_TP_ATT_N (TROUT_GPIO_INT2_BASE + 7)
+#define TROUT_GPIO_BANK0_FIRST_INT_SOURCE (TROUT_GPIO_NAVI_ACT_N)
+#define TROUT_GPIO_BANK0_LAST_INT_SOURCE (TROUT_GPIO_TP_ATT_N)
+
+#define TROUT_GPIO_H2W_DAT_GPI (TROUT_GPIO_MISC1_BASE + 0)
+#define TROUT_GPIO_H2W_CLK_GPI (TROUT_GPIO_MISC1_BASE + 1)
+#define TROUT_GPIO_CPLD128_VER_0 (TROUT_GPIO_MISC1_BASE + 4)
+#define TROUT_GPIO_CPLD128_VER_1 (TROUT_GPIO_MISC1_BASE + 5)
+#define TROUT_GPIO_CPLD128_VER_2 (TROUT_GPIO_MISC1_BASE + 6)
+#define TROUT_GPIO_CPLD128_VER_3 (TROUT_GPIO_MISC1_BASE + 7)
+
+#define TROUT_GPIO_SDMC_CD_N (TROUT_GPIO_VIRTUAL_BASE + 0)
+#define TROUT_GPIO_END (TROUT_GPIO_SDMC_CD_N)
+#define TROUT_GPIO_BANK1_FIRST_INT_SOURCE (TROUT_GPIO_SDMC_CD_N)
+#define TROUT_GPIO_BANK1_LAST_INT_SOURCE (TROUT_GPIO_SDMC_CD_N)
+
+#define TROUT_GPIO_VIRTUAL_TO_REAL_OFFSET \
+ (TROUT_GPIO_INT5_BASE - TROUT_GPIO_VIRTUAL_BASE)
+
+#define TROUT_INT_START (NR_MSM_IRQS + NR_GPIO_IRQS)
+#define TROUT_INT_BANK0_COUNT (8)
+#define TROUT_INT_BANK1_START (TROUT_INT_START + TROUT_INT_BANK0_COUNT)
+#define TROUT_INT_BANK1_COUNT (1)
+#define TROUT_INT_END (TROUT_INT_START + TROUT_INT_BANK0_COUNT + \
+ TROUT_INT_BANK1_COUNT - 1)
+#define TROUT_GPIO_TO_INT(n) (((n) <= TROUT_GPIO_BANK0_LAST_INT_SOURCE) ? \
+ (TROUT_INT_START - TROUT_GPIO_BANK0_FIRST_INT_SOURCE + (n)) : \
+ (TROUT_INT_BANK1_START - TROUT_GPIO_BANK1_FIRST_INT_SOURCE + (n)))
+
+#define TROUT_INT_TO_BANK(n) ((n - TROUT_INT_START) / TROUT_INT_BANK0_COUNT)
+#define TROUT_INT_TO_MASK(n) (1U << ((n - TROUT_INT_START) & 7))
+#define TROUT_BANK_TO_MASK_REG(bank) \
+ (bank ? TROUT_GPIO_INT_MASK1_REG : TROUT_GPIO_INT_MASK0_REG)
+#define TROUT_BANK_TO_STAT_REG(bank) \
+ (bank ? TROUT_GPIO_INT_STAT1_REG : TROUT_GPIO_INT_STAT0_REG)
+
+#endif /* GUARD */
diff --git a/arch/arm/mach-msm/clock-7x01a.c b/arch/arm/mach-msm/clock-7x01a.c
deleted file mode 100644
index 62230a3428ee..000000000000
--- a/arch/arm/mach-msm/clock-7x01a.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/* arch/arm/mach-msm/clock-7x01a.c
- *
- * Clock tables for MSM7X01A
- *
- * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-
-#include "clock.h"
-#include "devices.h"
-
-/* clock IDs used by the modem processor */
-
-#define ACPU_CLK 0 /* Applications processor clock */
-#define ADM_CLK 1 /* Applications data mover clock */
-#define ADSP_CLK 2 /* ADSP clock */
-#define EBI1_CLK 3 /* External bus interface 1 clock */
-#define EBI2_CLK 4 /* External bus interface 2 clock */
-#define ECODEC_CLK 5 /* External CODEC clock */
-#define EMDH_CLK 6 /* External MDDI host clock */
-#define GP_CLK 7 /* General purpose clock */
-#define GRP_CLK 8 /* Graphics clock */
-#define I2C_CLK 9 /* I2C clock */
-#define ICODEC_RX_CLK 10 /* Internal CODEX RX clock */
-#define ICODEC_TX_CLK 11 /* Internal CODEX TX clock */
-#define IMEM_CLK 12 /* Internal graphics memory clock */
-#define MDC_CLK 13 /* MDDI client clock */
-#define MDP_CLK 14 /* Mobile display processor clock */
-#define PBUS_CLK 15 /* Peripheral bus clock */
-#define PCM_CLK 16 /* PCM clock */
-#define PMDH_CLK 17 /* Primary MDDI host clock */
-#define SDAC_CLK 18 /* Stereo DAC clock */
-#define SDC1_CLK 19 /* Secure Digital Card clocks */
-#define SDC1_PCLK 20
-#define SDC2_CLK 21
-#define SDC2_PCLK 22
-#define SDC3_CLK 23
-#define SDC3_PCLK 24
-#define SDC4_CLK 25
-#define SDC4_PCLK 26
-#define TSIF_CLK 27 /* Transport Stream Interface clocks */
-#define TSIF_REF_CLK 28
-#define TV_DAC_CLK 29 /* TV clocks */
-#define TV_ENC_CLK 30
-#define UART1_CLK 31 /* UART clocks */
-#define UART2_CLK 32
-#define UART3_CLK 33
-#define UART1DM_CLK 34
-#define UART2DM_CLK 35
-#define USB_HS_CLK 36 /* High speed USB core clock */
-#define USB_HS_PCLK 37 /* High speed USB pbus clock */
-#define USB_OTG_CLK 38 /* Full speed USB clock */
-#define VDC_CLK 39 /* Video controller clock */
-#define VFE_CLK 40 /* Camera / Video Front End clock */
-#define VFE_MDC_CLK 41 /* VFE MDDI client clock */
-
-#define NR_CLKS 42
-
-#define CLOCK(clk_name, clk_id, clk_dev, clk_flags) { \
- .name = clk_name, \
- .id = clk_id, \
- .flags = clk_flags, \
- .dev = clk_dev, \
- }
-
-#define OFF CLKFLAG_AUTO_OFF
-#define MINMAX CLKFLAG_USE_MIN_MAX_TO_SET
-
-struct clk msm_clocks[] = {
- CLOCK("adm_clk", ADM_CLK, NULL, 0),
- CLOCK("adsp_clk", ADSP_CLK, NULL, 0),
- CLOCK("ebi1_clk", EBI1_CLK, NULL, 0),
- CLOCK("ebi2_clk", EBI2_CLK, NULL, 0),
- CLOCK("ecodec_clk", ECODEC_CLK, NULL, 0),
- CLOCK("emdh_clk", EMDH_CLK, NULL, OFF),
- CLOCK("gp_clk", GP_CLK, NULL, 0),
- CLOCK("grp_clk", GRP_CLK, NULL, OFF),
- CLOCK("i2c_clk", I2C_CLK, &msm_device_i2c.dev, 0),
- CLOCK("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
- CLOCK("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
- CLOCK("imem_clk", IMEM_CLK, NULL, OFF),
- CLOCK("mdc_clk", MDC_CLK, NULL, 0),
- CLOCK("mdp_clk", MDP_CLK, NULL, OFF),
- CLOCK("pbus_clk", PBUS_CLK, NULL, 0),
- CLOCK("pcm_clk", PCM_CLK, NULL, 0),
- CLOCK("pmdh_clk", PMDH_CLK, NULL, OFF | MINMAX),
- CLOCK("sdac_clk", SDAC_CLK, NULL, OFF),
- CLOCK("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
- CLOCK("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF),
- CLOCK("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
- CLOCK("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF),
- CLOCK("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
- CLOCK("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF),
- CLOCK("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
- CLOCK("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF),
- CLOCK("tsif_clk", TSIF_CLK, NULL, 0),
- CLOCK("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
- CLOCK("tv_dac_clk", TV_DAC_CLK, NULL, 0),
- CLOCK("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLOCK("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
- CLOCK("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
- CLOCK("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
- CLOCK("uart1dm_clk", UART1DM_CLK, NULL, OFF),
- CLOCK("uart2dm_clk", UART2DM_CLK, NULL, 0),
- CLOCK("usb_hs_clk", USB_HS_CLK, &msm_device_hsusb.dev, OFF),
- CLOCK("usb_hs_pclk", USB_HS_PCLK, &msm_device_hsusb.dev, OFF),
- CLOCK("usb_otg_clk", USB_OTG_CLK, NULL, 0),
- CLOCK("vdc_clk", VDC_CLK, NULL, OFF | MINMAX),
- CLOCK("vfe_clk", VFE_CLK, NULL, OFF),
- CLOCK("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
-};
-
-unsigned msm_num_clocks = ARRAY_SIZE(msm_clocks);
diff --git a/arch/arm/mach-msm/clock-7x30.c b/arch/arm/mach-msm/clock-7x30.c
new file mode 100644
index 000000000000..d1bab48c85fa
--- /dev/null
+++ b/arch/arm/mach-msm/clock-7x30.c
@@ -0,0 +1,957 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <mach/msm_iomap.h>
+#include <mach/clk.h>
+
+#include "clock.h"
+#include "clock-7x30.h"
+
+struct clk_freq_tbl {
+ uint32_t freq_hz;
+ uint32_t src;
+ uint32_t md_val;
+ uint32_t ns_val;
+ uint32_t mode;
+};
+
+struct clk_local {
+ uint32_t count;
+ uint32_t type;
+ uint32_t md_reg;
+ uint32_t ns_reg;
+ uint32_t freq_mask;
+ uint32_t br_en_mask;
+ uint32_t root_en_mask;
+ int parent;
+ uint32_t *children;
+ struct clk_freq_tbl *freq_tbl;
+ struct clk_freq_tbl *current_freq;
+};
+
+
+enum {
+ SRC_PLL0 = 4, /* Modem PLL */
+ SRC_PLL1 = 1, /* Global PLL */
+ SRC_PLL3 = 3, /* Multimedia/Peripheral PLL or Backup PLL1 */
+ SRC_PLL4 = 2, /* Display PLL */
+ SRC_LPXO = 6, /* Low power XO. */
+ SRC_MAX /* Used for sources that can't be turned on/off. */
+};
+
+static uint32_t src_pll_tbl[] = {
+ [SRC_PLL0] = PLL_0,
+ [SRC_PLL1] = PLL_1,
+ [SRC_PLL3] = PLL_3,
+ [SRC_PLL4] = PLL_4,
+};
+
+#define B(x) BIT(x)
+#define BM(msb, lsb) (((((uint32_t)-1) << (31-msb)) >> (31-msb+lsb)) << lsb)
+#define BVAL(msb, lsb, val) (((val) << lsb) & BM(msb, lsb))
+
+#define MD8(m, n) (BVAL(15, 8, m) | BVAL(7, 0, ~(n)))
+#define N8(msb, lsb, m, n) (BVAL(msb, lsb, ~(n-m)))
+#define MD16(m, n) (BVAL(31, 16, m) | BVAL(15, 0, ~(n)))
+#define N16(m, n) (BVAL(31, 16, ~(n-m)))
+#define SPDIV(s, d) (BVAL(4, 3, d-1) | BVAL(2, 0, s))
+#define SDIV(s, d) (BVAL(6, 3, d-1) | BVAL(2, 0, s))
+#define F_MASK_BASIC (BM(6, 3)|BM(2, 0))
+#define F_MASK_MND16 (BM(31, 16)|BM(4, 3)|BM(2, 0))
+#define F_MASK_MND8(m, l) (BM(m, l)|BM(4, 3)|BM(2, 0))
+
+#define F_RAW(f, s, m_v, n_v, mde) { \
+ .freq_hz = f, \
+ .src = s, \
+ .md_val = m_v, \
+ .ns_val = n_v, \
+ .mode = mde, \
+ }
+
+#define FREQ_END 0
+#define F_BASIC(f, s, div) F_RAW(f, s, 0, SDIV(s, div), 0)
+#define F_MND16(f, s, div, m, n) \
+ F_RAW(f, s, MD16(m, n), N16(m, n)|SPDIV(s, div), !!(n))
+#define F_MND8(f, nmsb, nlsb, s, div, m, n) \
+ F_RAW(f, s, MD8(m, n), N8(nmsb, nlsb, m, n)|SPDIV(s, div), !!(n))
+#define F_END F_RAW(FREQ_END, SRC_MAX, 0, 0, 0)
+
+static struct clk_freq_tbl clk_tbl_tcxo[] = {
+ F_RAW(19200000, SRC_MAX, 0, 0, 0),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_uartdm[] = {
+ F_MND16( 3686400, SRC_PLL3, 3, 3, 200),
+ F_MND16( 7372800, SRC_PLL3, 3, 3, 100),
+ F_MND16(14745600, SRC_PLL3, 3, 3, 50),
+ F_MND16(46400000, SRC_PLL3, 3, 145, 768),
+ F_MND16(51200000, SRC_PLL3, 3, 5, 24),
+ F_MND16(58982400, SRC_PLL3, 3, 6, 25),
+ F_MND16(64000000, SRC_PLL1, 4, 1, 3),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_mdh[] = {
+ F_BASIC( 73728000, SRC_PLL3, 10),
+ F_BASIC( 92160000, SRC_PLL3, 8),
+ F_BASIC(122880000, SRC_PLL3, 6),
+ F_BASIC(184320000, SRC_PLL3, 4),
+ F_BASIC(245760000, SRC_PLL3, 3),
+ F_BASIC(368640000, SRC_PLL3, 2),
+ F_BASIC(384000000, SRC_PLL1, 2),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_grp[] = {
+ F_BASIC( 24576000, SRC_LPXO, 1),
+ F_BASIC( 46000000, SRC_PLL3, 16),
+ F_BASIC( 49000000, SRC_PLL3, 15),
+ F_BASIC( 52000000, SRC_PLL3, 14),
+ F_BASIC( 56000000, SRC_PLL3, 13),
+ F_BASIC( 61440000, SRC_PLL3, 12),
+ F_BASIC( 67000000, SRC_PLL3, 11),
+ F_BASIC( 73000000, SRC_PLL3, 10),
+ F_BASIC( 81000000, SRC_PLL3, 9),
+ F_BASIC( 92000000, SRC_PLL3, 8),
+ F_BASIC(105000000, SRC_PLL3, 7),
+ F_BASIC(120000000, SRC_PLL3, 6),
+ F_BASIC(150000000, SRC_PLL3, 5),
+ F_BASIC(183000000, SRC_PLL3, 4),
+ F_BASIC(192000000, SRC_PLL1, 4),
+ F_BASIC(245760000, SRC_PLL3, 3),
+ /* Sync to AXI. Hence this "rate" is not fixed. */
+ F_RAW(1, SRC_MAX, 0, B(14), 0),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_sdc1_3[] = {
+ F_MND8( 144000, 19, 12, SRC_LPXO, 1, 1, 171),
+ F_MND8( 400000, 19, 12, SRC_LPXO, 1, 2, 123),
+ F_MND8(16000000, 19, 12, SRC_PLL3, 3, 14, 215),
+ F_MND8(17000000, 19, 12, SRC_PLL3, 4, 19, 206),
+ F_MND8(20000000, 19, 12, SRC_PLL3, 4, 23, 212),
+ F_MND8(25000000, 19, 12, SRC_LPXO, 1, 0, 0),
+ F_MND8(50000000, 19, 12, SRC_PLL3, 3, 1, 5),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_sdc2_4[] = {
+ F_MND8( 144000, 20, 13, SRC_LPXO, 1, 1, 171),
+ F_MND8( 400000, 20, 13, SRC_LPXO, 1, 2, 123),
+ F_MND8(16000000, 20, 13, SRC_PLL3, 3, 14, 215),
+ F_MND8(17000000, 20, 13, SRC_PLL3, 4, 19, 206),
+ F_MND8(20000000, 20, 13, SRC_PLL3, 4, 23, 212),
+ F_MND8(25000000, 20, 13, SRC_LPXO, 1, 0, 0),
+ F_MND8(50000000, 20, 13, SRC_PLL3, 3, 1, 5),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_mdp_core[] = {
+ F_BASIC( 46000000, SRC_PLL3, 16),
+ F_BASIC( 49000000, SRC_PLL3, 15),
+ F_BASIC( 52000000, SRC_PLL3, 14),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_mdp_lcdc[] = {
+ F_MND16(25000000, SRC_LPXO, 1, 0, 0),
+ F_MND16(30000000, SRC_PLL3, 4, 1, 6),
+ F_MND16(40000000, SRC_PLL3, 2, 1, 9),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_mdp_vsync[] = {
+ F_RAW(24576000, SRC_MAX, 0, 0, 0), /* Initialized to LPXO. */
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_mi2s_codec[] = {
+ F_MND16( 2048000, SRC_LPXO, 4, 1, 3),
+ F_MND16(12288000, SRC_LPXO, 2, 0, 0),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_mi2s[] = {
+ F_MND16(12288000, SRC_LPXO, 2, 0, 0),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_midi[] = {
+ F_MND8(98304000, 19, 12, SRC_PLL3, 3, 2, 5),
+ F_END,
+};
+static struct clk_freq_tbl clk_tbl_sdac[] = {
+ F_MND16( 256000, SRC_LPXO, 4, 1, 24),
+ F_MND16( 352800, SRC_LPXO, 1, 147, 10240),
+ F_MND16( 384000, SRC_LPXO, 4, 1, 16),
+ F_MND16( 512000, SRC_LPXO, 4, 1, 12),
+ F_MND16( 705600, SRC_LPXO, 1, 147, 5120),
+ F_MND16( 768000, SRC_LPXO, 4, 1, 8),
+ F_MND16(1024000, SRC_LPXO, 4, 1, 6),
+ F_MND16(1411200, SRC_LPXO, 1, 147, 2560),
+ F_MND16(1536000, SRC_LPXO, 4, 1, 4),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_tv[] = {
+ F_MND8(27000000, 23, 16, SRC_PLL4, 2, 2, 33),
+ F_MND8(74250000, 23, 16, SRC_PLL4, 2, 1, 6),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_usb[] = {
+ F_MND8(60000000, 23, 16, SRC_PLL1, 2, 5, 32),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_vfe_jpeg[] = {
+ F_MND16( 36000000, SRC_PLL3, 4, 1, 5),
+ F_MND16( 46000000, SRC_PLL3, 4, 1, 4),
+ F_MND16( 61440000, SRC_PLL3, 4, 1, 3),
+ F_MND16( 74000000, SRC_PLL3, 2, 1, 5),
+ F_MND16( 82000000, SRC_PLL3, 3, 1, 3),
+ F_MND16( 92000000, SRC_PLL3, 4, 1, 2),
+ F_MND16( 98000000, SRC_PLL3, 3, 2, 5),
+ F_MND16(105000000, SRC_PLL3, 2, 2, 7),
+ F_MND16(122880000, SRC_PLL3, 2, 1, 3),
+ F_MND16(148000000, SRC_PLL3, 2, 2, 5),
+ F_MND16(154000000, SRC_PLL1, 2, 2, 5),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_cam[] = {
+ F_MND16( 6000000, SRC_PLL1, 4, 1, 32),
+ F_MND16( 8000000, SRC_PLL1, 4, 1, 24),
+ F_MND16(12000000, SRC_PLL1, 4, 1, 16),
+ F_MND16(16000000, SRC_PLL1, 4, 1, 12),
+ F_MND16(19000000, SRC_PLL1, 4, 1, 10),
+ F_MND16(24000000, SRC_PLL1, 4, 1, 8),
+ F_MND16(32000000, SRC_PLL1, 4, 1, 6),
+ F_MND16(48000000, SRC_PLL1, 4, 1, 4),
+ F_MND16(64000000, SRC_PLL1, 4, 1, 3),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_vpe[] = {
+ F_MND8( 24576000, 22, 15, SRC_LPXO, 1, 0, 0),
+ F_MND8( 30720000, 22, 15, SRC_PLL3, 4, 1, 6),
+ F_MND8( 61440000, 22, 15, SRC_PLL3, 4, 1, 3),
+ F_MND8( 81920000, 22, 15, SRC_PLL3, 3, 1, 3),
+ F_MND8(122880000, 22, 15, SRC_PLL3, 3, 1, 2),
+ F_MND8(147000000, 22, 15, SRC_PLL3, 1, 1, 5),
+ F_MND8(153600000, 22, 15, SRC_PLL1, 1, 1, 5),
+ F_MND8(170667000, 22, 15, SRC_PLL1, 1, 2, 9),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_mfc[] = {
+ F_MND8( 24576000, 24, 17, SRC_LPXO, 1, 0, 0),
+ F_MND8( 30720000, 24, 17, SRC_PLL3, 4, 1, 6),
+ F_MND8( 61440000, 24, 17, SRC_PLL3, 4, 1, 3),
+ F_MND8( 81920000, 24, 17, SRC_PLL3, 3, 1, 3),
+ F_MND8(122880000, 24, 17, SRC_PLL3, 3, 1, 2),
+ F_MND8(147000000, 24, 17, SRC_PLL3, 1, 1, 5),
+ F_MND8(153600000, 24, 17, SRC_PLL1, 1, 1, 5),
+ F_MND8(170667000, 24, 17, SRC_PLL1, 1, 2, 9),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_spi[] = {
+ F_MND8(10000000, 19, 12, SRC_PLL3, 4, 7, 129),
+ F_MND8(26000000, 19, 12, SRC_PLL3, 4, 34, 241),
+ F_END,
+};
+
+static struct clk_freq_tbl clk_tbl_lpa_codec[] = {
+ F_RAW(1, SRC_MAX, 0, 0, 0), /* src = MI2S_CODEC_RX */
+ F_RAW(2, SRC_MAX, 0, 1, 0), /* src = ECODEC_CIF */
+ F_RAW(3, SRC_MAX, 0, 2, 0), /* src = MI2S */
+ F_RAW(4, SRC_MAX, 0, 3, 0), /* src = SDAC */
+ F_END,
+};
+
+static struct clk_freq_tbl dummy_freq = F_END;
+
+#define MND 1 /* Integer predivider and fractional MN:D divider. */
+#define BASIC 2 /* Integer divider. */
+#define NORATE 3 /* Just on/off. */
+
+#define C(x) L_7X30_##x##_CLK
+
+#define CLK_LOCAL(id, t, md, ns, f_msk, br, root, tbl, par, chld_lst) \
+ [C(id)] = { \
+ .type = t, \
+ .md_reg = md, \
+ .ns_reg = ns, \
+ .freq_mask = f_msk, \
+ .br_en_mask = br, \
+ .root_en_mask = root, \
+ .parent = C(par), \
+ .children = chld_lst, \
+ .freq_tbl = tbl, \
+ .current_freq = &dummy_freq, \
+ }
+
+#define CLK_BASIC(id, ns, br, root, tbl, par) \
+ CLK_LOCAL(id, BASIC, 0, ns, F_MASK_BASIC, br, root, tbl, \
+ par, NULL)
+#define CLK_MND8_P(id, ns, m, l, br, root, tbl, par, chld_lst) \
+ CLK_LOCAL(id, MND, (ns-4), ns, F_MASK_MND8(m, l), br, root, \
+ tbl, par, chld_lst)
+#define CLK_MND8(id, ns, m, l, br, root, tbl, chld_lst) \
+ CLK_MND8_P(id, ns, m, l, br, root, tbl, NONE, chld_lst)
+#define CLK_MND16(id, ns, br, root, tbl, par, chld_lst) \
+ CLK_LOCAL(id, MND, (ns-4), ns, F_MASK_MND16, br, root, tbl, \
+ par, chld_lst)
+#define CLK_1RATE(id, ns, br, root, tbl) \
+ CLK_LOCAL(id, BASIC, 0, ns, 0, br, root, tbl, NONE, NULL)
+#define CLK_SLAVE(id, ns, br, par) \
+ CLK_LOCAL(id, NORATE, 0, ns, 0, br, 0, NULL, par, NULL)
+#define CLK_NORATE(id, ns, br, root) \
+ CLK_LOCAL(id, NORATE, 0, ns, 0, br, root, NULL, NONE, NULL)
+#define CLK_GLBL(id, glbl, root) \
+ CLK_LOCAL(id, NORATE, 0, glbl, 0, 0, root, NULL, NONE, NULL)
+#define CLK_BRIDGE(id, glbl, root, par) \
+ CLK_LOCAL(id, NORATE, 0, glbl, 0, 0, root, NULL, par, NULL)
+
+#define REG(off) (MSM_CLK_CTL_BASE + off)
+#define MNCNTR_EN_MASK B(8)
+#define MNCNTR_RST_MASK B(7)
+#define MNCNTR_MODE_MASK BM(6, 5)
+#define MNCNTR_MODE BVAL(6, 5, 0x2) /* Dual-edge mode. */
+
+/* Register offsets used more than once. */
+#define USBH_MD 0x02BC
+#define USBH_NS 0x02C0
+#define USBH2_NS 0x046C
+#define USBH3_NS 0x0470
+#define CAM_VFE_NS 0x0044
+#define GLBL_CLK_ENA_SC 0x03BC
+#define GLBL_CLK_ENA_2_SC 0x03C0
+#define SDAC_NS 0x009C
+#define TV_NS 0x00CC
+#define MI2S_RX_NS 0x0070
+#define MI2S_TX_NS 0x0078
+#define MI2S_NS 0x02E0
+#define LPA_NS 0x02E8
+#define MDC_NS 0x007C
+#define MDP_VSYNC_REG 0x0460
+#define PLL_ENA_REG 0x0260
+
+static uint32_t pll_count[NUM_PLL];
+
+static uint32_t chld_grp_3d_src[] = {C(IMEM), C(GRP_3D), C(NONE)};
+static uint32_t chld_mdp_lcdc_p[] = {C(MDP_LCDC_PAD_P), C(NONE)};
+static uint32_t chld_mi2s_codec_rx[] = {C(MI2S_CODEC_RX_S), C(NONE)};
+static uint32_t chld_mi2s_codec_tx[] = {C(MI2S_CODEC_TX_S), C(NONE)};
+static uint32_t chld_mi2s[] = {C(MI2S_S), C(NONE)};
+static uint32_t chld_sdac_m[] = {C(SDAC_S), C(NONE)};
+static uint32_t chld_tv[] = {C(TV_DAC), C(TV_ENC), C(TSIF_REF), C(NONE)};
+static uint32_t chld_usb_src[] = {
+ C(USB_HS), C(USB_HS_CORE),
+ C(USB_HS2), C(USB_HS2_CORE),
+ C(USB_HS3), C(USB_HS3_CORE),
+ C(NONE),
+};
+uint32_t chld_vfe[] = {C(VFE_MDC), C(VFE_CAMIF), C(NONE)};
+
+static struct clk_local clk_local_tbl[] = {
+ CLK_NORATE(MDC, MDC_NS, B(9), B(11)),
+ CLK_NORATE(LPA_CORE, LPA_NS, B(5), 0),
+
+ CLK_1RATE(I2C, 0x0068, B(9), B(11), clk_tbl_tcxo),
+ CLK_1RATE(I2C_2, 0x02D8, B(0), B(2), clk_tbl_tcxo),
+ CLK_1RATE(QUP_I2C, 0x04F0, B(0), B(2), clk_tbl_tcxo),
+ CLK_1RATE(UART1, 0x00E0, B(5), B(4), clk_tbl_tcxo),
+ CLK_1RATE(UART3, 0x0468, B(5), B(4), clk_tbl_tcxo),
+
+ CLK_BASIC(EMDH, 0x0050, 0, B(11), clk_tbl_mdh, AXI_LI_ADSP_A),
+ CLK_BASIC(PMDH, 0x008C, 0, B(11), clk_tbl_mdh, AXI_LI_ADSP_A),
+ CLK_BASIC(MDP, 0x014C, B(9), B(11), clk_tbl_mdp_core, AXI_MDP),
+
+ CLK_MND8_P(VPE, 0x015C, 22, 15, B(9), B(11), clk_tbl_vpe,
+ AXI_VPE, NULL),
+ /* Combining MFC and MFC_DIV2 clocks. */
+ CLK_MND8_P(MFC, 0x0154, 24, 17, B(9)|B(15), B(11), clk_tbl_mfc,
+ AXI_MFC, NULL),
+
+ CLK_MND8(SDC1, 0x00A4, 19, 12, B(9), B(11), clk_tbl_sdc1_3, NULL),
+ CLK_MND8(SDC2, 0x00AC, 20, 13, B(9), B(11), clk_tbl_sdc2_4, NULL),
+ CLK_MND8(SDC3, 0x00B4, 19, 12, B(9), B(11), clk_tbl_sdc1_3, NULL),
+ CLK_MND8(SDC4, 0x00BC, 20, 13, B(9), B(11), clk_tbl_sdc2_4, NULL),
+ CLK_MND8(SPI, 0x02C8, 19, 12, B(9), B(11), clk_tbl_spi, NULL),
+ CLK_MND8(MIDI, 0x02D0, 19, 12, B(9), B(11), clk_tbl_midi, NULL),
+ CLK_MND8_P(USB_HS_SRC, USBH_NS, 23, 16, 0, B(11), clk_tbl_usb,
+ AXI_LI_ADSP_A, chld_usb_src),
+ CLK_SLAVE(USB_HS, USBH_NS, B(9), USB_HS_SRC),
+ CLK_SLAVE(USB_HS_CORE, USBH_NS, B(13), USB_HS_SRC),
+ CLK_SLAVE(USB_HS2, USBH2_NS, B(9), USB_HS_SRC),
+ CLK_SLAVE(USB_HS2_CORE, USBH2_NS, B(4), USB_HS_SRC),
+ CLK_SLAVE(USB_HS3, USBH3_NS, B(9), USB_HS_SRC),
+ CLK_SLAVE(USB_HS3_CORE, USBH3_NS, B(4), USB_HS_SRC),
+ CLK_MND8(TV, TV_NS, 23, 16, 0, B(11), clk_tbl_tv, chld_tv),
+ CLK_SLAVE(TV_DAC, TV_NS, B(12), TV),
+ CLK_SLAVE(TV_ENC, TV_NS, B(9), TV),
+ /* Hacking root & branch into one param. */
+ CLK_SLAVE(TSIF_REF, 0x00C4, B(9)|B(11), TV),
+
+ CLK_MND16(UART1DM, 0x00D4, B(9), B(11), clk_tbl_uartdm, NONE, NULL),
+ CLK_MND16(UART2DM, 0x00DC, B(9), B(11), clk_tbl_uartdm, NONE, NULL),
+ CLK_MND16(JPEG, 0x0164, B(9), B(11), clk_tbl_vfe_jpeg,
+ AXI_LI_JPEG, NULL),
+ CLK_MND16(CAM, 0x0374, 0, B(9), clk_tbl_cam, NONE, NULL),
+ CLK_MND16(VFE, CAM_VFE_NS, B(9), B(13), clk_tbl_vfe_jpeg,
+ AXI_LI_VFE, chld_vfe),
+ CLK_SLAVE(VFE_MDC, CAM_VFE_NS, B(11), VFE),
+ CLK_SLAVE(VFE_CAMIF, CAM_VFE_NS, B(15), VFE),
+
+ CLK_MND16(SDAC_M, SDAC_NS, B(12), B(11), clk_tbl_sdac,
+ NONE, chld_sdac_m),
+ CLK_SLAVE(SDAC_S, SDAC_NS, B(9), SDAC_M),
+
+ CLK_MND16(MDP_LCDC_P, 0x0390, B(9), B(11), clk_tbl_mdp_lcdc,
+ NONE, chld_mdp_lcdc_p),
+ CLK_SLAVE(MDP_LCDC_PAD_P, 0x0390, B(12), MDP_LCDC_P),
+ CLK_1RATE(MDP_VSYNC, MDP_VSYNC_REG, B(0), 0, clk_tbl_mdp_vsync),
+
+ CLK_MND16(MI2S_CODEC_RX_M, MI2S_RX_NS, B(12), B(11),
+ clk_tbl_mi2s_codec, NONE, chld_mi2s_codec_rx),
+ CLK_SLAVE(MI2S_CODEC_RX_S, MI2S_RX_NS, B(9), MI2S_CODEC_RX_M),
+
+ CLK_MND16(MI2S_CODEC_TX_M, MI2S_TX_NS, B(12), B(11),
+ clk_tbl_mi2s_codec, NONE, chld_mi2s_codec_tx),
+ CLK_SLAVE(MI2S_CODEC_TX_S, MI2S_TX_NS, B(9), MI2S_CODEC_TX_M),
+
+ CLK_MND16(MI2S_M, MI2S_NS, B(12), B(11),
+ clk_tbl_mi2s, NONE, chld_mi2s),
+ CLK_SLAVE(MI2S_S, MI2S_NS, B(9), MI2S_M),
+
+ CLK_LOCAL(GRP_2D, BASIC, 0, 0x0034, F_MASK_BASIC | (7 << 12),
+ B(7), B(11), clk_tbl_grp, AXI_GRP_2D, NULL),
+ CLK_LOCAL(GRP_3D_SRC, BASIC, 0, 0x0084, F_MASK_BASIC | (7 << 12),
+ 0, B(11), clk_tbl_grp, AXI_LI_GRP, chld_grp_3d_src),
+ CLK_SLAVE(GRP_3D, 0x0084, B(7), GRP_3D_SRC),
+ CLK_SLAVE(IMEM, 0x0084, B(9), GRP_3D_SRC),
+ CLK_LOCAL(LPA_CODEC, BASIC, 0, LPA_NS, BM(1, 0), B(9), 0,
+ clk_tbl_lpa_codec, NONE, NULL),
+
+ /* Peripheral bus clocks. */
+ CLK_GLBL(ADM, GLBL_CLK_ENA_SC, B(5)),
+ CLK_GLBL(CAMIF_PAD_P, GLBL_CLK_ENA_SC, B(9)),
+ CLK_GLBL(EMDH_P, GLBL_CLK_ENA_2_SC, B(3)),
+ CLK_GLBL(GRP_2D_P, GLBL_CLK_ENA_SC, B(24)),
+ CLK_GLBL(GRP_3D_P, GLBL_CLK_ENA_2_SC, B(17)),
+ CLK_GLBL(JPEG_P, GLBL_CLK_ENA_2_SC, B(24)),
+ CLK_GLBL(LPA_P, GLBL_CLK_ENA_2_SC, B(7)),
+ CLK_GLBL(MDP_P, GLBL_CLK_ENA_2_SC, B(6)),
+ CLK_GLBL(MFC_P, GLBL_CLK_ENA_2_SC, B(26)),
+ CLK_GLBL(PMDH_P, GLBL_CLK_ENA_2_SC, B(4)),
+ CLK_GLBL(ROTATOR_IMEM, GLBL_CLK_ENA_2_SC, B(23)),
+ CLK_GLBL(ROTATOR_P, GLBL_CLK_ENA_2_SC, B(25)),
+ CLK_GLBL(SDC1_H, GLBL_CLK_ENA_SC, B(7)),
+ CLK_GLBL(SDC2_H, GLBL_CLK_ENA_SC, B(8)),
+ CLK_GLBL(SDC3_H, GLBL_CLK_ENA_SC, B(27)),
+ CLK_GLBL(SDC4_H, GLBL_CLK_ENA_SC, B(28)),
+ CLK_GLBL(SPI_P, GLBL_CLK_ENA_2_SC, B(10)),
+ CLK_GLBL(TSIF_P, GLBL_CLK_ENA_SC, B(18)),
+ CLK_GLBL(UART1DM_P, GLBL_CLK_ENA_SC, B(17)),
+ CLK_GLBL(UART2DM_P, GLBL_CLK_ENA_SC, B(26)),
+ CLK_GLBL(USB_HS2_P, GLBL_CLK_ENA_2_SC, B(8)),
+ CLK_GLBL(USB_HS3_P, GLBL_CLK_ENA_2_SC, B(9)),
+ CLK_GLBL(USB_HS_P, GLBL_CLK_ENA_SC, B(25)),
+ CLK_GLBL(VFE_P, GLBL_CLK_ENA_2_SC, B(27)),
+
+ /* AXI bridge clocks. */
+ CLK_BRIDGE(AXI_LI_APPS, GLBL_CLK_ENA_SC, B(2), NONE),
+ CLK_BRIDGE(AXI_LI_ADSP_A, GLBL_CLK_ENA_2_SC, B(14), AXI_LI_APPS),
+ CLK_BRIDGE(AXI_LI_JPEG, GLBL_CLK_ENA_2_SC, B(19), AXI_LI_APPS),
+ CLK_BRIDGE(AXI_LI_VFE, GLBL_CLK_ENA_SC, B(23), AXI_LI_APPS),
+ CLK_BRIDGE(AXI_MDP, GLBL_CLK_ENA_2_SC, B(29), AXI_LI_APPS),
+
+ CLK_BRIDGE(AXI_IMEM, GLBL_CLK_ENA_2_SC, B(18), NONE),
+
+ CLK_BRIDGE(AXI_LI_VG, GLBL_CLK_ENA_SC, B(3), NONE),
+ CLK_BRIDGE(AXI_GRP_2D, GLBL_CLK_ENA_SC, B(21), AXI_LI_VG),
+ CLK_BRIDGE(AXI_LI_GRP, GLBL_CLK_ENA_SC, B(22), AXI_LI_VG),
+ CLK_BRIDGE(AXI_MFC, GLBL_CLK_ENA_2_SC, B(20), AXI_LI_VG),
+ CLK_BRIDGE(AXI_ROTATOR, GLBL_CLK_ENA_2_SC, B(22), AXI_LI_VG),
+ CLK_BRIDGE(AXI_VPE, GLBL_CLK_ENA_2_SC, B(21), AXI_LI_VG),
+};
+
+static DEFINE_SPINLOCK(clock_reg_lock);
+static DEFINE_SPINLOCK(pll_vote_lock);
+
+void pll_enable(uint32_t pll)
+{
+ uint32_t reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll_vote_lock, flags);
+ if (!pll_count[pll]) {
+ reg_val = readl(REG(PLL_ENA_REG));
+ reg_val |= (1 << pll);
+ writel(reg_val, REG(PLL_ENA_REG));
+ }
+ pll_count[pll]++;
+ spin_unlock_irqrestore(&pll_vote_lock, flags);
+}
+
+static void src_enable(uint32_t src)
+{
+ /* SRC_MAX is used as a placeholder for some freqencies that don't
+ * have any direct PLL dependency. */
+ if (src == SRC_MAX || src == SRC_LPXO)
+ return;
+
+ pll_enable(src_pll_tbl[src]);
+}
+
+void pll_disable(uint32_t pll)
+{
+ uint32_t reg_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll_vote_lock, flags);
+ if (pll_count[pll])
+ pll_count[pll]--;
+ else
+ pr_warning("Reference count mismatch in PLL disable!\n");
+
+ if (pll_count[pll] == 0) {
+ reg_val = readl(REG(PLL_ENA_REG));
+ reg_val &= ~(1 << pll);
+ writel(reg_val, REG(PLL_ENA_REG));
+ }
+ spin_unlock_irqrestore(&pll_vote_lock, flags);
+}
+
+static void src_disable(uint32_t src)
+{
+ /* SRC_MAX is used as a placeholder for some freqencies that don't
+ * have any direct PLL dependency. */
+ if (src == SRC_MAX || src == SRC_LPXO)
+ return;
+
+ pll_disable(src_pll_tbl[src]);
+
+}
+
+/*
+ * SoC specific register-based control of clocks.
+ */
+static int _soc_clk_enable(unsigned id)
+{
+ struct clk_local *t = &clk_local_tbl[id];
+ void *ns_reg = REG(t->ns_reg);
+ uint32_t reg_val = 0;
+
+ reg_val = readl(ns_reg);
+ if (t->type == MND) {
+ /* mode can be either 0 or 1. So the R-value of the
+ * expression will evaluate to MNCNTR_EN_MASK or 0. This
+ * avoids the need for a "if(mode == 1)". A "&" will not work
+ * here. */
+ reg_val |= (MNCNTR_EN_MASK * t->current_freq->mode);
+ writel(reg_val, ns_reg);
+ }
+ if (t->root_en_mask) {
+ reg_val |= t->root_en_mask;
+ writel(reg_val, ns_reg);
+ }
+ if (t->br_en_mask) {
+ reg_val |= t->br_en_mask;
+ writel(reg_val, ns_reg);
+ }
+ return 0;
+}
+
+static void _soc_clk_disable(unsigned id)
+{
+ struct clk_local *t = &clk_local_tbl[id];
+ void *ns_reg = REG(t->ns_reg);
+ uint32_t reg_val = 0;
+
+ reg_val = readl(ns_reg);
+
+ if (t->br_en_mask) {
+ reg_val &= ~(t->br_en_mask);
+ writel(reg_val, ns_reg);
+ }
+ if (t->root_en_mask) {
+ reg_val &= ~(t->root_en_mask);
+ writel(reg_val, ns_reg);
+ }
+ if (t->type == MND) {
+ reg_val &= ~MNCNTR_EN_MASK;
+ writel(reg_val, ns_reg);
+ }
+}
+
+static int soc_clk_enable_nolock(unsigned id)
+{
+ struct clk_local *t = &clk_local_tbl[id];
+ int ret = 0;
+
+ if (!t->count) {
+ if (t->parent != C(NONE))
+ soc_clk_enable_nolock(t->parent);
+ src_enable(t->current_freq->src);
+ ret = _soc_clk_enable(id);
+ }
+ t->count++;
+
+ return ret;
+}
+
+static void soc_clk_disable_nolock(unsigned id)
+{
+ struct clk_local *t = &clk_local_tbl[id];
+
+ if (!t->count) {
+ pr_warning("Reference count mismatch in clock disable!\n");
+ return;
+ }
+ if (t->count)
+ t->count--;
+ if (t->count == 0) {
+ _soc_clk_disable(id);
+ src_disable(t->current_freq->src);
+ if (t->parent != C(NONE))
+ soc_clk_disable_nolock(t->parent);
+ }
+
+ return;
+}
+
+static int soc_clk_enable(unsigned id)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock_reg_lock, flags);
+ ret = soc_clk_enable_nolock(id);
+ spin_unlock_irqrestore(&clock_reg_lock, flags);
+
+ return ret;
+}
+
+static void soc_clk_disable(unsigned id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clock_reg_lock, flags);
+ soc_clk_disable_nolock(id);
+ spin_unlock_irqrestore(&clock_reg_lock, flags);
+
+ return;
+}
+
+static int soc_clk_reset(unsigned id, enum clk_reset_action action)
+{
+ return -EPERM;
+}
+
+static int soc_clk_set_rate(unsigned id, unsigned rate)
+{
+ struct clk_local *t = &clk_local_tbl[id];
+ struct clk_freq_tbl *cf = t->current_freq;
+ struct clk_freq_tbl *nf;
+ uint32_t *chld = t->children;
+ void *ns_reg = REG(t->ns_reg);
+ void *md_reg = REG(t->md_reg);
+ uint32_t reg_val = 0;
+ int i, ret = 0;
+ unsigned long flags;
+
+ if (t->type != MND && t->type != BASIC)
+ return -EPERM;
+
+ spin_lock_irqsave(&clock_reg_lock, flags);
+
+ if (rate == cf->freq_hz)
+ goto release_lock;
+
+ for (nf = t->freq_tbl; nf->freq_hz != FREQ_END; nf++)
+ if (nf->freq_hz == rate)
+ break;
+
+ if (nf->freq_hz == FREQ_END) {
+ ret = -EINVAL;
+ goto release_lock;
+ }
+
+ if (t->freq_mask == 0) {
+ t->current_freq = nf;
+ goto release_lock;
+ }
+
+ /* Disable all branches before changing rate to prevent jitter. */
+ for (i = 0; chld && chld[i] != C(NONE); i++) {
+ struct clk_local *ch = &clk_local_tbl[chld[i]];
+ /* Don't bother turning off if it is already off.
+ * Checking ch->count is cheaper (cache) than reading and
+ * writing to a register (uncached/unbuffered). */
+ if (ch->count) {
+ reg_val = readl(REG(ch->ns_reg));
+ reg_val &= ~(ch->br_en_mask);
+ writel(reg_val, REG(ch->ns_reg));
+ }
+ }
+ if (t->count)
+ _soc_clk_disable(id);
+
+ /* Turn on PLL of the new freq. */
+ src_enable(nf->src);
+
+ /* Some clocks share the same register, so must be careful when
+ * assuming a register doesn't need to be re-read. */
+ reg_val = readl(ns_reg);
+ if (t->type == MND) {
+ reg_val |= MNCNTR_RST_MASK;
+ writel(reg_val, ns_reg);
+ /* TODO: Currently writing 0's into reserved bits for 8-bit
+ * MND. Can be avoided by adding md_mask. */
+ if (nf->mode)
+ writel(nf->md_val, md_reg);
+ reg_val &= ~MNCNTR_MODE_MASK;
+ reg_val |= (MNCNTR_MODE * nf->mode);
+ }
+ reg_val &= ~(t->freq_mask);
+ reg_val |= nf->ns_val;
+ writel(reg_val, ns_reg);
+
+ if (t->type == MND) {
+ reg_val &= ~MNCNTR_RST_MASK;
+ writel(reg_val, ns_reg);
+ }
+
+ /* Turn off PLL of the old freq. */
+ src_disable(cf->src);
+
+ /* Current freq must be updated before _soc_clk_enable() is called to
+ * make sure the MNCNTR_E bit is set correctly. */
+ t->current_freq = nf;
+
+ if (t->count)
+ _soc_clk_enable(id);
+ /* Enable only branches that were ON before. */
+ for (i = 0; chld && chld[i] != C(NONE); i++) {
+ struct clk_local *ch = &clk_local_tbl[chld[i]];
+ if (ch->count) {
+ reg_val = readl(REG(ch->ns_reg));
+ reg_val |= ch->br_en_mask;
+ writel(reg_val, REG(ch->ns_reg));
+ }
+ }
+
+release_lock:
+ spin_unlock_irqrestore(&clock_reg_lock, flags);
+ return ret;
+}
+
+static int soc_clk_set_min_rate(unsigned id, unsigned rate)
+{
+ return -EPERM;
+}
+
+static int soc_clk_set_max_rate(unsigned id, unsigned rate)
+{
+ return -EPERM;
+}
+
+static int soc_clk_set_flags(unsigned id, unsigned flags)
+{
+ return -EPERM;
+}
+
+static unsigned soc_clk_get_rate(unsigned id)
+{
+ struct clk_local *t = &clk_local_tbl[id];
+ unsigned long flags;
+ unsigned ret = 0;
+
+ spin_lock_irqsave(&clock_reg_lock, flags);
+ if (t->type == MND && t->type == BASIC)
+ ret = t->current_freq->freq_hz;
+ else {
+ /* Walk up the tree to see if any parent has a rate. */
+ while (t->type == NORATE && t->parent != C(NONE))
+ t = &clk_local_tbl[t->parent];
+ if (t->type == MND || t->type == BASIC)
+ ret = t->current_freq->freq_hz;
+ }
+ spin_unlock_irqrestore(&clock_reg_lock, flags);
+
+ /* Return 0 if the rate has never been set. Might not be correct,
+ * but it's good enough. */
+ if (ret == FREQ_END)
+ ret = 0;
+
+ return ret;
+}
+
+static unsigned soc_clk_is_enabled(unsigned id)
+{
+ return !!(clk_local_tbl[id].count);
+}
+
+static long soc_clk_round_rate(unsigned id, unsigned rate)
+{
+ struct clk_local *t = &clk_local_tbl[id];
+ struct clk_freq_tbl *f;
+
+ if (t->type != MND && t->type != BASIC)
+ return -EINVAL;
+
+ for (f = t->freq_tbl; f->freq_hz != FREQ_END; f++)
+ if (f->freq_hz >= rate)
+ return f->freq_hz;
+
+ return -EPERM;
+}
+
+struct clk_ops clk_ops_7x30 = {
+ .enable = soc_clk_enable,
+ .disable = soc_clk_disable,
+ .reset = soc_clk_reset,
+ .set_rate = soc_clk_set_rate,
+ .set_min_rate = soc_clk_set_min_rate,
+ .set_max_rate = soc_clk_set_max_rate,
+ .set_flags = soc_clk_set_flags,
+ .get_rate = soc_clk_get_rate,
+ .is_enabled = soc_clk_is_enabled,
+ .round_rate = soc_clk_round_rate,
+};
+
+#if 0
+static struct reg_init {
+ void *reg;
+ uint32_t mask;
+ uint32_t val;
+} ri_list[] __initdata = {
+ /* TODO: Remove next line from commercial code. */
+ {REG(PLL_ENA_REG), 0x7F, 0x7F}, /* Turn on all PLLs. */
+
+ /* Enable UMDX_P clock. Known to causes issues, so never turn off. */
+ {REG(GLBL_CLK_ENA_2_SC), B(2), B(2)},
+ {REG(0x0050), 0x3 << 17, 0x3}, /* EMDH RX div = div-4. */
+ {REG(0x008C), 0x3 << 17, 0x3}, /* PMDH RX div = div-4. */
+ /* MI2S_CODEC_RX_S src = MI2S_CODEC_RX_M. */
+ {REG(MI2S_RX_NS), B(14), 0x0},
+ /* MI2S_CODEC_TX_S src = MI2S_CODEC_TX_M. */
+ {REG(MI2S_TX_NS), B(14), 0x0},
+ {REG(MI2S_NS), B(14), 0x0}, /* MI2S_S src = MI2S_M. */
+ {REG(LPA_NS), B(4), B(4)}, /* LPA CORE src = LPA_CODEC. */
+ {REG(0x02EC), 0xF, 0xD}, /* MI2S_CODEC_RX_S div = div-8. */
+ {REG(0x02F0), 0xF, 0xD}, /* MI2S_CODEC_TX_S div = div-8. */
+ {REG(0x02E4), 0xF, 0x3}, /* MI2S_S div = div-4. */
+ {REG(MDC_NS), 0x3, 0x3}, /* MDC src = external MDH src. */
+ {REG(SDAC_NS), 0x3 << 14, 0x0}, /* SDAC div = div-1. */
+ /* Disable sources TCXO/5 & TCXO/6. UART1 src = TCXO*/
+ {REG(0x00E0), 0x3 << 25 | 0x7, 0x0},
+ {REG(0x0468), 0x7, 0x0}, /* UART3 src = TCXO. */
+ {REG(MDP_VSYNC_REG), 0xC, 0x4}, /* MDP VSYNC src = LPXO. */
+
+ /* USBH core clocks src = USB_HS_SRC. */
+ {REG(USBH_NS), B(15), B(15)},
+ {REG(USBH2_NS), B(6), B(6)},
+ {REG(USBH3_NS), B(6), B(6)},
+};
+
+#define set_1rate(clk) \
+ soc_clk_set_rate(C(clk), clk_local_tbl[C(clk)].freq_tbl->freq_hz)
+static __init int soc_clk_init(void)
+{
+ int i;
+ uint32_t val;
+
+ /* Disable all the child clocks of USB_HS_SRC. This needs to be done
+ * before the register init loop since it changes the source of the
+ * USB HS core clocks. */
+ for (i = 0; chld_usb_src[i] != C(NONE); i++)
+ _soc_clk_disable(chld_usb_src[i]);
+
+ soc_clk_set_rate(C(USB_HS_SRC), clk_tbl_usb[0].freq_hz);
+
+ for (i = 0; i < ARRAY_SIZE(ri_list); i++) {
+ val = readl(ri_list[i].reg);
+ val &= ~ri_list[i].mask;
+ val |= ri_list[i].val;
+ writel(val, ri_list[i].reg);
+ }
+
+ /* This is just to update the driver data structures. The actual
+ * register set up is taken care of in the register init loop. */
+ set_1rate(I2C);
+ set_1rate(I2C_2);
+ set_1rate(QUP_I2C);
+ set_1rate(UART1);
+ set_1rate(UART3);
+
+ return 0;
+}
+
+arch_initcall(soc_clk_init);
+#endif
diff --git a/arch/arm/mach-msm/clock-7x30.h b/arch/arm/mach-msm/clock-7x30.h
new file mode 100644
index 000000000000..7480ace1a986
--- /dev/null
+++ b/arch/arm/mach-msm/clock-7x30.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_7X30_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_7X30_H
+
+enum {
+ L_7X30_NONE_CLK = -1,
+ L_7X30_ADM_CLK,
+ L_7X30_I2C_CLK,
+ L_7X30_I2C_2_CLK,
+ L_7X30_QUP_I2C_CLK,
+ L_7X30_UART1DM_CLK,
+ L_7X30_UART1DM_P_CLK,
+ L_7X30_UART2DM_CLK,
+ L_7X30_UART2DM_P_CLK,
+ L_7X30_EMDH_CLK,
+ L_7X30_EMDH_P_CLK,
+ L_7X30_PMDH_CLK,
+ L_7X30_PMDH_P_CLK,
+ L_7X30_GRP_2D_CLK,
+ L_7X30_GRP_2D_P_CLK,
+ L_7X30_GRP_3D_SRC_CLK,
+ L_7X30_GRP_3D_CLK,
+ L_7X30_GRP_3D_P_CLK,
+ L_7X30_IMEM_CLK,
+ L_7X30_SDC1_CLK,
+ L_7X30_SDC1_H_CLK,
+ L_7X30_SDC2_CLK,
+ L_7X30_SDC2_H_CLK,
+ L_7X30_SDC3_CLK,
+ L_7X30_SDC3_H_CLK,
+ L_7X30_SDC4_CLK,
+ L_7X30_SDC4_H_CLK,
+ L_7X30_MDP_CLK,
+ L_7X30_MDP_P_CLK,
+ L_7X30_MDP_LCDC_P_CLK,
+ L_7X30_MDP_LCDC_PAD_P_CLK,
+ L_7X30_MDP_VSYNC_CLK,
+ L_7X30_MI2S_CODEC_RX_M_CLK,
+ L_7X30_MI2S_CODEC_RX_S_CLK,
+ L_7X30_MI2S_CODEC_TX_M_CLK,
+ L_7X30_MI2S_CODEC_TX_S_CLK,
+ L_7X30_MI2S_M_CLK,
+ L_7X30_MI2S_S_CLK,
+ L_7X30_LPA_CODEC_CLK,
+ L_7X30_LPA_CORE_CLK,
+ L_7X30_LPA_P_CLK,
+ L_7X30_MIDI_CLK,
+ L_7X30_MDC_CLK,
+ L_7X30_ROTATOR_IMEM_CLK,
+ L_7X30_ROTATOR_P_CLK,
+ L_7X30_SDAC_M_CLK,
+ L_7X30_SDAC_S_CLK,
+ L_7X30_UART1_CLK,
+ L_7X30_UART3_CLK,
+ L_7X30_TV_CLK,
+ L_7X30_TV_DAC_CLK,
+ L_7X30_TV_ENC_CLK,
+ L_7X30_TSIF_REF_CLK,
+ L_7X30_TSIF_P_CLK,
+ L_7X30_USB_HS_SRC_CLK,
+ L_7X30_USB_HS_CLK,
+ L_7X30_USB_HS_CORE_CLK,
+ L_7X30_USB_HS_P_CLK,
+ L_7X30_USB_HS2_CLK,
+ L_7X30_USB_HS2_CORE_CLK,
+ L_7X30_USB_HS2_P_CLK,
+ L_7X30_USB_HS3_CLK,
+ L_7X30_USB_HS3_CORE_CLK,
+ L_7X30_USB_HS3_P_CLK,
+ L_7X30_VFE_CLK,
+ L_7X30_VFE_P_CLK,
+ L_7X30_VFE_MDC_CLK,
+ L_7X30_VFE_CAMIF_CLK,
+ L_7X30_CAMIF_PAD_P_CLK,
+ L_7X30_CAM_CLK,
+ L_7X30_JPEG_CLK,
+ L_7X30_JPEG_P_CLK,
+ L_7X30_VPE_CLK,
+ L_7X30_MFC_CLK,
+ L_7X30_MFC_P_CLK,
+ L_7X30_SPI_CLK,
+ L_7X30_SPI_P_CLK,
+
+ L_7X30_AXI_LI_VG_CLK,
+ L_7X30_AXI_LI_GRP_CLK,
+ L_7X30_AXI_LI_JPEG_CLK,
+ L_7X30_AXI_GRP_2D_CLK,
+ L_7X30_AXI_MFC_CLK,
+ L_7X30_AXI_VPE_CLK,
+ L_7X30_AXI_LI_VFE_CLK,
+ L_7X30_AXI_LI_APPS_CLK,
+ L_7X30_AXI_MDP_CLK,
+ L_7X30_AXI_IMEM_CLK,
+ L_7X30_AXI_LI_ADSP_A_CLK,
+ L_7X30_AXI_ROTATOR_CLK,
+
+ L_7X30_NR_CLKS
+};
+
+struct clk_ops;
+extern struct clk_ops clk_ops_7x30;
+
+void pll_enable(uint32_t pll);
+void pll_disable(uint32_t pll);
+
+#define CLK_7X30(clk_name, clk_id, clk_dev, clk_flags) { \
+ .name = clk_name, \
+ .id = L_7X30_##clk_id, \
+ .ops = &clk_ops_7x30, \
+ .flags = clk_flags, \
+ .dev = clk_dev, \
+ .dbg_name = #clk_id, \
+ }
+
+#endif
+
diff --git a/arch/arm/mach-msm/clock-pcom.c b/arch/arm/mach-msm/clock-pcom.c
new file mode 100644
index 000000000000..e4997254a279
--- /dev/null
+++ b/arch/arm/mach-msm/clock-pcom.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/stddef.h>
+#include <mach/clk.h>
+
+#include "proc_comm.h"
+#include "clock.h"
+
+/*
+ * glue for the proc_comm interface
+ */
+int pc_clk_enable(unsigned id)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+void pc_clk_disable(unsigned id)
+{
+ msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL);
+}
+
+int pc_clk_reset(unsigned id, enum clk_reset_action action)
+{
+ int rc;
+
+ if (action == CLK_RESET_ASSERT)
+ rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_ASSERT, &id, NULL);
+ else
+ rc = msm_proc_comm(PCOM_CLKCTL_RPC_RESET_DEASSERT, &id, NULL);
+
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_rate(unsigned id, unsigned rate)
+{
+ /* The rate _might_ be rounded off to the nearest KHz value by the
+ * remote function. So a return value of 0 doesn't necessarily mean
+ * that the exact rate was set successfully.
+ */
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_min_rate(unsigned id, unsigned rate)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &rate);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_max_rate(unsigned id, unsigned rate)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_MAX_RATE, &id, &rate);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+int pc_clk_set_flags(unsigned id, unsigned flags)
+{
+ int rc = msm_proc_comm(PCOM_CLKCTL_RPC_SET_FLAGS, &id, &flags);
+ if (rc < 0)
+ return rc;
+ else
+ return (int)id < 0 ? -EINVAL : 0;
+}
+
+unsigned pc_clk_get_rate(unsigned id)
+{
+ if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL))
+ return 0;
+ else
+ return id;
+}
+
+unsigned pc_clk_is_enabled(unsigned id)
+{
+ if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL))
+ return 0;
+ else
+ return id;
+}
+
+long pc_clk_round_rate(unsigned id, unsigned rate)
+{
+
+ /* Not supported. */
+ return -EPERM;
+}
+
+struct clk_ops clk_ops_pcom = {
+ .enable = pc_clk_enable,
+ .disable = pc_clk_disable,
+ .reset = pc_clk_reset,
+ .set_rate = pc_clk_set_rate,
+ .set_min_rate = pc_clk_set_min_rate,
+ .set_max_rate = pc_clk_set_max_rate,
+ .set_flags = pc_clk_set_flags,
+ .get_rate = pc_clk_get_rate,
+ .is_enabled = pc_clk_is_enabled,
+ .round_rate = pc_clk_round_rate,
+};
diff --git a/arch/arm/mach-msm/clock-pcom.h b/arch/arm/mach-msm/clock-pcom.h
new file mode 100644
index 000000000000..39a2976ae56b
--- /dev/null
+++ b/arch/arm/mach-msm/clock-pcom.h
@@ -0,0 +1,147 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_CLOCK_PCOM_H
+#define __ARCH_ARM_MACH_MSM_CLOCK_PCOM_H
+
+/* clock IDs used by the modem processor */
+
+#define P_ACPU_CLK 0 /* Applications processor clock */
+#define P_ADM_CLK 1 /* Applications data mover clock */
+#define P_ADSP_CLK 2 /* ADSP clock */
+#define P_EBI1_CLK 3 /* External bus interface 1 clock */
+#define P_EBI2_CLK 4 /* External bus interface 2 clock */
+#define P_ECODEC_CLK 5 /* External CODEC clock */
+#define P_EMDH_CLK 6 /* External MDDI host clock */
+#define P_GP_CLK 7 /* General purpose clock */
+#define P_GRP_CLK 8 /* Graphics clock */
+#define P_I2C_CLK 9 /* I2C clock */
+#define P_ICODEC_RX_CLK 10 /* Internal CODEX RX clock */
+#define P_ICODEC_TX_CLK 11 /* Internal CODEX TX clock */
+#define P_IMEM_CLK 12 /* Internal graphics memory clock */
+#define P_MDC_CLK 13 /* MDDI client clock */
+#define P_MDP_CLK 14 /* Mobile display processor clock */
+#define P_PBUS_CLK 15 /* Peripheral bus clock */
+#define P_PCM_CLK 16 /* PCM clock */
+#define P_PMDH_CLK 17 /* Primary MDDI host clock */
+#define P_SDAC_CLK 18 /* Stereo DAC clock */
+#define P_SDC1_CLK 19 /* Secure Digital Card clocks */
+#define P_SDC1_PCLK 20
+#define P_SDC2_CLK 21
+#define P_SDC2_PCLK 22
+#define P_SDC3_CLK 23
+#define P_SDC3_PCLK 24
+#define P_SDC4_CLK 25
+#define P_SDC4_PCLK 26
+#define P_TSIF_CLK 27 /* Transport Stream Interface clocks */
+#define P_TSIF_REF_CLK 28
+#define P_TV_DAC_CLK 29 /* TV clocks */
+#define P_TV_ENC_CLK 30
+#define P_UART1_CLK 31 /* UART clocks */
+#define P_UART2_CLK 32
+#define P_UART3_CLK 33
+#define P_UART1DM_CLK 34
+#define P_UART2DM_CLK 35
+#define P_USB_HS_CLK 36 /* High speed USB core clock */
+#define P_USB_HS_PCLK 37 /* High speed USB pbus clock */
+#define P_USB_OTG_CLK 38 /* Full speed USB clock */
+#define P_VDC_CLK 39 /* Video controller clock */
+#if CONFIG_MSM_AMSS_VERSION >= 6350
+#define P_VFE_MDC_CLK 40 /* Camera / Video Front End clock */
+#define P_VFE_CLK 41 /* VFE MDDI client clock */
+#else/* For radio code base others */
+#define P_VFE_MDC_CLK 41 /* VFE MDDI client clock */
+#define P_VFE_CLK 40 /* Camera / Video Front End clock */
+#endif
+
+#define P_MDP_LCDC_PCLK_CLK 42
+#define P_MDP_LCDC_PAD_PCLK_CLK 43
+#define P_MDP_VSYNC_CLK 44
+#define P_SPI_CLK 45
+#define P_VFE_AXI_CLK 46
+#define P_USB_HS2_CLK 47 /* High speed USB 2 core clock */
+#define P_USB_HS2_PCLK 48 /* High speed USB 2 pbus clock */
+#define P_USB_HS3_CLK 49 /* High speed USB 3 core clock */
+#define P_USB_HS3_PCLK 50 /* High speed USB 3 pbus clock */
+#define P_GRP_PCLK 51 /* Graphics pbus clock */
+#define P_USB_PHY_CLK 52 /* USB PHY clock */
+#define P_USB_HS_CORE_CLK 53 /* High speed USB 1 core clock */
+#define P_USB_HS2_CORE_CLK 54 /* High speed USB 2 core clock */
+#define P_USB_HS3_CORE_CLK 55 /* High speed USB 3 core clock */
+#define P_CAM_MCLK_CLK 56
+#define P_CAMIF_PAD_PCLK 57
+#define P_GRP_2D_CLK 58
+#define P_GRP_2D_PCLK 59
+#define P_I2S_CLK 60
+#define P_JPEG_CLK 61
+#define P_JPEG_PCLK 62
+#define P_LPA_CODEC_CLK 63
+#define P_LPA_CORE_CLK 64
+#define P_LPA_PCLK 65
+#define P_MDC_IO_CLK 66
+#define P_MDC_PCLK 67
+#define P_MFC_CLK 68
+#define P_MFC_DIV2_CLK 69
+#define P_MFC_PCLK 70
+#define P_QUP_I2C_CLK 71
+#define P_ROTATOR_IMEM_CLK 72
+#define P_ROTATOR_PCLK 73
+#define P_VFE_CAMIF_CLK 74
+#define P_VFE_PCLK 75
+#define P_VPE_CLK 76
+#define P_I2C_2_CLK 77
+#define P_MI2S_CODEC_RX_SCLK 78
+#define P_MI2S_CODEC_RX_MCLK 79
+#define P_MI2S_CODEC_TX_SCLK 80
+#define P_MI2S_CODEC_TX_MCLK 81
+#define P_PMDH_PCLK 82
+#define P_EMDH_PCLK 83
+#define P_SPI_PCLK 84
+#define P_TSIF_PCLK 85
+#define P_MDP_PCLK 86
+#define P_SDAC_MCLK 87
+#define P_MI2S_HDMI_CLK 88
+#define P_MI2S_HDMI_MCLK 89
+#define P_AXI_ROTATOR_CLK 90
+#define P_HDMI_CLK 91
+
+#define P_NR_CLKS 92
+
+struct clk_ops;
+extern struct clk_ops clk_ops_pcom;
+
+#define CLK_PCOM(clk_name, clk_id, clk_dev, clk_flags) { \
+ .name = clk_name, \
+ .id = P_##clk_id, \
+ .ops = &clk_ops_pcom, \
+ .flags = clk_flags, \
+ .dev = clk_dev, \
+ .dbg_name = #clk_id, \
+ }
+
+#endif
diff --git a/arch/arm/mach-msm/clock.c b/arch/arm/mach-msm/clock.c
index 3b1ce36f1032..c7622f9c2169 100644
--- a/arch/arm/mach-msm/clock.c
+++ b/arch/arm/mach-msm/clock.c
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/clock.c
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -22,68 +22,28 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/pm_qos_params.h>
+#include <mach/clk.h>
#include "clock.h"
#include "proc_comm.h"
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clocks_lock);
+static DEFINE_SPINLOCK(ebi1_vote_lock);
static LIST_HEAD(clocks);
+struct clk *msm_clocks;
+unsigned msm_num_clocks;
/*
- * glue for the proc_comm interface
+ * Bitmap of enabled clocks, excluding ACPU which is always
+ * enabled
*/
-static inline int pc_clk_enable(unsigned id)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL);
-}
-
-static inline void pc_clk_disable(unsigned id)
-{
- msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL);
-}
-
-static inline int pc_clk_set_rate(unsigned id, unsigned rate)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_SET_RATE, &id, &rate);
-}
-
-static inline int pc_clk_set_min_rate(unsigned id, unsigned rate)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_MIN_RATE, &id, &rate);
-}
-
-static inline int pc_clk_set_max_rate(unsigned id, unsigned rate)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_MAX_RATE, &id, &rate);
-}
-
-static inline int pc_clk_set_flags(unsigned id, unsigned flags)
-{
- return msm_proc_comm(PCOM_CLKCTL_RPC_SET_FLAGS, &id, &flags);
-}
-
-static inline unsigned pc_clk_get_rate(unsigned id)
-{
- if (msm_proc_comm(PCOM_CLKCTL_RPC_RATE, &id, NULL))
- return 0;
- else
- return id;
-}
-
-static inline unsigned pc_clk_is_enabled(unsigned id)
-{
- if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLED, &id, NULL))
- return 0;
- else
- return id;
-}
-
-static inline int pc_pll_request(unsigned id, unsigned on)
-{
- on = !!on;
- return msm_proc_comm(PCOM_CLKCTL_RPC_PLL_REQUEST, &id, &on);
-}
+static DECLARE_BITMAP(clock_map_enabled, NR_CLKS);
+static DEFINE_SPINLOCK(clock_map_lock);
+static struct notifier_block axi_freq_notifier_block;
/*
* Standard clock functions defined in include/linux/clk.h
@@ -119,8 +79,12 @@ int clk_enable(struct clk *clk)
unsigned long flags;
spin_lock_irqsave(&clocks_lock, flags);
clk->count++;
- if (clk->count == 1)
- pc_clk_enable(clk->id);
+ if (clk->count == 1) {
+ clk->ops->enable(clk->id);
+ spin_lock(&clock_map_lock);
+ clock_map_enabled[BIT_WORD(clk->id)] |= BIT_MASK(clk->id);
+ spin_unlock(&clock_map_lock);
+ }
spin_unlock_irqrestore(&clocks_lock, flags);
return 0;
}
@@ -132,31 +96,52 @@ void clk_disable(struct clk *clk)
spin_lock_irqsave(&clocks_lock, flags);
BUG_ON(clk->count == 0);
clk->count--;
- if (clk->count == 0)
- pc_clk_disable(clk->id);
+ if (clk->count == 0) {
+ clk->ops->disable(clk->id);
+ spin_lock(&clock_map_lock);
+ clock_map_enabled[BIT_WORD(clk->id)] &= ~BIT_MASK(clk->id);
+ spin_unlock(&clock_map_lock);
+ }
spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
+int clk_reset(struct clk *clk, enum clk_reset_action action)
+{
+ return clk->ops->reset(clk->id, action);
+}
+EXPORT_SYMBOL(clk_reset);
+
unsigned long clk_get_rate(struct clk *clk)
{
- return pc_clk_get_rate(clk->id);
+ return clk->ops->get_rate(clk->id);
}
EXPORT_SYMBOL(clk_get_rate);
int clk_set_rate(struct clk *clk, unsigned long rate)
{
- int ret;
- if (clk->flags & CLKFLAG_USE_MIN_MAX_TO_SET) {
- ret = pc_clk_set_max_rate(clk->id, rate);
- if (ret)
- return ret;
- return pc_clk_set_min_rate(clk->id, rate);
- }
- return pc_clk_set_rate(clk->id, rate);
+ return clk->ops->set_rate(clk->id, rate);
}
EXPORT_SYMBOL(clk_set_rate);
+long clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ return clk->ops->round_rate(clk->id, rate);
+}
+EXPORT_SYMBOL(clk_round_rate);
+
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+ return clk->ops->set_min_rate(clk->id, rate);
+}
+EXPORT_SYMBOL(clk_set_min_rate);
+
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+ return clk->ops->set_max_rate(clk->id, rate);
+}
+EXPORT_SYMBOL(clk_set_max_rate);
+
int clk_set_parent(struct clk *clk, struct clk *parent)
{
return -ENOSYS;
@@ -173,22 +158,231 @@ int clk_set_flags(struct clk *clk, unsigned long flags)
{
if (clk == NULL || IS_ERR(clk))
return -EINVAL;
- return pc_clk_set_flags(clk->id, flags);
+ return clk->ops->set_flags(clk->id, flags);
}
EXPORT_SYMBOL(clk_set_flags);
+/* EBI1 is the only shared clock that several clients want to vote on as of
+ * this commit. If this changes in the future, then it might be better to
+ * make clk_min_rate handle the voting or make ebi1_clk_set_min_rate more
+ * generic to support different clocks.
+ */
+static unsigned long ebi1_min_rate[CLKVOTE_MAX];
+static struct clk *ebi1_clk;
+
+/* Rate is in Hz to be consistent with the other clk APIs. */
+int ebi1_clk_set_min_rate(enum clkvote_client client, unsigned long rate)
+{
+ static unsigned long last_set_val = -1;
+ unsigned long new_val;
+ unsigned long flags;
+ int ret = 0, i;
+
+ spin_lock_irqsave(&ebi1_vote_lock, flags);
+
+ ebi1_min_rate[client] = (rate == MSM_AXI_MAX_FREQ) ?
+ (clk_get_max_axi_khz() * 1000) : rate;
+
+ new_val = ebi1_min_rate[0];
+ for (i = 1; i < CLKVOTE_MAX; i++)
+ if (ebi1_min_rate[i] > new_val)
+ new_val = ebi1_min_rate[i];
+
+ /* This check is to save a proc_comm call. */
+ if (last_set_val != new_val) {
+ ret = clk_set_min_rate(ebi1_clk, new_val);
+ if (ret < 0) {
+ pr_err("Setting EBI1 min rate to %lu Hz failed!\n",
+ new_val);
+ pr_err("Last successful value was %lu Hz.\n",
+ last_set_val);
+ } else {
+ last_set_val = new_val;
+ }
+ }
-void __init msm_clock_init(void)
+ spin_unlock_irqrestore(&ebi1_vote_lock, flags);
+
+ return ret;
+}
+
+static int axi_freq_notifier_handler(struct notifier_block *block,
+ unsigned long min_freq, void *v)
+{
+ /* convert min_freq from KHz to Hz, unless it's a magic value */
+ if (min_freq != MSM_AXI_MAX_FREQ)
+ min_freq *= 1000;
+
+ return ebi1_clk_set_min_rate(CLKVOTE_PMQOS, min_freq);
+}
+
+/*
+ * Find out whether any clock is enabled that needs the TCXO clock.
+ *
+ * On exit, the buffer 'reason' holds a bitmap of ids of all enabled
+ * clocks found that require TCXO.
+ *
+ * reason: buffer to hold the bitmap; must be compatible with
+ * linux/bitmap.h
+ * nbits: number of bits that the buffer can hold; 0 is ok
+ *
+ * Return value:
+ * 0: does not require the TCXO clock
+ * 1: requires the TCXO clock
+ */
+int msm_clock_require_tcxo(unsigned long *reason, int nbits)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&clock_map_lock, flags);
+ ret = !bitmap_empty(clock_map_enabled, NR_CLKS);
+ if (nbits > 0)
+ bitmap_copy(reason, clock_map_enabled, min(nbits, NR_CLKS));
+ spin_unlock_irqrestore(&clock_map_lock, flags);
+
+ return ret;
+}
+
+/*
+ * Find the clock matching the given id and copy its name to the
+ * provided buffer.
+ *
+ * Return value:
+ * -ENODEV: there is no clock matching the given id
+ * 0: success
+ */
+int msm_clock_get_name(uint32_t id, char *name, uint32_t size)
+{
+ struct clk *c_clk;
+ int ret = -ENODEV;
+
+ mutex_lock(&clocks_mutex);
+ list_for_each_entry(c_clk, &clocks, list) {
+ if (id == c_clk->id) {
+ strlcpy(name, c_clk->name, size);
+ ret = 0;
+ break;
+ }
+ }
+ mutex_unlock(&clocks_mutex);
+
+ return ret;
+}
+
+void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks)
{
unsigned n;
spin_lock_init(&clocks_lock);
mutex_lock(&clocks_mutex);
+ msm_clocks = clock_tbl;
+ msm_num_clocks = num_clocks;
for (n = 0; n < msm_num_clocks; n++)
list_add_tail(&msm_clocks[n].list, &clocks);
mutex_unlock(&clocks_mutex);
+
+ ebi1_clk = clk_get(NULL, "ebi1_clk");
+ BUG_ON(ebi1_clk == NULL);
+
+ axi_freq_notifier_block.notifier_call = axi_freq_notifier_handler;
+ pm_qos_add_notifier(PM_QOS_SYSTEM_BUS_FREQ, &axi_freq_notifier_block);
+
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static struct clk *msm_clock_get_nth(unsigned index)
+{
+ if (index < msm_num_clocks)
+ return msm_clocks + index;
+ else
+ return 0;
}
+static int clock_debug_rate_set(void *data, u64 val)
+{
+ struct clk *clock = data;
+ int ret;
+
+ /* Only increases to max rate will succeed, but that's actually good
+ * for debugging purposes. So we don't check for error. */
+ if (clock->flags & CLK_MAX)
+ clk_set_max_rate(clock, val);
+ if (clock->flags & CLK_MIN)
+ ret = clk_set_min_rate(clock, val);
+ else
+ ret = clk_set_rate(clock, val);
+ if (ret != 0)
+ printk(KERN_ERR "clk_set%s_rate failed (%d)\n",
+ (clock->flags & CLK_MIN) ? "_min" : "", ret);
+ return ret;
+}
+
+static int clock_debug_rate_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+ *val = clk_get_rate(clock);
+ return 0;
+}
+
+static int clock_debug_enable_set(void *data, u64 val)
+{
+ struct clk *clock = data;
+ int rc = 0;
+
+ if (val)
+ rc = clock->ops->enable(clock->id);
+ else
+ clock->ops->disable(clock->id);
+
+ return rc;
+}
+
+static int clock_debug_enable_get(void *data, u64 *val)
+{
+ struct clk *clock = data;
+
+ *val = clock->ops->is_enabled(clock->id);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
+ clock_debug_rate_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
+ clock_debug_enable_set, "%llu\n");
+
+static int __init clock_debug_init(void)
+{
+ struct dentry *dent_rate;
+ struct dentry *dent_enable;
+ struct clk *clock;
+ unsigned n = 0;
+ char temp[50], *ptr;
+
+ dent_rate = debugfs_create_dir("clk_rate", 0);
+ if (IS_ERR(dent_rate))
+ return PTR_ERR(dent_rate);
+
+ dent_enable = debugfs_create_dir("clk_enable", 0);
+ if (IS_ERR(dent_enable))
+ return PTR_ERR(dent_enable);
+
+ while ((clock = msm_clock_get_nth(n++)) != 0) {
+ strncpy(temp, clock->dbg_name, ARRAY_SIZE(temp)-1);
+ for (ptr = temp; *ptr; ptr++)
+ *ptr = tolower(*ptr);
+ debugfs_create_file(temp, 0644, dent_rate,
+ clock, &clock_rate_fops);
+ debugfs_create_file(temp, 0644, dent_enable,
+ clock, &clock_enable_fops);
+ }
+ return 0;
+}
+
+device_initcall(clock_debug_init);
+#endif
+
/* The bootloader and/or AMSS may have left various clocks enabled.
* Disable any clocks that belong to us (CLKFLAG_AUTO_OFF) but have
* not been explicitly enabled by a clk_enable() call.
@@ -205,7 +399,7 @@ static int __init clock_late_init(void)
spin_lock_irqsave(&clocks_lock, flags);
if (!clk->count) {
count++;
- pc_clk_disable(clk->id);
+ clk->ops->disable(clk->id);
}
spin_unlock_irqrestore(&clocks_lock, flags);
}
diff --git a/arch/arm/mach-msm/clock.h b/arch/arm/mach-msm/clock.h
index f875e1544e5f..9608a6c6bbcb 100644
--- a/arch/arm/mach-msm/clock.h
+++ b/arch/arm/mach-msm/clock.h
@@ -1,7 +1,7 @@
/* arch/arm/mach-msm/clock.h
*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007 QUALCOMM Incorporated
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -18,6 +18,10 @@
#define __ARCH_ARM_MACH_MSM_CLOCK_H
#include <linux/list.h>
+#include <mach/clk.h>
+
+#include "clock-pcom.h"
+#include "clock-7x30.h"
#define CLKFLAG_INVERT 0x00000001
#define CLKFLAG_NOINVERT 0x00000002
@@ -25,14 +29,30 @@
#define CLKFLAG_NORESET 0x00000008
#define CLK_FIRST_AVAILABLE_FLAG 0x00000100
-#define CLKFLAG_USE_MIN_MAX_TO_SET 0x00000200
-#define CLKFLAG_AUTO_OFF 0x00000400
+#define CLKFLAG_AUTO_OFF 0x00000200
+#define CLKFLAG_MIN 0x00000400
+#define CLKFLAG_MAX 0x00000800
+
+struct clk_ops {
+ int (*enable)(unsigned id);
+ void (*disable)(unsigned id);
+ int (*reset)(unsigned id, enum clk_reset_action action);
+ int (*set_rate)(unsigned id, unsigned rate);
+ int (*set_min_rate)(unsigned id, unsigned rate);
+ int (*set_max_rate)(unsigned id, unsigned rate);
+ int (*set_flags)(unsigned id, unsigned flags);
+ unsigned (*get_rate)(unsigned id);
+ unsigned (*is_enabled)(unsigned id);
+ long (*round_rate)(unsigned id, unsigned rate);
+};
struct clk {
uint32_t id;
uint32_t count;
uint32_t flags;
const char *name;
+ struct clk_ops *ops;
+ const char *dbg_name;
struct list_head list;
struct device *dev;
};
@@ -41,8 +61,47 @@ struct clk {
#define A11S_CLK_SEL_ADDR (MSM_CSR_BASE + 0x104)
#define A11S_VDD_SVS_PLEVEL_ADDR (MSM_CSR_BASE + 0x124)
-extern struct clk msm_clocks[];
-extern unsigned msm_num_clocks;
+#ifdef CONFIG_DEBUG_FS
+#define CLOCK_DBG_NAME(x) .dbg_name = x,
+#else
+#define CLOCK_DBG_NAME(x)
+#endif
+
+#define CLOCK(clk_name, clk_id, clk_dev, clk_flags) { \
+ .name = clk_name, \
+ .id = clk_id, \
+ .flags = clk_flags, \
+ .dev = clk_dev, \
+ CLOCK_DBG_NAME(#clk_id) \
+ }
+
+#define OFF CLKFLAG_AUTO_OFF
+#define CLK_MIN CLKFLAG_MIN
+#define CLK_MAX CLKFLAG_MAX
+#define CLK_MINMAX (CLK_MIN | CLK_MAX)
+#define NR_CLKS P_NR_CLKS
+
+enum {
+ PLL_0 = 0,
+ PLL_1,
+ PLL_2,
+ PLL_3,
+ PLL_4,
+ PLL_5,
+ PLL_6,
+ NUM_PLL
+};
+
+enum clkvote_client {
+ CLKVOTE_ACPUCLK = 0,
+ CLKVOTE_PMQOS,
+ CLKVOTE_MAX,
+};
+
+int msm_clock_require_tcxo(unsigned long *reason, int nbits);
+int msm_clock_get_name(uint32_t id, char *name, uint32_t size);
+int ebi1_clk_set_min_rate(enum clkvote_client client, unsigned long rate);
+unsigned long clk_get_max_axi_khz(void);
#endif
diff --git a/arch/arm/mach-msm/cpufreq.c b/arch/arm/mach-msm/cpufreq.c
new file mode 100644
index 000000000000..8c20add08267
--- /dev/null
+++ b/arch/arm/mach-msm/cpufreq.c
@@ -0,0 +1,126 @@
+/* arch/arm/mach-msm/cpufreq.c
+ *
+ * MSM architecture cpufreq driver
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * Author: Mike A. Chan <mikechan@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include "acpuclock.h"
+
+#define dprintk(msg...) \
+ cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "cpufreq-msm", msg)
+
+#ifdef CONFIG_MSM_CPU_FREQ_SCREEN
+static void msm_early_suspend(struct early_suspend *handler)
+{
+ acpuclk_set_rate(CONFIG_MSM_CPU_FREQ_SCREEN_OFF * 1000, SETRATE_CPUFREQ);
+}
+
+static void msm_late_resume(struct early_suspend *handler)
+{
+ acpuclk_set_rate(CONFIG_MSM_CPU_FREQ_SCREEN_ON * 1000, SETRATE_CPUFREQ);
+}
+
+static struct early_suspend msm_power_suspend = {
+ .suspend = msm_early_suspend,
+ .resume = msm_late_resume,
+};
+
+static int __init clock_late_init(void)
+{
+ register_early_suspend(&msm_power_suspend);
+ return 0;
+}
+
+late_initcall(clock_late_init);
+#elif defined(CONFIG_CPU_FREQ_MSM)
+
+static int msm_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ int index;
+ int ret = 0;
+ struct cpufreq_freqs freqs;
+ struct cpufreq_frequency_table *table =
+ cpufreq_frequency_get_table(smp_processor_id());
+
+ if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
+ &index)) {
+ pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
+ return -EINVAL;
+ }
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ dprintk("target %d r %d (%d-%d) selected %d\n", target_freq,
+ relation, policy->min, policy->max, table[index].frequency);
+#endif
+ freqs.old = policy->cur;
+ freqs.new = table[index].frequency;
+ freqs.cpu = smp_processor_id();
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ ret = acpuclk_set_rate(table[index].frequency * 1000, SETRATE_CPUFREQ);
+ if (!ret)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ return ret;
+}
+
+static int msm_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+ return 0;
+}
+
+static int __init msm_cpufreq_init(struct cpufreq_policy *policy)
+{
+ struct cpufreq_frequency_table *table =
+ cpufreq_frequency_get_table(smp_processor_id());
+
+ policy->cur = acpuclk_get_rate();
+ if (cpufreq_frequency_table_cpuinfo(policy, table)) {
+#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
+ policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
+ policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
+#endif
+ }
+#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
+ policy->min = CONFIG_MSM_CPU_FREQ_MIN;
+ policy->max = CONFIG_MSM_CPU_FREQ_MAX;
+#endif
+
+ policy->cpuinfo.transition_latency =
+ acpuclk_get_switch_time() * NSEC_PER_USEC;
+ return 0;
+}
+
+static struct cpufreq_driver msm_cpufreq_driver = {
+ /* lps calculations are handled here. */
+ .flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
+ .init = msm_cpufreq_init,
+ .verify = msm_cpufreq_verify,
+ .target = msm_cpufreq_target,
+ .name = "msm",
+};
+
+static int __init msm_cpufreq_register(void)
+{
+ return cpufreq_register_driver(&msm_cpufreq_driver);
+}
+
+late_initcall(msm_cpufreq_register);
+#endif
diff --git a/arch/arm/mach-msm/dal.c b/arch/arm/mach-msm/dal.c
new file mode 100644
index 000000000000..cefe03c5eb24
--- /dev/null
+++ b/arch/arm/mach-msm/dal.c
@@ -0,0 +1,1363 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * Device access library (DAL) implementation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/semaphore.h>
+
+#include <mach/dal.h>
+#include <mach/msm_smd.h>
+
+#define DALRPC_PROTOCOL_VERSION 0x11
+#define DALRPC_SUCCESS 0
+#define DALRPC_MAX_PORTNAME_LEN 64
+#define DALRPC_MAX_ATTACH_PARAM_LEN 64
+#define DALRPC_MAX_SERVICE_NAME_LEN 32
+#define DALRPC_MAX_PARAMS 128
+#define DALRPC_MAX_PARAMS_SIZE (DALRPC_MAX_PARAMS * 4)
+#define DALRPC_MAX_MSG_SIZE (sizeof(struct dalrpc_msg_hdr) + \
+ DALRPC_MAX_PARAMS_SIZE)
+#define DALRPC_MSGID_DDI 0x0
+#define DALRPC_MSGID_DDI_REPLY 0x80
+#define DALRPC_MSGID_ATTACH_REPLY 0x81
+#define DALRPC_MSGID_DETACH_REPLY 0x82
+#define DALRPC_MSGID_ASYNCH 0xC0
+#define ROUND_BUFLEN(x) (((x + 3) & ~0x3))
+
+struct dalrpc_msg_hdr {
+ uint32_t len:16;
+ uint32_t proto_ver:8;
+ uint32_t prio:7;
+ uint32_t async:1;
+ uint32_t ddi_idx:16;
+ uint32_t proto_id:8;
+ uint32_t msgid:8;
+ void *from;
+ void *to;
+};
+
+struct dalrpc_msg {
+ struct dalrpc_msg_hdr hdr;
+ uint32_t param[DALRPC_MAX_PARAMS];
+};
+
+struct dalrpc_event_handle {
+ struct list_head list;
+
+ int flag;
+ spinlock_t lock;
+};
+
+struct dalrpc_cb_handle {
+ struct list_head list;
+
+ void (*fn)(void *, uint32_t, void *, uint32_t);
+ void *context;
+};
+
+struct daldevice_handle {;
+ struct list_head list;
+
+ void *remote_handle;
+ struct completion read_completion;
+ struct dalrpc_port *port;
+ struct dalrpc_msg msg;
+ struct mutex client_lock;
+};
+
+struct dalrpc_port {
+ struct list_head list;
+
+ char port[DALRPC_MAX_PORTNAME_LEN+1];
+ int refcount;
+
+ struct workqueue_struct *wq;
+ struct work_struct port_work;
+ struct mutex write_lock;
+
+ smd_channel_t *ch;
+
+ struct dalrpc_msg msg_in;
+ struct daldevice_handle *msg_owner;
+ unsigned msg_bytes_read;
+
+ struct list_head event_list;
+ struct mutex event_list_lock;
+
+ struct list_head cb_list;
+ struct mutex cb_list_lock;
+};
+
+static LIST_HEAD(port_list);
+static LIST_HEAD(client_list);
+static DEFINE_MUTEX(pc_lists_lock);
+
+static DECLARE_WAIT_QUEUE_HEAD(event_wq);
+
+static int client_exists(void *handle)
+{
+ struct daldevice_handle *h;
+
+ if (!handle)
+ return 0;
+
+ mutex_lock(&pc_lists_lock);
+
+ list_for_each_entry(h, &client_list, list)
+ if (h == handle) {
+ mutex_unlock(&pc_lists_lock);
+ return 1;
+ }
+
+ mutex_unlock(&pc_lists_lock);
+
+ return 0;
+}
+
+static int client_exists_locked(void *handle)
+{
+ struct daldevice_handle *h;
+
+ /* this function must be called with pc_lists_lock acquired */
+
+ if (!handle)
+ return 0;
+
+ list_for_each_entry(h, &client_list, list)
+ if (h == handle)
+ return 1;
+
+ return 0;
+}
+
+static int port_exists(struct dalrpc_port *p)
+{
+ struct dalrpc_port *p_iter;
+
+ /* this function must be called with pc_lists_lock acquired */
+
+ if (!p)
+ return 0;
+
+ list_for_each_entry(p_iter, &port_list, list)
+ if (p_iter == p)
+ return 1;
+
+ return 0;
+}
+
+static struct dalrpc_port *port_name_exists(char *port)
+{
+ struct dalrpc_port *p;
+
+ /* this function must be called with pc_lists_lock acquired */
+
+ list_for_each_entry(p, &port_list, list)
+ if (!strcmp(p->port, port))
+ return p;
+
+ return NULL;
+}
+
+static void port_close(struct dalrpc_port *p)
+{
+ mutex_lock(&pc_lists_lock);
+
+ p->refcount--;
+ if (p->refcount == 0)
+ list_del(&p->list);
+
+ mutex_unlock(&pc_lists_lock);
+
+ if (p->refcount == 0) {
+ destroy_workqueue(p->wq);
+ smd_close(p->ch);
+ kfree(p);
+ }
+}
+
+static int event_exists(struct dalrpc_port *p,
+ struct dalrpc_event_handle *ev)
+{
+ struct dalrpc_event_handle *ev_iter;
+
+ /* this function must be called with event_list_lock acquired */
+
+ list_for_each_entry(ev_iter, &p->event_list, list)
+ if (ev_iter == ev)
+ return 1;
+
+ return 0;
+}
+
+static int cb_exists(struct dalrpc_port *p,
+ struct dalrpc_cb_handle *cb)
+{
+ struct dalrpc_cb_handle *cb_iter;
+
+ /* this function must be called with the cb_list_lock acquired */
+
+ list_for_each_entry(cb_iter, &p->cb_list, list)
+ if (cb_iter == cb)
+ return 1;
+
+ return 0;
+}
+
+static int check_version(struct dalrpc_msg_hdr *msg_hdr)
+{
+ static int version_msg = 1;
+
+ /* disabled because asynch events currently have no version */
+ return 0;
+
+ if (msg_hdr->proto_ver != DALRPC_PROTOCOL_VERSION) {
+ if (version_msg) {
+ printk(KERN_ERR "dalrpc: incompatible verison\n");
+ version_msg = 0;
+ }
+ return -1;
+ }
+ return 0;
+}
+
+static void process_asynch(struct dalrpc_port *p)
+{
+ struct dalrpc_event_handle *ev;
+ struct dalrpc_cb_handle *cb;
+
+ ev = (struct dalrpc_event_handle *)p->msg_in.param[0];
+ cb = (struct dalrpc_cb_handle *)p->msg_in.param[0];
+
+ mutex_lock(&p->event_list_lock);
+ if (event_exists(p, ev)) {
+ spin_lock(&ev->lock);
+ ev->flag = 1;
+ spin_unlock(&ev->lock);
+ smp_mb();
+ wake_up_all(&event_wq);
+ mutex_unlock(&p->event_list_lock);
+ return;
+ }
+ mutex_unlock(&p->event_list_lock);
+
+ mutex_lock(&p->cb_list_lock);
+ if (cb_exists(p, cb)) {
+ cb->fn(cb->context, p->msg_in.param[1],
+ &p->msg_in.param[3], p->msg_in.param[2]);
+ mutex_unlock(&p->cb_list_lock);
+ return;
+ }
+ mutex_unlock(&p->cb_list_lock);
+}
+
+static void process_msg(struct dalrpc_port *p)
+{
+ switch (p->msg_in.hdr.msgid) {
+
+ case DALRPC_MSGID_DDI_REPLY:
+ case DALRPC_MSGID_ATTACH_REPLY:
+ case DALRPC_MSGID_DETACH_REPLY:
+ complete(&p->msg_owner->read_completion);
+ break;
+
+ case DALRPC_MSGID_ASYNCH:
+ process_asynch(p);
+ break;
+
+ default:
+ printk(KERN_ERR "process_msg: bad msgid %#x\n",
+ p->msg_in.hdr.msgid);
+ }
+}
+
+static void flush_msg(struct dalrpc_port *p)
+{
+ int bytes_read, len;
+
+ len = p->msg_in.hdr.len - sizeof(struct dalrpc_msg_hdr);
+ while (len > 0) {
+ bytes_read = smd_read(p->ch, NULL, len);
+ if (bytes_read <= 0)
+ break;
+ len -= bytes_read;
+ }
+ p->msg_bytes_read = 0;
+}
+
+static int check_header(struct dalrpc_port *p)
+{
+ if (check_version(&p->msg_in.hdr) ||
+ p->msg_in.hdr.len > DALRPC_MAX_MSG_SIZE ||
+ (p->msg_in.hdr.msgid != DALRPC_MSGID_ASYNCH &&
+ !client_exists_locked(p->msg_in.hdr.to))) {
+ printk(KERN_ERR "dalrpc_read_msg: bad msg\n");
+ flush_msg(p);
+ return 1;
+ }
+ p->msg_owner = (struct daldevice_handle *)p->msg_in.hdr.to;
+
+ if (p->msg_in.hdr.msgid != DALRPC_MSGID_ASYNCH)
+ memcpy(&p->msg_owner->msg.hdr, &p->msg_in.hdr,
+ sizeof(p->msg_in.hdr));
+
+ return 0;
+}
+
+static int dalrpc_read_msg(struct dalrpc_port *p)
+{
+ uint8_t *read_ptr;
+ int bytes_read;
+
+ /* read msg header */
+ while (p->msg_bytes_read < sizeof(p->msg_in.hdr)) {
+ read_ptr = (uint8_t *)&p->msg_in.hdr + p->msg_bytes_read;
+
+ bytes_read = smd_read(p->ch, read_ptr,
+ sizeof(p->msg_in.hdr) -
+ p->msg_bytes_read);
+ if (bytes_read <= 0)
+ return 0;
+ p->msg_bytes_read += bytes_read;
+
+ if (p->msg_bytes_read == sizeof(p->msg_in.hdr) &&
+ check_header(p))
+ return 1;
+ }
+
+ /* read remainder of msg */
+ if (p->msg_in.hdr.msgid != DALRPC_MSGID_ASYNCH)
+ read_ptr = (uint8_t *)&p->msg_owner->msg;
+ else
+ read_ptr = (uint8_t *)&p->msg_in;
+ read_ptr += p->msg_bytes_read;
+
+ while (p->msg_bytes_read < p->msg_in.hdr.len) {
+ bytes_read = smd_read(p->ch, read_ptr,
+ p->msg_in.hdr.len - p->msg_bytes_read);
+ if (bytes_read <= 0)
+ return 0;
+ p->msg_bytes_read += bytes_read;
+ read_ptr += bytes_read;
+ }
+
+ process_msg(p);
+ p->msg_bytes_read = 0;
+ p->msg_owner = NULL;
+
+ return 1;
+}
+
+static void dalrpc_work(struct work_struct *work)
+{
+ struct dalrpc_port *p = container_of(work,
+ struct dalrpc_port,
+ port_work);
+
+ /* must lock port/client lists to ensure port doesn't disappear
+ under an asynch event */
+ mutex_lock(&pc_lists_lock);
+ if (port_exists(p))
+ while (dalrpc_read_msg(p))
+ ;
+ mutex_unlock(&pc_lists_lock);
+}
+
+static void dalrpc_smd_cb(void *priv, unsigned smd_flags)
+{
+ struct dalrpc_port *p = priv;
+
+ if (smd_flags != SMD_EVENT_DATA)
+ return;
+
+ queue_work(p->wq, &p->port_work);
+}
+
+static struct dalrpc_port *dalrpc_port_open(char *port, int cpu)
+{
+ struct dalrpc_port *p;
+ char wq_name[32];
+
+ p = port_name_exists(port);
+ if (p) {
+ p->refcount++;
+ return p;
+ }
+
+ p = kzalloc(sizeof(struct dalrpc_port), GFP_KERNEL);
+ if (!p)
+ return NULL;
+
+ strncpy(p->port, port, sizeof(p->port) - 1);
+ p->refcount = 1;
+
+ snprintf(wq_name, sizeof(wq_name), "dalrpc_rcv_%s", port);
+ p->wq = create_singlethread_workqueue(wq_name);
+ if (!p->wq) {
+ printk(KERN_ERR "dalrpc_init: unable to create workqueue\n");
+ goto no_wq;
+ }
+ INIT_WORK(&p->port_work, dalrpc_work);
+
+ mutex_init(&p->write_lock);
+ mutex_init(&p->event_list_lock);
+ mutex_init(&p->cb_list_lock);
+
+ INIT_LIST_HEAD(&p->event_list);
+ INIT_LIST_HEAD(&p->cb_list);
+
+ p->msg_owner = NULL;
+ p->msg_bytes_read = 0;
+
+ if (smd_named_open_on_edge(port, cpu, &p->ch, p,
+ dalrpc_smd_cb)) {
+ printk(KERN_ERR "dalrpc_port_init() failed to open port\n");
+ goto no_smd;
+ }
+
+ list_add(&p->list, &port_list);
+
+ return p;
+
+no_smd:
+ destroy_workqueue(p->wq);
+no_wq:
+ kfree(p);
+ return NULL;
+}
+
+static void dalrpc_sendwait(struct daldevice_handle *h)
+{
+ u8 *buf = (u8 *)&h->msg;
+ int len = h->msg.hdr.len;
+ int written;
+
+ mutex_lock(&h->port->write_lock);
+ do {
+ written = smd_write(h->port->ch, buf + (h->msg.hdr.len - len),
+ len);
+ if (written < 0)
+ break;
+ len -= written;
+ } while (len);
+ mutex_unlock(&h->port->write_lock);
+
+ wait_for_completion(&h->read_completion);
+}
+
+int daldevice_attach(uint32_t device_id, char *port, int cpu,
+ void **handle_ptr)
+{
+ struct daldevice_handle *h;
+ char dyn_port[DALRPC_MAX_PORTNAME_LEN + 1] = "DAL00";
+ int ret;
+ int tries = 0;
+
+ if (!port)
+ port = dyn_port;
+
+ if (strlen(port) > DALRPC_MAX_PORTNAME_LEN)
+ return -EINVAL;
+
+ h = kzalloc(sizeof(struct daldevice_handle), GFP_KERNEL);
+ if (!h) {
+ *handle_ptr = NULL;
+ return -ENOMEM;
+ }
+
+ init_completion(&h->read_completion);
+ mutex_init(&h->client_lock);
+
+ mutex_lock(&pc_lists_lock);
+ list_add(&h->list, &client_list);
+ mutex_unlock(&pc_lists_lock);
+
+ /* 3 attempts, enough for one each on the user specified port, the
+ * dynamic discovery port, and the port recommended by the dynamic
+ * discovery port */
+ while (tries < 3) {
+ tries++;
+
+ mutex_lock(&pc_lists_lock);
+ h->port = dalrpc_port_open(port, cpu);
+ if (!h->port) {
+ list_del(&h->list);
+ mutex_unlock(&pc_lists_lock);
+ printk(KERN_ERR "daldevice_attach: could not "
+ "open port\n");
+ kfree(h);
+ *handle_ptr = NULL;
+ return -EIO;
+ }
+ mutex_unlock(&pc_lists_lock);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4 +
+ DALRPC_MAX_ATTACH_PARAM_LEN +
+ DALRPC_MAX_SERVICE_NAME_LEN;
+ h->msg.hdr.proto_ver = DALRPC_PROTOCOL_VERSION;
+ h->msg.hdr.ddi_idx = 0;
+ h->msg.hdr.msgid = 0x1;
+ h->msg.hdr.prio = 0;
+ h->msg.hdr.async = 0;
+ h->msg.hdr.from = h;
+ h->msg.hdr.to = 0;
+ h->msg.param[0] = device_id;
+
+ memset(&h->msg.param[1], 0,
+ DALRPC_MAX_ATTACH_PARAM_LEN +
+ DALRPC_MAX_SERVICE_NAME_LEN);
+
+ dalrpc_sendwait(h);
+ ret = h->msg.param[0];
+
+ if (ret == DALRPC_SUCCESS) {
+ h->remote_handle = h->msg.hdr.from;
+ *handle_ptr = h;
+ break;
+ } else if (strnlen((char *)&h->msg.param[1],
+ DALRPC_MAX_PORTNAME_LEN)) {
+ /* another port was recommended in the response. */
+ strncpy(dyn_port, (char *)&h->msg.param[1],
+ DALRPC_MAX_PORTNAME_LEN);
+ dyn_port[DALRPC_MAX_PORTNAME_LEN] = 0;
+ port = dyn_port;
+ } else if (port == dyn_port) {
+ /* the dynamic discovery port (or port that
+ * was recommended by it) did not recognize
+ * the device id, give up */
+ daldevice_detach(h);
+ break;
+ } else
+ /* the user specified port did not work, try
+ * the dynamic discovery port */
+ port = dyn_port;
+
+ port_close(h->port);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(daldevice_attach);
+
+static void dalrpc_ddi_prologue(uint32_t ddi_idx, struct daldevice_handle *h)
+{
+ h->msg.hdr.proto_ver = DALRPC_PROTOCOL_VERSION;
+ h->msg.hdr.prio = 0;
+ h->msg.hdr.async = 0;
+ h->msg.hdr.msgid = DALRPC_MSGID_DDI;
+ h->msg.hdr.from = h;
+ h->msg.hdr.to = h->remote_handle;
+ h->msg.hdr.ddi_idx = ddi_idx;
+}
+
+int daldevice_detach(void *handle)
+{
+ struct daldevice_handle *h = handle;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ dalrpc_ddi_prologue(0, h);
+
+ if (!h->remote_handle)
+ goto norpc;
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4;
+ h->msg.hdr.msgid = 0x2;
+ h->msg.param[0] = 0;
+
+ dalrpc_sendwait(h);
+
+norpc:
+ mutex_lock(&pc_lists_lock);
+ list_del(&h->list);
+ mutex_unlock(&pc_lists_lock);
+
+ port_close(h->port);
+
+ kfree(h);
+
+ return 0;
+}
+EXPORT_SYMBOL(daldevice_detach);
+
+uint32_t dalrpc_fcn_0(uint32_t ddi_idx, void *handle, uint32_t s1)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4;
+ h->msg.hdr.proto_id = 0;
+ h->msg.param[0] = s1;
+
+ dalrpc_sendwait(h);
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_0);
+
+uint32_t dalrpc_fcn_1(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t s2)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8;
+ h->msg.hdr.proto_id = 1;
+ h->msg.param[0] = s1;
+ h->msg.param[1] = s2;
+
+ dalrpc_sendwait(h);
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_1);
+
+uint32_t dalrpc_fcn_2(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t *p_s2)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4;
+ h->msg.hdr.proto_id = 2;
+ h->msg.param[0] = s1;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS)
+ *p_s2 = h->msg.param[1];
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_2);
+
+uint32_t dalrpc_fcn_3(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t s2, uint32_t s3)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12;
+ h->msg.hdr.proto_id = 3;
+ h->msg.param[0] = s1;
+ h->msg.param[1] = s2;
+ h->msg.param[2] = s3;
+
+ dalrpc_sendwait(h);
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_3);
+
+uint32_t dalrpc_fcn_4(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t s2, uint32_t *p_s3)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8;
+ h->msg.hdr.proto_id = 4;
+ h->msg.param[0] = s1;
+ h->msg.param[1] = s2;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS)
+ *p_s3 = h->msg.param[1];
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_4);
+
+uint32_t dalrpc_fcn_5(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if ((ilen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4 +
+ ROUND_BUFLEN(ilen);
+ h->msg.hdr.proto_id = 5;
+ h->msg.param[0] = ilen;
+ memcpy(&h->msg.param[1], ibuf, ilen);
+
+ dalrpc_sendwait(h);
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_5);
+
+uint32_t dalrpc_fcn_6(uint32_t ddi_idx, void *handle, uint32_t s1,
+ const void *ibuf, uint32_t ilen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if ((ilen + 8) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8 +
+ ROUND_BUFLEN(ilen);
+ h->msg.hdr.proto_id = 6;
+ h->msg.param[0] = s1;
+ h->msg.param[1] = ilen;
+ memcpy(&h->msg.param[2], ibuf, ilen);
+
+ dalrpc_sendwait(h);
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_6);
+
+uint32_t dalrpc_fcn_7(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, void *obuf, uint32_t olen,
+ uint32_t *oalen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+ int param_idx;
+
+ if ((ilen + 8) > DALRPC_MAX_PARAMS_SIZE ||
+ (olen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8 +
+ ROUND_BUFLEN(ilen);
+ h->msg.hdr.proto_id = 7;
+ h->msg.param[0] = ilen;
+ memcpy(&h->msg.param[1], ibuf, ilen);
+ param_idx = (ROUND_BUFLEN(ilen) / 4) + 1;
+ h->msg.param[param_idx] = olen;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ *oalen = h->msg.param[1];
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_7);
+
+uint32_t dalrpc_fcn_8(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, void *obuf, uint32_t olen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+ int param_idx;
+
+ if ((ilen + 8) > DALRPC_MAX_PARAMS_SIZE ||
+ (olen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8 +
+ ROUND_BUFLEN(ilen);
+ h->msg.hdr.proto_id = 8;
+ h->msg.param[0] = ilen;
+ memcpy(&h->msg.param[1], ibuf, ilen);
+ param_idx = (ROUND_BUFLEN(ilen) / 4) + 1;
+ h->msg.param[param_idx] = olen;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_8);
+
+uint32_t dalrpc_fcn_9(uint32_t ddi_idx, void *handle, void *obuf,
+ uint32_t olen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if ((olen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 4;
+ h->msg.hdr.proto_id = 9;
+ h->msg.param[0] = olen;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_9);
+
+uint32_t dalrpc_fcn_10(uint32_t ddi_idx, void *handle, uint32_t s1,
+ const void *ibuf, uint32_t ilen, void *obuf,
+ uint32_t olen, uint32_t *oalen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+ int param_idx;
+
+ if ((ilen + 12) > DALRPC_MAX_PARAMS_SIZE ||
+ (olen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12 +
+ ROUND_BUFLEN(ilen);
+ h->msg.hdr.proto_id = 10;
+ h->msg.param[0] = s1;
+ h->msg.param[1] = ilen;
+ memcpy(&h->msg.param[2], ibuf, ilen);
+ param_idx = (ROUND_BUFLEN(ilen) / 4) + 2;
+ h->msg.param[param_idx] = olen;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ *oalen = h->msg.param[1];
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_10);
+
+uint32_t dalrpc_fcn_11(uint32_t ddi_idx, void *handle, uint32_t s1,
+ void *obuf, uint32_t olen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if ((olen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8;
+ h->msg.hdr.proto_id = 11;
+ h->msg.param[0] = s1;
+ h->msg.param[1] = olen;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_11);
+
+uint32_t dalrpc_fcn_12(uint32_t ddi_idx, void *handle, uint32_t s1,
+ void *obuf, uint32_t olen, uint32_t *oalen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+
+ if ((olen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 8;
+ h->msg.hdr.proto_id = 12;
+ h->msg.param[0] = s1;
+ h->msg.param[1] = olen;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ *oalen = h->msg.param[1];
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_12);
+
+uint32_t dalrpc_fcn_13(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, const void *ibuf2, uint32_t ilen2,
+ void *obuf, uint32_t olen)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+ int param_idx;
+
+ if ((ilen + ilen2 + 12) > DALRPC_MAX_PARAMS_SIZE ||
+ (olen + 4) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12 +
+ ROUND_BUFLEN(ilen) + ROUND_BUFLEN(ilen2);
+ h->msg.hdr.proto_id = 13;
+ h->msg.param[0] = ilen;
+ memcpy(&h->msg.param[1], ibuf, ilen);
+ param_idx = (ROUND_BUFLEN(ilen) / 4) + 1;
+ h->msg.param[param_idx++] = ilen2;
+ memcpy(&h->msg.param[param_idx], ibuf2, ilen2);
+ param_idx += (ROUND_BUFLEN(ilen2) / 4);
+ h->msg.param[param_idx] = olen;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_13);
+
+uint32_t dalrpc_fcn_14(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, void *obuf, uint32_t olen,
+ void *obuf2, uint32_t olen2, uint32_t *oalen2)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+ int param_idx;
+
+ if ((ilen + 12) > DALRPC_MAX_PARAMS_SIZE ||
+ (olen + olen2 + 8) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 12 +
+ ROUND_BUFLEN(ilen);
+ h->msg.hdr.proto_id = 14;
+ h->msg.param[0] = ilen;
+ memcpy(&h->msg.param[1], ibuf, ilen);
+ param_idx = (ROUND_BUFLEN(ilen) / 4) + 1;
+ h->msg.param[param_idx++] = olen;
+ h->msg.param[param_idx] = olen2;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ param_idx = (ROUND_BUFLEN(h->msg.param[1]) / 4) + 2;
+ if (h->msg.param[param_idx] > olen2) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ memcpy(obuf2, &h->msg.param[param_idx + 1],
+ h->msg.param[param_idx]);
+ *oalen2 = h->msg.param[param_idx];
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_14);
+
+uint32_t dalrpc_fcn_15(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, const void *ibuf2, uint32_t ilen2,
+ void *obuf, uint32_t olen, uint32_t *oalen,
+ void *obuf2, uint32_t olen2)
+{
+ struct daldevice_handle *h = handle;
+ uint32_t ret;
+ int param_idx;
+
+ if ((ilen + ilen2 + 16) > DALRPC_MAX_PARAMS_SIZE ||
+ (olen + olen2 + 8) > DALRPC_MAX_PARAMS_SIZE)
+ return -EINVAL;
+
+ if (!client_exists(h))
+ return -EINVAL;
+
+ mutex_lock(&h->client_lock);
+
+ dalrpc_ddi_prologue(ddi_idx, h);
+
+ h->msg.hdr.len = sizeof(struct dalrpc_msg_hdr) + 16 +
+ ROUND_BUFLEN(ilen) + ROUND_BUFLEN(ilen2);
+ h->msg.hdr.proto_id = 15;
+ h->msg.param[0] = ilen;
+ memcpy(&h->msg.param[1], ibuf, ilen);
+ param_idx = (ROUND_BUFLEN(ilen) / 4) + 1;
+ h->msg.param[param_idx++] = ilen2;
+ memcpy(&h->msg.param[param_idx], ibuf2, ilen2);
+ param_idx += (ROUND_BUFLEN(ilen2) / 4);
+ h->msg.param[param_idx++] = olen;
+ h->msg.param[param_idx] = olen2;
+
+ dalrpc_sendwait(h);
+
+ if (h->msg.param[0] == DALRPC_SUCCESS) {
+ if (h->msg.param[1] > olen) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ param_idx = (ROUND_BUFLEN(h->msg.param[1]) / 4) + 2;
+ if (h->msg.param[param_idx] > olen2) {
+ mutex_unlock(&h->client_lock);
+ return -EIO;
+ }
+ memcpy(obuf, &h->msg.param[2], h->msg.param[1]);
+ memcpy(obuf2, &h->msg.param[param_idx + 1],
+ h->msg.param[param_idx]);
+ *oalen = h->msg.param[1];
+ }
+
+ ret = h->msg.param[0];
+ mutex_unlock(&h->client_lock);
+ return ret;
+}
+EXPORT_SYMBOL(dalrpc_fcn_15);
+
+void *dalrpc_alloc_event(void *handle)
+{
+ struct daldevice_handle *h;
+ struct dalrpc_event_handle *ev;
+
+ h = (struct daldevice_handle *)handle;
+
+ if (!client_exists(h))
+ return NULL;
+
+ ev = kmalloc(sizeof(struct dalrpc_event_handle), GFP_KERNEL);
+ if (!ev)
+ return NULL;
+
+ ev->flag = 0;
+ spin_lock_init(&ev->lock);
+
+ mutex_lock(&h->port->event_list_lock);
+ list_add(&ev->list, &h->port->event_list);
+ mutex_unlock(&h->port->event_list_lock);
+
+ return ev;
+}
+EXPORT_SYMBOL(dalrpc_alloc_event);
+
+void *dalrpc_alloc_cb(void *handle,
+ void (*fn)(void *, uint32_t, void *, uint32_t),
+ void *context)
+{
+ struct daldevice_handle *h;
+ struct dalrpc_cb_handle *cb;
+
+ h = (struct daldevice_handle *)handle;
+
+ if (!client_exists(h))
+ return NULL;
+
+ cb = kmalloc(sizeof(struct dalrpc_cb_handle), GFP_KERNEL);
+ if (!cb)
+ return NULL;
+
+ cb->fn = fn;
+ cb->context = context;
+
+ mutex_lock(&h->port->cb_list_lock);
+ list_add(&cb->list, &h->port->cb_list);
+ mutex_unlock(&h->port->cb_list_lock);
+
+ return cb;
+}
+EXPORT_SYMBOL(dalrpc_alloc_cb);
+
+void dalrpc_dealloc_event(void *handle,
+ void *ev_h)
+{
+ struct daldevice_handle *h;
+ struct dalrpc_event_handle *ev;
+
+ h = (struct daldevice_handle *)handle;
+ ev = (struct dalrpc_event_handle *)ev_h;
+
+ mutex_lock(&h->port->event_list_lock);
+ list_del(&ev->list);
+ mutex_unlock(&h->port->event_list_lock);
+ kfree(ev);
+}
+EXPORT_SYMBOL(dalrpc_dealloc_event);
+
+void dalrpc_dealloc_cb(void *handle,
+ void *cb_h)
+{
+ struct daldevice_handle *h;
+ struct dalrpc_cb_handle *cb;
+
+ h = (struct daldevice_handle *)handle;
+ cb = (struct dalrpc_cb_handle *)cb_h;
+
+ mutex_lock(&h->port->cb_list_lock);
+ list_del(&cb->list);
+ mutex_unlock(&h->port->cb_list_lock);
+ kfree(cb);
+}
+EXPORT_SYMBOL(dalrpc_dealloc_cb);
+
+static int event_occurred(int num_events, struct dalrpc_event_handle **events,
+ int *occurred)
+{
+ int i;
+
+ for (i = 0; i < num_events; i++) {
+ spin_lock(&events[i]->lock);
+ if (events[i]->flag) {
+ events[i]->flag = 0;
+ spin_unlock(&events[i]->lock);
+ *occurred = i;
+ return 1;
+ }
+ spin_unlock(&events[i]->lock);
+ }
+
+ return 0;
+}
+
+int dalrpc_event_wait_multiple(int num, void **ev_h, int timeout)
+{
+ struct dalrpc_event_handle **events;
+ int ret, occurred;
+
+ events = (struct dalrpc_event_handle **)ev_h;
+
+ if (timeout == DALRPC_TIMEOUT_INFINITE) {
+ wait_event(event_wq,
+ event_occurred(num, events, &occurred));
+ return occurred;
+ }
+
+ ret = wait_event_timeout(event_wq,
+ event_occurred(num, events, &occurred),
+ timeout);
+ if (ret > 0)
+ return occurred;
+ else
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(dalrpc_event_wait_multiple);
diff --git a/arch/arm/mach-msm/dal_remotetest.c b/arch/arm/mach-msm/dal_remotetest.c
new file mode 100644
index 000000000000..bbe0f1a4475c
--- /dev/null
+++ b/arch/arm/mach-msm/dal_remotetest.c
@@ -0,0 +1,454 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * DAL remote test device test suite.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+
+#include "dal_remotetest.h"
+
+#define BYTEBUF_LEN 64
+
+#define rpc_error(num) \
+ do { \
+ errmask |= (1 << num); \
+ printk(KERN_INFO "%s: remote_unittest_%d failed (%d)\n", \
+ __func__, num, ret); \
+ } while (0)
+
+#define verify_error(num, field) \
+ do { \
+ errmask |= (1 << num); \
+ printk(KERN_INFO "%s: remote_unittest_%d failed (%s)\n", \
+ __func__, num, field); \
+ } while (0)
+
+
+static struct dentry *debugfs_dir_entry;
+static struct dentry *debugfs_modem_entry;
+static struct dentry *debugfs_dsp_entry;
+
+static uint8_t in_bytebuf[BYTEBUF_LEN];
+static uint8_t out_bytebuf[BYTEBUF_LEN];
+static uint8_t out_bytebuf2[BYTEBUF_LEN];
+static struct remote_test_data in_data;
+static struct remote_test_data out_data;
+static int block_until_cb = 1;
+
+static void init_data(struct remote_test_data *data)
+{
+ int i;
+ data->regular_event = REMOTE_UNITTEST_INPUT_HANDLE;
+ data->payload_event = REMOTE_UNITTEST_INPUT_HANDLE;
+ for (i = 0; i < 32; i++)
+ data->test[i] = i;
+}
+
+static int verify_data(struct remote_test_data *data)
+{
+ int i;
+ if (data->regular_event != REMOTE_UNITTEST_INPUT_HANDLE ||
+ data->payload_event != REMOTE_UNITTEST_INPUT_HANDLE)
+ return -1;
+ for (i = 0; i < 32; i++)
+ if (data->test[i] != i)
+ return -1;
+
+ return 0;
+}
+
+static int verify_uint32_buffer(uint32_t *buf)
+{
+ int i;
+ for (i = 0; i < 32; i++)
+ if (buf[i] != i)
+ return -1;
+
+ return 0;
+}
+
+static void init_bytebuf(uint8_t *bytebuf)
+{
+ int i;
+ for (i = 0; i < BYTEBUF_LEN; i++)
+ bytebuf[i] = i & 0xff;
+}
+
+static int verify_bytebuf(uint8_t *bytebuf)
+{
+ int i;
+ for (i = 0; i < BYTEBUF_LEN; i++)
+ if (bytebuf[i] != (i & 0xff))
+ return -1;
+
+ return 0;
+}
+
+static void test_cb(void *context, uint32_t param, void *data, uint32_t len)
+{
+ block_until_cb = 0;
+}
+
+static int remotetest_exec(int dest, u64 *val)
+{
+ void *dev_handle;
+ void *event_handles[3];
+ void *cb_handle;
+ int ret;
+ u64 errmask = 0;
+ uint32_t ouint;
+ uint32_t oalen;
+
+ /* test daldevice_attach */
+ ret = daldevice_attach(REMOTE_UNITTEST_DEVICEID, NULL,
+ dest, &dev_handle);
+ if (ret) {
+ printk(KERN_INFO "%s: failed to attach (%d)\n", __func__, ret);
+ *val = 0xffffffff;
+ return 0;
+ }
+
+ /* test remote_unittest_0 */
+ ret = remote_unittest_0(dev_handle, REMOTE_UNITTEST_INARG_1);
+ if (ret)
+ rpc_error(0);
+
+ /* test remote_unittest_1 */
+ ret = remote_unittest_1(dev_handle, REMOTE_UNITTEST_INARG_1,
+ REMOTE_UNITTEST_INARG_2);
+ if (ret)
+ rpc_error(1);
+
+ /* test remote_unittest_2 */
+ ouint = 0;
+ ret = remote_unittest_2(dev_handle, REMOTE_UNITTEST_INARG_1, &ouint);
+ if (ret)
+ rpc_error(2);
+ else if (ouint != REMOTE_UNITTEST_OUTARG_1)
+ verify_error(2, "ouint");
+
+ /* test remote_unittest_3 */
+ ret = remote_unittest_3(dev_handle, REMOTE_UNITTEST_INARG_1,
+ REMOTE_UNITTEST_INARG_2,
+ REMOTE_UNITTEST_INARG_3);
+ if (ret)
+ rpc_error(3);
+
+ /* test remote_unittest_4 */
+ ouint = 0;
+ ret = remote_unittest_4(dev_handle, REMOTE_UNITTEST_INARG_1,
+ REMOTE_UNITTEST_INARG_2, &ouint);
+ if (ret)
+ rpc_error(4);
+ else if (ouint != REMOTE_UNITTEST_OUTARG_1)
+ verify_error(4, "ouint");
+
+ /* test remote_unittest_5 */
+ init_data(&in_data);
+ ret = remote_unittest_5(dev_handle, &in_data, sizeof(in_data));
+ if (ret)
+ rpc_error(5);
+
+ /* test remote_unittest_6 */
+ init_data(&in_data);
+ ret = remote_unittest_6(dev_handle, REMOTE_UNITTEST_INARG_1,
+ &in_data.test, sizeof(in_data.test));
+ if (ret)
+ rpc_error(6);
+
+ /* test remote_unittest_7 */
+ init_data(&in_data);
+ memset(&out_data, 0, sizeof(out_data));
+ ret = remote_unittest_7(dev_handle, &in_data, sizeof(in_data),
+ &out_data.test, sizeof(out_data.test),
+ &oalen);
+ if (ret)
+ rpc_error(7);
+ else if (oalen != sizeof(out_data.test))
+ verify_error(7, "oalen");
+ else if (verify_uint32_buffer(out_data.test))
+ verify_error(7, "obuf");
+
+ /* test remote_unittest_8 */
+ init_bytebuf(in_bytebuf);
+ memset(&out_data, 0, sizeof(out_data));
+ ret = remote_unittest_8(dev_handle, in_bytebuf, sizeof(in_bytebuf),
+ &out_data, sizeof(out_data));
+ if (ret)
+ rpc_error(8);
+ else if (verify_data(&out_data))
+ verify_error(8, "obuf");
+
+ /* test remote_unittest_9 */
+ memset(&out_bytebuf, 0, sizeof(out_bytebuf));
+ ret = remote_unittest_9(dev_handle, out_bytebuf, sizeof(out_bytebuf));
+ if (ret)
+ rpc_error(9);
+ else if (verify_bytebuf(out_bytebuf))
+ verify_error(9, "obuf");
+
+ /* test remote_unittest_10 */
+ init_bytebuf(in_bytebuf);
+ memset(&out_bytebuf, 0, sizeof(out_bytebuf));
+ ret = remote_unittest_10(dev_handle, REMOTE_UNITTEST_INARG_1,
+ in_bytebuf, sizeof(in_bytebuf),
+ out_bytebuf, sizeof(out_bytebuf), &oalen);
+ if (ret)
+ rpc_error(10);
+ else if (oalen != sizeof(out_bytebuf))
+ verify_error(10, "oalen");
+ else if (verify_bytebuf(out_bytebuf))
+ verify_error(10, "obuf");
+
+ /* test remote_unittest_11 */
+ memset(&out_bytebuf, 0, sizeof(out_bytebuf));
+ ret = remote_unittest_11(dev_handle, REMOTE_UNITTEST_INARG_1,
+ out_bytebuf, sizeof(out_bytebuf));
+ if (ret)
+ rpc_error(11);
+ else if (verify_bytebuf(out_bytebuf))
+ verify_error(11, "obuf");
+
+ /* test remote_unittest_12 */
+ memset(&out_bytebuf, 0, sizeof(out_bytebuf));
+ ret = remote_unittest_12(dev_handle, REMOTE_UNITTEST_INARG_1,
+ out_bytebuf, sizeof(out_bytebuf), &oalen);
+ if (ret)
+ rpc_error(12);
+ else if (oalen != sizeof(out_bytebuf))
+ verify_error(12, "oalen");
+ else if (verify_bytebuf(out_bytebuf))
+ verify_error(12, "obuf");
+
+ /* test remote_unittest_13 */
+ init_data(&in_data);
+ memset(&out_data, 0, sizeof(out_data));
+ ret = remote_unittest_13(dev_handle, in_data.test, sizeof(in_data.test),
+ &in_data, sizeof(in_data),
+ &out_data, sizeof(out_data));
+ if (ret)
+ rpc_error(13);
+ else if (verify_data(&out_data))
+ verify_error(13, "obuf");
+
+ /* test remote_unittest_14 */
+ init_data(&in_data);
+ memset(out_bytebuf, 0, sizeof(out_bytebuf));
+ memset(out_bytebuf2, 0, sizeof(out_bytebuf2));
+ ret = remote_unittest_14(dev_handle,
+ in_data.test, sizeof(in_data.test),
+ out_bytebuf, sizeof(out_bytebuf),
+ out_bytebuf2, sizeof(out_bytebuf2), &oalen);
+ if (ret)
+ rpc_error(14);
+ else if (verify_bytebuf(out_bytebuf))
+ verify_error(14, "obuf");
+ else if (oalen != sizeof(out_bytebuf2))
+ verify_error(14, "oalen");
+ else if (verify_bytebuf(out_bytebuf2))
+ verify_error(14, "obuf2");
+
+ /* test remote_unittest_15 */
+ init_data(&in_data);
+ memset(out_bytebuf, 0, sizeof(out_bytebuf));
+ memset(&out_data, 0, sizeof(out_data));
+ ret = remote_unittest_15(dev_handle,
+ in_data.test, sizeof(in_data.test),
+ &in_data, sizeof(in_data),
+ &out_data, sizeof(out_data), &oalen,
+ out_bytebuf, sizeof(out_bytebuf));
+ if (ret)
+ rpc_error(15);
+ else if (oalen != sizeof(out_data))
+ verify_error(15, "oalen");
+ else if (verify_bytebuf(out_bytebuf))
+ verify_error(15, "obuf");
+ else if (verify_data(&out_data))
+ verify_error(15, "obuf2");
+
+ /* test setting up asynch events */
+ event_handles[0] = dalrpc_alloc_event(dev_handle);
+ event_handles[1] = dalrpc_alloc_event(dev_handle);
+ event_handles[2] = dalrpc_alloc_event(dev_handle);
+ cb_handle = dalrpc_alloc_cb(dev_handle, test_cb, &out_data);
+ in_data.regular_event = (uint32_t)event_handles[2];
+ in_data.payload_event = (uint32_t)cb_handle;
+ ret = remote_unittest_eventcfg(dev_handle, &in_data, sizeof(in_data));
+ if (ret) {
+ errmask |= (1 << 16);
+ printk(KERN_INFO "%s: failed to configure asynch (%d)\n",
+ __func__, ret);
+ }
+
+ /* test event */
+ ret = remote_unittest_eventtrig(dev_handle,
+ REMOTE_UNITTEST_REGULAR_EVENT);
+ if (ret) {
+ errmask |= (1 << 17);
+ printk(KERN_INFO "%s: failed to trigger event (%d)\n",
+ __func__, ret);
+ }
+ ret = dalrpc_event_wait(event_handles[2], 1000);
+ if (ret) {
+ errmask |= (1 << 18);
+ printk(KERN_INFO "%s: failed to receive event (%d)\n",
+ __func__, ret);
+ }
+
+ /* test event again */
+ ret = remote_unittest_eventtrig(dev_handle,
+ REMOTE_UNITTEST_REGULAR_EVENT);
+ if (ret) {
+ errmask |= (1 << 19);
+ printk(KERN_INFO "%s: failed to trigger event (%d)\n",
+ __func__, ret);
+ }
+ ret = dalrpc_event_wait_multiple(3, event_handles, 1000);
+ if (ret != 2) {
+ errmask |= (1 << 20);
+ printk(KERN_INFO "%s: failed to receive event (%d)\n",
+ __func__, ret);
+ }
+
+ /* test callback */
+ ret = remote_unittest_eventtrig(dev_handle,
+ REMOTE_UNITTEST_CALLBACK_EVENT);
+ if (ret) {
+ errmask |= (1 << 21);
+ printk(KERN_INFO "%s: failed to trigger callback (%d)\n",
+ __func__, ret);
+ } else
+ while (block_until_cb)
+ ;
+
+ dalrpc_dealloc_cb(dev_handle, cb_handle);
+ dalrpc_dealloc_event(dev_handle, event_handles[0]);
+ dalrpc_dealloc_event(dev_handle, event_handles[1]);
+ dalrpc_dealloc_event(dev_handle, event_handles[2]);
+
+ /* test daldevice_detach */
+ ret = daldevice_detach(dev_handle);
+ if (ret) {
+ errmask |= (1 << 22);
+ printk(KERN_INFO "%s: failed to detach (%d)\n", __func__, ret);
+ }
+
+ printk(KERN_INFO "%s: remote_unittest complete\n", __func__);
+
+ *val = errmask;
+ return 0;
+}
+
+static int remotetest_modem_exec(void *data, u64 *val)
+{
+ return remotetest_exec(DALRPC_DEST_MODEM, val);
+}
+
+static int remotetest_dsp_exec(void *data, u64 *val)
+{
+ return remotetest_exec(DALRPC_DEST_QDSP, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(dal_modemtest_fops, remotetest_modem_exec,
+ NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(dal_dsptest_fops, remotetest_dsp_exec,
+ NULL, "%llu\n");
+
+static int __init remotetest_init(void)
+{
+ debugfs_dir_entry = debugfs_create_dir("dal", 0);
+ if (IS_ERR(debugfs_dir_entry))
+ return PTR_ERR(debugfs_dir_entry);
+
+ debugfs_modem_entry = debugfs_create_file("modem_test", 0444,
+ debugfs_dir_entry,
+ NULL, &dal_modemtest_fops);
+ if (IS_ERR(debugfs_modem_entry)) {
+ debugfs_remove(debugfs_dir_entry);
+ return PTR_ERR(debugfs_modem_entry);
+ }
+
+ debugfs_dsp_entry = debugfs_create_file("dsp_test", 0444,
+ debugfs_dir_entry,
+ NULL, &dal_dsptest_fops);
+ if (IS_ERR(debugfs_dsp_entry)) {
+ debugfs_remove(debugfs_modem_entry);
+ debugfs_remove(debugfs_dir_entry);
+ return PTR_ERR(debugfs_dsp_entry);
+ }
+
+ return 0;
+}
+
+static void __exit remotetest_exit(void)
+{
+ debugfs_remove(debugfs_modem_entry);
+ debugfs_remove(debugfs_dsp_entry);
+ debugfs_remove(debugfs_dir_entry);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Test for DAL RPC");
+MODULE_VERSION("1.0");
+
+module_init(remotetest_init);
+module_exit(remotetest_exit);
diff --git a/arch/arm/mach-msm/dal_remotetest.h b/arch/arm/mach-msm/dal_remotetest.h
new file mode 100644
index 000000000000..e24b480192ed
--- /dev/null
+++ b/arch/arm/mach-msm/dal_remotetest.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * DAL remote test device API.
+ */
+
+#include <linux/kernel.h>
+
+#include <mach/dal.h>
+
+#define REMOTE_UNITTEST_DEVICEID 0xDA1DA1DA
+
+enum {
+ DALRPC_TEST_API_0 = DALDEVICE_FIRST_DEVICE_API_IDX,
+ DALRPC_TEST_API_1,
+ DALRPC_TEST_API_2,
+ DALRPC_TEST_API_3,
+ DALRPC_TEST_API_4,
+ DALRPC_TEST_API_5,
+ DALRPC_TEST_API_6,
+ DALRPC_TEST_API_7,
+ DALRPC_TEST_API_8,
+ DALRPC_TEST_API_9,
+ DALRPC_TEST_API_10,
+ DALRPC_TEST_API_11,
+ DALRPC_TEST_API_12,
+ DALRPC_TEST_API_13,
+ DALRPC_TEST_API_14,
+ DALRPC_TEST_API_15,
+ DALRPC_TEST_API_16,
+ DALRPC_TEST_API_17
+};
+
+#define REMOTE_UNITTEST_INARG_1 0x01010101
+#define REMOTE_UNITTEST_INARG_2 0x20202020
+#define REMOTE_UNITTEST_INARG_3 0x12121212
+#define REMOTE_UNITTEST_INPUT_HANDLE 0xDA1FDA1F
+#define REMOTE_UNITTEST_OUTARG_1 0xBEEFDEAD
+
+#define REMOTE_UNITTEST_REGULAR_EVENT 0
+#define REMOTE_UNITTEST_CALLBACK_EVENT 1
+
+#define REMOTE_UNITTEST_BAD_PARAM 0x10
+
+struct remote_test_data {
+ uint32_t regular_event;
+ uint32_t test[32];
+ uint32_t payload_event;
+};
+
+static int remote_unittest_0(void *handle, uint32_t s1)
+{
+ return dalrpc_fcn_0(DALRPC_TEST_API_0, handle, s1);
+}
+
+static int remote_unittest_1(void *handle, uint32_t s1, uint32_t s2)
+{
+ return dalrpc_fcn_1(DALRPC_TEST_API_1, handle, s1, s2);
+}
+
+static int remote_unittest_2(void *handle, uint32_t s1, uint32_t *p_s2)
+{
+ return dalrpc_fcn_2(DALRPC_TEST_API_2, handle, s1, p_s2);
+}
+
+static int remote_unittest_3(void *handle, uint32_t s1, uint32_t s2,
+ uint32_t s3)
+{
+ return dalrpc_fcn_3(DALRPC_TEST_API_3, handle, s1, s2, s3);
+}
+
+static int remote_unittest_4(void *handle, uint32_t s1, uint32_t s2,
+ uint32_t *p_s3)
+{
+ return dalrpc_fcn_4(DALRPC_TEST_API_4, handle, s1, s2, p_s3);
+}
+
+static int remote_unittest_5(void *handle, const void *ibuf, uint32_t ilen)
+{
+ return dalrpc_fcn_5(DALRPC_TEST_API_5, handle, ibuf, ilen);
+}
+
+static int remote_unittest_6(void *handle, uint32_t s1, const void *ibuf,
+ uint32_t ilen)
+{
+ return dalrpc_fcn_6(DALRPC_TEST_API_6, handle, s1, ibuf, ilen);
+}
+
+static int remote_unittest_7(void *handle, const void *ibuf, uint32_t ilen,
+ void *obuf, uint32_t olen, uint32_t *oalen)
+{
+ return dalrpc_fcn_7(DALRPC_TEST_API_7, handle, ibuf, ilen, obuf,
+ olen, oalen);
+}
+
+static int remote_unittest_8(void *handle, const void *ibuf, uint32_t ilen,
+ void *obuf, uint32_t olen)
+{
+ return dalrpc_fcn_8(DALRPC_TEST_API_8, handle, ibuf, ilen, obuf, olen);
+}
+
+static int remote_unittest_9(void *handle, void *obuf, uint32_t olen)
+{
+ return dalrpc_fcn_9(DALRPC_TEST_API_9, handle, obuf, olen);
+}
+
+static int remote_unittest_10(void *handle, uint32_t s1, const void *ibuf,
+ uint32_t ilen, void *obuf, uint32_t olen,
+ uint32_t *oalen)
+{
+ return dalrpc_fcn_10(DALRPC_TEST_API_10, handle, s1, ibuf, ilen, obuf,
+ olen, oalen);
+}
+
+static int remote_unittest_11(void *handle, uint32_t s1, void *obuf,
+ uint32_t olen)
+{
+ return dalrpc_fcn_11(DALRPC_TEST_API_11, handle, s1, obuf, olen);
+}
+
+static int remote_unittest_12(void *handle, uint32_t s1, void *obuf,
+ uint32_t olen, uint32_t *oalen)
+{
+ return dalrpc_fcn_12(DALRPC_TEST_API_12, handle, s1, obuf, olen,
+ oalen);
+}
+
+static int remote_unittest_13(void *handle, const void *ibuf, uint32_t ilen,
+ const void *ibuf2, uint32_t ilen2, void *obuf,
+ uint32_t olen)
+{
+ return dalrpc_fcn_13(DALRPC_TEST_API_13, handle, ibuf, ilen, ibuf2,
+ ilen2, obuf, olen);
+}
+
+static int remote_unittest_14(void *handle, const void *ibuf, uint32_t ilen,
+ void *obuf, uint32_t olen, void *obuf2,
+ uint32_t olen2, uint32_t *oalen2)
+{
+ return dalrpc_fcn_14(DALRPC_TEST_API_14, handle, ibuf, ilen, obuf,
+ olen, obuf2, olen2, oalen2);
+}
+
+static int remote_unittest_15(void *handle, const void *ibuf, uint32_t ilen,
+ const void *ibuf2, uint32_t ilen2, void *obuf,
+ uint32_t olen, uint32_t *oalen, void *obuf2,
+ uint32_t olen2)
+{
+ return dalrpc_fcn_15(DALRPC_TEST_API_15, handle, ibuf, ilen, ibuf2,
+ ilen2, obuf, olen, oalen, obuf2, olen2);
+}
+
+static int remote_unittest_eventcfg(void *handle, const void *ibuf,
+ uint32_t ilen)
+{
+ return dalrpc_fcn_5(DALRPC_TEST_API_16, handle, ibuf, ilen);
+}
+
+static int remote_unittest_eventtrig(void *handle, uint32_t event_idx)
+{
+ return dalrpc_fcn_0(DALRPC_TEST_API_17, handle, event_idx);
+}
diff --git a/arch/arm/mach-msm/devices.c b/arch/arm/mach-msm/devices.c
index 31b6b30e98bf..a64578f1a032 100644
--- a/arch/arm/mach-msm/devices.c
+++ b/arch/arm/mach-msm/devices.c
@@ -1,6 +1,7 @@
/* linux/arch/arm/mach-msm/devices.c
*
* Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,14 +17,20 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#include <mach/irqs.h>
#include <mach/msm_iomap.h>
+#include <mach/dma.h>
+#include <mach/board.h>
+
#include "devices.h"
#include <asm/mach/flash.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <asm/mach/mmc.h>
+
static struct resource resources_uart1[] = {
{
.start = INT_UART1,
@@ -84,48 +91,14 @@ struct platform_device msm_device_uart3 = {
.resource = resources_uart3,
};
-static struct resource resources_i2c[] = {
- {
- .start = MSM_I2C_PHYS,
- .end = MSM_I2C_PHYS + MSM_I2C_SIZE - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_PWB_I2C,
- .end = INT_PWB_I2C,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device msm_device_i2c = {
- .name = "msm_i2c",
- .id = 0,
- .num_resources = ARRAY_SIZE(resources_i2c),
- .resource = resources_i2c,
-};
-
-static struct resource resources_hsusb[] = {
- {
- .start = MSM_HSUSB_PHYS,
- .end = MSM_HSUSB_PHYS + MSM_HSUSB_SIZE,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_USB_HS,
- .end = INT_USB_HS,
- .flags = IORESOURCE_IRQ,
- },
-};
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+#define MSM_UART1DM_PHYS 0xA0200000
+#define MSM_UART2DM_PHYS 0xA0900000
+#else
+#define MSM_UART1DM_PHYS 0xA0200000
+#define MSM_UART2DM_PHYS 0xA0300000
+#endif
-struct platform_device msm_device_hsusb = {
- .name = "msm_hsusb",
- .id = -1,
- .num_resources = ARRAY_SIZE(resources_hsusb),
- .resource = resources_hsusb,
- .dev = {
- .coherent_dma_mask = 0xffffffff,
- },
-};
struct flash_platform_data msm_nand_data = {
.parts = NULL,
@@ -155,10 +128,27 @@ struct platform_device msm_device_smd = {
.id = -1,
};
+struct platform_device msm_device_dmov = {
+ .name = "msm_dmov",
+ .id = -1,
+};
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+#define MSM_SDC1_BASE 0xA0300000
+#define MSM_SDC2_BASE 0xA0400000
+#define MSM_SDC3_BASE 0xA0500000
+#define MSM_SDC4_BASE 0xA0600000
+#else
+#define MSM_SDC1_BASE 0xA0400000
+#define MSM_SDC2_BASE 0xA0500000
+#define MSM_SDC3_BASE 0xA0600000
+#define MSM_SDC4_BASE 0xA0700000
+#endif
+
static struct resource resources_sdc1[] = {
{
- .start = MSM_SDC1_PHYS,
- .end = MSM_SDC1_PHYS + MSM_SDC1_SIZE - 1,
+ .start = MSM_SDC1_BASE,
+ .end = MSM_SDC1_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -175,8 +165,8 @@ static struct resource resources_sdc1[] = {
static struct resource resources_sdc2[] = {
{
- .start = MSM_SDC2_PHYS,
- .end = MSM_SDC2_PHYS + MSM_SDC2_SIZE - 1,
+ .start = MSM_SDC2_BASE,
+ .end = MSM_SDC2_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -193,8 +183,8 @@ static struct resource resources_sdc2[] = {
static struct resource resources_sdc3[] = {
{
- .start = MSM_SDC3_PHYS,
- .end = MSM_SDC3_PHYS + MSM_SDC3_SIZE - 1,
+ .start = MSM_SDC3_BASE,
+ .end = MSM_SDC3_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -211,8 +201,8 @@ static struct resource resources_sdc3[] = {
static struct resource resources_sdc4[] = {
{
- .start = MSM_SDC4_PHYS,
- .end = MSM_SDC4_PHYS + MSM_SDC4_SIZE - 1,
+ .start = MSM_SDC4_BASE,
+ .end = MSM_SDC4_BASE + SZ_4K - 1,
.flags = IORESOURCE_MEM,
},
{
@@ -266,3 +256,273 @@ struct platform_device msm_device_sdc4 = {
.coherent_dma_mask = 0xffffffff,
},
};
+
+static struct platform_device *msm_sdcc_devices[] __initdata = {
+ &msm_device_sdc1,
+ &msm_device_sdc2,
+ &msm_device_sdc3,
+ &msm_device_sdc4,
+};
+
+int __init msm_add_sdcc(unsigned int controller, struct mmc_platform_data *plat)
+{
+ struct platform_device *pdev;
+
+ if (controller < 1 || controller > 4)
+ return -EINVAL;
+
+ pdev = msm_sdcc_devices[controller-1];
+ pdev->dev.platform_data = plat;
+ return platform_device_register(pdev);
+}
+
+struct clk msm_clocks_7x01a[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
+ CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("grp_clk", GRP_CLK, NULL, OFF),
+ CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
+ CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
+ CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
+ CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
+ CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
+ CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_PCLK, NULL, OFF),
+ CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
+ CLK_PCOM("grp_pclk", GRP_PCLK, NULL, 0),
+};
+
+unsigned msm_num_clocks_7x01a = ARRAY_SIZE(msm_clocks_7x01a);
+
+struct clk msm_clocks_7x25[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
+ CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
+ CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_PCLK, NULL, OFF),
+ CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
+ CLK_PCOM("grp_pclk", GRP_PCLK, NULL, 0),
+};
+
+unsigned msm_num_clocks_7x25 = ARRAY_SIZE(msm_clocks_7x25);
+
+struct clk msm_clocks_7x27[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
+ CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("grp_clk", GRP_CLK, NULL, 0),
+ CLK_PCOM("grp_pclk", GRP_PCLK, NULL, 0),
+ CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
+ CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_PCLK, NULL, OFF),
+ CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
+ CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
+};
+
+unsigned msm_num_clocks_7x27 = ARRAY_SIZE(msm_clocks_7x27);
+
+struct clk msm_clocks_7x30[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("adsp_clk", ADSP_CLK, NULL, 0),
+ CLK_PCOM("cam_m_clk", CAM_MCLK_CLK, NULL, 0),
+ CLK_PCOM("camif_pad_pclk", CAMIF_PAD_PCLK, NULL, OFF),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
+ CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("grp_2d_clk", GRP_2D_CLK, NULL, 0),
+ CLK_PCOM("grp_2d_pclk", GRP_2D_PCLK, NULL, 0),
+ CLK_PCOM("grp_clk", GRP_CLK, NULL, 0),
+ CLK_PCOM("grp_pclk", GRP_PCLK, NULL, 0),
+ CLK_PCOM("hdmi_clk", HDMI_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("lpa_codec_clk", LPA_CODEC_CLK, NULL, 0),
+ CLK_PCOM("lpa_core_clk", LPA_CORE_CLK, NULL, 0),
+ CLK_PCOM("lpa_pclk", LPA_PCLK, NULL, 0),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("mddi_pclk", PMDH_PCLK, NULL, 0),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("mdp_pclk", MDP_PCLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
+ CLK_PCOM("mfc_clk", MFC_CLK, NULL, 0),
+ CLK_PCOM("mfc_div2_clk", MFC_DIV2_CLK, NULL, 0),
+ CLK_PCOM("mfc_pclk", MFC_PCLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_rx_m_clk", MI2S_CODEC_RX_MCLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_rx_s_clk", MI2S_CODEC_RX_SCLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_tx_m_clk", MI2S_CODEC_TX_MCLK, NULL, 0),
+ CLK_PCOM("mi2s_codec_tx_s_clk", MI2S_CODEC_TX_SCLK, NULL, 0),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("rotator_clk", AXI_ROTATOR_CLK, NULL, 0),
+ CLK_PCOM("rotator_imem_clk", ROTATOR_IMEM_CLK, NULL, OFF),
+ CLK_PCOM("rotator_pclk", ROTATOR_PCLK, NULL, OFF),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
+ CLK_PCOM("spi_pclk", SPI_PCLK, NULL, 0),
+ CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_PCLK, NULL, OFF),
+ CLK_PCOM("usb_hs_core_clk", USB_HS_CORE_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_pclk", USB_HS2_PCLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_core_clk", USB_HS2_CORE_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_pclk", USB_HS3_PCLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_core_clk", USB_HS3_CORE_CLK, NULL, OFF),
+ CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
+ CLK_PCOM("vfe_camif_clk", VFE_CAMIF_CLK, NULL, 0),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, 0),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, 0),
+ CLK_PCOM("vfe_pclk", VFE_PCLK, NULL, OFF),
+};
+
+unsigned msm_num_clocks_7x30 = ARRAY_SIZE(msm_clocks_7x30);
+
+struct clk msm_clocks_8x50[] = {
+ CLK_PCOM("adm_clk", ADM_CLK, NULL, 0),
+ CLK_PCOM("ebi1_clk", EBI1_CLK, NULL, CLK_MIN),
+ CLK_PCOM("ebi2_clk", EBI2_CLK, NULL, 0),
+ CLK_PCOM("ecodec_clk", ECODEC_CLK, NULL, 0),
+ CLK_PCOM("emdh_clk", EMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("gp_clk", GP_CLK, NULL, 0),
+ CLK_PCOM("grp_clk", GRP_CLK, NULL, 0),
+ CLK_PCOM("icodec_rx_clk", ICODEC_RX_CLK, NULL, 0),
+ CLK_PCOM("icodec_tx_clk", ICODEC_TX_CLK, NULL, 0),
+ CLK_PCOM("imem_clk", IMEM_CLK, NULL, OFF),
+ CLK_PCOM("mdc_clk", MDC_CLK, NULL, 0),
+ CLK_PCOM("mddi_clk", PMDH_CLK, NULL, OFF | CLK_MINMAX),
+ CLK_PCOM("mdp_clk", MDP_CLK, NULL, OFF),
+ CLK_PCOM("mdp_lcdc_pclk_clk", MDP_LCDC_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_lcdc_pad_pclk_clk", MDP_LCDC_PAD_PCLK_CLK, NULL, 0),
+ CLK_PCOM("mdp_vsync_clk", MDP_VSYNC_CLK, NULL, 0),
+ CLK_PCOM("pbus_clk", PBUS_CLK, NULL, CLK_MIN),
+ CLK_PCOM("pcm_clk", PCM_CLK, NULL, 0),
+ CLK_PCOM("sdac_clk", SDAC_CLK, NULL, OFF),
+ CLK_PCOM("sdc_clk", SDC1_CLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC1_PCLK, &msm_device_sdc1.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC2_CLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC2_PCLK, &msm_device_sdc2.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC3_CLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC3_PCLK, &msm_device_sdc3.dev, OFF),
+ CLK_PCOM("sdc_clk", SDC4_CLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("sdc_pclk", SDC4_PCLK, &msm_device_sdc4.dev, OFF),
+ CLK_PCOM("spi_clk", SPI_CLK, NULL, 0),
+ CLK_PCOM("tsif_clk", TSIF_CLK, NULL, 0),
+ CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
+ CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
+ CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
+ CLK_PCOM("uart_clk", UART1_CLK, &msm_device_uart1.dev, OFF),
+ CLK_PCOM("uart_clk", UART2_CLK, &msm_device_uart2.dev, 0),
+ CLK_PCOM("uart_clk", UART3_CLK, &msm_device_uart3.dev, OFF),
+ CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs_pclk", USB_HS_PCLK, NULL, OFF),
+ CLK_PCOM("usb_otg_clk", USB_OTG_CLK, NULL, 0),
+ CLK_PCOM("vdc_clk", VDC_CLK, NULL, OFF | CLK_MIN),
+ CLK_PCOM("vfe_clk", VFE_CLK, NULL, OFF),
+ CLK_PCOM("vfe_mdc_clk", VFE_MDC_CLK, NULL, OFF),
+ CLK_PCOM("vfe_axi_clk", VFE_AXI_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_clk", USB_HS2_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs2_pclk", USB_HS2_PCLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_clk", USB_HS3_CLK, NULL, OFF),
+ CLK_PCOM("usb_hs3_pclk", USB_HS3_PCLK, NULL, OFF),
+ CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
+};
+
+unsigned msm_num_clocks_8x50 = ARRAY_SIZE(msm_clocks_8x50);
diff --git a/arch/arm/mach-msm/devices.h b/arch/arm/mach-msm/devices.h
index 0744c4a27d6a..9dd8d9b698c8 100644
--- a/arch/arm/mach-msm/devices.h
+++ b/arch/arm/mach-msm/devices.h
@@ -1,6 +1,7 @@
/* linux/arch/arm/mach-msm/devices.h
*
* Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,6 +17,8 @@
#ifndef __ARCH_ARM_MACH_MSM_DEVICES_H
#define __ARCH_ARM_MACH_MSM_DEVICES_H
+#include "clock.h"
+
extern struct platform_device msm_device_uart1;
extern struct platform_device msm_device_uart2;
extern struct platform_device msm_device_uart3;
@@ -25,12 +28,31 @@ extern struct platform_device msm_device_sdc2;
extern struct platform_device msm_device_sdc3;
extern struct platform_device msm_device_sdc4;
-extern struct platform_device msm_device_hsusb;
+extern struct platform_device msm_device_hsusb_otg;
+extern struct platform_device msm_device_hsusb_peripheral;
+extern struct platform_device msm_device_hsusb_host;
extern struct platform_device msm_device_i2c;
extern struct platform_device msm_device_smd;
+extern struct platform_device msm_device_dmov;
extern struct platform_device msm_device_nand;
+extern struct clk msm_clocks_7x01a[];
+extern unsigned msm_num_clocks_7x01a;
+
+extern struct clk msm_clocks_7x25[];
+extern unsigned msm_num_clocks_7x25;
+
+extern struct clk msm_clocks_7x27[];
+extern unsigned msm_num_clocks_7x27;
+
+extern struct clk msm_clocks_7x30[];
+extern unsigned msm_num_clocks_7x30;
+
+extern struct clk msm_clocks_8x50[];
+extern unsigned msm_num_clocks_8x50;
+
+
#endif
diff --git a/arch/arm/mach-msm/devices_htc.c b/arch/arm/mach-msm/devices_htc.c
new file mode 100644
index 000000000000..fad7dfb1bcc4
--- /dev/null
+++ b/arch/arm/mach-msm/devices_htc.c
@@ -0,0 +1,450 @@
+/* linux/arch/arm/mach-msm/devices.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/dma-mapping.h>
+#include <mach/msm_iomap.h>
+#include <mach/dma.h>
+#include "gpio_chip.h"
+#include "devices.h"
+#include <mach/board.h>
+#include <mach/board_htc.h>
+#include <mach/msm_hsusb.h>
+#include <linux/usb/mass_storage_function.h>
+#include <linux/usb/android.h>
+
+#include <asm/mach/flash.h>
+#include <asm/setup.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <linux/android_pmem.h>
+#include <mach/msm_rpcrouter.h>
+#include <mach/msm_iomap.h>
+#include <asm/mach/mmc.h>
+
+static char *df_serialno = "000000000000";
+
+#if 0
+struct platform_device *devices[] __initdata = {
+ &msm_device_nand,
+ &msm_device_smd,
+ &msm_device_i2c,
+};
+
+void __init msm_add_devices(void)
+{
+ platform_add_devices(devices, ARRAY_SIZE(devices));
+}
+#endif
+
+#define HSUSB_API_INIT_PHY_PROC 2
+#define HSUSB_API_PROG 0x30000064
+#define HSUSB_API_VERS 0x10001
+static void internal_phy_reset(void)
+{
+ struct msm_rpc_endpoint *usb_ep;
+ int rc;
+ struct hsusb_phy_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+
+ printk(KERN_INFO "msm_hsusb_phy_reset\n");
+
+ usb_ep = msm_rpc_connect(HSUSB_API_PROG, HSUSB_API_VERS, 0);
+ if (IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: init rpc failed! error: %ld\n",
+ __func__, PTR_ERR(usb_ep));
+ goto close;
+ }
+ rc = msm_rpc_call(usb_ep, HSUSB_API_INIT_PHY_PROC,
+ &req, sizeof(req), 5 * HZ);
+ if (rc < 0)
+ printk(KERN_ERR "%s: rpc call failed! (%d)\n", __func__, rc);
+
+close:
+ msm_rpc_close(usb_ep);
+}
+
+/* adjust eye diagram, disable vbusvalid interrupts */
+static int hsusb_phy_init_seq[] = { 0x40, 0x31, 0x1D, 0x0D, 0x1D, 0x10, -1 };
+
+#ifdef CONFIG_USB_FUNCTION
+static char *usb_functions[] = {
+#if defined(CONFIG_USB_FUNCTION_MASS_STORAGE) || defined(CONFIG_USB_FUNCTION_UMS)
+ "usb_mass_storage",
+#endif
+#ifdef CONFIG_USB_FUNCTION_ADB
+ "adb",
+#endif
+};
+
+static struct msm_hsusb_product usb_products[] = {
+ {
+ .product_id = 0x0c01,
+ .functions = 0x00000041, /* usb_mass_storage */
+ },
+ {
+ .product_id = 0x0c02,
+ .functions = 0x00000043, /* usb_mass_storage + adb */
+ },
+};
+#endif
+
+struct msm_hsusb_platform_data msm_hsusb_pdata = {
+ .phy_reset = internal_phy_reset,
+ .phy_init_seq = hsusb_phy_init_seq,
+#ifdef CONFIG_USB_FUNCTION
+ .vendor_id = 0x0bb4,
+ .product_id = 0x0c02,
+ .version = 0x0100,
+ .product_name = "Android Phone",
+ .manufacturer_name = "HTC",
+
+ .functions = usb_functions,
+ .num_functions = ARRAY_SIZE(usb_functions),
+ .products = usb_products,
+ .num_products = ARRAY_SIZE(usb_products),
+#endif
+};
+
+#ifdef CONFIG_USB_FUNCTION
+static struct usb_mass_storage_platform_data mass_storage_pdata = {
+ .nluns = 1,
+ .buf_size = 16384,
+ .vendor = "HTC ",
+ .product = "Android Phone ",
+ .release = 0x0100,
+};
+
+static struct platform_device usb_mass_storage_device = {
+ .name = "usb_mass_storage",
+ .id = -1,
+ .dev = {
+ .platform_data = &mass_storage_pdata,
+ },
+};
+#endif
+
+#ifdef CONFIG_USB_ANDROID
+static struct android_usb_platform_data android_usb_pdata = {
+ .vendor_id = 0x0bb4,
+ .product_id = 0x0c01,
+ .adb_product_id = 0x0c02,
+ .version = 0x0100,
+ .product_name = "Android Phone",
+ .manufacturer_name = "HTC",
+ .nluns = 1,
+};
+
+static struct platform_device android_usb_device = {
+ .name = "android_usb",
+ .id = -1,
+ .dev = {
+ .platform_data = &android_usb_pdata,
+ },
+};
+#endif
+
+void __init msm_add_usb_devices(void (*phy_reset) (void))
+{
+ /* setup */
+ if (phy_reset)
+ msm_hsusb_pdata.phy_reset = phy_reset;
+ msm_device_hsusb.dev.platform_data = &msm_hsusb_pdata;
+ platform_device_register(&msm_device_hsusb);
+#ifdef CONFIG_USB_FUNCTION_MASS_STORAGE
+ platform_device_register(&usb_mass_storage_device);
+#endif
+#ifdef CONFIG_USB_ANDROID
+ platform_device_register(&android_usb_device);
+#endif
+}
+
+static struct android_pmem_platform_data pmem_pdata = {
+ .name = "pmem",
+ .no_allocator = 1,
+ .cached = 1,
+};
+
+static struct android_pmem_platform_data pmem_adsp_pdata = {
+ .name = "pmem_adsp",
+ .no_allocator = 0,
+ .cached = 0,
+};
+
+static struct android_pmem_platform_data pmem_camera_pdata = {
+ .name = "pmem_camera",
+ .no_allocator = 0,
+ .cached = 0,
+};
+
+static struct android_pmem_platform_data pmem_gpu0_pdata = {
+ .name = "pmem_gpu0",
+ .no_allocator = 1,
+ .cached = 0,
+ .buffered = 1,
+};
+
+static struct android_pmem_platform_data pmem_gpu1_pdata = {
+ .name = "pmem_gpu1",
+ .no_allocator = 1,
+ .cached = 0,
+ .buffered = 1,
+};
+
+static struct platform_device pmem_device = {
+ .name = "android_pmem",
+ .id = 0,
+ .dev = { .platform_data = &pmem_pdata },
+};
+
+static struct platform_device pmem_adsp_device = {
+ .name = "android_pmem",
+ .id = 1,
+ .dev = { .platform_data = &pmem_adsp_pdata },
+};
+
+static struct platform_device pmem_gpu0_device = {
+ .name = "android_pmem",
+ .id = 2,
+ .dev = { .platform_data = &pmem_gpu0_pdata },
+};
+
+static struct platform_device pmem_gpu1_device = {
+ .name = "android_pmem",
+ .id = 3,
+ .dev = { .platform_data = &pmem_gpu1_pdata },
+};
+
+static struct platform_device pmem_camera_device = {
+ .name = "android_pmem",
+ .id = 4,
+ .dev = { .platform_data = &pmem_camera_pdata },
+};
+
+static struct resource ram_console_resource[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct platform_device ram_console_device = {
+ .name = "ram_console",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ram_console_resource),
+ .resource = ram_console_resource,
+};
+
+void __init msm_add_mem_devices(struct msm_pmem_setting *setting)
+{
+ if (setting->pmem_size) {
+ pmem_pdata.start = setting->pmem_start;
+ pmem_pdata.size = setting->pmem_size;
+ platform_device_register(&pmem_device);
+ }
+
+ if (setting->pmem_adsp_size) {
+ pmem_adsp_pdata.start = setting->pmem_adsp_start;
+ pmem_adsp_pdata.size = setting->pmem_adsp_size;
+ platform_device_register(&pmem_adsp_device);
+ }
+
+ if (setting->pmem_gpu0_size) {
+ pmem_gpu0_pdata.start = setting->pmem_gpu0_start;
+ pmem_gpu0_pdata.size = setting->pmem_gpu0_size;
+ platform_device_register(&pmem_gpu0_device);
+ }
+
+ if (setting->pmem_gpu1_size) {
+ pmem_gpu1_pdata.start = setting->pmem_gpu1_start;
+ pmem_gpu1_pdata.size = setting->pmem_gpu1_size;
+ platform_device_register(&pmem_gpu1_device);
+ }
+
+ if (setting->pmem_camera_size) {
+ pmem_camera_pdata.start = setting->pmem_camera_start;
+ pmem_camera_pdata.size = setting->pmem_camera_size;
+ platform_device_register(&pmem_camera_device);
+ }
+
+ if (setting->ram_console_size) {
+ ram_console_resource[0].start = setting->ram_console_start;
+ ram_console_resource[0].end = setting->ram_console_start
+ + setting->ram_console_size - 1;
+ platform_device_register(&ram_console_device);
+ }
+}
+
+#define PM_LIBPROG 0x30000061
+#if (CONFIG_MSM_AMSS_VERSION == 6220) || (CONFIG_MSM_AMSS_VERSION == 6225)
+#define PM_LIBVERS 0xfb837d0b
+#else
+#define PM_LIBVERS 0x10001
+#endif
+
+#if 0
+static struct platform_device *msm_serial_devices[] __initdata = {
+ &msm_device_uart1,
+ &msm_device_uart2,
+ &msm_device_uart3,
+ #ifdef CONFIG_SERIAL_MSM_HS
+ &msm_device_uart_dm1,
+ &msm_device_uart_dm2,
+ #endif
+};
+
+int __init msm_add_serial_devices(unsigned num)
+{
+ if (num > MSM_SERIAL_NUM)
+ return -EINVAL;
+
+ return platform_device_register(msm_serial_devices[num]);
+}
+#endif
+
+#define ATAG_SMI 0x4d534D71
+/* setup calls mach->fixup, then parse_tags, parse_cmdline
+ * We need to setup meminfo in mach->fixup, so this function
+ * will need to traverse each tag to find smi tag.
+ */
+int __init parse_tag_smi(const struct tag *tags)
+{
+ int smi_sz = 0, find = 0;
+ struct tag *t = (struct tag *)tags;
+
+ for (; t->hdr.size; t = tag_next(t)) {
+ if (t->hdr.tag == ATAG_SMI) {
+ printk(KERN_DEBUG "find the smi tag\n");
+ find = 1;
+ break;
+ }
+ }
+ if (!find)
+ return -1;
+
+ printk(KERN_DEBUG "parse_tag_smi: smi size = %d\n", t->u.mem.size);
+ smi_sz = t->u.mem.size;
+ return smi_sz;
+}
+__tagtable(ATAG_SMI, parse_tag_smi);
+
+
+#define ATAG_HWID 0x4d534D72
+int __init parse_tag_hwid(const struct tag *tags)
+{
+ int hwid = 0, find = 0;
+ struct tag *t = (struct tag *)tags;
+
+ for (; t->hdr.size; t = tag_next(t)) {
+ if (t->hdr.tag == ATAG_HWID) {
+ printk(KERN_DEBUG "find the hwid tag\n");
+ find = 1;
+ break;
+ }
+ }
+
+ if (find)
+ hwid = t->u.revision.rev;
+ printk(KERN_DEBUG "parse_tag_hwid: hwid = 0x%x\n", hwid);
+ return hwid;
+}
+__tagtable(ATAG_HWID, parse_tag_hwid);
+
+#define ATAG_SKUID 0x4d534D73
+int __init parse_tag_skuid(const struct tag *tags)
+{
+ int skuid = 0, find = 0;
+ struct tag *t = (struct tag *)tags;
+
+ for (; t->hdr.size; t = tag_next(t)) {
+ if (t->hdr.tag == ATAG_SKUID) {
+ printk(KERN_DEBUG "find the skuid tag\n");
+ find = 1;
+ break;
+ }
+ }
+
+ if (find)
+ skuid = t->u.revision.rev;
+ printk(KERN_DEBUG "parse_tag_skuid: hwid = 0x%x\n", skuid);
+ return skuid;
+}
+__tagtable(ATAG_SKUID, parse_tag_skuid);
+
+#define ATAG_ENGINEERID 0x4d534D75
+int __init parse_tag_engineerid(const struct tag *tags)
+{
+ int engineerid = 0, find = 0;
+ struct tag *t = (struct tag *)tags;
+
+ for (; t->hdr.size; t = tag_next(t)) {
+ if (t->hdr.tag == ATAG_ENGINEERID) {
+ printk(KERN_DEBUG "find the engineer tag\n");
+ find = 1;
+ break;
+ }
+ }
+
+ if (find)
+ engineerid = t->u.revision.rev;
+ printk(KERN_DEBUG "parse_tag_engineerid: hwid = 0x%x\n", engineerid);
+ return engineerid;
+}
+__tagtable(ATAG_ENGINEERID, parse_tag_engineerid);
+
+static int mfg_mode;
+int __init board_mfg_mode_init(char *s)
+{
+ if (!strcmp(s, "normal"))
+ mfg_mode = 0;
+ else if (!strcmp(s, "factory2"))
+ mfg_mode = 1;
+ else if (!strcmp(s, "recovery"))
+ mfg_mode = 2;
+ else if (!strcmp(s, "charge"))
+ mfg_mode = 3;
+
+ return 1;
+}
+__setup("androidboot.mode=", board_mfg_mode_init);
+
+
+int board_mfg_mode(void)
+{
+ return mfg_mode;
+}
+
+static int __init board_serialno_setup(char *serialno)
+{
+ char *str;
+
+ if (board_mfg_mode() || !strlen(serialno))
+ str = df_serialno;
+ else
+ str = serialno;
+#ifdef CONFIG_USB_FUNCTION
+ msm_hsusb_pdata.serial_number = str;
+#endif
+#ifdef CONFIG_USB_ANDROID
+ android_usb_pdata.serial_number = str;
+#endif
+ return 1;
+}
+
+__setup("androidboot.serialno=", board_serialno_setup);
diff --git a/arch/arm/mach-msm/dma.c b/arch/arm/mach-msm/dma.c
index f5420f9585c5..bbfab84938c6 100644
--- a/arch/arm/mach-msm/dma.c
+++ b/arch/arm/mach-msm/dma.c
@@ -1,6 +1,7 @@
/* linux/arch/arm/mach-msm/dma.c
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -13,10 +14,15 @@
*
*/
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <mach/dma.h>
+#define MODULE_NAME "msm_dmov"
#define MSM_DMOV_CHANNEL_COUNT 16
enum {
@@ -25,11 +31,19 @@ enum {
MSM_DMOV_PRINT_FLOW = 4
};
+enum {
+ CLK_DIS,
+ CLK_TO_BE_DIS,
+ CLK_EN
+};
+
static DEFINE_SPINLOCK(msm_dmov_lock);
+static struct clk *msm_dmov_clk;
static unsigned int channel_active;
static struct list_head ready_commands[MSM_DMOV_CHANNEL_COUNT];
static struct list_head active_commands[MSM_DMOV_CHANNEL_COUNT];
unsigned int msm_dmov_print_mask = MSM_DMOV_PRINT_ERRORS;
+unsigned int clk_ctl = CLK_DIS;
#define MSM_DMOV_DPRINTF(mask, format, args...) \
do { \
@@ -48,12 +62,32 @@ void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful)
writel((graceful << 31), DMOV_FLUSH0(id));
}
+static void timer_func(unsigned long func_paramter)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&msm_dmov_lock, irq_flags);
+ if (clk_ctl == CLK_TO_BE_DIS) {
+ BUG_ON(channel_active);
+ clk_disable(msm_dmov_clk);
+ clk_ctl = CLK_DIS;
+ }
+ spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
+}
+DEFINE_TIMER(timer, timer_func, 0, 0);
+
void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
{
unsigned long irq_flags;
unsigned int status;
spin_lock_irqsave(&msm_dmov_lock, irq_flags);
+ if (clk_ctl == CLK_DIS)
+ clk_enable(msm_dmov_clk);
+ else if (clk_ctl == CLK_TO_BE_DIS)
+ del_timer(&timer);
+ clk_ctl = CLK_EN;
+
status = readl(DMOV_STATUS(id));
if (list_empty(&ready_commands[id]) &&
(status & DMOV_STATUS_CMD_PTR_RDY)) {
@@ -70,6 +104,10 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd)
channel_active |= 1U << id;
writel(cmd->cmdptr, DMOV_CMD_PTR(id));
} else {
+ if (!channel_active) {
+ clk_ctl = CLK_TO_BE_DIS;
+ mod_timer(&timer, jiffies + HZ);
+ }
if (list_empty(&active_commands[id]))
PRINT_ERROR("msm_dmov_enqueue_cmd(%d), error datamover stalled, status %x\n", id, status);
@@ -123,6 +161,7 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
PRINT_FLOW("dmov_exec_cmdptr(%d, %x) done\n", id, cmdptr);
return 0;
}
+EXPORT_SYMBOL(msm_dmov_exec_cmd);
static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
@@ -219,28 +258,64 @@ static irqreturn_t msm_datamover_irq_handler(int irq, void *dev_id)
PRINT_FLOW("msm_datamover_irq_handler id %d, status %x\n", id, ch_status);
}
- if (!channel_active)
- disable_irq(INT_ADM_AARM);
+ if (!channel_active) {
+ disable_irq_nosync(INT_ADM_AARM);
+ clk_ctl = CLK_TO_BE_DIS;
+ mod_timer(&timer, jiffies + HZ);
+ }
spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
return IRQ_HANDLED;
}
+static int msm_dmov_suspend_noirq(struct device *dev)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&msm_dmov_lock, irq_flags);
+ if (clk_ctl == CLK_TO_BE_DIS) {
+ BUG_ON(channel_active);
+ del_timer(&timer);
+ clk_disable(msm_dmov_clk);
+ clk_ctl = CLK_DIS;
+ }
+ spin_unlock_irqrestore(&msm_dmov_lock, irq_flags);
+ return 0;
+}
+
+static struct dev_pm_ops dmov_pm = {
+ .suspend_noirq = msm_dmov_suspend_noirq,
+};
+
+static struct platform_driver msm_dmov_driver = {
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &dmov_pm,
+ },
+};
+
static int __init msm_init_datamover(void)
{
int i;
int ret;
+
for (i = 0; i < MSM_DMOV_CHANNEL_COUNT; i++) {
INIT_LIST_HEAD(&ready_commands[i]);
INIT_LIST_HEAD(&active_commands[i]);
writel(DMOV_CONFIG_IRQ_EN | DMOV_CONFIG_FORCE_TOP_PTR_RSLT | DMOV_CONFIG_FORCE_FLUSH_RSLT, DMOV_CONFIG(i));
}
+ msm_dmov_clk = clk_get(NULL, "adm_clk");
+ if (IS_ERR(msm_dmov_clk))
+ return PTR_ERR(msm_dmov_clk);
ret = request_irq(INT_ADM_AARM, msm_datamover_irq_handler, 0, "msmdatamover", NULL);
if (ret)
return ret;
disable_irq(INT_ADM_AARM);
+ ret = platform_driver_register(&msm_dmov_driver);
+ if (ret)
+ return ret;
return 0;
}
arch_initcall(msm_init_datamover);
-
diff --git a/arch/arm/mach-msm/dma_test.c b/arch/arm/mach-msm/dma_test.c
new file mode 100644
index 000000000000..c4967f97f832
--- /dev/null
+++ b/arch/arm/mach-msm/dma_test.c
@@ -0,0 +1,404 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+
+#include <mach/dma.h>
+#include <mach/dma_test.h>
+
+
+/**********************************************************************
+ * User-space testing of the DMA driver.
+ * Intended to be loaded as a module. We have a bunch of static
+ * buffers that the user-side can refer to. The main DMA is simply
+ * used memory-to-memory. Device DMA is best tested with the specific
+ * device driver in question.
+ */
+#define MAX_TEST_BUFFERS 40
+#define MAX_TEST_BUFFER_SIZE 65536
+static void *(buffers[MAX_TEST_BUFFERS]);
+static int sizes[MAX_TEST_BUFFERS];
+
+/* Anything that allocates or deallocates buffers must lock with this
+ * mutex. */
+static DECLARE_MUTEX(buffer_lock);
+
+/* Each buffer has a semaphore associated with it that will be held
+ * for the duration of any operations on that buffer. It also must be
+ * available to free the given buffer. */
+static struct semaphore buffer_sems[MAX_TEST_BUFFERS];
+
+#define buffer_up(num) up(&buffer_sems[num])
+#define buffer_down(num) down(&buffer_sems[num])
+
+/* Use the General Purpose DMA channel as our test channel. This channel
+ * should be available on any target. */
+#define TEST_CHANNEL DMOV_GP_CHAN
+
+struct private {
+ /* Each open instance is allowed a single pending
+ * operation. */
+ struct semaphore sem;
+
+ /* Simple command buffer. Allocated and freed by driver. */
+ /* TODO: Allocate these together. */
+ dmov_s *command_ptr;
+
+ /* Indirect. */
+ u32 *command_ptr_ptr;
+
+ /* Indicates completion with pending request. */
+ struct completion complete;
+};
+
+static void free_buffers(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_TEST_BUFFERS; i++) {
+ if (sizes[i] > 0) {
+ kfree(buffers[i]);
+ sizes[i] = 0;
+ }
+ }
+}
+
+/* Copy between two buffers, using the DMA. */
+
+/* Allocate a buffer of a requested size. */
+static int buffer_req(struct msm_dma_alloc_req *req)
+{
+ int i;
+
+ if (req->size <= 0 || req->size > MAX_TEST_BUFFER_SIZE)
+ return -EINVAL;
+
+ down(&buffer_lock);
+
+ /* Find a free buffer. */
+ for (i = 0; i < MAX_TEST_BUFFERS; i++)
+ if (sizes[i] == 0)
+ break;
+
+ if (i >= MAX_TEST_BUFFERS)
+ goto error;
+
+ buffers[i] = kmalloc(req->size, GFP_KERNEL | __GFP_DMA);
+ if (buffers[i] == 0)
+ goto error;
+ sizes[i] = req->size;
+
+ req->bufnum = i;
+
+ up(&buffer_lock);
+ return 0;
+
+error:
+ up(&buffer_lock);
+ return -ENOSPC;
+}
+
+static int dma_scopy(struct msm_dma_scopy *scopy, struct private *priv)
+{
+ int err = 0;
+ dma_addr_t mapped_cmd;
+ dma_addr_t mapped_cmd_ptr;
+
+ buffer_down(scopy->srcbuf);
+ if (scopy->srcbuf != scopy->destbuf)
+ buffer_down(scopy->destbuf);
+
+ priv->command_ptr->cmd = CMD_PTR_LP | CMD_MODE_SINGLE;
+ priv->command_ptr->src = dma_map_single(NULL, buffers[scopy->srcbuf],
+ scopy->size, DMA_TO_DEVICE);
+ priv->command_ptr->dst = dma_map_single(NULL, buffers[scopy->destbuf],
+ scopy->size, DMA_FROM_DEVICE);
+ priv->command_ptr->len = scopy->size;
+
+ mapped_cmd =
+ dma_map_single(NULL, priv->command_ptr, sizeof(*priv->command_ptr),
+ DMA_TO_DEVICE);
+ *(priv->command_ptr_ptr) = CMD_PTR_ADDR(mapped_cmd) | CMD_PTR_LP;
+
+ mapped_cmd_ptr = dma_map_single(NULL, priv->command_ptr_ptr,
+ sizeof(*priv->command_ptr_ptr),
+ DMA_TO_DEVICE);
+
+ msm_dmov_exec_cmd(TEST_CHANNEL,
+ DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(mapped_cmd_ptr));
+
+ dma_unmap_single(NULL, (dma_addr_t) mapped_cmd_ptr,
+ sizeof(*priv->command_ptr_ptr), DMA_TO_DEVICE);
+ dma_unmap_single(NULL, (dma_addr_t) mapped_cmd,
+ sizeof(*priv->command_ptr), DMA_TO_DEVICE);
+ dma_unmap_single(NULL, (dma_addr_t) priv->command_ptr->dst,
+ scopy->size, DMA_FROM_DEVICE);
+ dma_unmap_single(NULL, (dma_addr_t) priv->command_ptr->src,
+ scopy->size, DMA_TO_DEVICE);
+
+ if (scopy->srcbuf != scopy->destbuf)
+ buffer_up(scopy->destbuf);
+ buffer_up(scopy->srcbuf);
+
+ return err;
+}
+
+static int dma_test_open(struct inode *inode, struct file *file)
+{
+ struct private *priv;
+
+ printk(KERN_ALERT "%s\n", __func__);
+
+ priv = kmalloc(sizeof(struct private), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+ file->private_data = priv;
+
+ init_MUTEX(&priv->sem);
+
+ /* Note, that these should be allocated together so we don't
+ * waste 32 bytes for each. */
+
+ /* Allocate the command pointer. */
+ priv->command_ptr = kmalloc(sizeof(&priv->command_ptr),
+ GFP_KERNEL | __GFP_DMA);
+ if (priv->command_ptr == NULL) {
+ kfree(priv);
+ return -ENOSPC;
+ }
+
+ /* And the indirect pointer. */
+ priv->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
+ if (priv->command_ptr_ptr == NULL) {
+ kfree(priv->command_ptr);
+ kfree(priv);
+ return -ENOSPC;
+ }
+
+ return 0;
+}
+
+static int dma_test_release(struct inode *inode, struct file *file)
+{
+ struct private *priv;
+
+ printk(KERN_ALERT "%s\n", __func__);
+
+ if (file->private_data != NULL) {
+ priv = file->private_data;
+ kfree(priv->command_ptr_ptr);
+ kfree(priv->command_ptr);
+ }
+ kfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static int dma_test_ioctl(struct inode *inode, struct file *file,
+ unsigned cmd, unsigned long arg)
+{
+ int err = 0;
+ int tmp;
+ struct msm_dma_alloc_req alloc_req;
+ struct msm_dma_bufxfer xfer;
+ struct msm_dma_scopy scopy;
+ struct private *priv = file->private_data;
+
+ /* Verify user arguments. */
+ if (_IOC_TYPE(cmd) != MSM_DMA_IOC_MAGIC)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case MSM_DMA_IOALLOC:
+ if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+ sizeof(alloc_req)))
+ return -EFAULT;
+ if (__copy_from_user(&alloc_req, (void __user *)arg,
+ sizeof(alloc_req)))
+ return -EFAULT;
+ err = buffer_req(&alloc_req);
+ if (err < 0)
+ return err;
+ if (__copy_to_user((void __user *)arg, &alloc_req,
+ sizeof(alloc_req)))
+ return -EFAULT;
+ break;
+
+ case MSM_DMA_IOFREEALL:
+ down(&buffer_lock);
+ for (tmp = 0; tmp < MAX_TEST_BUFFERS; tmp++) {
+ buffer_down(tmp);
+ if (sizes[tmp] > 0) {
+ kfree(buffers[tmp]);
+ sizes[tmp] = 0;
+ }
+ buffer_up(tmp);
+ }
+ up(&buffer_lock);
+ break;
+
+ case MSM_DMA_IOWBUF:
+ if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer)))
+ return -EFAULT;
+ if (xfer.bufnum < 0 || xfer.bufnum >= MAX_TEST_BUFFERS)
+ return -EINVAL;
+ buffer_down(xfer.bufnum);
+ if (sizes[xfer.bufnum] == 0 ||
+ xfer.size <= 0 || xfer.size > sizes[xfer.bufnum]) {
+ buffer_up(xfer.bufnum);
+ return -EINVAL;
+ }
+ if (copy_from_user(buffers[xfer.bufnum],
+ (void __user *)xfer.data, xfer.size))
+ err = -EFAULT;
+ buffer_up(xfer.bufnum);
+ break;
+
+ case MSM_DMA_IORBUF:
+ if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer)))
+ return -EFAULT;
+ if (xfer.bufnum < 0 || xfer.bufnum >= MAX_TEST_BUFFERS)
+ return -EINVAL;
+ buffer_down(xfer.bufnum);
+ if (sizes[xfer.bufnum] == 0 ||
+ xfer.size <= 0 || xfer.size > sizes[xfer.bufnum]) {
+ buffer_up(xfer.bufnum);
+ return -EINVAL;
+ }
+ if (copy_to_user((void __user *)xfer.data, buffers[xfer.bufnum],
+ xfer.size))
+ err = -EFAULT;
+ buffer_up(xfer.bufnum);
+ break;
+
+ case MSM_DMA_IOSCOPY:
+ if (copy_from_user(&scopy, (void __user *)arg, sizeof(scopy)))
+ return -EFAULT;
+ if (scopy.srcbuf < 0 || scopy.srcbuf >= MAX_TEST_BUFFERS ||
+ sizes[scopy.srcbuf] == 0 ||
+ scopy.destbuf < 0 || scopy.destbuf >= MAX_TEST_BUFFERS ||
+ sizes[scopy.destbuf] == 0 ||
+ scopy.size > sizes[scopy.destbuf] ||
+ scopy.size > sizes[scopy.srcbuf])
+ return -EINVAL;
+#if 0
+ /* Test interface using memcpy. */
+ memcpy(buffers[scopy.destbuf],
+ buffers[scopy.srcbuf], scopy.size);
+#else
+ err = dma_scopy(&scopy, priv);
+#endif
+ break;
+
+ default:
+ return -ENOTTY;
+ }
+
+ return err;
+}
+
+/**********************************************************************
+ * Register ourselves as a misc device to be able to test the DMA code
+ * from userspace. */
+
+static const struct file_operations dma_test_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = dma_test_ioctl,
+ .open = dma_test_open,
+ .release = dma_test_release,
+};
+
+static struct miscdevice dma_test_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "msmdma",
+ .fops = &dma_test_fops,
+};
+static int dma_test_init(void)
+{
+ int ret, i;
+
+ ret = misc_register(&dma_test_dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < MAX_TEST_BUFFERS; i++)
+ init_MUTEX(&buffer_sems[i]);
+
+ printk(KERN_ALERT "%s\n", __func__);
+ return 0;
+}
+
+static void dma_test_exit(void)
+{
+ free_buffers();
+ misc_deregister(&dma_test_dev);
+ printk(KERN_ALERT "%s\n", __func__);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("David Brown, Qualcomm, Incorporated");
+MODULE_DESCRIPTION("Test for MSM DMA driver");
+MODULE_VERSION("1.01");
+
+module_init(dma_test_init);
+module_exit(dma_test_exit);
diff --git a/arch/arm/mach-msm/fiq_glue.S b/arch/arm/mach-msm/fiq_glue.S
new file mode 100644
index 000000000000..9ded61cc0de8
--- /dev/null
+++ b/arch/arm/mach-msm/fiq_glue.S
@@ -0,0 +1,64 @@
+/* arch/arm/mach-msm/fiq_glue.S
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ .text
+
+ .global fiq_glue_end
+
+ENTRY(fiq_glue)
+ adr r12, fiq_glue_ctxt_addr
+ ldr r8, [r12]
+ ldmia r8, {r9, r10, sp}
+
+ /* store pc, cpsr from previous mode */
+ sub r11, lr, #4
+ mrs r12, spsr
+ stmfd sp!, {r11-r12,lr}
+
+ /* store r8-r14 from previous mode */
+ sub sp, sp, #(7 * 4)
+ stmia sp, {r8-r14}^
+ nop
+
+ /* store r0-r7 from previous mode */
+ stmfd sp!, {r0-r7}
+
+ /* call func(data,regs) */
+ mov r0, r10
+ mov r1, sp
+ blx r9
+
+ /* restore/discard saved state */
+ ldmfd sp!, {r0-r7}
+ add sp, sp, #(9 * 4)
+ ldmfd sp!, {lr}
+
+ subs pc, lr, #4
+
+fiq_glue_ctxt_addr:
+ .long fiq_glue_ctxt
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp */
+ ldr r3, =fiq_glue_ctxt
+ stmia r3, {r0-r2}
+ bx lr
+
+ .data
+fiq_glue_ctxt:
+ .long 0, 0, 0
diff --git a/arch/arm/mach-msm/generic_gpio.c b/arch/arm/mach-msm/generic_gpio.c
new file mode 100644
index 000000000000..fe24d38345d0
--- /dev/null
+++ b/arch/arm/mach-msm/generic_gpio.c
@@ -0,0 +1,274 @@
+/* arch/arm/mach-msm/generic_gpio.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <asm/gpio.h>
+#include "gpio_chip.h"
+
+#define GPIO_NUM_TO_CHIP_INDEX(gpio) ((gpio)>>5)
+
+struct gpio_state {
+ unsigned long flags;
+ int refcount;
+};
+
+static DEFINE_SPINLOCK(gpio_chips_lock);
+static LIST_HEAD(gpio_chip_list);
+static struct gpio_chip **gpio_chip_array;
+static unsigned long gpio_chip_array_size;
+
+int register_gpio_chip(struct gpio_chip *new_gpio_chip)
+{
+ int err = 0;
+ struct gpio_chip *gpio_chip;
+ int i;
+ unsigned long irq_flags;
+ unsigned int chip_array_start_index, chip_array_end_index;
+
+ new_gpio_chip->state = kzalloc((new_gpio_chip->end + 1 - new_gpio_chip->start) * sizeof(new_gpio_chip->state[0]), GFP_KERNEL);
+ if (new_gpio_chip->state == NULL) {
+ printk(KERN_ERR "register_gpio_chip: failed to allocate state\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip_array_start_index = GPIO_NUM_TO_CHIP_INDEX(new_gpio_chip->start);
+ chip_array_end_index = GPIO_NUM_TO_CHIP_INDEX(new_gpio_chip->end);
+ if (chip_array_end_index >= gpio_chip_array_size) {
+ struct gpio_chip **new_gpio_chip_array;
+ unsigned long new_gpio_chip_array_size = chip_array_end_index + 1;
+
+ new_gpio_chip_array = kmalloc(new_gpio_chip_array_size * sizeof(new_gpio_chip_array[0]), GFP_ATOMIC);
+ if (new_gpio_chip_array == NULL) {
+ printk(KERN_ERR "register_gpio_chip: failed to allocate array\n");
+ err = -ENOMEM;
+ goto failed;
+ }
+ for (i = 0; i < gpio_chip_array_size; i++)
+ new_gpio_chip_array[i] = gpio_chip_array[i];
+ for (i = gpio_chip_array_size; i < new_gpio_chip_array_size; i++)
+ new_gpio_chip_array[i] = NULL;
+ gpio_chip_array = new_gpio_chip_array;
+ gpio_chip_array_size = new_gpio_chip_array_size;
+ }
+ list_for_each_entry(gpio_chip, &gpio_chip_list, list) {
+ if (gpio_chip->start > new_gpio_chip->end) {
+ list_add_tail(&new_gpio_chip->list, &gpio_chip->list);
+ goto added;
+ }
+ if (gpio_chip->end >= new_gpio_chip->start) {
+ printk(KERN_ERR "register_gpio_source %u-%u overlaps with %u-%u\n",
+ new_gpio_chip->start, new_gpio_chip->end,
+ gpio_chip->start, gpio_chip->end);
+ err = -EBUSY;
+ goto failed;
+ }
+ }
+ list_add_tail(&new_gpio_chip->list, &gpio_chip_list);
+added:
+ for (i = chip_array_start_index; i <= chip_array_end_index; i++) {
+ if (gpio_chip_array[i] == NULL || gpio_chip_array[i]->start > new_gpio_chip->start)
+ gpio_chip_array[i] = new_gpio_chip;
+ }
+failed:
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+ if (err)
+ kfree(new_gpio_chip->state);
+ return err;
+}
+
+static struct gpio_chip *get_gpio_chip_locked(unsigned int gpio)
+{
+ unsigned long i;
+ struct gpio_chip *chip;
+
+ i = GPIO_NUM_TO_CHIP_INDEX(gpio);
+ if (i >= gpio_chip_array_size)
+ return NULL;
+ chip = gpio_chip_array[i];
+ if (chip == NULL)
+ return NULL;
+ list_for_each_entry_from(chip, &gpio_chip_list, list) {
+ if (gpio < chip->start)
+ return NULL;
+ if (gpio <= chip->end)
+ return chip;
+ }
+ return NULL;
+}
+
+static int request_gpio(unsigned int gpio, unsigned long flags)
+{
+ int err = 0;
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+ unsigned long chip_index;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip == NULL) {
+ err = -EINVAL;
+ goto err;
+ }
+ chip_index = gpio - chip->start;
+ if (chip->state[chip_index].refcount == 0) {
+ chip->configure(chip, gpio, flags);
+ chip->state[chip_index].flags = flags;
+ chip->state[chip_index].refcount++;
+ } else if ((flags & IRQF_SHARED) && (chip->state[chip_index].flags & IRQF_SHARED))
+ chip->state[chip_index].refcount++;
+ else
+ err = -EBUSY;
+err:
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+ return err;
+}
+
+int gpio_request(unsigned gpio, const char *label)
+{
+ return request_gpio(gpio, 0);
+}
+EXPORT_SYMBOL(gpio_request);
+
+void gpio_free(unsigned gpio)
+{
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+ unsigned long chip_index;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip) {
+ chip_index = gpio - chip->start;
+ chip->state[chip_index].refcount--;
+ }
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+}
+EXPORT_SYMBOL(gpio_free);
+
+static int gpio_get_irq_num(unsigned int gpio, unsigned int *irqp, unsigned long *irqnumflagsp)
+{
+ int ret = -ENOTSUPP;
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip && chip->get_irq_num)
+ ret = chip->get_irq_num(chip, gpio, irqp, irqnumflagsp);
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+ return ret;
+}
+
+int gpio_to_irq(unsigned gpio)
+{
+ int ret, irq;
+ ret = gpio_get_irq_num(gpio, &irq, NULL);
+ if (ret)
+ return ret;
+ return irq;
+}
+EXPORT_SYMBOL(gpio_to_irq);
+
+int gpio_configure(unsigned int gpio, unsigned long flags)
+{
+ int ret = -ENOTSUPP;
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip)
+ ret = chip->configure(chip, gpio, flags);
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+ return ret;
+}
+EXPORT_SYMBOL(gpio_configure);
+
+int gpio_direction_input(unsigned gpio)
+{
+ return gpio_configure(gpio, GPIOF_INPUT);
+}
+EXPORT_SYMBOL(gpio_direction_input);
+
+int gpio_direction_output(unsigned gpio, int value)
+{
+ gpio_set_value(gpio, value);
+ return gpio_configure(gpio, GPIOF_DRIVE_OUTPUT);
+}
+EXPORT_SYMBOL(gpio_direction_output);
+
+int gpio_get_value(unsigned gpio)
+{
+ int ret = -ENOTSUPP;
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip && chip->read)
+ ret = chip->read(chip, gpio);
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+ return ret;
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+void gpio_set_value(unsigned gpio, int on)
+{
+ int ret = -ENOTSUPP;
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip && chip->write)
+ ret = chip->write(chip, gpio, on);
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+int gpio_read_detect_status(unsigned int gpio)
+{
+ int ret = -ENOTSUPP;
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip && chip->read_detect_status)
+ ret = chip->read_detect_status(chip, gpio);
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+ return ret;
+}
+EXPORT_SYMBOL(gpio_read_detect_status);
+
+int gpio_clear_detect_status(unsigned int gpio)
+{
+ int ret = -ENOTSUPP;
+ struct gpio_chip *chip;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&gpio_chips_lock, irq_flags);
+ chip = get_gpio_chip_locked(gpio);
+ if (chip && chip->clear_detect_status)
+ ret = chip->clear_detect_status(chip, gpio);
+ spin_unlock_irqrestore(&gpio_chips_lock, irq_flags);
+ return ret;
+}
+EXPORT_SYMBOL(gpio_clear_detect_status);
diff --git a/arch/arm/mach-msm/gpio.c b/arch/arm/mach-msm/gpio.c
new file mode 100644
index 000000000000..598022906be6
--- /dev/null
+++ b/arch/arm/mach-msm/gpio.c
@@ -0,0 +1,746 @@
+/* linux/arch/arm/mach-msm/gpio.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/io.h>
+#include <asm/gpio.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include "gpio_chip.h"
+#include "gpio_hw.h"
+#include "proc_comm.h"
+
+#include "smd_private.h"
+
+enum {
+ GPIO_DEBUG_SLEEP = 1U << 0,
+};
+static int msm_gpio_debug_mask = 0;
+module_param_named(debug_mask, msm_gpio_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* private gpio_configure flags */
+#define MSM_GPIOF_ENABLE_INTERRUPT 0x10000000
+#define MSM_GPIOF_DISABLE_INTERRUPT 0x20000000
+#define MSM_GPIOF_ENABLE_WAKE 0x40000000
+#define MSM_GPIOF_DISABLE_WAKE 0x80000000
+
+static int msm_gpio_configure(struct gpio_chip *chip, unsigned int gpio, unsigned long flags);
+static int msm_gpio_get_irq_num(struct gpio_chip *chip, unsigned int gpio, unsigned int *irqp, unsigned long *irqnumflagsp);
+static int msm_gpio_read(struct gpio_chip *chip, unsigned n);
+static int msm_gpio_write(struct gpio_chip *chip, unsigned n, unsigned on);
+static int msm_gpio_read_detect_status(struct gpio_chip *chip, unsigned int gpio);
+static int msm_gpio_clear_detect_status(struct gpio_chip *chip, unsigned int gpio);
+
+struct msm_gpio_chip msm_gpio_chips[] = {
+ {
+ .regs = {
+ .out = GPIO_OUT_0,
+ .in = GPIO_IN_0,
+ .int_status = GPIO_INT_STATUS_0,
+ .int_clear = GPIO_INT_CLEAR_0,
+ .int_en = GPIO_INT_EN_0,
+ .int_edge = GPIO_INT_EDGE_0,
+ .int_pos = GPIO_INT_POS_0,
+ .oe = GPIO_OE_0,
+ },
+ .chip = {
+ .start = 0,
+ .end = 15,
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+ {
+ .regs = {
+ .out = GPIO_OUT_1,
+ .in = GPIO_IN_1,
+ .int_status = GPIO_INT_STATUS_1,
+ .int_clear = GPIO_INT_CLEAR_1,
+ .int_en = GPIO_INT_EN_1,
+ .int_edge = GPIO_INT_EDGE_1,
+ .int_pos = GPIO_INT_POS_1,
+ .oe = GPIO_OE_1,
+ },
+ .chip = {
+ .start = 16,
+#if defined(CONFIG_ARCH_MSM7X30)
+ .end = 43,
+#else
+ .end = 42,
+#endif
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+ {
+ .regs = {
+ .out = GPIO_OUT_2,
+ .in = GPIO_IN_2,
+ .int_status = GPIO_INT_STATUS_2,
+ .int_clear = GPIO_INT_CLEAR_2,
+ .int_en = GPIO_INT_EN_2,
+ .int_edge = GPIO_INT_EDGE_2,
+ .int_pos = GPIO_INT_POS_2,
+ .oe = GPIO_OE_2,
+ },
+ .chip = {
+#if defined(CONFIG_ARCH_MSM7X30)
+ .start = 44,
+#else
+ .start = 43,
+#endif
+ .end = 67,
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+ {
+ .regs = {
+ .out = GPIO_OUT_3,
+ .in = GPIO_IN_3,
+ .int_status = GPIO_INT_STATUS_3,
+ .int_clear = GPIO_INT_CLEAR_3,
+ .int_en = GPIO_INT_EN_3,
+ .int_edge = GPIO_INT_EDGE_3,
+ .int_pos = GPIO_INT_POS_3,
+ .oe = GPIO_OE_3,
+ },
+ .chip = {
+ .start = 68,
+ .end = 94,
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+ {
+ .regs = {
+ .out = GPIO_OUT_4,
+ .in = GPIO_IN_4,
+ .int_status = GPIO_INT_STATUS_4,
+ .int_clear = GPIO_INT_CLEAR_4,
+ .int_en = GPIO_INT_EN_4,
+ .int_edge = GPIO_INT_EDGE_4,
+ .int_pos = GPIO_INT_POS_4,
+ .oe = GPIO_OE_4,
+ },
+ .chip = {
+ .start = 95,
+#if defined(CONFIG_ARCH_QSD8X50)
+ .end = 103,
+#else
+ .end = 106,
+#endif
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+ {
+ .regs = {
+ .out = GPIO_OUT_5,
+ .in = GPIO_IN_5,
+ .int_status = GPIO_INT_STATUS_5,
+ .int_clear = GPIO_INT_CLEAR_5,
+ .int_en = GPIO_INT_EN_5,
+ .int_edge = GPIO_INT_EDGE_5,
+ .int_pos = GPIO_INT_POS_5,
+ .oe = GPIO_OE_5,
+ },
+ .chip = {
+#if defined(CONFIG_ARCH_QSD8X50)
+ .start = 104,
+ .end = 121,
+#elif defined(CONFIG_ARCH_MSM7X30)
+ .start = 107,
+ .end = 133,
+#else
+ .start = 107,
+ .end = 132,
+#endif
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+ {
+ .regs = {
+ .out = GPIO_OUT_6,
+ .in = GPIO_IN_6,
+ .int_status = GPIO_INT_STATUS_6,
+ .int_clear = GPIO_INT_CLEAR_6,
+ .int_en = GPIO_INT_EN_6,
+ .int_edge = GPIO_INT_EDGE_6,
+ .int_pos = GPIO_INT_POS_6,
+ .oe = GPIO_OE_6,
+ },
+ .chip = {
+#if defined(CONFIG_ARCH_MSM7X30)
+ .start = 134,
+ .end = 150,
+#else
+ .start = 122,
+ .end = 152,
+#endif
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+ {
+ .regs = {
+ .out = GPIO_OUT_7,
+ .in = GPIO_IN_7,
+ .int_status = GPIO_INT_STATUS_7,
+ .int_clear = GPIO_INT_CLEAR_7,
+ .int_en = GPIO_INT_EN_7,
+ .int_edge = GPIO_INT_EDGE_7,
+ .int_pos = GPIO_INT_POS_7,
+ .oe = GPIO_OE_7,
+ },
+ .chip = {
+#if defined(CONFIG_ARCH_MSM7X30)
+ .start = 151,
+ .end = 181,
+#else
+ .start = 153,
+ .end = 164,
+#endif
+ .configure = msm_gpio_configure,
+ .get_irq_num = msm_gpio_get_irq_num,
+ .read = msm_gpio_read,
+ .write = msm_gpio_write,
+ .read_detect_status = msm_gpio_read_detect_status,
+ .clear_detect_status = msm_gpio_clear_detect_status
+ }
+ },
+#endif
+};
+
+static void msm_gpio_update_both_edge_detect(struct msm_gpio_chip *msm_chip)
+{
+ int loop_limit = 100;
+ unsigned pol, val, val2, intstat;
+ do {
+ val = readl(msm_chip->regs.in);
+ pol = readl(msm_chip->regs.int_pos);
+ pol = (pol & ~msm_chip->both_edge_detect) | (~val & msm_chip->both_edge_detect);
+ writel(pol, msm_chip->regs.int_pos);
+ intstat = readl(msm_chip->regs.int_status);
+ val2 = readl(msm_chip->regs.in);
+ if (((val ^ val2) & msm_chip->both_edge_detect & ~intstat) == 0)
+ return;
+ } while (loop_limit-- > 0);
+ printk(KERN_ERR "msm_gpio_update_both_edge_detect, failed to reach stable state %x != %x\n", val, val2);
+}
+
+static int msm_gpio_write(struct gpio_chip *chip, unsigned n, unsigned on)
+{
+ struct msm_gpio_chip *msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ unsigned b = 1U << (n - chip->start);
+ unsigned v;
+
+ v = readl(msm_chip->regs.out);
+ if (on) {
+ writel(v | b, msm_chip->regs.out);
+ } else {
+ writel(v & (~b), msm_chip->regs.out);
+ }
+ return 0;
+}
+
+static int msm_gpio_read(struct gpio_chip *chip, unsigned n)
+{
+ struct msm_gpio_chip *msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ unsigned b = 1U << (n - chip->start);
+
+ return (readl(msm_chip->regs.in) & b) ? 1 : 0;
+}
+
+static int msm_gpio_read_detect_status(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct msm_gpio_chip *msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ unsigned b = 1U << (gpio - chip->start);
+ unsigned v;
+
+ v = readl(msm_chip->regs.int_status);
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ v |= msm_chip->int_status_copy;
+#endif
+ return (v & b) ? 1 : 0;
+}
+
+static int msm_gpio_clear_detect_status(struct gpio_chip *chip, unsigned int gpio)
+{
+ struct msm_gpio_chip *msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ unsigned b = 1U << (gpio - chip->start);
+
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ /* Save interrupts that already triggered before we loose them. */
+ /* Any interrupt that triggers between the read of int_status */
+ /* and the write to int_clear will still be lost though. */
+ msm_chip->int_status_copy |= readl(msm_chip->regs.int_status);
+ msm_chip->int_status_copy &= ~b;
+#endif
+ writel(b, msm_chip->regs.int_clear);
+ msm_gpio_update_both_edge_detect(msm_chip);
+ return 0;
+}
+
+int msm_gpio_configure(struct gpio_chip *chip, unsigned int gpio, unsigned long flags)
+{
+ struct msm_gpio_chip *msm_chip = container_of(chip, struct msm_gpio_chip, chip);
+ unsigned b = 1U << (gpio - chip->start);
+ unsigned v;
+
+ if (flags & (GPIOF_OUTPUT_LOW | GPIOF_OUTPUT_HIGH))
+ msm_gpio_write(chip, gpio, flags & GPIOF_OUTPUT_HIGH);
+
+ if (flags & (GPIOF_INPUT | GPIOF_DRIVE_OUTPUT)) {
+ v = readl(msm_chip->regs.oe);
+ if (flags & GPIOF_DRIVE_OUTPUT) {
+ writel(v | b, msm_chip->regs.oe);
+ } else {
+ writel(v & (~b), msm_chip->regs.oe);
+ }
+ }
+
+ if (flags & (IRQF_TRIGGER_MASK | GPIOF_IRQF_TRIGGER_NONE)) {
+ v = readl(msm_chip->regs.int_edge);
+ if (flags & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)) {
+ writel(v | b, msm_chip->regs.int_edge);
+ irq_desc[MSM_GPIO_TO_INT(gpio)].handle_irq = handle_edge_irq;
+ } else {
+ writel(v & (~b), msm_chip->regs.int_edge);
+ irq_desc[MSM_GPIO_TO_INT(gpio)].handle_irq = handle_level_irq;
+ }
+ if ((flags & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)) == (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)) {
+ msm_chip->both_edge_detect |= b;
+ msm_gpio_update_both_edge_detect(msm_chip);
+ } else {
+ msm_chip->both_edge_detect &= ~b;
+ v = readl(msm_chip->regs.int_pos);
+ if (flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH)) {
+ writel(v | b, msm_chip->regs.int_pos);
+ } else {
+ writel(v & (~b), msm_chip->regs.int_pos);
+ }
+ }
+ }
+
+ /* used by msm_gpio_irq_mask and msm_gpio_irq_unmask */
+ if (flags & (MSM_GPIOF_ENABLE_INTERRUPT | MSM_GPIOF_DISABLE_INTERRUPT)) {
+ v = readl(msm_chip->regs.int_edge);
+ /* level triggered interrupts are also latched */
+ if (!(v & b))
+ msm_gpio_clear_detect_status(chip, gpio);
+ if (flags & MSM_GPIOF_ENABLE_INTERRUPT) {
+ msm_chip->int_enable[0] |= b;
+ } else {
+ msm_chip->int_enable[0] &= ~b;
+ }
+ writel(msm_chip->int_enable[0], msm_chip->regs.int_en);
+ }
+
+ if (flags & (MSM_GPIOF_ENABLE_WAKE | MSM_GPIOF_DISABLE_WAKE)) {
+ if (flags & MSM_GPIOF_ENABLE_WAKE)
+ msm_chip->int_enable[1] |= b;
+ else
+ msm_chip->int_enable[1] &= ~b;
+ }
+
+ return 0;
+}
+
+static int msm_gpio_get_irq_num(struct gpio_chip *chip, unsigned int gpio, unsigned int *irqp, unsigned long *irqnumflagsp)
+{
+ *irqp = MSM_GPIO_TO_INT(gpio);
+ if (irqnumflagsp)
+ *irqnumflagsp = 0;
+ return 0;
+}
+
+
+static void msm_gpio_irq_ack(unsigned int irq)
+{
+ gpio_clear_detect_status(irq - NR_MSM_IRQS);
+}
+
+static void msm_gpio_irq_mask(unsigned int irq)
+{
+ gpio_configure(irq - NR_MSM_IRQS, MSM_GPIOF_DISABLE_INTERRUPT);
+}
+
+static void msm_gpio_irq_unmask(unsigned int irq)
+{
+ gpio_configure(irq - NR_MSM_IRQS, MSM_GPIOF_ENABLE_INTERRUPT);
+}
+
+static int msm_gpio_irq_set_wake(unsigned int irq, unsigned int on)
+{
+ return gpio_configure(irq - NR_MSM_IRQS, on ? MSM_GPIOF_ENABLE_WAKE : MSM_GPIOF_DISABLE_WAKE);
+}
+
+
+static int msm_gpio_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ return gpio_configure(irq - NR_MSM_IRQS, flow_type);
+}
+
+static void msm_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ int i, j, m;
+ unsigned v;
+
+ for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
+ struct msm_gpio_chip *msm_chip = &msm_gpio_chips[i];
+ v = readl(msm_chip->regs.int_status);
+ v &= msm_chip->int_enable[0];
+ while (v) {
+ m = v & -v;
+ j = fls(m) - 1;
+ /* printk("msm_gpio_irq_handler %08x %08x bit %d gpio %d irq %d\n", v, m, j, msm_chip->chip.start + j, NR_MSM_IRQS + msm_chip->chip.start + j); */
+ v &= ~m;
+ generic_handle_irq(NR_MSM_IRQS + msm_chip->chip.start + j);
+ }
+ }
+ desc->chip->ack(irq);
+}
+
+static struct irq_chip msm_gpio_irq_chip = {
+ .name = "msmgpio",
+ .ack = msm_gpio_irq_ack,
+ .mask = msm_gpio_irq_mask,
+ .unmask = msm_gpio_irq_unmask,
+ .set_wake = msm_gpio_irq_set_wake,
+ .set_type = msm_gpio_irq_set_type,
+};
+
+#define NUM_GPIO_SMEM_BANKS 6
+#define GPIO_SMEM_NUM_GROUPS 2
+#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
+struct tramp_gpio_smem
+{
+ uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
+ uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
+ uint32_t enabled[NUM_GPIO_SMEM_BANKS];
+ uint32_t detection[NUM_GPIO_SMEM_BANKS];
+ uint32_t polarity[NUM_GPIO_SMEM_BANKS];
+};
+
+static void msm_gpio_sleep_int(unsigned long arg)
+{
+ int i, j;
+ struct tramp_gpio_smem *smem_gpio;
+
+ BUILD_BUG_ON(NR_GPIO_IRQS > NUM_GPIO_SMEM_BANKS * 32);
+
+#ifdef CONFIG_MACH_SMD
+ smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));
+#else
+ smem_gpio = NULL;
+#endif
+ if (smem_gpio == NULL)
+ return;
+
+ local_irq_disable();
+ for(i = 0; i < GPIO_SMEM_NUM_GROUPS; i++) {
+ int count = smem_gpio->num_fired[i];
+ for(j = 0; j < count; j++) {
+ /* TODO: Check mask */
+ generic_handle_irq(MSM_GPIO_TO_INT(smem_gpio->fired[i][j]));
+ }
+ }
+ local_irq_enable();
+}
+
+static DECLARE_TASKLET(msm_gpio_sleep_int_tasklet, msm_gpio_sleep_int, 0);
+
+void msm_gpio_enter_sleep(int from_idle)
+{
+ int i;
+ struct tramp_gpio_smem *smem_gpio;
+
+#ifdef CONFIG_MSM_SMD
+ smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));
+#else
+ smem_gpio = NULL;
+#endif
+
+ if (smem_gpio) {
+ for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) {
+ smem_gpio->enabled[i] = 0;
+ smem_gpio->detection[i] = 0;
+ smem_gpio->polarity[i] = 0;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
+ writel(msm_gpio_chips[i].int_enable[!from_idle], msm_gpio_chips[i].regs.int_en);
+ if (smem_gpio) {
+ uint32_t tmp;
+ int start, index, shiftl, shiftr;
+ start = msm_gpio_chips[i].chip.start;
+ index = start / 32;
+ shiftl = start % 32;
+ shiftr = 32 - shiftl;
+ tmp = msm_gpio_chips[i].int_enable[!from_idle];
+ smem_gpio->enabled[index] |= tmp << shiftl;
+ smem_gpio->enabled[index+1] |= tmp >> shiftr;
+ smem_gpio->detection[index] |= readl(msm_gpio_chips[i].regs.int_edge) << shiftl;
+ smem_gpio->detection[index+1] |= readl(msm_gpio_chips[i].regs.int_edge) >> shiftr;
+ smem_gpio->polarity[index] |= readl(msm_gpio_chips[i].regs.int_pos) << shiftl;
+ smem_gpio->polarity[index+1] |= readl(msm_gpio_chips[i].regs.int_pos) >> shiftr;
+ }
+ }
+
+ if (smem_gpio) {
+ if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP)
+ for (i = 0; i < ARRAY_SIZE(smem_gpio->enabled); i++) {
+ printk("msm_gpio_enter_sleep gpio %d-%d: enable"
+ " %08x, edge %08x, polarity %08x\n",
+ i * 32, i * 32 + 31,
+ smem_gpio->enabled[i],
+ smem_gpio->detection[i],
+ smem_gpio->polarity[i]);
+ }
+ for(i = 0; i < GPIO_SMEM_NUM_GROUPS; i++)
+ smem_gpio->num_fired[i] = 0;
+ }
+}
+
+void msm_gpio_exit_sleep(void)
+{
+ int i;
+ struct tramp_gpio_smem *smem_gpio;
+
+#ifdef CONFIG_MSM_SMD
+ smem_gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*smem_gpio));
+#else
+ smem_gpio = NULL;
+#endif
+
+ for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
+ writel(msm_gpio_chips[i].int_enable[0], msm_gpio_chips[i].regs.int_en);
+ }
+
+ if (smem_gpio && (smem_gpio->num_fired[0] || smem_gpio->num_fired[1])) {
+ if (msm_gpio_debug_mask & GPIO_DEBUG_SLEEP)
+ printk(KERN_INFO "gpio: fired %x %x\n",
+ smem_gpio->num_fired[0], smem_gpio->num_fired[1]);
+ tasklet_schedule(&msm_gpio_sleep_int_tasklet);
+ }
+}
+
+static int __init msm_init_gpio(void)
+{
+ int i;
+
+ for (i = NR_MSM_IRQS; i < NR_MSM_IRQS + NR_GPIO_IRQS; i++) {
+ set_irq_chip(i, &msm_gpio_irq_chip);
+ set_irq_handler(i, handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(msm_gpio_chips); i++) {
+ writel(0, msm_gpio_chips[i].regs.int_en);
+ register_gpio_chip(&msm_gpio_chips[i].chip);
+ }
+
+ set_irq_chained_handler(INT_GPIO_GROUP1, msm_gpio_irq_handler);
+ set_irq_chained_handler(INT_GPIO_GROUP2, msm_gpio_irq_handler);
+ set_irq_wake(INT_GPIO_GROUP1, 1);
+ set_irq_wake(INT_GPIO_GROUP2, 2);
+ return 0;
+}
+
+postcore_initcall(msm_init_gpio);
+
+int gpio_tlmm_config(unsigned config, unsigned disable)
+{
+ return msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &config, &disable);
+}
+EXPORT_SYMBOL(gpio_tlmm_config);
+
+int msm_gpios_request_enable(const struct msm_gpio *table, int size)
+{
+ int rc = msm_gpios_request(table, size);
+ if (rc)
+ return rc;
+ rc = msm_gpios_enable(table, size);
+ if (rc)
+ msm_gpios_free(table, size);
+ return rc;
+}
+EXPORT_SYMBOL(msm_gpios_request_enable);
+
+void msm_gpios_disable_free(const struct msm_gpio *table, int size)
+{
+ msm_gpios_disable(table, size);
+ msm_gpios_free(table, size);
+}
+EXPORT_SYMBOL(msm_gpios_disable_free);
+
+int msm_gpios_request(const struct msm_gpio *table, int size)
+{
+ int rc;
+ int i;
+ const struct msm_gpio *g;
+ for (i = 0; i < size; i++) {
+ g = table + i;
+ rc = gpio_request(GPIO_PIN(g->gpio_cfg), g->label);
+ if (rc) {
+ pr_err("gpio_request(%d) <%s> failed: %d\n",
+ GPIO_PIN(g->gpio_cfg), g->label ?: "?", rc);
+ goto err;
+ }
+ }
+ return 0;
+err:
+ msm_gpios_free(table, i);
+ return rc;
+}
+EXPORT_SYMBOL(msm_gpios_request);
+
+void msm_gpios_free(const struct msm_gpio *table, int size)
+{
+ int i;
+ const struct msm_gpio *g;
+ for (i = size-1; i >= 0; i--) {
+ g = table + i;
+ gpio_free(GPIO_PIN(g->gpio_cfg));
+ }
+}
+EXPORT_SYMBOL(msm_gpios_free);
+
+int msm_gpios_enable(const struct msm_gpio *table, int size)
+{
+ int rc;
+ int i;
+ const struct msm_gpio *g;
+ for (i = 0; i < size; i++) {
+ g = table + i;
+ rc = gpio_tlmm_config(g->gpio_cfg, GPIO_ENABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config(0x%08x, GPIO_ENABLE)"
+ " <%s> failed: %d\n",
+ g->gpio_cfg, g->label ?: "?", rc);
+ pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+ GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+ GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+ GPIO_DRVSTR(g->gpio_cfg));
+ goto err;
+ }
+ }
+ return 0;
+err:
+ msm_gpios_disable(table, i);
+ return rc;
+}
+EXPORT_SYMBOL(msm_gpios_enable);
+
+void msm_gpios_disable(const struct msm_gpio *table, int size)
+{
+ int rc;
+ int i;
+ const struct msm_gpio *g;
+ for (i = size-1; i >= 0; i--) {
+ g = table + i;
+ rc = gpio_tlmm_config(g->gpio_cfg, GPIO_DISABLE);
+ if (rc) {
+ pr_err("gpio_tlmm_config(0x%08x, GPIO_DISABLE)"
+ " <%s> failed: %d\n",
+ g->gpio_cfg, g->label ?: "?", rc);
+ pr_err("pin %d func %d dir %d pull %d drvstr %d\n",
+ GPIO_PIN(g->gpio_cfg), GPIO_FUNC(g->gpio_cfg),
+ GPIO_DIR(g->gpio_cfg), GPIO_PULL(g->gpio_cfg),
+ GPIO_DRVSTR(g->gpio_cfg));
+ }
+ }
+}
+EXPORT_SYMBOL(msm_gpios_disable);
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int msm_gpio_debug_result = 1;
+
+static int gpio_enable_set(void *data, u64 val)
+{
+ msm_gpio_debug_result = gpio_tlmm_config(val, 0);
+ return 0;
+}
+static int gpio_disable_set(void *data, u64 val)
+{
+ msm_gpio_debug_result = gpio_tlmm_config(val, 1);
+ return 0;
+}
+
+static int gpio_debug_get(void *data, u64 *val)
+{
+ unsigned int result = msm_gpio_debug_result;
+ msm_gpio_debug_result = 1;
+ if (result)
+ *val = 1;
+ else
+ *val = 0;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(gpio_enable_fops, gpio_debug_get,
+ gpio_enable_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(gpio_disable_fops, gpio_debug_get,
+ gpio_disable_set, "%llu\n");
+
+static int __init gpio_debug_init(void)
+{
+ struct dentry *dent;
+ dent = debugfs_create_dir("gpio", 0);
+ if (IS_ERR(dent))
+ return 0;
+
+ debugfs_create_file("enable", 0644, dent, 0, &gpio_enable_fops);
+ debugfs_create_file("disable", 0644, dent, 0, &gpio_disable_fops);
+ return 0;
+}
+
+device_initcall(gpio_debug_init);
+#endif
+
diff --git a/arch/arm/mach-msm/gpio.h b/arch/arm/mach-msm/gpio.h
new file mode 100644
index 000000000000..b411bc1e3c8f
--- /dev/null
+++ b/arch/arm/mach-msm/gpio.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_GPIO_H_
+#define _ARCH_ARM_MACH_MSM_GPIO_H_
+
+void msm_gpio_enter_sleep(int from_idle);
+void msm_gpio_exit_sleep(void);
+
+#endif
diff --git a/arch/arm/mach-msm/gpio_chip.h b/arch/arm/mach-msm/gpio_chip.h
new file mode 100644
index 000000000000..39ccc0f0ed8e
--- /dev/null
+++ b/arch/arm/mach-msm/gpio_chip.h
@@ -0,0 +1,61 @@
+/* arch/arm/mach-msm/gpio_chip.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_CHIP_H
+#define _LINUX_GPIO_CHIP_H
+
+#include <linux/list.h>
+
+struct gpio_chip {
+ struct list_head list;
+ struct gpio_state *state;
+
+ unsigned int start;
+ unsigned int end;
+
+ int (*configure)(struct gpio_chip *chip, unsigned int gpio, unsigned long flags);
+ int (*get_irq_num)(struct gpio_chip *chip, unsigned int gpio, unsigned int *irqp, unsigned long *irqnumflagsp);
+ int (*read)(struct gpio_chip *chip, unsigned int gpio);
+ int (*write)(struct gpio_chip *chip, unsigned int gpio, unsigned on);
+ int (*read_detect_status)(struct gpio_chip *chip, unsigned int gpio);
+ int (*clear_detect_status)(struct gpio_chip *chip, unsigned int gpio);
+};
+
+struct msm_gpio_regs {
+ void __iomem *out;
+ void __iomem *in;
+ void __iomem *int_status;
+ void __iomem *int_clear;
+ void __iomem *int_en;
+ void __iomem *int_edge;
+ void __iomem *int_pos;
+ void __iomem *oe;
+};
+
+#define MSM_GPIO_BROKEN_INT_CLEAR 1
+
+struct msm_gpio_chip {
+ struct gpio_chip chip;
+ struct msm_gpio_regs regs;
+#if MSM_GPIO_BROKEN_INT_CLEAR
+ unsigned int_status_copy;
+#endif
+ unsigned int both_edge_detect;
+ unsigned int int_enable[2]; /* 0: awake, 1: sleep */
+};
+
+int register_gpio_chip(struct gpio_chip *gpio_chip);
+
+#endif
diff --git a/arch/arm/mach-msm/gpio_hw-7x30.h b/arch/arm/mach-msm/gpio_hw-7x30.h
new file mode 100644
index 000000000000..9142e7e6045c
--- /dev/null
+++ b/arch/arm/mach-msm/gpio_hw-7x30.h
@@ -0,0 +1,112 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_GPIO_HW_7X30_H
+#define __ARCH_ARM_MACH_MSM_GPIO_HW_7X30_H
+
+/* output value */
+#define GPIO_OUT_0 GPIO1_REG(0x00) /* gpio 15-0 */
+#define GPIO_OUT_1 GPIO2_REG(0x00) /* gpio 43-16 */
+#define GPIO_OUT_2 GPIO1_REG(0x04) /* gpio 67-44 */
+#define GPIO_OUT_3 GPIO1_REG(0x08) /* gpio 94-68 */
+#define GPIO_OUT_4 GPIO1_REG(0x0C) /* gpio 106-95 */
+#define GPIO_OUT_5 GPIO1_REG(0x50) /* gpio 133-107 */
+#define GPIO_OUT_6 GPIO1_REG(0xC4) /* gpio 150-134 */
+#define GPIO_OUT_7 GPIO1_REG(0x214) /* gpio 181-151 */
+
+/* same pin map as above, output enable */
+#define GPIO_OE_0 GPIO1_REG(0x10)
+#define GPIO_OE_1 GPIO2_REG(0x08)
+#define GPIO_OE_2 GPIO1_REG(0x14)
+#define GPIO_OE_3 GPIO1_REG(0x18)
+#define GPIO_OE_4 GPIO1_REG(0x1C)
+#define GPIO_OE_5 GPIO1_REG(0x54)
+#define GPIO_OE_6 GPIO1_REG(0xC8)
+#define GPIO_OE_7 GPIO1_REG(0x218)
+
+/* same pin map as above, input read */
+#define GPIO_IN_0 GPIO1_REG(0x34)
+#define GPIO_IN_1 GPIO2_REG(0x20)
+#define GPIO_IN_2 GPIO1_REG(0x38)
+#define GPIO_IN_3 GPIO1_REG(0x3C)
+#define GPIO_IN_4 GPIO1_REG(0x40)
+#define GPIO_IN_5 GPIO1_REG(0x44)
+#define GPIO_IN_6 GPIO1_REG(0xCC)
+#define GPIO_IN_7 GPIO1_REG(0x21C)
+
+/* same pin map as above, 1=edge 0=level interrup */
+#define GPIO_INT_EDGE_0 GPIO1_REG(0x60)
+#define GPIO_INT_EDGE_1 GPIO2_REG(0x50)
+#define GPIO_INT_EDGE_2 GPIO1_REG(0x64)
+#define GPIO_INT_EDGE_3 GPIO1_REG(0x68)
+#define GPIO_INT_EDGE_4 GPIO1_REG(0x6C)
+#define GPIO_INT_EDGE_5 GPIO1_REG(0xC0)
+#define GPIO_INT_EDGE_6 GPIO1_REG(0xD0)
+#define GPIO_INT_EDGE_7 GPIO1_REG(0x240)
+
+/* same pin map as above, 1=positive 0=negative */
+#define GPIO_INT_POS_0 GPIO1_REG(0x70)
+#define GPIO_INT_POS_1 GPIO2_REG(0x58)
+#define GPIO_INT_POS_2 GPIO1_REG(0x74)
+#define GPIO_INT_POS_3 GPIO1_REG(0x78)
+#define GPIO_INT_POS_4 GPIO1_REG(0x7C)
+#define GPIO_INT_POS_5 GPIO1_REG(0xBC)
+#define GPIO_INT_POS_6 GPIO1_REG(0xD4)
+#define GPIO_INT_POS_7 GPIO1_REG(0x228)
+
+/* same pin map as above, interrupt enable */
+#define GPIO_INT_EN_0 GPIO1_REG(0x80)
+#define GPIO_INT_EN_1 GPIO2_REG(0x60)
+#define GPIO_INT_EN_2 GPIO1_REG(0x84)
+#define GPIO_INT_EN_3 GPIO1_REG(0x88)
+#define GPIO_INT_EN_4 GPIO1_REG(0x8C)
+#define GPIO_INT_EN_5 GPIO1_REG(0xB8)
+#define GPIO_INT_EN_6 GPIO1_REG(0xD8)
+#define GPIO_INT_EN_7 GPIO1_REG(0x22C)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define GPIO_INT_CLEAR_0 GPIO1_REG(0x90)
+#define GPIO_INT_CLEAR_1 GPIO2_REG(0x68)
+#define GPIO_INT_CLEAR_2 GPIO1_REG(0x94)
+#define GPIO_INT_CLEAR_3 GPIO1_REG(0x98)
+#define GPIO_INT_CLEAR_4 GPIO1_REG(0x9C)
+#define GPIO_INT_CLEAR_5 GPIO1_REG(0xB4)
+#define GPIO_INT_CLEAR_6 GPIO1_REG(0xDC)
+#define GPIO_INT_CLEAR_7 GPIO1_REG(0x230)
+
+/* same pin map as above, 1=interrupt pending */
+#define GPIO_INT_STATUS_0 GPIO1_REG(0xA0)
+#define GPIO_INT_STATUS_1 GPIO2_REG(0x70)
+#define GPIO_INT_STATUS_2 GPIO1_REG(0xA4)
+#define GPIO_INT_STATUS_3 GPIO1_REG(0xA8)
+#define GPIO_INT_STATUS_4 GPIO1_REG(0xAC)
+#define GPIO_INT_STATUS_5 GPIO1_REG(0xB0)
+#define GPIO_INT_STATUS_6 GPIO1_REG(0xE0)
+#define GPIO_INT_STATUS_7 GPIO1_REG(0x234)
+
+#endif
diff --git a/arch/arm/mach-msm/gpio_hw-7xxx.h b/arch/arm/mach-msm/gpio_hw-7xxx.h
new file mode 100644
index 000000000000..17b4d80c2310
--- /dev/null
+++ b/arch/arm/mach-msm/gpio_hw-7xxx.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_GPIO_HW_7XXX_H
+#define __ARCH_ARM_MACH_MSM_GPIO_HW_7XXX_H
+
+/* output value */
+#define GPIO_OUT_0 GPIO1_REG(0x00) /* gpio 15-0 */
+#define GPIO_OUT_1 GPIO2_REG(0x00) /* gpio 42-16 */
+#define GPIO_OUT_2 GPIO1_REG(0x04) /* gpio 67-43 */
+#define GPIO_OUT_3 GPIO1_REG(0x08) /* gpio 94-68 */
+#define GPIO_OUT_4 GPIO1_REG(0x0C) /* gpio 106-95 */
+#define GPIO_OUT_5 GPIO1_REG(0x50) /* gpio 107-121 */
+
+/* same pin map as above, output enable */
+#define GPIO_OE_0 GPIO1_REG(0x10)
+#define GPIO_OE_1 GPIO2_REG(0x08)
+#define GPIO_OE_2 GPIO1_REG(0x14)
+#define GPIO_OE_3 GPIO1_REG(0x18)
+#define GPIO_OE_4 GPIO1_REG(0x1C)
+#define GPIO_OE_5 GPIO1_REG(0x54)
+
+/* same pin map as above, input read */
+#define GPIO_IN_0 GPIO1_REG(0x34)
+#define GPIO_IN_1 GPIO2_REG(0x20)
+#define GPIO_IN_2 GPIO1_REG(0x38)
+#define GPIO_IN_3 GPIO1_REG(0x3C)
+#define GPIO_IN_4 GPIO1_REG(0x40)
+#define GPIO_IN_5 GPIO1_REG(0x44)
+
+/* same pin map as above, 1=edge 0=level interrup */
+#define GPIO_INT_EDGE_0 GPIO1_REG(0x60)
+#define GPIO_INT_EDGE_1 GPIO2_REG(0x50)
+#define GPIO_INT_EDGE_2 GPIO1_REG(0x64)
+#define GPIO_INT_EDGE_3 GPIO1_REG(0x68)
+#define GPIO_INT_EDGE_4 GPIO1_REG(0x6C)
+#define GPIO_INT_EDGE_5 GPIO1_REG(0xC0)
+
+/* same pin map as above, 1=positive 0=negative */
+#define GPIO_INT_POS_0 GPIO1_REG(0x70)
+#define GPIO_INT_POS_1 GPIO2_REG(0x58)
+#define GPIO_INT_POS_2 GPIO1_REG(0x74)
+#define GPIO_INT_POS_3 GPIO1_REG(0x78)
+#define GPIO_INT_POS_4 GPIO1_REG(0x7C)
+#define GPIO_INT_POS_5 GPIO1_REG(0xBC)
+
+/* same pin map as above, interrupt enable */
+#define GPIO_INT_EN_0 GPIO1_REG(0x80)
+#define GPIO_INT_EN_1 GPIO2_REG(0x60)
+#define GPIO_INT_EN_2 GPIO1_REG(0x84)
+#define GPIO_INT_EN_3 GPIO1_REG(0x88)
+#define GPIO_INT_EN_4 GPIO1_REG(0x8C)
+#define GPIO_INT_EN_5 GPIO1_REG(0xB8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define GPIO_INT_CLEAR_0 GPIO1_REG(0x90)
+#define GPIO_INT_CLEAR_1 GPIO2_REG(0x68)
+#define GPIO_INT_CLEAR_2 GPIO1_REG(0x94)
+#define GPIO_INT_CLEAR_3 GPIO1_REG(0x98)
+#define GPIO_INT_CLEAR_4 GPIO1_REG(0x9C)
+#define GPIO_INT_CLEAR_5 GPIO1_REG(0xB4)
+
+/* same pin map as above, 1=interrupt pending */
+#define GPIO_INT_STATUS_0 GPIO1_REG(0xA0)
+#define GPIO_INT_STATUS_1 GPIO2_REG(0x70)
+#define GPIO_INT_STATUS_2 GPIO1_REG(0xA4)
+#define GPIO_INT_STATUS_3 GPIO1_REG(0xA8)
+#define GPIO_INT_STATUS_4 GPIO1_REG(0xAC)
+#define GPIO_INT_STATUS_5 GPIO1_REG(0xB0)
+
+#endif
diff --git a/arch/arm/mach-msm/gpio_hw-8xxx.h b/arch/arm/mach-msm/gpio_hw-8xxx.h
new file mode 100644
index 000000000000..91b1a43298fd
--- /dev/null
+++ b/arch/arm/mach-msm/gpio_hw-8xxx.h
@@ -0,0 +1,112 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_GPIO_HW_8XXX_H
+#define __ARCH_ARM_MACH_MSM_GPIO_HW_8XXX_H
+
+/* output value */
+#define GPIO_OUT_0 GPIO1_REG(0x00) /* gpio 15-0 */
+#define GPIO_OUT_1 GPIO2_REG(0x00) /* gpio 42-16 */
+#define GPIO_OUT_2 GPIO1_REG(0x04) /* gpio 67-43 */
+#define GPIO_OUT_3 GPIO1_REG(0x08) /* gpio 94-68 */
+#define GPIO_OUT_4 GPIO1_REG(0x0C) /* gpio 103-95 */
+#define GPIO_OUT_5 GPIO1_REG(0x10) /* gpio 121-104 */
+#define GPIO_OUT_6 GPIO1_REG(0x14) /* gpio 152-122 */
+#define GPIO_OUT_7 GPIO1_REG(0x18) /* gpio 164-153 */
+
+/* same pin map as above, output enable */
+#define GPIO_OE_0 GPIO1_REG(0x20)
+#define GPIO_OE_1 GPIO2_REG(0x08)
+#define GPIO_OE_2 GPIO1_REG(0x24)
+#define GPIO_OE_3 GPIO1_REG(0x28)
+#define GPIO_OE_4 GPIO1_REG(0x2C)
+#define GPIO_OE_5 GPIO1_REG(0x30)
+#define GPIO_OE_6 GPIO1_REG(0x34)
+#define GPIO_OE_7 GPIO1_REG(0x38)
+
+/* same pin map as above, input read */
+#define GPIO_IN_0 GPIO1_REG(0x50)
+#define GPIO_IN_1 GPIO2_REG(0x20)
+#define GPIO_IN_2 GPIO1_REG(0x54)
+#define GPIO_IN_3 GPIO1_REG(0x58)
+#define GPIO_IN_4 GPIO1_REG(0x5C)
+#define GPIO_IN_5 GPIO1_REG(0x60)
+#define GPIO_IN_6 GPIO1_REG(0x64)
+#define GPIO_IN_7 GPIO1_REG(0x68)
+
+/* same pin map as above, 1=edge 0=level interrup */
+#define GPIO_INT_EDGE_0 GPIO1_REG(0x70)
+#define GPIO_INT_EDGE_1 GPIO2_REG(0x50)
+#define GPIO_INT_EDGE_2 GPIO1_REG(0x74)
+#define GPIO_INT_EDGE_3 GPIO1_REG(0x78)
+#define GPIO_INT_EDGE_4 GPIO1_REG(0x7C)
+#define GPIO_INT_EDGE_5 GPIO1_REG(0x80)
+#define GPIO_INT_EDGE_6 GPIO1_REG(0x84)
+#define GPIO_INT_EDGE_7 GPIO1_REG(0x88)
+
+/* same pin map as above, 1=positive 0=negative */
+#define GPIO_INT_POS_0 GPIO1_REG(0x90)
+#define GPIO_INT_POS_1 GPIO2_REG(0x58)
+#define GPIO_INT_POS_2 GPIO1_REG(0x94)
+#define GPIO_INT_POS_3 GPIO1_REG(0x98)
+#define GPIO_INT_POS_4 GPIO1_REG(0x9C)
+#define GPIO_INT_POS_5 GPIO1_REG(0xA0)
+#define GPIO_INT_POS_6 GPIO1_REG(0xA4)
+#define GPIO_INT_POS_7 GPIO1_REG(0xA8)
+
+/* same pin map as above, interrupt enable */
+#define GPIO_INT_EN_0 GPIO1_REG(0xB0)
+#define GPIO_INT_EN_1 GPIO2_REG(0x60)
+#define GPIO_INT_EN_2 GPIO1_REG(0xB4)
+#define GPIO_INT_EN_3 GPIO1_REG(0xB8)
+#define GPIO_INT_EN_4 GPIO1_REG(0xBC)
+#define GPIO_INT_EN_5 GPIO1_REG(0xC0)
+#define GPIO_INT_EN_6 GPIO1_REG(0xC4)
+#define GPIO_INT_EN_7 GPIO1_REG(0xC8)
+
+/* same pin map as above, write 1 to clear interrupt */
+#define GPIO_INT_CLEAR_0 GPIO1_REG(0xD0)
+#define GPIO_INT_CLEAR_1 GPIO2_REG(0x68)
+#define GPIO_INT_CLEAR_2 GPIO1_REG(0xD4)
+#define GPIO_INT_CLEAR_3 GPIO1_REG(0xD8)
+#define GPIO_INT_CLEAR_4 GPIO1_REG(0xDC)
+#define GPIO_INT_CLEAR_5 GPIO1_REG(0xE0)
+#define GPIO_INT_CLEAR_6 GPIO1_REG(0xE4)
+#define GPIO_INT_CLEAR_7 GPIO1_REG(0xE8)
+
+/* same pin map as above, 1=interrupt pending */
+#define GPIO_INT_STATUS_0 GPIO1_REG(0xF0)
+#define GPIO_INT_STATUS_1 GPIO2_REG(0x70)
+#define GPIO_INT_STATUS_2 GPIO1_REG(0xF4)
+#define GPIO_INT_STATUS_3 GPIO1_REG(0xF8)
+#define GPIO_INT_STATUS_4 GPIO1_REG(0xFC)
+#define GPIO_INT_STATUS_5 GPIO1_REG(0x100)
+#define GPIO_INT_STATUS_6 GPIO1_REG(0x104)
+#define GPIO_INT_STATUS_7 GPIO1_REG(0x108)
+
+#endif
diff --git a/arch/arm/mach-msm/gpio_hw.h b/arch/arm/mach-msm/gpio_hw.h
new file mode 100644
index 000000000000..cc6b036c1dbc
--- /dev/null
+++ b/arch/arm/mach-msm/gpio_hw.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_GPIO_HW_H
+#define __ARCH_ARM_MACH_MSM_GPIO_HW_H
+
+#include <mach/msm_iomap.h>
+
+/* see 80-VA736-2 Rev C pp 695-751
+**
+** These are actually the *shadow* gpio registers, since the
+** real ones (which allow full access) are only available to the
+** ARM9 side of the world.
+**
+** Since the _BASE need to be page-aligned when we're mapping them
+** to virtual addresses, adjust for the additional offset in these
+** macros.
+*/
+
+#if defined(CONFIG_ARCH_MSM7X30)
+#define GPIO1_REG(off) (MSM_GPIO1_BASE + (off))
+#define GPIO2_REG(off) (MSM_GPIO2_BASE + 0x400 + (off))
+#else
+#define GPIO1_REG(off) (MSM_GPIO1_BASE + 0x800 + (off))
+#define GPIO2_REG(off) (MSM_GPIO2_BASE + 0xC00 + (off))
+#endif
+
+#if defined(CONFIG_ARCH_QSD8X50)
+#include "gpio_hw-8xxx.h"
+#elif defined(CONFIG_ARCH_MSM7X30)
+#include "gpio_hw-7x30.h"
+#else
+#include "gpio_hw-7xxx.h"
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/htc_acoustic.c b/arch/arm/mach-msm/htc_acoustic.c
new file mode 100644
index 000000000000..3de71dddb589
--- /dev/null
+++ b/arch/arm/mach-msm/htc_acoustic.c
@@ -0,0 +1,239 @@
+/* arch/arm/mach-msm/htc_acoustic.c
+ *
+ * Copyright (C) 2007-2008 HTC Corporation
+ * Author: Laurence Chen <Laurence_Chen@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+
+#include <mach/msm_smd.h>
+#include <mach/msm_rpcrouter.h>
+#include <mach/msm_iomap.h>
+
+#include "smd_private.h"
+
+#define ACOUSTIC_IOCTL_MAGIC 'p'
+#define ACOUSTIC_ARM11_DONE _IOW(ACOUSTIC_IOCTL_MAGIC, 22, unsigned int)
+
+#define HTCRPOG 0x30100002
+#define HTCVERS 0
+#define ONCRPC_SET_MIC_BIAS_PROC (1)
+#define ONCRPC_ACOUSTIC_INIT_PROC (5)
+#define ONCRPC_ALLOC_ACOUSTIC_MEM_PROC (6)
+
+#define HTC_ACOUSTIC_TABLE_SIZE (0x10000)
+
+#define D(fmt, args...) printk(KERN_INFO "htc-acoustic: "fmt, ##args)
+#define E(fmt, args...) printk(KERN_ERR "htc-acoustic: "fmt, ##args)
+
+struct set_smem_req {
+ struct rpc_request_hdr hdr;
+ uint32_t size;
+};
+
+struct set_smem_rep {
+ struct rpc_reply_hdr hdr;
+ int n;
+};
+
+struct set_acoustic_req {
+ struct rpc_request_hdr hdr;
+};
+
+struct set_acoustic_rep {
+ struct rpc_reply_hdr hdr;
+ int n;
+};
+
+static uint32_t htc_acoustic_vir_addr;
+static struct msm_rpc_endpoint *endpoint;
+static struct mutex api_lock;
+
+static int acoustic_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ unsigned long pgoff, delta;
+ int rc = -EINVAL;
+ size_t size;
+
+ D("mmap\n");
+
+ mutex_lock(&api_lock);
+
+ size = vma->vm_end - vma->vm_start;
+
+ if (vma->vm_pgoff != 0) {
+ E("mmap failed: page offset %lx\n", vma->vm_pgoff);
+ goto done;
+ }
+
+ if (!htc_acoustic_vir_addr) {
+ E("mmap failed: smem region not allocated\n");
+ rc = -EIO;
+ goto done;
+ }
+
+ pgoff = MSM_SHARED_RAM_PHYS +
+ (htc_acoustic_vir_addr - (uint32_t)MSM_SHARED_RAM_BASE);
+ delta = PAGE_ALIGN(pgoff) - pgoff;
+
+ if (size + delta > HTC_ACOUSTIC_TABLE_SIZE) {
+ E("mmap failed: size %d\n", size);
+ goto done;
+ }
+
+ pgoff += delta;
+ vma->vm_flags |= VM_IO | VM_RESERVED;
+
+ rc = io_remap_pfn_range(vma, vma->vm_start, pgoff >> PAGE_SHIFT,
+ size, vma->vm_page_prot);
+
+ if (rc < 0)
+ E("mmap failed: remap error %d\n", rc);
+
+done: mutex_unlock(&api_lock);
+ return rc;
+}
+
+static int acoustic_open(struct inode *inode, struct file *file)
+{
+ int rc = -EIO;
+ struct set_smem_req req_smem;
+ struct set_smem_rep rep_smem;
+
+ D("open\n");
+
+ mutex_lock(&api_lock);
+
+ if (!htc_acoustic_vir_addr) {
+ if (endpoint == NULL) {
+ endpoint = msm_rpc_connect(HTCRPOG, HTCVERS, 0);
+ if (IS_ERR(endpoint)) {
+ E("init rpc failed! rc = %ld\n",
+ PTR_ERR(endpoint));
+ endpoint = NULL;
+ goto done;
+ }
+ }
+
+ req_smem.size = cpu_to_be32(HTC_ACOUSTIC_TABLE_SIZE);
+ rc = msm_rpc_call_reply(endpoint,
+ ONCRPC_ALLOC_ACOUSTIC_MEM_PROC,
+ &req_smem, sizeof(req_smem),
+ &rep_smem, sizeof(rep_smem),
+ 5 * HZ);
+
+ if (rep_smem.n != 0 || rc < 0) {
+ E("open failed: ALLOC_ACOUSTIC_MEM_PROC error %d.\n",
+ rc);
+ goto done;
+ }
+ htc_acoustic_vir_addr =
+ (uint32_t)smem_alloc(SMEM_ID_VENDOR1,
+ HTC_ACOUSTIC_TABLE_SIZE);
+ if (!htc_acoustic_vir_addr) {
+ E("open failed: smem_alloc error\n");
+ goto done;
+ }
+ }
+
+ rc = 0;
+done:
+ mutex_unlock(&api_lock);
+ return rc;
+}
+
+static int acoustic_release(struct inode *inode, struct file *file)
+{
+ D("release\n");
+ return 0;
+}
+
+static long acoustic_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int rc, reply_value;
+ struct set_acoustic_req req;
+ struct set_acoustic_rep rep;
+
+ D("ioctl\n");
+
+ mutex_lock(&api_lock);
+
+ switch (cmd) {
+ case ACOUSTIC_ARM11_DONE:
+ D("ioctl: ACOUSTIC_ARM11_DONE called %d.\n", current->pid);
+ rc = msm_rpc_call_reply(endpoint,
+ ONCRPC_ACOUSTIC_INIT_PROC, &req,
+ sizeof(req), &rep, sizeof(rep),
+ 5 * HZ);
+
+ reply_value = be32_to_cpu(rep.n);
+ if (reply_value != 0 || rc < 0) {
+ E("ioctl failed: ONCRPC_ACOUSTIC_INIT_PROC "\
+ "error %d.\n", rc);
+ if (rc >= 0)
+ rc = -EIO;
+ break;
+ }
+ D("ioctl: ONCRPC_ACOUSTIC_INIT_PROC success.\n");
+ break;
+ default:
+ E("ioctl: invalid command\n");
+ rc = -EINVAL;
+ }
+
+ mutex_unlock(&api_lock);
+ return 0;
+}
+
+
+static struct file_operations acoustic_fops = {
+ .owner = THIS_MODULE,
+ .open = acoustic_open,
+ .release = acoustic_release,
+ .mmap = acoustic_mmap,
+ .unlocked_ioctl = acoustic_ioctl,
+};
+
+static struct miscdevice acoustic_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "htc-acoustic",
+ .fops = &acoustic_fops,
+};
+
+static int __init acoustic_init(void)
+{
+ mutex_init(&api_lock);
+ return misc_register(&acoustic_misc);
+}
+
+static void __exit acoustic_exit(void)
+{
+ misc_deregister(&acoustic_misc);
+}
+
+module_init(acoustic_init);
+module_exit(acoustic_exit);
+
+MODULE_AUTHOR("Laurence Chen <Laurence_Chen@htc.com>");
+MODULE_DESCRIPTION("HTC acoustic driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/htc_akm_cal.c b/arch/arm/mach-msm/htc_akm_cal.c
new file mode 100644
index 000000000000..943083fe0fbe
--- /dev/null
+++ b/arch/arm/mach-msm/htc_akm_cal.c
@@ -0,0 +1,64 @@
+/* arch/arm/mach-msm/htc_akm_cal.c
+ *
+ * Code to extract compass calibration information from ATAG set up
+ * by the bootloader.
+ *
+ * Copyright (C) 2007-2008 HTC Corporation
+ * Author: Farmer Tseng <farmer_tseng@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/setup.h>
+
+/* configuration tags specific to AKM8976 */
+#define ATAG_AKM8976 0x89768976 /* AKM8976 */
+
+#define MAX_CALI_SIZE 0x1000U
+
+static char akm_cal_ram[MAX_CALI_SIZE];
+
+char *get_akm_cal_ram(void)
+{
+ return(akm_cal_ram);
+}
+EXPORT_SYMBOL(get_akm_cal_ram);
+
+static int __init parse_tag_akm(const struct tag *tag)
+{
+ unsigned char *dptr = (unsigned char *)(&tag->u);
+ unsigned size;
+
+ size = min((tag->hdr.size - 2) * sizeof(__u32), MAX_CALI_SIZE);
+
+ printk(KERN_INFO "AKM Data size = %d , 0x%x, size = %d\n",
+ tag->hdr.size, tag->hdr.tag, size);
+
+#ifdef ATAG_COMPASS_DEBUG
+ unsigned i;
+ unsigned char *ptr;
+
+ ptr = dptr;
+ printk(KERN_INFO
+ "AKM Data size = %d , 0x%x\n",
+ tag->hdr.size, tag->hdr.tag);
+ for (i = 0; i < size; i++)
+ printk(KERN_INFO "%02x ", *ptr++);
+#endif
+ memcpy((void *)akm_cal_ram, (void *)dptr, size);
+ return 0;
+}
+
+__tagtable(ATAG_AKM8976, parse_tag_akm);
diff --git a/arch/arm/mach-msm/htc_battery.c b/arch/arm/mach-msm/htc_battery.c
new file mode 100644
index 000000000000..7320edbff1a9
--- /dev/null
+++ b/arch/arm/mach-msm/htc_battery.c
@@ -0,0 +1,769 @@
+/* arch/arm/mach-msm/htc_battery.c
+ *
+ * Copyright (C) 2008 HTC Corporation.
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/power_supply.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/wakelock.h>
+#include <asm/gpio.h>
+#include <mach/msm_rpcrouter.h>
+#include <mach/board.h>
+
+static struct wake_lock vbus_wake_lock;
+
+#define TRACE_BATT 0
+
+#if TRACE_BATT
+#define BATT(x...) printk(KERN_INFO "[BATT] " x)
+#else
+#define BATT(x...) do {} while (0)
+#endif
+
+/* rpc related */
+#define APP_BATT_PDEV_NAME "rs30100001:00000000"
+#define APP_BATT_PROG 0x30100001
+#define APP_BATT_VER 0
+#define HTC_PROCEDURE_BATTERY_NULL 0
+#define HTC_PROCEDURE_GET_BATT_LEVEL 1
+#define HTC_PROCEDURE_GET_BATT_INFO 2
+#define HTC_PROCEDURE_GET_CABLE_STATUS 3
+#define HTC_PROCEDURE_SET_BATT_DELTA 4
+
+/* module debugger */
+#define HTC_BATTERY_DEBUG 1
+#define BATTERY_PREVENTION 1
+
+/* Enable this will shut down if no battery */
+#define ENABLE_BATTERY_DETECTION 0
+
+#define GPIO_BATTERY_DETECTION 21
+#define GPIO_BATTERY_CHARGER_EN 128
+
+/* Charge current selection */
+#define GPIO_BATTERY_CHARGER_CURRENT 129
+
+typedef enum {
+ DISABLE = 0,
+ ENABLE_SLOW_CHG,
+ ENABLE_FAST_CHG
+} batt_ctl_t;
+
+/* This order is the same as htc_power_supplies[]
+ * And it's also the same as htc_cable_status_update()
+ */
+typedef enum {
+ CHARGER_BATTERY = 0,
+ CHARGER_USB,
+ CHARGER_AC
+} charger_type_t;
+
+struct battery_info_reply {
+ u32 batt_id; /* Battery ID from ADC */
+ u32 batt_vol; /* Battery voltage from ADC */
+ u32 batt_temp; /* Battery Temperature (C) from formula and ADC */
+ u32 batt_current; /* Battery current from ADC */
+ u32 level; /* formula */
+ u32 charging_source; /* 0: no cable, 1:usb, 2:AC */
+ u32 charging_enabled; /* 0: Disable, 1: Enable */
+ u32 full_bat; /* Full capacity of battery (mAh) */
+};
+
+struct htc_battery_info {
+ int present;
+ unsigned long update_time;
+
+ /* lock to protect the battery info */
+ struct mutex lock;
+
+ /* lock held while calling the arm9 to query the battery info */
+ struct mutex rpc_lock;
+ struct battery_info_reply rep;
+};
+
+static struct msm_rpc_endpoint *endpoint;
+
+static struct htc_battery_info htc_batt_info;
+
+static unsigned int cache_time = 1000;
+
+static int htc_battery_initial = 0;
+
+static enum power_supply_property htc_battery_properties[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+ POWER_SUPPLY_PROP_CAPACITY,
+};
+
+static enum power_supply_property htc_power_properties[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static char *supply_list[] = {
+ "battery",
+};
+
+/* HTC dedicated attributes */
+static ssize_t htc_battery_show_property(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static int htc_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+
+static int htc_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val);
+
+static struct power_supply htc_power_supplies[] = {
+ {
+ .name = "battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = htc_battery_properties,
+ .num_properties = ARRAY_SIZE(htc_battery_properties),
+ .get_property = htc_battery_get_property,
+ },
+ {
+ .name = "usb",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .supplied_to = supply_list,
+ .num_supplicants = ARRAY_SIZE(supply_list),
+ .properties = htc_power_properties,
+ .num_properties = ARRAY_SIZE(htc_power_properties),
+ .get_property = htc_power_get_property,
+ },
+ {
+ .name = "ac",
+ .type = POWER_SUPPLY_TYPE_MAINS,
+ .supplied_to = supply_list,
+ .num_supplicants = ARRAY_SIZE(supply_list),
+ .properties = htc_power_properties,
+ .num_properties = ARRAY_SIZE(htc_power_properties),
+ .get_property = htc_power_get_property,
+ },
+};
+
+
+/* -------------------------------------------------------------------------- */
+
+#if defined(CONFIG_DEBUG_FS)
+int htc_battery_set_charging(batt_ctl_t ctl);
+static int batt_debug_set(void *data, u64 val)
+{
+ return htc_battery_set_charging((batt_ctl_t) val);
+}
+
+static int batt_debug_get(void *data, u64 *val)
+{
+ return -ENOSYS;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(batt_debug_fops, batt_debug_get, batt_debug_set, "%llu\n");
+static int __init batt_debug_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("htc_battery", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debugfs_create_file("charger_state", 0644, dent, NULL, &batt_debug_fops);
+
+ return 0;
+}
+
+device_initcall(batt_debug_init);
+#endif
+
+static int init_batt_gpio(void)
+{
+ if (gpio_request(GPIO_BATTERY_DETECTION, "batt_detect") < 0)
+ goto gpio_failed;
+ if (gpio_request(GPIO_BATTERY_CHARGER_EN, "charger_en") < 0)
+ goto gpio_failed;
+ if (gpio_request(GPIO_BATTERY_CHARGER_CURRENT, "charge_current") < 0)
+ goto gpio_failed;
+
+ return 0;
+
+gpio_failed:
+ return -EINVAL;
+
+}
+
+/*
+ * battery_charging_ctrl - battery charing control.
+ * @ctl: battery control command
+ *
+ */
+static int battery_charging_ctrl(batt_ctl_t ctl)
+{
+ int result = 0;
+
+ switch (ctl) {
+ case DISABLE:
+ BATT("charger OFF\n");
+ /* 0 for enable; 1 disable */
+ result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 1);
+ break;
+ case ENABLE_SLOW_CHG:
+ BATT("charger ON (SLOW)\n");
+ result = gpio_direction_output(GPIO_BATTERY_CHARGER_CURRENT, 0);
+ result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 0);
+ break;
+ case ENABLE_FAST_CHG:
+ BATT("charger ON (FAST)\n");
+ result = gpio_direction_output(GPIO_BATTERY_CHARGER_CURRENT, 1);
+ result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 0);
+ break;
+ default:
+ printk(KERN_ERR "Not supported battery ctr called.!\n");
+ result = -EINVAL;
+ break;
+ }
+
+ return result;
+}
+
+int htc_battery_set_charging(batt_ctl_t ctl)
+{
+ int rc;
+
+ if ((rc = battery_charging_ctrl(ctl)) < 0)
+ goto result;
+
+ if (!htc_battery_initial) {
+ htc_batt_info.rep.charging_enabled = ctl & 0x3;
+ } else {
+ mutex_lock(&htc_batt_info.lock);
+ htc_batt_info.rep.charging_enabled = ctl & 0x3;
+ mutex_unlock(&htc_batt_info.lock);
+ }
+result:
+ return rc;
+}
+
+int htc_battery_status_update(u32 curr_level)
+{
+ int notify;
+ if (!htc_battery_initial)
+ return 0;
+
+ mutex_lock(&htc_batt_info.lock);
+ notify = (htc_batt_info.rep.level != curr_level);
+ htc_batt_info.rep.level = curr_level;
+ mutex_unlock(&htc_batt_info.lock);
+
+ if (notify)
+ power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]);
+ return 0;
+}
+
+int htc_cable_status_update(int status)
+{
+ int rc = 0;
+ unsigned source;
+
+ if (!htc_battery_initial)
+ return 0;
+
+ mutex_lock(&htc_batt_info.lock);
+ switch(status) {
+ case CHARGER_BATTERY:
+ BATT("cable NOT PRESENT\n");
+ htc_batt_info.rep.charging_source = CHARGER_BATTERY;
+ break;
+ case CHARGER_USB:
+ BATT("cable USB\n");
+ htc_batt_info.rep.charging_source = CHARGER_USB;
+ break;
+ case CHARGER_AC:
+ BATT("cable AC\n");
+ htc_batt_info.rep.charging_source = CHARGER_AC;
+ break;
+ default:
+ printk(KERN_ERR "%s: Not supported cable status received!\n",
+ __FUNCTION__);
+ rc = -EINVAL;
+ }
+ source = htc_batt_info.rep.charging_source;
+ mutex_unlock(&htc_batt_info.lock);
+
+ msm_hsusb_set_vbus_state(source == CHARGER_USB);
+ if (source == CHARGER_USB) {
+ wake_lock(&vbus_wake_lock);
+ } else {
+ /* give userspace some time to see the uevent and update
+ * LED state or whatnot...
+ */
+ wake_lock_timeout(&vbus_wake_lock, HZ / 2);
+ }
+
+ /* if the power source changes, all power supplies may change state */
+ power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]);
+ power_supply_changed(&htc_power_supplies[CHARGER_USB]);
+ power_supply_changed(&htc_power_supplies[CHARGER_AC]);
+
+ return rc;
+}
+
+static int htc_get_batt_info(struct battery_info_reply *buffer)
+{
+ struct rpc_request_hdr req;
+
+ struct htc_get_batt_info_rep {
+ struct rpc_reply_hdr hdr;
+ struct battery_info_reply info;
+ } rep;
+
+ int rc;
+
+ if (buffer == NULL)
+ return -EINVAL;
+
+ rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_BATT_INFO,
+ &req, sizeof(req),
+ &rep, sizeof(rep),
+ 5 * HZ);
+ if ( rc < 0 )
+ return rc;
+
+ mutex_lock(&htc_batt_info.lock);
+ buffer->batt_id = be32_to_cpu(rep.info.batt_id);
+ buffer->batt_vol = be32_to_cpu(rep.info.batt_vol);
+ buffer->batt_temp = be32_to_cpu(rep.info.batt_temp);
+ buffer->batt_current = be32_to_cpu(rep.info.batt_current);
+ buffer->level = be32_to_cpu(rep.info.level);
+ buffer->charging_source = be32_to_cpu(rep.info.charging_source);
+ buffer->charging_enabled = be32_to_cpu(rep.info.charging_enabled);
+ buffer->full_bat = be32_to_cpu(rep.info.full_bat);
+ mutex_unlock(&htc_batt_info.lock);
+
+ return 0;
+}
+
+#if 0
+static int htc_get_cable_status(void)
+{
+
+ struct rpc_request_hdr req;
+
+ struct htc_get_cable_status_rep {
+ struct rpc_reply_hdr hdr;
+ int status;
+ } rep;
+
+ int rc;
+
+ rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_CABLE_STATUS,
+ &req, sizeof(req),
+ &rep, sizeof(rep),
+ 5 * HZ);
+ if (rc < 0)
+ return rc;
+
+ return be32_to_cpu(rep.status);
+}
+#endif
+
+/* -------------------------------------------------------------------------- */
+static int htc_power_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ charger_type_t charger;
+
+ mutex_lock(&htc_batt_info.lock);
+ charger = htc_batt_info.rep.charging_source;
+ mutex_unlock(&htc_batt_info.lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (psy->type == POWER_SUPPLY_TYPE_MAINS)
+ val->intval = (charger == CHARGER_AC ? 1 : 0);
+ else if (psy->type == POWER_SUPPLY_TYPE_USB)
+ val->intval = (charger == CHARGER_USB ? 1 : 0);
+ else
+ val->intval = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int htc_battery_get_charging_status(void)
+{
+ u32 level;
+ charger_type_t charger;
+ int ret;
+
+ mutex_lock(&htc_batt_info.lock);
+ charger = htc_batt_info.rep.charging_source;
+
+ switch (charger) {
+ case CHARGER_BATTERY:
+ ret = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ case CHARGER_USB:
+ case CHARGER_AC:
+ level = htc_batt_info.rep.level;
+ if (level == 100)
+ ret = POWER_SUPPLY_STATUS_FULL;
+ else
+ ret = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ default:
+ ret = POWER_SUPPLY_STATUS_UNKNOWN;
+ }
+ mutex_unlock(&htc_batt_info.lock);
+ return ret;
+}
+
+static int htc_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = htc_battery_get_charging_status();
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = htc_batt_info.present;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ mutex_lock(&htc_batt_info.lock);
+ val->intval = htc_batt_info.rep.level;
+ mutex_unlock(&htc_batt_info.lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define HTC_BATTERY_ATTR(_name) \
+{ \
+ .attr = { .name = #_name, .mode = S_IRUGO, .owner = THIS_MODULE }, \
+ .show = htc_battery_show_property, \
+ .store = NULL, \
+}
+
+static struct device_attribute htc_battery_attrs[] = {
+ HTC_BATTERY_ATTR(batt_id),
+ HTC_BATTERY_ATTR(batt_vol),
+ HTC_BATTERY_ATTR(batt_temp),
+ HTC_BATTERY_ATTR(batt_current),
+ HTC_BATTERY_ATTR(charging_source),
+ HTC_BATTERY_ATTR(charging_enabled),
+ HTC_BATTERY_ATTR(full_bat),
+};
+
+enum {
+ BATT_ID = 0,
+ BATT_VOL,
+ BATT_TEMP,
+ BATT_CURRENT,
+ CHARGING_SOURCE,
+ CHARGING_ENABLED,
+ FULL_BAT,
+};
+
+static int htc_rpc_set_delta(unsigned delta)
+{
+ struct set_batt_delta_req {
+ struct rpc_request_hdr hdr;
+ uint32_t data;
+ } req;
+
+ req.data = cpu_to_be32(delta);
+ return msm_rpc_call(endpoint, HTC_PROCEDURE_SET_BATT_DELTA,
+ &req, sizeof(req), 5 * HZ);
+}
+
+
+static ssize_t htc_battery_set_delta(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc;
+ unsigned long delta = 0;
+
+ delta = simple_strtoul(buf, NULL, 10);
+
+ if (delta > 100)
+ return -EINVAL;
+
+ mutex_lock(&htc_batt_info.rpc_lock);
+ rc = htc_rpc_set_delta(delta);
+ mutex_unlock(&htc_batt_info.rpc_lock);
+ if (rc < 0)
+ return rc;
+ return count;
+}
+
+static struct device_attribute htc_set_delta_attrs[] = {
+ __ATTR(delta, S_IWUSR | S_IWGRP, NULL, htc_battery_set_delta),
+};
+
+static int htc_battery_create_attrs(struct device * dev)
+{
+ int i, j, rc;
+
+ for (i = 0; i < ARRAY_SIZE(htc_battery_attrs); i++) {
+ rc = device_create_file(dev, &htc_battery_attrs[i]);
+ if (rc)
+ goto htc_attrs_failed;
+ }
+
+ for (j = 0; j < ARRAY_SIZE(htc_set_delta_attrs); j++) {
+ rc = device_create_file(dev, &htc_set_delta_attrs[j]);
+ if (rc)
+ goto htc_delta_attrs_failed;
+ }
+
+ goto succeed;
+
+htc_attrs_failed:
+ while (i--)
+ device_remove_file(dev, &htc_battery_attrs[i]);
+htc_delta_attrs_failed:
+ while (j--)
+ device_remove_file(dev, &htc_set_delta_attrs[i]);
+succeed:
+ return rc;
+}
+
+static ssize_t htc_battery_show_property(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i = 0;
+ const ptrdiff_t off = attr - htc_battery_attrs;
+
+ /* rpc lock is used to prevent two threads from calling
+ * into the get info rpc at the same time
+ */
+
+ mutex_lock(&htc_batt_info.rpc_lock);
+ /* check cache time to decide if we need to update */
+ if (htc_batt_info.update_time &&
+ time_before(jiffies, htc_batt_info.update_time +
+ msecs_to_jiffies(cache_time)))
+ goto dont_need_update;
+
+ if (htc_get_batt_info(&htc_batt_info.rep) < 0) {
+ printk(KERN_ERR "%s: rpc failed!!!\n", __FUNCTION__);
+ } else {
+ htc_batt_info.update_time = jiffies;
+ }
+dont_need_update:
+ mutex_unlock(&htc_batt_info.rpc_lock);
+
+ mutex_lock(&htc_batt_info.lock);
+ switch (off) {
+ case BATT_ID:
+ i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
+ htc_batt_info.rep.batt_id);
+ break;
+ case BATT_VOL:
+ i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
+ htc_batt_info.rep.batt_vol);
+ break;
+ case BATT_TEMP:
+ i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
+ htc_batt_info.rep.batt_temp);
+ break;
+ case BATT_CURRENT:
+ i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
+ htc_batt_info.rep.batt_current);
+ break;
+ case CHARGING_SOURCE:
+ i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
+ htc_batt_info.rep.charging_source);
+ break;
+ case CHARGING_ENABLED:
+ i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
+ htc_batt_info.rep.charging_enabled);
+ break;
+ case FULL_BAT:
+ i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n",
+ htc_batt_info.rep.full_bat);
+ break;
+ default:
+ i = -EINVAL;
+ }
+ mutex_unlock(&htc_batt_info.lock);
+
+ return i;
+}
+
+static int htc_battery_probe(struct platform_device *pdev)
+{
+ int i, rc;
+
+ /* init battery gpio */
+ if ((rc = init_batt_gpio()) < 0) {
+ printk(KERN_ERR "%s: init battery gpio failed!\n", __FUNCTION__);
+ return rc;
+ }
+
+ /* init structure data member */
+ htc_batt_info.update_time = jiffies;
+ htc_batt_info.present = gpio_get_value(GPIO_BATTERY_DETECTION);
+
+ /* init rpc */
+ endpoint = msm_rpc_connect(APP_BATT_PROG, APP_BATT_VER, 0);
+ if (IS_ERR(endpoint)) {
+ printk(KERN_ERR "%s: init rpc failed! rc = %ld\n",
+ __FUNCTION__, PTR_ERR(endpoint));
+ return rc;
+ }
+
+ /* init power supplier framework */
+ for (i = 0; i < ARRAY_SIZE(htc_power_supplies); i++) {
+ rc = power_supply_register(&pdev->dev, &htc_power_supplies[i]);
+ if (rc)
+ printk(KERN_ERR "Failed to register power supply (%d)\n", rc);
+ }
+
+ /* create htc detail attributes */
+ htc_battery_create_attrs(htc_power_supplies[CHARGER_BATTERY].dev);
+
+ /* After battery driver gets initialized, send rpc request to inquiry
+ * the battery status in case of we lost some info
+ */
+ htc_battery_initial = 1;
+
+ mutex_lock(&htc_batt_info.rpc_lock);
+ if (htc_get_batt_info(&htc_batt_info.rep) < 0)
+ printk(KERN_ERR "%s: get info failed\n", __FUNCTION__);
+
+ htc_cable_status_update(htc_batt_info.rep.charging_source);
+ battery_charging_ctrl(htc_batt_info.rep.charging_enabled ?
+ ENABLE_SLOW_CHG : DISABLE);
+
+ if (htc_rpc_set_delta(1) < 0)
+ printk(KERN_ERR "%s: set delta failed\n", __FUNCTION__);
+ htc_batt_info.update_time = jiffies;
+ mutex_unlock(&htc_batt_info.rpc_lock);
+
+ if (htc_batt_info.rep.charging_enabled == 0)
+ battery_charging_ctrl(DISABLE);
+
+ return 0;
+}
+
+static struct platform_driver htc_battery_driver = {
+ .probe = htc_battery_probe,
+ .driver = {
+ .name = APP_BATT_PDEV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+/* batt_mtoa server definitions */
+#define BATT_MTOA_PROG 0x30100000
+#define BATT_MTOA_VERS 0
+#define RPC_BATT_MTOA_NULL 0
+#define RPC_BATT_MTOA_SET_CHARGING_PROC 1
+#define RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC 2
+#define RPC_BATT_MTOA_LEVEL_UPDATE_PROC 3
+
+struct rpc_batt_mtoa_set_charging_args {
+ int enable;
+};
+
+struct rpc_batt_mtoa_cable_status_update_args {
+ int status;
+};
+
+struct rpc_dem_battery_update_args {
+ uint32_t level;
+};
+
+static int handle_battery_call(struct msm_rpc_server *server,
+ struct rpc_request_hdr *req, unsigned len)
+{
+ switch (req->procedure) {
+ case RPC_BATT_MTOA_NULL:
+ return 0;
+
+ case RPC_BATT_MTOA_SET_CHARGING_PROC: {
+ struct rpc_batt_mtoa_set_charging_args *args;
+ args = (struct rpc_batt_mtoa_set_charging_args *)(req + 1);
+ args->enable = be32_to_cpu(args->enable);
+ BATT("set_charging: enable=%d\n",args->enable);
+ htc_battery_set_charging(args->enable);
+ return 0;
+ }
+ case RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC: {
+ struct rpc_batt_mtoa_cable_status_update_args *args;
+ args = (struct rpc_batt_mtoa_cable_status_update_args *)(req + 1);
+ args->status = be32_to_cpu(args->status);
+ BATT("cable_status_update: status=%d\n",args->status);
+ htc_cable_status_update(args->status);
+ return 0;
+ }
+ case RPC_BATT_MTOA_LEVEL_UPDATE_PROC: {
+ struct rpc_dem_battery_update_args *args;
+ args = (struct rpc_dem_battery_update_args *)(req + 1);
+ args->level = be32_to_cpu(args->level);
+ BATT("dem_battery_update: level=%d\n",args->level);
+ htc_battery_status_update(args->level);
+ return 0;
+ }
+ default:
+ printk(KERN_ERR "%s: program 0x%08x:%d: unknown procedure %d\n",
+ __FUNCTION__, req->prog, req->vers, req->procedure);
+ return -ENODEV;
+ }
+}
+
+static struct msm_rpc_server battery_server = {
+ .prog = BATT_MTOA_PROG,
+ .vers = BATT_MTOA_VERS,
+ .rpc_call = handle_battery_call,
+};
+
+static int __init htc_battery_init(void)
+{
+ wake_lock_init(&vbus_wake_lock, WAKE_LOCK_SUSPEND, "vbus_present");
+ mutex_init(&htc_batt_info.lock);
+ mutex_init(&htc_batt_info.rpc_lock);
+ msm_rpc_create_server(&battery_server);
+ platform_driver_register(&htc_battery_driver);
+ return 0;
+}
+
+module_init(htc_battery_init);
+MODULE_DESCRIPTION("HTC Battery Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/arch/arm/mach-msm/htc_headset.c b/arch/arm/mach-msm/htc_headset.c
new file mode 100644
index 000000000000..a69a2e1ca5f8
--- /dev/null
+++ b/arch/arm/mach-msm/htc_headset.c
@@ -0,0 +1,1246 @@
+/*
+ * H2W device detection driver.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC, Inc.
+ *
+ * Authors:
+ * Laurence Chen <Laurence_Chen@htc.com>
+ * Nick Pelly <npelly@google.com>
+ * Thomas Tsai <thomas_tsai@htc.com>
+ * Farmer Tseng <farmer_tseng@htc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+/* For detecting HTC 2 Wire devices, such as wired headset.
+
+ Logically, the H2W driver is always present, and H2W state (hi->state)
+ indicates what is currently plugged into the H2W interface.
+
+ When the headset is plugged in, CABLE_IN1 is pulled low. When the headset
+ button is pressed, CABLE_IN2 is pulled low. These two lines are shared with
+ the TX and RX (respectively) of UART3 - used for serial debugging.
+
+ This headset driver keeps the CPLD configured as UART3 for as long as
+ possible, so that we can do serial FIQ debugging even when the kernel is
+ locked and this driver no longer runs. So it only configures the CPLD to
+ GPIO while the headset is plugged in, and for 10ms during detection work.
+
+ Unfortunately we can't leave the CPLD as UART3 while a headset is plugged
+ in, UART3 is pullup on TX but the headset is pull-down, causing a 55 mA
+ drain on trout.
+
+ The headset detection work involves setting CPLD to GPIO, and then pulling
+ CABLE_IN1 high with a stronger pullup than usual. A H2W headset will still
+ pull this line low, whereas other attachments such as a serial console
+ would get pulled up by this stronger pullup.
+
+ Headset insertion/removal causes UEvent's to be sent, and
+ /sys/class/switch/h2w/state to be updated.
+
+ Button presses are interpreted as input event (KEY_MEDIA). Button presses
+ are ignored if the headset is plugged in, so the buttons on 11 pin -> 3.5mm
+ jack adapters do not work until a headset is plugged into the adapter. This
+ is to avoid serial RX traffic causing spurious button press events.
+
+ We tend to check the status of CABLE_IN1 a few more times than strictly
+ necessary during headset detection, to avoid spurious headset insertion
+ events caused by serial debugger TX traffic.
+*/
+
+#include <linux/module.h>
+#include <linux/sysdev.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/switch.h>
+#include <linux/input.h>
+#include <linux/debugfs.h>
+#include <asm/gpio.h>
+#include <asm/atomic.h>
+#include <mach/board.h>
+#include <mach/vreg.h>
+#include <asm/mach-types.h>
+
+#include <mach/htc_headset.h>
+
+#define H2WI(fmt, arg...) \
+ printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg)
+#define H2WE(fmt, arg...) \
+ printk(KERN_ERR "[H2W] %s " fmt "\r\n", __func__, ## arg)
+
+#ifdef CONFIG_DEBUG_H2W
+#define H2W_DBG(fmt, arg...) printk(KERN_INFO "[H2W] %s " fmt "\r\n", __func__, ## arg)
+#else
+#define H2W_DBG(fmt, arg...) do {} while (0)
+#endif
+
+static struct workqueue_struct *g_detection_work_queue;
+static void detection_work(struct work_struct *work);
+static DECLARE_WORK(g_detection_work, detection_work);
+
+struct h2w_info {
+ struct switch_dev sdev;
+ struct input_dev *input;
+ struct mutex mutex_lock;
+
+ atomic_t btn_state;
+ int ignore_btn;
+
+ unsigned int irq;
+ unsigned int irq_btn;
+
+ int cable_in1;
+ int cable_in2;
+ int h2w_clk;
+ int h2w_data;
+ int debug_uart;
+
+ void (*config_cpld) (int);
+ void (*init_cpld) (void);
+ /* for h2w */
+ void (*set_dat)(int);
+ void (*set_clk)(int);
+ void (*set_dat_dir)(int);
+ void (*set_clk_dir)(int);
+ int (*get_dat)(void);
+ int (*get_clk)(void);
+
+ int htc_headset_flag;
+
+ struct hrtimer timer;
+ ktime_t debounce_time;
+
+ struct hrtimer btn_timer;
+ ktime_t btn_debounce_time;
+
+ H2W_INFO h2w_info;
+ H2W_SPEED speed;
+ struct vreg *vreg_h2w;
+};
+static struct h2w_info *hi;
+
+static ssize_t h2w_print_name(struct switch_dev *sdev, char *buf)
+{
+ switch (switch_get_state(&hi->sdev)) {
+ case H2W_NO_DEVICE:
+ return sprintf(buf, "No Device\n");
+ case H2W_HTC_HEADSET:
+ return sprintf(buf, "Headset\n");
+ }
+ return -EINVAL;
+}
+
+static void button_pressed(void)
+{
+ H2W_DBG("button_pressed \n");
+ atomic_set(&hi->btn_state, 1);
+ input_report_key(hi->input, KEY_MEDIA, 1);
+ input_sync(hi->input);
+}
+
+static void button_released(void)
+{
+ H2W_DBG("button_released \n");
+ atomic_set(&hi->btn_state, 0);
+ input_report_key(hi->input, KEY_MEDIA, 0);
+ input_sync(hi->input);
+}
+
+/*****************
+ * H2W proctocol *
+ *****************/
+static inline void h2w_begin_command(void)
+{
+ /* Disable H2W interrupt */
+ set_irq_type(hi->irq_btn, IRQF_TRIGGER_HIGH);
+ disable_irq(hi->irq);
+ disable_irq(hi->irq_btn);
+
+ /* Set H2W_CLK as output low */
+ hi->set_clk(0);
+ hi->set_clk_dir(1);
+}
+
+static inline void h2w_end_command(void)
+{
+ /* Set H2W_CLK as input */
+ hi->set_clk_dir(0);
+
+ /* Enable H2W interrupt */
+ enable_irq(hi->irq);
+ enable_irq(hi->irq_btn);
+ set_irq_type(hi->irq_btn, IRQF_TRIGGER_RISING);
+}
+
+/*
+ * One bit write data
+ * ________
+ * SCLK O ______| |______O(L)
+ *
+ *
+ * SDAT I <XXXXXXXXXXXXXXXXXXXX>
+ */
+static inline void one_clock_write(unsigned short flag)
+{
+ if (flag)
+ hi->set_dat(1);
+ else
+ hi->set_dat(0);
+
+ udelay(hi->speed);
+ hi->set_clk(1);
+ udelay(hi->speed);
+ hi->set_clk(0);
+}
+
+/*
+ * One bit write data R/W bit
+ * ________
+ * SCLK ______| |______O(L)
+ * 1----> 1----->
+ * 2-------> ______
+ * SDAT <XXXXXXXXXXXXXX> I
+ * O(H/L)
+ */
+static inline void one_clock_write_RWbit(unsigned short flag)
+{
+ if (flag)
+ hi->set_dat(1);
+ else
+ hi->set_dat(0);
+
+ udelay(hi->speed);
+ hi->set_clk(1);
+ udelay(hi->speed);
+ hi->set_clk(0);
+ hi->set_dat_dir(0);
+ udelay(hi->speed);
+}
+
+/*
+ * H2W Reset
+ * ___________
+ * SCLK O(L)______| |___O(L)
+ * 1---->
+ * 4-->1-->1-->1us-->
+ * ____
+ * SDAT O(L)________ | |_______O(L)
+ *
+ * H2w reset command needs to be issued before every access
+ */
+static inline void h2w_reset(void)
+{
+ /* Set H2W_DAT as output low */
+ hi->set_dat(0);
+ hi->set_dat_dir(1);
+
+ udelay(hi->speed);
+ hi->set_clk(1);
+ udelay(4 * hi->speed);
+ hi->set_dat(1);
+ udelay(hi->speed);
+ hi->set_dat(0);
+ udelay(hi->speed);
+ hi->set_clk(0);
+ udelay(hi->speed);
+}
+
+/*
+ * H2W Start
+ * ___________
+ * SCLK O(L)______| |___O(L)
+ * 1---->
+ * 2----------->1-->
+ *
+ * SDAT O(L)______________________O(L)
+ */
+static inline void h2w_start(void)
+{
+ udelay(hi->speed);
+ hi->set_clk(1);
+ udelay(2 * hi->speed);
+ hi->set_clk(0);
+ udelay(hi->speed);
+}
+
+/*
+ * H2W Ack
+ * __________
+ * SCLK _____| |_______O(L)
+ * 1----> 1------>
+ * 2--------->
+ * ________________________
+ * SDAT become Input mode here I
+ */
+static inline int h2w_ack(void)
+{
+ int retry_times = 0;
+
+ack_resend:
+ if (retry_times == MAX_ACK_RESEND_TIMES)
+ return -1;
+
+ udelay(hi->speed);
+ hi->set_clk(1);
+ udelay(2 * hi->speed);
+
+ if (!hi->get_dat()) {
+ retry_times++;
+ hi->set_clk(0);
+ udelay(hi->speed);
+ goto ack_resend;
+ }
+
+ hi->set_clk(0);
+ udelay(hi->speed);
+ return 0;
+}
+
+/*
+ * One bit read data
+ * ________
+ * SCLK ______| |______O(L)
+ * 2----> 2----->
+ * 2------->
+ * SDAT <XXXXXXXXXXXXXXXXXXXX>I
+ */
+static unsigned char h2w_readc(void)
+{
+ unsigned char h2w_read_data = 0x0;
+ int index;
+
+ for (index = 0; index < 8; index++) {
+ hi->set_clk(0);
+ udelay(hi->speed);
+ hi->set_clk(1);
+ udelay(hi->speed);
+ if (hi->get_dat())
+ h2w_read_data |= (1 << (7 - index));
+ }
+ hi->set_clk(0);
+ udelay(hi->speed);
+
+ return h2w_read_data;
+}
+
+static int h2w_readc_cmd(H2W_ADDR address)
+{
+ int ret = -1, retry_times = 0;
+ unsigned char read_data;
+
+read_resend:
+ if (retry_times == MAX_HOST_RESEND_TIMES)
+ goto err_read;
+
+ h2w_reset();
+ h2w_start();
+ /* Write address */
+ one_clock_write(address & 0x1000);
+ one_clock_write(address & 0x0800);
+ one_clock_write(address & 0x0400);
+ one_clock_write(address & 0x0200);
+ one_clock_write(address & 0x0100);
+ one_clock_write(address & 0x0080);
+ one_clock_write(address & 0x0040);
+ one_clock_write(address & 0x0020);
+ one_clock_write(address & 0x0010);
+ one_clock_write(address & 0x0008);
+ one_clock_write(address & 0x0004);
+ one_clock_write(address & 0x0002);
+ one_clock_write(address & 0x0001);
+ one_clock_write_RWbit(1);
+ if (h2w_ack() < 0) {
+ H2W_DBG("Addr NO ACK(%d).\n", retry_times);
+ retry_times++;
+ hi->set_clk(0);
+ mdelay(RESEND_DELAY);
+ goto read_resend;
+ }
+
+ read_data = h2w_readc();
+
+ if (h2w_ack() < 0) {
+ H2W_DBG("Data NO ACK(%d).\n", retry_times);
+ retry_times++;
+ hi->set_clk(0);
+ mdelay(RESEND_DELAY);
+ goto read_resend;
+ }
+ ret = (int)read_data;
+
+err_read:
+ if (ret < 0)
+ H2WE("NO ACK.\n");
+
+ return ret;
+}
+
+static int h2w_writec_cmd(H2W_ADDR address, unsigned char data)
+{
+ int ret = -1;
+ int retry_times = 0;
+
+write_resend:
+ if (retry_times == MAX_HOST_RESEND_TIMES)
+ goto err_write;
+
+ h2w_reset();
+ h2w_start();
+
+ /* Write address */
+ one_clock_write(address & 0x1000);
+ one_clock_write(address & 0x0800);
+ one_clock_write(address & 0x0400);
+ one_clock_write(address & 0x0200);
+ one_clock_write(address & 0x0100);
+ one_clock_write(address & 0x0080);
+ one_clock_write(address & 0x0040);
+ one_clock_write(address & 0x0020);
+ one_clock_write(address & 0x0010);
+ one_clock_write(address & 0x0008);
+ one_clock_write(address & 0x0004);
+ one_clock_write(address & 0x0002);
+ one_clock_write(address & 0x0001);
+ one_clock_write_RWbit(0);
+ if (h2w_ack() < 0) {
+ H2W_DBG("Addr NO ACK(%d).\n", retry_times);
+ retry_times++;
+ hi->set_clk(0);
+ mdelay(RESEND_DELAY);
+ goto write_resend;
+ }
+
+ /* Write data */
+ hi->set_dat_dir(1);
+ one_clock_write(data & 0x0080);
+ one_clock_write(data & 0x0040);
+ one_clock_write(data & 0x0020);
+ one_clock_write(data & 0x0010);
+ one_clock_write(data & 0x0008);
+ one_clock_write(data & 0x0004);
+ one_clock_write(data & 0x0002);
+ one_clock_write_RWbit(data & 0x0001);
+ if (h2w_ack() < 0) {
+ H2W_DBG("Data NO ACK(%d).\n", retry_times);
+ retry_times++;
+ hi->set_clk(0);
+ mdelay(RESEND_DELAY);
+ goto write_resend;
+ }
+ ret = 0;
+
+err_write:
+ if (ret < 0)
+ H2WE("NO ACK.\n");
+
+ return ret;
+}
+
+static int h2w_get_fnkey(void)
+{
+ int ret;
+ h2w_begin_command();
+ ret = h2w_readc_cmd(H2W_FNKEY_UPDOWN);
+ h2w_end_command();
+ return ret;
+}
+
+static int h2w_dev_init(H2W_INFO *ph2w_info)
+{
+ int ret = -1;
+ unsigned char ascr0 = 0;
+ int h2w_sys = 0, maxgpadd = 0, maxadd = 0, key = 0;
+
+ hi->speed = H2W_50KHz;
+ h2w_begin_command();
+
+ /* read H2W_SYSTEM */
+ h2w_sys = h2w_readc_cmd(H2W_SYSTEM);
+ if (h2w_sys == -1) {
+ H2WE("read H2W_SYSTEM(0x0000) failed.\n");
+ goto err_plugin;
+ }
+ ph2w_info->ACC_CLASS = (h2w_sys & 0x03);
+ ph2w_info->AUDIO_DEVICE = (h2w_sys & 0x04) > 0 ? 1 : 0;
+ ph2w_info->HW_REV = (h2w_sys & 0x18) >> 3;
+ ph2w_info->SLEEP_PR = (h2w_sys & 0x20) >> 5;
+ ph2w_info->CLK_SP = (h2w_sys & 0xC0) >> 6;
+
+ /* enter init mode */
+ if (h2w_writec_cmd(H2W_ASCR0, H2W_ASCR_DEVICE_INI) < 0) {
+ H2WE("write H2W_ASCR0(0x0002) failed.\n");
+ goto err_plugin;
+ }
+ udelay(10);
+
+ /* read H2W_MAX_GP_ADD */
+ maxgpadd = h2w_readc_cmd(H2W_MAX_GP_ADD);
+ if (maxgpadd == -1) {
+ H2WE("write H2W_MAX_GP_ADD(0x0001) failed.\n");
+ goto err_plugin;
+ }
+ ph2w_info->CLK_SP += (maxgpadd & 0x60) >> 3;
+ ph2w_info->MAX_GP_ADD = (maxgpadd & 0x1F);
+
+ /* read key group */
+ if (ph2w_info->MAX_GP_ADD >= 1) {
+ ph2w_info->KEY_MAXADD = h2w_readc_cmd(H2W_KEY_MAXADD);
+ if (ph2w_info->KEY_MAXADD == -1)
+ goto err_plugin;
+ if (ph2w_info->KEY_MAXADD >= 1) {
+ key = h2w_readc_cmd(H2W_ASCII_DOWN);
+ if (key < 0)
+ goto err_plugin;
+ ph2w_info->ASCII_DOWN = (key == 0xFF) ? 1 : 0;
+ }
+ if (ph2w_info->KEY_MAXADD >= 2) {
+ key = h2w_readc_cmd(H2W_ASCII_UP);
+ if (key == -1)
+ goto err_plugin;
+ ph2w_info->ASCII_UP = (key == 0xFF) ? 1 : 0;
+ }
+ if (ph2w_info->KEY_MAXADD >= 3) {
+ key = h2w_readc_cmd(H2W_FNKEY_UPDOWN);
+ if (key == -1)
+ goto err_plugin;
+ ph2w_info->FNKEY_UPDOWN = (key == 0xFF) ? 1 : 0;
+ }
+ if (ph2w_info->KEY_MAXADD >= 4) {
+ key = h2w_readc_cmd(H2W_KD_STATUS);
+ if (key == -1)
+ goto err_plugin;
+ ph2w_info->KD_STATUS = (key == 0x01) ? 1 : 0;
+ }
+ }
+
+ /* read led group */
+ if (ph2w_info->MAX_GP_ADD >= 2) {
+ ph2w_info->LED_MAXADD = h2w_readc_cmd(H2W_LED_MAXADD);
+ if (ph2w_info->LED_MAXADD == -1)
+ goto err_plugin;
+ if (ph2w_info->LED_MAXADD >= 1) {
+ key = h2w_readc_cmd(H2W_LEDCT0);
+ if (key == -1)
+ goto err_plugin;
+ ph2w_info->LEDCT0 = (key == 0x02) ? 1 : 0;
+ }
+ }
+
+ /* read group 3, 4, 5 */
+ if (ph2w_info->MAX_GP_ADD >= 3) {
+ maxadd = h2w_readc_cmd(H2W_CRDL_MAXADD);
+ if (maxadd == -1)
+ goto err_plugin;
+ }
+ if (ph2w_info->MAX_GP_ADD >= 4) {
+ maxadd = h2w_readc_cmd(H2W_CARKIT_MAXADD);
+ if (maxadd == -1)
+ goto err_plugin;
+ }
+ if (ph2w_info->MAX_GP_ADD >= 5) {
+ maxadd = h2w_readc_cmd(H2W_USBHOST_MAXADD);
+ if (maxadd == -1)
+ goto err_plugin;
+ }
+
+ /* read medical group */
+ if (ph2w_info->MAX_GP_ADD >= 6) {
+ ph2w_info->MED_MAXADD = h2w_readc_cmd(H2W_MED_MAXADD);
+ if (ph2w_info->MED_MAXADD == -1)
+ goto err_plugin;
+ if (ph2w_info->MED_MAXADD >= 1) {
+ key = h2w_readc_cmd(H2W_MED_CONTROL);
+ if (key == -1)
+ goto err_plugin;
+ ph2w_info->DATA_EN = (key & 0x01);
+ ph2w_info->AP_EN = (key & 0x02) >> 1;
+ ph2w_info->AP_ID = (key & 0x1c) >> 2;
+ }
+ if (ph2w_info->MED_MAXADD >= 2) {
+ key = h2w_readc_cmd(H2W_MED_IN_DATA);
+ if (key == -1)
+ goto err_plugin;
+ }
+ }
+
+ if (ph2w_info->AUDIO_DEVICE)
+ ascr0 = H2W_ASCR_AUDIO_IN | H2W_ASCR_ACT_EN;
+ else
+ ascr0 = H2W_ASCR_ACT_EN;
+
+ if (h2w_writec_cmd(H2W_ASCR0, ascr0) < 0)
+ goto err_plugin;
+ udelay(10);
+
+ ret = 0;
+
+ /* adjust speed */
+ if (ph2w_info->MAX_GP_ADD == 2) {
+ /* Remote control */
+ hi->speed = H2W_250KHz;
+ } else if (ph2w_info->MAX_GP_ADD == 6) {
+ if (ph2w_info->MED_MAXADD >= 1) {
+ key = h2w_readc_cmd(H2W_MED_CONTROL);
+ if (key == -1)
+ goto err_plugin;
+ ph2w_info->DATA_EN = (key & 0x01);
+ ph2w_info->AP_EN = (key & 0x02) >> 1;
+ ph2w_info->AP_ID = (key & 0x1c) >> 2;
+ }
+ }
+
+err_plugin:
+ h2w_end_command();
+
+ return ret;
+}
+
+static inline void h2w_dev_power_on(int on)
+{
+ if (!hi->vreg_h2w)
+ return;
+
+ if (on)
+ vreg_enable(hi->vreg_h2w);
+ else
+ vreg_disable(hi->vreg_h2w);
+}
+
+static int h2w_dev_detect(void)
+{
+ int ret = -1;
+ int retry_times;
+
+ for (retry_times = 5; retry_times; retry_times--) {
+ /* Enable H2W Power */
+ h2w_dev_power_on(1);
+ msleep(100);
+ memset(&hi->h2w_info, 0, sizeof(H2W_INFO));
+ if (h2w_dev_init(&hi->h2w_info) < 0) {
+ h2w_dev_power_on(0);
+ msleep(100);
+ } else if (hi->h2w_info.MAX_GP_ADD == 2) {
+ ret = 0;
+ break;
+ } else {
+ printk(KERN_INFO "h2w_detect: detect error(%d)\n"
+ , hi->h2w_info.MAX_GP_ADD);
+ h2w_dev_power_on(0);
+ msleep(100);
+ }
+ printk(KERN_INFO "h2w_detect(%d)\n"
+ , hi->h2w_info.MAX_GP_ADD);
+ }
+ H2W_DBG("h2w_detect:(%d)\n", retry_times);
+ return ret;
+}
+
+static void remove_headset(void)
+{
+ unsigned long irq_flags;
+
+ H2W_DBG("");
+
+ mutex_lock(&hi->mutex_lock);
+ switch_set_state(&hi->sdev, switch_get_state(&hi->sdev) &
+ ~(BIT_HEADSET | BIT_HEADSET_NO_MIC));
+ mutex_unlock(&hi->mutex_lock);
+ hi->init_cpld();
+
+ /* Disable button */
+ switch (hi->htc_headset_flag) {
+ case H2W_HTC_HEADSET:
+ local_irq_save(irq_flags);
+ disable_irq(hi->irq_btn);
+ local_irq_restore(irq_flags);
+
+ if (atomic_read(&hi->btn_state))
+ button_released();
+ break;
+ case H2W_DEVICE:
+ h2w_dev_power_on(0);
+ set_irq_type(hi->irq_btn, IRQF_TRIGGER_LOW);
+ disable_irq(hi->irq_btn);
+ /* 10ms (5-15 with 10ms tick) */
+ hi->btn_debounce_time = ktime_set(0, 10000000);
+ hi->set_clk_dir(0);
+ hi->set_dat_dir(0);
+ break;
+ }
+
+ hi->htc_headset_flag = 0;
+ hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */
+
+}
+
+#ifdef CONFIG_MSM_SERIAL_DEBUGGER
+extern void msm_serial_debug_enable(int);
+#endif
+
+static void insert_headset(int type)
+{
+ unsigned long irq_flags;
+ int state;
+
+ H2W_DBG("");
+
+ hi->htc_headset_flag = type;
+ state = BIT_HEADSET | BIT_HEADSET_NO_MIC;
+
+ state = switch_get_state(&hi->sdev);
+ state &= ~(BIT_HEADSET_NO_MIC | BIT_HEADSET);
+ switch (type) {
+ case H2W_HTC_HEADSET:
+ printk(KERN_INFO "insert_headset H2W_HTC_HEADSET\n");
+ state |= BIT_HEADSET;
+ hi->ignore_btn = !gpio_get_value(hi->cable_in2);
+ /* Enable button irq */
+ local_irq_save(irq_flags);
+ enable_irq(hi->irq_btn);
+ local_irq_restore(irq_flags);
+ hi->debounce_time = ktime_set(0, 200000000); /* 20 ms */
+ break;
+ case H2W_DEVICE:
+ if (h2w_dev_detect() < 0) {
+ printk(KERN_INFO "H2W_DEVICE -- Non detect\n");
+ remove_headset();
+ } else {
+ printk(KERN_INFO "H2W_DEVICE -- detect\n");
+ hi->btn_debounce_time = ktime_set(0, 0);
+ local_irq_save(irq_flags);
+ enable_irq(hi->irq_btn);
+ set_irq_type(hi->irq_btn, IRQF_TRIGGER_RISING);
+ local_irq_restore(irq_flags);
+ state |= BIT_HEADSET;
+ }
+ break;
+ case H2W_USB_CRADLE:
+ state |= BIT_HEADSET_NO_MIC;
+ break;
+ case H2W_UART_DEBUG:
+ hi->config_cpld(hi->debug_uart);
+ printk(KERN_INFO "switch to H2W_UART_DEBUG\n");
+ default:
+ return;
+ }
+ mutex_lock(&hi->mutex_lock);
+ switch_set_state(&hi->sdev, state);
+ mutex_unlock(&hi->mutex_lock);
+
+#ifdef CONFIG_MSM_SERIAL_DEBUGGER
+ msm_serial_debug_enable(false);
+#endif
+
+}
+#if 0
+static void remove_headset(void)
+{
+ unsigned long irq_flags;
+
+ H2W_DBG("");
+
+ switch_set_state(&hi->sdev, H2W_NO_DEVICE);
+
+ hi->init_cpld();
+
+ /* Disable button */
+ local_irq_save(irq_flags);
+ disable_irq(hi->irq_btn);
+ local_irq_restore(irq_flags);
+
+ if (atomic_read(&hi->btn_state))
+ button_released();
+
+ hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */
+}
+#endif
+static int is_accessary_pluged_in(void)
+{
+ int type = 0;
+ int clk1 = 0, dat1 = 0, clk2 = 0, dat2 = 0, clk3 = 0, dat3 = 0;
+
+ /* Step1: save H2W_CLK and H2W_DAT */
+ /* Delay 10ms for pin stable. */
+ msleep(10);
+ clk1 = gpio_get_value(hi->h2w_clk);
+ dat1 = gpio_get_value(hi->h2w_data);
+
+ /*
+ * Step2: set GPIO_CABLE_IN1 as output high and GPIO_CABLE_IN2 as
+ * input
+ */
+ gpio_direction_output(hi->cable_in1, 1);
+ gpio_direction_input(hi->cable_in2);
+ /* Delay 10ms for pin stable. */
+ msleep(10);
+ /* Step 3: save H2W_CLK and H2W_DAT */
+ clk2 = gpio_get_value(hi->h2w_clk);
+ dat2 = gpio_get_value(hi->h2w_data);
+
+ /*
+ * Step 4: set GPIO_CABLE_IN1 as input and GPIO_CABLE_IN2 as output
+ * high
+ */
+ gpio_direction_input(hi->cable_in1);
+ gpio_direction_output(hi->cable_in2, 1);
+ /* Delay 10ms for pin stable. */
+ msleep(10);
+ /* Step 5: save H2W_CLK and H2W_DAT */
+ clk3 = gpio_get_value(hi->h2w_clk);
+ dat3 = gpio_get_value(hi->h2w_data);
+
+ /* Step 6: set both GPIO_CABLE_IN1 and GPIO_CABLE_IN2 as input */
+ gpio_direction_input(hi->cable_in1);
+ gpio_direction_input(hi->cable_in2);
+
+ H2W_DBG("(%d,%d) (%d,%d) (%d,%d)\n",
+ clk1, dat1, clk2, dat2, clk3, dat3);
+
+ if ((clk1 == 0) && (dat1 == 1) &&
+ (clk2 == 0) && (dat2 == 1) &&
+ (clk3 == 0) && (dat3 == 1))
+ type = H2W_HTC_HEADSET;
+ else if ((clk1 == 0) && (dat1 == 0) &&
+ (clk2 == 0) && (dat2 == 0) &&
+ (clk3 == 0) && (dat3 == 0))
+ type = NORMAL_HEARPHONE;
+ else if ((clk1 == 0) && (dat1 == 0) &&
+ (clk2 == 1) && (dat2 == 0) &&
+ (clk3 == 0) && (dat3 == 1))
+ type = H2W_DEVICE;
+ else if ((clk1 == 0) && (dat1 == 0) &&
+ (clk2 == 1) && (dat2 == 1) &&
+ (clk3 == 1) && (dat3 == 1))
+ type = H2W_USB_CRADLE;
+ else if ((clk1 == 0) && (dat1 == 1) &&
+ (clk2 == 1) && (dat2 == 1) &&
+ (clk3 == 0) && (dat3 == 1))
+ type = H2W_UART_DEBUG;
+ else
+ type = H2W_NO_DEVICE;
+
+ return type;
+}
+
+
+static void detection_work(struct work_struct *work)
+{
+ unsigned long irq_flags;
+ int type;
+
+ H2W_DBG("");
+
+ if (gpio_get_value(hi->cable_in1) != 0) {
+ /* Headset not plugged in */
+ if (switch_get_state(&hi->sdev) != H2W_NO_DEVICE)
+ remove_headset();
+ return;
+ }
+
+ /* Something plugged in, lets make sure its a headset */
+
+ /* Switch CPLD to GPIO to do detection */
+ hi->config_cpld(H2W_GPIO);
+
+ /* Disable headset interrupt while detecting.*/
+ local_irq_save(irq_flags);
+ disable_irq(hi->irq);
+ local_irq_restore(irq_flags);
+
+ /* Something plugged in, lets make sure its a headset */
+ type = is_accessary_pluged_in();
+
+ /* Restore IRQs */
+ local_irq_save(irq_flags);
+ enable_irq(hi->irq);
+ local_irq_restore(irq_flags);
+
+ insert_headset(type);
+}
+
+static enum hrtimer_restart button_event_timer_func(struct hrtimer *data)
+{
+ int key, press, keyname, h2w_key = 1;
+
+ H2W_DBG("");
+
+ if (switch_get_state(&hi->sdev) == H2W_HTC_HEADSET) {
+ switch (hi->htc_headset_flag) {
+ case H2W_HTC_HEADSET:
+ if (gpio_get_value(hi->cable_in2)) {
+ if (hi->ignore_btn)
+ hi->ignore_btn = 0;
+ else if (atomic_read(&hi->btn_state))
+ button_released();
+ } else {
+ if (!hi->ignore_btn &&
+ !atomic_read(&hi->btn_state))
+ button_pressed();
+ }
+ break;
+ case H2W_DEVICE:
+ if ((hi->get_dat() == 1) && (hi->get_clk() == 1)) {
+ /* Don't do anything because H2W pull out. */
+ H2WE("Remote Control pull out.\n");
+ } else {
+ key = h2w_get_fnkey();
+ press = (key > 0x7F) ? 0 : 1;
+ keyname = key & 0x7F;
+ /* H2WI("key = %d, press = %d,
+ keyname = %d \n",
+ key, press, keyname); */
+ switch (keyname) {
+ case H2W_KEY_PLAY:
+ H2WI("H2W_KEY_PLAY");
+ key = KEY_PLAYPAUSE;
+ break;
+ case H2W_KEY_FORWARD:
+ H2WI("H2W_KEY_FORWARD");
+ key = KEY_NEXTSONG;
+ break;
+ case H2W_KEY_BACKWARD:
+ H2WI("H2W_KEY_BACKWARD");
+ key = KEY_PREVIOUSSONG;
+ break;
+ case H2W_KEY_VOLUP:
+ H2WI("H2W_KEY_VOLUP");
+ key = KEY_VOLUMEUP;
+ break;
+ case H2W_KEY_VOLDOWN:
+ H2WI("H2W_KEY_VOLDOWN");
+ key = KEY_VOLUMEDOWN;
+ break;
+ case H2W_KEY_PICKUP:
+ H2WI("H2W_KEY_PICKUP");
+ key = KEY_SEND;
+ break;
+ case H2W_KEY_HANGUP:
+ H2WI("H2W_KEY_HANGUP");
+ key = KEY_END;
+ break;
+ case H2W_KEY_MUTE:
+ H2WI("H2W_KEY_MUTE");
+ key = KEY_MUTE;
+ break;
+ case H2W_KEY_HOLD:
+ H2WI("H2W_KEY_HOLD");
+ break;
+ default:
+ H2WI("default");
+ h2w_key = 0;
+ }
+ if (h2w_key) {
+ if (press)
+ H2WI("Press\n");
+ else
+ H2WI("Release\n");
+ input_report_key(hi->input, key, press);
+ }
+ }
+ break;
+ } /* end switch */
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static enum hrtimer_restart detect_event_timer_func(struct hrtimer *data)
+{
+ H2W_DBG("");
+
+ queue_work(g_detection_work_queue, &g_detection_work);
+ return HRTIMER_NORESTART;
+}
+
+static irqreturn_t detect_irq_handler(int irq, void *dev_id)
+{
+ int value1, value2;
+ int retry_limit = 10;
+
+ H2W_DBG("");
+ set_irq_type(hi->irq_btn, IRQF_TRIGGER_LOW);
+ do {
+ value1 = gpio_get_value(hi->cable_in1);
+ set_irq_type(hi->irq, value1 ?
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH);
+ value2 = gpio_get_value(hi->cable_in1);
+ } while (value1 != value2 && retry_limit-- > 0);
+
+ H2W_DBG("value2 = %d (%d retries), device=%d",
+ value2, (10-retry_limit), switch_get_state(&hi->sdev));
+
+ if ((switch_get_state(&hi->sdev) == H2W_NO_DEVICE) ^ value2) {
+ if (switch_get_state(&hi->sdev) == H2W_HTC_HEADSET)
+ hi->ignore_btn = 1;
+ /* Do the rest of the work in timer context */
+ hrtimer_start(&hi->timer, hi->debounce_time, HRTIMER_MODE_REL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t button_irq_handler(int irq, void *dev_id)
+{
+ int value1, value2;
+ int retry_limit = 10;
+
+ H2W_DBG("");
+ do {
+ value1 = gpio_get_value(hi->cable_in2);
+ if (hi->htc_headset_flag != H2W_DEVICE)
+ set_irq_type(hi->irq_btn, value1 ?
+ IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH);
+ value2 = gpio_get_value(hi->cable_in2);
+ } while (value1 != value2 && retry_limit-- > 0);
+
+ H2W_DBG("value2 = %d (%d retries)", value2, (10-retry_limit));
+
+ hrtimer_start(&hi->btn_timer, hi->btn_debounce_time, HRTIMER_MODE_REL);
+
+ return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static int h2w_debug_set(void *data, u64 val)
+{
+ mutex_lock(&hi->mutex_lock);
+ switch_set_state(&hi->sdev, (int)val);
+ mutex_unlock(&hi->mutex_lock);
+ return 0;
+}
+
+static int h2w_debug_get(void *data, u64 *val)
+{
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(h2w_debug_fops, h2w_debug_get, h2w_debug_set, "%llu\n");
+static int __init h2w_debug_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("h2w", 0);
+ if (IS_ERR(dent))
+ return PTR_ERR(dent);
+
+ debugfs_create_file("state", 0644, dent, NULL, &h2w_debug_fops);
+
+ return 0;
+}
+
+device_initcall(h2w_debug_init);
+#endif
+
+static int h2w_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct h2w_platform_data *pdata = pdev->dev.platform_data;
+
+ printk(KERN_INFO "H2W: Registering H2W (headset) driver\n");
+ hi = kzalloc(sizeof(struct h2w_info), GFP_KERNEL);
+ if (!hi)
+ return -ENOMEM;
+
+ atomic_set(&hi->btn_state, 0);
+ hi->ignore_btn = 0;
+
+ hi->debounce_time = ktime_set(0, 100000000); /* 100 ms */
+ hi->btn_debounce_time = ktime_set(0, 10000000); /* 10 ms */
+
+ hi->htc_headset_flag = 0;
+ hi->cable_in1 = pdata->cable_in1;
+ hi->cable_in2 = pdata->cable_in2;
+ hi->h2w_clk = pdata->h2w_clk;
+ hi->h2w_data = pdata->h2w_data;
+ hi->debug_uart = pdata->debug_uart;
+ hi->config_cpld = pdata->config_cpld;
+ hi->init_cpld = pdata->init_cpld;
+ hi->set_dat = pdata->set_dat;
+ hi->set_clk = pdata->set_clk;
+ hi->set_dat_dir = pdata->set_dat_dir;
+ hi->set_clk_dir = pdata->set_clk_dir;
+ hi->get_dat = pdata->get_dat;
+ hi->get_clk = pdata->get_clk;
+ hi->speed = H2W_50KHz;
+ /* obtain needed VREGs */
+ if (pdata->power_name)
+ hi->vreg_h2w = vreg_get(0, pdata->power_name);
+
+ mutex_init(&hi->mutex_lock);
+
+ hi->sdev.name = "h2w";
+ hi->sdev.print_name = h2w_print_name;
+
+ ret = switch_dev_register(&hi->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ g_detection_work_queue = create_workqueue("detection");
+ if (g_detection_work_queue == NULL) {
+ ret = -ENOMEM;
+ goto err_create_work_queue;
+ }
+
+ ret = gpio_request(hi->cable_in1, "h2w_detect");
+ if (ret < 0)
+ goto err_request_detect_gpio;
+
+ ret = gpio_request(hi->cable_in2, "h2w_button");
+ if (ret < 0)
+ goto err_request_button_gpio;
+
+ ret = gpio_direction_input(hi->cable_in1);
+ if (ret < 0)
+ goto err_set_detect_gpio;
+
+ ret = gpio_direction_input(hi->cable_in2);
+ if (ret < 0)
+ goto err_set_button_gpio;
+
+ hi->irq = gpio_to_irq(hi->cable_in1);
+ if (hi->irq < 0) {
+ ret = hi->irq;
+ goto err_get_h2w_detect_irq_num_failed;
+ }
+
+ hi->irq_btn = gpio_to_irq(hi->cable_in2);
+ if (hi->irq_btn < 0) {
+ ret = hi->irq_btn;
+ goto err_get_button_irq_num_failed;
+ }
+
+ /* Set CPLD MUX to H2W <-> CPLD GPIO */
+ hi->init_cpld();
+
+ hrtimer_init(&hi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hi->timer.function = detect_event_timer_func;
+ hrtimer_init(&hi->btn_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hi->btn_timer.function = button_event_timer_func;
+
+ ret = request_irq(hi->irq, detect_irq_handler,
+ IRQF_TRIGGER_LOW, "h2w_detect", NULL);
+ if (ret < 0)
+ goto err_request_detect_irq;
+
+ /* Disable button until plugged in */
+ set_irq_flags(hi->irq_btn, IRQF_VALID | IRQF_NOAUTOEN);
+ ret = request_irq(hi->irq_btn, button_irq_handler,
+ IRQF_TRIGGER_LOW, "h2w_button", NULL);
+ if (ret < 0)
+ goto err_request_h2w_headset_button_irq;
+
+ ret = set_irq_wake(hi->irq, 1);
+ if (ret < 0)
+ goto err_request_input_dev;
+
+ ret = set_irq_wake(hi->irq_btn, 1);
+ if (ret < 0)
+ goto err_request_input_dev;
+
+
+
+ hi->input = input_allocate_device();
+ if (!hi->input) {
+ ret = -ENOMEM;
+ goto err_request_input_dev;
+ }
+
+ hi->input->name = "h2w headset";
+ set_bit(EV_SYN, hi->input->evbit);
+ set_bit(EV_KEY, hi->input->evbit);
+ set_bit(KEY_MEDIA, hi->input->keybit);
+ set_bit(KEY_NEXTSONG, hi->input->keybit);
+ set_bit(KEY_PLAYPAUSE, hi->input->keybit);
+ set_bit(KEY_PREVIOUSSONG, hi->input->keybit);
+ set_bit(KEY_MUTE, hi->input->keybit);
+ set_bit(KEY_VOLUMEUP, hi->input->keybit);
+ set_bit(KEY_VOLUMEDOWN, hi->input->keybit);
+ set_bit(KEY_END, hi->input->keybit);
+ set_bit(KEY_SEND, hi->input->keybit);
+
+ ret = input_register_device(hi->input);
+ if (ret < 0)
+ goto err_register_input_dev;
+
+ return 0;
+
+err_register_input_dev:
+ input_free_device(hi->input);
+err_request_input_dev:
+ free_irq(hi->irq_btn, 0);
+err_request_h2w_headset_button_irq:
+ free_irq(hi->irq, 0);
+err_request_detect_irq:
+err_get_button_irq_num_failed:
+err_get_h2w_detect_irq_num_failed:
+err_set_button_gpio:
+err_set_detect_gpio:
+ gpio_free(hi->cable_in2);
+err_request_button_gpio:
+ gpio_free(hi->cable_in1);
+err_request_detect_gpio:
+ destroy_workqueue(g_detection_work_queue);
+err_create_work_queue:
+ switch_dev_unregister(&hi->sdev);
+err_switch_dev_register:
+ printk(KERN_ERR "H2W: Failed to register driver\n");
+
+ return ret;
+}
+
+static int h2w_remove(struct platform_device *pdev)
+{
+ H2W_DBG("");
+ if (switch_get_state(&hi->sdev))
+ remove_headset();
+ input_unregister_device(hi->input);
+ gpio_free(hi->cable_in2);
+ gpio_free(hi->cable_in1);
+ free_irq(hi->irq_btn, 0);
+ free_irq(hi->irq, 0);
+ destroy_workqueue(g_detection_work_queue);
+ switch_dev_unregister(&hi->sdev);
+
+ return 0;
+}
+
+
+static struct platform_driver h2w_driver = {
+ .probe = h2w_probe,
+ .remove = h2w_remove,
+ .driver = {
+ .name = "h2w",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init h2w_init(void)
+{
+ H2W_DBG("");
+ return platform_driver_register(&h2w_driver);
+}
+
+static void __exit h2w_exit(void)
+{
+ platform_driver_unregister(&h2w_driver);
+}
+
+module_init(h2w_init);
+module_exit(h2w_exit);
+
+MODULE_AUTHOR("Laurence Chen <Laurence_Chen@htc.com>");
+MODULE_DESCRIPTION("HTC 2 Wire detection driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/htc_pwrsink.c b/arch/arm/mach-msm/htc_pwrsink.c
new file mode 100644
index 000000000000..2ec2c7f4bb1b
--- /dev/null
+++ b/arch/arm/mach-msm/htc_pwrsink.c
@@ -0,0 +1,281 @@
+/* arch/arm/mach-msm/htc_pwrsink.c
+ *
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (C) 2008 Google, Inc.
+ * Author: San Mehat <san@google.com>
+ * Kant Kang <kant_kang@htc.com>
+ * Eiven Peng <eiven_peng@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/earlysuspend.h>
+#include <mach/msm_smd.h>
+#include <mach/htc_pwrsink.h>
+
+#include "smd_private.h"
+
+enum {
+ PWRSINK_DEBUG_CURR_CHANGE = 1U << 0,
+ PWRSINK_DEBUG_CURR_CHANGE_AUDIO = 1U << 1,
+};
+static int pwrsink_debug_mask;
+module_param_named(debug_mask, pwrsink_debug_mask, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int initialized;
+static unsigned audio_path = 1; /* HTC_SND_DEVICE_SPEAKER = 1 */
+static struct pwr_sink_audio audio_sink_array[PWRSINK_AUDIO_LAST + 1];
+static struct pwr_sink *sink_array[PWRSINK_LAST + 1];
+static DEFINE_SPINLOCK(sink_lock);
+static DEFINE_SPINLOCK(audio_sink_lock);
+static unsigned long total_sink;
+static uint32_t *smem_total_sink;
+
+int htc_pwrsink_set(pwrsink_id_type id, unsigned percent_utilized)
+{
+ unsigned long flags;
+
+ if (!smem_total_sink)
+ smem_total_sink = smem_alloc(SMEM_ID_VENDOR0, sizeof(uint32_t));
+
+ if (!initialized)
+ return -EAGAIN;
+
+ if (id < 0 || id > PWRSINK_LAST)
+ return -EINVAL;
+
+ spin_lock_irqsave(&sink_lock, flags);
+
+ if (!sink_array[id]) {
+ spin_unlock_irqrestore(&sink_lock, flags);
+ return -ENOENT;
+ }
+
+ if (sink_array[id]->percent_util == percent_utilized) {
+ spin_unlock_irqrestore(&sink_lock, flags);
+ return 0;
+ }
+
+ total_sink -= (sink_array[id]->ua_max *
+ sink_array[id]->percent_util / 100);
+ sink_array[id]->percent_util = percent_utilized;
+ total_sink += (sink_array[id]->ua_max *
+ sink_array[id]->percent_util / 100);
+
+ if (smem_total_sink)
+ *smem_total_sink = total_sink / 1000;
+
+ pr_debug("htc_pwrsink: ID %d, Util %d%%, Total %lu uA %s\n",
+ id, percent_utilized, total_sink,
+ smem_total_sink ? "SET" : "");
+
+ spin_unlock_irqrestore(&sink_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(htc_pwrsink_set);
+
+static void compute_audio_current(void)
+{
+ /* unsigned long flags; */
+ unsigned max_percent = 0;
+ int i, active_audio_sinks = 0;
+ pwrsink_audio_id_type last_active_audio_sink = 0;
+
+ /* Make sure this segment will be spinlocked
+ before computing by calling function. */
+ /* spin_lock_irqsave(&audio_sink_lock, flags); */
+ for (i = 0; i <= PWRSINK_AUDIO_LAST; ++i) {
+ max_percent = (audio_sink_array[i].percent > max_percent) ?
+ audio_sink_array[i].percent : max_percent;
+ if (audio_sink_array[i].percent > 0) {
+ active_audio_sinks++;
+ last_active_audio_sink = i;
+ }
+ }
+ if (active_audio_sinks == 0)
+ htc_pwrsink_set(PWRSINK_AUDIO, 0);
+ else if (active_audio_sinks == 1) {
+ pwrsink_audio_id_type laas = last_active_audio_sink;
+ /* TODO: add volume and routing path current. */
+ if (audio_path == 1) /* Speaker */
+ htc_pwrsink_set(PWRSINK_AUDIO,
+ audio_sink_array[laas].percent);
+ else
+ htc_pwrsink_set(PWRSINK_AUDIO,
+ audio_sink_array[laas].percent * 9 / 10);
+ } else if (active_audio_sinks > 1) {
+ /* TODO: add volume and routing path current. */
+ if (audio_path == 1) /* Speaker */
+ htc_pwrsink_set(PWRSINK_AUDIO, max_percent);
+ else
+ htc_pwrsink_set(PWRSINK_AUDIO, max_percent * 9 / 10);
+ }
+ /* spin_unlock_irqrestore(&audio_sink_lock, flags); */
+
+ if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO)
+ pr_info("%s: active_audio_sinks=%d, audio_path=%d\n", __func__,
+ active_audio_sinks, audio_path);
+}
+
+int htc_pwrsink_audio_set(pwrsink_audio_id_type id, unsigned percent_utilized)
+{
+ unsigned long flags;
+
+ if (id < 0 || id > PWRSINK_AUDIO_LAST)
+ return -EINVAL;
+
+ if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO)
+ pr_info("%s: id=%d, percent=%d, percent_old=%d\n", __func__,
+ id, percent_utilized, audio_sink_array[id].percent);
+
+ spin_lock_irqsave(&audio_sink_lock, flags);
+ if (audio_sink_array[id].percent == percent_utilized) {
+ spin_unlock_irqrestore(&audio_sink_lock, flags);
+ return 0;
+ }
+ audio_sink_array[id].percent = percent_utilized;
+ spin_unlock_irqrestore(&audio_sink_lock, flags);
+ compute_audio_current();
+ return 0;
+}
+EXPORT_SYMBOL(htc_pwrsink_audio_set);
+
+int htc_pwrsink_audio_volume_set(pwrsink_audio_id_type id, unsigned volume)
+{
+ unsigned long flags;
+
+ if (id < 0 || id > PWRSINK_AUDIO_LAST)
+ return -EINVAL;
+
+ if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO)
+ pr_info("%s: id=%d, volume=%d, volume_old=%d\n", __func__,
+ id, volume, audio_sink_array[id].volume);
+
+ spin_lock_irqsave(&audio_sink_lock, flags);
+ if (audio_sink_array[id].volume == volume) {
+ spin_unlock_irqrestore(&audio_sink_lock, flags);
+ return 0;
+ }
+ audio_sink_array[id].volume = volume;
+ spin_unlock_irqrestore(&audio_sink_lock, flags);
+ compute_audio_current();
+ return 0;
+}
+EXPORT_SYMBOL(htc_pwrsink_audio_volume_set);
+
+int htc_pwrsink_audio_path_set(unsigned path)
+{
+ unsigned long flags;
+
+ if (pwrsink_debug_mask & PWRSINK_DEBUG_CURR_CHANGE_AUDIO)
+ pr_info("%s: path=%d, path_old=%d\n",
+ __func__, path, audio_path);
+
+ spin_lock_irqsave(&audio_sink_lock, flags);
+ if (audio_path == path) {
+ spin_unlock_irqrestore(&audio_sink_lock, flags);
+ return 0;
+ }
+ audio_path = path;
+ spin_unlock_irqrestore(&audio_sink_lock, flags);
+ compute_audio_current();
+ return 0;
+}
+EXPORT_SYMBOL(htc_pwrsink_audio_path_set);
+
+void htc_pwrsink_suspend_early(struct early_suspend *h)
+{
+ htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 70);
+}
+
+int htc_pwrsink_suspend_late(struct platform_device *pdev, pm_message_t state)
+{
+ struct pwr_sink_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata && pdata->suspend_late)
+ pdata->suspend_late(pdev, state);
+ else
+ htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 13);
+ return 0;
+}
+
+int htc_pwrsink_resume_early(struct platform_device *pdev)
+{
+ struct pwr_sink_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata && pdata->resume_early)
+ pdata->resume_early(pdev);
+ else
+ htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 70);
+ return 0;
+}
+
+void htc_pwrsink_resume_late(struct early_suspend *h)
+{
+ htc_pwrsink_set(PWRSINK_SYSTEM_LOAD, 100);
+}
+
+struct early_suspend htc_pwrsink_early_suspend = {
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+ .suspend = htc_pwrsink_suspend_early,
+ .resume = htc_pwrsink_resume_late,
+};
+
+static int __init htc_pwrsink_probe(struct platform_device *pdev)
+{
+ struct pwr_sink_platform_data *pdata = pdev->dev.platform_data;
+ int i;
+
+ if (!pdata)
+ return -EINVAL;
+
+ total_sink = 0;
+ for (i = 0; i < pdata->num_sinks; i++) {
+ sink_array[pdata->sinks[i].id] = &pdata->sinks[i];
+ total_sink += (pdata->sinks[i].ua_max *
+ pdata->sinks[i].percent_util / 100);
+ }
+
+ initialized = 1;
+
+ if (pdata->suspend_early)
+ htc_pwrsink_early_suspend.suspend = pdata->suspend_early;
+ if (pdata->resume_late)
+ htc_pwrsink_early_suspend.resume = pdata->resume_late;
+ register_early_suspend(&htc_pwrsink_early_suspend);
+
+ return 0;
+}
+
+static struct platform_driver htc_pwrsink_driver = {
+ .probe = htc_pwrsink_probe,
+ .suspend_late = htc_pwrsink_suspend_late,
+ .resume_early = htc_pwrsink_resume_early,
+ .driver = {
+ .name = "htc_pwrsink",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init htc_pwrsink_init(void)
+{
+ initialized = 0;
+ memset(sink_array, 0, sizeof(sink_array));
+ return platform_driver_register(&htc_pwrsink_driver);
+}
+
+module_init(htc_pwrsink_init);
diff --git a/arch/arm/mach-msm/htc_wifi_nvs.c b/arch/arm/mach-msm/htc_wifi_nvs.c
new file mode 100644
index 000000000000..95b8c3bbae9d
--- /dev/null
+++ b/arch/arm/mach-msm/htc_wifi_nvs.c
@@ -0,0 +1,56 @@
+/* arch/arm/mach-msm/htc_wifi_nvs.c
+ *
+ * Code to extract WiFi calibration information from ATAG set up
+ * by the bootloader.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Dmitry Shmidt <dimitrysh@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/setup.h>
+
+/* configuration tags specific to msm */
+#define ATAG_MSM_WIFI 0x57494649 /* MSM WiFi */
+
+#define MAX_NVS_SIZE 0x800U
+static unsigned char wifi_nvs_ram[MAX_NVS_SIZE];
+
+unsigned char *get_wifi_nvs_ram( void )
+{
+ return( wifi_nvs_ram );
+}
+EXPORT_SYMBOL(get_wifi_nvs_ram);
+
+static int __init parse_tag_msm_wifi(const struct tag *tag)
+{
+ unsigned char *dptr = (unsigned char *)(&tag->u);
+ unsigned size;
+
+ size = min((tag->hdr.size - 2) * sizeof(__u32), MAX_NVS_SIZE);
+#ifdef ATAG_MSM_WIFI_DEBUG
+ unsigned i;
+
+ printk("WiFi Data size = %d , 0x%x\n", tag->hdr.size, tag->hdr.tag);
+ for(i=0;( i < size );i++) {
+ printk("%02x ", *dptr++);
+ }
+#endif
+ memcpy( (void *)wifi_nvs_ram, (void *)dptr, size );
+ return 0;
+}
+
+__tagtable(ATAG_MSM_WIFI, parse_tag_msm_wifi);
diff --git a/arch/arm/mach-msm/idle-v6.S b/arch/arm/mach-msm/idle-v6.S
new file mode 100644
index 000000000000..1c74c6436a4e
--- /dev/null
+++ b/arch/arm/mach-msm/idle-v6.S
@@ -0,0 +1,177 @@
+/*
+ * Idle processing for ARMv6-based Qualcomm SoCs.
+ * Work around bugs with SWFI.
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ENTRY(msm_arch_idle)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 0 /* flush entire data cache */
+ mrc p15, 0, r1, c1, c0, 0 /* read current CR */
+ bic r0, r1, #(1 << 2) /* clear dcache bit */
+ bic r0, r0, #(1 << 12) /* clear icache bit */
+ mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 /* dsb */
+ mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
+
+ mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */
+ mcr p15, 0, r0, c7, c5, 4 /* isb */
+
+ mov pc, lr
+
+ENTRY(msm_pm_collapse)
+ ldr r0, =saved_state
+ stmia r0!, {r4-r14}
+
+#if defined(CONFIG_MSM_FIQ_SUPPORT)
+ cpsid f
+#endif
+ mrc p15, 0, r1, c1, c0, 0 /* MMU control */
+ mrc p15, 0, r2, c2, c0, 0 /* ttb */
+ mrc p15, 0, r3, c3, c0, 0 /* dacr */
+ mrc p15, 0, ip, c13, c0, 1 /* context ID */
+ stmia r0!, {r1-r3, ip}
+#if defined(CONFIG_OPROFILE)
+ mrc p15, 0, r1, c15, c12, 0 /* pmnc */
+ mrc p15, 0, r2, c15, c12, 1 /* ccnt */
+ mrc p15, 0, r3, c15, c12, 2 /* pmn0 */
+ mrc p15, 0, ip, c15, c12, 3 /* pmn1 */
+ stmia r0!, {r1-r3, ip}
+#endif
+ mrc p15, 0, r1, c1, c0, 2 /* read CACR */
+ stmia r0!, {r1}
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 0 /* flush entire data cache */
+ mrc p15, 0, r1, c1, c0, 0 /* read current CR */
+ bic r0, r1, #(1 << 2) /* clear dcache bit */
+ bic r0, r0, #(1 << 12) /* clear icache bit */
+ mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 /* dsb */
+ mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
+
+ mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */
+ mcr p15, 0, r0, c7, c5, 4 /* isb */
+
+#if defined(CONFIG_MSM_FIQ_SUPPORT)
+ cpsie f
+#endif
+
+ ldr r0, =saved_state /* restore registers */
+ ldmfd r0, {r4-r14}
+ mov r0, #0 /* return power collapse failed */
+ mov pc, lr
+
+ENTRY(msm_pm_collapse_exit)
+#if 0 /* serial debug */
+ mov r0, #0x80000016
+ mcr p15, 0, r0, c15, c2, 4
+ mov r0, #0xA9000000
+ add r0, r0, #0x00A00000 /* UART1 */
+ /*add r0, r0, #0x00C00000*/ /* UART3 */
+ mov r1, #'A'
+ str r1, [r0, #0x00C]
+#endif
+ ldr r1, =saved_state_end
+ ldr r2, =msm_pm_collapse_exit
+ adr r3, msm_pm_collapse_exit
+ add r1, r1, r3
+ sub r1, r1, r2
+
+ ldmdb r1!, {r2}
+ mcr p15, 0, r2, c1, c0, 2 /* restore CACR */
+#if defined(CONFIG_OPROFILE)
+ ldmdb r1!, {r2-r5}
+ mcr p15, 0, r3, c15, c12, 1 /* ccnt */
+ mcr p15, 0, r4, c15, c12, 2 /* pmn0 */
+ mcr p15, 0, r5, c15, c12, 3 /* pmn1 */
+ mcr p15, 0, r2, c15, c12, 0 /* pmnc */
+#endif
+ ldmdb r1!, {r2-r5}
+ mcr p15, 0, r4, c3, c0, 0 /* dacr */
+ mcr p15, 0, r3, c2, c0, 0 /* ttb */
+ mcr p15, 0, r5, c13, c0, 1 /* context ID */
+ mov r0, #0
+ mcr p15, 0, r0, c7, c5, 4 /* isb */
+ ldmdb r1!, {r4-r14}
+
+ /* Add 1:1 map in the PMD to allow smooth switch when turning on MMU */
+ and r3, r3, #~0x7F /* mask off lower 7 bits of TTB */
+ adr r0, msm_pm_mapped_pa /* get address of the mapped instr */
+ lsr r1, r0, #20 /* get the addr range of addr in MB */
+ lsl r1, r1, #2 /* multiply by 4 to get to the pg index */
+ add r3, r3, r1 /* pgd + pgd_index(addr) */
+ ldr r1, [r3] /* save current entry to r1 */
+ lsr r0, #20 /* align current addr to 1MB boundary */
+ lsl r0, #20
+ /* Create new entry for this 1MB page */
+ orr r0, r0, #0x400 /* PMD_SECT_AP_WRITE */
+ orr r0, r0, #0x2 /* PMD_TYPE_SECT|PMD_DOMAIN(DOMAIN_KERNEL) */
+ str r0, [r3] /* put new entry into the MMU table */
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 4 /* dsb */
+ mcr p15, 0, r2, c1, c0, 0 /* MMU control */
+ mcr p15, 0, r0, c7, c5, 4 /* isb */
+msm_pm_mapped_pa:
+ /* Switch to virtual */
+ adr r2, msm_pm_pa_to_va
+ ldr r0, =msm_pm_pa_to_va
+ mov pc, r0
+msm_pm_pa_to_va:
+ sub r0, r0, r2
+ /* Restore r1 in MMU table */
+ add r3, r3, r0
+ str r1, [r3]
+
+ mov r0, #0
+ mcr p15, 0, r0, c7, c10, 0 /* flush entire data cache */
+ mcr p15, 0, r0, c7, c10, 4 /* dsb */
+ mcr p15, 0, r0, c7, c5, 4 /* isb */
+ mcr p15, 0, r0, c8, c7, 0 /* invalidate entire unified TLB */
+ mcr p15, 0, r0, c7, c5, 6 /* invalidate entire branch target
+ * cache */
+ mcr p15, 0, r0, c7, c7, 0 /* invalidate both data and instruction
+ * cache */
+ mcr p15, 0, r0, c7, c10, 4 /* dsb */
+ mcr p15, 0, r0, c7, c5, 4 /* isb */
+
+ mov r0, #1
+ mov pc, lr
+ nop
+ nop
+ nop
+ nop
+ nop
+1: b 1b
+
+
+ .data
+
+saved_state:
+ .space 4 * 11 /* r4-14 */
+ .space 4 * 4 /* cp15 - MMU control, ttb, dacr, context ID */
+#if defined(CONFIG_OPROFILE)
+ .space 4 * 4 /* more cp15 - pmnc, ccnt, pmn0, pmn1 */
+#endif
+ .space 4 /* cacr */
+saved_state_end:
+
diff --git a/arch/arm/mach-msm/idle-v7.S b/arch/arm/mach-msm/idle-v7.S
new file mode 100644
index 000000000000..588062813e78
--- /dev/null
+++ b/arch/arm/mach-msm/idle-v7.S
@@ -0,0 +1,175 @@
+/*
+ * Idle processing for ARMv7-based Qualcomm SoCs.
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ENTRY(msm_arch_idle)
+ wfi
+ bx lr
+
+ENTRY(msm_pm_collapse)
+#if defined(CONFIG_MSM_FIQ_SUPPORT)
+ cpsid f
+#endif
+
+ ldr r0, =saved_state
+ stmia r0!, {r4-r14}
+ mrc p15, 0, r1, c1, c0, 0 /* MMU control */
+ mrc p15, 0, r2, c2, c0, 0 /* TTBR0 */
+ mrc p15, 0, r3, c3, c0, 0 /* dacr */
+ mrc p15, 3, r4, c15, c0, 3 /* L2CR1 is the L2 cache control reg 1 */
+ mrc p15, 0, r5, c10, c2, 0 /* PRRR */
+ mrc p15, 0, r6, c10, c2, 1 /* NMRR */
+ mrc p15, 0, r7, c1, c0, 1 /* ACTLR */
+ mrc p15, 0, r8, c2, c0, 1 /* TTBR1 */
+ mrc p15, 0, r9, c13, c0, 3 /* TPIDRURO */
+ mrc p15, 0, ip, c13, c0, 1 /* context ID */
+ stmia r0!, {r1-r9, ip}
+#ifdef CONFIG_MSM_CPU_AVS
+ mrc p15, 7, r1, c15, c1, 7 /* AVSCSR is the Adaptive Voltage Scaling
+ * Control and Status Register */
+ mrc p15, 7, r2, c15, c0, 6 /* AVSDSCR is the Adaptive Voltage
+ * Scaling Delay Synthesizer Control
+ * Register */
+ mrc p15, 7, r3, c15, c1, 0 /* TSCSR is the Temperature Status and
+ * Control Register
+ */
+ stmia r0!, {r1-r3}
+#endif
+
+#ifdef CONFIG_MSM_JTAG_V7
+ bl msm_save_jtag_debug
+#endif
+ bl v7_flush_dcache_all
+
+ mrc p15, 0, r1, c1, c0, 0 /* read current CR */
+ bic r0, r1, #(1 << 2) /* clear dcache bit */
+ bic r0, r0, #(1 << 12) /* clear icache bit */
+ mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
+
+ dsb
+ wfi
+
+ mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */
+ isb
+
+#if defined(CONFIG_MSM_FIQ_SUPPORT)
+ cpsie f
+#endif
+
+ ldr r0, =saved_state /* restore registers */
+ ldmfd r0, {r4-r14}
+ mov r0, #0 /* return power collapse failed */
+ bx lr
+
+ENTRY(msm_pm_collapse_exit)
+#if 0 /* serial debug */
+ mov r0, #0x80000016
+ mcr p15, 0, r0, c15, c2, 4
+ mov r0, #0xA9000000
+ add r0, r0, #0x00A00000 /* UART1 */
+ /*add r0, r0, #0x00C00000*/ /* UART3 */
+ mov r1, #'A'
+ str r1, [r0, #0x00C]
+#endif
+ ldr r1, =saved_state_end
+ ldr r2, =msm_pm_collapse_exit
+ adr r3, msm_pm_collapse_exit
+ add r1, r1, r3
+ sub r1, r1, r2
+#ifdef CONFIG_MSM_CPU_AVS
+ ldmdb r1!, {r2-r4}
+ mcr p15, 7, r4, c15, c1, 0 /* TSCSR */
+ mcr p15, 7, r3, c15, c0, 6 /* AVSDSCR */
+ mcr p15, 7, r2, c15, c1, 7 /* AVSCSR */
+#endif
+ ldmdb r1!, {r2-r11}
+ mcr p15, 0, r4, c3, c0, 0 /* dacr */
+ mcr p15, 0, r3, c2, c0, 0 /* TTBR0 */
+ mcr p15, 3, r5, c15, c0, 3 /* L2CR1 */
+ mcr p15, 0, r6, c10, c2, 0 /* PRRR */
+ mcr p15, 0, r7, c10, c2, 1 /* NMRR */
+ mcr p15, 0, r8, c1, c0, 1 /* ACTLR */
+ mcr p15, 0, r9, c2, c0, 1 /* TTBR1 */
+ mcr p15, 0, r10, c13, c0, 3 /* TPIDRURO */
+ mcr p15, 0, r11, c13, c0, 1 /* context ID */
+ isb
+ ldmdb r1!, {r4-r14}
+ /* Add 1:1 map in the PMD to allow smooth switch when turning on MMU */
+ and r3, r3, #~0x7F /* mask off lower 7 bits of TTB */
+ adr r0, msm_pm_mapped_pa /* get address of the mapped instr */
+ lsr r1, r0, #20 /* get the addr range of addr in MB */
+ lsl r1, r1, #2 /* multiply by 4 to get to the pg index */
+ add r3, r3, r1 /* pgd + pgd_index(addr) */
+ ldr r1, [r3] /* save current entry to r1 */
+ lsr r0, #20 /* align current addr to 1MB boundary */
+ lsl r0, #20
+ /* Create new entry for this 1MB page */
+ orr r0, r0, #0x4 /* PMD_SECT_BUFFERED */
+ orr r0, r0, #0x400 /* PMD_SECT_AP_WRITE */
+ orr r0, r0, #0x2 /* PMD_TYPE_SECT|PMD_DOMAIN(DOMAIN_KERNEL) */
+ str r0, [r3] /* put new entry into the MMU table */
+ mcr p15, 0, r3, c7, c10, 1 /* flush_pmd */
+ dsb
+ isb
+ mcr p15, 0, r2, c1, c0, 0 /* MMU control */
+ isb
+msm_pm_mapped_pa:
+ /* Switch to virtual */
+ adr r2, msm_pm_pa_to_va
+ ldr r0, =msm_pm_pa_to_va
+ mov pc, r0
+msm_pm_pa_to_va:
+ sub r0, r0, r2
+ /* Restore r1 in MMU table */
+ add r3, r3, r0
+ str r1, [r3]
+ mcr p15, 0, r3, c7, c10, 1 /* flush_pmd */
+ dsb
+ isb
+ mcr p15, 0, r3, c8, c7, 0 /* UTLBIALL */
+ mcr p15, 0, r3, c7, c5, 6 /* BPIALL */
+ dsb
+ isb
+ stmfd sp!, {lr}
+ bl v7_flush_kern_cache_all
+#ifdef CONFIG_MSM_JTAG_V7
+ bl msm_restore_jtag_debug
+#endif
+ ldmfd sp!, {lr}
+ mov r0, #1
+ bx lr
+ nop
+ nop
+ nop
+ nop
+ nop
+1: b 1b
+
+
+ .data
+
+saved_state:
+ .space 4 * 11 /* r4-14 */
+ .space 4 * 10 /* cp15 */
+#ifdef CONFIG_MSM_CPU_AVS
+ .space 4 * 3 /* AVS control registers */
+#endif
+saved_state_end:
+
+
diff --git a/arch/arm/mach-msm/idle.S b/arch/arm/mach-msm/idle.S
deleted file mode 100644
index 6a94f0527137..000000000000
--- a/arch/arm/mach-msm/idle.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/* arch/arm/mach-msm/include/mach/idle.S
- *
- * Idle processing for MSM7K - work around bugs with SWFI.
- *
- * Copyright (c) 2007 QUALCOMM Incorporated.
- * Copyright (C) 2007 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
-ENTRY(arch_idle)
-#ifdef CONFIG_MSM7X00A_IDLE
- mrc p15, 0, r1, c1, c0, 0 /* read current CR */
- bic r0, r1, #(1 << 2) /* clear dcache bit */
- bic r0, r0, #(1 << 12) /* clear icache bit */
- mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
-
- mov r0, #0 /* prepare wfi value */
- mcr p15, 0, r0, c7, c10, 0 /* flush the cache */
- mcr p15, 0, r0, c7, c10, 4 /* memory barrier */
- mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
-
- mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */
-#endif
- mov pc, lr
diff --git a/arch/arm/mach-msm/idle.h b/arch/arm/mach-msm/idle.h
new file mode 100644
index 000000000000..2e0371ed5b59
--- /dev/null
+++ b/arch/arm/mach-msm/idle.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_IDLE_H_
+#define _ARCH_ARM_MACH_MSM_IDLE_H_
+
+int msm_arch_idle(void);
+int msm_pm_collapse(void);
+void msm_pm_collapse_exit(void);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 264d62e519f3..163dffe87923 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/include/mach/board.h
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -18,6 +19,8 @@
#define __ASM_ARCH_MSM_BOARD_H
#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/clk.h>
/* platform device data structures */
@@ -27,12 +30,35 @@ struct msm_mddi_platform_data
unsigned has_vsync_irq:1;
};
+struct msm_acpu_clock_platform_data
+{
+ uint32_t acpu_switch_time_us;
+ uint32_t max_speed_delta_khz;
+ uint32_t vdd_switch_time_us;
+ unsigned long power_collapse_khz;
+ unsigned long wait_for_irq_khz;
+ unsigned int max_axi_khz;
+ unsigned int max_vdd;
+ int (*acpu_set_vdd) (int mvolts);
+};
+
/* common init routines for use by arch/arm/mach-msm/board-*.c */
void __init msm_add_devices(void);
void __init msm_map_common_io(void);
+void __init msm_map_qsd8x50_io(void);
+void __init msm_map_msm7x30_io(void);
+void __init msm_map_comet_io(void);
void __init msm_init_irq(void);
-void __init msm_init_gpio(void);
-void __init msm_clock_init(void);
+void __init msm_clock_init(struct clk *clock_tbl, unsigned num_clocks);
+void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *);
+
+#if defined(CONFIG_USB_FUNCTION_MSM_HSUSB)
+void msm_hsusb_set_vbus_state(int online);
+#else
+static inline void msm_hsusb_set_vbus_state(int online) {}
+#endif
+
+extern int msm_shared_ram_phys; /* defined in arch/arm/mach-msm/io.c */
#endif
diff --git a/arch/arm/mach-msm/include/mach/board_htc.h b/arch/arm/mach-msm/include/mach/board_htc.h
new file mode 100644
index 000000000000..b537c91b957a
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/board_htc.h
@@ -0,0 +1,78 @@
+/* arch/arm/mach-msm/include/mach/BOARD_HTC.h
+ * Copyright (C) 2007-2009 HTC Corporation.
+ * Author: Thomas Tsai <thomas_tsai@htc.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ASM_ARCH_MSM_BOARD_HTC_H
+#define __ASM_ARCH_MSM_BOARD_HTC_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <asm/setup.h>
+
+struct msm_pmem_setting{
+ resource_size_t pmem_start;
+ resource_size_t pmem_size;
+ resource_size_t pmem_adsp_start;
+ resource_size_t pmem_adsp_size;
+ resource_size_t pmem_gpu0_start;
+ resource_size_t pmem_gpu0_size;
+ resource_size_t pmem_gpu1_start;
+ resource_size_t pmem_gpu1_size;
+ resource_size_t pmem_camera_start;
+ resource_size_t pmem_camera_size;
+ resource_size_t ram_console_start;
+ resource_size_t ram_console_size;
+};
+
+enum {
+ MSM_SERIAL_UART1 = 0,
+ MSM_SERIAL_UART2,
+ MSM_SERIAL_UART3,
+#ifdef CONFIG_SERIAL_MSM_HS
+ MSM_SERIAL_UART1DM,
+ MSM_SERIAL_UART2DM,
+#endif
+ MSM_SERIAL_NUM,
+};
+
+
+/* common init routines for use by arch/arm/mach-msm/board-*.c */
+
+void __init msm_add_usb_devices(void (*phy_reset) (void));
+void __init msm_add_mem_devices(struct msm_pmem_setting *setting);
+void __init msm_init_pmic_vibrator(void);
+
+struct mmc_platform_data;
+int __init msm_add_sdcc_devices(unsigned int controller, struct mmc_platform_data *plat);
+int __init msm_add_serial_devices(unsigned uart);
+
+#if defined(CONFIG_USB_FUNCTION_MSM_HSUSB)
+/* START: add USB connected notify function */
+struct t_usb_status_notifier{
+ struct list_head notifier_link;
+ const char *name;
+ void (*func)(int online);
+};
+ int usb_register_notifier(struct t_usb_status_notifier *);
+ static LIST_HEAD(g_lh_usb_notifier_list);
+/* END: add USB connected notify function */
+#endif
+
+int __init board_mfg_mode(void);
+int __init parse_tag_smi(const struct tag *tags);
+int __init parse_tag_hwid(const struct tag * tags);
+int __init parse_tag_skuid(const struct tag * tags);
+int parse_tag_engineerid(const struct tag * tags);
+
+char *board_serialno(void);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/camera.h b/arch/arm/mach-msm/include/mach/camera.h
new file mode 100644
index 000000000000..220fca53c7e5
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/camera.h
@@ -0,0 +1,297 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM__ARCH_CAMERA_H
+#define __ASM__ARCH_CAMERA_H
+
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include "linux/types.h"
+
+#include <mach/board.h>
+#include <media/msm_camera.h>
+
+#undef CDBG
+#ifdef CAMERA_DBG_MSG
+#define CDBG(fmt, args...) printk(KERN_INFO "msm_camera: " fmt, ##args)
+#else
+#define CDBG(fmt, args...)
+#endif
+
+#define MSM_CAMERA_MSG 0
+#define MSM_CAMERA_EVT 1
+#define NUM_WB_EXP_NEUTRAL_REGION_LINES 4
+#define NUM_WB_EXP_STAT_OUTPUT_BUFFERS 3
+#define NUM_AUTOFOCUS_MULTI_WINDOW_GRIDS 16
+#define NUM_AF_STAT_OUTPUT_BUFFERS 3
+
+enum msm_queut_t {
+ MSM_CAM_Q_IVALID,
+ MSM_CAM_Q_CTRL,
+ MSM_CAM_Q_VFE_EVT,
+ MSM_CAM_Q_VFE_MSG,
+ MSM_CAM_Q_V4L2_REQ,
+
+ MSM_CAM_Q_MAX
+};
+
+enum vfe_resp_msg_t {
+ VFE_EVENT,
+ VFE_MSG_GENERAL,
+ VFE_MSG_SNAPSHOT,
+ VFE_MSG_OUTPUT1,
+ VFE_MSG_OUTPUT2,
+ VFE_MSG_STATS_AF,
+ VFE_MSG_STATS_WE,
+
+ VFE_MSG_INVALID
+};
+
+struct msm_vfe_phy_info {
+ uint32_t sbuf_phy;
+ uint32_t y_phy;
+ uint32_t cbcr_phy;
+};
+
+struct msm_vfe_resp_t {
+ enum vfe_resp_msg_t type;
+ struct msm_vfe_evt_msg_t evt_msg;
+ struct msm_vfe_phy_info phy;
+ void *extdata;
+ int32_t extlen;
+};
+
+struct msm_vfe_resp {
+ void (*vfe_resp)(struct msm_vfe_resp_t *,
+ enum msm_queut_t, void *syncdata);
+};
+
+struct msm_camvfe_fn_t {
+ int (*vfe_init) (struct msm_vfe_resp *, struct platform_device *);
+ int (*vfe_enable) (struct camera_enable_cmd_t *);
+ int (*vfe_config) (struct msm_vfe_cfg_cmd_t *, void *);
+ int (*vfe_disable) (struct camera_enable_cmd_t *,
+ struct platform_device *dev);
+ void (*vfe_release) (struct platform_device *);
+};
+
+struct msm_sensor_ctrl_t {
+ int (*s_init)(struct msm_camera_sensor_info *);
+ int (*s_release)(void);
+ int (*s_config)(void __user *);
+};
+
+struct msm_sync_t {
+ spinlock_t msg_event_queue_lock;
+ struct list_head msg_event_queue;
+ wait_queue_head_t msg_event_wait;
+
+ spinlock_t prev_frame_q_lock;
+ struct list_head prev_frame_q;
+ wait_queue_head_t prev_frame_wait;
+
+ spinlock_t pict_frame_q_lock;
+ struct list_head pict_frame_q;
+ wait_queue_head_t pict_frame_wait;
+
+ spinlock_t ctrl_status_lock;
+ struct list_head ctrl_status_queue;
+ wait_queue_head_t ctrl_status_wait;
+
+ struct hlist_head frame;
+ struct hlist_head stats;
+};
+
+struct msm_device_t {
+ struct msm_camvfe_fn_t vfefn;
+ struct device *device;
+ struct cdev cdev;
+ struct platform_device *pdev;
+
+ struct mutex msm_lock;
+ uint8_t opencnt;
+
+ const char *apps_id;
+
+ void *cropinfo;
+ int croplen;
+
+ struct mutex pict_pp_lock;
+ uint8_t pict_pp;
+
+ int sidx;
+ struct msm_sensor_ctrl_t sctrl;
+
+ struct mutex msm_sem;
+ struct msm_sync_t sync;
+};
+
+/* this structure is used in kernel */
+struct msm_queue_cmd_t {
+ struct list_head list;
+
+ /* 1 - control command or control command status;
+ * 2 - adsp event;
+ * 3 - adsp message;
+ * 4 - v4l2 request;
+ */
+ enum msm_queut_t type;
+ void *command;
+};
+
+struct register_address_value_pair_t {
+ uint16_t register_address;
+ uint16_t register_value;
+};
+
+struct msm_pmem_region {
+ struct hlist_node list;
+ enum msm_pmem_t type;
+ void *vaddr;
+ unsigned long paddr;
+ unsigned long len;
+ struct file *file;
+ uint32_t y_off;
+ uint32_t cbcr_off;
+ int fd;
+ uint8_t active;
+};
+
+struct axidata_t {
+ uint32_t bufnum1;
+ uint32_t bufnum2;
+ struct msm_pmem_region *region;
+};
+
+int32_t mt9d112_probe_init(void *, void *);
+int32_t mt9t013_probe_init(void *, void *);
+int32_t mt9p012_probe_init(void *, void *);
+int32_t s5k3e2fx_probe_init(void *, void *);
+
+int32_t flash_set_led_state(enum msm_camera_led_state_t led_state);
+
+/* Below functions are added for V4L2 kernel APIs */
+struct msm_driver {
+ struct msm_device_t *vmsm;
+ long (*init)(struct msm_device_t *);
+ long (*ctrl)(struct msm_ctrl_cmd_t *,
+ struct msm_device_t *);
+
+ long (*reg_pmem)(struct msm_pmem_info_t *,
+ struct msm_device_t *);
+
+ long (*get_frame) (struct msm_frame_t *,
+ struct msm_device_t *);
+
+ long (*put_frame) (struct msm_frame_t *,
+ struct msm_device_t *msm);
+
+ long (*get_pict) (struct msm_ctrl_cmd_t *,
+ struct msm_device_t *msm);
+
+ unsigned int (*drv_poll) (struct file *, struct poll_table_struct *,
+ struct msm_device_t *msm);
+};
+
+unsigned int msm_poll(struct file *, struct poll_table_struct *);
+
+long msm_register(struct msm_driver *,
+ const char *);
+long msm_unregister(struct msm_driver *,
+ const char *);
+
+void msm_camvfe_init(void);
+int msm_camvfe_check(void *);
+void msm_camvfe_fn_init(struct msm_camvfe_fn_t *);
+int msm_camera_drv_start(struct platform_device *);
+int msm_camera_drv_remove(struct platform_device *);
+
+enum msm_camio_clk_type {
+ CAMIO_VFE_MDC_CLK,
+ CAMIO_MDC_CLK,
+ CAMIO_VFE_CLK,
+ CAMIO_VFE_AXI_CLK,
+
+ CAMIO_MAX_CLK
+};
+
+enum msm_camio_clk_src_type {
+ MSM_CAMIO_CLK_SRC_INTERNAL,
+ MSM_CAMIO_CLK_SRC_EXTERNAL,
+ MSM_CAMIO_CLK_SRC_MAX
+};
+
+enum msm_s_test_mode_t {
+ S_TEST_OFF,
+ S_TEST_1,
+ S_TEST_2,
+ S_TEST_3
+};
+
+enum msm_s_resolution_t {
+ S_QTR_SIZE,
+ S_FULL_SIZE,
+ S_INVALID_SIZE
+};
+
+enum msm_s_reg_update_t {
+ /* Sensor egisters that need to be updated during initialization */
+ S_REG_INIT,
+ /* Sensor egisters that needs periodic I2C writes */
+ S_UPDATE_PERIODIC,
+ /* All the sensor Registers will be updated */
+ S_UPDATE_ALL,
+ /* Not valid update */
+ S_UPDATE_INVALID
+};
+
+enum msm_s_setting_t {
+ S_RES_PREVIEW,
+ S_RES_CAPTURE
+};
+
+int msm_camio_enable(struct platform_device *dev);
+
+int msm_camio_clk_enable(enum msm_camio_clk_type clk);
+int msm_camio_clk_disable(enum msm_camio_clk_type clk);
+int msm_camio_clk_config(uint32_t freq);
+void msm_camio_clk_rate_set(int rate);
+void msm_camio_clk_axi_rate_set(int rate);
+
+void msm_camio_camif_pad_reg_reset(void);
+void msm_camio_camif_pad_reg_reset_2(void);
+
+void msm_camio_vfe_blk_reset(void);
+
+void msm_camio_clk_sel(enum msm_camio_clk_src_type);
+void msm_camio_disable(struct platform_device *);
+int msm_camio_probe_on(struct platform_device *);
+int msm_camio_probe_off(struct platform_device *);
+#endif
diff --git a/arch/arm/mach-msm/include/mach/clk.h b/arch/arm/mach-msm/include/mach/clk.h
new file mode 100644
index 000000000000..ac70550cb41c
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/clk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#ifndef __MACH_CLK_H
+#define __MACH_CLK_H
+
+/* Magic rate value for use with PM QOS to request the board's maximum
+ * supported AXI rate. PM QOS will only pass positive s32 rate values
+ * through to the clock driver, so INT_MAX is used.
+ */
+#define MSM_AXI_MAX_FREQ LONG_MAX
+
+enum clk_reset_action {
+ CLK_RESET_DEASSERT = 0,
+ CLK_RESET_ASSERT = 1
+};
+
+struct clk;
+
+/* Rate is minimum clock rate in Hz */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/* Rate is maximum clock rate in Hz */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
+/* Assert/Deassert reset to a hardware block associated with a clock */
+int clk_reset(struct clk *clk, enum clk_reset_action action);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/dal.h b/arch/arm/mach-msm/include/mach/dal.h
new file mode 100644
index 000000000000..a864f1469063
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/dal.h
@@ -0,0 +1,155 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __DAL_H__
+#define __DAL_H__
+
+#include <linux/kernel.h>
+#include <mach/msm_smd.h>
+
+#define DALRPC_DEST_MODEM SMD_APPS_MODEM
+#define DALRPC_DEST_QDSP SMD_APPS_QDSP
+
+#define DALRPC_TIMEOUT_INFINITE -1
+
+enum {
+ DALDEVICE_ATTACH_IDX = 0,
+ DALDEVICE_DETACH_IDX,
+ DALDEVICE_INIT_IDX,
+ DALDEVICE_DEINIT_IDX,
+ DALDEVICE_OPEN_IDX,
+ DALDEVICE_CLOSE_IDX,
+ DALDEVICE_INFO_IDX,
+ DALDEVICE_POWEREVENT_IDX,
+ DALDEVICE_SYSREQUEST_IDX,
+ DALDEVICE_FIRST_DEVICE_API_IDX
+};
+
+struct daldevice_info_t {
+ uint32_t size;
+ uint32_t version;
+ char name[32];
+};
+
+int daldevice_attach(uint32_t device_id, char *port, int cpu,
+ void **handle_ptr);
+
+/* The caller must ensure there are no outstanding dalrpc calls on
+ * the client before (and while) calling daldevice_detach. */
+int daldevice_detach(void *handle);
+
+uint32_t dalrpc_fcn_0(uint32_t ddi_idx, void *handle, uint32_t s1);
+uint32_t dalrpc_fcn_1(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t s2);
+uint32_t dalrpc_fcn_2(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t *p_s2);
+uint32_t dalrpc_fcn_3(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t s2, uint32_t s3);
+uint32_t dalrpc_fcn_4(uint32_t ddi_idx, void *handle, uint32_t s1,
+ uint32_t s2, uint32_t *p_s3);
+uint32_t dalrpc_fcn_5(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen);
+uint32_t dalrpc_fcn_6(uint32_t ddi_idx, void *handle, uint32_t s1,
+ const void *ibuf, uint32_t ilen);
+uint32_t dalrpc_fcn_7(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, void *obuf, uint32_t olen,
+ uint32_t *oalen);
+uint32_t dalrpc_fcn_8(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, void *obuf, uint32_t olen);
+uint32_t dalrpc_fcn_9(uint32_t ddi_idx, void *handle, void *obuf,
+ uint32_t olen);
+uint32_t dalrpc_fcn_10(uint32_t ddi_idx, void *handle, uint32_t s1,
+ const void *ibuf, uint32_t ilen, void *obuf,
+ uint32_t olen, uint32_t *oalen);
+uint32_t dalrpc_fcn_11(uint32_t ddi_idx, void *handle, uint32_t s1,
+ void *obuf, uint32_t olen);
+uint32_t dalrpc_fcn_12(uint32_t ddi_idx, void *handle, uint32_t s1,
+ void *obuf, uint32_t olen, uint32_t *oalen);
+uint32_t dalrpc_fcn_13(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, const void *ibuf2, uint32_t ilen2,
+ void *obuf, uint32_t olen);
+uint32_t dalrpc_fcn_14(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, void *obuf1, uint32_t olen1,
+ void *obuf2, uint32_t olen2, uint32_t *oalen2);
+uint32_t dalrpc_fcn_15(uint32_t ddi_idx, void *handle, const void *ibuf,
+ uint32_t ilen, const void *ibuf2, uint32_t ilen2,
+ void *obuf, uint32_t olen, uint32_t *oalen,
+ void *obuf2, uint32_t olen2);
+
+static inline uint32_t daldevice_info(void *handle,
+ struct daldevice_info_t *info,
+ uint32_t info_size)
+{
+ return dalrpc_fcn_9(DALDEVICE_INFO_IDX, handle, info, info_size);
+}
+
+static inline uint32_t daldevice_sysrequest(void *handle, uint32_t req_id,
+ const void *src_ptr,
+ uint32_t src_len, void *dest_ptr,
+ uint32_t dest_len,
+ uint32_t *dest_alen)
+{
+ return dalrpc_fcn_10(DALDEVICE_SYSREQUEST_IDX, handle, req_id,
+ src_ptr, src_len, dest_ptr, dest_len, dest_alen);
+}
+
+static inline uint32_t daldevice_init(void *handle)
+{
+ return dalrpc_fcn_0(DALDEVICE_INIT_IDX, handle, 0);
+}
+
+static inline uint32_t daldevice_deinit(void *handle)
+{
+ return dalrpc_fcn_0(DALDEVICE_DEINIT_IDX, handle, 0);
+}
+
+static inline uint32_t daldevice_open(void *handle, uint32_t mode)
+{
+ return dalrpc_fcn_0(DALDEVICE_OPEN_IDX, handle, mode);
+}
+
+static inline uint32_t daldevice_close(void *handle)
+{
+ return dalrpc_fcn_0(DALDEVICE_CLOSE_IDX, handle, 0);
+}
+
+void *dalrpc_alloc_event(void *handle);
+void *dalrpc_alloc_cb(void *handle,
+ void (*fn)(void *, uint32_t, void *, uint32_t),
+ void *context);
+void dalrpc_dealloc_event(void *handle,
+ void *ev_h);
+void dalrpc_dealloc_cb(void *handle,
+ void *cb_h);
+
+#define dalrpc_event_wait(ev_h, timeout) \
+ dalrpc_event_wait_multiple(1, &ev_h, timeout)
+
+int dalrpc_event_wait_multiple(int num, void **ev_h, int timeout);
+
+#endif /* __DAL_H__ */
diff --git a/arch/arm/mach-msm/include/mach/dma.h b/arch/arm/mach-msm/include/mach/dma.h
index 5ab5bdffab07..7a2fc563333b 100644
--- a/arch/arm/mach-msm/include/mach/dma.h
+++ b/arch/arm/mach-msm/include/mach/dma.h
@@ -1,6 +1,7 @@
/* linux/include/asm-arm/arch-msm/dma.h
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -41,46 +42,55 @@ int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr);
#define DMOV_SD2(off, ch) (MSM_DMOV_BASE + 0x0800 + (off) + ((ch) << 2))
#define DMOV_SD3(off, ch) (MSM_DMOV_BASE + 0x0C00 + (off) + ((ch) << 2))
-/* only security domain 3 is available to the ARM11
- * SD0 -> mARM trusted, SD1 -> mARM nontrusted, SD2 -> aDSP, SD3 -> aARM
- */
+#if defined(CONFIG_ARCH_MSM7X30)
+#define DMOV_SD_AARM DMOV_SD2
+#else
+#define DMOV_SD_AARM DMOV_SD3
+#endif
-#define DMOV_CMD_PTR(ch) DMOV_SD3(0x000, ch)
+#define DMOV_CMD_PTR(ch) DMOV_SD_AARM(0x000, ch)
#define DMOV_CMD_LIST (0 << 29) /* does not work */
#define DMOV_CMD_PTR_LIST (1 << 29) /* works */
#define DMOV_CMD_INPUT_CFG (2 << 29) /* untested */
#define DMOV_CMD_OUTPUT_CFG (3 << 29) /* untested */
#define DMOV_CMD_ADDR(addr) ((addr) >> 3)
-#define DMOV_RSLT(ch) DMOV_SD3(0x040, ch)
+#define DMOV_RSLT(ch) DMOV_SD_AARM(0x040, ch)
#define DMOV_RSLT_VALID (1 << 31) /* 0 == host has empties result fifo */
#define DMOV_RSLT_ERROR (1 << 3)
#define DMOV_RSLT_FLUSH (1 << 2)
#define DMOV_RSLT_DONE (1 << 1) /* top pointer done */
#define DMOV_RSLT_USER (1 << 0) /* command with FR force result */
-#define DMOV_FLUSH0(ch) DMOV_SD3(0x080, ch)
-#define DMOV_FLUSH1(ch) DMOV_SD3(0x0C0, ch)
-#define DMOV_FLUSH2(ch) DMOV_SD3(0x100, ch)
-#define DMOV_FLUSH3(ch) DMOV_SD3(0x140, ch)
-#define DMOV_FLUSH4(ch) DMOV_SD3(0x180, ch)
-#define DMOV_FLUSH5(ch) DMOV_SD3(0x1C0, ch)
+#define DMOV_FLUSH0(ch) DMOV_SD_AARM(0x080, ch)
+#define DMOV_FLUSH1(ch) DMOV_SD_AARM(0x0C0, ch)
+#define DMOV_FLUSH2(ch) DMOV_SD_AARM(0x100, ch)
+#define DMOV_FLUSH3(ch) DMOV_SD_AARM(0x140, ch)
+#define DMOV_FLUSH4(ch) DMOV_SD_AARM(0x180, ch)
+#define DMOV_FLUSH5(ch) DMOV_SD_AARM(0x1C0, ch)
-#define DMOV_STATUS(ch) DMOV_SD3(0x200, ch)
+#define DMOV_STATUS(ch) DMOV_SD_AARM(0x200, ch)
#define DMOV_STATUS_RSLT_COUNT(n) (((n) >> 29))
#define DMOV_STATUS_CMD_COUNT(n) (((n) >> 27) & 3)
#define DMOV_STATUS_RSLT_VALID (1 << 1)
#define DMOV_STATUS_CMD_PTR_RDY (1 << 0)
+#define DMOV_ISR DMOV_SD_AARM(0x380, 0)
-#define DMOV_ISR DMOV_SD3(0x380, 0)
-
-#define DMOV_CONFIG(ch) DMOV_SD3(0x300, ch)
+#define DMOV_CONFIG(ch) DMOV_SD_AARM(0x300, ch)
#define DMOV_CONFIG_FORCE_TOP_PTR_RSLT (1 << 2)
#define DMOV_CONFIG_FORCE_FLUSH_RSLT (1 << 1)
#define DMOV_CONFIG_IRQ_EN (1 << 0)
/* channel assignments */
+#define DMOV_GP_CHAN 4
+
+#define DMOV_CE_IN_CHAN 5
+#define DMOV_CE_IN_CRCI 1
+
+#define DMOV_CE_OUT_CHAN 6
+#define DMOV_CE_OUT_CRCI 2
+
#define DMOV_NAND_CHAN 7
#define DMOV_NAND_CRCI_CMD 5
#define DMOV_NAND_CRCI_DATA 4
diff --git a/arch/arm/mach-msm/include/mach/dma_test.h b/arch/arm/mach-msm/include/mach/dma_test.h
new file mode 100644
index 000000000000..7f7dfe38a350
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/dma_test.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MSM_DMA_TEST__
+#define __MSM_DMA_TEST__
+
+#include <linux/ioctl.h>
+
+#define MSM_DMA_IOC_MAGIC 0x83
+
+/* The testing driver can manage a series of buffers. These are
+ * allocated and freed using these calls. */
+struct msm_dma_alloc_req {
+ int size; /* Size of this request, in bytes. */
+ int bufnum; /* OUT: Number of buffer allocated. */
+};
+#define MSM_DMA_IOALLOC _IOWR(MSM_DMA_IOC_MAGIC, 2, struct msm_dma_alloc_req)
+
+/* Free the specified buffer. */
+#define MSM_DMA_IOFREE _IOW(MSM_DMA_IOC_MAGIC, 3, int)
+
+/* Free all used buffers. */
+#define MSM_DMA_IOFREEALL _IO(MSM_DMA_IOC_MAGIC, 7)
+
+/* Read/write data into kernel buffer. */
+struct msm_dma_bufxfer {
+ void *data;
+ int size;
+ int bufnum;
+};
+#define MSM_DMA_IOWBUF _IOW(MSM_DMA_IOC_MAGIC, 4, struct msm_dma_bufxfer)
+#define MSM_DMA_IORBUF _IOW(MSM_DMA_IOC_MAGIC, 5, struct msm_dma_bufxfer)
+
+/* Use the data mover to copy from one buffer to another. */
+struct msm_dma_scopy {
+ int srcbuf;
+ int destbuf;
+ int size;
+};
+#define MSM_DMA_IOSCOPY _IOW(MSM_DMA_IOC_MAGIC, 6, struct msm_dma_scopy)
+
+#endif /* __MSM_DMA_TEST__ */
diff --git a/arch/arm/mach-msm/include/mach/fiq.h b/arch/arm/mach-msm/include/mach/fiq.h
new file mode 100644
index 000000000000..29a3ba1f33f3
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/fiq.h
@@ -0,0 +1,33 @@
+/* linux/include/asm-arm/arch-msm/irqs.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_FIQ_H
+#define __ASM_ARCH_MSM_FIQ_H
+
+/* cause an interrupt to be an FIQ instead of a regular IRQ */
+void msm_fiq_select(int number);
+void msm_fiq_unselect(int number);
+
+/* enable/disable an interrupt that is an FIQ (not safe from FIQ context) */
+void msm_fiq_enable(int number);
+void msm_fiq_disable(int number);
+
+/* install an FIQ handler */
+int msm_fiq_set_handler(void (*func)(void *data, void *regs), void *data);
+
+/* cause an edge triggered interrupt to fire (safe from FIQ context */
+void msm_trigger_irq(int number);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/gpio.h b/arch/arm/mach-msm/include/mach/gpio.h
new file mode 100644
index 000000000000..713afb930d2d
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/gpio.h
@@ -0,0 +1,167 @@
+/* linux/include/asm-arm/arch-msm/gpio.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_GPIO_H
+#define __ASM_ARCH_MSM_GPIO_H
+
+#include <linux/interrupt.h>
+
+/**
+ * struct msm_gpio - GPIO pin description
+ * @gpio_cfg - configuration bitmap, as per gpio_tlmm_config()
+ * @label - textual label
+ *
+ * Usually, GPIO's are operated by sets.
+ * This struct accumulate all GPIO information in single source
+ * and facilitete group operations provided by msm_gpios_xxx()
+ */
+struct msm_gpio {
+ u32 gpio_cfg;
+ const char *label;
+};
+
+/**
+ * msm_gpios_request_enable() - request and enable set of GPIOs
+ *
+ * Request and configure set of GPIO's
+ * In case of error, all operations rolled back.
+ * Return error code.
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+int msm_gpios_request_enable(const struct msm_gpio *table, int size);
+/**
+ * msm_gpios_disable_free() - disable and free set of GPIOs
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+void msm_gpios_disable_free(const struct msm_gpio *table, int size);
+/**
+ * msm_gpios_request() - request set of GPIOs
+ * In case of error, all operations rolled back.
+ * Return error code.
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+int msm_gpios_request(const struct msm_gpio *table, int size);
+/**
+ * msm_gpios_free() - free set of GPIOs
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+void msm_gpios_free(const struct msm_gpio *table, int size);
+/**
+ * msm_gpios_enable() - enable set of GPIOs
+ * In case of error, all operations rolled back.
+ * Return error code.
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+int msm_gpios_enable(const struct msm_gpio *table, int size);
+/**
+ * msm_gpios_disable() - disable set of GPIOs
+ *
+ * @table: GPIO table
+ * @size: number of entries in @table
+ */
+void msm_gpios_disable(const struct msm_gpio *table, int size);
+
+int gpio_request(unsigned gpio, const char *label);
+void gpio_free(unsigned gpio);
+int gpio_direction_input(unsigned gpio);
+int gpio_direction_output(unsigned gpio, int value);
+int gpio_get_value(unsigned gpio);
+void gpio_set_value(unsigned gpio, int value);
+int gpio_to_irq(unsigned gpio);
+
+#include <asm-generic/gpio.h>
+
+/* extended gpio api */
+
+#define GPIOF_IRQF_MASK 0x0000ffff /* use to specify edge detection without */
+#define GPIOF_IRQF_TRIGGER_NONE 0x00010000 /* IRQF_TRIGGER_NONE is 0 which also means "as already configured" */
+#define GPIOF_INPUT 0x00020000
+#define GPIOF_DRIVE_OUTPUT 0x00040000
+#define GPIOF_OUTPUT_LOW 0x00080000
+#define GPIOF_OUTPUT_HIGH 0x00100000
+
+#define GPIOIRQF_SHARED 0x00000001 /* the irq line is shared with other inputs */
+
+extern int gpio_configure(unsigned int gpio, unsigned long flags);
+extern int gpio_read_detect_status(unsigned int gpio);
+extern int gpio_clear_detect_status(unsigned int gpio);
+
+/* GPIO TLMM (Top Level Multiplexing) Definitions */
+
+/* GPIO TLMM: Function -- GPIO specific */
+
+/* GPIO TLMM: Direction */
+enum {
+ GPIO_INPUT,
+ GPIO_OUTPUT,
+};
+
+/* GPIO TLMM: Pullup/Pulldown */
+enum {
+ GPIO_NO_PULL,
+ GPIO_PULL_DOWN,
+ GPIO_KEEPER,
+ GPIO_PULL_UP,
+};
+
+/* GPIO TLMM: Drive Strength */
+enum {
+ GPIO_2MA,
+ GPIO_4MA,
+ GPIO_6MA,
+ GPIO_8MA,
+ GPIO_10MA,
+ GPIO_12MA,
+ GPIO_14MA,
+ GPIO_16MA,
+};
+
+enum {
+ GPIO_ENABLE,
+ GPIO_DISABLE,
+};
+
+#define GPIO_CFG(gpio, func, dir, pull, drvstr) \
+ ((((gpio) & 0x3FF) << 4) | \
+ ((func) & 0xf) | \
+ (((dir) & 0x1) << 14) | \
+ (((pull) & 0x3) << 15) | \
+ (((drvstr) & 0xF) << 17))
+
+/**
+ * extract GPIO pin from bit-field used for gpio_tlmm_config
+ */
+#define GPIO_PIN(gpio_cfg) (((gpio_cfg) >> 4) & 0x3ff)
+#define GPIO_FUNC(gpio_cfg) (((gpio_cfg) >> 0) & 0xf)
+#define GPIO_DIR(gpio_cfg) (((gpio_cfg) >> 14) & 0x1)
+#define GPIO_PULL(gpio_cfg) (((gpio_cfg) >> 15) & 0x3)
+#define GPIO_DRVSTR(gpio_cfg) (((gpio_cfg) >> 17) & 0xf)
+
+int gpio_tlmm_config(unsigned config, unsigned disable);
+
+#endif
+
diff --git a/arch/arm/mach-msm/include/mach/htc_headset.h b/arch/arm/mach-msm/include/mach/htc_headset.h
new file mode 100644
index 000000000000..2f4c18db2625
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/htc_headset.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2008 HTC, Inc.
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_HTC_HEADSET_H
+#define __ASM_ARCH_HTC_HEADSET_H
+
+struct h2w_platform_data {
+ char *power_name;
+ int cable_in1;
+ int cable_in2;
+ int h2w_clk;
+ int h2w_data;
+ int debug_uart;
+ void (*config_cpld)(int);
+ void (*init_cpld)(void);
+ void (*set_dat)(int);
+ void (*set_clk)(int);
+ void (*set_dat_dir)(int);
+ void (*set_clk_dir)(int);
+ int (*get_dat)(void);
+ int (*get_clk)(void);
+};
+
+#define BIT_HEADSET (1 << 0)
+#define BIT_HEADSET_NO_MIC (1 << 1)
+#define BIT_TTY (1 << 2)
+#define BIT_FM_HEADSET (1 << 3)
+#define BIT_FM_SPEAKER (1 << 4)
+
+enum {
+ H2W_NO_DEVICE = 0,
+ H2W_HTC_HEADSET = 1,
+/* H2W_TTY_DEVICE = 2,*/
+ NORMAL_HEARPHONE= 2,
+ H2W_DEVICE = 3,
+ H2W_USB_CRADLE = 4,
+ H2W_UART_DEBUG = 5,
+};
+
+enum {
+ H2W_GPIO = 0,
+ H2W_UART1 = 1,
+ H2W_UART3 = 2,
+ H2W_BT = 3
+};
+
+#define RESEND_DELAY (3) /* ms */
+#define MAX_ACK_RESEND_TIMES (6) /* follow spec */
+#define MAX_HOST_RESEND_TIMES (3) /* follow spec */
+#define MAX_HYGEIA_RESEND_TIMES (5)
+
+#define H2W_ASCR_DEVICE_INI (0x01)
+#define H2W_ASCR_ACT_EN (0x02)
+#define H2W_ASCR_PHONE_IN (0x04)
+#define H2W_ASCR_RESET (0x08)
+#define H2W_ASCR_AUDIO_IN (0x10)
+
+#define H2W_LED_OFF (0x0)
+#define H2W_LED_BKL (0x1)
+#define H2W_LED_MTL (0x2)
+
+typedef enum {
+ /* === system group 0x0000~0x00FF === */
+ /* (R) Accessory type register */
+ H2W_SYSTEM = 0x0000,
+ /* (R) Maximum group address */
+ H2W_MAX_GP_ADD = 0x0001,
+ /* (R/W) Accessory system control register0 */
+ H2W_ASCR0 = 0x0002,
+
+ /* === key group 0x0100~0x01FF === */
+ /* (R) Key group maximum sub address */
+ H2W_KEY_MAXADD = 0x0100,
+ /* (R) ASCII key press down flag */
+ H2W_ASCII_DOWN = 0x0101,
+ /* (R) ASCII key release up flag */
+ H2W_ASCII_UP = 0x0102,
+ /* (R) Function key status flag */
+ H2W_FNKEY_UPDOWN = 0x0103,
+ /* (R/W) Key device status */
+ H2W_KD_STATUS = 0x0104,
+
+ /* === led group 0x0200~0x02FF === */
+ /* (R) LED group maximum sub address */
+ H2W_LED_MAXADD = 0x0200,
+ /* (R/W) LED control register0 */
+ H2W_LEDCT0 = 0x0201,
+
+ /* === crdl group 0x0300~0x03FF === */
+ /* (R) Cardle group maximum sub address */
+ H2W_CRDL_MAXADD = 0x0300,
+ /* (R/W) Cardle group function control register0 */
+ H2W_CRDLCT0 = 0x0301,
+
+ /* === car kit group 0x0400~0x04FF === */
+ H2W_CARKIT_MAXADD = 0x0400,
+
+ /* === usb host group 0x0500~0x05FF === */
+ H2W_USBHOST_MAXADD = 0x0500,
+
+ /* === medical group 0x0600~0x06FF === */
+ H2W_MED_MAXADD = 0x0600,
+ H2W_MED_CONTROL = 0x0601,
+ H2W_MED_IN_DATA = 0x0602,
+} H2W_ADDR;
+
+
+typedef struct H2W_INFO {
+ /* system group */
+ unsigned char CLK_SP;
+ int SLEEP_PR;
+ unsigned char HW_REV;
+ int AUDIO_DEVICE;
+ unsigned char ACC_CLASS;
+ unsigned char MAX_GP_ADD;
+
+ /* key group */
+ int KEY_MAXADD;
+ int ASCII_DOWN;
+ int ASCII_UP;
+ int FNKEY_UPDOWN;
+ int KD_STATUS;
+
+ /* led group */
+ int LED_MAXADD;
+ int LEDCT0;
+
+ /* medical group */
+ int MED_MAXADD;
+ unsigned char AP_ID;
+ unsigned char AP_EN;
+ unsigned char DATA_EN;
+} H2W_INFO;
+
+typedef enum {
+ H2W_500KHz = 1,
+ H2W_250KHz = 2,
+ H2W_166KHz = 3,
+ H2W_125KHz = 4,
+ H2W_100KHz = 5,
+ H2W_83KHz = 6,
+ H2W_71KHz = 7,
+ H2W_62KHz = 8,
+ H2W_55KHz = 9,
+ H2W_50KHz = 10,
+} H2W_SPEED;
+
+typedef enum {
+ H2W_KEY_INVALID = -1,
+ H2W_KEY_PLAY = 0,
+ H2W_KEY_FORWARD = 1,
+ H2W_KEY_BACKWARD = 2,
+ H2W_KEY_VOLUP = 3,
+ H2W_KEY_VOLDOWN = 4,
+ H2W_KEY_PICKUP = 5,
+ H2W_KEY_HANGUP = 6,
+ H2W_KEY_MUTE = 7,
+ H2W_KEY_HOLD = 8,
+ H2W_NUM_KEYFUNC = 9,
+} KEYFUNC;
+#endif
diff --git a/arch/arm/mach-msm/include/mach/htc_pwrsink.h b/arch/arm/mach-msm/include/mach/htc_pwrsink.h
new file mode 100644
index 000000000000..c7a91f1d906c
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/htc_pwrsink.h
@@ -0,0 +1,87 @@
+/* include/asm/mach-msm/htc_pwrsink.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_HTC_PWRSINK_H_
+#define _ARCH_ARM_MACH_MSM_HTC_PWRSINK_H_
+
+#include <linux/platform_device.h>
+#include <linux/earlysuspend.h>
+
+typedef enum {
+ PWRSINK_AUDIO_PCM = 0,
+ PWRSINK_AUDIO_MP3,
+ PWRSINK_AUDIO_AAC,
+
+ PWRSINK_AUDIO_LAST = PWRSINK_AUDIO_AAC,
+ PWRSINK_AUDIO_INVALID
+} pwrsink_audio_id_type;
+
+struct pwr_sink_audio {
+ unsigned volume;
+ unsigned percent;
+};
+
+typedef enum {
+ PWRSINK_SYSTEM_LOAD = 0,
+ PWRSINK_AUDIO,
+ PWRSINK_BACKLIGHT,
+ PWRSINK_LED_BUTTON,
+ PWRSINK_LED_KEYBOARD,
+ PWRSINK_GP_CLK,
+ PWRSINK_BLUETOOTH,
+ PWRSINK_CAMERA,
+ PWRSINK_SDCARD,
+ PWRSINK_VIDEO,
+ PWRSINK_WIFI,
+
+ PWRSINK_LAST = PWRSINK_WIFI,
+ PWRSINK_INVALID
+} pwrsink_id_type;
+
+struct pwr_sink {
+ pwrsink_id_type id;
+ unsigned ua_max;
+ unsigned percent_util;
+};
+
+struct pwr_sink_platform_data {
+ unsigned num_sinks;
+ struct pwr_sink *sinks;
+ int (*suspend_late)(struct platform_device *, pm_message_t state);
+ int (*resume_early)(struct platform_device *);
+ void (*suspend_early)(struct early_suspend *);
+ void (*resume_late)(struct early_suspend *);
+};
+
+#ifndef CONFIG_HTC_PWRSINK
+static inline int htc_pwrsink_set(pwrsink_id_type id, unsigned percent)
+{
+ return 0;
+}
+static inline int htc_pwrsink_audio_set(pwrsink_audio_id_type id,
+ unsigned percent_utilized) { return 0; }
+static inline int htc_pwrsink_audio_volume_set(
+ pwrsink_audio_id_type id, unsigned volume) { return 0; }
+static inline int htc_pwrsink_audio_path_set(unsigned path) { return 0; }
+#else
+extern int htc_pwrsink_set(pwrsink_id_type id, unsigned percent);
+extern int htc_pwrsink_audio_set(pwrsink_audio_id_type id,
+ unsigned percent_utilized);
+extern int htc_pwrsink_audio_volume_set(pwrsink_audio_id_type id,
+ unsigned volume);
+extern int htc_pwrsink_audio_path_set(unsigned path);
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/internal_power_rail.h b/arch/arm/mach-msm/include/mach/internal_power_rail.h
new file mode 100644
index 000000000000..cd7ca765bde6
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/internal_power_rail.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _INTERNAL_POWER_RAIL_H
+#define _INTERNAL_POWER_RAIL_H
+
+/* Clock power rail IDs */
+#define PWR_RAIL_GRP_CLK 8
+#define PWR_RAIL_VDC_CLK 39
+#define PWR_RAIL_VFE_CLK 41
+#define PWR_RAIL_MFC_CLK 68
+
+enum rail_ctl_mode {
+ PWR_RAIL_CTL_AUTO = 0,
+ PWR_RAIL_CTL_MANUAL,
+};
+
+int internal_pwr_rail_ctl(unsigned rail_id, bool enable);
+int internal_pwr_rail_mode(unsigned rail_id, enum rail_ctl_mode mode);
+
+#endif /* _INTERNAL_POWER_RAIL_H */
+
diff --git a/arch/arm/mach-msm/include/mach/io.h b/arch/arm/mach-msm/include/mach/io.h
index aab964591db4..bdac617f4204 100644
--- a/arch/arm/mach-msm/include/mach/io.h
+++ b/arch/arm/mach-msm/include/mach/io.h
@@ -23,7 +23,7 @@
void __iomem *__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype);
-#define __io(a) __typesafe_io(a)
+#define __io(a) __typesafe_io(a)
#define __mem_pci(a) (a)
#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-7x30.h b/arch/arm/mach-msm/include/mach/irqs-7x30.h
new file mode 100644
index 000000000000..cd9b58572be3
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-7x30.h
@@ -0,0 +1,158 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_7X30_H
+#define __ASM_ARCH_MSM_IRQS_7X30_H
+
+/* MSM ACPU Interrupt Numbers */
+
+#define INT_DEBUG_TIMER_EXP 0
+#define INT_GPT0_TIMER_EXP 1
+#define INT_GPT1_TIMER_EXP 2
+#define INT_WDT0_ACCSCSSBARK 3
+#define INT_WDT1_ACCSCSSBARK 4
+#define INT_AVS_SVIC 5
+#define INT_AVS_SVIC_SW_DONE 6
+#define INT_SC_DBG_RX_FULL 7
+#define INT_SC_DBG_TX_EMPTY 8
+#define INT_SC_PERF_MON 9
+#define INT_AVS_REQ_DOWN 10
+#define INT_AVS_REQ_UP 11
+#define INT_SC_ACG 12
+/* SCSS_VICFIQSTS1[13:15] are RESERVED */
+#define INT_L2_SVICCPUIRPTREQ 16
+#define INT_L2_SVICDMANSIRPTREQ 17
+#define INT_L2_SVICDMASIRPTREQ 18
+#define INT_L2_SVICSLVIRPTREQ 19
+#define INT_AD5A_MPROC_APPS_0 20
+#define INT_AD5A_MPROC_APPS_1 21
+#define INT_A9_M2A_0 22
+#define INT_A9_M2A_1 23
+#define INT_A9_M2A_2 24
+#define INT_A9_M2A_3 25
+#define INT_A9_M2A_4 26
+#define INT_A9_M2A_5 27
+#define INT_A9_M2A_6 28
+#define INT_A9_M2A_7 29
+#define INT_A9_M2A_8 30
+#define INT_A9_M2A_9 31
+
+#define INT_AXI_EBI1_SC (32 + 0)
+#define INT_IMEM_ERR (32 + 1)
+#define INT_AXI_EBI0_SC (32 + 2)
+#define INT_PBUS_SC_IRQC (32 + 3)
+#define INT_PERPH_BUS_BPM (32 + 4)
+#define INT_CC_TEMP_SENSE (32 + 5)
+#define INT_UXMC_EBI0 (32 + 6)
+#define INT_UXMC_EBI1 (32 + 7)
+#define INT_EBI2_OP_DONE (32 + 8)
+#define INT_EBI2_WR_ER_DONE (32 + 9)
+#define INT_TCSR_SPSS_CE (32 + 10)
+#define INT_EMDH (32 + 11)
+#define INT_PMDH (32 + 12)
+#define INT_MDC (32 + 13)
+#define INT_MIDI_TO_SUPSS (32 + 14)
+#define INT_LPA_2 (32 + 15)
+#define INT_GPIO_GROUP1_SECURE (32 + 16)
+#define INT_GPIO_GROUP2_SECURE (32 + 17)
+#define INT_GPIO_GROUP1 (32 + 18)
+#define INT_GPIO_GROUP2 (32 + 19)
+#define INT_MPRPH_SOFTRESET (32 + 20)
+#define INT_PWB_I2C (32 + 21)
+#define INT_PWB_I2C_2 (32 + 22)
+#define INT_TSSC_SAMPLE (32 + 23)
+#define INT_TSSC_PENUP (32 + 24)
+#define INT_TCHSCRN_SSBI (32 + 25)
+#define INT_FM_RDS (32 + 26)
+#define INT_KEYSENSE (32 + 27)
+#define INT_USB_OTG_HS (32 + 28)
+#define INT_USB_OTG_HS2 (32 + 29)
+#define INT_USB_OTG_HS3 (32 + 30)
+#define INT_RESERVED_BIT31 (32 + 31)
+
+#define INT_SPI_OUTPUT (64 + 0)
+#define INT_SPI_INPUT (64 + 1)
+#define INT_SPI_ERROR (64 + 2)
+#define INT_UART1 (64 + 3)
+#define INT_UART1_RX (64 + 4)
+#define INT_UART2 (64 + 5)
+#define INT_UART2_RX (64 + 6)
+#define INT_UART3 (64 + 7)
+#define INT_UART3_RX (64 + 8)
+#define INT_UART1DM_IRQ (64 + 9)
+#define INT_UART1DM_RX (64 + 10)
+#define INT_UART2DM_IRQ (64 + 11)
+#define INT_UART2DM_RX (64 + 12)
+#define INT_TSIF (64 + 13)
+#define INT_ADM_SC1 (64 + 14)
+#define INT_ADM_SC2 (64 + 15)
+#define INT_MDP (64 + 16)
+#define INT_VPE (64 + 17)
+#define INT_GRP_2D (64 + 18)
+#define INT_GRP_3D (64 + 19)
+#define INT_ROTATOR (64 + 20)
+#define INT_MFC720 (64 + 21)
+#define INT_JPEG (64 + 22)
+#define INT_VFE (64 + 23)
+#define INT_TV_ENC (64 + 24)
+#define INT_PMIC_SSBI (64 + 25)
+#define INT_MPM_1 (64 + 26)
+#define INT_TCSR_SPSS_SAMPLE (64 + 27)
+#define INT_TCSR_SPSS_PENUP (64 + 28)
+#define INT_MPM_2 (64 + 29)
+#define INT_SDC1_0 (64 + 30)
+#define INT_SDC1_1 (64 + 31)
+
+#define INT_SDC3_0 (96 + 0)
+#define INT_SDC3_1 (96 + 1)
+#define INT_SDC2_0 (96 + 2)
+#define INT_SDC2_1 (96 + 3)
+#define INT_SDC4_0 (96 + 4)
+#define INT_SDC4_1 (96 + 5)
+/* SCSS_VICFIQSTS3[6:31] are RESERVED */
+
+/* Retrofit universal macro names */
+#define INT_ADM_AARM INT_ADM_SC2
+#define INT_USB_HS INT_USB_OTG_HS
+#define INT_USB_OTG INT_USB_OTG_HS
+#define INT_TCHSCRN1 INT_TSSC_PENUP
+#define INT_TCHSCRN2 INT_TSSC_SAMPLE
+#define INT_GP_TIMER_EXP INT_GPT0_TIMER_EXP
+#define INT_ADSP_A11 INT_AD5A_MPROC_APPS_0
+#define INT_ADSP_A9_A11 INT_AD5A_MPROC_APPS_1
+#define INT_MDDI_EXT INT_EMDH
+#define INT_MDDI_PRI INT_PMDH
+#define INT_MDDI_CLIENT INT_MDC
+#define INT_NAND_WR_ER_DONE INT_EBI2_WR_ER_DONE
+#define INT_NAND_OP_DONE INT_EBI2_OP_DONE
+
+#define NR_GPIO_IRQS 181
+#define NR_MSM_IRQS 128
+#define NR_BOARD_IRQS 128
+
+#endif /* __ASM_ARCH_MSM_IRQS_7X30_H */
diff --git a/arch/arm/mach-msm/include/mach/irqs-7xxx.h b/arch/arm/mach-msm/include/mach/irqs-7xxx.h
new file mode 100644
index 000000000000..1b3c73d38073
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-7xxx.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_7XXX_H
+#define __ASM_ARCH_MSM_IRQS_7XXX_H
+
+/* MSM ARM11 Interrupt Numbers */
+/* See 80-VE113-1 A, pp219-221 */
+
+#define INT_A9_M2A_0 0
+#define INT_A9_M2A_1 1
+#define INT_A9_M2A_2 2
+#define INT_A9_M2A_3 3
+#define INT_A9_M2A_4 4
+#define INT_A9_M2A_5 5
+#define INT_A9_M2A_6 6
+#define INT_GP_TIMER_EXP 7
+#define INT_DEBUG_TIMER_EXP 8
+#define INT_UART1 9
+#define INT_UART2 10
+#define INT_UART3 11
+#define INT_UART1_RX 12
+#define INT_UART2_RX 13
+#define INT_UART3_RX 14
+#define INT_USB_OTG 15
+#define INT_MDDI_PRI 16
+#define INT_MDDI_EXT 17
+#define INT_MDDI_CLIENT 18
+#define INT_MDP 19
+#define INT_GRAPHICS 20
+#define INT_ADM_AARM 21
+#define INT_ADSP_A11 22
+#define INT_ADSP_A9_A11 23
+#define INT_SDC1_0 24
+#define INT_SDC1_1 25
+#define INT_SDC2_0 26
+#define INT_SDC2_1 27
+#define INT_KEYSENSE 28
+#define INT_TCHSCRN_SSBI 29
+#define INT_TCHSCRN1 30
+#define INT_TCHSCRN2 31
+
+#define INT_GPIO_GROUP1 (32 + 0)
+#define INT_GPIO_GROUP2 (32 + 1)
+#define INT_PWB_I2C (32 + 2)
+#define INT_SOFTRESET (32 + 3)
+#define INT_NAND_WR_ER_DONE (32 + 4)
+#define INT_NAND_OP_DONE (32 + 5)
+#define INT_PBUS_ARM11 (32 + 6)
+#define INT_AXI_MPU_SMI (32 + 7)
+#define INT_AXI_MPU_EBI1 (32 + 8)
+#define INT_AD_HSSD (32 + 9)
+#define INT_ARM11_PMU (32 + 10)
+#define INT_ARM11_DMA (32 + 11)
+#define INT_TSIF_IRQ (32 + 12)
+#define INT_UART1DM_IRQ (32 + 13)
+#define INT_UART1DM_RX (32 + 14)
+#define INT_USB_HS (32 + 15)
+#define INT_SDC3_0 (32 + 16)
+#define INT_SDC3_1 (32 + 17)
+#define INT_SDC4_0 (32 + 18)
+#define INT_SDC4_1 (32 + 19)
+#define INT_UART2DM_IRQ (32 + 20)
+#define INT_UART2DM_RX (32 + 21)
+
+/* 22-31 are reserved */
+
+/* 7x00A uses 122, but 7x25 has up to 132. */
+#define NR_GPIO_IRQS 133
+#define NR_MSM_IRQS 64
+#define NR_BOARD_IRQS 64
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs-8xxx.h b/arch/arm/mach-msm/include/mach/irqs-8xxx.h
new file mode 100644
index 000000000000..3ce01e9d01fa
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/irqs-8xxx.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_IRQS_8XXX_H
+#define __ASM_ARCH_MSM_IRQS_8XXX_H
+
+/* MSM ACPU Interrupt Numbers */
+
+#define INT_A9_M2A_0 0
+#define INT_A9_M2A_1 1
+#define INT_A9_M2A_2 2
+#define INT_A9_M2A_3 3
+#define INT_A9_M2A_4 4
+#define INT_A9_M2A_5 5
+#define INT_A9_M2A_6 6
+#define INT_GP_TIMER_EXP 7
+#define INT_DEBUG_TIMER_EXP 8
+#define INT_SIRC_0 9
+#define INT_SDC3_0 10
+#define INT_SDC3_1 11
+#define INT_SDC4_0 12
+#define INT_SDC4_1 13
+#define INT_AD6_EXT_VFR 14
+#define INT_USB_OTG 15
+#define INT_MDDI_PRI 16
+#define INT_MDDI_EXT 17
+#define INT_MDDI_CLIENT 18
+#define INT_MDP 19
+#define INT_GRAPHICS 20
+#define INT_ADM_AARM 21
+#define INT_ADSP_A11 22
+#define INT_ADSP_A9_A11 23
+#define INT_SDC1_0 24
+#define INT_SDC1_1 25
+#define INT_SDC2_0 26
+#define INT_SDC2_1 27
+#define INT_KEYSENSE 28
+#define INT_TCHSCRN_SSBI 29
+#define INT_TCHSCRN1 30
+#define INT_TCHSCRN2 31
+
+#define INT_TCSR_MPRPH_SC1 (32 + 0)
+#define INT_USB_FS2 (32 + 1)
+#define INT_PWB_I2C (32 + 2)
+#define INT_SOFTRESET (32 + 3)
+#define INT_NAND_WR_ER_DONE (32 + 4)
+#define INT_NAND_OP_DONE (32 + 5)
+#define INT_TCSR_MPRPH_SC2 (32 + 6)
+#define INT_OP_PEN (32 + 7)
+#define INT_AD_HSSD (32 + 8)
+#define INT_ARM11_PM (32 + 9)
+#define INT_SDMA_NON_SECURE (32 + 10)
+#define INT_TSIF_IRQ (32 + 11)
+#define INT_UART1DM_IRQ (32 + 12)
+#define INT_UART1DM_RX (32 + 13)
+#define INT_SDMA_SECURE (32 + 14)
+#define INT_SI2S_SLAVE (32 + 15)
+#define INT_SC_I2CPU (32 + 16)
+#define INT_SC_DBG_RDTRFULL (32 + 17)
+#define INT_SC_DBG_WDTRFULL (32 + 18)
+#define INT_SCPLL_CTL_DONE (32 + 19)
+#define INT_UART2DM_IRQ (32 + 20)
+#define INT_UART2DM_RX (32 + 21)
+#define INT_VDC_MEC (32 + 22)
+#define INT_VDC_DB (32 + 23)
+#define INT_VDC_AXI (32 + 24)
+#define INT_VFE (32 + 25)
+#define INT_USB_HS (32 + 26)
+#define INT_AUDIO_OUT0 (32 + 27)
+#define INT_AUDIO_OUT1 (32 + 28)
+#define INT_CRYPTO (32 + 29)
+#define INT_AD6M_IDLE (32 + 30)
+#define INT_SIRC_1 (32 + 31)
+
+#define NR_GPIO_IRQS 165
+#define NR_MSM_IRQS 64
+#define NR_BOARD_IRQS 64
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/irqs.h b/arch/arm/mach-msm/include/mach/irqs.h
index 9dd4cf8a2693..902910ad3b16 100644
--- a/arch/arm/mach-msm/include/mach/irqs.h
+++ b/arch/arm/mach-msm/include/mach/irqs.h
@@ -1,6 +1,6 @@
-/* arch/arm/mach-msm/include/mach/irqs.h
- *
+/*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -17,74 +17,21 @@
#ifndef __ASM_ARCH_MSM_IRQS_H
#define __ASM_ARCH_MSM_IRQS_H
-/* MSM ARM11 Interrupt Numbers */
-/* See 80-VE113-1 A, pp219-221 */
-
-#define INT_A9_M2A_0 0
-#define INT_A9_M2A_1 1
-#define INT_A9_M2A_2 2
-#define INT_A9_M2A_3 3
-#define INT_A9_M2A_4 4
-#define INT_A9_M2A_5 5
-#define INT_A9_M2A_6 6
-#define INT_GP_TIMER_EXP 7
-#define INT_DEBUG_TIMER_EXP 8
-#define INT_UART1 9
-#define INT_UART2 10
-#define INT_UART3 11
-#define INT_UART1_RX 12
-#define INT_UART2_RX 13
-#define INT_UART3_RX 14
-#define INT_USB_OTG 15
-#define INT_MDDI_PRI 16
-#define INT_MDDI_EXT 17
-#define INT_MDDI_CLIENT 18
-#define INT_MDP 19
-#define INT_GRAPHICS 20
-#define INT_ADM_AARM 21
-#define INT_ADSP_A11 22
-#define INT_ADSP_A9_A11 23
-#define INT_SDC1_0 24
-#define INT_SDC1_1 25
-#define INT_SDC2_0 26
-#define INT_SDC2_1 27
-#define INT_KEYSENSE 28
-#define INT_TCHSCRN_SSBI 29
-#define INT_TCHSCRN1 30
-#define INT_TCHSCRN2 31
-
-#define INT_GPIO_GROUP1 (32 + 0)
-#define INT_GPIO_GROUP2 (32 + 1)
-#define INT_PWB_I2C (32 + 2)
-#define INT_SOFTRESET (32 + 3)
-#define INT_NAND_WR_ER_DONE (32 + 4)
-#define INT_NAND_OP_DONE (32 + 5)
-#define INT_PBUS_ARM11 (32 + 6)
-#define INT_AXI_MPU_SMI (32 + 7)
-#define INT_AXI_MPU_EBI1 (32 + 8)
-#define INT_AD_HSSD (32 + 9)
-#define INT_ARM11_PMU (32 + 10)
-#define INT_ARM11_DMA (32 + 11)
-#define INT_TSIF_IRQ (32 + 12)
-#define INT_UART1DM_IRQ (32 + 13)
-#define INT_UART1DM_RX (32 + 14)
-#define INT_USB_HS (32 + 15)
-#define INT_SDC3_0 (32 + 16)
-#define INT_SDC3_1 (32 + 17)
-#define INT_SDC4_0 (32 + 18)
-#define INT_SDC4_1 (32 + 19)
-#define INT_UART2DM_RX (32 + 20)
-#define INT_UART2DM_IRQ (32 + 21)
-
-/* 22-31 are reserved */
-
#define MSM_IRQ_BIT(irq) (1 << ((irq) & 31))
-#define NR_MSM_IRQS 64
-#define NR_GPIO_IRQS 122
-#define NR_BOARD_IRQS 64
-#define NR_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS)
+#if defined(CONFIG_ARCH_MSM7X30)
+#include "irqs-7x30.h"
+#elif defined(CONFIG_ARCH_QSD8X50)
+#include "irqs-8xxx.h"
+#include "sirc.h"
+#elif defined(CONFIG_ARCH_MSM_ARM11)
+#include "irqs-7xxx.h"
+#else
+#error "Unknown architecture specification"
+#endif
+#define NR_IRQS (NR_MSM_IRQS + NR_GPIO_IRQS + NR_BOARD_IRQS)
#define MSM_GPIO_TO_INT(n) (NR_MSM_IRQS + (n))
+#define MSM_INT_TO_REG(base, irq) (base + irq / 32)
#endif
diff --git a/arch/arm/mach-msm/include/mach/memory.h b/arch/arm/mach-msm/include/mach/memory.h
index f4698baec976..dc278487c217 100644
--- a/arch/arm/mach-msm/include/mach/memory.h
+++ b/arch/arm/mach-msm/include/mach/memory.h
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/include/mach/memory.h
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -12,12 +13,38 @@
* GNU General Public License for more details.
*
*/
-
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
/* physical offset of RAM */
+#ifdef CONFIG_MSM_STACKED_MEMORY
+
+#ifdef CONFIG_ARCH_MSM_SCORPION
+#define PHYS_OFFSET UL(0x20000000)
+#else
#define PHYS_OFFSET UL(0x10000000)
+#endif
+
+#else /* !CONFIG_MSM_STACKED_MEMORY */
+
+#define PHYS_OFFSET UL(0x00200000)
+
+#endif
+
+#ifndef __ASSEMBLY__
+void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment);
+
+#ifdef CONFIG_ARCH_MSM_ARM11
+void write_to_strongly_ordered_memory(void);
+
+#include <asm/mach-types.h>
+
+#define arch_barrier_extra() do \
+ { if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) \
+ write_to_strongly_ordered_memory(); \
+ } while (0)
+#endif
+#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/mmc.h b/arch/arm/mach-msm/include/mach/mmc.h
index 0ecf25426284..306de3d73fff 100644
--- a/arch/arm/mach-msm/include/mach/mmc.h
+++ b/arch/arm/mach-msm/include/mach/mmc.h
@@ -1,26 +1,16 @@
/*
- * arch/arm/include/asm/mach/mmc.h
+ * arch/arm/mach-msm/include/mach/mmc.h
*/
-#ifndef ASMARM_MACH_MMC_H
-#define ASMARM_MACH_MMC_H
+#ifndef ASM_ARCH_MACH_MMC_H
+#define ASM_ARCH_MACH_MMC_H
#include <linux/mmc/host.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-
-struct embedded_sdio_data {
- struct sdio_cis cis;
- struct sdio_cccr cccr;
- struct sdio_embedded_func *funcs;
- int num_funcs;
-};
struct mmc_platform_data {
unsigned int ocr_mask; /* available voltages */
u32 (*translate_vdd)(struct device *, unsigned int);
unsigned int (*status)(struct device *);
- struct embedded_sdio_data *embedded_sdio;
- int (*register_status_notify)(void (*callback)(int card_present, void *dev_id), void *dev_id);
+ unsigned long irq_flags;
};
#endif
diff --git a/arch/arm/mach-msm/include/mach/mpp.h b/arch/arm/mach-msm/include/mach/mpp.h
new file mode 100644
index 000000000000..7af853bdca49
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/mpp.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_MPP_H
+#define __ARCH_ARM_MACH_MSM_MPP_H
+
+struct mpp {
+ const char *name;
+ unsigned id;
+ int is_input;
+ int status;
+};
+
+/* Digital Logical Output Level */
+enum {
+ MPP_DLOGIC_LVL_MSME,
+ MPP_DLOGIC_LVL_MSMP,
+ MPP_DLOGIC_LVL_RUIM,
+ MPP_DLOGIC_LVL_MMC,
+ MPP_DLOGIC_LVL_VDD,
+};
+
+/* Digital Logical Output Control Value */
+enum {
+ MPP_DLOGIC_OUT_CTRL_LOW,
+ MPP_DLOGIC_OUT_CTRL_HIGH,
+ MPP_DLOGIC_OUT_CTRL_MPP, /* MPP Output = MPP Input */
+ MPP_DLOGIC_OUT_CTRL_NOT_MPP, /* MPP Output = Inverted MPP Input */
+};
+
+/* Digital Logical Input Value */
+enum {
+ MPP_DLOGIC_IN_DBUS_NONE,
+ MPP_DLOGIC_IN_DBUS_1,
+ MPP_DLOGIC_IN_DBUS_2,
+ MPP_DLOGIC_IN_DBUS_3,
+};
+
+#define MPP_CFG(level, control) ((((level) & 0x0FFFF) << 16) | \
+ ((control) & 0x0FFFFF))
+#define MPP_CFG_INPUT(level, dbus) ((((level) & 0x0FFFF) << 16) | \
+ ((control) & 0x0FFFFF))
+
+struct mpp *mpp_get(struct device *dev, const char *id);
+int mpp_config_digital_out(struct mpp *mpp, unsigned config);
+int mpp_config_digital_in(struct mpp *mpp, unsigned config);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_handset.h b/arch/arm/mach-msm/include/mach/msm_handset.h
new file mode 100644
index 000000000000..6947e03b8986
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_handset.h
@@ -0,0 +1,34 @@
+/* arch/arm/mach-msm/include/mach/msm_handset.h
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_HANDSET_H
+#define _MSM_HANDSET_H
+
+#include <linux/input.h>
+
+#if defined(CONFIG_INPUT_MSM_HANDSET)
+struct input_dev *msm_get_handset_input_dev(void);
+#else
+struct input_dev *msm_get_handset_input_dev(void)
+{
+ return NULL;
+}
+#endif
+
+struct msm_handset {
+ struct input_dev *ip_dev;
+};
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_i2ckbd.h b/arch/arm/mach-msm/include/mach/msm_i2ckbd.h
new file mode 100644
index 000000000000..2ca51f261cac
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_i2ckbd.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _MSM_I2CKBD_H_
+#define _MSM_I2CKBD_H_
+
+struct msm_i2ckbd_platform_data {
+ uint8_t hwrepeat;
+ uint8_t scanset1;
+ int gpioreset;
+ int gpioirq;
+ int (*gpio_setup) (void);
+ void (*gpio_shutdown)(void);
+};
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_iomap.h b/arch/arm/mach-msm/include/mach/msm_iomap.h
index 9dae1a98c77a..2f54547e1890 100644
--- a/arch/arm/mach-msm/include/mach/msm_iomap.h
+++ b/arch/arm/mach-msm/include/mach/msm_iomap.h
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/include/mach/msm_iomap.h
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -44,44 +45,98 @@
#endif
#define MSM_VIC_BASE IOMEM(0xE0000000)
+#if defined(CONFIG_ARCH_QSD8X50)
+#define MSM_VIC_PHYS 0xAC000000
+#elif defined(CONFIG_ARCH_MSM7X30)
+#define MSM_VIC_PHYS 0xC0080000
+#else
#define MSM_VIC_PHYS 0xC0000000
+#endif
#define MSM_VIC_SIZE SZ_4K
#define MSM_CSR_BASE IOMEM(0xE0001000)
+#if defined(CONFIG_ARCH_QSD8X50)
+#define MSM_CSR_PHYS 0xAC100000
+#else
#define MSM_CSR_PHYS 0xC0100000
+#endif
#define MSM_CSR_SIZE SZ_4K
-#define MSM_GPT_PHYS MSM_CSR_PHYS
-#define MSM_GPT_BASE MSM_CSR_BASE
-#define MSM_GPT_SIZE SZ_4K
+#define MSM_TMR_PHYS MSM_CSR_PHYS
+#define MSM_TMR_BASE MSM_CSR_BASE
+#define MSM_TMR_SIZE SZ_4K
#define MSM_DMOV_BASE IOMEM(0xE0002000)
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_DMOV_PHYS 0xAC400000
+#else
#define MSM_DMOV_PHYS 0xA9700000
+#endif
#define MSM_DMOV_SIZE SZ_4K
#define MSM_GPIO1_BASE IOMEM(0xE0003000)
+#if defined(CONFIG_ARCH_QSD8X50)
+#define MSM_GPIO1_PHYS 0xA9000000
+#elif defined(CONFIG_ARCH_MSM7X30)
+#define MSM_GPIO1_PHYS 0xAC001000
+#else
#define MSM_GPIO1_PHYS 0xA9200000
+#endif
#define MSM_GPIO1_SIZE SZ_4K
#define MSM_GPIO2_BASE IOMEM(0xE0004000)
+
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_GPIO2_PHYS 0xAC101000
+#elif defined(CONFIG_ARCH_QSD8X50)
+#define MSM_GPIO2_PHYS 0xA9100000
+#else
#define MSM_GPIO2_PHYS 0xA9300000
+#endif
#define MSM_GPIO2_SIZE SZ_4K
#define MSM_CLK_CTL_BASE IOMEM(0xE0005000)
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_CLK_CTL_PHYS 0xAB800000
+#else
#define MSM_CLK_CTL_PHYS 0xA8600000
+#endif
#define MSM_CLK_CTL_SIZE SZ_4K
+#define MSM_L2CC_BASE IOMEM(0xE0006000)
+#define MSM_L2CC_PHYS 0xC0400000
+#define MSM_L2CC_SIZE SZ_4K
+
+#define MSM_SIRC_BASE IOMEM(0xE1006000)
+#define MSM_SIRC_PHYS 0xAC200000
+#define MSM_SIRC_SIZE SZ_4K
+
+#define MSM_SCPLL_BASE IOMEM(0xE1007000)
+#define MSM_SCPLL_PHYS 0xA8800000
+#define MSM_SCPLL_SIZE SZ_4K
+
+#define MSM_ACC_BASE IOMEM(0xE0007000)
+#define MSM_ACC_PHYS 0xC0101000
+#define MSM_ACC_SIZE SZ_4K
+
+#define MSM_GCC_BASE IOMEM(0xE0008000)
+#define MSM_GCC_PHYS 0xC0182000
+#define MSM_GCC_SIZE SZ_4K
+
#define MSM_SHARED_RAM_BASE IOMEM(0xE0100000)
-#define MSM_SHARED_RAM_PHYS 0x01F00000
#define MSM_SHARED_RAM_SIZE SZ_1M
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_UART1_PHYS 0xACA00000
+#define MSM_UART2_PHYS 0xACB00000
+#define MSM_UART3_PHYS 0xACC00000
+#else
#define MSM_UART1_PHYS 0xA9A00000
-#define MSM_UART1_SIZE SZ_4K
-
#define MSM_UART2_PHYS 0xA9B00000
-#define MSM_UART2_SIZE SZ_4K
-
#define MSM_UART3_PHYS 0xA9C00000
+#endif
+#define MSM_UART1_SIZE SZ_4K
+#define MSM_UART2_SIZE SZ_4K
#define MSM_UART3_SIZE SZ_4K
#ifdef CONFIG_MSM_DEBUG_UART
@@ -96,38 +151,27 @@
#define MSM_DEBUG_UART_SIZE SZ_4K
#endif
-#define MSM_SDC1_PHYS 0xA0400000
-#define MSM_SDC1_SIZE SZ_4K
-
-#define MSM_SDC2_PHYS 0xA0500000
-#define MSM_SDC2_SIZE SZ_4K
-
-#define MSM_SDC3_PHYS 0xA0600000
-#define MSM_SDC3_SIZE SZ_4K
-
-#define MSM_SDC4_PHYS 0xA0700000
-#define MSM_SDC4_SIZE SZ_4K
-
#define MSM_I2C_PHYS 0xA9900000
#define MSM_I2C_SIZE SZ_4K
#define MSM_HSUSB_PHYS 0xA0800000
+#define MSM_HSUSB_BASE IOMEM(0xE0009000)
#define MSM_HSUSB_SIZE SZ_4K
-#define MSM_PMDH_PHYS 0xAA600000
-#define MSM_PMDH_SIZE SZ_4K
-
-#define MSM_EMDH_PHYS 0xAA700000
-#define MSM_EMDH_SIZE SZ_4K
-
-#define MSM_MDP_PHYS 0xAA200000
-#define MSM_MDP_SIZE 0x000F0000
-
+#define MSM_MDC_BASE IOMEM(0xE0200000)
#define MSM_MDC_PHYS 0xAA500000
#define MSM_MDC_SIZE SZ_1M
+#define MSM_AD5_BASE IOMEM(0xE0300000)
#define MSM_AD5_PHYS 0xAC000000
#define MSM_AD5_SIZE (SZ_1M*13)
+#define MSM_SSBI_BASE IOMEM(0xE1004000)
+#define MSM_SSBI_PHYS 0xA8100000
+#define MSM_SSBI_SIZE SZ_4K
+
+#define MSM_TSSC_BASE IOMEM(0xE1005000)
+#define MSM_TSSC_PHYS 0xAA300000
+#define MSM_TSSC_SIZE SZ_4K
#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_otg.h b/arch/arm/mach-msm/include/mach/msm_otg.h
new file mode 100644
index 000000000000..6ab662921fee
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_otg.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_OTG_H
+#define __ARCH_ARM_MACH_MSM_OTG_H
+
+/*
+ * The otg driver needs to interact with both device side and host side
+ * usb controllers. it decides which controller is active at a given
+ * moment, using the transceiver, ID signal.
+ */
+
+struct msm_otg_transceiver {
+ struct device *dev;
+ struct clk *clk;
+ struct clk *pclk;
+ int in_lpm;
+ struct msm_otg_ops *dcd_ops;
+ struct msm_otg_ops *hcd_ops;
+ int irq;
+ int flags;
+ int state;
+ int active;
+ void __iomem *regs; /* device memory/io */
+ struct work_struct work;
+ spinlock_t lock;
+
+ /* bind/unbind the host controller */
+ int (*set_host)(struct msm_otg_transceiver *otg,
+ struct msm_otg_ops *hcd_ops);
+
+ /* bind/unbind the peripheral controller */
+ int (*set_peripheral)(struct msm_otg_transceiver *otg,
+ struct msm_otg_ops *dcd_ops);
+ void (*set_suspend) (int on);
+
+};
+
+struct msm_otg_ops {
+ void (*status_change)(int);
+};
+
+/* for usb host and peripheral controller drivers */
+#ifdef CONFIG_USB_MSM_OTG
+
+extern struct msm_otg_transceiver *msm_otg_get_transceiver(void);
+extern void msm_otg_put_transceiver(struct msm_otg_transceiver *xceiv);
+
+#else
+
+static inline struct msm_otg_transceiver *msm_otg_get_transceiver(void)
+{
+ return NULL;
+}
+
+static inline void msm_otg_put_transceiver(struct msm_otg_transceiver *xceiv)
+{
+}
+
+#endif /*CONFIG_USB_MSM_OTG*/
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_rpcrouter.h b/arch/arm/mach-msm/include/mach/msm_rpcrouter.h
new file mode 100644
index 000000000000..d0c01fb53047
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_rpcrouter.h
@@ -0,0 +1,296 @@
+/** include/asm-arm/arch-msm/msm_rpcrouter.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM__ARCH_MSM_RPCROUTER_H
+#define __ASM__ARCH_MSM_RPCROUTER_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/ioctl.h>
+
+struct rpcrouter_ioctl_server_args {
+ uint32_t prog;
+ uint32_t vers;
+};
+
+#define RPC_ROUTER_VERSION_V1 0x00010000
+
+#define RPC_ROUTER_IOCTL_MAGIC (0xC1)
+
+#define RPC_ROUTER_IOCTL_GET_VERSION \
+ _IOR(RPC_ROUTER_IOCTL_MAGIC, 0, unsigned int)
+
+#define RPC_ROUTER_IOCTL_GET_MTU \
+ _IOR(RPC_ROUTER_IOCTL_MAGIC, 1, unsigned int)
+
+#define RPC_ROUTER_IOCTL_REGISTER_SERVER \
+ _IOWR(RPC_ROUTER_IOCTL_MAGIC, 2, unsigned int)
+
+#define RPC_ROUTER_IOCTL_UNREGISTER_SERVER \
+ _IOWR(RPC_ROUTER_IOCTL_MAGIC, 3, unsigned int)
+
+#define RPC_ROUTER_IOCTL_CLEAR_NETRESET \
+ _IOWR(RPC_ROUTER_IOCTL_MAGIC, 4, unsigned int)
+
+/* RPC API version structure
+ * Version bit 31 : 1->hashkey versioning,
+ * 0->major-minor (backward compatible) versioning
+ * hashkey versioning:
+ * Version bits 31-0 hashkey
+ * major-minor (backward compatible) versioning
+ * Version bits 30-28 reserved (no match)
+ * Version bits 27-16 major (must match)
+ * Version bits 15-0 minor (greater or equal)
+ */
+#define RPC_VERSION_MODE_MASK 0x80000000
+#define RPC_VERSION_MAJOR_MASK 0x0fff0000
+#define RPC_VERSION_MINOR_MASK 0x0000ffff
+
+/* callback ID for NULL callback function is -1 */
+#define MSM_RPC_CLIENT_NULL_CB_ID 0xffffffff
+
+struct msm_rpc_endpoint;
+
+struct rpcsvr_platform_device
+{
+ struct platform_device base;
+ uint32_t prog;
+ uint32_t vers;
+};
+
+#define RPC_DATA_IN 0
+/*
+ * Structures for sending / receiving direct RPC requests
+ * XXX: Any cred/verif lengths > 0 not supported
+ */
+
+struct rpc_request_hdr
+{
+ uint32_t xid;
+ uint32_t type; /* 0 */
+ uint32_t rpc_vers; /* 2 */
+ uint32_t prog;
+ uint32_t vers;
+ uint32_t procedure;
+ uint32_t cred_flavor;
+ uint32_t cred_length;
+ uint32_t verf_flavor;
+ uint32_t verf_length;
+};
+
+typedef struct
+{
+ uint32_t low;
+ uint32_t high;
+} rpc_reply_progmismatch_data;
+
+typedef struct
+{
+} rpc_denied_reply_hdr;
+
+typedef struct
+{
+ uint32_t verf_flavor;
+ uint32_t verf_length;
+ uint32_t accept_stat;
+#define RPC_ACCEPTSTAT_SUCCESS 0
+#define RPC_ACCEPTSTAT_PROG_UNAVAIL 1
+#define RPC_ACCEPTSTAT_PROG_MISMATCH 2
+#define RPC_ACCEPTSTAT_PROC_UNAVAIL 3
+#define RPC_ACCEPTSTAT_GARBAGE_ARGS 4
+#define RPC_ACCEPTSTAT_SYSTEM_ERR 5
+#define RPC_ACCEPTSTAT_PROG_LOCKED 6
+ /*
+ * Following data is dependant on accept_stat
+ * If ACCEPTSTAT == PROG_MISMATCH then there is a
+ * 'rpc_reply_progmismatch_data' structure following the header.
+ * Otherwise the data is procedure specific
+ */
+} rpc_accepted_reply_hdr;
+
+struct rpc_reply_hdr
+{
+ uint32_t xid;
+ uint32_t type;
+ uint32_t reply_stat;
+#define RPCMSG_REPLYSTAT_ACCEPTED 0
+#define RPCMSG_REPLYSTAT_DENIED 1
+ union {
+ rpc_accepted_reply_hdr acc_hdr;
+ rpc_denied_reply_hdr dny_hdr;
+ } data;
+};
+
+/* flags for msm_rpc_connect() */
+#define MSM_RPC_UNINTERRUPTIBLE 0x0001
+
+/* use IS_ERR() to check for failure */
+struct msm_rpc_endpoint *msm_rpc_open(void);
+/* Connect with the specified server version */
+struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags);
+/* Connect with a compatible server version */
+struct msm_rpc_endpoint *msm_rpc_connect_compatible(uint32_t prog,
+ uint32_t vers, unsigned flags);
+int msm_rpc_get_compatible_server(uint32_t prog, uint32_t vers,
+ uint32_t *found_vers);
+/* check if server version can handle client requested version */
+int msm_rpc_is_compatible_version(uint32_t server_version,
+ uint32_t client_version);
+
+int msm_rpc_close(struct msm_rpc_endpoint *ept);
+int msm_rpc_write(struct msm_rpc_endpoint *ept,
+ void *data, int len);
+int msm_rpc_read(struct msm_rpc_endpoint *ept,
+ void **data, unsigned len, long timeout);
+void msm_rpc_setup_req(struct rpc_request_hdr *hdr,
+ uint32_t prog, uint32_t vers, uint32_t proc);
+int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
+ uint32_t prog, uint32_t vers);
+int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
+ uint32_t prog, uint32_t vers);
+
+int msm_rpc_clear_netreset(struct msm_rpc_endpoint *ept);
+/* simple blocking rpc call
+ *
+ * request is mandatory and must have a rpc_request_hdr
+ * at the start. The header will be filled out for you.
+ *
+ * reply provides a buffer for replies of reply_max_size
+ */
+int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
+ void *request, int request_size,
+ void *reply, int reply_max_size,
+ long timeout);
+int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
+ void *request, int request_size,
+ long timeout);
+
+struct msm_rpc_server
+{
+ struct list_head list;
+ uint32_t flags;
+
+ uint32_t prog;
+ uint32_t vers;
+
+ struct mutex cb_req_lock;
+ struct mutex reply_lock;
+ char *cb_req;
+ char *reply;
+
+ struct msm_rpc_endpoint *cb_ept;
+
+ int (*rpc_call)(struct msm_rpc_server *server,
+ struct rpc_request_hdr *req, unsigned len);
+};
+
+int msm_rpc_create_server(struct msm_rpc_server *server);
+
+#define MSM_RPC_MSGSIZE_MAX 8192
+
+struct msm_rpc_client;
+
+struct msm_rpc_client {
+ struct task_struct *read_thread;
+ struct task_struct *cb_thread;
+
+ struct msm_rpc_endpoint *ept;
+ wait_queue_head_t reply_wait;
+
+ uint32_t prog, ver;
+
+ void *buf;
+ int read_avail;
+
+ int (*cb_func)(struct msm_rpc_client *, void *, int);
+ void *cb_buf;
+ int cb_size;
+
+ struct list_head cb_item_list;
+ struct mutex cb_item_list_lock;
+
+ wait_queue_head_t cb_wait;
+ int cb_avail;
+
+ atomic_t next_cb_id;
+ struct mutex cb_list_lock;
+ struct list_head cb_list;
+
+ uint32_t exit_flag;
+ struct completion complete;
+ struct completion cb_complete;
+
+ struct mutex req_lock;
+ struct mutex reply_lock;
+ char *req;
+ char *reply;
+};
+
+struct msm_rpc_client_info {
+ uint32_t pid;
+ uint32_t cid;
+ uint32_t prog;
+ uint32_t vers;
+};
+
+struct msm_rpc_client *msm_rpc_register_client(
+ const char *name,
+ uint32_t prog, uint32_t ver,
+ uint32_t create_cb_thread,
+ int (*cb_func)(struct msm_rpc_client *, void *, int));
+
+int msm_rpc_unregister_client(struct msm_rpc_client *client);
+
+int msm_rpc_client_req(struct msm_rpc_client *client, uint32_t proc,
+ int (*arg_func)(struct msm_rpc_client *,
+ void *, void *), void *arg_data,
+ int (*result_func)(struct msm_rpc_client *,
+ void *, void *), void *result_data,
+ long timeout);
+
+void *msm_rpc_start_accepted_reply(struct msm_rpc_client *client,
+ uint32_t xid, uint32_t accept_status);
+
+int msm_rpc_send_accepted_reply(struct msm_rpc_client *client, uint32_t size);
+
+void *msm_rpc_server_start_accepted_reply(struct msm_rpc_server *server,
+ uint32_t xid, uint32_t accept_status);
+
+int msm_rpc_server_send_accepted_reply(struct msm_rpc_server *server,
+ uint32_t size);
+
+int msm_rpc_add_cb_func(struct msm_rpc_client *client, void *cb_func);
+
+void *msm_rpc_get_cb_func(struct msm_rpc_client *client, uint32_t cb_id);
+
+void msm_rpc_remove_cb_func(struct msm_rpc_client *client, void *cb_func);
+
+int msm_rpc_server_cb_req(struct msm_rpc_server *server,
+ struct msm_rpc_client_info *clnt_info,
+ uint32_t cb_proc,
+ int (*arg_func)(struct msm_rpc_server *server,
+ void *buf, void *data),
+ void *arg_data,
+ int (*ret_func)(struct msm_rpc_server *server,
+ void *buf, void *data),
+ void *ret_data, long timeout);
+
+void msm_rpc_server_get_requesting_client(
+ struct msm_rpc_client_info *clnt_info);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_smd.h b/arch/arm/mach-msm/include/mach/msm_smd.h
new file mode 100644
index 000000000000..8ef65c04ea85
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_smd.h
@@ -0,0 +1,82 @@
+/* linux/include/asm-arm/arch-msm/msm_smd.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SMD_H
+#define __ASM_ARCH_MSM_SMD_H
+
+typedef struct smd_channel smd_channel_t;
+
+/* warning: notify() may be called before open returns */
+int smd_open(const char *name, smd_channel_t **ch, void *priv,
+ void (*notify)(void *priv, unsigned event));
+
+#define SMD_EVENT_DATA 1
+#define SMD_EVENT_OPEN 2
+#define SMD_EVENT_CLOSE 3
+
+int smd_close(smd_channel_t *ch);
+
+/* passing a null pointer for data reads and discards */
+int smd_read(smd_channel_t *ch, void *data, int len);
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len);
+
+/* Write to stream channels may do a partial write and return
+** the length actually written.
+** Write to packet channels will never do a partial write --
+** it will return the requested length written or an error.
+*/
+int smd_write(smd_channel_t *ch, const void *data, int len);
+
+int smd_write_avail(smd_channel_t *ch);
+int smd_read_avail(smd_channel_t *ch);
+
+/* Returns the total size of the current packet being read.
+** Returns 0 if no packets available or a stream channel.
+*/
+int smd_cur_packet_size(smd_channel_t *ch);
+
+
+#if 0
+/* these are interruptable waits which will block you until the specified
+** number of bytes are readable or writable.
+*/
+int smd_wait_until_readable(smd_channel_t *ch, int bytes);
+int smd_wait_until_writable(smd_channel_t *ch, int bytes);
+#endif
+
+/* these are used to get and set the IF sigs of a channel.
+ * DTR and RTS can be set; DSR, CTS, CD and RI can be read.
+ */
+int smd_tiocmget(smd_channel_t *ch);
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear);
+
+#if defined(CONFIG_MSM_N_WAY_SMD)
+enum {
+ SMD_APPS_MODEM = 0,
+ SMD_APPS_QDSP,
+ SMD_MODEM_QDSP
+};
+#else
+enum {
+ SMD_APPS_MODEM = 0
+};
+#endif
+
+int smd_named_open_on_edge(const char *name, uint32_t edge, smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned));
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_touch.h b/arch/arm/mach-msm/include/mach/msm_touch.h
new file mode 100644
index 000000000000..763d6a8f1113
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_touch.h
@@ -0,0 +1,26 @@
+/* arch/arm/mach-msm/include/mach/msm_touch.h
+ *
+ * Platform data for MSM touchscreen driver.
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MACH_MSM_TOUCH_H_
+#define _MACH_MSM_TOUCH_H_
+
+struct msm_ts_platform_data {
+ unsigned int x_max;
+ unsigned int y_max;
+ unsigned int pressure_max;
+};
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/msm_touchpad.h b/arch/arm/mach-msm/include/mach/msm_touchpad.h
new file mode 100644
index 000000000000..5c02f15c5ca5
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/msm_touchpad.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * Touchpad driver for QSD platform.
+ */
+
+struct msm_touchpad_platform_data {
+ int gpioirq;
+ int gpiosuspend;
+ int (*gpio_setup) (void);
+ void (*gpio_shutdown)(void);
+};
diff --git a/arch/arm/mach-msm/include/mach/oem_rapi_client.h b/arch/arm/mach-msm/include/mach/oem_rapi_client.h
new file mode 100644
index 000000000000..4b340eb814c9
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/oem_rapi_client.h
@@ -0,0 +1,91 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM__ARCH_OEM_RAPI_CLIENT_H
+#define __ASM__ARCH_OEM_RAPI_CLIENT_H
+
+/*
+ * OEM RAPI CLIENT Driver header file
+ */
+
+#include <linux/types.h>
+#include <mach/msm_rpcrouter.h>
+
+enum {
+ OEM_RAPI_CLIENT_EVENT_NONE = 0,
+
+ /*
+ * list of oem rapi client events
+ */
+
+ OEM_RAPI_CLIENT_EVENT_MAX
+
+};
+
+struct oem_rapi_client_streaming_func_cb_arg {
+ uint32_t event;
+ void *handle;
+ uint32_t in_len;
+ char *input;
+ uint32_t out_len_valid;
+ uint32_t output_valid;
+ uint32_t output_size;
+};
+
+struct oem_rapi_client_streaming_func_cb_ret {
+ uint32_t *out_len;
+ char *output;
+};
+
+struct oem_rapi_client_streaming_func_arg {
+ uint32_t event;
+ int (*cb_func)(struct oem_rapi_client_streaming_func_cb_arg *,
+ struct oem_rapi_client_streaming_func_cb_ret *);
+ void *handle;
+ uint32_t in_len;
+ char *input;
+ uint32_t out_len_valid;
+ uint32_t output_valid;
+ uint32_t output_size;
+};
+
+struct oem_rapi_client_streaming_func_ret {
+ uint32_t *out_len;
+ char *output;
+};
+
+int oem_rapi_client_streaming_function(
+ struct msm_rpc_client *client,
+ struct oem_rapi_client_streaming_func_arg *arg,
+ struct oem_rapi_client_streaming_func_ret *ret);
+
+int oem_rapi_client_close(void);
+
+struct msm_rpc_client *oem_rapi_client_init(void);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/pmic.h b/arch/arm/mach-msm/include/mach/pmic.h
new file mode 100644
index 000000000000..c339e743c032
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/pmic.h
@@ -0,0 +1,557 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_PMIC_H
+#define __ARCH_ARM_MACH_PMIC_H
+
+
+enum spkr_left_right {
+ LEFT_SPKR,
+ RIGHT_SPKR,
+};
+
+enum spkr_gain {
+ SPKR_GAIN_MINUS16DB, /* -16 db */
+ SPKR_GAIN_MINUS12DB, /* -12 db */
+ SPKR_GAIN_MINUS08DB, /* -08 db */
+ SPKR_GAIN_MINUS04DB, /* -04 db */
+ SPKR_GAIN_00DB, /* 00 db */
+ SPKR_GAIN_PLUS04DB, /* +04 db */
+ SPKR_GAIN_PLUS08DB, /* +08 db */
+ SPKR_GAIN_PLUS12DB, /* +12 db */
+};
+
+enum spkr_dly {
+ SPKR_DLY_10MS, /* ~10 ms delay */
+ SPKR_DLY_100MS, /* ~100 ms delay */
+};
+
+enum spkr_hpf_corner_freq {
+ SPKR_FREQ_1_39KHZ, /* 1.39 kHz */
+ SPKR_FREQ_0_64KHZ, /* 0.64 kHz */
+ SPKR_FREQ_0_86KHZ, /* 0.86 kHz */
+ SPKR_FREQ_0_51KHZ, /* 0.51 kHz */
+ SPKR_FREQ_1_06KHZ, /* 1.06 kHz */
+ SPKR_FREQ_0_57KHZ, /* 0.57 kHz */
+ SPKR_FREQ_0_73KHZ, /* 0.73 kHz */
+ SPKR_FREQ_0_47KHZ, /* 0.47 kHz */
+ SPKR_FREQ_1_20KHZ, /* 1.20 kHz */
+ SPKR_FREQ_0_60KHZ, /* 0.60 kHz */
+ SPKR_FREQ_0_76KHZ, /* 0.76 kHz */
+ SPKR_FREQ_0_49KHZ, /* 0.49 kHz */
+ SPKR_FREQ_0_95KHZ, /* 0.95 kHz */
+ SPKR_FREQ_0_54KHZ, /* 0.54 kHz */
+ SPKR_FREQ_0_68KHZ, /* 0.68 kHz */
+ SPKR_FREQ_0_45KHZ, /* 0.45 kHz */
+};
+
+/* Turn the speaker on or off and enables or disables mute.*/
+enum spkr_cmd {
+ SPKR_DISABLE, /* Enable Speaker */
+ SPKR_ENABLE, /* Disable Speaker */
+ SPKR_MUTE_OFF, /* turn speaker mute off, SOUND ON */
+ SPKR_MUTE_ON, /* turn speaker mute on, SOUND OFF */
+ SPKR_OFF, /* turn speaker OFF (speaker disable and mute on) */
+ SPKR_ON, /* turn speaker ON (speaker enable and mute off) */
+ SPKR_SET_FREQ_CMD, /* set speaker frequency */
+ SPKR_GET_FREQ_CMD, /* get speaker frequency */
+ SPKR_SET_GAIN_CMD, /* set speaker gain */
+ SPKR_GET_GAIN_CMD, /* get speaker gain */
+ SPKR_SET_DELAY_CMD, /* set speaker delay */
+ SPKR_GET_DELAY_CMD, /* get speaker delay */
+ SPKR_SET_PDM_MODE,
+ SPKR_SET_PWM_MODE,
+};
+
+struct spkr_config_mode {
+ uint32_t is_right_chan_en;
+ uint32_t is_left_chan_en;
+ uint32_t is_right_left_chan_added;
+ uint32_t is_stereo_en;
+ uint32_t is_usb_with_hpf_20hz;
+ uint32_t is_mux_bypassed;
+ uint32_t is_hpf_en;
+ uint32_t is_sink_curr_from_ref_volt_cir_en;
+};
+
+enum mic_volt {
+ MIC_VOLT_2_00V, /* 2.00 V */
+ MIC_VOLT_1_93V, /* 1.93 V */
+ MIC_VOLT_1_80V, /* 1.80 V */
+ MIC_VOLT_1_73V, /* 1.73 V */
+};
+
+enum ledtype {
+ LED_LCD,
+ LED_KEYPAD,
+};
+
+enum flash_led_mode {
+ FLASH_LED_MODE__MANUAL,
+ FLASH_LED_MODE__DBUS1,
+ FLASH_LED_MODE__DBUS2,
+ FLASH_LED_MODE__DBUS3,
+};
+
+enum flash_led_pol {
+ FLASH_LED_POL__ACTIVE_HIGH,
+ FLASH_LED_POL__ACTIVE_LOW,
+};
+
+enum switch_cmd {
+ OFF_CMD,
+ ON_CMD
+};
+
+enum vreg_lp_id {
+ PM_VREG_LP_MSMA_ID,
+ PM_VREG_LP_MSMP_ID,
+ PM_VREG_LP_MSME1_ID,
+ PM_VREG_LP_GP3_ID,
+ PM_VREG_LP_MSMC_ID,
+ PM_VREG_LP_MSME2_ID,
+ PM_VREG_LP_GP4_ID,
+ PM_VREG_LP_GP1_ID,
+ PM_VREG_LP_RFTX_ID,
+ PM_VREG_LP_RFRX1_ID,
+ PM_VREG_LP_RFRX2_ID,
+ PM_VREG_LP_WLAN_ID,
+ PM_VREG_LP_MMC_ID,
+ PM_VREG_LP_RUIM_ID,
+ PM_VREG_LP_MSMC0_ID,
+ PM_VREG_LP_GP2_ID,
+ PM_VREG_LP_GP5_ID,
+ PM_VREG_LP_GP6_ID,
+ PM_VREG_LP_MPLL_ID,
+ PM_VREG_LP_RFUBM_ID,
+ PM_VREG_LP_RFA_ID,
+ PM_VREG_LP_CDC2_ID,
+ PM_VREG_LP_RFTX2_ID,
+ PM_VREG_LP_USIM_ID,
+ PM_VREG_LP_USB2P6_ID,
+ PM_VREG_LP_TCXO_ID,
+ PM_VREG_LP_USB3P3_ID,
+
+ PM_VREG_LP_MSME_ID = PM_VREG_LP_MSME1_ID,
+ /* backward compatible enums only */
+ PM_VREG_LP_CAM_ID = PM_VREG_LP_GP1_ID,
+ PM_VREG_LP_MDDI_ID = PM_VREG_LP_GP2_ID,
+ PM_VREG_LP_RUIM2_ID = PM_VREG_LP_GP3_ID,
+ PM_VREG_LP_AUX_ID = PM_VREG_LP_GP4_ID,
+ PM_VREG_LP_AUX2_ID = PM_VREG_LP_GP5_ID,
+ PM_VREG_LP_BT_ID = PM_VREG_LP_GP6_ID,
+ PM_VREG_LP_MSMC_LDO_ID = PM_VREG_LP_MSMC_ID,
+ PM_VREG_LP_MSME1_LDO_ID = PM_VREG_LP_MSME1_ID,
+ PM_VREG_LP_MSME2_LDO_ID = PM_VREG_LP_MSME2_ID,
+ PM_VREG_LP_RFA1_ID = PM_VREG_LP_RFRX2_ID,
+ PM_VREG_LP_RFA2_ID = PM_VREG_LP_RFTX2_ID,
+ PM_VREG_LP_XO_ID = PM_VREG_LP_TCXO_ID
+};
+
+enum vreg_id {
+ PM_VREG_MSMA_ID = 0,
+ PM_VREG_MSMP_ID,
+ PM_VREG_MSME1_ID,
+ PM_VREG_MSMC1_ID,
+ PM_VREG_MSMC2_ID,
+ PM_VREG_GP3_ID,
+ PM_VREG_MSME2_ID,
+ PM_VREG_GP4_ID,
+ PM_VREG_GP1_ID,
+ PM_VREG_TCXO_ID,
+ PM_VREG_PA_ID,
+ PM_VREG_RFTX_ID,
+ PM_VREG_RFRX1_ID,
+ PM_VREG_RFRX2_ID,
+ PM_VREG_SYNT_ID,
+ PM_VREG_WLAN_ID,
+ PM_VREG_USB_ID,
+ PM_VREG_BOOST_ID,
+ PM_VREG_MMC_ID,
+ PM_VREG_RUIM_ID,
+ PM_VREG_MSMC0_ID,
+ PM_VREG_GP2_ID,
+ PM_VREG_GP5_ID,
+ PM_VREG_GP6_ID,
+ PM_VREG_RF_ID,
+ PM_VREG_RF_VCO_ID,
+ PM_VREG_MPLL_ID,
+ PM_VREG_S2_ID,
+ PM_VREG_S3_ID,
+ PM_VREG_RFUBM_ID,
+ PM_VREG_NCP_ID,
+ PM_VREG_RF2_ID,
+ PM_VREG_RFA_ID,
+ PM_VREG_CDC2_ID,
+ PM_VREG_RFTX2_ID,
+ PM_VREG_USIM_ID,
+ PM_VREG_USB2P6_ID,
+ PM_VREG_USB3P3_ID,
+ PM_VREG_EXTCDC1_ID,
+ PM_VREG_EXTCDC2_ID,
+
+ /* backward compatible enums only */
+ PM_VREG_MSME_ID = PM_VREG_MSME1_ID,
+ PM_VREG_MSME_BUCK_SMPS_ID = PM_VREG_MSME1_ID,
+ PM_VREG_MSME1_LDO_ID = PM_VREG_MSME1_ID,
+ PM_VREG_MSMC_ID = PM_VREG_MSMC1_ID,
+ PM_VREG_MSMC_LDO_ID = PM_VREG_MSMC1_ID,
+ PM_VREG_MSMC1_BUCK_SMPS_ID = PM_VREG_MSMC1_ID,
+ PM_VREG_MSME2_LDO_ID = PM_VREG_MSME2_ID,
+ PM_VREG_CAM_ID = PM_VREG_GP1_ID,
+ PM_VREG_MDDI_ID = PM_VREG_GP2_ID,
+ PM_VREG_RUIM2_ID = PM_VREG_GP3_ID,
+ PM_VREG_AUX_ID = PM_VREG_GP4_ID,
+ PM_VREG_AUX2_ID = PM_VREG_GP5_ID,
+ PM_VREG_BT_ID = PM_VREG_GP6_ID,
+ PM_VREG_RF1_ID = PM_VREG_RF_ID,
+ PM_VREG_S1_ID = PM_VREG_RF1_ID,
+ PM_VREG_5V_ID = PM_VREG_BOOST_ID,
+ PM_VREG_RFA1_ID = PM_VREG_RFRX2_ID,
+ PM_VREG_RFA2_ID = PM_VREG_RFTX2_ID,
+ PM_VREG_XO_ID = PM_VREG_TCXO_ID
+};
+
+enum vreg_pdown_id {
+ PM_VREG_PDOWN_MSMA_ID,
+ PM_VREG_PDOWN_MSMP_ID,
+ PM_VREG_PDOWN_MSME1_ID,
+ PM_VREG_PDOWN_MSMC1_ID,
+ PM_VREG_PDOWN_MSMC2_ID,
+ PM_VREG_PDOWN_GP3_ID,
+ PM_VREG_PDOWN_MSME2_ID,
+ PM_VREG_PDOWN_GP4_ID,
+ PM_VREG_PDOWN_GP1_ID,
+ PM_VREG_PDOWN_TCXO_ID,
+ PM_VREG_PDOWN_PA_ID,
+ PM_VREG_PDOWN_RFTX_ID,
+ PM_VREG_PDOWN_RFRX1_ID,
+ PM_VREG_PDOWN_RFRX2_ID,
+ PM_VREG_PDOWN_SYNT_ID,
+ PM_VREG_PDOWN_WLAN_ID,
+ PM_VREG_PDOWN_USB_ID,
+ PM_VREG_PDOWN_MMC_ID,
+ PM_VREG_PDOWN_RUIM_ID,
+ PM_VREG_PDOWN_MSMC0_ID,
+ PM_VREG_PDOWN_GP2_ID,
+ PM_VREG_PDOWN_GP5_ID,
+ PM_VREG_PDOWN_GP6_ID,
+ PM_VREG_PDOWN_RF_ID,
+ PM_VREG_PDOWN_RF_VCO_ID,
+ PM_VREG_PDOWN_MPLL_ID,
+ PM_VREG_PDOWN_S2_ID,
+ PM_VREG_PDOWN_S3_ID,
+ PM_VREG_PDOWN_RFUBM_ID,
+ /* new for HAN */
+ PM_VREG_PDOWN_RF1_ID,
+ PM_VREG_PDOWN_RF2_ID,
+ PM_VREG_PDOWN_RFA_ID,
+ PM_VREG_PDOWN_CDC2_ID,
+ PM_VREG_PDOWN_RFTX2_ID,
+ PM_VREG_PDOWN_USIM_ID,
+ PM_VREG_PDOWN_USB2P6_ID,
+ PM_VREG_PDOWN_USB3P3_ID,
+
+ /* backward compatible enums only */
+ PM_VREG_PDOWN_CAM_ID = PM_VREG_PDOWN_GP1_ID,
+ PM_VREG_PDOWN_MDDI_ID = PM_VREG_PDOWN_GP2_ID,
+ PM_VREG_PDOWN_RUIM2_ID = PM_VREG_PDOWN_GP3_ID,
+ PM_VREG_PDOWN_AUX_ID = PM_VREG_PDOWN_GP4_ID,
+ PM_VREG_PDOWN_AUX2_ID = PM_VREG_PDOWN_GP5_ID,
+ PM_VREG_PDOWN_BT_ID = PM_VREG_PDOWN_GP6_ID,
+ PM_VREG_PDOWN_MSME_ID = PM_VREG_PDOWN_MSME1_ID,
+ PM_VREG_PDOWN_MSMC_ID = PM_VREG_PDOWN_MSMC1_ID,
+ PM_VREG_PDOWN_RFA1_ID = PM_VREG_PDOWN_RFRX2_ID,
+ PM_VREG_PDOWN_RFA2_ID = PM_VREG_PDOWN_RFTX2_ID,
+ PM_VREG_PDOWN_XO_ID = PM_VREG_PDOWN_TCXO_ID
+};
+
+enum mpp_which {
+ PM_MPP_1,
+ PM_MPP_2,
+ PM_MPP_3,
+ PM_MPP_4,
+ PM_MPP_5,
+ PM_MPP_6,
+ PM_MPP_7,
+ PM_MPP_8,
+ PM_MPP_9,
+ PM_MPP_10,
+ PM_MPP_11,
+ PM_MPP_12,
+ PM_MPP_13,
+ PM_MPP_14,
+ PM_MPP_15,
+ PM_MPP_16,
+ PM_MPP_17,
+ PM_MPP_18,
+ PM_MPP_19,
+ PM_MPP_20,
+ PM_MPP_21,
+ PM_MPP_22,
+
+ PM_NUM_MPP_HAN = PM_MPP_4 + 1,
+ PM_NUM_MPP_KIP = PM_MPP_4 + 1,
+ PM_NUM_MPP_EPIC = PM_MPP_4 + 1,
+ PM_NUM_MPP_PM7500 = PM_MPP_22 + 1,
+ PM_NUM_MPP_PM6650 = PM_MPP_12 + 1,
+ PM_NUM_MPP_PM6658 = PM_MPP_12 + 1,
+ PM_NUM_MPP_PANORAMIX = PM_MPP_2 + 1,
+ PM_NUM_MPP_PM6640 = PM_NUM_MPP_PANORAMIX,
+ PM_NUM_MPP_PM6620 = PM_NUM_MPP_PANORAMIX
+};
+
+enum mpp_dlogic_level {
+ PM_MPP__DLOGIC__LVL_MSME,
+ PM_MPP__DLOGIC__LVL_MSMP,
+ PM_MPP__DLOGIC__LVL_RUIM,
+ PM_MPP__DLOGIC__LVL_MMC,
+ PM_MPP__DLOGIC__LVL_VDD,
+};
+
+enum mpp_dlogic_in_dbus {
+ PM_MPP__DLOGIC_IN__DBUS_NONE,
+ PM_MPP__DLOGIC_IN__DBUS1,
+ PM_MPP__DLOGIC_IN__DBUS2,
+ PM_MPP__DLOGIC_IN__DBUS3,
+};
+
+enum mpp_dlogic_out_ctrl {
+ PM_MPP__DLOGIC_OUT__CTRL_LOW,
+ PM_MPP__DLOGIC_OUT__CTRL_HIGH,
+ PM_MPP__DLOGIC_OUT__CTRL_MPP,
+ PM_MPP__DLOGIC_OUT__CTRL_NOT_MPP,
+};
+
+enum mpp_i_sink_level {
+ PM_MPP__I_SINK__LEVEL_5mA,
+ PM_MPP__I_SINK__LEVEL_10mA,
+ PM_MPP__I_SINK__LEVEL_15mA,
+ PM_MPP__I_SINK__LEVEL_20mA,
+ PM_MPP__I_SINK__LEVEL_25mA,
+ PM_MPP__I_SINK__LEVEL_30mA,
+ PM_MPP__I_SINK__LEVEL_35mA,
+ PM_MPP__I_SINK__LEVEL_40mA,
+};
+
+enum mpp_i_sink_switch {
+ PM_MPP__I_SINK__SWITCH_DIS,
+ PM_MPP__I_SINK__SWITCH_ENA,
+ PM_MPP__I_SINK__SWITCH_ENA_IF_MPP_HIGH,
+ PM_MPP__I_SINK__SWITCH_ENA_IF_MPP_LOW,
+};
+
+enum pm_vib_mot_mode {
+ PM_VIB_MOT_MODE__MANUAL,
+ PM_VIB_MOT_MODE__DBUS1,
+ PM_VIB_MOT_MODE__DBUS2,
+ PM_VIB_MOT_MODE__DBUS3,
+};
+
+enum pm_vib_mot_pol {
+ PM_VIB_MOT_POL__ACTIVE_HIGH,
+ PM_VIB_MOT_POL__ACTIVE_LOW,
+};
+
+struct rtc_time {
+ uint sec;
+};
+
+enum rtc_alarm {
+ PM_RTC_ALARM_1,
+};
+
+enum hsed_controller {
+ PM_HSED_CONTROLLER_0,
+ PM_HSED_CONTROLLER_1,
+ PM_HSED_CONTROLLER_2,
+};
+
+enum hsed_switch {
+ PM_HSED_SC_SWITCH_TYPE,
+ PM_HSED_OC_SWITCH_TYPE,
+};
+
+enum hsed_enable {
+ PM_HSED_ENABLE_OFF,
+ PM_HSED_ENABLE_TCXO,
+ PM_HSED_ENABLE_PWM_TCXO,
+ PM_HSED_ENABLE_ALWAYS,
+};
+
+enum hsed_hyst_pre_div {
+ PM_HSED_HYST_PRE_DIV_1,
+ PM_HSED_HYST_PRE_DIV_2,
+ PM_HSED_HYST_PRE_DIV_4,
+ PM_HSED_HYST_PRE_DIV_8,
+ PM_HSED_HYST_PRE_DIV_16,
+ PM_HSED_HYST_PRE_DIV_32,
+ PM_HSED_HYST_PRE_DIV_64,
+ PM_HSED_HYST_PRE_DIV_128,
+};
+
+enum hsed_hyst_time {
+ PM_HSED_HYST_TIME_1_CLK_CYCLES,
+ PM_HSED_HYST_TIME_2_CLK_CYCLES,
+ PM_HSED_HYST_TIME_3_CLK_CYCLES,
+ PM_HSED_HYST_TIME_4_CLK_CYCLES,
+ PM_HSED_HYST_TIME_5_CLK_CYCLES,
+ PM_HSED_HYST_TIME_6_CLK_CYCLES,
+ PM_HSED_HYST_TIME_7_CLK_CYCLES,
+ PM_HSED_HYST_TIME_8_CLK_CYCLES,
+ PM_HSED_HYST_TIME_9_CLK_CYCLES,
+ PM_HSED_HYST_TIME_10_CLK_CYCLES,
+ PM_HSED_HYST_TIME_11_CLK_CYCLES,
+ PM_HSED_HYST_TIME_12_CLK_CYCLES,
+ PM_HSED_HYST_TIME_13_CLK_CYCLES,
+ PM_HSED_HYST_TIME_14_CLK_CYCLES,
+ PM_HSED_HYST_TIME_15_CLK_CYCLES,
+ PM_HSED_HYST_TIME_16_CLK_CYCLES,
+};
+
+enum hsed_period_pre_div {
+ PM_HSED_PERIOD_PRE_DIV_2,
+ PM_HSED_PERIOD_PRE_DIV_4,
+ PM_HSED_PERIOD_PRE_DIV_8,
+ PM_HSED_PERIOD_PRE_DIV_16,
+ PM_HSED_PERIOD_PRE_DIV_32,
+ PM_HSED_PERIOD_PRE_DIV_64,
+ PM_HSED_PERIOD_PRE_DIV_128,
+ PM_HSED_PERIOD_PRE_DIV_256,
+};
+
+enum hsed_period_time {
+ PM_HSED_PERIOD_TIME_1_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_2_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_3_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_4_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_5_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_6_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_7_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_8_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_9_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_10_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_11_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_12_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_13_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_14_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_15_CLK_CYCLES,
+ PM_HSED_PERIOD_TIME_16_CLK_CYCLES,
+};
+
+int pmic_lp_mode_control(enum switch_cmd cmd, enum vreg_lp_id id);
+int pmic_vreg_set_level(enum vreg_id vreg, int level);
+int pmic_vreg_pull_down_switch(enum switch_cmd cmd, enum vreg_pdown_id id);
+int pmic_secure_mpp_control_digital_output(enum mpp_which which,
+ enum mpp_dlogic_level level, enum mpp_dlogic_out_ctrl out);
+int pmic_secure_mpp_config_i_sink(enum mpp_which which,
+ enum mpp_i_sink_level level, enum mpp_i_sink_switch onoff);
+int pmic_secure_mpp_config_digital_input(enum mpp_which which,
+ enum mpp_dlogic_level level, enum mpp_dlogic_in_dbus dbus);
+int pmic_rtc_start(struct rtc_time *time);
+int pmic_rtc_stop(void);
+int pmic_rtc_get_time(struct rtc_time *time);
+int pmic_rtc_enable_alarm(enum rtc_alarm alarm,
+ struct rtc_time *time);
+int pmic_rtc_disable_alarm(enum rtc_alarm alarm);
+int pmic_rtc_get_alarm_time(enum rtc_alarm alarm,
+ struct rtc_time *time);
+int pmic_rtc_get_alarm_status(uint *status);
+int pmic_rtc_set_time_adjust(uint adjust);
+int pmic_rtc_get_time_adjust(uint *adjust);
+int pmic_speaker_cmd(const enum spkr_cmd cmd);
+int pmic_set_spkr_configuration(struct spkr_config_mode *cfg);
+int pmic_get_spkr_configuration(struct spkr_config_mode *cfg);
+int pmic_spkr_en_right_chan(uint enable);
+int pmic_spkr_is_right_chan_en(uint *enabled);
+int pmic_spkr_en_left_chan(uint enable);
+int pmic_spkr_is_left_chan_en(uint *enabled);
+int pmic_spkr_en(enum spkr_left_right left_right, uint enabled);
+int pmic_spkr_is_en(enum spkr_left_right left_right, uint *enabled);
+int pmic_spkr_set_gain(enum spkr_left_right left_right, enum spkr_gain gain);
+int pmic_spkr_get_gain(enum spkr_left_right left_right, enum spkr_gain *gain);
+int pmic_set_speaker_gain(enum spkr_gain gain);
+int pmic_set_speaker_delay(enum spkr_dly delay);
+int pmic_speaker_1k6_zin_enable(uint enable);
+int pmic_spkr_set_mux_hpf_corner_freq(enum spkr_hpf_corner_freq freq);
+int pmic_spkr_get_mux_hpf_corner_freq(enum spkr_hpf_corner_freq *freq);
+int pmic_spkr_select_usb_with_hpf_20hz(uint enable);
+int pmic_spkr_is_usb_with_hpf_20hz(uint *enabled);
+int pmic_spkr_bypass_mux(uint enable);
+int pmic_spkr_is_mux_bypassed(uint *enabled);
+int pmic_spkr_en_hpf(uint enable);
+int pmic_spkr_is_hpf_en(uint *enabled);
+int pmic_spkr_en_sink_curr_from_ref_volt_cir(uint enable);
+int pmic_spkr_is_sink_curr_from_ref_volt_cir_en(uint *enabled);
+int pmic_spkr_set_delay(enum spkr_left_right left_right, enum spkr_dly delay);
+int pmic_spkr_get_delay(enum spkr_left_right left_right, enum spkr_dly *delay);
+int pmic_spkr_en_mute(enum spkr_left_right left_right, uint enabled);
+int pmic_spkr_is_mute_en(enum spkr_left_right left_right, uint *enabled);
+int pmic_mic_en(uint enable);
+int pmic_mic_is_en(uint *enabled);
+int pmic_mic_set_volt(enum mic_volt vol);
+int pmic_mic_get_volt(enum mic_volt *voltage);
+int pmic_set_led_intensity(enum ledtype type, int level);
+int pmic_flash_led_set_current(uint16_t milliamps);
+int pmic_flash_led_set_mode(enum flash_led_mode mode);
+int pmic_flash_led_set_polarity(enum flash_led_pol pol);
+int pmic_spkr_add_right_left_chan(uint enable);
+int pmic_spkr_is_right_left_chan_added(uint *enabled);
+int pmic_spkr_en_stereo(uint enable);
+int pmic_spkr_is_stereo_en(uint *enabled);
+int pmic_vib_mot_set_volt(uint vol);
+int pmic_vib_mot_set_mode(enum pm_vib_mot_mode mode);
+int pmic_vib_mot_set_polarity(enum pm_vib_mot_pol pol);
+int pmic_vid_en(uint enable);
+int pmic_vid_is_en(uint *enabled);
+int pmic_vid_load_detect_en(uint enable);
+
+int pmic_hsed_set_period(
+ enum hsed_controller controller,
+ enum hsed_period_pre_div period_pre_div,
+ enum hsed_period_time period_time
+);
+
+int pmic_hsed_set_hysteresis(
+ enum hsed_controller controller,
+ enum hsed_hyst_pre_div hyst_pre_div,
+ enum hsed_hyst_time hyst_time
+);
+
+int pmic_hsed_set_current_threshold(
+ enum hsed_controller controller,
+ enum hsed_switch switch_hsed,
+ uint32_t current_threshold
+);
+
+int pmic_hsed_enable(
+ enum hsed_controller controller,
+ enum hsed_enable enable
+);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/remote_spinlock.h b/arch/arm/mach-msm/include/mach/remote_spinlock.h
new file mode 100644
index 000000000000..8d8a5ddb8a4e
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/remote_spinlock.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * Part of this this code is based on the standard ARM spinlock
+ * implementation (asm/spinlock.h) found in the 2.6.29 kernel.
+ */
+
+#ifndef __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+#define __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+
+#include <linux/types.h>
+
+typedef struct {
+ volatile uint32_t lock;
+} raw_remote_spinlock_t;
+
+typedef raw_remote_spinlock_t *_remote_spinlock_t;
+
+#define remote_spin_lock_id_t uint32_t
+
+static inline void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: ldrex %0, [%1]\n"
+" teq %0, #0\n"
+" strexeq %0, %2, [%1]\n"
+" teqeq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (1)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ smp_mb();
+
+ __asm__ __volatile__(
+" str %1, [%0]\n"
+ :
+ : "r" (&lock->lock), "r" (0)
+ : "cc");
+}
+
+static inline void __raw_remote_swp_spin_lock(raw_remote_spinlock_t *lock)
+{
+ unsigned long tmp;
+
+ __asm__ __volatile__(
+"1: swp %0, %2, [%1]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp)
+ : "r" (&lock->lock), "r" (1)
+ : "cc");
+
+ smp_mb();
+}
+
+static inline void __raw_remote_swp_spin_unlock(raw_remote_spinlock_t *lock)
+{
+ smp_mb();
+
+ __asm__ __volatile__(
+" str %1, [%0]"
+ :
+ : "r" (&lock->lock), "r" (0)
+ : "cc");
+}
+
+
+int _remote_spin_lock_init(remote_spin_lock_id_t id, _remote_spinlock_t *lock);
+
+/* Only use SWP-based spinlocks for ARM11 apps processors where the LDREX/STREX
+ * instructions are unable to lock shared memory for exclusive access. */
+#if defined(CONFIG_ARCH_MSM_ARM11)
+#define _remote_spin_lock(lock) __raw_remote_swp_spin_lock(*lock)
+#define _remote_spin_unlock(lock) __raw_remote_swp_spin_unlock(*lock)
+#else
+#define _remote_spin_lock(lock) __raw_remote_ex_spin_lock(*lock)
+#define _remote_spin_unlock(lock) __raw_remote_ex_spin_unlock(*lock)
+#endif /* CONFIG_ARCH_MSM_ARM11 */
+
+#endif /* __ASM__ARCH_QC_REMOTE_SPINLOCK_H */
+
diff --git a/arch/arm/mach-msm/include/mach/rpc_hsusb.h b/arch/arm/mach-msm/include/mach/rpc_hsusb.h
new file mode 100644
index 000000000000..9ef304be2189
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/rpc_hsusb.h
@@ -0,0 +1,45 @@
+/* linux/include/mach/rpc_hsusb.h
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#ifndef __ASM_ARCH_MSM_RPC_HSUSB_H
+#define __ASM_ARCH_MSM_RPC_HSUSB_H
+
+#include <mach/msm_rpcrouter.h>
+
+int msm_hsusb_rpc_connect(void);
+int msm_hsusb_phy_reset(void);
+int msm_hsusb_vbus_powerup(void);
+int msm_hsusb_vbus_shutdown(void);
+int msm_hsusb_send_productID(uint32_t product_id);
+int msm_hsusb_send_serial_number(char *serial_number);
+int msm_hsusb_is_serial_num_null(uint32_t val);
+int msm_hsusb_reset_rework_installed(void);
+int msm_hsusb_enable_pmic_ulpidata0(void);
+int msm_hsusb_disable_pmic_ulpidata0(void);
+int msm_hsusb_rpc_close(void);
+
+int msm_chg_rpc_connect(void);
+int msm_chg_usb_charger_connected(uint32_t type);
+int msm_chg_usb_i_is_available(uint32_t sample);
+int msm_chg_usb_i_is_not_available(void);
+int msm_chg_usb_charger_disconnected(void);
+int msm_chg_rpc_close(void);
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/sirc.h b/arch/arm/mach-msm/include/mach/sirc.h
new file mode 100644
index 000000000000..133f37ef1546
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/sirc.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __ASM_ARCH_MSM_SIRC_H
+#define __ASM_ARCH_MSM_SIRC_H
+
+struct sirc_regs_t {
+ void *int_enable;
+ void *int_enable_clear;
+ void *int_enable_set;
+ void *int_type;
+ void *int_polarity;
+ void *int_clear;
+};
+
+struct sirc_cascade_regs {
+ void *int_status;
+ unsigned int cascade_irq;
+};
+
+void msm_init_sirc(void);
+void msm_sirc_enter_sleep(void);
+void msm_sirc_exit_sleep(void);
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+
+#include <mach/msm_iomap.h>
+
+/*
+ * Secondary interrupt controller interrupts
+ */
+
+#define FIRST_SIRC_IRQ (NR_MSM_IRQS + NR_GPIO_IRQS)
+
+#define INT_UART1 (FIRST_SIRC_IRQ + 0)
+#define INT_UART2 (FIRST_SIRC_IRQ + 1)
+#define INT_UART3 (FIRST_SIRC_IRQ + 2)
+#define INT_UART1_RX (FIRST_SIRC_IRQ + 3)
+#define INT_UART2_RX (FIRST_SIRC_IRQ + 4)
+#define INT_UART3_RX (FIRST_SIRC_IRQ + 5)
+#define INT_SPI_INPUT (FIRST_SIRC_IRQ + 6)
+#define INT_SPI_OUTPUT (FIRST_SIRC_IRQ + 7)
+#define INT_SPI_ERROR (FIRST_SIRC_IRQ + 8)
+#define INT_GPIO_GROUP1 (FIRST_SIRC_IRQ + 9)
+#define INT_GPIO_GROUP2 (FIRST_SIRC_IRQ + 10)
+#define INT_GPIO_GROUP1_SECURE (FIRST_SIRC_IRQ + 11)
+#define INT_GPIO_GROUP2_SECURE (FIRST_SIRC_IRQ + 12)
+#define INT_AVS_SVIC (FIRST_SIRC_IRQ + 13)
+#define INT_AVS_REQ_UP (FIRST_SIRC_IRQ + 14)
+#define INT_AVS_REQ_DOWN (FIRST_SIRC_IRQ + 15)
+#define INT_PBUS_ERR (FIRST_SIRC_IRQ + 16)
+#define INT_AXI_ERR (FIRST_SIRC_IRQ + 17)
+#define INT_SMI_ERR (FIRST_SIRC_IRQ + 18)
+#define INT_EBI1_ERR (FIRST_SIRC_IRQ + 19)
+#define INT_IMEM_ERR (FIRST_SIRC_IRQ + 20)
+#define INT_SC_TEMP_SENSOR (FIRST_SIRC_IRQ + 21)
+#define INT_TV_ENC (FIRST_SIRC_IRQ + 22)
+
+#define NR_SIRC_IRQS 23
+#define SIRC_MASK 0x007FFFFF
+#define LAST_SIRC_IRQ (FIRST_SIRC_IRQ + NR_SIRC_IRQS - 1)
+
+#define SPSS_SIRC_INT_SELECT (MSM_SIRC_BASE + 0x00)
+#define SPSS_SIRC_INT_ENABLE (MSM_SIRC_BASE + 0x04)
+#define SPSS_SIRC_INT_ENABLE_CLEAR (MSM_SIRC_BASE + 0x08)
+#define SPSS_SIRC_INT_ENABLE_SET (MSM_SIRC_BASE + 0x0C)
+#define SPSS_SIRC_INT_TYPE (MSM_SIRC_BASE + 0x10)
+#define SPSS_SIRC_INT_POLARITY (MSM_SIRC_BASE + 0x14)
+#define SPSS_SIRC_SECURITY (MSM_SIRC_BASE + 0x18)
+#define SPSS_SIRC_IRQ_STATUS (MSM_SIRC_BASE + 0x1C)
+#define SPSS_SIRC_IRQ1_STATUS (MSM_SIRC_BASE + 0x20)
+#define SPSS_SIRC_RAW_STATUS (MSM_SIRC_BASE + 0x24)
+#define SPSS_SIRC_INT_CLEAR (MSM_SIRC_BASE + 0x28)
+#define SPSS_SIRC_SOFT_INT (MSM_SIRC_BASE + 0x2C)
+
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/include/mach/smem_log.h b/arch/arm/mach-msm/include/mach/smem_log.h
new file mode 100644
index 000000000000..bd94bacc4809
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/smem_log.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define SMEM_LOG_BASE 0x30
+
+#define SMIOC_SETMODE _IOW(SMEM_LOG_BASE, 1, int)
+#define SMIOC_SETLOG _IOW(SMEM_LOG_BASE, 2, int)
+
+#define SMIOC_TEXT 0x00000001
+#define SMIOC_BINARY 0x00000002
+#define SMIOC_LOG 0x00000003
+#define SMIOC_STATIC_LOG 0x00000004
+
+/* Event indentifier format:
+ * bit 31-28 is processor ID 8 => apps, 4 => Q6, 0 => modem
+ * bits 27-16 are subsystem id (event base)
+ * bits 15-0 are event id
+ */
+
+#define PROC 0xF0000000
+#define SUB 0x0FFF0000
+#define ID 0x0000FFFF
+
+#define SMEM_LOG_PROC_ID_MODEM 0x00000000
+#define SMEM_LOG_PROC_ID_Q6 0x40000000
+#define SMEM_LOG_PROC_ID_APPS 0x80000000
+
+#define SMEM_LOG_CONT 0x10000000
+
+#define SMEM_LOG_DEBUG_EVENT_BASE 0x00000000
+#define SMEM_LOG_ONCRPC_EVENT_BASE 0x00010000
+#define SMEM_LOG_SMEM_EVENT_BASE 0x00020000
+#define SMEM_LOG_TMC_EVENT_BASE 0x00030000
+#define SMEM_LOG_TIMETICK_EVENT_BASE 0x00040000
+#define SMEM_LOG_DEM_EVENT_BASE 0x00050000
+#define SMEM_LOG_ERROR_EVENT_BASE 0x00060000
+#define SMEM_LOG_DCVS_EVENT_BASE 0x00070000
+#define SMEM_LOG_SLEEP_EVENT_BASE 0x00080000
+#define SMEM_LOG_RPC_ROUTER_EVENT_BASE 0x00090000
+#if defined(CONFIG_MSM_N_WAY_SMSM)
+#define DEM_SMSM_ISR (SMEM_LOG_DEM_EVENT_BASE + 0x1)
+#define DEM_STATE_CHANGE (SMEM_LOG_DEM_EVENT_BASE + 0x2)
+#define DEM_STATE_MACHINE_ENTER (SMEM_LOG_DEM_EVENT_BASE + 0x3)
+#define DEM_ENTER_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x4)
+#define DEM_END_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x5)
+#define DEM_SETUP_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x6)
+#define DEM_SETUP_POWER_COLLAPSE (SMEM_LOG_DEM_EVENT_BASE + 0x7)
+#define DEM_SETUP_SUSPEND (SMEM_LOG_DEM_EVENT_BASE + 0x8)
+#define DEM_EARLY_EXIT (SMEM_LOG_DEM_EVENT_BASE + 0x9)
+#define DEM_WAKEUP_REASON (SMEM_LOG_DEM_EVENT_BASE + 0xA)
+#define DEM_DETECT_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0xB)
+#define DEM_DETECT_RESET (SMEM_LOG_DEM_EVENT_BASE + 0xC)
+#define DEM_DETECT_SLEEPEXIT (SMEM_LOG_DEM_EVENT_BASE + 0xD)
+#define DEM_DETECT_RUN (SMEM_LOG_DEM_EVENT_BASE + 0xE)
+#define DEM_APPS_SWFI (SMEM_LOG_DEM_EVENT_BASE + 0xF)
+#define DEM_SEND_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0x10)
+#define DEM_ASSERT_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x11)
+#define DEM_NEGATE_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x12)
+#define DEM_PROC_COMM_CMD (SMEM_LOG_DEM_EVENT_BASE + 0x13)
+#define DEM_REMOVE_PROC_PWR (SMEM_LOG_DEM_EVENT_BASE + 0x14)
+#define DEM_RESTORE_PROC_PWR (SMEM_LOG_DEM_EVENT_BASE + 0x15)
+#define DEM_SMI_CLK_DISABLED (SMEM_LOG_DEM_EVENT_BASE + 0x16)
+#define DEM_SMI_CLK_ENABLED (SMEM_LOG_DEM_EVENT_BASE + 0x17)
+#define DEM_MAO_INTS (SMEM_LOG_DEM_EVENT_BASE + 0x18)
+#define DEM_APPS_WAKEUP_INT (SMEM_LOG_DEM_EVENT_BASE + 0x19)
+#define DEM_PROC_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0x1A)
+#define DEM_PROC_POWERUP (SMEM_LOG_DEM_EVENT_BASE + 0x1B)
+#define DEM_TIMER_EXPIRED (SMEM_LOG_DEM_EVENT_BASE + 0x1C)
+#define DEM_SEND_BATTERY_INFO (SMEM_LOG_DEM_EVENT_BASE + 0x1D)
+#define DEM_REMOTE_PWR_CB (SMEM_LOG_DEM_EVENT_BASE + 0x24)
+#define DEM_TIME_SYNC_START (SMEM_LOG_DEM_EVENT_BASE + 0x1E)
+#define DEM_TIME_SYNC_SEND_VALUE (SMEM_LOG_DEM_EVENT_BASE + 0x1F)
+#define DEM_TIME_SYNC_DONE (SMEM_LOG_DEM_EVENT_BASE + 0x20)
+#define DEM_TIME_SYNC_REQUEST (SMEM_LOG_DEM_EVENT_BASE + 0x21)
+#define DEM_TIME_SYNC_POLL (SMEM_LOG_DEM_EVENT_BASE + 0x22)
+#define DEM_TIME_SYNC_INIT (SMEM_LOG_DEM_EVENT_BASE + 0x23)
+#define DEM_INIT (SMEM_LOG_DEM_EVENT_BASE + 0x25)
+#else
+#define DEM_NO_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 1)
+#define DEM_INSUF_TIME (SMEM_LOG_DEM_EVENT_BASE + 2)
+#define DEMAPPS_ENTER_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 3)
+#define DEMAPPS_DETECT_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 4)
+#define DEMAPPS_END_APPS_TCXO (SMEM_LOG_DEM_EVENT_BASE + 5)
+#define DEMAPPS_ENTER_SLEEPEXIT (SMEM_LOG_DEM_EVENT_BASE + 6)
+#define DEMAPPS_END_APPS_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 7)
+#define DEMAPPS_SETUP_APPS_PWRCLPS (SMEM_LOG_DEM_EVENT_BASE + 8)
+#define DEMAPPS_PWRCLPS_EARLY_EXIT (SMEM_LOG_DEM_EVENT_BASE + 9)
+#define DEMMOD_SEND_WAKEUP (SMEM_LOG_DEM_EVENT_BASE + 0xA)
+#define DEMMOD_NO_APPS_VOTE (SMEM_LOG_DEM_EVENT_BASE + 0xB)
+#define DEMMOD_NO_TCXO_SLEEP (SMEM_LOG_DEM_EVENT_BASE + 0xC)
+#define DEMMOD_BT_CLOCK (SMEM_LOG_DEM_EVENT_BASE + 0xD)
+#define DEMMOD_UART_CLOCK (SMEM_LOG_DEM_EVENT_BASE + 0xE)
+#define DEMMOD_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0xF)
+#define DEM_SLEEP_INFO (SMEM_LOG_DEM_EVENT_BASE + 0x10)
+#define DEMMOD_TCXO_END (SMEM_LOG_DEM_EVENT_BASE + 0x11)
+#define DEMMOD_END_SLEEP_SIG (SMEM_LOG_DEM_EVENT_BASE + 0x12)
+#define DEMMOD_SETUP_APPSSLEEP (SMEM_LOG_DEM_EVENT_BASE + 0x13)
+#define DEMMOD_ENTER_TCXO (SMEM_LOG_DEM_EVENT_BASE + 0x14)
+#define DEMMOD_WAKE_APPS (SMEM_LOG_DEM_EVENT_BASE + 0x15)
+#define DEMMOD_POWER_COLLAPSE_APPS (SMEM_LOG_DEM_EVENT_BASE + 0x16)
+#define DEMMOD_RESTORE_APPS_PWR (SMEM_LOG_DEM_EVENT_BASE + 0x17)
+#define DEMAPPS_ASSERT_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x18)
+#define DEMAPPS_RESTART_START_TIMER (SMEM_LOG_DEM_EVENT_BASE + 0x19)
+#define DEMAPPS_ENTER_RUN (SMEM_LOG_DEM_EVENT_BASE + 0x1A)
+#define DEMMOD_MAO_INTS (SMEM_LOG_DEM_EVENT_BASE + 0x1B)
+#define DEMMOD_POWERUP_APPS_CALLED (SMEM_LOG_DEM_EVENT_BASE + 0x1C)
+#define DEMMOD_PC_TIMER_EXPIRED (SMEM_LOG_DEM_EVENT_BASE + 0x1D)
+#define DEM_DETECT_SLEEPEXIT (SMEM_LOG_DEM_EVENT_BASE + 0x1E)
+#define DEM_DETECT_RUN (SMEM_LOG_DEM_EVENT_BASE + 0x1F)
+#define DEM_SET_APPS_TIMER (SMEM_LOG_DEM_EVENT_BASE + 0x20)
+#define DEM_NEGATE_OKTS (SMEM_LOG_DEM_EVENT_BASE + 0x21)
+#define DEMMOD_APPS_WAKEUP_INT (SMEM_LOG_DEM_EVENT_BASE + 0x22)
+#define DEMMOD_APPS_SWFI (SMEM_LOG_DEM_EVENT_BASE + 0x23)
+#define DEM_SEND_BATTERY_INFO (SMEM_LOG_DEM_EVENT_BASE + 0x24)
+#define DEM_SMI_CLK_DISABLED (SMEM_LOG_DEM_EVENT_BASE + 0x25)
+#define DEM_SMI_CLK_ENABLED (SMEM_LOG_DEM_EVENT_BASE + 0x26)
+#define DEMAPPS_SETUP_APPS_SUSPEND (SMEM_LOG_DEM_EVENT_BASE + 0x27)
+#define DEM_RPC_EARLY_EXIT (SMEM_LOG_DEM_EVENT_BASE + 0x28)
+#define DEMAPPS_WAKEUP_REASON (SMEM_LOG_DEM_EVENT_BASE + 0x29)
+#define DEM_INIT (SMEM_LOG_DEM_EVENT_BASE + 0x30)
+#endif
+#define DEMMOD_UMTS_BASE (SMEM_LOG_DEM_EVENT_BASE + 0x8000)
+#define DEMMOD_GL1_GO_TO_SLEEP (DEMMOD_UMTS_BASE + 0x0000)
+#define DEMMOD_GL1_SLEEP_START (DEMMOD_UMTS_BASE + 0x0001)
+#define DEMMOD_GL1_AFTER_GSM_CLK_ON (DEMMOD_UMTS_BASE + 0x0002)
+#define DEMMOD_GL1_BEFORE_RF_ON (DEMMOD_UMTS_BASE + 0x0003)
+#define DEMMOD_GL1_AFTER_RF_ON (DEMMOD_UMTS_BASE + 0x0004)
+#define DEMMOD_GL1_FRAME_TICK (DEMMOD_UMTS_BASE + 0x0005)
+#define DEMMOD_GL1_WCDMA_START (DEMMOD_UMTS_BASE + 0x0006)
+#define DEMMOD_GL1_WCDMA_ENDING (DEMMOD_UMTS_BASE + 0x0007)
+#define DEMMOD_UMTS_NOT_OKTS (DEMMOD_UMTS_BASE + 0x0008)
+#define DEMMOD_UMTS_START_TCXO_SHUTDOWN (DEMMOD_UMTS_BASE + 0x0009)
+#define DEMMOD_UMTS_END_TCXO_SHUTDOWN (DEMMOD_UMTS_BASE + 0x000A)
+#define DEMMOD_UMTS_START_ARM_HALT (DEMMOD_UMTS_BASE + 0x000B)
+#define DEMMOD_UMTS_END_ARM_HALT (DEMMOD_UMTS_BASE + 0x000C)
+#define DEMMOD_UMTS_NEXT_WAKEUP_SCLK (DEMMOD_UMTS_BASE + 0x000D)
+#define TIME_REMOTE_LOG_EVENT_START (SMEM_LOG_TIMETICK_EVENT_BASE + 0)
+#define TIME_REMOTE_LOG_EVENT_GOTO_WAIT (SMEM_LOG_TIMETICK_EVENT_BASE + 1)
+#define TIME_REMOTE_LOG_EVENT_GOTO_INIT (SMEM_LOG_TIMETICK_EVENT_BASE + 2)
+#define ERR_ERROR_FATAL (SMEM_LOG_ERROR_EVENT_BASE + 1)
+#define ERR_ERROR_FATAL_TASK (SMEM_LOG_ERROR_EVENT_BASE + 2)
+#define DCVSAPPS_LOG_IDLE (SMEM_LOG_DCVS_EVENT_BASE + 0x0)
+#define DCVSAPPS_LOG_ERR (SMEM_LOG_DCVS_EVENT_BASE + 0x1)
+#define DCVSAPPS_LOG_CHG (SMEM_LOG_DCVS_EVENT_BASE + 0x2)
+#define DCVSAPPS_LOG_REG (SMEM_LOG_DCVS_EVENT_BASE + 0x3)
+#define DCVSAPPS_LOG_DEREG (SMEM_LOG_DCVS_EVENT_BASE + 0x4)
+#define SMEM_LOG_EVENT_CB (SMEM_LOG_SMEM_EVENT_BASE + 0)
+#define SMEM_LOG_EVENT_START (SMEM_LOG_SMEM_EVENT_BASE + 1)
+#define SMEM_LOG_EVENT_INIT (SMEM_LOG_SMEM_EVENT_BASE + 2)
+#define SMEM_LOG_EVENT_RUNNING (SMEM_LOG_SMEM_EVENT_BASE + 3)
+#define SMEM_LOG_EVENT_STOP (SMEM_LOG_SMEM_EVENT_BASE + 4)
+#define SMEM_LOG_EVENT_RESTART (SMEM_LOG_SMEM_EVENT_BASE + 5)
+#define SMEM_LOG_EVENT_SS (SMEM_LOG_SMEM_EVENT_BASE + 6)
+#define SMEM_LOG_EVENT_READ (SMEM_LOG_SMEM_EVENT_BASE + 7)
+#define SMEM_LOG_EVENT_WRITE (SMEM_LOG_SMEM_EVENT_BASE + 8)
+#define SMEM_LOG_EVENT_SIGS1 (SMEM_LOG_SMEM_EVENT_BASE + 9)
+#define SMEM_LOG_EVENT_SIGS2 (SMEM_LOG_SMEM_EVENT_BASE + 10)
+#define SMEM_LOG_EVENT_WRITE_DM (SMEM_LOG_SMEM_EVENT_BASE + 11)
+#define SMEM_LOG_EVENT_READ_DM (SMEM_LOG_SMEM_EVENT_BASE + 12)
+#define SMEM_LOG_EVENT_SKIP_DM (SMEM_LOG_SMEM_EVENT_BASE + 13)
+#define SMEM_LOG_EVENT_STOP_DM (SMEM_LOG_SMEM_EVENT_BASE + 14)
+#define SMEM_LOG_EVENT_ISR (SMEM_LOG_SMEM_EVENT_BASE + 15)
+#define SMEM_LOG_EVENT_TASK (SMEM_LOG_SMEM_EVENT_BASE + 16)
+#define SMEM_LOG_EVENT_RS (SMEM_LOG_SMEM_EVENT_BASE + 17)
+#define ONCRPC_LOG_EVENT_SMD_WAIT (SMEM_LOG_ONCRPC_EVENT_BASE + 0)
+#define ONCRPC_LOG_EVENT_RPC_WAIT (SMEM_LOG_ONCRPC_EVENT_BASE + 1)
+#define ONCRPC_LOG_EVENT_RPC_BOTH_WAIT (SMEM_LOG_ONCRPC_EVENT_BASE + 2)
+#define ONCRPC_LOG_EVENT_RPC_INIT (SMEM_LOG_ONCRPC_EVENT_BASE + 3)
+#define ONCRPC_LOG_EVENT_RUNNING (SMEM_LOG_ONCRPC_EVENT_BASE + 4)
+#define ONCRPC_LOG_EVENT_APIS_INITED (SMEM_LOG_ONCRPC_EVENT_BASE + 5)
+#define ONCRPC_LOG_EVENT_AMSS_RESET (SMEM_LOG_ONCRPC_EVENT_BASE + 6)
+#define ONCRPC_LOG_EVENT_SMD_RESET (SMEM_LOG_ONCRPC_EVENT_BASE + 7)
+#define ONCRPC_LOG_EVENT_ONCRPC_RESET (SMEM_LOG_ONCRPC_EVENT_BASE + 8)
+#define ONCRPC_LOG_EVENT_CB (SMEM_LOG_ONCRPC_EVENT_BASE + 9)
+#define ONCRPC_LOG_EVENT_STD_CALL (SMEM_LOG_ONCRPC_EVENT_BASE + 10)
+#define ONCRPC_LOG_EVENT_STD_REPLY (SMEM_LOG_ONCRPC_EVENT_BASE + 11)
+#define ONCRPC_LOG_EVENT_STD_CALL_ASYNC (SMEM_LOG_ONCRPC_EVENT_BASE + 12)
+#define NO_SLEEP_OLD (SMEM_LOG_SLEEP_EVENT_BASE + 0x1)
+#define INSUF_TIME (SMEM_LOG_SLEEP_EVENT_BASE + 0x2)
+#define MOD_UART_CLOCK (SMEM_LOG_SLEEP_EVENT_BASE + 0x3)
+#define SLEEP_INFO (SMEM_LOG_SLEEP_EVENT_BASE + 0x4)
+#define MOD_TCXO_END (SMEM_LOG_SLEEP_EVENT_BASE + 0x5)
+#define MOD_ENTER_TCXO (SMEM_LOG_SLEEP_EVENT_BASE + 0x6)
+#define NO_SLEEP_NEW (SMEM_LOG_SLEEP_EVENT_BASE + 0x7)
+#define RPC_ROUTER_LOG_EVENT_UNKNOWN (SMEM_LOG_RPC_ROUTER_EVENT_BASE)
+#define RPC_ROUTER_LOG_EVENT_MSG_READ (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 1)
+#define RPC_ROUTER_LOG_EVENT_MSG_WRITTEN (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 2)
+#define RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 3)
+#define RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 4)
+#define RPC_ROUTER_LOG_EVENT_MID_READ (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 5)
+#define RPC_ROUTER_LOG_EVENT_MID_WRITTEN (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 6)
+#define RPC_ROUTER_LOG_EVENT_MID_CFM_REQ (SMEM_LOG_RPC_ROUTER_EVENT_BASE + 7)
+
+#ifdef CONFIG_MSM_SMD_LOGGING
+void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3);
+void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6);
+void smem_log_event_to_static(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3);
+void smem_log_event6_to_static(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6);
+#else
+void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3) { }
+void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6) { }
+void smem_log_event_to_static(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3) { }
+void smem_log_event6_to_static(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6) { }
+#endif
+
diff --git a/arch/arm/mach-msm/include/mach/system.h b/arch/arm/mach-msm/include/mach/system.h
index 574ccc493daf..d2e83f42ba16 100644
--- a/arch/arm/mach-msm/include/mach/system.h
+++ b/arch/arm/mach-msm/include/mach/system.h
@@ -21,3 +21,8 @@ static inline void arch_reset(char mode, const char *cmd)
{
for (;;) ; /* depends on IPC w/ other core */
}
+
+/* low level hardware reset hook -- for example, hitting the
+ * PSHOLD line on the PMIC to hard reset the system
+ */
+extern void (*msm_hw_reset_hook)(void);
diff --git a/arch/arm/mach-msm/include/mach/uncompress.h b/arch/arm/mach-msm/include/mach/uncompress.h
index d94292c29d8e..6d1a2e82fc3f 100644
--- a/arch/arm/mach-msm/include/mach/uncompress.h
+++ b/arch/arm/mach-msm/include/mach/uncompress.h
@@ -15,7 +15,6 @@
#ifndef __ASM_ARCH_MSM_UNCOMPRESS_H
-#include "hardware.h"
#include "linux/io.h"
#include "mach/msm_iomap.h"
diff --git a/arch/arm/mach-msm/include/mach/usbdiag.h b/arch/arm/mach-msm/include/mach/usbdiag.h
new file mode 100644
index 000000000000..a2b89650f4c9
--- /dev/null
+++ b/arch/arm/mach-msm/include/mach/usbdiag.h
@@ -0,0 +1,44 @@
+/* include/asm-arm/arch-msm/usbdiag.h
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#ifndef _DRIVERS_USB_DIAG_H_
+#define _DRIVERS_USB_DIAG_H_
+#define ENOREQ -1
+struct diag_operations {
+
+ int (*diag_connect)(void);
+ int (*diag_disconnect)(void);
+ int (*diag_char_write_complete)(unsigned char * , int, int);
+ int (*diag_char_read_complete)(unsigned char *, int , int);
+};
+
+struct diag_request {
+ char *buf;
+ int length;
+};
+int diag_open(int);
+void diag_close(void);
+int diag_read(unsigned char *, int);
+int diag_write(unsigned char *, int);
+
+int diag_usb_register(struct diag_operations *);
+int diag_usb_unregister(void);
+int diag_read_from_cb(unsigned char * , int);
+#endif
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h
index 05f81fd8623c..9d8c8375b206 100644
--- a/arch/arm/mach-msm/include/mach/vmalloc.h
+++ b/arch/arm/mach-msm/include/mach/vmalloc.h
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/include/mach/vmalloc.h
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -16,7 +17,11 @@
#ifndef __ASM_ARCH_MSM_VMALLOC_H
#define __ASM_ARCH_MSM_VMALLOC_H
-#define VMALLOC_END (PAGE_OFFSET + 0x10000000)
+#ifdef CONFIG_VMSPLIT_2G
+#define VMALLOC_END (PAGE_OFFSET + 0x60000000)
+#else
+#define VMALLOC_END (PAGE_OFFSET + 0x20000000)
+#endif
#endif
diff --git a/arch/arm/mach-msm/include/mach/vreg.h b/arch/arm/mach-msm/include/mach/vreg.h
index 9f9e25cb718e..6626e7864e28 100644
--- a/arch/arm/mach-msm/include/mach/vreg.h
+++ b/arch/arm/mach-msm/include/mach/vreg.h
@@ -23,7 +23,7 @@ struct vreg *vreg_get(struct device *dev, const char *id);
void vreg_put(struct vreg *vreg);
int vreg_enable(struct vreg *vreg);
-void vreg_disable(struct vreg *vreg);
+int vreg_disable(struct vreg *vreg);
int vreg_set_level(struct vreg *vreg, unsigned mv);
#endif
diff --git a/arch/arm/mach-msm/internal_power_rail.c b/arch/arm/mach-msm/internal_power_rail.c
new file mode 100644
index 000000000000..c2c5e5bed9ce
--- /dev/null
+++ b/arch/arm/mach-msm/internal_power_rail.c
@@ -0,0 +1,90 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <mach/internal_power_rail.h>
+
+#include "proc_comm.h"
+
+/* Enable or disable an internal power rail */
+int internal_pwr_rail_ctl(unsigned rail_id, bool enable)
+{
+ int cmd, rc;
+
+ cmd = enable ? PCOM_CLKCTL_RPC_RAIL_ENABLE :
+ PCOM_CLKCTL_RPC_RAIL_DISABLE;
+
+ rc = msm_proc_comm(cmd, &rail_id, NULL);
+
+ return rc;
+
+}
+EXPORT_SYMBOL(internal_pwr_rail_ctl);
+
+/* Specify an internal power rail control mode (ex. auto, manual) */
+int internal_pwr_rail_mode(unsigned rail_id, enum rail_ctl_mode mode)
+{
+ int rc;
+
+ rc = msm_proc_comm(PCOM_CLKCTL_RPC_RAIL_CONTROL, &rail_id, &mode);
+
+ return rc;
+}
+EXPORT_SYMBOL(internal_pwr_rail_mode);
+
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 1c5e7dac086f..39a99a3a0f6b 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -1,8 +1,9 @@
/* arch/arm/mach-msm/io.c
*
- * MSM7K io support
+ * MSM7K, QSD io support
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -19,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <mach/hardware.h>
#include <asm/page.h>
@@ -34,10 +36,28 @@
.type = MT_DEVICE_NONSHARED, \
}
+/* msm_shared_ram_phys default value of 0x00100000 is the most common value
+ * and should work as-is for any target without stacked memory.
+ */
+int msm_shared_ram_phys = 0x00100000;
+
+static void msm_map_io(struct map_desc *io_desc, int size)
+{
+ int i;
+
+ BUG_ON(!size);
+ for (i = 0; i < size; i++)
+ if (io_desc[i].virtual == (unsigned long)MSM_SHARED_RAM_BASE)
+ io_desc[i].pfn = __phys_to_pfn(msm_shared_ram_phys);
+
+ iotable_init(io_desc, size);
+}
+
+#if defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7X27)
static struct map_desc msm_io_desc[] __initdata = {
MSM_DEVICE(VIC),
MSM_DEVICE(CSR),
- MSM_DEVICE(GPT),
+ MSM_DEVICE(TMR),
MSM_DEVICE(DMOV),
MSM_DEVICE(GPIO1),
MSM_DEVICE(GPIO2),
@@ -45,9 +65,16 @@ static struct map_desc msm_io_desc[] __initdata = {
#ifdef CONFIG_MSM_DEBUG_UART
MSM_DEVICE(DEBUG_UART),
#endif
+#ifdef CONFIG_CACHE_L2X0
+ {
+ .virtual = (unsigned long) MSM_L2CC_BASE,
+ .pfn = __phys_to_pfn(MSM_L2CC_PHYS),
+ .length = MSM_L2CC_SIZE,
+ .type = MT_DEVICE,
+ },
+#endif
{
.virtual = (unsigned long) MSM_SHARED_RAM_BASE,
- .pfn = __phys_to_pfn(MSM_SHARED_RAM_PHYS),
.length = MSM_SHARED_RAM_SIZE,
.type = MT_DEVICE,
},
@@ -60,9 +87,98 @@ void __init msm_map_common_io(void)
* pages are peripheral interface or not.
*/
asm("mcr p15, 0, %0, c15, c2, 4" : : "r" (0));
+ msm_map_io(msm_io_desc, ARRAY_SIZE(msm_io_desc));
+}
+#endif
+
+#ifdef CONFIG_ARCH_QSD8X50
+static struct map_desc qsd8x50_io_desc[] __initdata = {
+ MSM_DEVICE(VIC),
+ MSM_DEVICE(CSR),
+ MSM_DEVICE(TMR),
+ MSM_DEVICE(DMOV),
+ MSM_DEVICE(GPIO1),
+ MSM_DEVICE(GPIO2),
+ MSM_DEVICE(CLK_CTL),
+ MSM_DEVICE(SIRC),
+ MSM_DEVICE(SCPLL),
+ MSM_DEVICE(AD5),
+ MSM_DEVICE(MDC),
+#ifdef CONFIG_MSM_DEBUG_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
+ {
+ .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
+ .length = MSM_SHARED_RAM_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init msm_map_qsd8x50_io(void)
+{
+ msm_map_io(qsd8x50_io_desc, ARRAY_SIZE(qsd8x50_io_desc));
+}
+#endif /* CONFIG_ARCH_QSD8X50 */
+
+#ifdef CONFIG_ARCH_MSM7X30
+static struct map_desc msm7x30_io_desc[] __initdata = {
+ MSM_DEVICE(VIC),
+ MSM_DEVICE(CSR),
+ MSM_DEVICE(TMR),
+ MSM_DEVICE(DMOV),
+ MSM_DEVICE(GPIO1),
+ MSM_DEVICE(GPIO2),
+ MSM_DEVICE(CLK_CTL),
+ MSM_DEVICE(SIRC),
+ MSM_DEVICE(SCPLL),
+ MSM_DEVICE(AD5),
+ MSM_DEVICE(MDC),
+ MSM_DEVICE(ACC),
+ MSM_DEVICE(GCC),
+#ifdef CONFIG_MSM_DEBUG_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
+ {
+ .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
+ .length = MSM_SHARED_RAM_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init msm_map_msm7x30_io(void)
+{
+ msm_map_io(msm7x30_io_desc, ARRAY_SIZE(msm7x30_io_desc));
+}
+#endif /* CONFIG_ARCH_MSM7X30 */
- iotable_init(msm_io_desc, ARRAY_SIZE(msm_io_desc));
+#ifdef CONFIG_MACH_QSD8X50_COMET
+static struct map_desc comet_io_desc[] __initdata = {
+ MSM_DEVICE(VIC),
+ MSM_DEVICE(CSR),
+ MSM_DEVICE(TMR),
+ MSM_DEVICE(DMOV),
+ MSM_DEVICE(GPIO1),
+ MSM_DEVICE(GPIO2),
+ MSM_DEVICE(CLK_CTL),
+ MSM_DEVICE(SIRC),
+ MSM_DEVICE(SCPLL),
+ MSM_DEVICE(AD5),
+ MSM_DEVICE(MDC),
+#ifdef CONFIG_MSM_DEBUG_UART
+ MSM_DEVICE(DEBUG_UART),
+#endif
+ {
+ .virtual = (unsigned long) MSM_SHARED_RAM_BASE,
+ .length = MSM_SHARED_RAM_SIZE,
+ .type = MT_DEVICE,
+ },
+};
+
+void __init msm_map_comet_io(void)
+{
+ msm_map_io(comet_io_desc, ARRAY_SIZE(comet_io_desc));
}
+#endif /* CONFIG_MACH_QSD8X50_COMET */
void __iomem *
__msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
@@ -76,5 +192,7 @@ __msm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
mtype = MT_DEVICE_NONSHARED;
}
- return __arm_ioremap(phys_addr, size, mtype);
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
+EXPORT_SYMBOL(__msm_ioremap);
diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c
index 69ca0dd79bdf..03687ab2c223 100644
--- a/arch/arm/mach-msm/irq.c
+++ b/arch/arm/mach-msm/irq.c
@@ -1,6 +1,7 @@
/* linux/arch/arm/mach-msm/irq.c
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -22,96 +23,504 @@
#include <linux/irq.h>
#include <linux/io.h>
+#include <asm/cacheflush.h>
+
#include <mach/hardware.h>
#include <mach/msm_iomap.h>
+#include <mach/fiq.h>
+
+#include "smd_private.h"
+
+enum {
+ IRQ_DEBUG_SLEEP_INT_TRIGGER = 1U << 0,
+ IRQ_DEBUG_SLEEP_INT = 1U << 1,
+ IRQ_DEBUG_SLEEP_ABORT = 1U << 2,
+ IRQ_DEBUG_SLEEP = 1U << 3,
+ IRQ_DEBUG_SLEEP_REQUEST = 1U << 4,
+};
+static int msm_irq_debug_mask;
+module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
#define VIC_REG(off) (MSM_VIC_BASE + (off))
+#define VIC_INT_TO_REG_ADDR(base, irq) (base + (irq / 32) * 4)
+#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 3)
#define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */
#define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */
+#define VIC_INT_SELECT2 VIC_REG(0x0008) /* 1: FIQ, 0: IRQ */
+#define VIC_INT_SELECT3 VIC_REG(0x000C) /* 1: FIQ, 0: IRQ */
#define VIC_INT_EN0 VIC_REG(0x0010)
#define VIC_INT_EN1 VIC_REG(0x0014)
+#define VIC_INT_EN2 VIC_REG(0x0018)
+#define VIC_INT_EN3 VIC_REG(0x001C)
#define VIC_INT_ENCLEAR0 VIC_REG(0x0020)
#define VIC_INT_ENCLEAR1 VIC_REG(0x0024)
+#define VIC_INT_ENCLEAR2 VIC_REG(0x0028)
+#define VIC_INT_ENCLEAR3 VIC_REG(0x002C)
#define VIC_INT_ENSET0 VIC_REG(0x0030)
#define VIC_INT_ENSET1 VIC_REG(0x0034)
+#define VIC_INT_ENSET2 VIC_REG(0x0038)
+#define VIC_INT_ENSET3 VIC_REG(0x003C)
#define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */
#define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */
+#define VIC_INT_TYPE2 VIC_REG(0x0048) /* 1: EDGE, 0: LEVEL */
+#define VIC_INT_TYPE3 VIC_REG(0x004C) /* 1: EDGE, 0: LEVEL */
#define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */
#define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */
+#define VIC_INT_POLARITY2 VIC_REG(0x0058) /* 1: NEG, 0: POS */
+#define VIC_INT_POLARITY3 VIC_REG(0x005C) /* 1: NEG, 0: POS */
#define VIC_NO_PEND_VAL VIC_REG(0x0060)
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+#define VIC_NO_PEND_VAL_FIQ VIC_REG(0x0064)
+#define VIC_INT_MASTEREN VIC_REG(0x0068) /* 1: IRQ, 2: FIQ */
+#define VIC_CONFIG VIC_REG(0x006C) /* 1: USE SC VIC */
+#else
#define VIC_INT_MASTEREN VIC_REG(0x0064) /* 1: IRQ, 2: FIQ */
#define VIC_PROTECTION VIC_REG(0x006C) /* 1: ENABLE */
#define VIC_CONFIG VIC_REG(0x0068) /* 1: USE ARM1136 VIC */
+#endif
+
#define VIC_IRQ_STATUS0 VIC_REG(0x0080)
#define VIC_IRQ_STATUS1 VIC_REG(0x0084)
+#define VIC_IRQ_STATUS2 VIC_REG(0x0088)
+#define VIC_IRQ_STATUS3 VIC_REG(0x008C)
#define VIC_FIQ_STATUS0 VIC_REG(0x0090)
#define VIC_FIQ_STATUS1 VIC_REG(0x0094)
+#define VIC_FIQ_STATUS2 VIC_REG(0x0098)
+#define VIC_FIQ_STATUS3 VIC_REG(0x009C)
#define VIC_RAW_STATUS0 VIC_REG(0x00A0)
#define VIC_RAW_STATUS1 VIC_REG(0x00A4)
+#define VIC_RAW_STATUS2 VIC_REG(0x00A8)
+#define VIC_RAW_STATUS3 VIC_REG(0x00AC)
#define VIC_INT_CLEAR0 VIC_REG(0x00B0)
#define VIC_INT_CLEAR1 VIC_REG(0x00B4)
+#define VIC_INT_CLEAR2 VIC_REG(0x00B8)
+#define VIC_INT_CLEAR3 VIC_REG(0x00BC)
#define VIC_SOFTINT0 VIC_REG(0x00C0)
#define VIC_SOFTINT1 VIC_REG(0x00C4)
+#define VIC_SOFTINT2 VIC_REG(0x00C8)
+#define VIC_SOFTINT3 VIC_REG(0x00CC)
#define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */
#define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */
#define VIC_IRQ_VEC_WR VIC_REG(0x00D8)
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+#define VIC_FIQ_VEC_RD VIC_REG(0x00DC)
+#define VIC_FIQ_VEC_PEND_RD VIC_REG(0x00E0)
+#define VIC_FIQ_VEC_WR VIC_REG(0x00E4)
+#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E8)
+#define VIC_IRQ_IN_STACK VIC_REG(0x00EC)
+#define VIC_FIQ_IN_SERVICE VIC_REG(0x00F0)
+#define VIC_FIQ_IN_STACK VIC_REG(0x00F4)
+#define VIC_TEST_BUS_SEL VIC_REG(0x00F8)
+#define VIC_IRQ_CTRL_CONFIG VIC_REG(0x00FC)
+#else
#define VIC_IRQ_IN_SERVICE VIC_REG(0x00E0)
#define VIC_IRQ_IN_STACK VIC_REG(0x00E4)
#define VIC_TEST_BUS_SEL VIC_REG(0x00E8)
+#endif
#define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4))
#define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4))
+#if defined(CONFIG_ARCH_MSM7X30)
+#define VIC_NUM_REGS 4
+#else
+#define VIC_NUM_REGS 2
+#endif
+
+#if VIC_NUM_REGS == 2
+#define DPRINT_REGS(base_reg, format, ...) \
+ printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
+ readl(base_reg ## 0), readl(base_reg ## 1))
+#define DPRINT_ARRAY(array, format, ...) \
+ printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
+ array[0], array[1])
+#elif VIC_NUM_REGS == 4
+#define DPRINT_REGS(base_reg, format, ...) \
+ printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
+ readl(base_reg ## 0), readl(base_reg ## 1), \
+ readl(base_reg ## 2), readl(base_reg ## 3))
+#define DPRINT_ARRAY(array, format, ...) \
+ printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
+ array[0], array[1], \
+ array[2], array[3])
+#else
+#error "VIC_NUM_REGS set to illegal value"
+#endif
+
+static uint32_t msm_irq_smsm_wake_enable[2];
+static struct {
+ uint32_t int_en[2];
+ uint32_t int_type;
+ uint32_t int_polarity;
+ uint32_t int_select;
+} msm_irq_shadow_reg[VIC_NUM_REGS];
+static uint32_t msm_irq_idle_disable[VIC_NUM_REGS];
+
+#define SMSM_FAKE_IRQ (0xff)
+static uint8_t msm_irq_to_smsm[NR_IRQS] = {
+ [INT_MDDI_EXT] = 1,
+ [INT_MDDI_PRI] = 2,
+ [INT_MDDI_CLIENT] = 3,
+ [INT_USB_OTG] = 4,
+
+ [INT_PWB_I2C] = 5,
+ [INT_SDC1_0] = 6,
+ [INT_SDC1_1] = 7,
+ [INT_SDC2_0] = 8,
+
+ [INT_SDC2_1] = 9,
+ [INT_ADSP_A9_A11] = 10,
+ [INT_UART1] = 11,
+ [INT_UART2] = 12,
+
+ [INT_UART3] = 13,
+ [INT_UART1_RX] = 14,
+ [INT_UART2_RX] = 15,
+ [INT_UART3_RX] = 16,
+
+ [INT_UART1DM_IRQ] = 17,
+ [INT_UART1DM_RX] = 18,
+ [INT_KEYSENSE] = 19,
+#if !defined(CONFIG_ARCH_MSM7X30)
+ [INT_AD_HSSD] = 20,
+#endif
+
+ [INT_NAND_WR_ER_DONE] = 21,
+ [INT_NAND_OP_DONE] = 22,
+ [INT_TCHSCRN1] = 23,
+ [INT_TCHSCRN2] = 24,
+
+ [INT_TCHSCRN_SSBI] = 25,
+ [INT_USB_HS] = 26,
+ [INT_UART2DM_RX] = 27,
+ [INT_UART2DM_IRQ] = 28,
+
+ [INT_SDC4_1] = 29,
+ [INT_SDC4_0] = 30,
+ [INT_SDC3_1] = 31,
+ [INT_SDC3_0] = 32,
+
+ /* fake wakeup interrupts */
+ [INT_GPIO_GROUP1] = SMSM_FAKE_IRQ,
+ [INT_GPIO_GROUP2] = SMSM_FAKE_IRQ,
+ [INT_A9_M2A_0] = SMSM_FAKE_IRQ,
+ [INT_A9_M2A_1] = SMSM_FAKE_IRQ,
+ [INT_A9_M2A_5] = SMSM_FAKE_IRQ,
+ [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ,
+ [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ,
+ [INT_ADSP_A11] = SMSM_FAKE_IRQ,
+#ifdef CONFIG_ARCH_QSD8X50
+ [INT_SIRC_0] = SMSM_FAKE_IRQ,
+ [INT_SIRC_1] = SMSM_FAKE_IRQ,
+#endif
+};
+
+static inline void msm_irq_write_all_regs(void __iomem *base, unsigned int val)
+{
+ int i;
+
+ for (i = 0; i < VIC_NUM_REGS; i++)
+ writel(val, base + (i * 4));
+}
+
static void msm_irq_ack(unsigned int irq)
{
- void __iomem *reg = VIC_INT_CLEAR0 + ((irq & 32) ? 4 : 0);
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, irq);
irq = 1 << (irq & 31);
writel(irq, reg);
}
static void msm_irq_mask(unsigned int irq)
{
- void __iomem *reg = VIC_INT_ENCLEAR0 + ((irq & 32) ? 4 : 0);
- writel(1 << (irq & 31), reg);
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ int smsm_irq = msm_irq_to_smsm[irq];
+
+ msm_irq_shadow_reg[index].int_en[0] &= ~mask;
+ writel(mask, reg);
+ if (smsm_irq == 0)
+ msm_irq_idle_disable[index] &= ~mask;
+ else {
+ mask = 1UL << (smsm_irq - 1);
+ msm_irq_smsm_wake_enable[0] &= ~mask;
+ }
}
static void msm_irq_unmask(unsigned int irq)
{
- void __iomem *reg = VIC_INT_ENSET0 + ((irq & 32) ? 4 : 0);
- writel(1 << (irq & 31), reg);
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ int smsm_irq = msm_irq_to_smsm[irq];
+
+ msm_irq_shadow_reg[index].int_en[0] |= mask;
+ writel(mask, reg);
+
+ if (smsm_irq == 0)
+ msm_irq_idle_disable[index] |= mask;
+ else {
+ mask = 1UL << (smsm_irq - 1);
+ msm_irq_smsm_wake_enable[0] |= mask;
+ }
}
static int msm_irq_set_wake(unsigned int irq, unsigned int on)
{
- return -EINVAL;
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ int smsm_irq = msm_irq_to_smsm[irq];
+
+ if (smsm_irq == 0) {
+ printk(KERN_ERR "msm_irq_set_wake: bad wakeup irq %d\n", irq);
+ return -EINVAL;
+ }
+ if (on)
+ msm_irq_shadow_reg[index].int_en[1] |= mask;
+ else
+ msm_irq_shadow_reg[index].int_en[1] &= ~mask;
+
+ if (smsm_irq == SMSM_FAKE_IRQ)
+ return 0;
+
+ mask = 1UL << (smsm_irq - 1);
+ if (on)
+ msm_irq_smsm_wake_enable[1] |= mask;
+ else
+ msm_irq_smsm_wake_enable[1] &= ~mask;
+ return 0;
}
static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
{
- void __iomem *treg = VIC_INT_TYPE0 + ((irq & 32) ? 4 : 0);
- void __iomem *preg = VIC_INT_POLARITY0 + ((irq & 32) ? 4 : 0);
+ void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, irq);
+ void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
int b = 1 << (irq & 31);
+ uint32_t polarity;
+ uint32_t type;
+ polarity = msm_irq_shadow_reg[index].int_polarity;
if (flow_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
- writel(readl(preg) | b, preg);
+ polarity |= b;
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_HIGH))
- writel(readl(preg) & (~b), preg);
+ polarity &= ~b;
+ writel(polarity, preg);
+ msm_irq_shadow_reg[index].int_polarity = polarity;
+ type = msm_irq_shadow_reg[index].int_type;
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- writel(readl(treg) | b, treg);
- set_irq_handler(irq, handle_edge_irq);
+ type |= b;
+ irq_desc[irq].handle_irq = handle_edge_irq;
}
if (flow_type & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
- writel(readl(treg) & (~b), treg);
- set_irq_handler(irq, handle_level_irq);
+ type &= ~b;
+ irq_desc[irq].handle_irq = handle_level_irq;
+ }
+ writel(type, treg);
+ msm_irq_shadow_reg[index].int_type = type;
+ return 0;
+}
+
+unsigned int msm_irq_pending(void)
+{
+ unsigned int i, pending = 0;
+
+ for (i = 0; (i < VIC_NUM_REGS) && !pending; i++)
+ pending |= readl(VIC_IRQ_STATUS0 + (i * 4));
+
+ return pending;
+}
+
+int msm_irq_idle_sleep_allowed(void)
+{
+ uint32_t i, disable = 0;
+
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_REQUEST)
+ DPRINT_ARRAY(msm_irq_idle_disable,
+ "msm_irq_idle_sleep_allowed: disable");
+
+ for (i = 0; i < VIC_NUM_REGS; i++)
+ disable |= msm_irq_idle_disable[i];
+
+ return !disable;
+}
+
+/*
+ * Prepare interrupt subsystem for entering sleep -- phase 1.
+ * If modem_wake is true, return currently enabled interrupts in *irq_mask.
+ */
+void msm_irq_enter_sleep1(bool modem_wake, int from_idle, uint32_t *irq_mask)
+{
+ if (modem_wake) {
+ *irq_mask = msm_irq_smsm_wake_enable[!from_idle];
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP)
+ printk(KERN_INFO
+ "%s irq_mask %x\n", __func__, *irq_mask);
+ }
+}
+
+/*
+ * Prepare interrupt subsystem for entering sleep -- phase 2.
+ * Detect any pending interrupts and configure interrupt hardware.
+ *
+ * Return value:
+ * -EAGAIN: there are pending interrupt(s); interrupt configuration
+ * is not changed.
+ * 0: success
+ */
+int msm_irq_enter_sleep2(bool modem_wake, int from_idle)
+{
+ int i, limit = 10;
+ uint32_t pending[VIC_NUM_REGS];
+
+ if (from_idle && !modem_wake)
+ return 0;
+
+ /* edge triggered interrupt may get lost if this mode is used */
+ WARN_ON_ONCE(!modem_wake && !from_idle);
+
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP)
+ DPRINT_REGS(VIC_IRQ_STATUS, "%s change irq, pend", __func__);
+
+ for (i = 0; i < VIC_NUM_REGS; i++) {
+ pending[i] = readl(VIC_IRQ_STATUS0 + (i * 4));
+ pending[i] &= msm_irq_shadow_reg[i].int_en[!from_idle];
}
+
+ /* Clear INT_A9_M2A_5 since requesting sleep triggers it */
+ pending[0] &= ~(1U << INT_A9_M2A_5);
+
+ for (i = 0; i < VIC_NUM_REGS; i++) {
+ if (pending[i]) {
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_ABORT)
+ DPRINT_ARRAY(pending, "%s abort",
+ __func__);
+ return -EAGAIN;
+ }
+ }
+
+ msm_irq_write_all_regs(VIC_INT_EN0, 0);
+
+ while (limit-- > 0) {
+ int pend_irq;
+ int irq = readl(VIC_IRQ_VEC_RD);
+ if (irq == -1)
+ break;
+ pend_irq = readl(VIC_IRQ_VEC_PEND_RD);
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT)
+ printk(KERN_INFO "%s cleared int %d (%d)\n",
+ __func__, irq, pend_irq);
+ }
+
+ if (modem_wake) {
+ msm_irq_set_type(INT_A9_M2A_6, IRQF_TRIGGER_RISING);
+ writel(1U << INT_A9_M2A_6, VIC_INT_ENSET0);
+ } else {
+ for (i = 0; i < VIC_NUM_REGS; i++)
+ writel(msm_irq_shadow_reg[i].int_en[1],
+ VIC_INT_ENSET0 + (i * 4));
+ }
+
return 0;
}
+/*
+ * Restore interrupt subsystem from sleep -- phase 1.
+ * Configure interrupt hardware.
+ */
+void msm_irq_exit_sleep1(uint32_t irq_mask, uint32_t wakeup_reason,
+ uint32_t pending_irqs)
+{
+ int i;
+
+ msm_irq_ack(INT_A9_M2A_6);
+
+ for (i = 0; i < VIC_NUM_REGS; i++) {
+ writel(msm_irq_shadow_reg[i].int_type,
+ VIC_INT_TYPE0 + i * 4);
+ writel(msm_irq_shadow_reg[i].int_polarity,
+ VIC_INT_POLARITY0 + i * 4);
+ writel(msm_irq_shadow_reg[i].int_en[0],
+ VIC_INT_EN0 + i * 4);
+ writel(msm_irq_shadow_reg[i].int_select,
+ VIC_INT_SELECT0 + i * 4);
+ }
+
+ writel(3, VIC_INT_MASTEREN);
+
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP)
+ DPRINT_REGS(VIC_IRQ_STATUS, "%s %x %x %x now",
+ __func__, irq_mask, pending_irqs, wakeup_reason);
+}
+
+/*
+ * Restore interrupt subsystem from sleep -- phase 2.
+ * Poke the specified pending interrupts into interrupt hardware.
+ */
+void msm_irq_exit_sleep2(uint32_t irq_mask, uint32_t wakeup_reason,
+ uint32_t pending)
+{
+ int i;
+
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP)
+ DPRINT_REGS(VIC_IRQ_STATUS, "%s %x %x %x now",
+ __func__, irq_mask, pending, wakeup_reason);
+
+ for (i = 0; pending && i < ARRAY_SIZE(msm_irq_to_smsm); i++) {
+ unsigned reg_offset = VIC_INT_TO_REG_ADDR(0, i);
+ uint32_t reg_mask = 1UL << (i & 31);
+ int smsm_irq = msm_irq_to_smsm[i];
+ uint32_t smsm_mask;
+
+ if (smsm_irq == 0)
+ continue;
+
+ smsm_mask = 1U << (smsm_irq - 1);
+ if (!(pending & smsm_mask))
+ continue;
+
+ pending &= ~smsm_mask;
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT)
+ DPRINT_REGS(VIC_IRQ_STATUS,
+ "%s: irq %d still pending %x now",
+ __func__, i, pending);
+#if 0 /* debug intetrrupt trigger */
+ if (readl(VIC_IRQ_STATUS0 + reg_offset) & reg_mask)
+ writel(reg_mask, VIC_INT_CLEAR0 + reg_offset);
+#endif
+ if (readl(VIC_IRQ_STATUS0 + reg_offset) & reg_mask)
+ continue;
+
+ writel(reg_mask, VIC_SOFTINT0 + reg_offset);
+
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_INT_TRIGGER)
+ DPRINT_REGS(VIC_IRQ_STATUS,
+ "%s: irq %d need trigger, now",
+ __func__, i);
+ }
+}
+
+/*
+ * Restore interrupt subsystem from sleep -- phase 3.
+ * Print debug information.
+ */
+void msm_irq_exit_sleep3(uint32_t irq_mask, uint32_t wakeup_reason,
+ uint32_t pending_irqs)
+{
+ if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP)
+ DPRINT_REGS(VIC_IRQ_STATUS, "%s %x %x %x state %x now",
+ __func__, irq_mask, pending_irqs, wakeup_reason,
+ smsm_get_state(SMSM_MODEM_STATE));
+}
+
static struct irq_chip msm_irq_chip = {
.name = "msm",
+ .disable = msm_irq_mask,
.ack = msm_irq_ack,
.mask = msm_irq_mask,
.unmask = msm_irq_unmask,
@@ -124,26 +533,22 @@ void __init msm_init_irq(void)
unsigned n;
/* select level interrupts */
- writel(0, VIC_INT_TYPE0);
- writel(0, VIC_INT_TYPE1);
+ msm_irq_write_all_regs(VIC_INT_TYPE0, 0);
/* select highlevel interrupts */
- writel(0, VIC_INT_POLARITY0);
- writel(0, VIC_INT_POLARITY1);
+ msm_irq_write_all_regs(VIC_INT_POLARITY0, 0);
/* select IRQ for all INTs */
- writel(0, VIC_INT_SELECT0);
- writel(0, VIC_INT_SELECT1);
+ msm_irq_write_all_regs(VIC_INT_SELECT0, 0);
/* disable all INTs */
- writel(0, VIC_INT_EN0);
- writel(0, VIC_INT_EN1);
+ msm_irq_write_all_regs(VIC_INT_EN0, 0);
- /* don't use 1136 vic */
+ /* don't use vic */
writel(0, VIC_CONFIG);
/* enable interrupt controller */
- writel(1, VIC_INT_MASTEREN);
+ writel(3, VIC_INT_MASTEREN);
for (n = 0; n < NR_MSM_IRQS; n++) {
set_irq_chip(n, &msm_irq_chip);
@@ -151,3 +556,85 @@ void __init msm_init_irq(void)
set_irq_flags(n, IRQF_VALID);
}
}
+
+#if defined(CONFIG_MSM_FIQ_SUPPORT)
+void msm_trigger_irq(int irq)
+{
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_SOFTINT0, irq);
+ uint32_t mask = 1UL << (irq & 31);
+ writel(mask, reg);
+}
+
+void msm_fiq_enable(int irq)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ msm_irq_unmask(irq);
+ local_irq_restore(flags);
+}
+
+void msm_fiq_disable(int irq)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ msm_irq_mask(irq);
+ local_irq_restore(flags);
+}
+
+void msm_fiq_select(int irq)
+{
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ msm_irq_shadow_reg[index].int_select |= mask;
+ writel(msm_irq_shadow_reg[index].int_select, reg);
+ local_irq_restore(flags);
+}
+
+void msm_fiq_unselect(int irq)
+{
+ void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq);
+ unsigned index = VIC_INT_TO_REG_INDEX(irq);
+ uint32_t mask = 1UL << (irq & 31);
+ unsigned long flags;
+
+ local_irq_save(flags);
+ msm_irq_shadow_reg[index].int_select &= (!mask);
+ writel(msm_irq_shadow_reg[index].int_select, reg);
+ local_irq_restore(flags);
+}
+/* set_fiq_handler originally from arch/arm/kernel/fiq.c */
+static void set_fiq_handler(void *start, unsigned int length)
+{
+ memcpy((void *)0xffff001c, start, length);
+ flush_icache_range(0xffff001c, 0xffff001c + length);
+ if (!vectors_high())
+ flush_icache_range(0x1c, 0x1c + length);
+}
+
+extern unsigned char fiq_glue, fiq_glue_end;
+
+static void (*fiq_func)(void *data, void *regs);
+static unsigned long long fiq_stack[256];
+
+void fiq_glue_setup(void *func, void *data, void *sp);
+
+int msm_fiq_set_handler(void (*func)(void *data, void *regs), void *data)
+{
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ local_irq_save(flags);
+ if (fiq_func == 0) {
+ fiq_func = func;
+ fiq_glue_setup(func, data, fiq_stack + 255);
+ set_fiq_handler(&fiq_glue, (&fiq_glue_end - &fiq_glue));
+ ret = 0;
+ }
+ local_irq_restore(flags);
+ return ret;
+}
+#endif
diff --git a/arch/arm/mach-msm/irq.h b/arch/arm/mach-msm/irq.h
new file mode 100644
index 000000000000..ad4cb92ca570
--- /dev/null
+++ b/arch/arm/mach-msm/irq.h
@@ -0,0 +1,43 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_IRQ_H_
+#define _ARCH_ARM_MACH_MSM_IRQ_H_
+
+int msm_irq_idle_sleep_allowed(void);
+unsigned int msm_irq_pending(void);
+void msm_irq_enter_sleep1(bool arm9_wake, int from_idle, uint32_t *irq_mask);
+int msm_irq_enter_sleep2(bool arm9_wake, int from_idle);
+void msm_irq_exit_sleep1
+ (uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending_irqs);
+void msm_irq_exit_sleep2
+ (uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending);
+void msm_irq_exit_sleep3
+ (uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending_irqs);
+
+#endif
diff --git a/arch/arm/mach-msm/jtag-v7.S b/arch/arm/mach-msm/jtag-v7.S
new file mode 100644
index 000000000000..ebac7b254549
--- /dev/null
+++ b/arch/arm/mach-msm/jtag-v7.S
@@ -0,0 +1,117 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * JTAG support functions for ARMv7-based Qualcomm SoCs.
+ */
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+ENTRY(msm_save_jtag_debug)
+ /* lock debug and preserve registers through power collapse */
+ ldr r3, =dbg_state /* store state at dbg_state */
+
+ ldr r1, =0xc5ACCE55 /* set DBGOSLAR lock */
+ mcr p14,0,r1,c1,c0,4
+ isb
+
+ mrc p14,0,r1,c1,c2,4 /* DBGOSSRR state register count */
+
+ cmp r1, #(0x20-1) /* check for state overflow */
+ movge r1, #0 /* if not enough space, don't save */
+
+ str r1,[r3],#4 /* save count for restore */
+
+1: cmp r1,#0
+ mrcne p14,0,r2,c1,c2,4 /* DBGOSSRR state value */
+ strne r2,[r3],#4 /* push value */
+ subne r1,r1,#1
+ bne 1b
+
+ /* unlock JTAG. Works better than leaving locked. */
+ stmfd sp!, {lr}
+ bl msm_unlock_jtag_debug
+ ldmfd sp!, {lr}
+ bx lr
+
+ENTRY(msm_unlock_jtag_debug)
+ mov r0, #0 /* unlock value */
+ mcr p14,0,r0,c1,c0,4 /* unlock DBGOSLAR */
+ isb
+ bx lr
+
+ENTRY(msm_restore_jtag_debug)
+ /* restore debug registers after power collapse */
+ ldr r3, =dbg_state /* load state from dbg_state */
+
+ ldr r1, =0xc5ACCE55 /* set DBGOSLAR lock */
+ mcr p14,0,r1,c1,c0,4
+ isb
+
+ mrc p14,0,r1,c1,c2,4 /* DBGOSSRR dummy read (required)*/
+ ldr r1,[r3],#4 /* load saved count */
+ cmp r1,#0 /* skip if none stored
+ beq msm_pm_dbg_restore_end
+
+ /* restores debug state except DBGDSCR */
+1: ldr r2,[r3],#4
+ cmp r1,#0x10 /* DBGDSCR special case */
+ biceq r2,r2,#0xc000 /* DBGDSCR = DBGDSCR & ~0xc000 */
+ mcr p14,0,r2,c1,c2,4 /* DBGOSSRR write state value */
+ subs r1,r1,#1
+ bne 1b
+ isb
+
+ /* second loop to restore DBGDSCR after other state restored */
+ ldr r3, =dbg_state /* load state from dbg_state */
+
+ ldr r1, =0xc5ACCE55 /* set DBGOSLAR lock */
+ mcr p14,0,r1,c1,c0,4
+ isb
+
+ mrc p14,0,r1,c1,c5,4 /* clear sticky power down bit */
+ isb
+
+ mrc p14,0,r1,c1,c2,4 /* DBGOSSRR dummy read (required)*/
+ ldr r1,[r3],#4 /* load saved count */
+
+1: ldr r2,[r3],#4
+ mcr p14,0,r2,c1,c2,4 /* DBGOSSRR write state value */
+ subs r1,r1,#1
+ bne 1b
+msm_pm_dbg_restore_end:
+ mcr p14,0,r1,c1,c0,4 /* unlock DBGOSLAR */
+ isb
+ bx lr
+
+
+ .data
+
+dbg_state:
+ .space 4 * 0x20
+
diff --git a/arch/arm/mach-msm/keypad-surf-ffa.c b/arch/arm/mach-msm/keypad-surf-ffa.c
new file mode 100644
index 000000000000..1ea72af71a47
--- /dev/null
+++ b/arch/arm/mach-msm/keypad-surf-ffa.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/gpio_event.h>
+
+#include <asm/mach-types.h>
+
+/* don't turn this on without updating the ffa support */
+#define SCAN_FUNCTION_KEYS 0
+
+/* FFA:
+ 36: KEYSENSE_N(0)
+ 37: KEYSENSE_N(1)
+ 38: KEYSENSE_N(2)
+ 39: KEYSENSE_N(3)
+ 40: KEYSENSE_N(4)
+
+ 31: KYPD_17
+ 32: KYPD_15
+ 33: KYPD_13
+ 34: KYPD_11
+ 35: KYPD_9
+ 41: KYPD_MEMO
+*/
+
+static unsigned int keypad_row_gpios[] = {
+ 31, 32, 33, 34, 35, 41
+#if SCAN_FUNCTION_KEYS
+ , 42
+#endif
+};
+
+static unsigned int keypad_col_gpios[] = { 36, 37, 38, 39, 40 };
+
+static unsigned int keypad_row_gpios_8k_ffa[] = {31, 32, 33, 34, 35, 36};
+static unsigned int keypad_col_gpios_8k_ffa[] = {38, 39, 40, 41, 42};
+
+#define KEYMAP_INDEX(row, col) ((row)*ARRAY_SIZE(keypad_col_gpios) + (col))
+#define FFA_8K_KEYMAP_INDEX(row, col) ((row)* \
+ ARRAY_SIZE(keypad_col_gpios_8k_ffa) + (col))
+
+static const unsigned short keypad_keymap_surf[ARRAY_SIZE(keypad_col_gpios) *
+ ARRAY_SIZE(keypad_row_gpios)] = {
+ [KEYMAP_INDEX(0, 0)] = KEY_5,
+ [KEYMAP_INDEX(0, 1)] = KEY_9,
+ [KEYMAP_INDEX(0, 2)] = 229, /* SOFT1 */
+ [KEYMAP_INDEX(0, 3)] = KEY_6,
+ [KEYMAP_INDEX(0, 4)] = KEY_LEFT,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_0,
+ [KEYMAP_INDEX(1, 1)] = KEY_RIGHT,
+ [KEYMAP_INDEX(1, 2)] = KEY_1,
+ [KEYMAP_INDEX(1, 3)] = 228, /* KEY_SHARP */
+ [KEYMAP_INDEX(1, 4)] = KEY_SEND,
+
+ [KEYMAP_INDEX(2, 0)] = KEY_VOLUMEUP,
+ [KEYMAP_INDEX(2, 1)] = KEY_HOME, /* FA */
+ [KEYMAP_INDEX(2, 2)] = KEY_F8, /* QCHT */
+ [KEYMAP_INDEX(2, 3)] = KEY_F6, /* R+ */
+ [KEYMAP_INDEX(2, 4)] = KEY_F7, /* R- */
+
+ [KEYMAP_INDEX(3, 0)] = KEY_UP,
+ [KEYMAP_INDEX(3, 1)] = KEY_CLEAR,
+ [KEYMAP_INDEX(3, 2)] = KEY_4,
+ [KEYMAP_INDEX(3, 3)] = KEY_MUTE, /* SPKR */
+ [KEYMAP_INDEX(3, 4)] = KEY_2,
+
+ [KEYMAP_INDEX(4, 0)] = 230, /* SOFT2 */
+ [KEYMAP_INDEX(4, 1)] = 232, /* KEY_CENTER */
+ [KEYMAP_INDEX(4, 2)] = KEY_DOWN,
+ [KEYMAP_INDEX(4, 3)] = KEY_BACK, /* FB */
+ [KEYMAP_INDEX(4, 4)] = KEY_8,
+
+ [KEYMAP_INDEX(5, 0)] = KEY_VOLUMEDOWN,
+ [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
+ [KEYMAP_INDEX(5, 2)] = KEY_MAIL, /* MESG */
+ [KEYMAP_INDEX(5, 3)] = KEY_3,
+ [KEYMAP_INDEX(5, 4)] = KEY_7,
+
+#if SCAN_FUNCTION_KEYS
+ [KEYMAP_INDEX(6, 0)] = KEY_F5,
+ [KEYMAP_INDEX(6, 1)] = KEY_F4,
+ [KEYMAP_INDEX(6, 2)] = KEY_F3,
+ [KEYMAP_INDEX(6, 3)] = KEY_F2,
+ [KEYMAP_INDEX(6, 4)] = KEY_F1
+#endif
+};
+
+static const unsigned short keypad_keymap_ffa[ARRAY_SIZE(keypad_col_gpios) *
+ ARRAY_SIZE(keypad_row_gpios)] = {
+ /*[KEYMAP_INDEX(0, 0)] = ,*/
+ /*[KEYMAP_INDEX(0, 1)] = ,*/
+ [KEYMAP_INDEX(0, 2)] = KEY_1,
+ [KEYMAP_INDEX(0, 3)] = KEY_SEND,
+ [KEYMAP_INDEX(0, 4)] = KEY_LEFT,
+
+ [KEYMAP_INDEX(1, 0)] = KEY_3,
+ [KEYMAP_INDEX(1, 1)] = KEY_RIGHT,
+ [KEYMAP_INDEX(1, 2)] = KEY_VOLUMEUP,
+ /*[KEYMAP_INDEX(1, 3)] = ,*/
+ [KEYMAP_INDEX(1, 4)] = KEY_6,
+
+ [KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */
+ [KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */
+ [KEYMAP_INDEX(2, 2)] = KEY_0,
+ [KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */
+ [KEYMAP_INDEX(2, 4)] = KEY_9,
+
+ [KEYMAP_INDEX(3, 0)] = KEY_UP,
+ [KEYMAP_INDEX(3, 1)] = 232, /* KEY_CENTER */ /* i */
+ [KEYMAP_INDEX(3, 2)] = KEY_4,
+ /*[KEYMAP_INDEX(3, 3)] = ,*/
+ [KEYMAP_INDEX(3, 4)] = KEY_2,
+
+ [KEYMAP_INDEX(4, 0)] = KEY_VOLUMEDOWN,
+ [KEYMAP_INDEX(4, 1)] = KEY_SOUND,
+ [KEYMAP_INDEX(4, 2)] = KEY_DOWN,
+ [KEYMAP_INDEX(4, 3)] = KEY_8,
+ [KEYMAP_INDEX(4, 4)] = KEY_5,
+
+ /*[KEYMAP_INDEX(5, 0)] = ,*/
+ [KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
+ [KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */
+ [KEYMAP_INDEX(5, 3)] = KEY_MENU, /* 1 */
+ [KEYMAP_INDEX(5, 4)] = KEY_7,
+};
+
+#define QSD8x50_FFA_KEYMAP_SIZE (ARRAY_SIZE(keypad_col_gpios_8k_ffa) * \
+ ARRAY_SIZE(keypad_row_gpios_8k_ffa))
+
+static const unsigned short keypad_keymap_8k_ffa[QSD8x50_FFA_KEYMAP_SIZE] = {
+
+ [FFA_8K_KEYMAP_INDEX(0, 0)] = KEY_VOLUMEDOWN,
+ /*[KEYMAP_INDEX(0, 1)] = ,*/
+ [FFA_8K_KEYMAP_INDEX(0, 2)] = KEY_DOWN,
+ [FFA_8K_KEYMAP_INDEX(0, 3)] = KEY_8,
+ [FFA_8K_KEYMAP_INDEX(0, 4)] = KEY_5,
+
+ [FFA_8K_KEYMAP_INDEX(1, 0)] = KEY_UP,
+ [FFA_8K_KEYMAP_INDEX(1, 1)] = KEY_CLEAR,
+ [FFA_8K_KEYMAP_INDEX(1, 2)] = KEY_4,
+ /*[KEYMAP_INDEX(1, 3)] = ,*/
+ [FFA_8K_KEYMAP_INDEX(1, 4)] = KEY_2,
+
+ [FFA_8K_KEYMAP_INDEX(2, 0)] = KEY_HOME, /* A */
+ [FFA_8K_KEYMAP_INDEX(2, 1)] = KEY_BACK, /* B */
+ [FFA_8K_KEYMAP_INDEX(2, 2)] = KEY_0,
+ [FFA_8K_KEYMAP_INDEX(2, 3)] = 228, /* KEY_SHARP */
+ [FFA_8K_KEYMAP_INDEX(2, 4)] = KEY_9,
+
+ [FFA_8K_KEYMAP_INDEX(3, 0)] = KEY_3,
+ [FFA_8K_KEYMAP_INDEX(3, 1)] = KEY_RIGHT,
+ [FFA_8K_KEYMAP_INDEX(3, 2)] = KEY_VOLUMEUP,
+ /*[KEYMAP_INDEX(3, 3)] = ,*/
+ [FFA_8K_KEYMAP_INDEX(3, 4)] = KEY_6,
+
+ [FFA_8K_KEYMAP_INDEX(4, 0)] = 232, /* OK */
+ [FFA_8K_KEYMAP_INDEX(4, 1)] = KEY_SOUND,
+ [FFA_8K_KEYMAP_INDEX(4, 2)] = KEY_1,
+ [FFA_8K_KEYMAP_INDEX(4, 3)] = KEY_SEND,
+ [FFA_8K_KEYMAP_INDEX(4, 4)] = KEY_LEFT,
+
+ /*[KEYMAP_INDEX(5, 0)] = ,*/
+ [FFA_8K_KEYMAP_INDEX(5, 1)] = 227, /* KEY_STAR */
+ [FFA_8K_KEYMAP_INDEX(5, 2)] = 230, /*SOFT2*/ /* 2 */
+ [FFA_8K_KEYMAP_INDEX(5, 3)] = KEY_MENU, /* 1 */
+ [FFA_8K_KEYMAP_INDEX(5, 4)] = KEY_7,
+};
+
+static const unsigned short keypad_virtual_keys[] = {
+ KEY_END,
+ KEY_POWER
+};
+
+static int keypad_gpio_event_matrix_func(struct input_dev *input_dev,
+ struct gpio_event_info *info,
+ void **data, int func);
+
+/* SURF keypad platform device information */
+static struct gpio_event_matrix_info surf_keypad_matrix_info = {
+ .info.func = keypad_gpio_event_matrix_func,
+ .keymap = keypad_keymap_surf,
+ .output_gpios = keypad_row_gpios,
+ .input_gpios = keypad_col_gpios,
+ .noutputs = ARRAY_SIZE(keypad_row_gpios),
+ .ninputs = ARRAY_SIZE(keypad_col_gpios),
+ .settle_time.tv.nsec = 0,
+ .poll_time.tv.nsec = 20 * NSEC_PER_MSEC,
+ .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE |
+ GPIOKPF_PRINT_UNMAPPED_KEYS
+};
+
+static struct gpio_event_info *surf_keypad_info[] = {
+ &surf_keypad_matrix_info.info
+};
+
+static struct gpio_event_platform_data surf_keypad_data = {
+ .name = "surf_keypad",
+ .info = surf_keypad_info,
+ .info_count = ARRAY_SIZE(surf_keypad_info)
+};
+
+struct platform_device keypad_device_surf = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &surf_keypad_data,
+ },
+};
+
+/* 8k FFA keypad platform device information */
+static struct gpio_event_matrix_info keypad_matrix_info_8k_ffa = {
+ .info.func = keypad_gpio_event_matrix_func,
+ .keymap = keypad_keymap_8k_ffa,
+ .output_gpios = keypad_row_gpios_8k_ffa,
+ .input_gpios = keypad_col_gpios_8k_ffa,
+ .noutputs = ARRAY_SIZE(keypad_row_gpios_8k_ffa),
+ .ninputs = ARRAY_SIZE(keypad_col_gpios_8k_ffa),
+ .settle_time.tv.nsec = 0,
+ .poll_time.tv.nsec = 20 * NSEC_PER_MSEC,
+ .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE |
+ GPIOKPF_PRINT_UNMAPPED_KEYS
+};
+
+static struct gpio_event_info *keypad_info_8k_ffa[] = {
+ &keypad_matrix_info_8k_ffa.info
+};
+
+static struct gpio_event_platform_data keypad_data_8k_ffa = {
+ .name = "8k_ffa_keypad",
+ .info = keypad_info_8k_ffa,
+ .info_count = ARRAY_SIZE(keypad_info_8k_ffa)
+};
+
+struct platform_device keypad_device_8k_ffa = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &keypad_data_8k_ffa,
+ },
+};
+
+/* 7k FFA keypad platform device information */
+static struct gpio_event_matrix_info keypad_matrix_info_7k_ffa = {
+ .info.func = keypad_gpio_event_matrix_func,
+ .keymap = keypad_keymap_ffa,
+ .output_gpios = keypad_row_gpios,
+ .input_gpios = keypad_col_gpios,
+ .noutputs = ARRAY_SIZE(keypad_row_gpios),
+ .ninputs = ARRAY_SIZE(keypad_col_gpios),
+ .settle_time.tv.nsec = 0,
+ .poll_time.tv.nsec = 20 * NSEC_PER_MSEC,
+ .flags = GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_DRIVE_INACTIVE |
+ GPIOKPF_PRINT_UNMAPPED_KEYS
+};
+
+static struct gpio_event_info *keypad_info_7k_ffa[] = {
+ &keypad_matrix_info_7k_ffa.info
+};
+
+static struct gpio_event_platform_data keypad_data_7k_ffa = {
+ .name = "7k_ffa_keypad",
+ .info = keypad_info_7k_ffa,
+ .info_count = ARRAY_SIZE(keypad_info_7k_ffa)
+};
+
+struct platform_device keypad_device_7k_ffa = {
+ .name = GPIO_EVENT_DEV_NAME,
+ .id = -1,
+ .dev = {
+ .platform_data = &keypad_data_7k_ffa,
+ },
+};
+
+static struct input_dev *keypad_dev;
+
+static int keypad_gpio_event_matrix_func(struct input_dev *input_dev,
+ struct gpio_event_info *info,
+ void **data, int func)
+{
+ int err;
+ int i;
+
+ err = gpio_event_matrix_func(input_dev, info, data, func);
+
+ if (func == GPIO_EVENT_FUNC_INIT && !err) {
+ keypad_dev = input_dev;
+ for (i = 0; i < ARRAY_SIZE(keypad_virtual_keys); i++)
+ set_bit(keypad_virtual_keys[i] & KEY_MAX,
+ input_dev->keybit);
+ } else if (func == GPIO_EVENT_FUNC_UNINIT) {
+ keypad_dev = NULL;
+ }
+
+ return err;
+}
+
+struct input_dev *msm_keypad_get_input_dev(void)
+{
+ return keypad_dev;
+}
+
diff --git a/arch/arm/mach-msm/keypad-surf-ffa.h b/arch/arm/mach-msm/keypad-surf-ffa.h
new file mode 100644
index 000000000000..2a29307fddfb
--- /dev/null
+++ b/arch/arm/mach-msm/keypad-surf-ffa.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _KEYPAD_SURF_FFA_H
+#define _KEYPAD_SURF_FFA_H
+
+#include <linux/input.h>
+
+#if defined(CONFIG_SURF_FFA_GPIO_KEYPAD)
+struct input_dev *msm_keypad_get_input_dev(void);
+#else
+static struct input_dev *msm_keypad_get_input_dev(void)
+{
+ return NULL;
+}
+#endif
+
+#endif
diff --git a/arch/arm/mach-msm/memory.c b/arch/arm/mach-msm/memory.c
new file mode 100644
index 000000000000..e87bbddc58b0
--- /dev/null
+++ b/arch/arm/mach-msm/memory.c
@@ -0,0 +1,86 @@
+/* arch/arm/mach-msm/memory.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/bootmem.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/mach/map.h>
+
+int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ unsigned long pfn_addr = pfn << PAGE_SHIFT;
+ if ((pfn_addr >= 0x88000000) && (pfn_addr < 0xD0000000)) {
+ prot = pgprot_device(prot);
+ printk("remapping device %lx\n", prot);
+ }
+ return remap_pfn_range(vma, addr, pfn, size, prot);
+}
+
+void *zero_page_strongly_ordered;
+
+static void map_zero_page_strongly_ordered(void)
+{
+ if (zero_page_strongly_ordered)
+ return;
+
+ zero_page_strongly_ordered =
+ ioremap_strongly_ordered(page_to_pfn(empty_zero_page)
+ << PAGE_SHIFT, PAGE_SIZE);
+}
+
+void write_to_strongly_ordered_memory(void)
+{
+ map_zero_page_strongly_ordered();
+ *(int *)zero_page_strongly_ordered = 0;
+}
+
+void flush_axi_bus_buffer(void)
+{
+ __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+ : : "r" (0) : "memory");
+ write_to_strongly_ordered_memory();
+}
+
+void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
+{
+ void *unused_addr = NULL;
+ unsigned long addr, tmp_size, unused_size;
+
+ /* Allocate maximum size needed, see where it ends up.
+ * Then free it -- in this path there are no other allocators
+ * so we can depend on getting the same address back
+ * when we allocate a smaller piece that is aligned
+ * at the end (if necessary) and the piece we really want,
+ * then free the unused first piece.
+ */
+
+ tmp_size = size + alignment - PAGE_SIZE;
+ addr = (unsigned long)alloc_bootmem(tmp_size);
+ free_bootmem(__pa(addr), tmp_size);
+
+ unused_size = alignment - (addr % alignment);
+ if (unused_size)
+ unused_addr = alloc_bootmem(unused_size);
+
+ addr = (unsigned long)alloc_bootmem(size);
+ if (unused_size)
+ free_bootmem(__pa(unused_addr), unused_size);
+
+ return (void *)addr;
+}
diff --git a/arch/arm/mach-msm/modem_notifier.c b/arch/arm/mach-msm/modem_notifier.c
new file mode 100644
index 000000000000..532757065aa9
--- /dev/null
+++ b/arch/arm/mach-msm/modem_notifier.c
@@ -0,0 +1,237 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * Modem Restart Notifier -- Provides notification
+ * of modem restart events.
+ */
+
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+
+#include "modem_notifier.h"
+
+#define DEBUG
+
+static struct srcu_notifier_head modem_notifier_list;
+static struct workqueue_struct *modem_notifier_wq;
+
+static void notify_work_start_reset(struct work_struct *work)
+{
+ modem_notify(0, MODEM_NOTIFIER_START_RESET);
+}
+static DECLARE_WORK(modem_notifier_start_reset_work, &notify_work_start_reset);
+
+void modem_queue_start_reset_notify(void)
+{
+ int ret;
+
+ ret = queue_work(modem_notifier_wq, &modem_notifier_start_reset_work);
+
+ if (!ret)
+ printk(KERN_ERR "%s\n", __func__);
+}
+EXPORT_SYMBOL(modem_queue_start_reset_notify);
+
+static void notify_work_end_reset(struct work_struct *work)
+{
+ modem_notify(0, MODEM_NOTIFIER_END_RESET);
+}
+static DECLARE_WORK(modem_notifier_end_reset_work, &notify_work_end_reset);
+
+void modem_queue_end_reset_notify(void)
+{
+ int ret;
+
+ ret = queue_work(modem_notifier_wq, &modem_notifier_end_reset_work);
+
+ if (!ret)
+ printk(KERN_ERR "%s\n", __func__);
+}
+EXPORT_SYMBOL(modem_queue_end_reset_notify);
+
+int modem_register_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ ret = srcu_notifier_chain_register(
+ &modem_notifier_list, nb);
+
+ return ret;
+}
+EXPORT_SYMBOL(modem_register_notifier);
+
+int modem_unregister_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ ret = srcu_notifier_chain_unregister(
+ &modem_notifier_list, nb);
+
+ return ret;
+}
+EXPORT_SYMBOL(modem_unregister_notifier);
+
+void modem_notify(void *data, unsigned int state)
+{
+ srcu_notifier_call_chain(&modem_notifier_list, state, data);
+}
+EXPORT_SYMBOL(modem_notify);
+
+#if defined(CONFIG_DEBUG_FS)
+static int debug_reset_start(const char __user *buf, int count)
+{
+ modem_queue_start_reset_notify();
+ return 0;
+}
+
+static int debug_reset_end(const char __user *buf, int count)
+{
+ modem_queue_end_reset_notify();
+ return 0;
+}
+
+static ssize_t debug_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int (*fling)(const char __user *buf, int max) = file->private_data;
+ fling(buf, count);
+ return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .write = debug_write,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ int (*fling)(const char __user *buf, int max))
+{
+ debugfs_create_file(name, mode, dent, fling, &debug_ops);
+}
+
+static void modem_notifier_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("modem_notifier", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debug_create("reset_start", 0444, dent, debug_reset_start);
+ debug_create("reset_end", 0444, dent, debug_reset_end);
+}
+#else
+static void modem_notifier_debugfs_init(void) {}
+#endif
+
+#if defined(DEBUG)
+static int modem_notifier_test_call(struct notifier_block *this,
+ unsigned long code,
+ void *_cmd)
+{
+ switch (code) {
+ case MODEM_NOTIFIER_START_RESET:
+ printk(KERN_ERR "Notify: start reset\n");
+ break;
+ case MODEM_NOTIFIER_END_RESET:
+ printk(KERN_ERR "Notify: end reset\n");
+ break;
+ default:
+ printk(KERN_ERR "Notify: general\n");
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+ .notifier_call = modem_notifier_test_call,
+};
+
+static void register_test_notifier(void)
+{
+ modem_register_notifier(&nb);
+}
+#endif
+
+static int __init init_modem_notifier_list(void)
+{
+ srcu_init_notifier_head(&modem_notifier_list);
+ modem_notifier_debugfs_init();
+#if defined(DEBUG)
+ register_test_notifier();
+#endif
+
+ /* Create the workqueue */
+ modem_notifier_wq = create_singlethread_workqueue("modem_notifier");
+ if (!modem_notifier_wq) {
+ srcu_cleanup_notifier_head(&modem_notifier_list);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+module_init(init_modem_notifier_list);
diff --git a/arch/arm/mach-msm/modem_notifier.h b/arch/arm/mach-msm/modem_notifier.h
new file mode 100644
index 000000000000..627f5a142b73
--- /dev/null
+++ b/arch/arm/mach-msm/modem_notifier.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * Modem Restart Notifier API
+ *
+ */
+
+#ifndef _MODEM_NOTIFIER_H
+#define _MODEM_NOTIFIER_H
+
+#include <linux/notifier.h>
+
+#define MODEM_NOTIFIER_START_RESET 0x1
+#define MODEM_NOTIFIER_END_RESET 0x2
+
+extern int modem_register_notifier(struct notifier_block *nb);
+extern int modem_unregister_notifier(struct notifier_block *nb);
+extern void modem_notify(void *data, unsigned int state);
+extern void modem_queue_start_reset_notify(void);
+extern void modem_queue_end_reset_notify(void);
+
+
+#endif /* _MODEM_NOTIFIER_H */
diff --git a/arch/arm/mach-msm/mpp.c b/arch/arm/mach-msm/mpp.c
new file mode 100644
index 000000000000..eb6f533d61d0
--- /dev/null
+++ b/arch/arm/mach-msm/mpp.c
@@ -0,0 +1,176 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* Qualcomm PMIC Multi-Purpose Pin Configurations */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+
+#include <mach/mpp.h>
+
+#include "proc_comm.h"
+
+#define MPP(_name, _id, _is_input, _status) \
+ { .name = _name, .id = _id, .is_input = _is_input, .status = _status}
+
+static struct mpp mpps[] = {
+ MPP("mpp1", 0, 0, 0),
+ MPP("mpp2", 1, 0, 0),
+ MPP("mpp3", 2, 0, 0),
+ MPP("mpp4", 3, 0, 0),
+ MPP("mpp5", 4, 0, 0),
+ MPP("mpp6", 5, 0, 0),
+ MPP("mpp7", 6, 0, 0),
+ MPP("mpp8", 7, 0, 0),
+ MPP("mpp9", 8, 0, 0),
+ MPP("mpp10", 9, 0, 0),
+ MPP("mpp11", 10, 0, 0),
+ MPP("mpp12", 11, 0, 0),
+ MPP("mpp13", 12, 0, 0),
+ MPP("mpp14", 13, 0, 0),
+ MPP("mpp15", 14, 0, 0),
+ MPP("mpp16", 15, 0, 0),
+ MPP("mpp17", 16, 0, 0),
+ MPP("mpp18", 17, 0, 0),
+ MPP("mpp19", 18, 0, 0),
+ MPP("mpp20", 19, 0, 0),
+ MPP("mpp21", 20, 0, 0),
+ MPP("mpp22", 21, 0, 0),
+};
+
+struct mpp *mpp_get(struct device *dev, const char *id)
+{
+ int n;
+ for (n = 0; n < ARRAY_SIZE(mpps); n++) {
+ if (!strcmp(mpps[n].name, id))
+ return mpps + n;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(mpp_get);
+
+int mpp_config_digital_out(struct mpp *mpp, unsigned config)
+{
+ unsigned id = mpp->id;
+ int err;
+ err = msm_proc_comm(PCOM_PM_MPP_CONFIG, &id, &config);
+ mpp->status = err;
+ mpp->is_input = 0;
+ return err;
+}
+EXPORT_SYMBOL(mpp_config_digital_out);
+
+int mpp_config_digital_in(struct mpp *mpp, unsigned config)
+{
+ unsigned id = mpp->id;
+ int err;
+ err = msm_proc_comm(PCOM_PM_MPP_CONFIG_DIGITAL_INPUT, &id, &config);
+ mpp->status = err;
+ mpp->is_input = 1;
+ return err;
+}
+EXPORT_SYMBOL(mpp_config_digital_in);
+
+#if defined(CONFIG_DEBUG_FS)
+static int mpp_debug_set(void *data, u64 val)
+{
+ int err;
+ struct mpp *mpp = data;
+
+ err = mpp_config_digital_out(mpp, (unsigned)val);
+ if (err) {
+ printk(KERN_ERR
+ "%s: mpp_config_digital_out \
+ [%s(%d) = 0x%x] failed\n",
+ __func__, mpp->name, mpp->id, (unsigned)val);
+ }
+ return 0;
+}
+
+static int mpp_debug_get(void *data, u64 *val)
+{
+ struct mpp *mpp = data;
+ int status = mpp->status;
+ if (!status)
+ *val = 0;
+ else
+ *val = 1;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mpp_fops, mpp_debug_get, mpp_debug_set, "%llu\n");
+
+static int __init mpp_debug_init(void)
+{
+ struct dentry *dent;
+ int n;
+
+ dent = debugfs_create_dir("mpp", 0);
+ if (IS_ERR(dent))
+ return 0;
+
+ for (n = 0; n < ARRAY_SIZE(mpps); n++)
+ debugfs_create_file(mpps[n].name, 0644, dent, mpps + n,
+ &mpp_fops);
+
+ return 0;
+}
+
+device_initcall(mpp_debug_init);
+#endif
diff --git a/arch/arm/mach-msm/msm-keypad-devices.h b/arch/arm/mach-msm/msm-keypad-devices.h
new file mode 100644
index 000000000000..469564a754ad
--- /dev/null
+++ b/arch/arm/mach-msm/msm-keypad-devices.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _MSM_KEYPAD_DEVICES_H
+#define _MSM_KEYPAD_DEVICES_H
+
+extern struct platform_device keypad_device_7k_ffa;
+extern struct platform_device keypad_device_8k_ffa;
+extern struct platform_device keypad_device_surf;
+
+#endif
diff --git a/arch/arm/mach-msm/msm_vibrator.c b/arch/arm/mach-msm/msm_vibrator.c
new file mode 100644
index 000000000000..f4da4363aa98
--- /dev/null
+++ b/arch/arm/mach-msm/msm_vibrator.c
@@ -0,0 +1,137 @@
+/* include/asm/mach-msm/htc_pwrsink.h
+ *
+ * Copyright (C) 2008 HTC Corporation.
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <../../../drivers/staging/android/timed_output.h>
+#include <linux/sched.h>
+
+#include <mach/msm_rpcrouter.h>
+
+#define PM_LIBPROG 0x30000061
+#if (CONFIG_MSM_AMSS_VERSION == 6220) || (CONFIG_MSM_AMSS_VERSION == 6225)
+#define PM_LIBVERS 0xfb837d0b
+#else
+#define PM_LIBVERS 0x10001
+#endif
+
+#define HTC_PROCEDURE_SET_VIB_ON_OFF 21
+#define PMIC_VIBRATOR_LEVEL (3000)
+
+static struct work_struct work_vibrator_on;
+static struct work_struct work_vibrator_off;
+static struct hrtimer vibe_timer;
+
+static void set_pmic_vibrator(int on)
+{
+ static struct msm_rpc_endpoint *vib_endpoint;
+ struct set_vib_on_off_req {
+ struct rpc_request_hdr hdr;
+ uint32_t data;
+ } req;
+
+ if (!vib_endpoint) {
+ vib_endpoint = msm_rpc_connect(PM_LIBPROG, PM_LIBVERS, 0);
+ if (IS_ERR(vib_endpoint)) {
+ printk(KERN_ERR "init vib rpc failed!\n");
+ vib_endpoint = 0;
+ return;
+ }
+ }
+
+
+ if (on)
+ req.data = cpu_to_be32(PMIC_VIBRATOR_LEVEL);
+ else
+ req.data = cpu_to_be32(0);
+
+ msm_rpc_call(vib_endpoint, HTC_PROCEDURE_SET_VIB_ON_OFF, &req,
+ sizeof(req), 5 * HZ);
+}
+
+static void pmic_vibrator_on(struct work_struct *work)
+{
+ set_pmic_vibrator(1);
+}
+
+static void pmic_vibrator_off(struct work_struct *work)
+{
+ set_pmic_vibrator(0);
+}
+
+static void timed_vibrator_on(struct timed_output_dev *sdev)
+{
+ schedule_work(&work_vibrator_on);
+}
+
+static void timed_vibrator_off(struct timed_output_dev *sdev)
+{
+ schedule_work(&work_vibrator_off);
+}
+
+static void vibrator_enable(struct timed_output_dev *dev, int value)
+{
+ hrtimer_cancel(&vibe_timer);
+
+ if (value == 0)
+ timed_vibrator_off(dev);
+ else {
+ value = (value > 15000 ? 15000 : value);
+
+ timed_vibrator_on(dev);
+
+ hrtimer_start(&vibe_timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+}
+
+static int vibrator_get_time(struct timed_output_dev *dev)
+{
+ if (hrtimer_active(&vibe_timer)) {
+ ktime_t r = hrtimer_get_remaining(&vibe_timer);
+ return r.tv.sec * 1000 + r.tv.nsec / 1000000;
+ } else
+ return 0;
+}
+
+static enum hrtimer_restart vibrator_timer_func(struct hrtimer *timer)
+{
+ timed_vibrator_off(NULL);
+ return HRTIMER_NORESTART;
+}
+
+static struct timed_output_dev pmic_vibrator = {
+ .name = "vibrator",
+ .get_time = vibrator_get_time,
+ .enable = vibrator_enable,
+};
+
+void __init msm_init_pmic_vibrator(void)
+{
+ INIT_WORK(&work_vibrator_on, pmic_vibrator_on);
+ INIT_WORK(&work_vibrator_off, pmic_vibrator_off);
+
+ hrtimer_init(&vibe_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ vibe_timer.function = vibrator_timer_func;
+
+ timed_output_dev_register(&pmic_vibrator);
+}
+
+MODULE_DESCRIPTION("timed output pmic vibrator device");
+MODULE_LICENSE("GPL");
+
diff --git a/arch/arm/mach-msm/nand_partitions.c b/arch/arm/mach-msm/nand_partitions.c
new file mode 100644
index 000000000000..fc874470339b
--- /dev/null
+++ b/arch/arm/mach-msm/nand_partitions.c
@@ -0,0 +1,187 @@
+/* arch/arm/mach-msm/nand_partitions.c
+ *
+ * Code to extract partition information from ATAG set up by the
+ * bootloader.
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach/flash.h>
+#include <linux/io.h>
+
+#include <asm/setup.h>
+
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+
+#include <mach/msm_iomap.h>
+
+#include <mach/board.h>
+#include "smd_private.h"
+
+/* configuration tags specific to msm */
+
+#define ATAG_MSM_PARTITION 0x4d534D70 /* MSMp */
+
+struct msm_ptbl_entry {
+ char name[16];
+ __u32 offset;
+ __u32 size;
+ __u32 flags;
+};
+
+#define MSM_MAX_PARTITIONS 8
+
+static struct mtd_partition msm_nand_partitions[MSM_MAX_PARTITIONS];
+static char msm_nand_names[MSM_MAX_PARTITIONS * 16];
+
+extern struct flash_platform_data msm_nand_data;
+
+static int __init parse_tag_msm_partition(const struct tag *tag)
+{
+ struct mtd_partition *ptn = msm_nand_partitions;
+ char *name = msm_nand_names;
+ struct msm_ptbl_entry *entry = (void *) &tag->u;
+ unsigned count, n;
+
+ count = (tag->hdr.size - 2) /
+ (sizeof(struct msm_ptbl_entry) / sizeof(__u32));
+
+ if (count > MSM_MAX_PARTITIONS)
+ count = MSM_MAX_PARTITIONS;
+
+ for (n = 0; n < count; n++) {
+ memcpy(name, entry->name, 15);
+ name[15] = 0;
+
+ ptn->name = name;
+ ptn->offset = entry->offset * 64 * 2048;
+ ptn->size = entry->size * 64 * 2048;
+
+ printk(KERN_INFO "Partition (from atag) %s "
+ "-- Offset:%llx Size:%llx\n",
+ ptn->name, ptn->offset, ptn->size);
+
+ name += 16;
+ entry++;
+ ptn++;
+ }
+
+ msm_nand_data.nr_parts = count;
+ msm_nand_data.parts = msm_nand_partitions;
+
+ return 0;
+}
+
+__tagtable(ATAG_MSM_PARTITION, parse_tag_msm_partition);
+
+#define FLASH_PART_MAGIC1 0x55EE73AA
+#define FLASH_PART_MAGIC2 0xE35EBDDB
+#define FLASH_PARTITION_VERSION 0x3
+
+#define LINUX_FS_PARTITION_NAME "0:EFS2APPS"
+
+struct flash_partition_entry {
+ char name[16];
+ u32 offset; /* Offset in blocks from beginning of device */
+ u32 length; /* Length of the partition in blocks */
+ u8 attrib1;
+ u8 attrib2;
+ u8 attrib3;
+ u8 which_flash; /* Numeric ID (first = 0, second = 1) */
+};
+struct flash_partition_table {
+ u32 magic1;
+ u32 magic2;
+ u32 version;
+ u32 numparts;
+ struct flash_partition_entry part_entry[16];
+};
+
+static int get_nand_partitions(void)
+{
+ struct flash_partition_table *partition_table;
+ struct flash_partition_entry *part_entry;
+ struct mtd_partition *ptn = msm_nand_partitions;
+ char *name = msm_nand_names;
+ int part;
+
+ if (msm_nand_data.nr_parts)
+ return 0;
+
+ partition_table = (struct flash_partition_table *)
+ smem_alloc(SMEM_AARM_PARTITION_TABLE,
+ sizeof(struct flash_partition_table));
+
+ if (!partition_table) {
+ printk(KERN_WARNING "%s: no flash partition table in shared "
+ "memory\n", __func__);
+ return -ENOENT;
+ }
+
+ if ((partition_table->magic1 != (u32) FLASH_PART_MAGIC1) ||
+ (partition_table->magic2 != (u32) FLASH_PART_MAGIC2) ||
+ (partition_table->version != (u32) FLASH_PARTITION_VERSION)) {
+ printk(KERN_WARNING "%s: version mismatch -- magic1=%#x, "
+ "magic2=%#x, version=%#x\n", __func__,
+ partition_table->magic1,
+ partition_table->magic2,
+ partition_table->version);
+ return -EFAULT;
+ }
+
+ msm_nand_data.nr_parts = 0;
+
+ /* Get the LINUX FS partition info */
+ for (part = 0; part < partition_table->numparts; part++) {
+ part_entry = &partition_table->part_entry[part];
+
+ /* Find a match for the Linux file system partition */
+ if (strcmp(part_entry->name, LINUX_FS_PARTITION_NAME) == 0) {
+ strcpy(name, part_entry->name);
+ ptn->name = name;
+
+ /*TODO: Get block count and size info */
+ ptn->offset = part_entry->offset * 64 * 2048;
+
+ /* For SMEM, -1 indicates remaining space in flash,
+ * but for MTD it is 0
+ */
+ if (part_entry->length == (u32)-1)
+ ptn->size = 0;
+ else
+ ptn->size = part_entry->length * 64 * 2048;
+
+ msm_nand_data.nr_parts = 1;
+ msm_nand_data.parts = msm_nand_partitions;
+
+ printk(KERN_INFO "Partition(from smem) %s "
+ "-- Offset:%llx Size:%llx\n",
+ ptn->name, ptn->offset, ptn->size);
+
+ return 0;
+ }
+ }
+
+ printk(KERN_WARNING "%s: no partition table found!", __func__);
+
+ return -ENODEV;
+}
+
+device_initcall(get_nand_partitions);
diff --git a/arch/arm/mach-msm/nohlt.c b/arch/arm/mach-msm/nohlt.c
new file mode 100644
index 000000000000..a680fbb8f82d
--- /dev/null
+++ b/arch/arm/mach-msm/nohlt.c
@@ -0,0 +1,86 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * MSM architecture driver to control arm halt behavior
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_DEBUG_FS
+static int set_nohalt(void *data, u64 val)
+{
+ if (val)
+ disable_hlt();
+ else
+ enable_hlt();
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(nohalt_ops, NULL, set_nohalt, "%llu\n");
+
+static int __init init_hlt_debug(void)
+{
+ debugfs_create_file("nohlt", 0200, NULL, NULL, &nohalt_ops);
+
+ return 0;
+}
+
+late_initcall(init_hlt_debug);
+#endif
diff --git a/arch/arm/mach-msm/oem_rapi_client.c b/arch/arm/mach-msm/oem_rapi_client.c
new file mode 100644
index 000000000000..e31de45f2920
--- /dev/null
+++ b/arch/arm/mach-msm/oem_rapi_client.c
@@ -0,0 +1,489 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * OEM RAPI CLIENT Driver source file
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <mach/msm_rpcrouter.h>
+#include <mach/oem_rapi_client.h>
+
+#define OEM_RAPI_PROG 0x3000006B
+#define OEM_RAPI_VERS 0x00010001
+
+#define OEM_RAPI_NULL_PROC 0
+#define OEM_RAPI_RPC_GLUE_CODE_INFO_REMOTE_PROC 1
+#define OEM_RAPI_STREAMING_FUNCTION_PROC 2
+
+#define OEM_RAPI_CLIENT_MAX_OUT_BUFF_SIZE 128
+
+static struct msm_rpc_client *rpc_client;
+static uint32_t open_count;
+static DEFINE_MUTEX(oem_rapi_client_lock);
+
+static int oem_rapi_client_cb(struct msm_rpc_client *client,
+ void *buffer, int in_size)
+{
+ struct rpc_request_hdr *req;
+ void *buf, *cb_func, *reply;
+ uint32_t cb_id, accept_status, size;
+ int rc;
+
+ struct oem_rapi_client_streaming_func_cb_arg arg;
+ struct oem_rapi_client_streaming_func_cb_ret ret;
+
+ arg.input = NULL;
+ ret.out_len = NULL;
+ ret.output = NULL;
+
+ req = (struct rpc_request_hdr *)buffer;
+ buf = (void *)(req + 1);
+
+ /* cb_id */
+ cb_id = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+
+ /* enum */
+ arg.event = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+
+ /* handle */
+ arg.handle = (void *)be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+
+ /* in_len */
+ arg.in_len = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+
+ /* input */
+ size = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ if (size) {
+ arg.input = kmalloc(size, GFP_KERNEL);
+ if (arg.input)
+ memcpy(arg.input, buf, size);
+ else {
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+ goto oem_rapi_send_ack;
+ }
+ }
+ buf += size;
+ if (size & 0x3)
+ buf += 4 - (size & 0x3);
+
+ /* out_len */
+ arg.out_len_valid = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ if (arg.out_len_valid) {
+ ret.out_len = kmalloc(sizeof(*ret.out_len), GFP_KERNEL);
+ if (!ret.out_len) {
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+ goto oem_rapi_send_ack;
+ }
+ }
+
+ /* out */
+ arg.output_valid = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ if (arg.output_valid) {
+ arg.output_size = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ ret.output = kmalloc(arg.output_size, GFP_KERNEL);
+ if (!ret.output) {
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+ goto oem_rapi_send_ack;
+ }
+ }
+
+ cb_func = msm_rpc_get_cb_func(client, cb_id);
+ if (cb_func) {
+ rc = ((int (*)(struct oem_rapi_client_streaming_func_cb_arg *,
+ struct oem_rapi_client_streaming_func_cb_ret *))
+ cb_func)(&arg, &ret);
+ if (rc)
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+ else
+ accept_status = RPC_ACCEPTSTAT_SUCCESS;
+ } else
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+
+ oem_rapi_send_ack:
+ reply = msm_rpc_start_accepted_reply(client, be32_to_cpu(req->xid),
+ accept_status);
+
+ size = 0;
+ if (accept_status == RPC_ACCEPTSTAT_SUCCESS) {
+ *(uint32_t *)reply = cpu_to_be32((uint32_t)(ret.out_len != 0));
+ reply += sizeof(uint32_t);
+ size += sizeof(uint32_t);
+
+ if (ret.out_len) {
+ *(uint32_t *)reply = cpu_to_be32(*ret.out_len);
+ reply += sizeof(uint32_t);
+ size += sizeof(uint32_t);
+ }
+
+ if (ret.output && ret.out_len) {
+ *(uint32_t *)reply =
+ cpu_to_be32((uint32_t)(*ret.out_len));
+ reply += sizeof(uint32_t);
+ size += sizeof(uint32_t);
+
+ memcpy(reply, ret.output, *ret.out_len);
+ reply += *ret.out_len;
+ size += *ret.out_len;
+ if (*ret.out_len & 0x3) {
+ memset(reply, 0, 4 - (*ret.out_len & 0x3));
+ reply += 4 - (*ret.out_len & 0x3);
+ size += 4 - (*ret.out_len & 0x3);
+ }
+ } else {
+ *(uint32_t *)reply = cpu_to_be32(0);
+ reply += sizeof(uint32_t);
+ size += sizeof(uint32_t);
+ }
+ }
+ rc = msm_rpc_send_accepted_reply(client, size);
+ if (rc)
+ pr_err("%s: sending reply failed: %d\n", __func__, rc);
+
+ kfree(arg.input);
+ kfree(ret.out_len);
+ kfree(ret.output);
+
+ return 0;
+}
+
+static int oem_rapi_client_streaming_function_arg(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ int size = 0;
+ int cb_id;
+ struct oem_rapi_client_streaming_func_arg *arg = data;
+
+ /* enum */
+ *((uint32_t *)buf) = cpu_to_be32(arg->event);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ /* cb_id */
+ cb_id = msm_rpc_add_cb_func(client, (void *)arg->cb_func);
+ if ((cb_id < 0) && (cb_id != MSM_RPC_CLIENT_NULL_CB_ID))
+ return cb_id;
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)cb_id);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ /* handle */
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)arg->handle);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ /* in_len */
+ *((uint32_t *)buf) = cpu_to_be32(arg->in_len);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ /* input */
+ *((uint32_t *)buf) = cpu_to_be32(arg->in_len);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+ memcpy(buf, arg->input, arg->in_len);
+ size += arg->in_len;
+ buf += arg->in_len;
+ if (arg->in_len & 0x3) {
+ memset(buf, 0, 4 - (arg->in_len & 0x3));
+ buf += 4 - (arg->in_len & 0x3);
+ size += 4 - (arg->in_len & 0x3);
+ }
+
+ /* out_len */
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)(arg->out_len_valid));
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ /* output */
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)(arg->output_valid));
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+ if (arg->output_valid) {
+ *((uint32_t *)buf) = cpu_to_be32(arg->output_size);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+ }
+
+ return size;
+}
+
+static int oem_rapi_client_streaming_function_ret(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ uint32_t data_present, size;
+ struct oem_rapi_client_streaming_func_ret *ret = data;
+
+ /* out_len */
+ data_present = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ if (data_present && ret->out_len) {
+ *ret->out_len = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ }
+
+ /* output */
+ size = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ if (size && ret->output)
+ memcpy(ret->output, buf, size);
+ buf += size;
+ if (size & 0x3)
+ buf += 4 - (size & 0x3);
+
+ return 0;
+}
+
+int oem_rapi_client_streaming_function(
+ struct msm_rpc_client *client,
+ struct oem_rapi_client_streaming_func_arg *arg,
+ struct oem_rapi_client_streaming_func_ret *ret)
+{
+ return msm_rpc_client_req(client,
+ OEM_RAPI_STREAMING_FUNCTION_PROC,
+ oem_rapi_client_streaming_function_arg, arg,
+ oem_rapi_client_streaming_function_ret,
+ ret, -1);
+}
+EXPORT_SYMBOL(oem_rapi_client_streaming_function);
+
+int oem_rapi_client_close(void)
+{
+ mutex_lock(&oem_rapi_client_lock);
+ if (--open_count == 0) {
+ msm_rpc_unregister_client(rpc_client);
+ pr_info("%s: disconnected from remote oem rapi server\n",
+ __func__);
+ }
+ mutex_unlock(&oem_rapi_client_lock);
+ return 0;
+}
+EXPORT_SYMBOL(oem_rapi_client_close);
+
+struct msm_rpc_client *oem_rapi_client_init(void)
+{
+ mutex_lock(&oem_rapi_client_lock);
+ if (open_count == 0) {
+ rpc_client = msm_rpc_register_client("oemrapiclient",
+ OEM_RAPI_PROG,
+ OEM_RAPI_VERS, 0,
+ oem_rapi_client_cb);
+ if (!IS_ERR(rpc_client))
+ open_count++;
+ }
+ mutex_unlock(&oem_rapi_client_lock);
+ return rpc_client;
+}
+EXPORT_SYMBOL(oem_rapi_client_init);
+
+#if defined(CONFIG_DEBUG_FS)
+
+static struct dentry *dent;
+static int oem_rapi_client_test_res;
+
+static int oem_rapi_client_null(struct msm_rpc_client *client,
+ void *arg, void *ret)
+{
+ return msm_rpc_client_req(client, OEM_RAPI_NULL_PROC,
+ NULL, NULL, NULL, NULL, -1);
+}
+
+static int oem_rapi_client_test_streaming_cb_func(
+ struct oem_rapi_client_streaming_func_cb_arg *arg,
+ struct oem_rapi_client_streaming_func_cb_ret *ret)
+{
+ uint32_t size;
+ pr_info("oem rapi client test cb func\n");
+
+ size = (arg->in_len < OEM_RAPI_CLIENT_MAX_OUT_BUFF_SIZE) ?
+ arg->in_len : OEM_RAPI_CLIENT_MAX_OUT_BUFF_SIZE;
+
+ if (ret->out_len != 0)
+ *ret->out_len = size;
+
+ if (ret->output != 0)
+ memcpy(ret->output, arg->input, size);
+
+ return 0;
+}
+
+static ssize_t debug_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char _buf[16];
+
+ snprintf(_buf, sizeof(_buf), "%i\n", oem_rapi_client_test_res);
+
+ return simple_read_from_buffer(buf, count, pos, _buf, strlen(_buf));
+}
+
+static ssize_t debug_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char input[OEM_RAPI_CLIENT_MAX_OUT_BUFF_SIZE];
+ char output[OEM_RAPI_CLIENT_MAX_OUT_BUFF_SIZE];
+ uint32_t out_len;
+ struct oem_rapi_client_streaming_func_arg arg;
+ struct oem_rapi_client_streaming_func_ret ret;
+
+ unsigned char cmd[64];
+ int len;
+
+ if (count < 1)
+ return 0;
+
+ len = count > 63 ? 63 : count;
+
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+
+ cmd[len] = 0;
+
+ if (cmd[len-1] == '\n') {
+ cmd[len-1] = 0;
+ len--;
+ }
+
+ if (!strncmp(cmd, "null", 64)) {
+ oem_rapi_client_test_res = oem_rapi_client_null(rpc_client,
+ NULL, NULL);
+ } else if (!strncmp(cmd, "streaming_func", 64)) {
+ memset(input, 5, 16);
+ arg.event = 0;
+ arg.cb_func = oem_rapi_client_test_streaming_cb_func;
+ arg.handle = (void *)20;
+ arg.in_len = 16;
+ arg.input = input;
+ arg.out_len_valid = 1;
+ arg.output_valid = 1;
+ arg.output_size = OEM_RAPI_CLIENT_MAX_OUT_BUFF_SIZE;
+
+ ret.out_len = &out_len;
+ ret.output = output;
+ oem_rapi_client_test_res = oem_rapi_client_streaming_function(
+ rpc_client, &arg, &ret);
+ } else
+ oem_rapi_client_test_res = -EINVAL;
+
+ if (oem_rapi_client_test_res)
+ pr_err("oem rapi client test fail %d\n",
+ oem_rapi_client_test_res);
+ else
+ pr_info("oem rapi client test passed\n");
+
+ return count;
+}
+
+static int debug_release(struct inode *ip, struct file *fp)
+{
+ return oem_rapi_client_close();
+}
+
+static int debug_open(struct inode *ip, struct file *fp)
+{
+ struct msm_rpc_client *client;
+ client = oem_rapi_client_init();
+ if (IS_ERR(client)) {
+ pr_err("%s: couldn't open oem rapi client\n", __func__);
+ return PTR_ERR(client);
+ } else
+ pr_info("%s: connected to remote oem rapi server\n", __func__);
+
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .owner = THIS_MODULE,
+ .open = debug_open,
+ .release = debug_release,
+ .read = debug_read,
+ .write = debug_write,
+};
+
+static void __exit oem_rapi_client_mod_exit(void)
+{
+ debugfs_remove(dent);
+}
+
+static int __init oem_rapi_client_mod_init(void)
+{
+ dent = debugfs_create_file("oem_rapi", 0444, 0, NULL, &debug_ops);
+ open_count = 0;
+ oem_rapi_client_test_res = -1;
+ return 0;
+}
+
+module_init(oem_rapi_client_mod_init);
+module_exit(oem_rapi_client_mod_exit);
+
+#endif
+
+MODULE_DESCRIPTION("OEM RAPI CLIENT Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/arm/mach-msm/ping_mdm_rpc_client.c b/arch/arm/mach-msm/ping_mdm_rpc_client.c
new file mode 100644
index 000000000000..b70527e4875d
--- /dev/null
+++ b/arch/arm/mach-msm/ping_mdm_rpc_client.c
@@ -0,0 +1,772 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * SMD RPC PING MODEM Driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <mach/msm_rpcrouter.h>
+
+#define PING_TEST_BASE 0x31
+
+#define PTIOC_NULL_TEST _IO(PING_TEST_BASE, 1)
+#define PTIOC_REG_TEST _IO(PING_TEST_BASE, 2)
+#define PTIOC_DATA_REG_TEST _IO(PING_TEST_BASE, 3)
+#define PTIOC_DATA_CB_REG_TEST _IO(PING_TEST_BASE, 4)
+
+#define PING_MDM_PROG 0x30000081
+#define PING_MDM_VERS 0x00010001
+#define PING_MDM_CB_PROG 0x31000081
+#define PING_MDM_CB_VERS 0x00010001
+
+#define PING_MDM_NULL_PROC 0
+#define PING_MDM_RPC_GLUE_CODE_INFO_REMOTE_PROC 1
+#define PING_MDM_REGISTER_PROC 2
+#define PING_MDM_UNREGISTER_PROC 3
+#define PING_MDM_REGISTER_DATA_PROC 4
+#define PING_MDM_UNREGISTER_DATA_CB_PROC 5
+#define PING_MDM_REGISTER_DATA_CB_PROC 6
+
+#define PING_MDM_DATA_CB_PROC 1
+#define PING_MDM_CB_PROC 2
+
+static struct msm_rpc_client *rpc_client;
+static uint32_t open_count;
+static DEFINE_MUTEX(ping_mdm_lock);
+
+struct ping_mdm_register_cb_arg {
+ uint32_t cb_id;
+ int val;
+};
+
+struct ping_mdm_register_data_cb_cb_arg {
+ uint32_t cb_id;
+ uint32_t *data;
+ uint32_t size;
+ uint32_t sum;
+};
+
+struct ping_mdm_register_data_cb_cb_ret {
+ uint32_t result;
+};
+
+static int ping_mdm_register_cb(struct msm_rpc_client *client,
+ void *buffer, int in_size)
+{
+ int rc;
+ uint32_t accept_status;
+ struct rpc_request_hdr *req;
+ struct ping_mdm_register_cb_arg arg, *buf_ptr;
+ void *cb_func;
+
+ req = (struct rpc_request_hdr *)buffer;
+ buf_ptr = (struct ping_mdm_register_cb_arg *)(req + 1);
+
+ arg.cb_id = be32_to_cpu(buf_ptr->cb_id);
+ arg.val = be32_to_cpu(buf_ptr->val);
+
+ cb_func = msm_rpc_get_cb_func(client, arg.cb_id);
+ if (cb_func) {
+ rc = ((int (*)(struct ping_mdm_register_cb_arg *, void *))
+ cb_func)(&arg, NULL);
+ if (rc)
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+ else
+ accept_status = RPC_ACCEPTSTAT_SUCCESS;
+ } else
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+
+ msm_rpc_start_accepted_reply(client, be32_to_cpu(req->xid),
+ accept_status);
+ rc = msm_rpc_send_accepted_reply(client, 0);
+ if (rc)
+ pr_err("%s: send accepted reply failed: %d\n", __func__, rc);
+
+ return rc;
+}
+
+static int ping_mdm_data_cb(struct msm_rpc_client *client,
+ void *buffer, int in_size)
+{
+ struct rpc_request_hdr *req;
+ int rc, i;
+ void *buf, *cb_func, *reply;
+ uint32_t size, accept_status;
+ struct ping_mdm_register_data_cb_cb_arg arg;
+ struct ping_mdm_register_data_cb_cb_ret ret;
+
+ req = (struct rpc_request_hdr *)buffer;
+ buf = (void *)(req + 1);
+
+ arg.cb_id = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+
+ size = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+ if (size) {
+ arg.data = kmalloc((size * sizeof(*arg.data)), GFP_KERNEL);
+ if (arg.data)
+ for (i = 0; i < size; i++)
+ arg.data[i] =
+ be32_to_cpu(*((uint32_t *)buf + i));
+ }
+ buf += sizeof(uint32_t) * size;
+
+ arg.size = be32_to_cpu(*(uint32_t *)buf);
+ buf += sizeof(uint32_t);
+
+ arg.sum = be32_to_cpu(*(uint32_t *)buf);
+
+ cb_func = msm_rpc_get_cb_func(client, arg.cb_id);
+ if (cb_func) {
+ rc = ((int (*)
+ (struct ping_mdm_register_data_cb_cb_arg *,
+ struct ping_mdm_register_data_cb_cb_ret *))
+ cb_func)(&arg, &ret);
+ if (rc)
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+ else
+ accept_status = RPC_ACCEPTSTAT_SUCCESS;
+ } else
+ accept_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
+
+ reply = msm_rpc_start_accepted_reply(client, be32_to_cpu(req->xid),
+ accept_status);
+
+ size = 0;
+ if (accept_status == RPC_ACCEPTSTAT_SUCCESS) {
+ *(uint32_t *)reply = cpu_to_be32(ret.result);
+ size = sizeof(uint32_t);
+ }
+ rc = msm_rpc_send_accepted_reply(client, size);
+ if (rc)
+ pr_err("%s: send accepted reply failed: %d\n", __func__, rc);
+
+ return rc;
+}
+
+static int ping_mdm_cb_func(struct msm_rpc_client *client,
+ void *buffer, int in_size)
+{
+ int rc = 0;
+ struct rpc_request_hdr *req;
+
+ req = (struct rpc_request_hdr *)buffer;
+
+ switch (be32_to_cpu(req->procedure)) {
+ case PING_MDM_CB_PROC:
+ rc = ping_mdm_register_cb(client, buffer, in_size);
+ break;
+ case PING_MDM_DATA_CB_PROC:
+ rc = ping_mdm_data_cb(client, buffer, in_size);
+ break;
+ default:
+ pr_err("%s: procedure not supported %d\n", __func__,
+ be32_to_cpu(req->procedure));
+ msm_rpc_start_accepted_reply(client, be32_to_cpu(req->xid),
+ RPC_ACCEPTSTAT_PROC_UNAVAIL);
+ rc = msm_rpc_send_accepted_reply(client, 0);
+ if (rc)
+ pr_err("%s: sending reply failed: %d\n", __func__, rc);
+ break;
+ }
+ return rc;
+}
+
+struct ping_mdm_unregister_data_cb_arg {
+ int (*cb_func)(
+ struct ping_mdm_register_data_cb_cb_arg *arg,
+ struct ping_mdm_register_data_cb_cb_ret *ret);
+};
+
+struct ping_mdm_register_data_cb_arg {
+ int (*cb_func)(
+ struct ping_mdm_register_data_cb_cb_arg *arg,
+ struct ping_mdm_register_data_cb_cb_ret *ret);
+ uint32_t num;
+ uint32_t size;
+ uint32_t interval_ms;
+ uint32_t num_tasks;
+};
+
+struct ping_mdm_register_data_cb_ret {
+ uint32_t result;
+};
+
+struct ping_mdm_unregister_data_cb_ret {
+ uint32_t result;
+};
+
+static int ping_mdm_data_cb_register_arg(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ struct ping_mdm_register_data_cb_arg *arg;
+ int cb_id, size = 0;
+
+ arg = (struct ping_mdm_register_data_cb_arg *)data;
+
+ cb_id = msm_rpc_add_cb_func(client, (void *)arg->cb_func);
+ if ((cb_id < 0) && (cb_id != MSM_RPC_CLIENT_NULL_CB_ID))
+ return cb_id;
+
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)cb_id);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ *((uint32_t *)buf) = cpu_to_be32(arg->num);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ *((uint32_t *)buf) = cpu_to_be32(arg->size);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ *((uint32_t *)buf) = cpu_to_be32(arg->interval_ms);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ *((uint32_t *)buf) = cpu_to_be32(arg->num_tasks);
+ size += sizeof(uint32_t);
+
+ return size;
+}
+
+static int ping_mdm_data_cb_unregister_arg(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ struct ping_mdm_unregister_data_cb_arg *arg;
+ int cb_id;
+
+ arg = (struct ping_mdm_unregister_data_cb_arg *)data;
+
+ cb_id = msm_rpc_add_cb_func(client, (void *)arg->cb_func);
+ if ((cb_id < 0) && (cb_id != MSM_RPC_CLIENT_NULL_CB_ID))
+ return cb_id;
+
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)cb_id);
+
+ return sizeof(uint32_t);
+}
+
+static int ping_mdm_data_cb_register_ret(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ struct ping_mdm_register_data_cb_ret *data_ptr, *buf_ptr;
+
+ data_ptr = (struct ping_mdm_register_data_cb_ret *)data;
+ buf_ptr = (struct ping_mdm_register_data_cb_ret *)buf;
+
+ data_ptr->result = be32_to_cpu(buf_ptr->result);
+ return 0;
+}
+
+static int ping_mdm_register_data_cb(
+ struct msm_rpc_client *client,
+ struct ping_mdm_register_data_cb_arg *arg,
+ struct ping_mdm_register_data_cb_ret *ret)
+{
+ return msm_rpc_client_req(client,
+ PING_MDM_REGISTER_DATA_CB_PROC,
+ ping_mdm_data_cb_register_arg, arg,
+ ping_mdm_data_cb_register_ret, ret, -1);
+}
+
+static int ping_mdm_unregister_data_cb(
+ struct msm_rpc_client *client,
+ struct ping_mdm_unregister_data_cb_arg *arg,
+ struct ping_mdm_unregister_data_cb_ret *ret)
+{
+ return msm_rpc_client_req(client,
+ PING_MDM_UNREGISTER_DATA_CB_PROC,
+ ping_mdm_data_cb_unregister_arg, arg,
+ ping_mdm_data_cb_register_ret, ret, -1);
+}
+
+struct ping_mdm_data_arg {
+ uint32_t *data;
+ uint32_t size;
+};
+
+struct ping_mdm_data_ret {
+ uint32_t result;
+};
+
+static int ping_mdm_data_register_arg(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ int i;
+ struct ping_mdm_data_arg *data_ptr;
+
+ data_ptr = (struct ping_mdm_data_arg *)data;
+
+ *((uint32_t *)buf) = cpu_to_be32(data_ptr->size);
+ buf += sizeof(data_ptr->size);
+ for (i = 0; i < data_ptr->size; i++) {
+ *((uint32_t *)buf) = cpu_to_be32(data_ptr->data[i]);
+ buf += sizeof(*data_ptr->data);
+ }
+
+ *((uint32_t *)buf) = cpu_to_be32(data_ptr->size);
+
+ return (data_ptr->size * sizeof(uint32_t)) +
+ (sizeof(data_ptr->size) * 2);
+}
+
+static int ping_mdm_data_register_ret(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ struct ping_mdm_data_ret *data_ptr, *buf_ptr;
+
+ data_ptr = (struct ping_mdm_data_ret *)data;
+ buf_ptr = (struct ping_mdm_data_ret *)buf;
+
+ data_ptr->result = be32_to_cpu(buf_ptr->result);
+ return 0;
+}
+
+static int ping_mdm_data_register(
+ struct msm_rpc_client *client,
+ struct ping_mdm_data_arg *arg,
+ struct ping_mdm_data_ret *ret)
+{
+ return msm_rpc_client_req(client,
+ PING_MDM_REGISTER_DATA_PROC,
+ ping_mdm_data_register_arg, arg,
+ ping_mdm_data_register_ret, ret, -1);
+}
+
+struct ping_mdm_register_arg {
+ int (*cb_func)(struct ping_mdm_register_cb_arg *, void *);
+ int num;
+};
+
+struct ping_mdm_unregister_arg {
+ int (*cb_func)(struct ping_mdm_register_cb_arg *, void *);
+};
+
+struct ping_mdm_register_ret {
+ uint32_t result;
+};
+
+struct ping_mdm_unregister_ret {
+ uint32_t result;
+};
+
+static int ping_mdm_register_arg(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ struct ping_mdm_register_arg *arg;
+ int cb_id, size = 0;
+
+ arg = (struct ping_mdm_register_arg *)data;
+
+ cb_id = msm_rpc_add_cb_func(client, (void *)arg->cb_func);
+ if ((cb_id < 0) && (cb_id != MSM_RPC_CLIENT_NULL_CB_ID))
+ return cb_id;
+
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)cb_id);
+ size += sizeof(uint32_t);
+ buf += sizeof(uint32_t);
+
+ *((int32_t *)buf) = cpu_to_be32(arg->num);
+ size += sizeof(uint32_t);
+
+ return size;
+}
+
+static int ping_mdm_unregister_arg(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ struct ping_mdm_unregister_arg *arg;
+ int cb_id;
+
+ arg = (struct ping_mdm_unregister_arg *)data;
+
+ cb_id = msm_rpc_add_cb_func(client, (void *)arg->cb_func);
+ if ((cb_id < 0) && (cb_id != MSM_RPC_CLIENT_NULL_CB_ID))
+ return cb_id;
+
+ *((uint32_t *)buf) = cpu_to_be32((uint32_t)cb_id);
+
+ return sizeof(uint32_t);
+}
+
+static int ping_mdm_register_ret(struct msm_rpc_client *client,
+ void *buf, void *data)
+{
+ struct ping_mdm_register_ret *data_ptr, *buf_ptr;
+
+ data_ptr = (struct ping_mdm_register_ret *)data;
+ buf_ptr = (struct ping_mdm_register_ret *)buf;
+
+ data_ptr->result = be32_to_cpu(buf_ptr->result);
+
+ return 0;
+}
+
+static int ping_mdm_register(
+ struct msm_rpc_client *client,
+ struct ping_mdm_register_arg *arg,
+ struct ping_mdm_register_ret *ret)
+{
+ return msm_rpc_client_req(client,
+ PING_MDM_REGISTER_PROC,
+ ping_mdm_register_arg, arg,
+ ping_mdm_register_ret, ret, -1);
+}
+
+static int ping_mdm_unregister(
+ struct msm_rpc_client *client,
+ struct ping_mdm_unregister_arg *arg,
+ struct ping_mdm_unregister_ret *ret)
+{
+ return msm_rpc_client_req(client,
+ PING_MDM_UNREGISTER_PROC,
+ ping_mdm_unregister_arg, arg,
+ ping_mdm_register_ret, ret, -1);
+}
+
+static int ping_mdm_null(struct msm_rpc_client *client,
+ void *arg, void *ret)
+{
+ return msm_rpc_client_req(client, PING_MDM_NULL_PROC,
+ NULL, NULL, NULL, NULL, -1);
+}
+
+static int ping_mdm_close(void)
+{
+ mutex_lock(&ping_mdm_lock);
+ if (--open_count == 0) {
+ msm_rpc_unregister_client(rpc_client);
+ pr_info("%s: disconnected from remote ping server\n",
+ __func__);
+ }
+ mutex_unlock(&ping_mdm_lock);
+ return 0;
+}
+
+static struct msm_rpc_client *ping_mdm_init(void)
+{
+ mutex_lock(&ping_mdm_lock);
+ if (open_count == 0) {
+ rpc_client = msm_rpc_register_client("pingdef",
+ PING_MDM_PROG,
+ PING_MDM_VERS, 1,
+ ping_mdm_cb_func);
+ if (!IS_ERR(rpc_client))
+ open_count++;
+ }
+ mutex_unlock(&ping_mdm_lock);
+ return rpc_client;
+}
+
+static struct dentry *dent;
+
+static DEFINE_MUTEX(ping_mdm_cb_lock);
+static LIST_HEAD(ping_mdm_cb_list);
+static uint32_t test_res;
+
+static int reg_cb_num, reg_cb_num_req;
+static int data_cb_num, data_cb_num_req;
+static int reg_done_flag, data_cb_done_flag;
+static DECLARE_WAIT_QUEUE_HEAD(reg_test_wait);
+static DECLARE_WAIT_QUEUE_HEAD(data_cb_test_wait);
+
+static int ping_mdm_data_register_test(void)
+{
+ int i, rc = 0;
+ uint32_t my_data[64];
+ uint32_t my_sum = 0;
+ struct ping_mdm_data_arg data_arg;
+ struct ping_mdm_data_ret data_ret;
+
+ for (i = 0; i < 64; i++) {
+ my_data[i] = (42 + i);
+ my_sum ^= (42 + i);
+ }
+
+ data_arg.data = my_data;
+ data_arg.size = 64;
+
+ rc = ping_mdm_data_register(rpc_client, &data_arg, &data_ret);
+ if (rc)
+ return rc;
+
+ if (my_sum != data_ret.result) {
+ pr_err("%s: sum mismatch %d %d\n",
+ __func__, my_sum, data_ret.result);
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static int ping_mdm_test_register_data_cb(
+ struct ping_mdm_register_data_cb_cb_arg *arg,
+ struct ping_mdm_register_data_cb_cb_ret *ret)
+{
+ uint32_t i, sum = 0;
+
+ pr_info("%s: received cb_id %d, size = %d, sum = %u\n",
+ __func__, arg->cb_id, arg->size, arg->sum);
+
+ if (arg->data)
+ for (i = 0; i < arg->size; i++)
+ sum ^= arg->data[i];
+
+ if (sum != arg->sum)
+ pr_err("%s: sum mismatch %d %d\n", __func__, sum, arg->sum);
+
+ data_cb_num++;
+ if (data_cb_num == data_cb_num_req) {
+ data_cb_done_flag = 1;
+ wake_up(&data_cb_test_wait);
+ }
+
+ ret->result = 1;
+ return 0;
+}
+
+static int ping_mdm_data_cb_register_test(void)
+{
+ int rc = 0;
+ struct ping_mdm_register_data_cb_arg reg_arg;
+ struct ping_mdm_unregister_data_cb_arg unreg_arg;
+ struct ping_mdm_register_data_cb_ret reg_ret;
+ struct ping_mdm_unregister_data_cb_ret unreg_ret;
+
+ data_cb_num = 0;
+ data_cb_num_req = 10;
+ data_cb_done_flag = 0;
+
+ reg_arg.cb_func = ping_mdm_test_register_data_cb;
+ reg_arg.num = 10;
+ reg_arg.size = 64;
+ reg_arg.interval_ms = 10;
+ reg_arg.num_tasks = 1;
+
+ rc = ping_mdm_register_data_cb(rpc_client, &reg_arg, &reg_ret);
+ if (rc)
+ return rc;
+
+ pr_info("%s: data_cb_register result: 0x%x\n",
+ __func__, reg_ret.result);
+ wait_event(data_cb_test_wait, data_cb_done_flag);
+
+ unreg_arg.cb_func = reg_arg.cb_func;
+ rc = ping_mdm_unregister_data_cb(rpc_client, &unreg_arg, &unreg_ret);
+ if (rc)
+ return rc;
+
+ pr_info("%s: data_cb_unregister result: 0x%x\n",
+ __func__, unreg_ret.result);
+
+ return 0;
+}
+
+static int ping_mdm_test_register_cb(
+ struct ping_mdm_register_cb_arg *arg, void *ret)
+{
+ pr_info("%s: received cb_id %d, val = %d\n",
+ __func__, arg->cb_id, arg->val);
+
+ reg_cb_num++;
+ if (reg_cb_num == reg_cb_num_req) {
+ reg_done_flag = 1;
+ wake_up(&reg_test_wait);
+ }
+ return 0;
+}
+
+static int ping_mdm_register_test(void)
+{
+ int rc = 0;
+ struct ping_mdm_register_arg reg_arg;
+ struct ping_mdm_unregister_arg unreg_arg;
+ struct ping_mdm_register_ret reg_ret;
+ struct ping_mdm_unregister_ret unreg_ret;
+
+ reg_cb_num = 0;
+ reg_cb_num_req = 10;
+ reg_done_flag = 0;
+
+ reg_arg.num = 10;
+ reg_arg.cb_func = ping_mdm_test_register_cb;
+
+ rc = ping_mdm_register(rpc_client, &reg_arg, &reg_ret);
+ if (rc)
+ return rc;
+
+ pr_info("%s: register result: 0x%x\n",
+ __func__, reg_ret.result);
+
+ wait_event(reg_test_wait, reg_done_flag);
+
+ unreg_arg.cb_func = ping_mdm_test_register_cb;
+ rc = ping_mdm_unregister(rpc_client, &unreg_arg, &unreg_ret);
+ if (rc)
+ return rc;
+
+ pr_info("%s: unregister result: 0x%x\n",
+ __func__, unreg_ret.result);
+
+ return 0;
+}
+
+static int ping_mdm_null_test(void)
+{
+ return ping_mdm_null(rpc_client, NULL, NULL);
+}
+
+static int ping_test_release(struct inode *ip, struct file *fp)
+{
+ return ping_mdm_close();
+}
+
+static int ping_test_open(struct inode *ip, struct file *fp)
+{
+ struct msm_rpc_client *client;
+
+ client = ping_mdm_init();
+ if (IS_ERR(client)) {
+ pr_err("%s: couldn't open ping client\n", __func__);
+ return PTR_ERR(client);
+ } else
+ pr_info("%s: connected to remote ping server\n",
+ __func__);
+
+ return 0;
+}
+
+static ssize_t ping_test_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char _buf[16];
+
+ snprintf(_buf, sizeof(_buf), "%i\n", test_res);
+
+ return simple_read_from_buffer(buf, count, pos, _buf, strlen(_buf));
+}
+
+static ssize_t ping_test_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ unsigned char cmd[64];
+ int len;
+
+ if (count < 1)
+ return 0;
+
+ len = count > 63 ? 63 : count;
+
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+
+ cmd[len] = 0;
+
+ /* lazy */
+ if (cmd[len-1] == '\n') {
+ cmd[len-1] = 0;
+ len--;
+ }
+
+ if (!strncmp(cmd, "null_test", 64))
+ test_res = ping_mdm_null_test();
+ else if (!strncmp(cmd, "reg_test", 64))
+ test_res = ping_mdm_register_test();
+ else if (!strncmp(cmd, "data_reg_test", 64))
+ test_res = ping_mdm_data_register_test();
+ else if (!strncmp(cmd, "data_cb_reg_test", 64))
+ test_res = ping_mdm_data_cb_register_test();
+ else
+ test_res = -EINVAL;
+
+ return count;
+}
+
+static const struct file_operations debug_ops = {
+ .owner = THIS_MODULE,
+ .open = ping_test_open,
+ .read = ping_test_read,
+ .write = ping_test_write,
+ .release = ping_test_release,
+};
+
+static void __exit ping_test_exit(void)
+{
+ debugfs_remove(dent);
+}
+
+static int __init ping_test_init(void)
+{
+ dent = debugfs_create_file("ping_mdm", 0444, 0, NULL, &debug_ops);
+ test_res = 0;
+ open_count = 0;
+ return 0;
+}
+
+module_init(ping_test_init);
+module_exit(ping_test_exit);
+
+MODULE_DESCRIPTION("PING TEST Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/arm/mach-msm/pm.c b/arch/arm/mach-msm/pm.c
new file mode 100644
index 000000000000..d5d26cc021ed
--- /dev/null
+++ b/arch/arm/mach-msm/pm.c
@@ -0,0 +1,954 @@
+/* arch/arm/mach-msm/pm.c
+ *
+ * MSM Power Management Routines
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/pm_qos_params.h>
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+#include <mach/msm_iomap.h>
+#include <mach/system.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_HAS_WAKELOCK
+#include <linux/wakelock.h>
+#endif
+
+#include "smd_private.h"
+#include "acpuclock.h"
+#include "clock.h"
+#include "proc_comm.h"
+#include "idle.h"
+#include "irq.h"
+#include "gpio.h"
+#include "timer.h"
+#include "pm.h"
+
+enum {
+ MSM_PM_DEBUG_SUSPEND = 1U << 0,
+ MSM_PM_DEBUG_POWER_COLLAPSE = 1U << 1,
+ MSM_PM_DEBUG_STATE = 1U << 2,
+ MSM_PM_DEBUG_CLOCK = 1U << 3,
+ MSM_PM_DEBUG_RESET_VECTOR = 1U << 4,
+ MSM_PM_DEBUG_SMSM_STATE = 1U << 5,
+ MSM_PM_DEBUG_IDLE = 1U << 6,
+};
+static int msm_pm_debug_mask;
+module_param_named(debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
+static int msm_pm_sleep_time_override;
+module_param_named(sleep_time_override,
+ msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
+#endif
+
+static int msm_pm_sleep_mode = CONFIG_MSM7X00A_SLEEP_MODE;
+module_param_named(sleep_mode, msm_pm_sleep_mode, int, S_IRUGO | S_IWUSR | S_IWGRP);
+static int msm_pm_idle_sleep_mode = CONFIG_MSM7X00A_IDLE_SLEEP_MODE;
+module_param_named(idle_sleep_mode, msm_pm_idle_sleep_mode, int, S_IRUGO | S_IWUSR | S_IWGRP);
+static int msm_pm_idle_sleep_min_time = CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME;
+module_param_named(idle_sleep_min_time, msm_pm_idle_sleep_min_time, int, S_IRUGO | S_IWUSR | S_IWGRP);
+static int msm_pm_idle_spin_time = CONFIG_MSM7X00A_IDLE_SPIN_TIME;
+module_param_named(idle_spin_time, msm_pm_idle_spin_time, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define A11S_CLK_SLEEP_EN (MSM_CSR_BASE + 0x11c)
+#define A11S_PWRDOWN (MSM_CSR_BASE + 0x440)
+#define A11S_STANDBY_CTL (MSM_CSR_BASE + 0x108)
+#define A11RAMBACKBIAS (MSM_CSR_BASE + 0x508)
+
+enum {
+ SLEEP_LIMIT_NONE = 0,
+ SLEEP_LIMIT_NO_TCXO_SHUTDOWN = 2
+};
+
+static atomic_t msm_pm_init_done = ATOMIC_INIT(0);
+struct smsm_interrupt_info_ext {
+ uint32_t aArm_en_mask;
+ uint32_t aArm_interrupts_pending;
+ uint32_t aArm_wakeup_reason;
+ uint32_t aArm_rpc_prog;
+ uint32_t aArm_rpc_proc;
+ char aArm_smd_port_name[20];
+ uint32_t aArm_gpio_info;
+};
+static struct msm_pm_smem_addr_t {
+ uint32_t *sleep_delay;
+ uint32_t *limit_sleep;
+ struct smsm_interrupt_info *int_info;
+ struct smsm_interrupt_info_ext *int_info_ext;
+} msm_pm_sma;
+
+static uint32_t *msm_pm_reset_vector;
+static uint32_t msm_pm_max_sleep_time;
+static struct msm_pm_platform_data *msm_pm_modes;
+
+#ifdef CONFIG_MSM_IDLE_STATS
+enum msm_pm_time_stats_id {
+ MSM_PM_STAT_REQUESTED_IDLE,
+ MSM_PM_STAT_IDLE_SPIN,
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_IDLE_SLEEP,
+ MSM_PM_STAT_IDLE_FAILED_SLEEP,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ MSM_PM_STAT_FAILED_SUSPEND,
+ MSM_PM_STAT_NOT_IDLE,
+ MSM_PM_STAT_COUNT
+};
+
+static struct msm_pm_time_stats {
+ const char *name;
+ int64_t first_bucket_time;
+ int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int count;
+ int64_t total_time;
+} msm_pm_stats[MSM_PM_STAT_COUNT] = {
+ [MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request",
+ [MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_SPIN].name = "idle-spin",
+ [MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_WFI].name = "idle-wfi",
+ [MSM_PM_STAT_IDLE_WFI].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_SLEEP].name = "idle-sleep",
+ [MSM_PM_STAT_IDLE_SLEEP].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_FAILED_SLEEP].name = "idle-failed-sleep",
+ [MSM_PM_STAT_IDLE_FAILED_SLEEP].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_POWER_COLLAPSE].name = "idle-power-collapse",
+ [MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
+ "idle-failed-power-collapse",
+ [MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_SUSPEND].name = "suspend",
+ [MSM_PM_STAT_SUSPEND].first_bucket_time =
+ CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_FAILED_SUSPEND].name = "failed-suspend",
+ [MSM_PM_STAT_FAILED_SUSPEND].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_NOT_IDLE].name = "not-idle",
+ [MSM_PM_STAT_NOT_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+};
+
+static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
+{
+ int i;
+ int64_t bt;
+ msm_pm_stats[id].total_time += t;
+ msm_pm_stats[id].count++;
+ bt = t;
+ do_div(bt, msm_pm_stats[id].first_bucket_time);
+ if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
+ (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
+ i = DIV_ROUND_UP(fls((uint32_t)bt),
+ CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
+ else
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+ msm_pm_stats[id].bucket[i]++;
+ if (t < msm_pm_stats[id].min_time[i] || !msm_pm_stats[id].max_time[i])
+ msm_pm_stats[id].min_time[i] = t;
+ if (t > msm_pm_stats[id].max_time[i])
+ msm_pm_stats[id].max_time[i] = t;
+}
+
+static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
+static DECLARE_BITMAP(msm_pm_clocks_no_tcxo_shutdown, NR_CLKS);
+#endif
+
+static int
+msm_pm_wait_state(uint32_t wait_state_all_set, uint32_t wait_state_all_clear,
+ uint32_t wait_state_any_set, uint32_t wait_state_any_clear)
+{
+ int i;
+ uint32_t state;
+
+ for (i = 0; i < 2000000; i++) {
+ state = smsm_get_state(SMSM_MODEM_STATE);
+ if (((state & wait_state_all_set) == wait_state_all_set) &&
+ ((~state & wait_state_all_clear) == wait_state_all_clear) &&
+ (wait_state_any_set == 0 || (state & wait_state_any_set) ||
+ wait_state_any_clear == 0 || (state & wait_state_any_clear)))
+ return 0;
+ }
+ printk(KERN_ERR "msm_pm_wait_state(%x, %x, %x, %x) failed %x\n",
+ wait_state_all_set, wait_state_all_clear,
+ wait_state_any_set, wait_state_any_clear, state);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Respond to timing out waiting for Modem
+ *
+ * NOTE: The function never returns.
+ */
+static void msm_pm_timeout(void)
+{
+#if defined(CONFIG_MSM_PM_TIMEOUT_RESET_CHIP)
+ printk(KERN_EMERG "%s(): resetting chip\n", __func__);
+ msm_proc_comm(PCOM_RESET_CHIP_IMM, NULL, NULL);
+#elif defined(CONFIG_MSM_PM_TIMEOUT_RESET_MODEM)
+ printk(KERN_EMERG "%s(): resetting modem\n", __func__);
+ msm_proc_comm_reset_modem_now();
+#elif defined(CONFIG_MSM_PM_TIMEOUT_HALT)
+ printk(KERN_EMERG "%s(): halting\n", __func__);
+#endif
+ for (;;)
+ ;
+}
+
+static int msm_sleep(int sleep_mode, uint32_t sleep_delay,
+ uint32_t sleep_limit, int from_idle)
+{
+ uint32_t saved_vector[2];
+ int collapsed;
+ uint32_t enter_state;
+ uint32_t enter_wait_set = 0;
+ uint32_t enter_wait_clear = 0;
+ uint32_t exit_state;
+ uint32_t exit_wait_clear = 0;
+ uint32_t exit_wait_set = 0;
+ unsigned long pm_saved_acpu_clk_rate = 0;
+ int ret;
+ int rv = -EINTR;
+
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
+ printk(KERN_INFO "msm_sleep(): "
+ "mode %d delay %u limit %u idle %d\n",
+ sleep_mode, sleep_delay, sleep_limit, from_idle);
+
+ switch (sleep_mode) {
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+ enter_state = SMSM_PWRC;
+ enter_wait_set = SMSM_RSA;
+ exit_state = SMSM_WFPI;
+ exit_wait_clear = SMSM_RSA;
+ break;
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
+ enter_state = SMSM_PWRC_SUSPEND;
+ enter_wait_set = SMSM_RSA;
+ exit_state = SMSM_WFPI;
+ exit_wait_clear = SMSM_RSA;
+ break;
+ case MSM_PM_SLEEP_MODE_APPS_SLEEP:
+ enter_state = SMSM_SLEEP;
+ exit_state = SMSM_SLEEPEXIT;
+ exit_wait_set = SMSM_SLEEPEXIT;
+ break;
+ default:
+ enter_state = 0;
+ exit_state = 0;
+ }
+
+ if (enter_state && !(smsm_get_state(SMSM_MODEM_STATE) & SMSM_RUN)) {
+ if ((MSM_PM_DEBUG_POWER_COLLAPSE | MSM_PM_DEBUG_SUSPEND) &
+ msm_pm_debug_mask)
+ printk(KERN_INFO "msm_sleep(): modem not ready\n");
+ rv = -EBUSY;
+ goto check_failed;
+ }
+
+ memset(msm_pm_sma.int_info, 0, sizeof(*msm_pm_sma.int_info));
+ msm_irq_enter_sleep1(!!enter_state, from_idle,
+ &msm_pm_sma.int_info->aArm_en_mask);
+ msm_gpio_enter_sleep(from_idle);
+
+ if (enter_state) {
+ if (sleep_delay == 0 && sleep_mode >= MSM_PM_SLEEP_MODE_APPS_SLEEP)
+ sleep_delay = 192000*5; /* APPS_SLEEP does not allow infinite timeout */
+
+ *msm_pm_sma.sleep_delay = sleep_delay;
+ *msm_pm_sma.limit_sleep = sleep_limit;
+ ret = smsm_change_state(SMSM_APPS_STATE, SMSM_RUN, enter_state);
+ if (ret) {
+ printk(KERN_ERR "msm_sleep(): smsm_change_state %x failed\n", enter_state);
+ enter_state = 0;
+ exit_state = 0;
+ }
+ ret = msm_pm_wait_state(enter_wait_set, enter_wait_clear, 0, 0);
+ if (ret) {
+ printk(KERN_EMERG "msm_sleep(): power collapse entry "
+ "timed out waiting for Modem's response\n");
+ msm_pm_timeout();
+ }
+ }
+ if (msm_irq_enter_sleep2(!!enter_state, from_idle))
+ goto enter_failed;
+
+ if (enter_state) {
+ writel(0x1f, A11S_CLK_SLEEP_EN);
+ writel(1, A11S_PWRDOWN);
+
+ writel(0, A11S_STANDBY_CTL);
+ writel(0, A11RAMBACKBIAS);
+
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
+ printk(KERN_INFO "msm_sleep(): enter "
+ "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, "
+ "smsm_get_state %x\n", readl(A11S_CLK_SLEEP_EN),
+ readl(A11S_PWRDOWN),
+ smsm_get_state(SMSM_MODEM_STATE));
+ }
+
+ if (sleep_mode <= MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT) {
+ pm_saved_acpu_clk_rate = acpuclk_power_collapse();
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
+ printk(KERN_INFO "msm_sleep(): %ld enter power collapse"
+ "\n", pm_saved_acpu_clk_rate);
+ if (pm_saved_acpu_clk_rate == 0)
+ goto ramp_down_failed;
+ }
+ if (sleep_mode < MSM_PM_SLEEP_MODE_APPS_SLEEP) {
+#ifdef CONFIG_MSM_ADM_OFF_AT_POWER_COLLAPSE
+ /* XXX: Temp workaround that needs to be removed soon. The
+ * right fix will probably involve the DMA driver taking
+ * ownership of the ADM clock. */
+ /* id is set to denote ADM clock. */
+ unsigned id = 1;
+ msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL);
+#endif
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE)
+ smsm_print_sleep_info(*msm_pm_sma.sleep_delay,
+ *msm_pm_sma.limit_sleep,
+ msm_pm_sma.int_info->aArm_en_mask,
+ msm_pm_sma.int_info->aArm_wakeup_reason,
+ msm_pm_sma.int_info->aArm_interrupts_pending);
+ saved_vector[0] = msm_pm_reset_vector[0];
+ saved_vector[1] = msm_pm_reset_vector[1];
+ msm_pm_reset_vector[0] = 0xE51FF004; /* ldr pc, 4 */
+ msm_pm_reset_vector[1] = virt_to_phys(msm_pm_collapse_exit);
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_RESET_VECTOR)
+ printk(KERN_INFO "msm_sleep(): vector %x %x -> "
+ "%x %x\n", saved_vector[0], saved_vector[1],
+ msm_pm_reset_vector[0], msm_pm_reset_vector[1]);
+ collapsed = msm_pm_collapse();
+ msm_pm_reset_vector[0] = saved_vector[0];
+ msm_pm_reset_vector[1] = saved_vector[1];
+ if (collapsed) {
+ cpu_init();
+ local_fiq_enable();
+ rv = 0;
+ }
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_POWER_COLLAPSE)
+ printk(KERN_INFO "msm_pm_collapse(): returned %d\n",
+ collapsed);
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE)
+ smsm_print_sleep_info(*msm_pm_sma.sleep_delay,
+ *msm_pm_sma.limit_sleep,
+ msm_pm_sma.int_info->aArm_en_mask,
+ msm_pm_sma.int_info->aArm_wakeup_reason,
+ msm_pm_sma.int_info->aArm_interrupts_pending);
+#ifdef CONFIG_MSM_ADM_OFF_AT_POWER_COLLAPSE
+ /* id is set to denote ADM clock. */
+ id = 1;
+ if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL) < 0 ||
+ id < 0)
+ printk(KERN_ERR
+ "msm_sleep(): failed to turn on ADM clock\n");
+#endif
+ } else {
+ msm_arch_idle();
+ rv = 0;
+ }
+
+ if (sleep_mode <= MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT) {
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
+ printk(KERN_INFO "msm_sleep(): exit power collapse %ld"
+ "\n", pm_saved_acpu_clk_rate);
+ if (acpuclk_set_rate(pm_saved_acpu_clk_rate, SETRATE_PC) < 0)
+ printk(KERN_ERR "msm_sleep(): clk_set_rate %ld "
+ "failed\n", pm_saved_acpu_clk_rate);
+ }
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
+ printk(KERN_INFO "msm_sleep(): exit A11S_CLK_SLEEP_EN %x, "
+ "A11S_PWRDOWN %x, smsm_get_state %x\n",
+ readl(A11S_CLK_SLEEP_EN), readl(A11S_PWRDOWN),
+ smsm_get_state(SMSM_MODEM_STATE));
+ramp_down_failed:
+ msm_irq_exit_sleep1(msm_pm_sma.int_info->aArm_en_mask,
+ msm_pm_sma.int_info->aArm_wakeup_reason,
+ msm_pm_sma.int_info->aArm_interrupts_pending);
+enter_failed:
+ if (enter_state) {
+ writel(0x00, A11S_CLK_SLEEP_EN);
+ writel(0, A11S_PWRDOWN);
+ smsm_change_state(SMSM_APPS_STATE, enter_state, exit_state);
+ if (msm_pm_wait_state(exit_wait_set, exit_wait_clear, 0, 0)) {
+ printk(KERN_EMERG "msm_sleep(): power collapse exit "
+ "timed out waiting for Modem's response\n");
+ msm_pm_timeout();
+ }
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
+ printk(KERN_INFO "msm_sleep(): sleep exit "
+ "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, "
+ "smsm_get_state %x\n", readl(A11S_CLK_SLEEP_EN),
+ readl(A11S_PWRDOWN),
+ smsm_get_state(SMSM_MODEM_STATE));
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE)
+ smsm_print_sleep_info(*msm_pm_sma.sleep_delay,
+ *msm_pm_sma.limit_sleep,
+ msm_pm_sma.int_info->aArm_en_mask,
+ msm_pm_sma.int_info->aArm_wakeup_reason,
+ msm_pm_sma.int_info->aArm_interrupts_pending);
+ }
+ msm_irq_exit_sleep2(msm_pm_sma.int_info->aArm_en_mask,
+ msm_pm_sma.int_info->aArm_wakeup_reason,
+ msm_pm_sma.int_info->aArm_interrupts_pending);
+ if (enter_state) {
+ smsm_change_state(SMSM_APPS_STATE, exit_state, SMSM_RUN);
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_STATE)
+ printk(KERN_INFO "msm_sleep(): sleep exit "
+ "A11S_CLK_SLEEP_EN %x, A11S_PWRDOWN %x, "
+ "smsm_get_state %x\n", readl(A11S_CLK_SLEEP_EN),
+ readl(A11S_PWRDOWN),
+ smsm_get_state(SMSM_MODEM_STATE));
+ }
+ msm_irq_exit_sleep3(msm_pm_sma.int_info->aArm_en_mask,
+ msm_pm_sma.int_info->aArm_wakeup_reason,
+ msm_pm_sma.int_info->aArm_interrupts_pending);
+ msm_gpio_exit_sleep();
+ smd_sleep_exit();
+
+check_failed:
+ return rv;
+}
+
+void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
+{
+ int64_t max_sleep_time_bs = max_sleep_time_ns;
+
+ /* Convert from ns -> BS units */
+ do_div(max_sleep_time_bs, NSEC_PER_SEC / 32768);
+
+ if (max_sleep_time_bs > 0x6DDD000)
+ msm_pm_max_sleep_time = (uint32_t) 0x6DDD000;
+ else
+ msm_pm_max_sleep_time = (uint32_t) max_sleep_time_bs;
+
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_SUSPEND)
+ printk(KERN_INFO "%s: Requested %lldns (%lldbs), Giving %ubs\n",
+ __func__, max_sleep_time_ns,
+ max_sleep_time_bs,
+ msm_pm_max_sleep_time);
+}
+EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
+
+void arch_idle(void)
+{
+ int ret;
+ int spin;
+ int64_t sleep_time;
+ int low_power = 0;
+ struct msm_pm_platform_data *mode;
+#ifdef CONFIG_MSM_IDLE_STATS
+ DECLARE_BITMAP(clk_ids, NR_CLKS);
+ int64_t t1;
+ static int64_t t2;
+ int exit_stat;
+#endif
+ int latency_qos = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ uint32_t sleep_limit = SLEEP_LIMIT_NONE;
+ int allow_sleep =
+ msm_pm_idle_sleep_mode < MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT &&
+#ifdef CONFIG_HAS_WAKELOCK
+ !has_wake_lock(WAKE_LOCK_IDLE) &&
+#endif
+ msm_irq_idle_sleep_allowed();
+
+ if (!atomic_read(&msm_pm_init_done))
+ return;
+
+ sleep_time = msm_timer_enter_idle();
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ t1 = ktime_to_ns(ktime_get());
+ msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - t2);
+ msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, sleep_time);
+#endif
+
+ mode = &msm_pm_modes[MSM_PM_SLEEP_MODE_POWER_COLLAPSE];
+ if (mode->latency >= latency_qos)
+ sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
+
+ mode = &msm_pm_modes[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN];
+ if (mode->latency >= latency_qos)
+ allow_sleep = false;
+
+ mode = &msm_pm_modes[
+ MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT];
+ if (mode->latency >= latency_qos) {
+ /* no time even for SWFI */
+ while (!msm_irq_pending())
+ udelay(1);
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = MSM_PM_STAT_IDLE_SPIN;
+#endif
+ goto abort_idle;
+ }
+
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_IDLE)
+ printk(KERN_INFO "arch_idle: sleep time %llu, allow_sleep %d\n",
+ sleep_time, allow_sleep);
+ spin = msm_pm_idle_spin_time >> 10;
+ while (spin-- > 0) {
+ if (msm_irq_pending()) {
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = MSM_PM_STAT_IDLE_SPIN;
+#endif
+ goto abort_idle;
+ }
+ udelay(1);
+ }
+ if (sleep_time < msm_pm_idle_sleep_min_time || !allow_sleep) {
+ unsigned long saved_rate;
+ saved_rate = acpuclk_wait_for_irq();
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
+ printk(KERN_DEBUG "arch_idle: clk %ld -> swfi\n",
+ saved_rate);
+ if (saved_rate) {
+ msm_arch_idle();
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = MSM_PM_STAT_IDLE_WFI;
+#endif
+ } else {
+ while (!msm_irq_pending())
+ udelay(1);
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = MSM_PM_STAT_IDLE_SPIN;
+#endif
+ }
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_CLOCK)
+ printk(KERN_DEBUG "msm_sleep: clk swfi -> %ld\n",
+ saved_rate);
+ if (saved_rate
+ && acpuclk_set_rate(saved_rate, SETRATE_SWFI) < 0)
+ printk(KERN_ERR "msm_sleep(): clk_set_rate %ld "
+ "failed\n", saved_rate);
+ } else {
+#ifdef CONFIG_MSM_IDLE_STATS
+ ret = msm_clock_require_tcxo(clk_ids, NR_CLKS);
+#elif defined(CONFIG_CLOCK_BASED_SLEEP_LIMIT)
+ ret = msm_clock_require_tcxo(NULL, 0);
+#endif
+
+#ifdef CONFIG_CLOCK_BASED_SLEEP_LIMIT
+ if (ret)
+ sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
+#endif
+
+ low_power = 1;
+ do_div(sleep_time, NSEC_PER_SEC / 32768);
+ if (sleep_time > 0x6DDD000) {
+ printk("sleep_time too big %lld\n", sleep_time);
+ sleep_time = 0x6DDD000;
+ }
+ ret = msm_sleep(msm_pm_idle_sleep_mode, sleep_time,
+ sleep_limit, 1);
+#ifdef CONFIG_MSM_IDLE_STATS
+ switch (msm_pm_idle_sleep_mode) {
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+ if (ret)
+ exit_stat =
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
+ else {
+ exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
+ msm_pm_sleep_limit = sleep_limit;
+ bitmap_copy(msm_pm_clocks_no_tcxo_shutdown,
+ clk_ids, NR_CLKS);
+ }
+ break;
+ case MSM_PM_SLEEP_MODE_APPS_SLEEP:
+ if (ret)
+ exit_stat = MSM_PM_STAT_IDLE_FAILED_SLEEP;
+ else
+ exit_stat = MSM_PM_STAT_IDLE_SLEEP;
+ break;
+ default:
+ exit_stat = MSM_PM_STAT_IDLE_WFI;
+ }
+#endif
+ }
+abort_idle:
+ msm_timer_exit_idle(low_power);
+#ifdef CONFIG_MSM_IDLE_STATS
+ t2 = ktime_to_ns(ktime_get());
+ msm_pm_add_stat(exit_stat, t2 - t1);
+#endif
+}
+
+static int msm_pm_enter(suspend_state_t state)
+{
+ uint32_t sleep_limit;
+ int ret;
+#ifdef CONFIG_MSM_IDLE_STATS
+ DECLARE_BITMAP(clk_ids, NR_CLKS);
+ int64_t period = 0;
+ int64_t time = 0;
+
+ time = msm_timer_get_sclk_time(&period);
+ ret = msm_clock_require_tcxo(clk_ids, NR_CLKS);
+#elif defined(CONFIG_CLOCK_BASED_SLEEP_LIMIT)
+ ret = msm_clock_require_tcxo(NULL, 0);
+#endif /* CONFIG_MSM_IDLE_STATS */
+
+#ifdef CONFIG_CLOCK_BASED_SLEEP_LIMIT
+ sleep_limit = ret ? SLEEP_LIMIT_NO_TCXO_SHUTDOWN : SLEEP_LIMIT_NONE;
+#else
+ sleep_limit = SLEEP_LIMIT_NONE;
+#endif
+
+#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
+ if (msm_pm_sleep_time_override > 0) {
+ int64_t ns = NSEC_PER_SEC * (int64_t)msm_pm_sleep_time_override;
+ msm_pm_set_max_sleep_time(ns);
+ msm_pm_sleep_time_override = 0;
+ }
+#endif
+
+ ret = msm_sleep(msm_pm_sleep_mode,
+ msm_pm_max_sleep_time, sleep_limit, 0);
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ if (msm_pm_sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND ||
+ msm_pm_sleep_mode == MSM_PM_SLEEP_MODE_POWER_COLLAPSE) {
+ enum msm_pm_time_stats_id id;
+ int64_t end_time;
+
+ if (ret)
+ id = MSM_PM_STAT_FAILED_SUSPEND;
+ else {
+ id = MSM_PM_STAT_SUSPEND;
+ msm_pm_sleep_limit = sleep_limit;
+ bitmap_copy(msm_pm_clocks_no_tcxo_shutdown, clk_ids,
+ NR_CLKS);
+ }
+
+ if (time != 0) {
+ end_time = msm_timer_get_sclk_time(NULL);
+ if (end_time != 0) {
+ time = end_time - time;
+ if (time < 0)
+ time += period;
+ } else
+ time = 0;
+ }
+
+ msm_pm_add_stat(id, time);
+ }
+#endif
+
+ return 0;
+}
+
+static struct platform_suspend_ops msm_pm_ops = {
+ .enter = msm_pm_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+static uint32_t restart_reason = 0x776655AA;
+
+static void msm_pm_power_off(void)
+{
+ msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
+ for (;;) ;
+}
+
+static void msm_pm_restart(char str, const char *cmd)
+{
+ msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
+
+ for (;;) ;
+}
+
+static int msm_reboot_call(struct notifier_block *this, unsigned long code, void *_cmd)
+{
+ if((code == SYS_RESTART) && _cmd) {
+ char *cmd = _cmd;
+ if (!strcmp(cmd, "bootloader")) {
+ restart_reason = 0x77665500;
+ } else if (!strcmp(cmd, "recovery")) {
+ restart_reason = 0x77665502;
+ } else if (!strcmp(cmd, "eraseflash")) {
+ restart_reason = 0x776655EF;
+ } else if (!strncmp(cmd, "oem-", 4)) {
+ unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
+ restart_reason = 0x6f656d00 | code;
+ } else {
+ restart_reason = 0x77665501;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_reboot_notifier =
+{
+ .notifier_call = msm_reboot_call,
+};
+
+#ifdef CONFIG_MSM_IDLE_STATS
+/*
+ * Helper function of snprintf where buf is auto-incremented, size is auto-
+ * decremented, and there is no return value.
+ *
+ * NOTE: buf and size must be l-values (e.g. variables)
+ */
+#define SNPRINTF(buf, size, format, ...) \
+ do { \
+ if (size > 0) { \
+ int ret; \
+ ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+ if (ret > size) { \
+ buf += size; \
+ size = 0; \
+ } else { \
+ buf += ret; \
+ size -= ret; \
+ } \
+ } \
+ } while (0)
+
+/*
+ * Write out the power management statistics.
+ */
+static int msm_pm_read_proc(
+ char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int i;
+ char *p = page;
+ char clk_name[16];
+
+ if (count < 1024) {
+ *start = (char *) 0;
+ *eof = 0;
+ return 0;
+ }
+
+ if (!off) {
+ SNPRINTF(p, count, "Clocks against last TCXO shutdown:\n");
+ for_each_bit(i, msm_pm_clocks_no_tcxo_shutdown, NR_CLKS) {
+ clk_name[0] = '\0';
+ msm_clock_get_name(i, clk_name, sizeof(clk_name));
+ SNPRINTF(p, count, " %s (id=%d)\n", clk_name, i);
+ }
+
+ SNPRINTF(p, count, "Last power collapse voted ");
+ if (msm_pm_sleep_limit == SLEEP_LIMIT_NONE)
+ SNPRINTF(p, count, "for TCXO shutdown\n\n");
+ else
+ SNPRINTF(p, count, "against TCXO shutdown\n\n");
+
+ *start = (char *) 1;
+ *eof = 0;
+ } else if (--off < ARRAY_SIZE(msm_pm_stats)) {
+ int64_t bucket_time;
+ int64_t s;
+ uint32_t ns;
+
+ s = msm_pm_stats[off].total_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ SNPRINTF(p, count,
+ "%s:\n"
+ " count: %7d\n"
+ " total_time: %lld.%09u\n",
+ msm_pm_stats[off].name,
+ msm_pm_stats[off].count,
+ s, ns);
+
+ bucket_time = msm_pm_stats[off].first_bucket_time;
+ for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
+ s = bucket_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ SNPRINTF(p, count,
+ " <%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, msm_pm_stats[off].bucket[i],
+ msm_pm_stats[off].min_time[i],
+ msm_pm_stats[off].max_time[i]);
+
+ bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
+ }
+
+ SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, msm_pm_stats[off].bucket[i],
+ msm_pm_stats[off].min_time[i],
+ msm_pm_stats[off].max_time[i]);
+
+ *start = (char *) 1;
+ *eof = (off + 1 >= ARRAY_SIZE(msm_pm_stats));
+ }
+
+ return p - page;
+}
+#undef SNPRINTF
+
+#define MSM_PM_STATS_RESET "reset"
+
+/*
+ * Reset the power management statistics values.
+ */
+static int msm_pm_write_proc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char buf[sizeof(MSM_PM_STATS_RESET)];
+ int ret;
+ unsigned long flags;
+ int i;
+
+ if (count < strlen(MSM_PM_STATS_RESET)) {
+ ret = -EINVAL;
+ goto write_proc_failed;
+ }
+
+ if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
+ ret = -EFAULT;
+ goto write_proc_failed;
+ }
+
+ if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
+ ret = -EINVAL;
+ goto write_proc_failed;
+ }
+
+ local_irq_save(flags);
+ for (i = 0; i < ARRAY_SIZE(msm_pm_stats); i++) {
+ memset(msm_pm_stats[i].bucket,
+ 0, sizeof(msm_pm_stats[i].bucket));
+ memset(msm_pm_stats[i].min_time,
+ 0, sizeof(msm_pm_stats[i].min_time));
+ memset(msm_pm_stats[i].max_time,
+ 0, sizeof(msm_pm_stats[i].max_time));
+ msm_pm_stats[i].count = 0;
+ msm_pm_stats[i].total_time = 0;
+ }
+
+ msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
+ bitmap_zero(msm_pm_clocks_no_tcxo_shutdown, NR_CLKS);
+ local_irq_restore(flags);
+
+ return count;
+
+write_proc_failed:
+ return ret;
+}
+#undef MSM_PM_STATS_RESET
+#endif /* CONFIG_MSM_IDLE_STATS */
+
+static int __init msm_pm_init(void)
+{
+#ifdef CONFIG_MSM_IDLE_STATS
+ struct proc_dir_entry *d_entry;
+#endif
+
+ pm_power_off = msm_pm_power_off;
+ arm_pm_restart = msm_pm_restart;
+ msm_pm_max_sleep_time = 0;
+
+ register_reboot_notifier(&msm_reboot_notifier);
+
+ msm_pm_sma.sleep_delay = smem_alloc(SMEM_SMSM_SLEEP_DELAY,
+ sizeof(*msm_pm_sma.sleep_delay));
+ if (msm_pm_sma.sleep_delay == NULL) {
+ printk(KERN_ERR "msm_pm_init: failed get SLEEP_DELAY\n");
+ return -ENODEV;
+ }
+
+ msm_pm_sma.limit_sleep = smem_alloc(SMEM_SMSM_LIMIT_SLEEP,
+ sizeof(*msm_pm_sma.limit_sleep));
+ if (msm_pm_sma.limit_sleep == NULL) {
+ printk(KERN_ERR "msm_pm_init: failed get LIMIT_SLEEP\n");
+ return -ENODEV;
+ }
+
+ msm_pm_sma.int_info_ext = smem_alloc(SMEM_SMSM_INT_INFO,
+ sizeof(*msm_pm_sma.int_info_ext));
+
+ if (msm_pm_sma.int_info_ext)
+ msm_pm_sma.int_info = (struct smsm_interrupt_info *)
+ msm_pm_sma.int_info_ext;
+ else
+ msm_pm_sma.int_info = smem_alloc(SMEM_SMSM_INT_INFO,
+ sizeof(*msm_pm_sma.int_info));
+
+ if (msm_pm_sma.int_info == NULL) {
+ printk(KERN_ERR "msm_pm_init: failed get INT_INFO\n");
+ return -ENODEV;
+ }
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+ /* The bootloader is responsible for initializing many of Scorpion's
+ * coprocessor registers for things like cache timing. The state of
+ * these coprocessor registers is lost on reset, so part of the
+ * bootloader must be re-executed. Do not overwrite the reset vector
+ * or bootloader area.
+ */
+ msm_pm_reset_vector = PAGE_OFFSET;
+#else
+ msm_pm_reset_vector = ioremap(0, PAGE_SIZE);
+ if (msm_pm_reset_vector == NULL) {
+ printk(KERN_ERR "msm_pm_init: failed to map reset vector\n");
+ return -ENODEV;
+ }
+#endif /* CONFIG_ARCH_MSM_SCORPION */
+
+ BUG_ON(msm_pm_modes == NULL);
+
+ atomic_set(&msm_pm_init_done, 1);
+ suspend_set_ops(&msm_pm_ops);
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ d_entry = create_proc_entry("msm_pm_stats",
+ S_IRUGO | S_IWUSR | S_IWGRP, NULL);
+ if (d_entry) {
+ d_entry->read_proc = msm_pm_read_proc;
+ d_entry->write_proc = msm_pm_write_proc;
+ d_entry->data = NULL;
+ }
+#endif
+
+ return 0;
+}
+
+void __init msm_pm_set_platform_data(struct msm_pm_platform_data *data)
+{
+ msm_pm_modes = data;
+}
+
+late_initcall(msm_pm_init);
diff --git a/arch/arm/mach-msm/pm.h b/arch/arm/mach-msm/pm.h
new file mode 100644
index 000000000000..b6e60e9683ce
--- /dev/null
+++ b/arch/arm/mach-msm/pm.h
@@ -0,0 +1,42 @@
+/* arch/arm/mach-msm/pm.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_PM_H
+#define __ARCH_ARM_MACH_MSM_PM_H
+
+enum {
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE,
+ MSM_PM_SLEEP_MODE_APPS_SLEEP,
+ MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT,
+ MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT,
+ MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN,
+ MSM_PM_SLEEP_MODE_NR
+};
+
+struct msm_pm_platform_data {
+ u8 supported;
+ u8 suspend_enabled; /* enabled for suspend */
+ u8 idle_enabled; /* enabled for idle low power */
+ u32 latency; /* interrupt latency in microseconds when entering
+ and exiting the low power mode */
+ u32 residency; /* time threshold in microseconds beyond which
+ staying in the low power mode saves power */
+};
+
+void msm_pm_set_platform_data(struct msm_pm_platform_data *data);
+#endif
diff --git a/arch/arm/mach-msm/pm2.c b/arch/arm/mach-msm/pm2.c
new file mode 100644
index 000000000000..dd5d690b76a5
--- /dev/null
+++ b/arch/arm/mach-msm/pm2.c
@@ -0,0 +1,1666 @@
+/* arch/arm/mach-msm/pm2.c
+ *
+ * MSM Power Management Routines
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/pm_qos_params.h>
+#include <linux/proc_fs.h>
+#include <linux/suspend.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <mach/msm_iomap.h>
+#include <mach/system.h>
+#ifdef CONFIG_CACHE_L2X0
+#include <asm/hardware/cache-l2x0.h>
+#endif
+#ifdef CONFIG_VFP
+#include <asm/vfp.h>
+#endif
+
+#include "smd_private.h"
+#include "acpuclock.h"
+#include "clock.h"
+#include "proc_comm.h"
+#include "idle.h"
+#include "irq.h"
+#include "gpio.h"
+#include "timer.h"
+#include "pm.h"
+
+/******************************************************************************
+ * Debug Definitions
+ *****************************************************************************/
+
+enum {
+ MSM_PM_DEBUG_SUSPEND = 1U << 0,
+ MSM_PM_DEBUG_POWER_COLLAPSE = 1U << 1,
+ MSM_PM_DEBUG_STATE = 1U << 2,
+ MSM_PM_DEBUG_CLOCK = 1U << 3,
+ MSM_PM_DEBUG_RESET_VECTOR = 1U << 4,
+ MSM_PM_DEBUG_SMSM_STATE = 1U << 5,
+ MSM_PM_DEBUG_IDLE = 1U << 6,
+};
+
+static int msm_pm_debug_mask;
+module_param_named(
+ debug_mask, msm_pm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+#define MSM_PM_DPRINTK(mask, level, message, ...) \
+ do { \
+ if ((mask) & msm_pm_debug_mask) \
+ printk(level message, ## __VA_ARGS__); \
+ } while (0)
+
+#define MSM_PM_DEBUG_PRINT_STATE(tag) \
+ do { \
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_STATE, \
+ KERN_INFO, "%s: " \
+ "APPS_CLK_SLEEP_EN %x, APPS_PWRDOWN %x, " \
+ "SMSM_POWER_MASTER_DEM %x, SMSM_MODEM_STATE %x, " \
+ "SMSM_APPS_DEM %x\n", \
+ tag, \
+ readl(APPS_CLK_SLEEP_EN), readl(APPS_PWRDOWN), \
+ smsm_get_state(SMSM_POWER_MASTER_DEM), \
+ smsm_get_state(SMSM_MODEM_STATE), \
+ smsm_get_state(SMSM_APPS_DEM)); \
+ } while (0)
+
+#define MSM_PM_DEBUG_PRINT_SLEEP_INFO() \
+ do { \
+ if (msm_pm_debug_mask & MSM_PM_DEBUG_SMSM_STATE) \
+ smsm_print_sleep_info(msm_pm_smem_data->sleep_time, \
+ msm_pm_smem_data->resources_used, \
+ msm_pm_smem_data->irq_mask, \
+ msm_pm_smem_data->wakeup_reason, \
+ msm_pm_smem_data->pending_irqs); \
+ } while (0)
+
+
+/******************************************************************************
+ * Sleep Modes and Parameters
+ *****************************************************************************/
+
+static int msm_pm_sleep_mode = CONFIG_MSM7X00A_SLEEP_MODE;
+module_param_named(
+ sleep_mode, msm_pm_sleep_mode,
+ int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static int msm_pm_idle_sleep_mode = CONFIG_MSM7X00A_IDLE_SLEEP_MODE;
+module_param_named(
+ idle_sleep_mode, msm_pm_idle_sleep_mode,
+ int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+static int msm_pm_idle_sleep_min_time = CONFIG_MSM7X00A_IDLE_SLEEP_MIN_TIME;
+module_param_named(
+ idle_sleep_min_time, msm_pm_idle_sleep_min_time,
+ int, S_IRUGO | S_IWUSR | S_IWGRP
+);
+
+#define MSM_PM_MODE_ATTR_SUSPEND_ENABLED "suspend_enabled"
+#define MSM_PM_MODE_ATTR_IDLE_ENABLED "idle_enabled"
+#define MSM_PM_MODE_ATTR_LATENCY "latency"
+#define MSM_PM_MODE_ATTR_RESIDENCY "residency"
+#define MSM_PM_MODE_ATTR_NR (4)
+
+static char *msm_pm_sleep_mode_labels[MSM_PM_SLEEP_MODE_NR] = {
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND] = " ",
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = "power_collapse",
+ [MSM_PM_SLEEP_MODE_APPS_SLEEP] = "apps_sleep",
+ [MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] =
+ "ramp_down_and_wfi",
+ [MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT] = "wfi",
+ [MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] =
+ "power_collapse_no_xo_shutdown",
+};
+
+static struct msm_pm_platform_data *msm_pm_modes;
+
+static struct kobject *msm_pm_mode_kobjs[MSM_PM_SLEEP_MODE_NR];
+static struct attribute_group *msm_pm_mode_attr_group[MSM_PM_SLEEP_MODE_NR];
+static struct attribute **msm_pm_mode_attrs[MSM_PM_SLEEP_MODE_NR];
+static struct kobj_attribute *msm_pm_mode_kobj_attrs[MSM_PM_SLEEP_MODE_NR];
+
+/*
+ * Write out the attribute.
+ */
+static ssize_t msm_pm_mode_attr_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+ struct kernel_param kp;
+
+ if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
+ continue;
+
+ if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_SUSPEND_ENABLED)) {
+ u32 arg = msm_pm_modes[i].suspend_enabled;
+ kp.arg = &arg;
+ ret = param_get_ulong(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_IDLE_ENABLED)) {
+ u32 arg = msm_pm_modes[i].idle_enabled;
+ kp.arg = &arg;
+ ret = param_get_ulong(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_LATENCY)) {
+ kp.arg = &msm_pm_modes[i].latency;
+ ret = param_get_ulong(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_RESIDENCY)) {
+ kp.arg = &msm_pm_modes[i].residency;
+ ret = param_get_ulong(buf, &kp);
+ }
+
+ break;
+ }
+
+ if (ret > 0) {
+ strcat(buf, "\n");
+ ret++;
+ }
+
+ return ret;
+}
+
+/*
+ * Read in the new attribute value.
+ */
+static ssize_t msm_pm_mode_attr_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ int ret = -EINVAL;
+ int i;
+
+ for (i = 0; i < MSM_PM_SLEEP_MODE_NR; i++) {
+ struct kernel_param kp;
+
+ if (strcmp(kobj->name, msm_pm_sleep_mode_labels[i]))
+ continue;
+
+ if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_SUSPEND_ENABLED)) {
+ kp.arg = &msm_pm_modes[i].suspend_enabled;
+ ret = param_set_byte(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_IDLE_ENABLED)) {
+ kp.arg = &msm_pm_modes[i].idle_enabled;
+ ret = param_set_byte(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_LATENCY)) {
+ kp.arg = &msm_pm_modes[i].latency;
+ ret = param_set_ulong(buf, &kp);
+ } else if (!strcmp(attr->attr.name,
+ MSM_PM_MODE_ATTR_RESIDENCY)) {
+ kp.arg = &msm_pm_modes[i].residency;
+ ret = param_set_ulong(buf, &kp);
+ }
+
+ break;
+ }
+
+ return ret ? ret : count;
+}
+
+/*
+ * Add sysfs entries for the sleep modes.
+ */
+static int __init msm_pm_mode_sysfs_add(void)
+{
+ struct kobject *module_kobj = NULL;
+ struct kobject *modes_kobj = NULL;
+
+ struct kobject *kobj;
+ struct attribute_group *attr_group;
+ struct attribute **attrs;
+ struct kobj_attribute *kobj_attrs;
+
+ int i, k;
+ int ret;
+
+ module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+ if (!module_kobj) {
+ printk(KERN_ERR "%s: cannot find kobject for module %s\n",
+ __func__, KBUILD_MODNAME);
+ ret = -ENOENT;
+ goto mode_sysfs_add_cleanup;
+ }
+
+ modes_kobj = kobject_create_and_add("modes", module_kobj);
+ if (!modes_kobj) {
+ printk(KERN_ERR "%s: cannot create modes kobject\n", __func__);
+ ret = -ENOMEM;
+ goto mode_sysfs_add_cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(msm_pm_mode_kobjs); i++) {
+ if (!msm_pm_modes[i].supported)
+ continue;
+
+ kobj = kobject_create_and_add(
+ msm_pm_sleep_mode_labels[i], modes_kobj);
+ attr_group = kzalloc(sizeof(*attr_group), GFP_KERNEL);
+ attrs = kzalloc(sizeof(*attrs) * (MSM_PM_MODE_ATTR_NR + 1),
+ GFP_KERNEL);
+ kobj_attrs = kzalloc(sizeof(*kobj_attrs) * MSM_PM_MODE_ATTR_NR,
+ GFP_KERNEL);
+
+ if (!kobj || !attr_group || !attrs || !kobj_attrs) {
+ printk(KERN_ERR
+ "%s: cannot create kobject or attributes\n",
+ __func__);
+ ret = -ENOMEM;
+ goto mode_sysfs_add_abort;
+ }
+
+ kobj_attrs[0].attr.name = MSM_PM_MODE_ATTR_SUSPEND_ENABLED;
+ kobj_attrs[1].attr.name = MSM_PM_MODE_ATTR_IDLE_ENABLED;
+ kobj_attrs[2].attr.name = MSM_PM_MODE_ATTR_LATENCY;
+ kobj_attrs[3].attr.name = MSM_PM_MODE_ATTR_RESIDENCY;
+
+ for (k = 0; k < MSM_PM_MODE_ATTR_NR; k++) {
+ kobj_attrs[k].attr.mode = 0644;
+ kobj_attrs[k].show = msm_pm_mode_attr_show;
+ kobj_attrs[k].store = msm_pm_mode_attr_store;
+
+ attrs[k] = &kobj_attrs[k].attr;
+ }
+ attrs[MSM_PM_MODE_ATTR_NR] = NULL;
+
+ attr_group->attrs = attrs;
+ ret = sysfs_create_group(kobj, attr_group);
+ if (ret) {
+ printk(KERN_ERR
+ "%s: cannot create kobject attribute group\n",
+ __func__);
+ goto mode_sysfs_add_abort;
+ }
+
+ msm_pm_mode_kobjs[i] = kobj;
+ msm_pm_mode_attr_group[i] = attr_group;
+ msm_pm_mode_attrs[i] = attrs;
+ msm_pm_mode_kobj_attrs[i] = kobj_attrs;
+ }
+
+ return 0;
+
+mode_sysfs_add_abort:
+ kfree(kobj_attrs);
+ kfree(attrs);
+ kfree(attr_group);
+ kobject_put(kobj);
+
+mode_sysfs_add_cleanup:
+ for (i = ARRAY_SIZE(msm_pm_mode_kobjs) - 1; i >= 0; i--) {
+ if (!msm_pm_mode_kobjs[i])
+ continue;
+
+ sysfs_remove_group(
+ msm_pm_mode_kobjs[i], msm_pm_mode_attr_group[i]);
+
+ kfree(msm_pm_mode_kobj_attrs[i]);
+ kfree(msm_pm_mode_attrs[i]);
+ kfree(msm_pm_mode_attr_group[i]);
+ kobject_put(msm_pm_mode_kobjs[i]);
+ }
+
+ return ret;
+}
+
+void __init msm_pm_set_platform_data(struct msm_pm_platform_data *data)
+{
+ msm_pm_modes = data;
+}
+
+
+/******************************************************************************
+ * Sleep Limitations
+ *****************************************************************************/
+enum {
+ SLEEP_LIMIT_NONE = 0,
+ SLEEP_LIMIT_NO_TCXO_SHUTDOWN = 2
+};
+
+
+/******************************************************************************
+ * Configure Hardware for Power Down/Up
+ *****************************************************************************/
+
+#define APPS_CLK_SLEEP_EN (MSM_CSR_BASE + 0x11c)
+#define APPS_PWRDOWN (MSM_CSR_BASE + 0x440)
+#define APPS_STANDBY_CTL (MSM_CSR_BASE + 0x108)
+
+/*
+ * Configure hardware registers in preparation for Apps power down.
+ */
+static void msm_pm_config_hw_before_power_down(void)
+{
+ writel(0x1f, APPS_CLK_SLEEP_EN);
+ writel(1, APPS_PWRDOWN);
+ writel(0, APPS_STANDBY_CTL);
+}
+
+/*
+ * Clear hardware registers after Apps powers up.
+ */
+static void msm_pm_config_hw_after_power_up(void)
+{
+ writel(0, APPS_PWRDOWN);
+ writel(0, APPS_CLK_SLEEP_EN);
+}
+
+/*
+ * Configure hardware registers in preparation for SWFI.
+ */
+static void msm_pm_config_hw_before_swfi(void)
+{
+#ifdef CONFIG_ARCH_MSM_SCORPION
+ writel(0x1f, APPS_CLK_SLEEP_EN);
+#else
+ writel(0x0f, APPS_CLK_SLEEP_EN);
+#endif
+}
+
+/*
+ * Respond to timing out waiting for Modem
+ *
+ * NOTE: The function never returns.
+ */
+static void msm_pm_timeout(void)
+{
+#if defined(CONFIG_MSM_PM_TIMEOUT_RESET_CHIP)
+ printk(KERN_EMERG "%s(): resetting chip\n", __func__);
+ msm_proc_comm(PCOM_RESET_CHIP_IMM, NULL, NULL);
+#elif defined(CONFIG_MSM_PM_TIMEOUT_RESET_MODEM)
+ printk(KERN_EMERG "%s(): resetting modem\n", __func__);
+ msm_proc_comm_reset_modem_now();
+#elif defined(CONFIG_MSM_PM_TIMEOUT_HALT)
+ printk(KERN_EMERG "%s(): halting\n", __func__);
+#endif
+ for (;;)
+ ;
+}
+
+
+/******************************************************************************
+ * State Polling Definitions
+ *****************************************************************************/
+
+struct msm_pm_polled_group {
+ uint32_t group_id;
+
+ uint32_t bits_all_set;
+ uint32_t bits_all_clear;
+ uint32_t bits_any_set;
+ uint32_t bits_any_clear;
+
+ uint32_t value_read;
+};
+
+/*
+ * Return true if all bits indicated by flag are set in source.
+ */
+static inline bool msm_pm_all_set(uint32_t source, uint32_t flag)
+{
+ return (source & flag) == flag;
+}
+
+/*
+ * Return true if any bit indicated by flag are set in source.
+ */
+static inline bool msm_pm_any_set(uint32_t source, uint32_t flag)
+{
+ return !flag || (source & flag);
+}
+
+/*
+ * Return true if all bits indicated by flag are cleared in source.
+ */
+static inline bool msm_pm_all_clear(uint32_t source, uint32_t flag)
+{
+ return (~source & flag) == flag;
+}
+
+/*
+ * Return true if any bit indicated by flag are cleared in source.
+ */
+static inline bool msm_pm_any_clear(uint32_t source, uint32_t flag)
+{
+ return !flag || (~source & flag);
+}
+
+/*
+ * Poll the shared memory states as indicated by the poll groups.
+ *
+ * nr_grps: number of groups in the array
+ * grps: array of groups
+ *
+ * The function returns when conditions specified by any of the poll
+ * groups become true. The conditions specified by a poll group are
+ * deemed true when 1) at least one bit from bits_any_set is set OR one
+ * bit from bits_any_clear is cleared; and 2) all bits in bits_all_set
+ * are set; and 3) all bits in bits_all_clear are cleared.
+ *
+ * Return value:
+ * >=0: index of the poll group whose conditions have become true
+ * -ETIMEDOUT: timed out
+ */
+static int msm_pm_poll_state(int nr_grps, struct msm_pm_polled_group *grps)
+{
+ int i, k;
+
+ for (i = 0; i < 500000; i++)
+ for (k = 0; k < nr_grps; k++) {
+ bool all_set, all_clear;
+ bool any_set, any_clear;
+
+ grps[k].value_read = smsm_get_state(grps[k].group_id);
+
+ all_set = msm_pm_all_set(grps[k].value_read,
+ grps[k].bits_all_set);
+ all_clear = msm_pm_all_clear(grps[k].value_read,
+ grps[k].bits_all_clear);
+ any_set = msm_pm_any_set(grps[k].value_read,
+ grps[k].bits_any_set);
+ any_clear = msm_pm_any_clear(grps[k].value_read,
+ grps[k].bits_any_clear);
+
+ if (all_set && all_clear && (any_set || any_clear))
+ return k;
+ }
+
+ printk(KERN_ERR "%s failed:\n", __func__);
+ for (k = 0; k < nr_grps; k++)
+ printk(KERN_ERR "(%x, %x, %x, %x) %x\n",
+ grps[k].bits_all_set, grps[k].bits_all_clear,
+ grps[k].bits_any_set, grps[k].bits_any_clear,
+ grps[k].value_read);
+
+ return -ETIMEDOUT;
+}
+
+
+/******************************************************************************
+ * Suspend Max Sleep Time
+ *****************************************************************************/
+
+#define SCLK_HZ (32768)
+#define MSM_PM_SLEEP_TICK_LIMIT (0x6DDD000)
+
+#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
+static int msm_pm_sleep_time_override;
+module_param_named(sleep_time_override,
+ msm_pm_sleep_time_override, int, S_IRUGO | S_IWUSR | S_IWGRP);
+#endif
+
+static uint32_t msm_pm_max_sleep_time;
+
+/*
+ * Convert time from nanoseconds to slow clock ticks, then cap it to the
+ * specified limit
+ */
+static int64_t msm_pm_convert_and_cap_time(int64_t time_ns, int64_t limit)
+{
+ do_div(time_ns, NSEC_PER_SEC / SCLK_HZ);
+ return (time_ns > limit) ? limit : time_ns;
+}
+
+/*
+ * Set the sleep time for suspend. 0 means infinite sleep time.
+ */
+void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (max_sleep_time_ns == 0) {
+ msm_pm_max_sleep_time = 0;
+ } else {
+ msm_pm_max_sleep_time = (uint32_t)msm_pm_convert_and_cap_time(
+ max_sleep_time_ns, MSM_PM_SLEEP_TICK_LIMIT);
+
+ if (msm_pm_max_sleep_time == 0)
+ msm_pm_max_sleep_time = 1;
+ }
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
+ "%s(): Requested %lld ns Giving %u sclk ticks\n", __func__,
+ max_sleep_time_ns, msm_pm_max_sleep_time);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
+
+
+/******************************************************************************
+ * CONFIG_MSM_IDLE_STATS
+ *****************************************************************************/
+
+#ifdef CONFIG_MSM_IDLE_STATS
+enum msm_pm_time_stats_id {
+ MSM_PM_STAT_REQUESTED_IDLE,
+ MSM_PM_STAT_IDLE_SPIN,
+ MSM_PM_STAT_IDLE_WFI,
+ MSM_PM_STAT_IDLE_SLEEP,
+ MSM_PM_STAT_IDLE_FAILED_SLEEP,
+ MSM_PM_STAT_IDLE_POWER_COLLAPSE,
+ MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE,
+ MSM_PM_STAT_SUSPEND,
+ MSM_PM_STAT_FAILED_SUSPEND,
+ MSM_PM_STAT_NOT_IDLE,
+ MSM_PM_STAT_COUNT
+};
+
+static struct msm_pm_time_stats {
+ const char *name;
+ int64_t first_bucket_time;
+ int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int count;
+ int64_t total_time;
+} msm_pm_stats[MSM_PM_STAT_COUNT] = {
+ [MSM_PM_STAT_REQUESTED_IDLE].name = "idle-request",
+ [MSM_PM_STAT_REQUESTED_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_SPIN].name = "idle-spin",
+ [MSM_PM_STAT_IDLE_SPIN].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_WFI].name = "idle-wfi",
+ [MSM_PM_STAT_IDLE_WFI].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_SLEEP].name = "idle-sleep",
+ [MSM_PM_STAT_IDLE_SLEEP].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_FAILED_SLEEP].name = "idle-failed-sleep",
+ [MSM_PM_STAT_IDLE_FAILED_SLEEP].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_POWER_COLLAPSE].name = "idle-power-collapse",
+ [MSM_PM_STAT_IDLE_POWER_COLLAPSE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].name =
+ "idle-failed-power-collapse",
+ [MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_SUSPEND].name = "suspend",
+ [MSM_PM_STAT_SUSPEND].first_bucket_time =
+ CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_FAILED_SUSPEND].name = "failed-suspend",
+ [MSM_PM_STAT_FAILED_SUSPEND].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+
+ [MSM_PM_STAT_NOT_IDLE].name = "not-idle",
+ [MSM_PM_STAT_NOT_IDLE].first_bucket_time =
+ CONFIG_MSM_IDLE_STATS_FIRST_BUCKET,
+};
+
+static uint32_t msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
+static DECLARE_BITMAP(msm_pm_clocks_no_tcxo_shutdown, NR_CLKS);
+
+/*
+ * Add the given time data to the statistics collection.
+ */
+static void msm_pm_add_stat(enum msm_pm_time_stats_id id, int64_t t)
+{
+ int i;
+ int64_t bt;
+
+ msm_pm_stats[id].total_time += t;
+ msm_pm_stats[id].count++;
+
+ bt = t;
+ do_div(bt, msm_pm_stats[id].first_bucket_time);
+
+ if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
+ (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
+ i = DIV_ROUND_UP(fls((uint32_t)bt),
+ CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
+ else
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+ msm_pm_stats[id].bucket[i]++;
+
+ if (t < msm_pm_stats[id].min_time[i] || !msm_pm_stats[id].max_time[i])
+ msm_pm_stats[id].min_time[i] = t;
+ if (t > msm_pm_stats[id].max_time[i])
+ msm_pm_stats[id].max_time[i] = t;
+}
+
+/*
+ * Helper function of snprintf where buf is auto-incremented, size is auto-
+ * decremented, and there is no return value.
+ *
+ * NOTE: buf and size must be l-values (e.g. variables)
+ */
+#define SNPRINTF(buf, size, format, ...) \
+ do { \
+ if (size > 0) { \
+ int ret; \
+ ret = snprintf(buf, size, format, ## __VA_ARGS__); \
+ if (ret > size) { \
+ buf += size; \
+ size = 0; \
+ } else { \
+ buf += ret; \
+ size -= ret; \
+ } \
+ } \
+ } while (0)
+
+/*
+ * Write out the power management statistics.
+ */
+static int msm_pm_read_proc
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int i;
+ char *p = page;
+ char clk_name[16];
+
+ if (count < 1024) {
+ *start = (char *) 0;
+ *eof = 0;
+ return 0;
+ }
+
+ if (!off) {
+ SNPRINTF(p, count, "Clocks against last TCXO shutdown:\n");
+ for_each_bit(i, msm_pm_clocks_no_tcxo_shutdown, NR_CLKS) {
+ clk_name[0] = '\0';
+ msm_clock_get_name(i, clk_name, sizeof(clk_name));
+ SNPRINTF(p, count, " %s (id=%d)\n", clk_name, i);
+ }
+
+ SNPRINTF(p, count, "Last power collapse voted ");
+ if (msm_pm_sleep_limit == SLEEP_LIMIT_NONE)
+ SNPRINTF(p, count, "for TCXO shutdown\n\n");
+ else
+ SNPRINTF(p, count, "against TCXO shutdown\n\n");
+
+ *start = (char *) 1;
+ *eof = 0;
+ } else if (--off < ARRAY_SIZE(msm_pm_stats)) {
+ int64_t bucket_time;
+ int64_t s;
+ uint32_t ns;
+
+ s = msm_pm_stats[off].total_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ SNPRINTF(p, count,
+ "%s:\n"
+ " count: %7d\n"
+ " total_time: %lld.%09u\n",
+ msm_pm_stats[off].name,
+ msm_pm_stats[off].count,
+ s, ns);
+
+ bucket_time = msm_pm_stats[off].first_bucket_time;
+ for (i = 0; i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1; i++) {
+ s = bucket_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ SNPRINTF(p, count,
+ " <%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, msm_pm_stats[off].bucket[i],
+ msm_pm_stats[off].min_time[i],
+ msm_pm_stats[off].max_time[i]);
+
+ bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
+ }
+
+ SNPRINTF(p, count, " >=%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, msm_pm_stats[off].bucket[i],
+ msm_pm_stats[off].min_time[i],
+ msm_pm_stats[off].max_time[i]);
+
+ *start = (char *) 1;
+ *eof = (off + 1 >= ARRAY_SIZE(msm_pm_stats));
+ }
+
+ return p - page;
+}
+#undef SNPRINTF
+
+#define MSM_PM_STATS_RESET "reset"
+
+/*
+ * Reset the power management statistics values.
+ */
+static int msm_pm_write_proc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char buf[sizeof(MSM_PM_STATS_RESET)];
+ int ret;
+ unsigned long flags;
+ int i;
+
+ if (count < strlen(MSM_PM_STATS_RESET)) {
+ ret = -EINVAL;
+ goto write_proc_failed;
+ }
+
+ if (copy_from_user(buf, buffer, strlen(MSM_PM_STATS_RESET))) {
+ ret = -EFAULT;
+ goto write_proc_failed;
+ }
+
+ if (memcmp(buf, MSM_PM_STATS_RESET, strlen(MSM_PM_STATS_RESET))) {
+ ret = -EINVAL;
+ goto write_proc_failed;
+ }
+
+ local_irq_save(flags);
+ for (i = 0; i < ARRAY_SIZE(msm_pm_stats); i++) {
+ memset(msm_pm_stats[i].bucket,
+ 0, sizeof(msm_pm_stats[i].bucket));
+ memset(msm_pm_stats[i].min_time,
+ 0, sizeof(msm_pm_stats[i].min_time));
+ memset(msm_pm_stats[i].max_time,
+ 0, sizeof(msm_pm_stats[i].max_time));
+ msm_pm_stats[i].count = 0;
+ msm_pm_stats[i].total_time = 0;
+ }
+
+ msm_pm_sleep_limit = SLEEP_LIMIT_NONE;
+ bitmap_zero(msm_pm_clocks_no_tcxo_shutdown, NR_CLKS);
+ local_irq_restore(flags);
+
+ return count;
+
+write_proc_failed:
+ return ret;
+}
+#undef MSM_PM_STATS_RESET
+#endif /* CONFIG_MSM_IDLE_STATS */
+
+
+/******************************************************************************
+ * Shared Memory Bits
+ *****************************************************************************/
+
+#define DEM_MASTER_BITS_PER_CPU 6
+
+/* Power Master State Bits - Per CPU */
+#define DEM_MASTER_SMSM_RUN \
+ (0x01UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+#define DEM_MASTER_SMSM_RSA \
+ (0x02UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+#define DEM_MASTER_SMSM_PWRC_EARLY_EXIT \
+ (0x04UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+#define DEM_MASTER_SMSM_SLEEP_EXIT \
+ (0x08UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+#define DEM_MASTER_SMSM_READY \
+ (0x10UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+#define DEM_MASTER_SMSM_SLEEP \
+ (0x20UL << (DEM_MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+
+/* Power Slave State Bits */
+#define DEM_SLAVE_SMSM_RUN (0x0001)
+#define DEM_SLAVE_SMSM_PWRC (0x0002)
+#define DEM_SLAVE_SMSM_PWRC_DELAY (0x0004)
+#define DEM_SLAVE_SMSM_PWRC_EARLY_EXIT (0x0008)
+#define DEM_SLAVE_SMSM_WFPI (0x0010)
+#define DEM_SLAVE_SMSM_SLEEP (0x0020)
+#define DEM_SLAVE_SMSM_SLEEP_EXIT (0x0040)
+#define DEM_SLAVE_SMSM_MSGS_REDUCED (0x0080)
+#define DEM_SLAVE_SMSM_RESET (0x0100)
+#define DEM_SLAVE_SMSM_PWRC_SUSPEND (0x0200)
+
+
+/******************************************************************************
+ * Shared Memory Data
+ *****************************************************************************/
+
+#define DEM_MAX_PORT_NAME_LEN (20)
+
+struct msm_pm_smem_t {
+ uint32_t sleep_time;
+ uint32_t irq_mask;
+ uint32_t resources_used;
+ uint32_t reserved1;
+
+ uint32_t wakeup_reason;
+ uint32_t pending_irqs;
+ uint32_t rpc_prog;
+ uint32_t rpc_proc;
+ char smd_port_name[DEM_MAX_PORT_NAME_LEN];
+ uint32_t reserved2;
+};
+
+
+/******************************************************************************
+ *
+ *****************************************************************************/
+static struct msm_pm_smem_t *msm_pm_smem_data;
+static uint32_t *msm_pm_reset_vector;
+static atomic_t msm_pm_init_done = ATOMIC_INIT(0);
+
+/*
+ * Power collapse the Apps processor. This function executes the handshake
+ * protocol with Modem.
+ *
+ * Return value:
+ * -EAGAIN: modem reset occurred or early exit from power collapse
+ * -EBUSY: modem not ready for our power collapse -- no power loss
+ * -ETIMEDOUT: timed out waiting for modem's handshake -- no power loss
+ * 0: success
+ */
+static int msm_pm_power_collapse
+ (bool from_idle, uint32_t sleep_delay, uint32_t sleep_limit)
+{
+ struct msm_pm_polled_group state_grps[2];
+ unsigned long saved_acpuclk_rate;
+ uint32_t saved_vector[2];
+ int collapsed = 0;
+ int ret;
+#ifdef CONFIG_MSM_ADM_OFF_AT_POWER_COLLAPSE
+ unsigned id;
+#endif
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO, "%s(): idle %d, delay %u, limit %u\n", __func__,
+ (int)from_idle, sleep_delay, sleep_limit);
+
+ if (!(smsm_get_state(SMSM_POWER_MASTER_DEM) & DEM_MASTER_SMSM_READY)) {
+ MSM_PM_DPRINTK(
+ MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO, "%s(): master not ready\n", __func__);
+ ret = -EBUSY;
+ goto power_collapse_bail;
+ }
+
+ memset(msm_pm_smem_data, 0, sizeof(*msm_pm_smem_data));
+
+ msm_irq_enter_sleep1(true, from_idle, &msm_pm_smem_data->irq_mask);
+ msm_gpio_enter_sleep(from_idle);
+
+ msm_pm_smem_data->sleep_time = sleep_delay;
+ msm_pm_smem_data->resources_used = sleep_limit;
+
+ /* Enter PWRC/PWRC_SUSPEND */
+
+ if (from_idle)
+ smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
+ DEM_SLAVE_SMSM_PWRC);
+ else
+ smsm_change_state(SMSM_APPS_DEM, DEM_SLAVE_SMSM_RUN,
+ DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND);
+
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC");
+ MSM_PM_DEBUG_PRINT_SLEEP_INFO();
+
+ memset(state_grps, 0, sizeof(state_grps));
+ state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
+ state_grps[0].bits_all_set = DEM_MASTER_SMSM_RSA;
+ state_grps[1].group_id = SMSM_MODEM_STATE;
+ state_grps[1].bits_all_set = SMSM_RESET;
+
+ ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
+
+ if (ret < 0) {
+ printk(KERN_EMERG "%s(): power collapse entry "
+ "timed out waiting for Modem's response\n", __func__);
+ msm_pm_timeout();
+ }
+
+ if (ret == 1) {
+ MSM_PM_DPRINTK(
+ MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO,
+ "%s(): msm_pm_poll_state detected Modem reset\n",
+ __func__);
+ goto power_collapse_early_exit;
+ }
+
+ /* DEM Master in RSA */
+
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): PWRC RSA");
+
+ ret = msm_irq_enter_sleep2(true, from_idle);
+ if (ret < 0) {
+ MSM_PM_DPRINTK(
+ MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO,
+ "%s(): msm_irq_enter_sleep2 aborted, %d\n", __func__,
+ ret);
+ goto power_collapse_early_exit;
+ }
+
+#ifdef CONFIG_MSM_ADM_OFF_AT_POWER_COLLAPSE
+ /* XXX: Temp workaround that needs to be removed soon. The
+ * right fix will probably involve the DMA driver taking
+ * ownership of the ADM clock. */
+ /* id is set to denote ADM clock. */
+ id = 1;
+ msm_proc_comm(PCOM_CLKCTL_RPC_DISABLE, &id, NULL);
+#endif
+
+ msm_pm_config_hw_before_power_down();
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): pre power down");
+
+ saved_acpuclk_rate = acpuclk_power_collapse();
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
+ "%s(): change clock rate (old rate = %lu)\n", __func__,
+ saved_acpuclk_rate);
+
+ if (saved_acpuclk_rate == 0) {
+ msm_pm_config_hw_after_power_up();
+ goto power_collapse_early_exit;
+ }
+
+ saved_vector[0] = msm_pm_reset_vector[0];
+ saved_vector[1] = msm_pm_reset_vector[1];
+ msm_pm_reset_vector[0] = 0xE51FF004; /* ldr pc, 4 */
+ msm_pm_reset_vector[1] = virt_to_phys(msm_pm_collapse_exit);
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_RESET_VECTOR, KERN_INFO,
+ "%s(): vector %x %x -> %x %x\n", __func__,
+ saved_vector[0], saved_vector[1],
+ msm_pm_reset_vector[0], msm_pm_reset_vector[1]);
+
+#ifdef CONFIG_VFP
+ if (from_idle)
+ vfp_flush_context();
+#endif
+
+#ifdef CONFIG_CACHE_L2X0
+ l2x0_suspend();
+#endif
+
+ collapsed = msm_pm_collapse();
+
+#ifdef CONFIG_CACHE_L2X0
+ l2x0_resume(collapsed);
+#endif
+
+ msm_pm_reset_vector[0] = saved_vector[0];
+ msm_pm_reset_vector[1] = saved_vector[1];
+
+ if (collapsed) {
+#ifdef CONFIG_VFP
+ if (from_idle)
+ vfp_reinit();
+#endif
+ cpu_init();
+ local_fiq_enable();
+ }
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND | MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO,
+ "%s(): msm_pm_collapse returned %d\n", __func__, collapsed);
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
+ "%s(): restore clock rate to %lu\n", __func__,
+ saved_acpuclk_rate);
+ if (acpuclk_set_rate(saved_acpuclk_rate, SETRATE_PC) < 0)
+ printk(KERN_ERR "%s(): failed to restore clock rate(%lu)\n",
+ __func__, saved_acpuclk_rate);
+
+#ifdef CONFIG_MSM_ADM_OFF_AT_POWER_COLLAPSE
+ /* id is set to denote ADM clock. */
+ id = 1;
+ if (msm_proc_comm(PCOM_CLKCTL_RPC_ENABLE, &id, NULL) < 0 || id < 0)
+ printk(KERN_ERR
+ "%s(): failed to turn on ADM clock\n", __func__);
+#endif
+
+ msm_irq_exit_sleep1(msm_pm_smem_data->irq_mask,
+ msm_pm_smem_data->wakeup_reason,
+ msm_pm_smem_data->pending_irqs);
+
+ msm_pm_config_hw_after_power_up();
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): post power up");
+
+ memset(state_grps, 0, sizeof(state_grps));
+ state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
+ state_grps[0].bits_any_set =
+ DEM_MASTER_SMSM_RSA | DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
+ state_grps[1].group_id = SMSM_MODEM_STATE;
+ state_grps[1].bits_all_set = SMSM_RESET;
+
+ ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
+
+ if (ret < 0) {
+ printk(KERN_EMERG "%s(): power collapse exit "
+ "timed out waiting for Modem's response\n", __func__);
+ msm_pm_timeout();
+ }
+
+ if (ret == 1) {
+ MSM_PM_DPRINTK(
+ MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO,
+ "%s(): msm_pm_poll_state detected Modem reset\n",
+ __func__);
+ goto power_collapse_early_exit;
+ }
+
+ /* Sanity check */
+ if (collapsed) {
+ BUG_ON(!(state_grps[0].value_read & DEM_MASTER_SMSM_RSA));
+ } else {
+ BUG_ON(!(state_grps[0].value_read &
+ DEM_MASTER_SMSM_PWRC_EARLY_EXIT));
+ goto power_collapse_early_exit;
+ }
+
+ /* Enter WFPI */
+
+ smsm_change_state(SMSM_APPS_DEM,
+ DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
+ DEM_SLAVE_SMSM_WFPI);
+
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI");
+
+ memset(state_grps, 0, sizeof(state_grps));
+ state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
+ state_grps[0].bits_all_set = DEM_MASTER_SMSM_RUN;
+ state_grps[1].group_id = SMSM_MODEM_STATE;
+ state_grps[1].bits_all_set = SMSM_RESET;
+
+ ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
+
+ if (ret < 0) {
+ printk(KERN_EMERG "%s(): power collapse WFPI "
+ "timed out waiting for Modem's response\n", __func__);
+ msm_pm_timeout();
+ }
+
+ if (ret == 1) {
+ MSM_PM_DPRINTK(
+ MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO,
+ "%s(): msm_pm_poll_state detected Modem reset\n",
+ __func__);
+ ret = -EAGAIN;
+ goto power_collapse_restore_gpio_bail;
+ }
+
+ /* DEM Master == RUN */
+
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): WFPI RUN");
+ MSM_PM_DEBUG_PRINT_SLEEP_INFO();
+
+ msm_irq_exit_sleep2(msm_pm_smem_data->irq_mask,
+ msm_pm_smem_data->wakeup_reason,
+ msm_pm_smem_data->pending_irqs);
+ msm_irq_exit_sleep3(msm_pm_smem_data->irq_mask,
+ msm_pm_smem_data->wakeup_reason,
+ msm_pm_smem_data->pending_irqs);
+ msm_gpio_exit_sleep();
+
+ smsm_change_state(SMSM_APPS_DEM,
+ DEM_SLAVE_SMSM_WFPI, DEM_SLAVE_SMSM_RUN);
+
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
+
+ smd_sleep_exit();
+ return 0;
+
+power_collapse_early_exit:
+ /* Enter PWRC_EARLY_EXIT */
+
+ smsm_change_state(SMSM_APPS_DEM,
+ DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND,
+ DEM_SLAVE_SMSM_PWRC_EARLY_EXIT);
+
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT");
+
+ memset(state_grps, 0, sizeof(state_grps));
+ state_grps[0].group_id = SMSM_POWER_MASTER_DEM;
+ state_grps[0].bits_all_set = DEM_MASTER_SMSM_PWRC_EARLY_EXIT;
+ state_grps[1].group_id = SMSM_MODEM_STATE;
+ state_grps[1].bits_all_set = SMSM_RESET;
+
+ ret = msm_pm_poll_state(ARRAY_SIZE(state_grps), state_grps);
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): EARLY_EXIT EE");
+
+ if (ret < 0) {
+ printk(KERN_EMERG "%s(): power collapse EARLY_EXIT "
+ "timed out waiting for Modem's response\n", __func__);
+ msm_pm_timeout();
+ }
+
+ if (ret == 1) {
+ MSM_PM_DPRINTK(
+ MSM_PM_DEBUG_SUSPEND|MSM_PM_DEBUG_POWER_COLLAPSE,
+ KERN_INFO,
+ "%s(): msm_pm_poll_state detected Modem reset\n",
+ __func__);
+ }
+
+ /* DEM Master == RESET or PWRC_EARLY_EXIT */
+
+ ret = -EAGAIN;
+
+power_collapse_restore_gpio_bail:
+ msm_gpio_exit_sleep();
+
+ /* Enter RUN */
+ smsm_change_state(SMSM_APPS_DEM,
+ DEM_SLAVE_SMSM_PWRC | DEM_SLAVE_SMSM_PWRC_SUSPEND |
+ DEM_SLAVE_SMSM_PWRC_EARLY_EXIT, DEM_SLAVE_SMSM_RUN);
+
+ MSM_PM_DEBUG_PRINT_STATE("msm_pm_power_collapse(): RUN");
+
+ if (collapsed)
+ smd_sleep_exit();
+
+power_collapse_bail:
+ return ret;
+}
+
+/*
+ * Apps-sleep the Apps processor. This function execute the handshake
+ * protocol with Modem.
+ *
+ * Return value:
+ * -ENOSYS: function not implemented yet
+ */
+static int msm_pm_apps_sleep(uint32_t sleep_delay, uint32_t sleep_limit)
+{
+ return -ENOSYS;
+}
+
+/*
+ * Bring the Apps processor to SWFI.
+ *
+ * Return value:
+ * -EIO: could not ramp Apps processor clock
+ * 0: success
+ */
+static int msm_pm_swfi(bool ramp_acpu)
+{
+ unsigned long saved_acpuclk_rate = 0;
+
+ if (ramp_acpu) {
+ saved_acpuclk_rate = acpuclk_wait_for_irq();
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
+ "%s(): change clock rate (old rate = %lu)\n", __func__,
+ saved_acpuclk_rate);
+
+ if (!saved_acpuclk_rate)
+ return -EIO;
+ }
+
+ msm_pm_config_hw_before_swfi();
+ msm_arch_idle();
+
+ if (ramp_acpu) {
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_CLOCK, KERN_INFO,
+ "%s(): restore clock rate to %lu\n", __func__,
+ saved_acpuclk_rate);
+ if (acpuclk_set_rate(saved_acpuclk_rate, SETRATE_SWFI) < 0)
+ printk(KERN_ERR
+ "%s(): failed to restore clock rate(%lu)\n",
+ __func__, saved_acpuclk_rate);
+ }
+
+ return 0;
+}
+
+
+/******************************************************************************
+ * External Idle/Suspend Functions
+ *****************************************************************************/
+
+/*
+ * Put CPU in low power mode.
+ */
+void arch_idle(void)
+{
+ bool allow[MSM_PM_SLEEP_MODE_NR];
+ uint32_t sleep_limit = SLEEP_LIMIT_NONE;
+
+ int latency_qos;
+ int64_t timer_expiration;
+
+ int low_power;
+ int ret;
+ int i;
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ DECLARE_BITMAP(clk_ids, NR_CLKS);
+ int64_t t1;
+ static int64_t t2;
+ int exit_stat;
+#endif /* CONFIG_MSM_IDLE_STATS */
+
+ if (!atomic_read(&msm_pm_init_done))
+ return;
+
+ latency_qos = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ timer_expiration = msm_timer_enter_idle();
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ t1 = ktime_to_ns(ktime_get());
+ msm_pm_add_stat(MSM_PM_STAT_NOT_IDLE, t1 - t2);
+ msm_pm_add_stat(MSM_PM_STAT_REQUESTED_IDLE, timer_expiration);
+#endif /* CONFIG_MSM_IDLE_STATS */
+
+ for (i = 0; i < ARRAY_SIZE(allow); i++)
+ allow[i] = true;
+
+ switch (msm_pm_idle_sleep_mode) {
+ case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
+ allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] =
+ false;
+ /* fall through */
+ case MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT:
+ allow[MSM_PM_SLEEP_MODE_APPS_SLEEP] = false;
+ /* fall through */
+ case MSM_PM_SLEEP_MODE_APPS_SLEEP:
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
+ /* fall through */
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+ break;
+ default:
+ printk(KERN_ERR "idle sleep mode is invalid: %d\n",
+ msm_pm_idle_sleep_mode);
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = MSM_PM_STAT_IDLE_SPIN;
+#endif /* CONFIG_MSM_IDLE_STATS */
+ low_power = 0;
+ goto arch_idle_exit;
+ }
+
+ if ((timer_expiration < msm_pm_idle_sleep_min_time) ||
+ !msm_irq_idle_sleep_allowed()) {
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
+ allow[MSM_PM_SLEEP_MODE_APPS_SLEEP] = false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(allow); i++) {
+ struct msm_pm_platform_data *mode = &msm_pm_modes[i];
+ if (!mode->supported || !mode->idle_enabled ||
+ mode->latency >= latency_qos ||
+ mode->residency * 1000ULL >= timer_expiration)
+ allow[i] = false;
+ }
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ ret = msm_clock_require_tcxo(clk_ids, NR_CLKS);
+#elif defined(CONFIG_CLOCK_BASED_SLEEP_LIMIT)
+ ret = msm_clock_require_tcxo(NULL, 0);
+#endif /* CONFIG_MSM_IDLE_STATS */
+
+#ifdef CONFIG_CLOCK_BASED_SLEEP_LIMIT
+ if (ret)
+ sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
+#endif
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
+ "%s(): latency qos %d, next timer %lld, sleep limit %u\n",
+ __func__, latency_qos, timer_expiration, sleep_limit);
+
+ for (i = 0; i < ARRAY_SIZE(allow); i++)
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_IDLE, KERN_INFO,
+ "%s(): allow %s: %d\n", __func__,
+ msm_pm_sleep_mode_labels[i], (int)allow[i]);
+
+ if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
+ uint32_t sleep_delay;
+
+ sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
+ timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
+ if (sleep_delay == 0) /* 0 would mean infinite time */
+ sleep_delay = 1;
+
+ if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
+ sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
+
+ ret = msm_pm_power_collapse(true, sleep_delay, sleep_limit);
+ low_power = (ret != -EBUSY && ret != -ETIMEDOUT);
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ if (ret)
+ exit_stat = MSM_PM_STAT_IDLE_FAILED_POWER_COLLAPSE;
+ else {
+ exit_stat = MSM_PM_STAT_IDLE_POWER_COLLAPSE;
+ msm_pm_sleep_limit = sleep_limit;
+ bitmap_copy(msm_pm_clocks_no_tcxo_shutdown, clk_ids,
+ NR_CLKS);
+ }
+#endif /* CONFIG_MSM_IDLE_STATS */
+ } else if (allow[MSM_PM_SLEEP_MODE_APPS_SLEEP]) {
+ uint32_t sleep_delay;
+
+ sleep_delay = (uint32_t) msm_pm_convert_and_cap_time(
+ timer_expiration, MSM_PM_SLEEP_TICK_LIMIT);
+ if (sleep_delay == 0) /* 0 would mean infinite time */
+ sleep_delay = 1;
+
+ ret = msm_pm_apps_sleep(sleep_delay, sleep_limit);
+ low_power = 0;
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ if (ret)
+ exit_stat = MSM_PM_STAT_IDLE_FAILED_SLEEP;
+ else
+ exit_stat = MSM_PM_STAT_IDLE_SLEEP;
+#endif /* CONFIG_MSM_IDLE_STATS */
+ } else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
+ ret = msm_pm_swfi(true);
+ if (ret)
+ while (!msm_irq_pending())
+ udelay(1);
+ low_power = 0;
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = ret ? MSM_PM_STAT_IDLE_SPIN : MSM_PM_STAT_IDLE_WFI;
+#endif /* CONFIG_MSM_IDLE_STATS */
+ } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
+ msm_pm_swfi(false);
+ low_power = 0;
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = MSM_PM_STAT_IDLE_WFI;
+#endif /* CONFIG_MSM_IDLE_STATS */
+ } else {
+ while (!msm_irq_pending())
+ udelay(1);
+ low_power = 0;
+#ifdef CONFIG_MSM_IDLE_STATS
+ exit_stat = MSM_PM_STAT_IDLE_SPIN;
+#endif /* CONFIG_MSM_IDLE_STATS */
+ }
+
+arch_idle_exit:
+ msm_timer_exit_idle(low_power);
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ t2 = ktime_to_ns(ktime_get());
+ msm_pm_add_stat(exit_stat, t2 - t1);
+#endif /* CONFIG_MSM_IDLE_STATS */
+}
+
+/*
+ * Suspend the Apps processor.
+ *
+ * Return value:
+ * -EAGAIN: modem reset occurred or early exit from suspend
+ * -EBUSY: modem not ready for our suspend
+ * -EINVAL: invalid sleep mode
+ * -EIO: could not ramp Apps processor clock
+ * -ETIMEDOUT: timed out waiting for modem's handshake
+ * 0: success
+ */
+static int msm_pm_enter(suspend_state_t state)
+{
+ bool allow[MSM_PM_SLEEP_MODE_NR];
+ uint32_t sleep_limit = SLEEP_LIMIT_NONE;
+ int ret;
+ int i;
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ DECLARE_BITMAP(clk_ids, NR_CLKS);
+ int64_t period = 0;
+ int64_t time = 0;
+
+ time = msm_timer_get_sclk_time(&period);
+ ret = msm_clock_require_tcxo(clk_ids, NR_CLKS);
+#elif defined(CONFIG_CLOCK_BASED_SLEEP_LIMIT)
+ ret = msm_clock_require_tcxo(NULL, 0);
+#endif /* CONFIG_MSM_IDLE_STATS */
+
+#ifdef CONFIG_CLOCK_BASED_SLEEP_LIMIT
+ if (ret)
+ sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
+#endif
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
+ "%s(): sleep limit %u\n", __func__, sleep_limit);
+
+ for (i = 0; i < ARRAY_SIZE(allow); i++)
+ allow[i] = true;
+
+ switch (msm_pm_sleep_mode) {
+ case MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT:
+ allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT] =
+ false;
+ /* fall through */
+ case MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT:
+ allow[MSM_PM_SLEEP_MODE_APPS_SLEEP] = false;
+ /* fall through */
+ case MSM_PM_SLEEP_MODE_APPS_SLEEP:
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN] = false;
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] = false;
+ /* fall through */
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE_SUSPEND:
+ case MSM_PM_SLEEP_MODE_POWER_COLLAPSE:
+ break;
+ default:
+ printk(KERN_ERR "suspend sleep mode is invalid: %d\n",
+ msm_pm_sleep_mode);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(allow); i++) {
+ struct msm_pm_platform_data *mode = &msm_pm_modes[i];
+ if (!mode->supported || !mode->suspend_enabled)
+ allow[i] = false;
+ }
+
+ ret = 0;
+
+ if (allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE] ||
+ allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE_NO_XO_SHUTDOWN]) {
+#ifdef CONFIG_MSM_IDLE_STATS
+ enum msm_pm_time_stats_id id;
+ int64_t end_time;
+#endif
+
+#ifdef CONFIG_MSM_SLEEP_TIME_OVERRIDE
+ if (msm_pm_sleep_time_override > 0) {
+ int64_t ns;
+ ns = NSEC_PER_SEC * (int64_t)msm_pm_sleep_time_override;
+ msm_pm_set_max_sleep_time(ns);
+ msm_pm_sleep_time_override = 0;
+ }
+#endif
+ if (!allow[MSM_PM_SLEEP_MODE_POWER_COLLAPSE])
+ sleep_limit = SLEEP_LIMIT_NO_TCXO_SHUTDOWN;
+
+ ret = msm_pm_power_collapse(
+ false, msm_pm_max_sleep_time, sleep_limit);
+
+#ifdef CONFIG_MSM_IDLE_STATS
+ if (ret)
+ id = MSM_PM_STAT_FAILED_SUSPEND;
+ else {
+ id = MSM_PM_STAT_SUSPEND;
+ msm_pm_sleep_limit = sleep_limit;
+ bitmap_copy(msm_pm_clocks_no_tcxo_shutdown, clk_ids,
+ NR_CLKS);
+ }
+
+ if (time != 0) {
+ end_time = msm_timer_get_sclk_time(NULL);
+ if (end_time != 0) {
+ time = end_time - time;
+ if (time < 0)
+ time += period;
+ } else
+ time = 0;
+ }
+
+ msm_pm_add_stat(id, time);
+#endif
+ } else if (allow[MSM_PM_SLEEP_MODE_RAMP_DOWN_AND_WAIT_FOR_INTERRUPT]) {
+ ret = msm_pm_swfi(true);
+ if (ret)
+ while (!msm_irq_pending())
+ udelay(1);
+ } else if (allow[MSM_PM_SLEEP_MODE_WAIT_FOR_INTERRUPT]) {
+ msm_pm_swfi(false);
+ }
+
+ MSM_PM_DPRINTK(MSM_PM_DEBUG_SUSPEND, KERN_INFO,
+ "%s(): return %d\n", __func__, ret);
+
+ return ret;
+}
+
+static struct platform_suspend_ops msm_pm_ops = {
+ .enter = msm_pm_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+
+/******************************************************************************
+ * Restart Definitions
+ *****************************************************************************/
+
+static uint32_t restart_reason = 0x776655AA;
+
+static void msm_pm_power_off(void)
+{
+ msm_proc_comm(PCOM_POWER_DOWN, 0, 0);
+ for (;;)
+ ;
+}
+
+static void msm_pm_restart(char str, const char *cmd )
+{
+ msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
+
+ for (;;)
+ ;
+}
+
+static int msm_reboot_call
+ (struct notifier_block *this, unsigned long code, void *_cmd)
+{
+ if ((code == SYS_RESTART) && _cmd) {
+ char *cmd = _cmd;
+ if (!strcmp(cmd, "bootloader")) {
+ restart_reason = 0x77665500;
+ } else if (!strcmp(cmd, "recovery")) {
+ restart_reason = 0x77665502;
+ } else if (!strcmp(cmd, "eraseflash")) {
+ restart_reason = 0x776655EF;
+ } else if (!strncmp(cmd, "oem-", 4)) {
+ unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
+ restart_reason = 0x6f656d00 | code;
+ } else {
+ restart_reason = 0x77665501;
+ }
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_reboot_notifier = {
+ .notifier_call = msm_reboot_call,
+};
+
+
+/******************************************************************************
+ *
+ *****************************************************************************/
+
+/*
+ * Initialize the power management subsystem.
+ *
+ * Return value:
+ * -ENODEV: initialization failed
+ * 0: success
+ */
+static int __init msm_pm_init(void)
+{
+#ifdef CONFIG_MSM_IDLE_STATS
+ struct proc_dir_entry *d_entry;
+#endif
+ int ret;
+
+ pm_power_off = msm_pm_power_off;
+ arm_pm_restart = msm_pm_restart;
+ register_reboot_notifier(&msm_reboot_notifier);
+
+ msm_pm_smem_data = smem_alloc(SMEM_APPS_DEM_SLAVE_DATA,
+ sizeof(*msm_pm_smem_data));
+ if (msm_pm_smem_data == NULL) {
+ printk(KERN_ERR "%s: failed to get smsm_data\n", __func__);
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_ARCH_MSM_SCORPION
+ /* The bootloader is responsible for initializing many of Scorpion's
+ * coprocessor registers for things like cache timing. The state of
+ * these coprocessor registers is lost on reset, so part of the
+ * bootloader must be re-executed. Do not overwrite the reset vector
+ * or bootloader area.
+ */
+ msm_pm_reset_vector = (uint32_t *) PAGE_OFFSET;
+#else
+ msm_pm_reset_vector = ioremap(0, PAGE_SIZE);
+ if (msm_pm_reset_vector == NULL) {
+ printk(KERN_ERR "%s: failed to map reset vector\n", __func__);
+ return -ENODEV;
+ }
+#endif /* CONFIG_ARCH_MSM_SCORPION */
+
+ ret = msm_timer_init_time_sync();
+ if (ret)
+ return ret;
+
+ ret = smsm_change_intr_mask(SMSM_POWER_MASTER_DEM, 0xFFFFFFFF, 0);
+ if (ret) {
+ printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ BUG_ON(msm_pm_modes == NULL);
+
+ atomic_set(&msm_pm_init_done, 1);
+ suspend_set_ops(&msm_pm_ops);
+
+ msm_pm_mode_sysfs_add();
+#ifdef CONFIG_MSM_IDLE_STATS
+ d_entry = create_proc_entry("msm_pm_stats",
+ S_IRUGO | S_IWUSR | S_IWGRP, NULL);
+ if (d_entry) {
+ d_entry->read_proc = msm_pm_read_proc;
+ d_entry->write_proc = msm_pm_write_proc;
+ d_entry->data = NULL;
+ }
+#endif
+
+ return 0;
+}
+
+late_initcall(msm_pm_init);
diff --git a/arch/arm/mach-msm/pmic.c b/arch/arm/mach-msm/pmic.c
new file mode 100644
index 000000000000..3c481a6bc180
--- /dev/null
+++ b/arch/arm/mach-msm/pmic.c
@@ -0,0 +1,1095 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+
+#include <mach/pmic.h>
+
+#include "smd_rpcrouter.h"
+
+#define TRACE_PMIC 0
+
+#if TRACE_PMIC
+#define PMIC(x...) printk(KERN_INFO "[PMIC] " x)
+#else
+#define PMIC(x...) do {} while (0)
+#endif
+
+
+#define LIB_NULL_PROC 0
+#define LIB_RPC_GLUE_CODE_INFO_REMOTE_PROC 1
+#define LP_MODE_CONTROL_PROC 2
+#define VREG_SET_LEVEL_PROC 3
+#define VREG_PULL_DOWN_SWITCH_PROC 4
+#define SECURE_MPP_CONFIG_DIGITAL_OUTPUT_PROC 5
+#define SECURE_MPP_CONFIG_I_SINK_PROC 6
+#define RTC_START_PROC 7
+#define RTC_STOP_PROC 8
+#define RTC_GET_TIME_PROC 9
+#define RTC_ENABLE_ALARM_PROC 10
+#define RTC_DISABLE_ALARM_PROC 11
+#define RTC_GET_ALARM_TIME_PROC 12
+#define RTC_GET_ALARM_STATUS_PROC 13
+#define RTC_SET_TIME_ADJUST_PROC 14
+#define RTC_GET_TIME_ADJUST_PROC 15
+#define SET_LED_INTENSITY_PROC 16
+#define FLASH_LED_SET_CURRENT_PROC 17
+#define FLASH_LED_SET_MODE_PROC 18
+#define FLASH_LED_SET_POLARITY_PROC 19
+#define SPEAKER_CMD_PROC 20
+#define SET_SPEAKER_GAIN_PROC 21
+#define VIB_MOT_SET_VOLT_PROC 22
+#define VIB_MOT_SET_MODE_PROC 23
+#define VIB_MOT_SET_POLARITY_PROC 24
+#define VID_EN_PROC 25
+#define VID_IS_EN_PROC 26
+#define VID_LOAD_DETECT_EN_PROC 27
+#define MIC_EN_PROC 28
+#define MIC_IS_EN_PROC 29
+#define MIC_SET_VOLT_PROC 30
+#define MIC_GET_VOLT_PROC 31
+#define SPKR_EN_RIGHT_CHAN_PROC 32
+#define SPKR_IS_RIGHT_CHAN_EN_PROC 33
+#define SPKR_EN_LEFT_CHAN_PROC 34
+#define SPKR_IS_LEFT_CHAN_EN_PROC 35
+#define SET_SPKR_CONFIGURATION_PROC 36
+#define GET_SPKR_CONFIGURATION_PROC 37
+#define SPKR_GET_GAIN_PROC 38
+#define SPKR_IS_EN_PROC 39
+#define SPKR_EN_MUTE_PROC 40
+#define SPKR_IS_MUTE_EN_PROC 41
+#define SPKR_SET_DELAY_PROC 42
+#define SPKR_GET_DELAY_PROC 43
+#define SECURE_MPP_CONFIG_DIGITAL_INPUT_PROC 44
+#define SET_SPEAKER_DELAY_PROC 45
+#define SPEAKER_1K6_ZIN_ENABLE_PROC 46
+#define SPKR_SET_MUX_HPF_CORNER_FREQ_PROC 47
+#define SPKR_GET_MUX_HPF_CORNER_FREQ_PROC 48
+#define SPKR_IS_RIGHT_LEFT_CHAN_ADDED_PROC 49
+#define SPKR_EN_STEREO_PROC 50
+#define SPKR_IS_STEREO_EN_PROC 51
+#define SPKR_SELECT_USB_WITH_HPF_20HZ_PROC 52
+#define SPKR_IS_USB_WITH_HPF_20HZ_PROC 53
+#define SPKR_BYPASS_MUX_PROC 54
+#define SPKR_IS_MUX_BYPASSED_PROC 55
+#define SPKR_EN_HPF_PROC 56
+#define SPKR_IS_HPF_EN_PROC 57
+#define SPKR_EN_SINK_CURR_FROM_REF_VOLT_CIR_PROC 58
+#define SPKR_IS_SINK_CURR_FROM_REF_VOLT_CIR_EN_PROC 59
+#define SPKR_ADD_RIGHT_LEFT_CHAN_PROC 60
+#define SPKR_SET_GAIN_PROC 61
+#define SPKR_EN_PROC 62
+#define HSED_SET_PERIOD_PROC 63
+#define HSED_SET_HYSTERESIS_PROC 64
+#define HSED_SET_CURRENT_THRESHOLD_PROC 65
+#define HSED_ENABLE_PROC 66
+
+
+/* rpc related */
+#define PMIC_RPC_TIMEOUT (5*HZ)
+
+#define PMIC_PDEV_NAME "rs00010001:00000000"
+#define PMIC_RPC_PROG 0x30000061
+#define PMIC_RPC_VER_1_1 0x00010001
+#define PMIC_RPC_VER_2_1 0x00020001
+
+/* error bit flags defined by modem side */
+#define PM_ERR_FLAG__PAR1_OUT_OF_RANGE (0x0001)
+#define PM_ERR_FLAG__PAR2_OUT_OF_RANGE (0x0002)
+#define PM_ERR_FLAG__PAR3_OUT_OF_RANGE (0x0004)
+#define PM_ERR_FLAG__PAR4_OUT_OF_RANGE (0x0008)
+#define PM_ERR_FLAG__PAR5_OUT_OF_RANGE (0x0010)
+
+#define PM_ERR_FLAG__ALL_PARMS_OUT_OF_RANGE (0x001F) /* all 5 previous */
+
+#define PM_ERR_FLAG__SBI_OPT_ERR (0x0080)
+#define PM_ERR_FLAG__FEATURE_NOT_SUPPORTED (0x0100)
+
+#define PMIC_BUFF_SIZE 256
+
+struct pmic_buf {
+ char *start; /* buffer start addr */
+ char *end; /* buffer end addr */
+ int size; /* buffer size */
+ char *data; /* payload begin addr */
+ int len; /* payload len */
+};
+
+static DEFINE_MUTEX(pmic_mtx);
+
+struct pmic_ctrl {
+ int inited;
+ struct pmic_buf tbuf;
+ struct pmic_buf rbuf;
+ struct msm_rpc_endpoint *endpoint;
+};
+
+static struct pmic_ctrl pmic_ctrl = {
+ .inited = -1,
+};
+
+static int pmic_rpc_req_reply(struct pmic_buf *tbuf,
+ struct pmic_buf *rbuf, int proc);
+static int pmic_rpc_set_only(uint data0, uint data1, uint data2,
+ uint data3, int num, int proc);
+static int pmic_rpc_set_struct(int, uint, uint *data, uint size, int proc);
+static int pmic_rpc_set_get(uint setdata, uint *getdata, int size, int proc);
+static int pmic_rpc_get_only(uint *getdata, int size, int proc);
+
+static int pmic_buf_init(void)
+{
+ struct pmic_ctrl *pm = &pmic_ctrl;
+
+ memset(&pmic_ctrl, 0, sizeof(pmic_ctrl));
+
+ pm->tbuf.start = kmalloc(PMIC_BUFF_SIZE, GFP_KERNEL);
+ if (pm->tbuf.start == NULL) {
+ printk(KERN_ERR "%s:%u\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+
+ pm->tbuf.data = pm->tbuf.start;
+ pm->tbuf.size = PMIC_BUFF_SIZE;
+ pm->tbuf.end = pm->tbuf.start + PMIC_BUFF_SIZE;
+ pm->tbuf.len = 0;
+
+ pm->rbuf.start = kmalloc(PMIC_BUFF_SIZE, GFP_KERNEL);
+ if (pm->rbuf.start == NULL) {
+ kfree(pm->tbuf.start);
+ printk(KERN_ERR "%s:%u\n", __func__, __LINE__);
+ return -ENOMEM;
+ }
+ pm->rbuf.data = pm->rbuf.start;
+ pm->rbuf.size = PMIC_BUFF_SIZE;
+ pm->rbuf.end = pm->rbuf.start + PMIC_BUFF_SIZE;
+ pm->rbuf.len = 0;
+
+ pm->inited = 1;
+
+ return 0;
+}
+
+static inline void pmic_buf_reserve(struct pmic_buf *bp, int len)
+{
+ bp->data += len;
+}
+
+static inline void pmic_buf_reset(struct pmic_buf *bp)
+{
+ bp->data = bp->start;
+ bp->len = 0;
+}
+
+static int modem_to_linux_err(uint err)
+{
+ if (err == 0)
+ return 0;
+
+ if (err & PM_ERR_FLAG__ALL_PARMS_OUT_OF_RANGE)
+ return -EINVAL; /* PM_ERR_FLAG__PAR[1..5]_OUT_OF_RANGE */
+
+ if (err & PM_ERR_FLAG__SBI_OPT_ERR)
+ return -EIO;
+
+ if (err & PM_ERR_FLAG__FEATURE_NOT_SUPPORTED)
+ return -ENOSYS;
+
+ return -EPERM;
+}
+
+static int pmic_put_tx_data(struct pmic_buf *tp, uint datav)
+{
+ uint *lp;
+
+ if ((tp->size - tp->len) < sizeof(datav)) {
+ printk(KERN_ERR "%s: OVERFLOW size=%d len=%d\n",
+ __func__, tp->size, tp->len);
+ return -1;
+ }
+
+ lp = (uint *)tp->data;
+ *lp = cpu_to_be32(datav);
+ tp->data += sizeof(datav);
+ tp->len += sizeof(datav);
+
+ return sizeof(datav);
+}
+
+static int pmic_pull_rx_data(struct pmic_buf *rp, uint *datap)
+{
+ uint *lp;
+
+ if (rp->len < sizeof(*datap)) {
+ printk(KERN_ERR "%s: UNDERRUN len=%d\n", __func__, rp->len);
+ return -1;
+ }
+ lp = (uint *)rp->data;
+ *datap = be32_to_cpu(*lp);
+ rp->data += sizeof(*datap);
+ rp->len -= sizeof(*datap);
+
+ return sizeof(*datap);
+}
+
+
+/*
+ *
+ * +-------------------+
+ * | PROC cmd layer |
+ * +-------------------+
+ * | RPC layer |
+ * +-------------------+
+ *
+ * 1) network byte order
+ * 2) RPC request header(40 bytes) and RPC reply header (24 bytes)
+ * 3) each transaction consists of a request and reply
+ * 3) PROC (comamnd) layer has its own sub-protocol defined
+ * 4) sub-protocol can be grouped to follwoing 7 cases:
+ * a) set one argument, no get
+ * b) set two argument, no get
+ * c) set three argument, no get
+ * d) set a struct, no get
+ * e) set a argument followed by a struct, no get
+ * f) set a argument, get a argument
+ * g) no set, get either a argument or a struct
+ */
+
+/**
+ * pmic_rpc_req_reply() - send request and wait for reply
+ * @tbuf: buffer contains arguments
+ * @rbuf: buffer to be filled with arguments at reply
+ * @proc: command/request id
+ *
+ * This function send request to modem and wait until reply received
+ */
+static int pmic_rpc_req_reply(struct pmic_buf *tbuf, struct pmic_buf *rbuf,
+ int proc)
+{
+ struct pmic_ctrl *pm = &pmic_ctrl;
+ int ans, len;
+
+
+ if ((pm->endpoint == NULL) || IS_ERR(pm->endpoint)) {
+ pm->endpoint = msm_rpc_connect_compatible(PMIC_RPC_PROG,
+ PMIC_RPC_VER_2_1, 0);
+ if (IS_ERR(pm->endpoint)) {
+ pm->endpoint = msm_rpc_connect_compatible(PMIC_RPC_PROG,
+ PMIC_RPC_VER_1_1, 0);
+ }
+
+ if (IS_ERR(pm->endpoint)) {
+ ans = PTR_ERR(pm->endpoint);
+ printk(KERN_ERR "%s: init rpc failed! ans = %d\n",
+ __func__, ans);
+ return ans;
+ }
+ }
+
+ /*
+ * data is point to next available space at this moment,
+ * move it back to beginning of request header and increase
+ * the length
+ */
+ tbuf->data = tbuf->start;
+ tbuf->len += sizeof(struct rpc_request_hdr);
+
+ len = msm_rpc_call_reply(pm->endpoint, proc,
+ tbuf->data, tbuf->len,
+ rbuf->data, rbuf->size,
+ PMIC_RPC_TIMEOUT);
+
+ if (len <= 0) {
+ printk(KERN_ERR "%s: rpc failed! len = %d\n", __func__, len);
+ pm->endpoint = NULL; /* re-connect later ? */
+ return len;
+ }
+
+ rbuf->len = len;
+ /* strip off rpc_reply_hdr */
+ rbuf->data += sizeof(struct rpc_reply_hdr);
+ rbuf->len -= sizeof(struct rpc_reply_hdr);
+
+ return rbuf->len;
+}
+
+/**
+ * pmic_rpc_set_only() - set arguments and no get
+ * @data0: first argumrnt
+ * @data1: second argument
+ * @data2: third argument
+ * @data3: fourth argument
+ * @num: number of argument
+ * @proc: command/request id
+ *
+ * This function covers case a, b, and c
+ */
+static int pmic_rpc_set_only(uint data0, uint data1, uint data2, uint data3,
+ int num, int proc)
+{
+ struct pmic_ctrl *pm = &pmic_ctrl;
+ struct pmic_buf *tp;
+ struct pmic_buf *rp;
+ int stat;
+
+
+ if (mutex_lock_interruptible(&pmic_mtx))
+ return -ERESTARTSYS;
+
+ if (pm->inited <= 0) {
+ stat = pmic_buf_init();
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+ }
+
+ tp = &pm->tbuf;
+ rp = &pm->rbuf;
+
+ pmic_buf_reset(tp);
+ pmic_buf_reserve(tp, sizeof(struct rpc_request_hdr));
+ pmic_buf_reset(rp);
+
+ if (num > 0)
+ pmic_put_tx_data(tp, data0);
+
+ if (num > 1)
+ pmic_put_tx_data(tp, data1);
+
+ if (num > 2)
+ pmic_put_tx_data(tp, data2);
+
+ if (num > 3)
+ pmic_put_tx_data(tp, data3);
+
+ stat = pmic_rpc_req_reply(tp, rp, proc);
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+
+ pmic_pull_rx_data(rp, &stat); /* result from server */
+
+ mutex_unlock(&pmic_mtx);
+
+ return modem_to_linux_err(stat);
+}
+
+/**
+ * pmic_rpc_set_struct() - set the whole struct
+ * @xflag: indicates an extra argument
+ * @xdata: the extra argument
+ * @*data: starting address of struct
+ * @size: size of struct
+ * @proc: command/request id
+ *
+ * This fucntion covers case d and e
+ */
+static int pmic_rpc_set_struct(int xflag, uint xdata, uint *data, uint size,
+ int proc)
+{
+ struct pmic_ctrl *pm = &pmic_ctrl;
+ struct pmic_buf *tp;
+ struct pmic_buf *rp;
+ int i, stat, more_data;
+
+
+ if (mutex_lock_interruptible(&pmic_mtx))
+ return -ERESTARTSYS;
+
+ if (pm->inited <= 0) {
+ stat = pmic_buf_init();
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+ }
+
+ tp = &pm->tbuf;
+ rp = &pm->rbuf;
+
+ pmic_buf_reset(tp);
+ pmic_buf_reserve(tp, sizeof(struct rpc_request_hdr));
+ pmic_buf_reset(rp);
+
+ if (xflag)
+ pmic_put_tx_data(tp, xdata);
+
+ more_data = 1; /* tell server there have more data followed */
+ pmic_put_tx_data(tp, more_data);
+
+ size >>= 2;
+ for (i = 0; i < size; i++) {
+ pmic_put_tx_data(tp, *data);
+ data++;
+ }
+
+ stat = pmic_rpc_req_reply(tp, rp, proc);
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+
+ pmic_pull_rx_data(rp, &stat); /* result from server */
+
+ mutex_unlock(&pmic_mtx);
+
+ return modem_to_linux_err(stat);
+}
+
+/**
+ * pmic_rpc_set_get() - set one argument and get one argument
+ * @setdata: set argument
+ * @*getdata: memory to store argumnet
+ * @size: size of memory
+ * @proc: command/request id
+ *
+ * This function covers case f
+ */
+static int pmic_rpc_set_get(uint setdata, uint *getdata, int size, int proc)
+{
+ struct pmic_ctrl *pm = &pmic_ctrl;
+ struct pmic_buf *tp;
+ struct pmic_buf *rp;
+ unsigned int *lp;
+ int i, stat, more_data;
+
+
+ if (mutex_lock_interruptible(&pmic_mtx))
+ return -ERESTARTSYS;
+
+ if (pm->inited <= 0) {
+ stat = pmic_buf_init();
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+ }
+
+ tp = &pm->tbuf;
+ rp = &pm->rbuf;
+
+ pmic_buf_reset(tp);
+ pmic_buf_reserve(tp, sizeof(struct rpc_request_hdr));
+ pmic_buf_reset(rp);
+
+ pmic_put_tx_data(tp, setdata);
+
+ /*
+ * more_data = TRUE to ask server reply with requested datum
+ * otherwise, server will reply without datum
+ */
+ more_data = (getdata != NULL);
+ pmic_put_tx_data(tp, more_data);
+
+ stat = pmic_rpc_req_reply(tp, rp, proc);
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+
+ pmic_pull_rx_data(rp, &stat); /* result from server */
+ pmic_pull_rx_data(rp, &more_data);
+
+ if (more_data) { /* more data followed */
+ size >>= 2;
+ lp = getdata;
+ for (i = 0; i < size; i++) {
+ if (pmic_pull_rx_data(rp, lp++) < 0)
+ break; /* not supposed to happen */
+ }
+ }
+
+ mutex_unlock(&pmic_mtx);
+
+ return modem_to_linux_err(stat);
+}
+
+/**
+ * pmic_rpc_get_only() - get one or more than one arguments
+ * @*getdata: memory to store arguments
+ * @size: size of mmory
+ * @proc: command/request id
+ *
+ * This function covers case g
+ */
+static int pmic_rpc_get_only(uint *getdata, int size, int proc)
+{
+ struct pmic_ctrl *pm = &pmic_ctrl;
+ struct pmic_buf *tp;
+ struct pmic_buf *rp;
+ unsigned int *lp;
+ int i, stat, more_data;
+
+
+ if (mutex_lock_interruptible(&pmic_mtx))
+ return -ERESTARTSYS;
+
+ if (pm->inited <= 0) {
+ stat = pmic_buf_init();
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+ }
+
+ tp = &pm->tbuf;
+ rp = &pm->rbuf;
+
+ pmic_buf_reset(tp);
+ pmic_buf_reserve(tp, sizeof(struct rpc_request_hdr));
+ pmic_buf_reset(rp);
+
+ /*
+ * more_data = TRUE to ask server reply with requested datum
+ * otherwise, server will reply without datum
+ */
+ more_data = (getdata != NULL);
+ pmic_put_tx_data(tp, more_data);
+
+ stat = pmic_rpc_req_reply(tp, rp, proc);
+ if (stat < 0) {
+ mutex_unlock(&pmic_mtx);
+ return stat;
+ }
+
+ pmic_pull_rx_data(rp, &stat); /* result from server */
+ pmic_pull_rx_data(rp, &more_data);
+
+ if (more_data) { /* more data followed */
+ size >>= 2;
+ lp = getdata;
+ for (i = 0; i < size; i++) {
+ if (pmic_pull_rx_data(rp, lp++) < 0)
+ break; /* not supposed to happen */
+ }
+ }
+
+ mutex_unlock(&pmic_mtx);
+
+ return modem_to_linux_err(stat);
+}
+
+
+int pmic_lp_mode_control(enum switch_cmd cmd, enum vreg_lp_id id)
+{
+ return pmic_rpc_set_only(cmd, id, 0, 0, 2, LP_MODE_CONTROL_PROC);
+}
+EXPORT_SYMBOL(pmic_lp_mode_control);
+
+int pmic_vreg_set_level(enum vreg_id vreg, int level)
+{
+ return pmic_rpc_set_only(vreg, level, 0, 0, 2, VREG_SET_LEVEL_PROC);
+}
+EXPORT_SYMBOL(pmic_vreg_set_level);
+
+int pmic_vreg_pull_down_switch(enum switch_cmd cmd, enum vreg_pdown_id id)
+{
+ return pmic_rpc_set_only(cmd, id, 0, 0, 2, VREG_PULL_DOWN_SWITCH_PROC);
+}
+EXPORT_SYMBOL(pmic_vreg_pull_down_switch);
+
+int pmic_secure_mpp_control_digital_output(enum mpp_which which,
+ enum mpp_dlogic_level level,
+ enum mpp_dlogic_out_ctrl out)
+{
+ return pmic_rpc_set_only(which, level, out, 0, 3,
+ SECURE_MPP_CONFIG_DIGITAL_OUTPUT_PROC);
+}
+EXPORT_SYMBOL(pmic_secure_mpp_control_digital_output);
+
+int pmic_secure_mpp_config_i_sink(enum mpp_which which,
+ enum mpp_i_sink_level level,
+ enum mpp_i_sink_switch onoff)
+{
+ return pmic_rpc_set_only(which, level, onoff, 0, 3,
+ SECURE_MPP_CONFIG_I_SINK_PROC);
+}
+EXPORT_SYMBOL(pmic_secure_mpp_config_i_sink);
+
+int pmic_secure_mpp_config_digital_input(enum mpp_which which,
+ enum mpp_dlogic_level level,
+ enum mpp_dlogic_in_dbus dbus)
+{
+ return pmic_rpc_set_only(which, level, dbus, 0, 3,
+ SECURE_MPP_CONFIG_DIGITAL_INPUT_PROC);
+}
+EXPORT_SYMBOL(pmic_secure_mpp_config_digital_input);
+
+int pmic_rtc_start(struct rtc_time *time)
+{
+ return pmic_rpc_set_struct(0, 0, (uint *)time, sizeof(*time),
+ RTC_START_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_start);
+
+int pmic_rtc_stop(void)
+{
+ return pmic_rpc_set_only(0, 0, 0, 0, 0, RTC_STOP_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_stop);
+
+int pmic_rtc_get_time(struct rtc_time *time)
+{
+ return pmic_rpc_get_only((uint *)time, sizeof(*time),
+ RTC_GET_TIME_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_get_time);
+
+int pmic_rtc_enable_alarm(enum rtc_alarm alarm,
+ struct rtc_time *time)
+{
+ return pmic_rpc_set_struct(1, alarm, (uint *)time, sizeof(*time),
+ RTC_ENABLE_ALARM_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_enable_alarm);
+
+int pmic_rtc_disable_alarm(enum rtc_alarm alarm)
+{
+ return pmic_rpc_set_only(alarm, 0, 0, 0, 1, RTC_DISABLE_ALARM_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_disable_alarm);
+
+int pmic_rtc_get_alarm_time(enum rtc_alarm alarm,
+ struct rtc_time *time)
+{
+ return pmic_rpc_set_get(alarm, (uint *)time, sizeof(*time),
+ RTC_GET_ALARM_TIME_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_get_alarm_time);
+
+int pmic_rtc_get_alarm_status(uint *status)
+{
+ return pmic_rpc_get_only(status, sizeof(*status),
+ RTC_GET_ALARM_STATUS_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_get_alarm_status);
+
+int pmic_rtc_set_time_adjust(uint adjust)
+{
+ return pmic_rpc_set_only(adjust, 0, 0, 0, 1,
+ RTC_SET_TIME_ADJUST_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_set_time_adjust);
+
+int pmic_rtc_get_time_adjust(uint *adjust)
+{
+ return pmic_rpc_get_only(adjust, sizeof(*adjust),
+ RTC_GET_TIME_ADJUST_PROC);
+}
+EXPORT_SYMBOL(pmic_rtc_get_time_adjust);
+
+/*
+ * generic speaker
+ */
+int pmic_speaker_cmd(const enum spkr_cmd cmd)
+{
+ return pmic_rpc_set_only(cmd, 0, 0, 0, 1, SPEAKER_CMD_PROC);
+}
+EXPORT_SYMBOL(pmic_speaker_cmd);
+
+int pmic_set_spkr_configuration(struct spkr_config_mode *cfg)
+{
+ return pmic_rpc_set_struct(0, 0, (uint *)cfg, sizeof(*cfg),
+ SET_SPKR_CONFIGURATION_PROC);
+}
+EXPORT_SYMBOL(pmic_set_spkr_configuration);
+
+int pmic_get_spkr_configuration(struct spkr_config_mode *cfg)
+{
+ return pmic_rpc_get_only((uint *)cfg, sizeof(*cfg),
+ GET_SPKR_CONFIGURATION_PROC);
+}
+EXPORT_SYMBOL(pmic_get_spkr_configuration);
+
+int pmic_spkr_en_right_chan(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_RIGHT_CHAN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_en_right_chan);
+
+int pmic_spkr_is_right_chan_en(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_RIGHT_CHAN_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_right_chan_en);
+
+int pmic_spkr_en_left_chan(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_LEFT_CHAN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_en_left_chan);
+
+int pmic_spkr_is_left_chan_en(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_LEFT_CHAN_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_left_chan_en);
+
+int pmic_set_speaker_gain(enum spkr_gain gain)
+{
+ return pmic_rpc_set_only(gain, 0, 0, 0, 1, SET_SPEAKER_GAIN_PROC);
+}
+EXPORT_SYMBOL(pmic_set_speaker_gain);
+
+int pmic_set_speaker_delay(enum spkr_dly delay)
+{
+ return pmic_rpc_set_only(delay, 0, 0, 0, 1, SET_SPEAKER_DELAY_PROC);
+}
+EXPORT_SYMBOL(pmic_set_speaker_delay);
+
+int pmic_speaker_1k6_zin_enable(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1,
+ SPEAKER_1K6_ZIN_ENABLE_PROC);
+}
+EXPORT_SYMBOL(pmic_speaker_1k6_zin_enable);
+
+int pmic_spkr_set_mux_hpf_corner_freq(enum spkr_hpf_corner_freq freq)
+{
+ return pmic_rpc_set_only(freq, 0, 0, 0, 1,
+ SPKR_SET_MUX_HPF_CORNER_FREQ_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_set_mux_hpf_corner_freq);
+
+int pmic_spkr_get_mux_hpf_corner_freq(enum spkr_hpf_corner_freq *freq)
+{
+ return pmic_rpc_get_only(freq, sizeof(*freq),
+ SPKR_GET_MUX_HPF_CORNER_FREQ_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_get_mux_hpf_corner_freq);
+
+int pmic_spkr_select_usb_with_hpf_20hz(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1,
+ SPKR_SELECT_USB_WITH_HPF_20HZ_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_select_usb_with_hpf_20hz);
+
+int pmic_spkr_is_usb_with_hpf_20hz(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_USB_WITH_HPF_20HZ_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_usb_with_hpf_20hz);
+
+int pmic_spkr_bypass_mux(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_BYPASS_MUX_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_bypass_mux);
+
+int pmic_spkr_is_mux_bypassed(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_MUX_BYPASSED_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_mux_bypassed);
+
+int pmic_spkr_en_hpf(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_HPF_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_en_hpf);
+
+int pmic_spkr_is_hpf_en(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_HPF_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_hpf_en);
+
+int pmic_spkr_en_sink_curr_from_ref_volt_cir(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1,
+ SPKR_EN_SINK_CURR_FROM_REF_VOLT_CIR_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_en_sink_curr_from_ref_volt_cir);
+
+int pmic_spkr_is_sink_curr_from_ref_volt_cir_en(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_SINK_CURR_FROM_REF_VOLT_CIR_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_sink_curr_from_ref_volt_cir_en);
+
+/*
+ * speaker indexed by left_right
+ */
+int pmic_spkr_en(enum spkr_left_right left_right, uint enable)
+{
+ return pmic_rpc_set_only(left_right, enable, 0, 0, 2, SPKR_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_en);
+
+int pmic_spkr_is_en(enum spkr_left_right left_right, uint *enabled)
+{
+ return pmic_rpc_set_get(left_right, enabled, sizeof(*enabled),
+ SPKR_IS_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_en);
+
+int pmic_spkr_set_gain(enum spkr_left_right left_right, enum spkr_gain gain)
+{
+ return pmic_rpc_set_only(left_right, gain, 0, 0, 2, SPKR_SET_GAIN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_set_gain);
+
+int pmic_spkr_get_gain(enum spkr_left_right left_right, enum spkr_gain *gain)
+{
+ return pmic_rpc_set_get(left_right, gain, sizeof(*gain),
+ SPKR_GET_GAIN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_get_gain);
+
+int pmic_spkr_set_delay(enum spkr_left_right left_right, enum spkr_dly delay)
+{
+ return pmic_rpc_set_only(left_right, delay, 0, 0, 2,
+ SPKR_SET_DELAY_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_set_delay);
+
+int pmic_spkr_get_delay(enum spkr_left_right left_right, enum spkr_dly *delay)
+{
+ return pmic_rpc_set_get(left_right, delay, sizeof(*delay),
+ SPKR_GET_DELAY_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_get_delay);
+
+int pmic_spkr_en_mute(enum spkr_left_right left_right, uint enabled)
+{
+ return pmic_rpc_set_only(left_right, enabled, 0, 0, 2,
+ SPKR_EN_MUTE_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_en_mute);
+
+int pmic_spkr_is_mute_en(enum spkr_left_right left_right, uint *enabled)
+{
+ return pmic_rpc_set_get(left_right, enabled, sizeof(*enabled),
+ SPKR_IS_MUTE_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_mute_en);
+
+/*
+ * mic
+ */
+int pmic_mic_en(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, MIC_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_mic_en);
+
+int pmic_mic_is_en(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled), MIC_IS_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_mic_is_en);
+
+int pmic_mic_set_volt(enum mic_volt vol)
+{
+ return pmic_rpc_set_only(vol, 0, 0, 0, 1, MIC_SET_VOLT_PROC);
+}
+EXPORT_SYMBOL(pmic_mic_set_volt);
+
+int pmic_mic_get_volt(enum mic_volt *voltage)
+{
+ return pmic_rpc_get_only(voltage, sizeof(*voltage), MIC_GET_VOLT_PROC);
+}
+EXPORT_SYMBOL(pmic_mic_get_volt);
+
+int pmic_vib_mot_set_volt(uint vol)
+{
+ return pmic_rpc_set_only(vol, 0, 0, 0, 1, VIB_MOT_SET_VOLT_PROC);
+}
+EXPORT_SYMBOL(pmic_vib_mot_set_volt);
+
+int pmic_vib_mot_set_mode(enum pm_vib_mot_mode mode)
+{
+ return pmic_rpc_set_only(mode, 0, 0, 0, 1, VIB_MOT_SET_MODE_PROC);
+}
+EXPORT_SYMBOL(pmic_vib_mot_set_mode);
+
+int pmic_vib_mot_set_polarity(enum pm_vib_mot_pol pol)
+{
+ return pmic_rpc_set_only(pol, 0, 0, 0, 1, VIB_MOT_SET_POLARITY_PROC);
+}
+EXPORT_SYMBOL(pmic_vib_mot_set_polarity);
+
+int pmic_vid_en(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, VID_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_vid_en);
+
+int pmic_vid_is_en(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled), VID_IS_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_vid_is_en);
+
+int pmic_vid_load_detect_en(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, VID_LOAD_DETECT_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_vid_load_detect_en);
+
+int pmic_set_led_intensity(enum ledtype type, int level)
+{
+ return pmic_rpc_set_only(type, level, 0, 0, 2, SET_LED_INTENSITY_PROC);
+}
+EXPORT_SYMBOL(pmic_set_led_intensity);
+
+int pmic_flash_led_set_current(const uint16_t milliamps)
+{
+ return pmic_rpc_set_only(milliamps, 0, 0, 0, 1,
+ FLASH_LED_SET_CURRENT_PROC);
+}
+EXPORT_SYMBOL(pmic_flash_led_set_current);
+
+int pmic_flash_led_set_mode(enum flash_led_mode mode)
+{
+ return pmic_rpc_set_only((int)mode, 0, 0, 0, 1,
+ FLASH_LED_SET_MODE_PROC);
+}
+EXPORT_SYMBOL(pmic_flash_led_set_mode);
+
+int pmic_flash_led_set_polarity(enum flash_led_pol pol)
+{
+ return pmic_rpc_set_only((int)pol, 0, 0, 0, 1,
+ FLASH_LED_SET_POLARITY_PROC);
+}
+EXPORT_SYMBOL(pmic_flash_led_set_polarity);
+
+int pmic_spkr_add_right_left_chan(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1,
+ SPKR_ADD_RIGHT_LEFT_CHAN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_add_right_left_chan);
+
+int pmic_spkr_is_right_left_chan_added(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_RIGHT_LEFT_CHAN_ADDED_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_right_left_chan_added);
+
+int pmic_spkr_en_stereo(uint enable)
+{
+ return pmic_rpc_set_only(enable, 0, 0, 0, 1, SPKR_EN_STEREO_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_en_stereo);
+
+int pmic_spkr_is_stereo_en(uint *enabled)
+{
+ return pmic_rpc_get_only(enabled, sizeof(*enabled),
+ SPKR_IS_STEREO_EN_PROC);
+}
+EXPORT_SYMBOL(pmic_spkr_is_stereo_en);
+
+int pmic_hsed_set_period(
+ enum hsed_controller controller,
+ enum hsed_period_pre_div period_pre_div,
+ enum hsed_period_time period_time
+)
+{
+ return pmic_rpc_set_only(controller, period_pre_div, period_time, 0,
+ 3,
+ HSED_SET_PERIOD_PROC);
+}
+EXPORT_SYMBOL(pmic_hsed_set_period);
+
+int pmic_hsed_set_hysteresis(
+ enum hsed_controller controller,
+ enum hsed_hyst_pre_div hyst_pre_div,
+ enum hsed_hyst_time hyst_time
+)
+{
+ return pmic_rpc_set_only(controller, hyst_pre_div, hyst_time, 0,
+ 3,
+ HSED_SET_HYSTERESIS_PROC);
+}
+EXPORT_SYMBOL(pmic_hsed_set_hysteresis);
+
+int pmic_hsed_set_current_threshold(
+ enum hsed_controller controller,
+ enum hsed_switch switch_hsed,
+ uint32_t current_threshold
+)
+{
+ return pmic_rpc_set_only(controller, switch_hsed, current_threshold, 0,
+ 3,
+ HSED_SET_CURRENT_THRESHOLD_PROC);
+}
+EXPORT_SYMBOL(pmic_hsed_set_current_threshold);
+
+int pmic_hsed_enable(
+ enum hsed_controller controller,
+ enum hsed_enable enable_hsed
+)
+{
+ return pmic_rpc_set_only(controller, enable_hsed, 0, 0,
+ 2,
+ HSED_ENABLE_PROC);
+}
+EXPORT_SYMBOL(pmic_hsed_enable);
diff --git a/arch/arm/mach-msm/pmic8058-gpio.c b/arch/arm/mach-msm/pmic8058-gpio.c
new file mode 100644
index 000000000000..09a465d028d9
--- /dev/null
+++ b/arch/arm/mach-msm/pmic8058-gpio.c
@@ -0,0 +1,150 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 GPIO driver
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/mfd/pmic8058.h>
+#include "gpio_chip.h"
+
+#define PM8058_GPIO_TO_INT(n) (PMIC8058_IRQ_BASE + (n))
+
+static int pm8058_gpio_configure(struct gpio_chip *chip,
+ unsigned int gpio,
+ unsigned long flags)
+{
+ int rc = 0, direction;
+
+ gpio -= chip->start;
+
+ if (flags & (GPIOF_INPUT | GPIOF_DRIVE_OUTPUT)) {
+ direction = 0;
+ if (flags & GPIOF_INPUT)
+ direction |= PM_GPIO_DIR_IN;
+ if (flags & GPIOF_DRIVE_OUTPUT)
+ direction |= PM_GPIO_DIR_OUT;
+
+ if (flags & (GPIOF_OUTPUT_LOW | GPIOF_OUTPUT_HIGH)) {
+ if (flags & GPIOF_OUTPUT_HIGH)
+ rc = pm8058_gpio_set(gpio, 1);
+ else
+ rc = pm8058_gpio_set(gpio, 0);
+
+ if (rc) {
+ pr_err("%s: FAIL pm8058_gpio_set(): rc=%d.\n",
+ __func__, rc);
+ goto bail_out;
+ }
+ }
+
+ rc = pm8058_gpio_set_direction(gpio, direction);
+ if (rc)
+ pr_err("%s: FAIL pm8058_gpio_config(): rc=%d.\n",
+ __func__, rc);
+ }
+
+bail_out:
+ return rc;
+}
+
+static int pm8058_gpio_get_irq_num(struct gpio_chip *chip,
+ unsigned int gpio,
+ unsigned int *irqp,
+ unsigned long *irqnumflagsp)
+{
+ gpio -= chip->start;
+ *irqp = PM8058_GPIO_TO_INT(gpio);
+ if (irqnumflagsp)
+ *irqnumflagsp = 0;
+ return 0;
+}
+
+static int pm8058_gpio_read(struct gpio_chip *chip, unsigned n)
+{
+ n -= chip->start;
+ return pm8058_gpio_get(n);
+}
+
+static int pm8058_gpio_write(struct gpio_chip *chip, unsigned n, unsigned on)
+{
+ n -= chip->start;
+ return pm8058_gpio_set(n, on);
+}
+
+struct msm_gpio_chip pm8058_gpio_chip = {
+ .chip = {
+ .start = NR_GPIO_IRQS,
+ .end = NR_GPIO_IRQS + NR_PMIC8058_GPIO_IRQS - 1,
+ .configure = pm8058_gpio_configure,
+ .get_irq_num = pm8058_gpio_get_irq_num,
+ .read = pm8058_gpio_read,
+ .write = pm8058_gpio_write,
+ }
+};
+
+static int __init pm8058_gpio_init(void)
+{
+ int rc;
+
+ rc = register_gpio_chip(&pm8058_gpio_chip.chip);
+ pr_info("%s: register_gpio_chip(): rc=%d\n", __func__, rc);
+
+ return rc;
+}
+device_initcall(pm8058_gpio_init);
diff --git a/arch/arm/mach-msm/pmic8058-mpp.c b/arch/arm/mach-msm/pmic8058-mpp.c
new file mode 100644
index 000000000000..5c4f396c544b
--- /dev/null
+++ b/arch/arm/mach-msm/pmic8058-mpp.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * Qualcomm PMIC8058 MPP driver
+ *
+ */
+
+#include <linux/gpio.h>
+#include <linux/mfd/pmic8058.h>
+#include "gpio_chip.h"
+
+#define PM8058_MPP_TO_INT(n) (PMIC8058_IRQ_BASE + NR_PMIC8058_GPIO_IRQS + (n))
+
+static int pm8058_mpp_get_irq_num(struct gpio_chip *chip,
+ unsigned int gpio,
+ unsigned int *irqp,
+ unsigned long *irqnumflagsp)
+{
+ gpio -= chip->start;
+ *irqp = PM8058_MPP_TO_INT(gpio);
+ if (irqnumflagsp)
+ *irqnumflagsp = 0;
+ return 0;
+}
+
+static int pm8058_mpp_read(struct gpio_chip *chip, unsigned n)
+{
+ n -= chip->start;
+ return pm8058_mpp_get(n);
+}
+
+struct msm_gpio_chip pm8058_mpp_chip = {
+ .chip = {
+ .start = NR_GPIO_IRQS + NR_PMIC8058_GPIO_IRQS,
+ .end = NR_GPIO_IRQS + NR_PMIC8058_GPIO_IRQS +
+ NR_PMIC8058_MPP_IRQS - 1,
+ .get_irq_num = pm8058_mpp_get_irq_num,
+ .read = pm8058_mpp_read,
+ }
+};
+
+static int __init pm8058_mpp_init(void)
+{
+ int rc;
+
+ rc = register_gpio_chip(&pm8058_mpp_chip.chip);
+ pr_info("%s: register_gpio_chip(): rc=%d\n", __func__, rc);
+
+ return rc;
+}
+device_initcall(pm8058_mpp_init);
diff --git a/arch/arm/mach-msm/pmic_debugfs.c b/arch/arm/mach-msm/pmic_debugfs.c
new file mode 100644
index 000000000000..3614c3bcde0e
--- /dev/null
+++ b/arch/arm/mach-msm/pmic_debugfs.c
@@ -0,0 +1,1200 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/uaccess.h>
+
+#include <mach/pmic.h>
+
+
+static int debug_lp_mode_control(char *buf, int size)
+{
+ enum switch_cmd cmd;
+ enum vreg_lp_id id;
+ int cnt;
+
+
+ cnt = sscanf(buf, "%u %u", &cmd, &id);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d", __func__, cnt);
+ return -EINVAL;
+ }
+
+ if (pmic_lp_mode_control(cmd, id) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_vreg_set_level(char *buf, int size)
+{
+ enum vreg_id vreg;
+ int level;
+ int cnt;
+
+ cnt = sscanf(buf, "%u %u", &vreg, &level);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d", __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_vreg_set_level(vreg, level) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_vreg_pull_down_switch(char *buf, int size)
+{
+ enum switch_cmd cmd;
+ enum vreg_pdown_id id;
+ int cnt;
+
+ cnt = sscanf(buf, "%u %u", &cmd, &id);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d", __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_vreg_pull_down_switch(cmd, id) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_secure_mpp_control_digital_output(char *buf, int size)
+{
+ enum mpp_which which;
+ enum mpp_dlogic_level level;
+ enum mpp_dlogic_out_ctrl out;
+ int cnt;
+
+ cnt = sscanf(buf, "%u %u %u", &which, &level, &out);
+ if (cnt < 3) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+
+ if (pmic_secure_mpp_control_digital_output(which, level, out) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_secure_mpp_config_i_sink(char *buf, int size)
+{
+ enum mpp_which which;
+ enum mpp_i_sink_level level;
+ enum mpp_i_sink_switch onoff;
+ int cnt;
+
+ cnt = sscanf(buf, "%u %u %u", &which, &level, &onoff);
+ if (cnt < 3) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+
+ if (pmic_secure_mpp_config_i_sink(which, level, onoff) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_secure_mpp_config_digital_input(char *buf, int size)
+{
+ enum mpp_which which;
+ enum mpp_dlogic_level level;
+ enum mpp_dlogic_in_dbus dbus;
+ int cnt;
+
+ cnt = sscanf(buf, "%u %u %u", &which, &level, &dbus);
+ if (cnt < 3) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_secure_mpp_config_digital_input(which, level, dbus) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_rtc_start(char *buf, int size)
+{
+ uint time;
+ struct rtc_time *hal;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &time);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ hal = (struct rtc_time *)&time;
+ if (pmic_rtc_start(hal) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_rtc_stop(char *buf, int size)
+{
+ if (pmic_rtc_stop() < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_rtc_get_time(char *buf, int size)
+{
+ uint time;
+ struct rtc_time *hal;
+
+ hal = (struct rtc_time *)&time;
+ if (pmic_rtc_get_time(hal) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", time);
+}
+
+static int debug_rtc_alarm_ndx;
+
+int debug_rtc_enable_alarm(char *buf, int size)
+{
+ enum rtc_alarm alarm;
+ struct rtc_time *hal;
+ uint time;
+ int cnt;
+
+
+ cnt = sscanf(buf, "%u %u", &alarm, &time);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ hal = (struct rtc_time *)&time;
+
+ if (pmic_rtc_enable_alarm(alarm, hal) < 0)
+ return -EFAULT;
+
+ debug_rtc_alarm_ndx = alarm;
+ return size;
+}
+
+static int debug_rtc_disable_alarm(char *buf, int size)
+{
+
+ enum rtc_alarm alarm;
+ int cnt;
+
+ cnt = sscanf(buf, "%u", &alarm);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_rtc_disable_alarm(alarm) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_rtc_get_alarm_time(char *buf, int size)
+{
+ uint time;
+ struct rtc_time *hal;
+
+ hal = (struct rtc_time *)&time;
+ if (pmic_rtc_get_alarm_time(debug_rtc_alarm_ndx, hal) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", time);
+}
+static int debug_rtc_get_alarm_status(char *buf, int size)
+{
+ int status;;
+
+ if (pmic_rtc_get_alarm_status(&status) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", status);
+
+}
+
+static int debug_rtc_set_time_adjust(char *buf, int size)
+{
+ uint adjust;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &adjust);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_rtc_set_time_adjust(adjust) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_rtc_get_time_adjust(char *buf, int size)
+{
+ int adjust;;
+
+ if (pmic_rtc_get_time_adjust(&adjust) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", adjust);
+}
+
+static int debug_set_led_intensity(char *buf, int size)
+{
+ enum ledtype type;
+ int level;
+ int cnt;
+
+ cnt = sscanf(buf, "%u %d", &type, &level);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_set_led_intensity(type, level) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_flash_led_set_current(char *buf, int size)
+{
+ int milliamps;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &milliamps);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_flash_led_set_current(milliamps) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_flash_led_set_mode(char *buf, int size)
+{
+
+ uint mode;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &mode);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_flash_led_set_mode(mode) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_flash_led_set_polarity(char *buf, int size)
+{
+ int pol;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &pol);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_flash_led_set_polarity(pol) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_speaker_cmd(char *buf, int size)
+{
+ int cmd;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &cmd);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_speaker_cmd(cmd) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_set_speaker_gain(char *buf, int size)
+{
+ int gain;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &gain);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_set_speaker_gain(gain) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_mic_en(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_mic_en(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_mic_is_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_mic_is_en(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_mic_set_volt(char *buf, int size)
+{
+ int vol;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &vol);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_mic_set_volt(vol) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_mic_get_volt(char *buf, int size)
+{
+ uint vol;
+
+ if (pmic_mic_get_volt(&vol) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", vol);
+}
+
+static int debug_spkr_en_right_chan(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_en_right_chan(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_spkr_is_right_chan_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_right_chan_en(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+static int debug_spkr_en_left_chan(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_en_left_chan(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_spkr_is_left_chan_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_left_chan_en(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_set_spkr_configuration(char *buf, int size)
+{
+
+ struct spkr_config_mode cfg;
+ int cnt;
+
+ cnt = sscanf(buf, "%d %d %d %d %d %d %d %d",
+ &cfg.is_right_chan_en,
+ &cfg.is_left_chan_en,
+ &cfg.is_right_left_chan_added,
+ &cfg.is_stereo_en,
+ &cfg.is_usb_with_hpf_20hz,
+ &cfg.is_mux_bypassed,
+ &cfg.is_hpf_en,
+ &cfg.is_sink_curr_from_ref_volt_cir_en);
+
+ if (cnt < 8) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+
+ if (pmic_set_spkr_configuration(&cfg) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_get_spkr_configuration(char *buf, int size)
+{
+ struct spkr_config_mode cfg;
+
+ if (pmic_get_spkr_configuration(&cfg) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d %d %d %d %d %d %d %d\n",
+ cfg.is_right_chan_en,
+ cfg.is_left_chan_en,
+ cfg.is_right_left_chan_added,
+ cfg.is_stereo_en,
+ cfg.is_usb_with_hpf_20hz,
+ cfg.is_mux_bypassed,
+ cfg.is_hpf_en,
+ cfg.is_sink_curr_from_ref_volt_cir_en);
+
+}
+
+static int debug_set_speaker_delay(char *buf, int size)
+{
+ int delay;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &delay);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_set_speaker_delay(delay) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_speaker_1k6_zin_enable(char *buf, int size)
+{
+ uint enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%u", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_speaker_1k6_zin_enable(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_spkr_set_mux_hpf_corner_freq(char *buf, int size)
+{
+ int freq;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &freq);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_set_mux_hpf_corner_freq(freq) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_spkr_get_mux_hpf_corner_freq(char *buf, int size)
+{
+ uint freq;
+
+ if (pmic_spkr_get_mux_hpf_corner_freq(&freq) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", freq);
+}
+
+static int debug_spkr_add_right_left_chan(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_add_right_left_chan(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_spkr_is_right_left_chan_added(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_right_left_chan_added(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_spkr_en_stereo(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_en_stereo(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_spkr_is_stereo_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_stereo_en(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_spkr_select_usb_with_hpf_20hz(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_select_usb_with_hpf_20hz(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_spkr_is_usb_with_hpf_20hz(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_usb_with_hpf_20hz(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_spkr_bypass_mux(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_bypass_mux(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_spkr_is_mux_bypassed(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_mux_bypassed(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_spkr_en_hpf(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_en_hpf(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_spkr_is_hpf_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_hpf_en(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_spkr_en_sink_curr_from_ref_volt_cir(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_en_sink_curr_from_ref_volt_cir(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_spkr_is_sink_curr_from_ref_volt_cir_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_sink_curr_from_ref_volt_cir_en(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_vib_mot_set_volt(char *buf, int size)
+{
+ int vol;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &vol);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_vib_mot_set_volt(vol) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_vib_mot_set_mode(char *buf, int size)
+{
+ int mode;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &mode);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_vib_mot_set_mode(mode) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+static int debug_vib_mot_set_polarity(char *buf, int size)
+{
+ int pol;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &pol);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_vib_mot_set_polarity(pol) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_vid_en(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_vid_en(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+static int debug_vid_is_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_vid_is_en(&enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_vid_load_detect_en(char *buf, int size)
+{
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d", &enable);
+ if (cnt < 1) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_vid_load_detect_en(enable) < 0)
+ return -EFAULT;
+
+ return size;
+}
+
+/**************************************************
+ * speaker indexed by left_right
+**************************************************/
+static enum spkr_left_right debug_spkr_left_right = LEFT_SPKR;
+
+static int debug_spkr_en(char *buf, int size)
+{
+ int left_right;
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d %d", &left_right, &enable);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_en(left_right, enable) >= 0) {
+ debug_spkr_left_right = left_right;
+ return size;
+ }
+ return -EFAULT;
+}
+
+static int debug_spkr_is_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_en(debug_spkr_left_right, &enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+static int debug_spkr_set_gain(char *buf, int size)
+{
+ int left_right;
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d %d", &left_right, &enable);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_set_gain(left_right, enable) >= 0) {
+ debug_spkr_left_right = left_right;
+ return size;
+ }
+ return -EFAULT;
+}
+
+static int debug_spkr_get_gain(char *buf, int size)
+{
+ uint gain;
+
+ if (pmic_spkr_get_gain(debug_spkr_left_right, &gain) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", gain);
+}
+static int debug_spkr_set_delay(char *buf, int size)
+{
+ int left_right;
+ int delay;
+ int cnt;
+
+ cnt = sscanf(buf, "%d %d", &left_right, &delay);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_set_delay(left_right, delay) >= 0) {
+ debug_spkr_left_right = left_right;
+ return size;
+ }
+ return -EFAULT;
+}
+
+static int debug_spkr_get_delay(char *buf, int size)
+{
+ uint delay;
+
+ if (pmic_spkr_get_delay(debug_spkr_left_right, &delay) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", delay);
+}
+
+static int debug_spkr_en_mute(char *buf, int size)
+{
+ int left_right;
+ int enable;
+ int cnt;
+
+ cnt = sscanf(buf, "%d %d", &left_right, &enable);
+ if (cnt < 2) {
+ printk(KERN_ERR "%s: sscanf failed cnt=%d" , __func__, cnt);
+ return -EINVAL;
+ }
+ if (pmic_spkr_en_mute(left_right, enable) >= 0) {
+ debug_spkr_left_right = left_right;
+ return size;
+ }
+ return -EFAULT;
+}
+
+static int debug_spkr_is_mute_en(char *buf, int size)
+{
+ int enabled;
+
+ if (pmic_spkr_is_mute_en(debug_spkr_left_right, &enabled) < 0)
+ return -EFAULT;
+
+ return snprintf(buf, size, "%d\n", enabled);
+}
+
+/*******************************************************************
+ * debug function table
+*******************************************************************/
+
+struct pmic_debug_desc {
+ int (*get) (char *, int);
+ int (*set) (char *, int);
+};
+
+struct pmic_debug_desc pmic_debug[] = {
+ {NULL, NULL}, /*LIB_NULL_PROC */
+ {NULL, NULL}, /* LIB_RPC_GLUE_CODE_INFO_REMOTE_PROC */
+ {NULL, debug_lp_mode_control}, /* LP_MODE_CONTROL_PROC */
+ {NULL, debug_vreg_set_level}, /*VREG_SET_LEVEL_PROC */
+ {NULL, debug_vreg_pull_down_switch}, /*VREG_PULL_DOWN_SWITCH_PROC */
+ {NULL, debug_secure_mpp_control_digital_output},
+ /* SECURE_MPP_CONFIG_DIGITAL_OUTPUT_PROC */
+ /*SECURE_MPP_CONFIG_I_SINK_PROC */
+ {NULL, debug_secure_mpp_config_i_sink},
+ {NULL, debug_rtc_start}, /*RTC_START_PROC */
+ {NULL, debug_rtc_stop}, /* RTC_STOP_PROC */
+ {debug_rtc_get_time, NULL}, /* RTC_GET_TIME_PROC */
+ {NULL, debug_rtc_enable_alarm}, /* RTC_ENABLE_ALARM_PROC */
+ {NULL , debug_rtc_disable_alarm}, /*RTC_DISABLE_ALARM_PROC */
+ {debug_rtc_get_alarm_time, NULL}, /* RTC_GET_ALARM_TIME_PROC */
+ {debug_rtc_get_alarm_status, NULL}, /* RTC_GET_ALARM_STATUS_PROC */
+ {NULL, debug_rtc_set_time_adjust}, /* RTC_SET_TIME_ADJUST_PROC */
+ {debug_rtc_get_time_adjust, NULL}, /* RTC_GET_TIME_ADJUST_PROC */
+ {NULL, debug_set_led_intensity}, /* SET_LED_INTENSITY_PROC */
+ {NULL, debug_flash_led_set_current}, /* FLASH_LED_SET_CURRENT_PROC */
+ {NULL, debug_flash_led_set_mode}, /* FLASH_LED_SET_MODE_PROC */
+ {NULL, debug_flash_led_set_polarity}, /* FLASH_LED_SET_POLARITY_PROC */
+ {NULL, debug_speaker_cmd}, /* SPEAKER_CMD_PROC */
+ {NULL, debug_set_speaker_gain}, /* SET_SPEAKER_GAIN_PROC */
+ {NULL, debug_vib_mot_set_volt}, /* VIB_MOT_SET_VOLT_PROC */
+ {NULL, debug_vib_mot_set_mode}, /* VIB_MOT_SET_MODE_PROC */
+ {NULL, debug_vib_mot_set_polarity}, /* VIB_MOT_SET_POLARITY_PROC */
+ {NULL, debug_vid_en}, /* VID_EN_PROC */
+ {debug_vid_is_en, NULL}, /* VID_IS_EN_PROC */
+ {NULL, debug_vid_load_detect_en}, /* VID_LOAD_DETECT_EN_PROC */
+ {NULL, debug_mic_en}, /* MIC_EN_PROC */
+ {debug_mic_is_en, NULL}, /* MIC_IS_EN_PROC */
+ {NULL, debug_mic_set_volt}, /* MIC_SET_VOLT_PROC */
+ {debug_mic_get_volt, NULL}, /* MIC_GET_VOLT_PROC */
+ {NULL, debug_spkr_en_right_chan}, /* SPKR_EN_RIGHT_CHAN_PROC */
+ {debug_spkr_is_right_chan_en, NULL}, /* SPKR_IS_RIGHT_CHAN_EN_PROC */
+ {NULL, debug_spkr_en_left_chan}, /* SPKR_EN_LEFT_CHAN_PROC */
+ {debug_spkr_is_left_chan_en, NULL}, /* SPKR_IS_LEFT_CHAN_EN_PROC */
+ {NULL, debug_set_spkr_configuration}, /* SET_SPKR_CONFIGURATION_PROC */
+ {debug_get_spkr_configuration, NULL}, /* GET_SPKR_CONFIGURATION_PROC */
+ {debug_spkr_get_gain, NULL}, /* SPKR_GET_GAIN_PROC */
+ {debug_spkr_is_en, NULL}, /* SPKR_IS_EN_PROC */
+ {NULL, debug_spkr_en_mute}, /* SPKR_EN_MUTE_PROC */
+ {debug_spkr_is_mute_en, NULL}, /* SPKR_IS_MUTE_EN_PROC */
+ {NULL, debug_spkr_set_delay}, /* SPKR_SET_DELAY_PROC */
+ {debug_spkr_get_delay, NULL}, /* SPKR_GET_DELAY_PROC */
+ /* SECURE_MPP_CONFIG_DIGITAL_INPUT_PROC */
+ {NULL, debug_secure_mpp_config_digital_input},
+ {NULL, debug_set_speaker_delay}, /* SET_SPEAKER_DELAY_PROC */
+ {NULL, debug_speaker_1k6_zin_enable}, /* SPEAKER_1K6_ZIN_ENABLE_PROC */
+ /* SPKR_SET_MUX_HPF_CORNER_FREQ_PROC */
+ {NULL, debug_spkr_set_mux_hpf_corner_freq},
+ /* SPKR_GET_MUX_HPF_CORNER_FREQ_PROC */
+ {debug_spkr_get_mux_hpf_corner_freq, NULL},
+ /* SPKR_IS_RIGHT_LEFT_CHAN_ADDED_PROC */
+ {debug_spkr_is_right_left_chan_added, NULL},
+ {NULL, debug_spkr_en_stereo}, /* SPKR_EN_STEREO_PROC */
+ {debug_spkr_is_stereo_en, NULL}, /* SPKR_IS_STEREO_EN_PROC */
+ /* SPKR_SELECT_USB_WITH_HPF_20HZ_PROC */
+ {NULL, debug_spkr_select_usb_with_hpf_20hz},
+ /* SPKR_IS_USB_WITH_HPF_20HZ_PROC */
+ {debug_spkr_is_usb_with_hpf_20hz, NULL},
+ {NULL, debug_spkr_bypass_mux}, /* SPKR_BYPASS_MUX_PROC */
+ {debug_spkr_is_mux_bypassed, NULL}, /* SPKR_IS_MUX_BYPASSED_PROC */
+ {NULL, debug_spkr_en_hpf}, /* SPKR_EN_HPF_PROC */
+ { debug_spkr_is_hpf_en, NULL}, /* SPKR_IS_HPF_EN_PROC */
+ /* SPKR_EN_SINK_CURR_FROM_REF_VOLT_CIR_PROC */
+ {NULL, debug_spkr_en_sink_curr_from_ref_volt_cir},
+ /* SPKR_IS_SINK_CURR_FROM_REF_VOLT_CIR_EN_PROC */
+ {debug_spkr_is_sink_curr_from_ref_volt_cir_en, NULL},
+ /* SPKR_ADD_RIGHT_LEFT_CHAN_PROC */
+ {NULL, debug_spkr_add_right_left_chan},
+ {NULL, debug_spkr_set_gain}, /* SPKR_SET_GAIN_PROC */
+ {NULL , debug_spkr_en}, /* SPKR_EN_PROC */
+};
+
+/***********************************************************************/
+
+#define PROC_END (sizeof(pmic_debug)/sizeof(struct pmic_debug_desc))
+
+
+#define PMIC_DEBUG_BUF 512
+
+static int debug_proc; /* PROC's index */
+
+static char debug_buf[PMIC_DEBUG_BUF];
+
+static int proc_index_set(void *data, u64 val)
+{
+ int ndx;
+
+ ndx = (int)val;
+
+ if (ndx >= 0 && ndx <= PROC_END)
+ debug_proc = ndx;
+
+ return 0;
+}
+
+static int proc_index_get(void *data, u64 *val)
+{
+ *val = (u64)debug_proc;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(
+ proc_index_fops,
+ proc_index_get,
+ proc_index_set,
+ "%llu\n");
+
+
+static int pmic_debugfs_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ return 0;
+}
+
+static int pmic_debugfs_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+static ssize_t pmic_debugfs_write(
+ struct file *file,
+ const char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct pmic_debug_desc *pd;
+ int len = 0;
+
+ printk(KERN_INFO "%s: proc=%d count=%d *ppos=%d\n",
+ __func__, debug_proc, count, (uint)*ppos);
+
+ if (count > sizeof(debug_buf))
+ return -EFAULT;
+
+ if (copy_from_user(debug_buf, buff, count))
+ return -EFAULT;
+
+
+ debug_buf[count] = 0; /* end of string */
+
+ pd = &pmic_debug[debug_proc];
+
+ if (pd->set) {
+ len = pd->set(debug_buf, count);
+ printk(KERN_INFO "%s: len=%d\n", __func__, len);
+ return len;
+ }
+
+ return 0;
+}
+
+static ssize_t pmic_debugfs_read(
+ struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct pmic_debug_desc *pd;
+ int len = 0;
+
+ printk(KERN_INFO "%s: proc=%d count=%d *ppos=%d\n",
+ __func__, debug_proc, count, (uint)*ppos);
+
+ pd = &pmic_debug[debug_proc];
+
+ if (*ppos)
+ return 0; /* the end */
+
+ if (pd->get) {
+ len = pd->get(debug_buf, sizeof(debug_buf));
+ if (len > 0) {
+ if (len > count)
+ len = count;
+ if (copy_to_user(buff, debug_buf, len))
+ return -EFAULT;
+ }
+ }
+
+ printk(KERN_INFO "%s: len=%d\n", __func__, len);
+
+ if (len < 0)
+ return 0;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations pmic_debugfs_fops = {
+ .open = pmic_debugfs_open,
+ .release = pmic_debugfs_release,
+ .read = pmic_debugfs_read,
+ .write = pmic_debugfs_write,
+};
+
+static int __init pmic_debugfs_init(void)
+{
+ struct dentry *dent = debugfs_create_dir("pmic", NULL);
+
+ if (IS_ERR(dent)) {
+ printk(KERN_ERR "%s(%d): debugfs_create_dir fail, error %ld\n",
+ __FILE__, __LINE__, PTR_ERR(dent));
+ return -1;
+ }
+
+ if (debugfs_create_file("index", 0644, dent, 0, &proc_index_fops)
+ == NULL) {
+ printk(KERN_ERR "%s(%d): debugfs_create_file: index fail\n",
+ __FILE__, __LINE__);
+ return -1;
+ }
+
+ if (debugfs_create_file("debug", 0644, dent, 0, &pmic_debugfs_fops)
+ == NULL) {
+ printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
+ __FILE__, __LINE__);
+ return -1;
+ }
+
+ debug_proc = 0;
+ debug_rtc_alarm_ndx = 0;
+
+ return 0;
+}
+
+late_initcall(pmic_debugfs_init);
diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c
index 915ee704ed3c..937e59b46dfe 100644
--- a/arch/arm/mach-msm/proc_comm.c
+++ b/arch/arm/mach-msm/proc_comm.c
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/proc_comm.c
*
* Copyright (C) 2007-2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -18,16 +19,21 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/spinlock.h>
+#include <linux/module.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include "proc_comm.h"
-#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4)
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_TRIG_A2M_INT(n) (writel(1 << n, MSM_GCC_BASE + 0x8))
+#else
+#define MSM_TRIG_A2M_INT(n) (writel(1, MSM_CSR_BASE + 0x400 + (n) * 4))
+#endif
static inline void notify_other_proc_comm(void)
{
- writel(1, MSM_A2M_INT(6));
+ MSM_TRIG_A2M_INT(6);
}
#define APP_COMMAND 0x00
@@ -43,68 +49,89 @@ static inline void notify_other_proc_comm(void)
static DEFINE_SPINLOCK(proc_comm_lock);
/* The higher level SMD support will install this to
- * provide a way to check for and handle modem restart.
+ * provide a way to check for and handle modem restart?
*/
int (*msm_check_for_modem_crash)(void);
/* Poll for a state change, checking for possible
* modem crashes along the way (so we don't wait
- * forever while the ARM9 is blowing up).
+ * forever while the ARM9 is blowing up.
*
* Return an error in the event of a modem crash and
* restart so the msm_proc_comm() routine can restart
* the operation from the beginning.
*/
-static int proc_comm_wait_for(void __iomem *addr, unsigned value)
+static int proc_comm_wait_for(unsigned addr, unsigned value)
{
- for (;;) {
+ while (1) {
if (readl(addr) == value)
return 0;
if (msm_check_for_modem_crash)
if (msm_check_for_modem_crash())
return -EAGAIN;
+
+ udelay(5);
}
}
+void msm_proc_comm_reset_modem_now(void)
+{
+ unsigned base = (unsigned)MSM_SHARED_RAM_BASE;
+ unsigned long flags;
+
+ spin_lock_irqsave(&proc_comm_lock, flags);
+
+again:
+ if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY))
+ goto again;
+
+ writel(PCOM_RESET_MODEM, base + APP_COMMAND);
+ writel(0, base + APP_DATA1);
+ writel(0, base + APP_DATA2);
+
+ spin_unlock_irqrestore(&proc_comm_lock, flags);
+
+ notify_other_proc_comm();
+
+ return;
+}
+EXPORT_SYMBOL(msm_proc_comm_reset_modem_now);
+
int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2)
{
- void __iomem *base = MSM_SHARED_RAM_BASE;
+ unsigned base = (unsigned)MSM_SHARED_RAM_BASE;
unsigned long flags;
int ret;
spin_lock_irqsave(&proc_comm_lock, flags);
- for (;;) {
- if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY))
- continue;
-
- writel(cmd, base + APP_COMMAND);
- writel(data1 ? *data1 : 0, base + APP_DATA1);
- writel(data2 ? *data2 : 0, base + APP_DATA2);
-
- notify_other_proc_comm();
-
- if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE))
- continue;
-
- if (readl(base + APP_STATUS) != PCOM_CMD_FAIL) {
- if (data1)
- *data1 = readl(base + APP_DATA1);
- if (data2)
- *data2 = readl(base + APP_DATA2);
- ret = 0;
- } else {
- ret = -EIO;
- }
- break;
+again:
+ if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY))
+ goto again;
+
+ writel(cmd, base + APP_COMMAND);
+ writel(data1 ? *data1 : 0, base + APP_DATA1);
+ writel(data2 ? *data2 : 0, base + APP_DATA2);
+
+ notify_other_proc_comm();
+
+ if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE))
+ goto again;
+
+ if (readl(base + APP_STATUS) == PCOM_CMD_SUCCESS) {
+ if (data1)
+ *data1 = readl(base + APP_DATA1);
+ if (data2)
+ *data2 = readl(base + APP_DATA2);
+ ret = 0;
+ } else {
+ ret = -EIO;
}
writel(PCOM_CMD_IDLE, base + APP_COMMAND);
spin_unlock_irqrestore(&proc_comm_lock, flags);
-
return ret;
}
-
-
+EXPORT_SYMBOL(msm_proc_comm);
diff --git a/arch/arm/mach-msm/proc_comm.h b/arch/arm/mach-msm/proc_comm.h
index 834760f25692..5e08f050d07a 100644
--- a/arch/arm/mach-msm/proc_comm.h
+++ b/arch/arm/mach-msm/proc_comm.h
@@ -1,6 +1,6 @@
/* arch/arm/mach-msm/proc_comm.h
*
- * Copyright (c) 2007 QUALCOMM Incorporated
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -13,8 +13,8 @@
*
*/
-#ifndef _ARCH_ARM_MACH_MSM_PROC_COMM_H_
-#define _ARCH_ARM_MACH_MSM_PROC_COMM_H_
+#ifndef _ARCH_ARM_MACH_MSM_MSM_PROC_COMM_H_
+#define _ARCH_ARM_MACH_MSM_MSM_PROC_COMM_H_
enum {
PCOM_CMD_IDLE = 0x0,
@@ -62,104 +62,110 @@ enum {
PCOM_RESET_CHIP_IMM,
PCOM_PM_VID_EN,
PCOM_VREG_PULLDOWN,
- PCOM_NUM_CMDS,
+ PCOM_GET_MODEM_VERSION,
+ PCOM_CLK_REGIME_SEC_RESET,
+ PCOM_CLK_REGIME_SEC_RESET_ASSERT,
+ PCOM_CLK_REGIME_SEC_RESET_DEASSERT,
+ PCOM_CLK_REGIME_SEC_PLL_REQUEST_WRP,
+ PCOM_CLK_REGIME_SEC_ENABLE,
+ PCOM_CLK_REGIME_SEC_DISABLE,
+ PCOM_CLK_REGIME_SEC_IS_ON,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_INV,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_SRC,
+ PCOM_CLK_REGIME_SEC_SEL_CLK_DIV,
+ PCOM_CLK_REGIME_SEC_ICODEC_CLK_ENABLE,
+ PCOM_CLK_REGIME_SEC_ICODEC_CLK_DISABLE,
+ PCOM_CLK_REGIME_SEC_SEL_SPEED,
+ PCOM_CLK_REGIME_SEC_CONFIG_GP_CLK_WRP,
+ PCOM_CLK_REGIME_SEC_CONFIG_MDH_CLK_WRP,
+ PCOM_CLK_REGIME_SEC_USB_XTAL_ON,
+ PCOM_CLK_REGIME_SEC_USB_XTAL_OFF,
+ PCOM_CLK_REGIME_SEC_SET_QDSP_DME_MODE,
+ PCOM_CLK_REGIME_SEC_SWITCH_ADSP_CLK,
+ PCOM_CLK_REGIME_SEC_GET_MAX_ADSP_CLK_KHZ,
+ PCOM_CLK_REGIME_SEC_GET_I2C_CLK_KHZ,
+ PCOM_CLK_REGIME_SEC_MSM_GET_CLK_FREQ_KHZ,
+ PCOM_CLK_REGIME_SEC_SEL_VFE_SRC,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_CAMCLK,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_LCDCLK,
+ PCOM_CLK_REGIME_SEC_VFE_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_VFE_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_GRP_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_GRP_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_VDC_RAIL_OFF,
+ PCOM_CLK_REGIME_SEC_VDC_RAIL_ON,
+ PCOM_CLK_REGIME_SEC_LCD_CTRL,
+ PCOM_CLK_REGIME_SEC_REGISTER_FOR_CPU_RESOURCE,
+ PCOM_CLK_REGIME_SEC_DEREGISTER_FOR_CPU_RESOURCE,
+ PCOM_CLK_REGIME_SEC_RESOURCE_REQUEST_WRP,
+ PCOM_CLK_REGIME_MSM_SEC_SEL_CLK_OWNER,
+ PCOM_CLK_REGIME_SEC_DEVMAN_REQUEST_WRP,
+ PCOM_GPIO_CONFIG,
+ PCOM_GPIO_CONFIGURE_GROUP,
+ PCOM_GPIO_TLMM_SET_PORT,
+ PCOM_GPIO_TLMM_CONFIG_EX,
+ PCOM_SET_FTM_BOOT_COUNT,
+ PCOM_RESERVED0,
+ PCOM_RESERVED1,
+ PCOM_CUSTOMER_CMD1,
+ PCOM_CUSTOMER_CMD2,
+ PCOM_CUSTOMER_CMD3,
+ PCOM_CLK_REGIME_ENTER_APPSBL_CHG_MODE,
+ PCOM_CLK_REGIME_EXIT_APPSBL_CHG_MODE,
+ PCOM_CLK_REGIME_SEC_RAIL_DISABLE,
+ PCOM_CLK_REGIME_SEC_RAIL_ENABLE,
+ PCOM_CLK_REGIME_SEC_RAIL_CONTROL,
+ PCOM_SET_SW_WATCHDOG_STATE,
+ PCOM_PM_MPP_CONFIG_DIGITAL_INPUT,
+ PCOM_PM_MPP_CONFIG_I_SINK,
+ PCOM_RESERVED_101,
+ PCOM_MSM_HSUSB_PHY_RESET,
+ PCOM_GET_BATT_MV_LEVEL,
+ PCOM_CHG_USB_IS_PC_CONNECTED,
+ PCOM_CHG_USB_IS_CHARGER_CONNECTED,
+ PCOM_CHG_USB_IS_DISCONNECTED,
+ PCOM_CHG_USB_IS_AVAILABLE,
+ PCOM_CLK_REGIME_SEC_MSM_SEL_FREQ,
+ PCOM_CLK_REGIME_SEC_SET_PCLK_AXI_POLICY,
+ PCOM_CLKCTL_RPC_RESET_ASSERT,
+ PCOM_CLKCTL_RPC_RESET_DEASSERT,
+ PCOM_CLKCTL_RPC_RAIL_ON,
+ PCOM_CLKCTL_RPC_RAIL_OFF,
+ PCOM_CLKCTL_RPC_RAIL_ENABLE,
+ PCOM_CLKCTL_RPC_RAIL_DISABLE,
+ PCOM_CLKCTL_RPC_RAIL_CONTROL,
+ PCOM_CLKCTL_RPC_MIN_MSMC1,
};
enum {
- PCOM_INVALID_STATUS = 0x0,
- PCOM_READY,
- PCOM_CMD_RUNNING,
- PCOM_CMD_SUCCESS,
- PCOM_CMD_FAIL,
-};
-
-/* List of VREGs that support the Pull Down Resistor setting. */
-enum {
- PM_VREG_PDOWN_MSMA_ID,
- PM_VREG_PDOWN_MSMP_ID,
- PM_VREG_PDOWN_MSME1_ID, /* Not supported in Panoramix */
- PM_VREG_PDOWN_MSMC1_ID, /* Not supported in PM6620 */
- PM_VREG_PDOWN_MSMC2_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP3_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_MSME2_ID, /* Supported in PM7500 and Panoramix only */
- PM_VREG_PDOWN_GP4_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP1_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_TCXO_ID,
- PM_VREG_PDOWN_PA_ID,
- PM_VREG_PDOWN_RFTX_ID,
- PM_VREG_PDOWN_RFRX1_ID,
- PM_VREG_PDOWN_RFRX2_ID,
- PM_VREG_PDOWN_SYNT_ID,
- PM_VREG_PDOWN_WLAN_ID,
- PM_VREG_PDOWN_USB_ID,
- PM_VREG_PDOWN_MMC_ID,
- PM_VREG_PDOWN_RUIM_ID,
- PM_VREG_PDOWN_MSMC0_ID, /* Supported in PM6610 only */
- PM_VREG_PDOWN_GP2_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP5_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_GP6_ID, /* Supported in PM7500 only */
- PM_VREG_PDOWN_RF_ID,
- PM_VREG_PDOWN_RF_VCO_ID,
- PM_VREG_PDOWN_MPLL_ID,
- PM_VREG_PDOWN_S2_ID,
- PM_VREG_PDOWN_S3_ID,
- PM_VREG_PDOWN_RFUBM_ID,
+ PCOM_OEM_FIRST_CMD = 0x10000000,
+ PCOM_OEM_TEST_CMD = PCOM_OEM_FIRST_CMD,
- /* new for HAN */
- PM_VREG_PDOWN_RF1_ID,
- PM_VREG_PDOWN_RF2_ID,
- PM_VREG_PDOWN_RFA_ID,
- PM_VREG_PDOWN_CDC2_ID,
- PM_VREG_PDOWN_RFTX2_ID,
- PM_VREG_PDOWN_USIM_ID,
- PM_VREG_PDOWN_USB2P6_ID,
- PM_VREG_PDOWN_USB3P3_ID,
- PM_VREG_PDOWN_INVALID_ID,
+ /* add OEM PROC COMM commands here */
- /* backward compatible enums only */
- PM_VREG_PDOWN_CAM_ID = PM_VREG_PDOWN_GP1_ID,
- PM_VREG_PDOWN_MDDI_ID = PM_VREG_PDOWN_GP2_ID,
- PM_VREG_PDOWN_RUIM2_ID = PM_VREG_PDOWN_GP3_ID,
- PM_VREG_PDOWN_AUX_ID = PM_VREG_PDOWN_GP4_ID,
- PM_VREG_PDOWN_AUX2_ID = PM_VREG_PDOWN_GP5_ID,
- PM_VREG_PDOWN_BT_ID = PM_VREG_PDOWN_GP6_ID,
-
- PM_VREG_PDOWN_MSME_ID = PM_VREG_PDOWN_MSME1_ID,
- PM_VREG_PDOWN_MSMC_ID = PM_VREG_PDOWN_MSMC1_ID,
- PM_VREG_PDOWN_RFA1_ID = PM_VREG_PDOWN_RFRX2_ID,
- PM_VREG_PDOWN_RFA2_ID = PM_VREG_PDOWN_RFTX2_ID,
- PM_VREG_PDOWN_XO_ID = PM_VREG_PDOWN_TCXO_ID
+ PCOM_OEM_LAST = PCOM_OEM_TEST_CMD,
};
-/* gpio info for PCOM_RPC_GPIO_TLMM_CONFIG_EX */
-
-#define GPIO_ENABLE 0
-#define GPIO_DISABLE 1
-
-#define GPIO_INPUT 0
-#define GPIO_OUTPUT 1
-
-#define GPIO_NO_PULL 0
-#define GPIO_PULL_DOWN 1
-#define GPIO_KEEPER 2
-#define GPIO_PULL_UP 3
-
-#define GPIO_2MA 0
-#define GPIO_4MA 1
-#define GPIO_6MA 2
-#define GPIO_8MA 3
-#define GPIO_10MA 4
-#define GPIO_12MA 5
-#define GPIO_14MA 6
-#define GPIO_16MA 7
-
-#define PCOM_GPIO_CFG(gpio, func, dir, pull, drvstr) \
- ((((gpio) & 0x3FF) << 4) | \
- ((func) & 0xf) | \
- (((dir) & 0x1) << 14) | \
- (((pull) & 0x3) << 15) | \
- (((drvstr) & 0xF) << 17))
+enum {
+ PCOM_INVALID_STATUS = 0x0,
+ PCOM_READY,
+ PCOM_CMD_RUNNING,
+ PCOM_CMD_SUCCESS,
+ PCOM_CMD_FAIL,
+ PCOM_CMD_FAIL_FALSE_RETURNED,
+ PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_SERVER,
+ PCOM_CMD_FAIL_CMD_OUT_OF_BOUNDS_CLIENT,
+ PCOM_CMD_FAIL_CMD_UNREGISTERED,
+ PCOM_CMD_FAIL_CMD_LOCKED,
+ PCOM_CMD_FAIL_SERVER_NOT_YET_READY,
+ PCOM_CMD_FAIL_BAD_DESTINATION,
+ PCOM_CMD_FAIL_SERVER_RESET,
+ PCOM_CMD_FAIL_SMSM_NOT_INIT,
+ PCOM_CMD_FAIL_PROC_COMM_BUSY,
+ PCOM_CMD_FAIL_PROC_COMM_NOT_INIT,
+};
+void msm_proc_comm_reset_modem_now(void);
int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2);
#endif
diff --git a/arch/arm/mach-msm/proc_comm_test.c b/arch/arm/mach-msm/proc_comm_test.c
new file mode 100644
index 000000000000..e38b8d651d27
--- /dev/null
+++ b/arch/arm/mach-msm/proc_comm_test.c
@@ -0,0 +1,169 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * PROC COMM TEST Driver source file
+ */
+
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "proc_comm.h"
+
+static struct dentry *dent;
+static int proc_comm_test_res;
+
+static int proc_comm_reverse_test(void)
+{
+ uint32_t data1, data2;
+ int rc;
+
+ data1 = 10;
+ data2 = 20;
+
+ rc = msm_proc_comm(PCOM_OEM_TEST_CMD, &data1, &data2);
+ if (rc)
+ return rc;
+
+ if ((data1 != 20) || (data2 != 10))
+ return -1;
+
+ return 0;
+}
+
+static ssize_t debug_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char _buf[16];
+
+ snprintf(_buf, sizeof(_buf), "%i\n", proc_comm_test_res);
+
+ return simple_read_from_buffer(buf, count, pos, _buf, strlen(_buf));
+}
+
+static ssize_t debug_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+
+ unsigned char cmd[64];
+ int len;
+
+ if (count < 1)
+ return 0;
+
+ len = count > 63 ? 63 : count;
+
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+
+ cmd[len] = 0;
+
+ if (cmd[len-1] == '\n') {
+ cmd[len-1] = 0;
+ len--;
+ }
+
+ if (!strncmp(cmd, "reverse_test", 64))
+ proc_comm_test_res = proc_comm_reverse_test();
+ else
+ proc_comm_test_res = -EINVAL;
+
+ if (proc_comm_test_res)
+ pr_err("proc comm test fail %d\n",
+ proc_comm_test_res);
+ else
+ pr_info("proc comm test passed\n");
+
+ return count;
+}
+
+static int debug_release(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+static int debug_open(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .owner = THIS_MODULE,
+ .open = debug_open,
+ .release = debug_release,
+ .read = debug_read,
+ .write = debug_write,
+};
+
+static void __exit proc_comm_test_mod_exit(void)
+{
+ debugfs_remove(dent);
+}
+
+static int __init proc_comm_test_mod_init(void)
+{
+ dent = debugfs_create_file("proc_comm", 0444, 0, NULL, &debug_ops);
+ proc_comm_test_res = -1;
+ return 0;
+}
+
+module_init(proc_comm_test_mod_init);
+module_exit(proc_comm_test_mod_exit);
+
+MODULE_DESCRIPTION("PROC COMM TEST Driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/arm/mach-msm/remote_spinlock.c b/arch/arm/mach-msm/remote_spinlock.c
new file mode 100644
index 000000000000..6e488865930e
--- /dev/null
+++ b/arch/arm/mach-msm/remote_spinlock.c
@@ -0,0 +1,85 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+#include <asm/system.h>
+
+#include <mach/remote_spinlock.h>
+#include "smd_private.h"
+
+#define SMEM_SPINLOCK_COUNT 8
+#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))
+
+int _remote_spin_lock_init(remote_spin_lock_id_t id, _remote_spinlock_t *lock)
+{
+ _remote_spinlock_t spinlock_start;
+
+ if (id >= SMEM_SPINLOCK_COUNT)
+ return -EINVAL;
+
+ spinlock_start = smem_alloc(SMEM_SPINLOCK_ARRAY,
+ SMEM_SPINLOCK_ARRAY_SIZE);
+ if (spinlock_start == NULL)
+ return -ENXIO;
+
+ *lock = spinlock_start + id;
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-msm/reset_modem.c b/arch/arm/mach-msm/reset_modem.c
new file mode 100644
index 000000000000..5d5b0f9a0620
--- /dev/null
+++ b/arch/arm/mach-msm/reset_modem.c
@@ -0,0 +1,226 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * MSM architecture driver to reset the modem
+ */
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+
+#include "smd_private.h"
+#include "proc_comm.h"
+
+#define DEBUG
+/* #undef DEBUG */
+#ifdef DEBUG
+#define D(x...) printk(x)
+#else
+#define D(x...) do {} while (0)
+#endif
+
+static ssize_t reset_modem_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ return 0;
+}
+
+static ssize_t reset_modem_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ unsigned char cmd[64];
+ int len;
+ int time;
+ int zero = 0;
+ int r;
+
+ if (count < 1)
+ return 0;
+
+ len = count > 63 ? 63 : count;
+
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+
+ cmd[len] = 0;
+
+ /* lazy */
+ if (cmd[len-1] == '\n') {
+ cmd[len-1] = 0;
+ len--;
+ }
+
+ if (!strncmp(cmd, "wait", 4)) {
+ D(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: WAIT\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ smsm_reset_modem(SMSM_MODEM_WAIT);
+ } else if (!strncmp(cmd, "continue", 8)) {
+ D(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: CONTINUE\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ smsm_reset_modem_cont();
+ } else if (!strncmp(cmd, "download", 8)) {
+ D(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: DOWNLOAD\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ smsm_reset_modem(SMSM_SYSTEM_DOWNLOAD);
+ } else if (sscanf(cmd, "deferred reset %i", &time) == 1) {
+ D(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: DEFERRED RESET %ims\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ time);
+ if (time == 0) {
+ r = 0;
+ msm_proc_comm_reset_modem_now();
+ } else {
+ r = msm_proc_comm(PCOM_RESET_MODEM, &time, &zero);
+ }
+ if (r < 0)
+ return r;
+ } else if (!strncmp(cmd, "deferred reset", 14)) {
+ D(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: DEFERRED RESET 0ms\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ r = 0;
+ msm_proc_comm_reset_modem_now();
+ if (r < 0)
+ return r;
+ } else if (!strncmp(cmd, "reset chip now", 14)) {
+ uint param1 = 0x0;
+ uint param2 = 0x0;
+
+ D(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: CHIP RESET IMMEDIATE\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+
+ r = msm_proc_comm(PCOM_RESET_CHIP_IMM, &param1, &param2);
+
+ if (r < 0)
+ return r;
+ } else if (!strncmp(cmd, "reset chip", 10)) {
+
+ uint param1 = 0x0;
+ uint param2 = 0x0;
+
+ D(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: CHIP RESET \n",
+ __FILE__,
+ __LINE__,
+ __func__);
+
+ r = msm_proc_comm(PCOM_RESET_CHIP, &param1, &param2);
+
+ if (r < 0)
+ return r;
+ } else { /* if (!strncmp(cmd, "reset", 5)) */
+ printk(KERN_ERR "INFO:%s:%i:%s: "
+ "MODEM RESTART: RESET\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ smsm_reset_modem(SMSM_RESET);
+ }
+
+ return count;
+}
+
+static int reset_modem_open(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+static int reset_modem_release(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+static const struct file_operations reset_modem_fops = {
+ .owner = THIS_MODULE,
+ .read = reset_modem_read,
+ .write = reset_modem_write,
+ .open = reset_modem_open,
+ .release = reset_modem_release,
+};
+
+static struct miscdevice reset_modem_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "reset_modem",
+ .fops = &reset_modem_fops,
+};
+
+static int __init reset_modem_init(void)
+{
+ return misc_register(&reset_modem_dev);
+}
+
+module_init(reset_modem_init);
+
+MODULE_DESCRIPTION("Reset Modem");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/rpc_hsusb.c b/arch/arm/mach-msm/rpc_hsusb.c
new file mode 100644
index 000000000000..22dee1c3322b
--- /dev/null
+++ b/arch/arm/mach-msm/rpc_hsusb.c
@@ -0,0 +1,574 @@
+/* linux/arch/arm/mach-msm/rpc_hsusb.c
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#include <linux/err.h>
+#include <mach/rpc_hsusb.h>
+#include <asm/mach-types.h>
+
+static struct msm_rpc_endpoint *usb_ep;
+static struct msm_rpc_endpoint *chg_ep;
+
+struct msm_chg_rpc_ids {
+ unsigned long prog;
+ unsigned long vers_comp;
+ unsigned chg_usb_charger_connected_proc;
+ unsigned chg_usb_charger_disconnected_proc;
+ unsigned chg_usb_i_is_available_proc;
+ unsigned chg_usb_i_is_not_available_proc;
+};
+
+struct msm_hsusb_rpc_ids {
+ unsigned long prog;
+ unsigned long vers_comp;
+ unsigned long init_phy;
+ unsigned long vbus_pwr_up;
+ unsigned long vbus_pwr_down;
+ unsigned long update_product_id;
+ unsigned long update_serial_num;
+ unsigned long update_is_serial_num_null;
+ unsigned long reset_rework_installed;
+ unsigned long enable_pmic_ulpi_data0;
+ unsigned long disable_pmic_ulpi_data0;
+};
+
+static struct msm_hsusb_rpc_ids usb_rpc_ids;
+static struct msm_chg_rpc_ids chg_rpc_ids;
+
+static int msm_hsusb_init_rpc_ids(unsigned long vers)
+{
+ if (vers == 0x00010001) {
+ usb_rpc_ids.prog = 0x30000064;
+ usb_rpc_ids.vers_comp = 0x00010001;
+ usb_rpc_ids.init_phy = 2;
+ usb_rpc_ids.vbus_pwr_up = 6;
+ usb_rpc_ids.vbus_pwr_down = 7;
+ usb_rpc_ids.update_product_id = 8;
+ usb_rpc_ids.update_serial_num = 9;
+ usb_rpc_ids.update_is_serial_num_null = 10;
+ usb_rpc_ids.reset_rework_installed = 17;
+ usb_rpc_ids.enable_pmic_ulpi_data0 = 18;
+ usb_rpc_ids.disable_pmic_ulpi_data0 = 19;
+ return 0;
+ } else if (vers == 0x00010002) {
+ usb_rpc_ids.prog = 0x30000064;
+ usb_rpc_ids.vers_comp = 0x00010002;
+ usb_rpc_ids.init_phy = 2;
+ usb_rpc_ids.vbus_pwr_up = 6;
+ usb_rpc_ids.vbus_pwr_down = 7;
+ usb_rpc_ids.update_product_id = 8;
+ usb_rpc_ids.update_serial_num = 9;
+ usb_rpc_ids.update_is_serial_num_null = 10;
+ usb_rpc_ids.reset_rework_installed = 17;
+ usb_rpc_ids.enable_pmic_ulpi_data0 = 18;
+ usb_rpc_ids.disable_pmic_ulpi_data0 = 19;
+ return 0;
+ } else {
+ printk(KERN_INFO "%s: no matches found for version\n",
+ __func__);
+ return -ENODATA;
+ }
+}
+
+static int msm_chg_init_rpc_ids(unsigned long vers)
+{
+ if (vers == 0x00010001) {
+ chg_rpc_ids.prog = 0x3000001a;
+ chg_rpc_ids.vers_comp = 0x00010001;
+ chg_rpc_ids.chg_usb_charger_connected_proc = 7;
+ chg_rpc_ids.chg_usb_charger_disconnected_proc = 8;
+ chg_rpc_ids.chg_usb_i_is_available_proc = 9;
+ chg_rpc_ids.chg_usb_i_is_not_available_proc = 10;
+ return 0;
+ } else {
+ printk(KERN_INFO "%s: no matches found for version\n",
+ __func__);
+ return -ENODATA;
+ }
+}
+EXPORT_SYMBOL(msm_chg_init_rpc_ids);
+
+/* rpc connect for hsusb */
+int msm_hsusb_rpc_connect(void)
+{
+
+ if (usb_ep && !IS_ERR(usb_ep)) {
+ printk(KERN_INFO "%s: usb_ep already connected\n", __func__);
+ return 0;
+ }
+
+ /* Initialize rpc ids */
+ if (msm_hsusb_init_rpc_ids(0x00010001)) {
+ printk(KERN_ERR "%s: rpc ids initialization failed\n"
+ , __func__);
+ return -ENODATA;
+ }
+
+ usb_ep = msm_rpc_connect_compatible(usb_rpc_ids.prog,
+ usb_rpc_ids.vers_comp, 0);
+
+ if (IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: connect compatible failed vers = %lx\n",
+ __func__, usb_rpc_ids.vers_comp);
+
+ /* Initialize rpc ids */
+ if (msm_hsusb_init_rpc_ids(0x00010002)) {
+ printk(KERN_ERR "%s: rpc ids initialization failed\n",
+ __func__);
+ return -ENODATA;
+ }
+ usb_ep = msm_rpc_connect_compatible(usb_rpc_ids.prog,
+ usb_rpc_ids.vers_comp, 0);
+ }
+
+ if (IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: connect compatible failed vers = %lx\n",
+ __func__, usb_rpc_ids.vers_comp);
+ return -EAGAIN;
+ } else
+ printk(KERN_INFO "%s: rpc connect success vers = %lx\n",
+ __func__, usb_rpc_ids.vers_comp);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_hsusb_rpc_connect);
+
+/* rpc connect for charging */
+int msm_chg_rpc_connect(void)
+{
+
+ if (machine_is_msm7201a_surf() || machine_is_msm7x27_surf() ||
+ machine_is_qsd8x50_surf())
+ return -ENOTSUPP;
+
+ if (chg_ep && !IS_ERR(chg_ep)) {
+ printk(KERN_INFO "%s: chg_ep already connected\n", __func__);
+ return 0;
+ }
+
+ /* Initialize rpc ids */
+ if (msm_chg_init_rpc_ids(0x00010001)) {
+ printk(KERN_ERR "%s: rpc ids initialization failed\n"
+ , __func__);
+ return -ENODATA;
+ }
+
+ chg_ep = msm_rpc_connect_compatible(chg_rpc_ids.prog,
+ chg_rpc_ids.vers_comp, 0);
+
+ if (IS_ERR(chg_ep)) {
+ printk(KERN_ERR "%s: connect compatible failed vers = %lx\n",
+ __func__, chg_rpc_ids.vers_comp);
+ return -EAGAIN;
+ } else
+ printk(KERN_INFO "%s: rpc connect success vers = %lx\n",
+ __func__, chg_rpc_ids.vers_comp);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_chg_rpc_connect);
+
+/* rpc call for phy_reset */
+int msm_hsusb_phy_reset(void)
+{
+ int rc = 0;
+ struct hsusb_phy_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: phy_reset rpc failed before call,"
+ "rc = %ld\n", __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.init_phy,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: phy_reset rpc failed! rc = %d\n",
+ __func__, rc);
+ } else
+ printk(KERN_INFO "msm_hsusb_phy_reset\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_hsusb_phy_reset);
+
+/* rpc call for vbus powerup */
+int msm_hsusb_vbus_powerup(void)
+{
+ int rc = 0;
+ struct hsusb_phy_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: vbus_powerup rpc failed before call,"
+ "rc = %ld\n", __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.vbus_pwr_up,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: vbus_powerup failed! rc = %d\n",
+ __func__, rc);
+ } else
+ printk(KERN_INFO "msm_hsusb_vbus_powerup\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_hsusb_vbus_powerup);
+
+/* rpc call for vbus shutdown */
+int msm_hsusb_vbus_shutdown(void)
+{
+ int rc = 0;
+ struct hsusb_phy_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: vbus_shutdown rpc failed before call,"
+ "rc = %ld\n", __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.vbus_pwr_down,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: vbus_shutdown failed! rc = %d\n",
+ __func__, rc);
+ } else
+ printk(KERN_INFO "msm_hsusb_vbus_shutdown\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_hsusb_vbus_shutdown);
+
+int msm_hsusb_send_productID(uint32_t product_id)
+{
+ int rc = 0;
+ struct hsusb_phy_start_req {
+ struct rpc_request_hdr hdr;
+ uint32_t product_id;
+ } req;
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: rpc connect failed: rc = %ld\n",
+ __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ req.product_id = cpu_to_be32(product_id);
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.update_product_id,
+ &req, sizeof(req),
+ 5 * HZ);
+ if (rc < 0)
+ printk(KERN_ERR "%s: rpc call failed! error: %d\n",
+ __func__, rc);
+ else
+ printk(KERN_ERR "%s: rpc call success\n" ,
+ __func__);
+ return rc;
+}
+EXPORT_SYMBOL(msm_hsusb_send_productID);
+
+int msm_hsusb_send_serial_number(char *serial_number)
+{
+ int rc = 0, serial_len;
+ struct hsusb_phy_start_req {
+ struct rpc_request_hdr hdr;
+ uint32_t length;
+ char serial_num[20];
+ } req;
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: rpc connect failed: rc = %ld\n",
+ __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ serial_len = strlen(serial_number)+1;
+ strncpy(req.serial_num, serial_number, 20);
+ req.length = cpu_to_be32(serial_len);
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.update_serial_num,
+ &req, sizeof(req),
+ 5 * HZ);
+ if (rc < 0)
+ printk(KERN_ERR "%s: rpc call failed! error: %d\n",
+ __func__, rc);
+ else
+ printk(KERN_ERR "%s: rpc call success\n" ,
+ __func__);
+ return rc;
+}
+EXPORT_SYMBOL(msm_hsusb_send_serial_number);
+
+int msm_hsusb_is_serial_num_null(uint32_t val)
+{
+ int rc = 0;
+ struct hsusb_phy_start_req {
+ struct rpc_request_hdr hdr;
+ uint32_t value;
+ } req;
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: rpc connect failed: rc = %ld\n",
+ __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+ if (!usb_rpc_ids.update_is_serial_num_null) {
+ printk(KERN_ERR "%s: proc id not supported \n", __func__);
+ return -ENODATA;
+ }
+
+ req.value = cpu_to_be32(val);
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.update_is_serial_num_null,
+ &req, sizeof(req),
+ 5 * HZ);
+ if (rc < 0)
+ printk(KERN_ERR "%s: rpc call failed! error: %d\n" ,
+ __func__, rc);
+ else
+ printk(KERN_ERR "%s: rpc call success\n" ,
+ __func__);
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_hsusb_is_serial_num_null);
+
+int msm_chg_usb_charger_connected(uint32_t device)
+{
+ int rc = 0;
+ struct hsusb_start_req {
+ struct rpc_request_hdr hdr;
+ uint32_t otg_dev;
+ } req;
+
+ if (!chg_ep || IS_ERR(chg_ep))
+ return -EAGAIN;
+ req.otg_dev = cpu_to_be32(device);
+ rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_charger_connected_proc,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: charger_connected failed! rc = %d\n",
+ __func__, rc);
+ } else
+ printk(KERN_INFO "msm_chg_usb_charger_connected\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_chg_usb_charger_connected);
+
+int msm_chg_usb_i_is_available(uint32_t sample)
+{
+ int rc = 0;
+ struct hsusb_start_req {
+ struct rpc_request_hdr hdr;
+ uint32_t i_ma;
+ } req;
+
+ if (!chg_ep || IS_ERR(chg_ep))
+ return -EAGAIN;
+ req.i_ma = cpu_to_be32(sample);
+ rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_i_is_available_proc,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: charger_i_available failed! rc = %d\n",
+ __func__, rc);
+ } else
+ printk(KERN_INFO "msm_chg_usb_i_is_available\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_chg_usb_i_is_available);
+
+int msm_chg_usb_i_is_not_available(void)
+{
+ int rc = 0;
+ struct hsusb_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+
+ if (!chg_ep || IS_ERR(chg_ep))
+ return -EAGAIN;
+ rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_i_is_not_available_proc,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: charger_i_not_available failed! rc ="
+ "%d \n", __func__, rc);
+ } else
+ printk(KERN_INFO "msm_chg_usb_i_is_not_available\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_chg_usb_i_is_not_available);
+
+int msm_chg_usb_charger_disconnected(void)
+{
+ int rc = 0;
+ struct hsusb_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+
+ if (!chg_ep || IS_ERR(chg_ep))
+ return -EAGAIN;
+ rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_charger_disconnected_proc,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: charger_disconnected failed! rc = %d\n",
+ __func__, rc);
+ } else
+ printk(KERN_INFO "msm_chg_usb_charger_disconnected\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_chg_usb_charger_disconnected);
+
+/* rpc call to close connection */
+int msm_hsusb_rpc_close(void)
+{
+ int rc = 0;
+
+ if (IS_ERR(usb_ep)) {
+ printk(KERN_ERR "%s: rpc_close failed before call, rc = %ld\n",
+ __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ rc = msm_rpc_close(usb_ep);
+ usb_ep = NULL;
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: close rpc failed! rc = %d\n",
+ __func__, rc);
+ return -EAGAIN;
+ } else
+ printk(KERN_INFO "rpc close success\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_hsusb_rpc_close);
+
+/* rpc call to close charging connection */
+int msm_chg_rpc_close(void)
+{
+ int rc = 0;
+
+ if (IS_ERR(chg_ep)) {
+ printk(KERN_ERR "%s: rpc_close failed before call, rc = %ld\n",
+ __func__, PTR_ERR(chg_ep));
+ return -EAGAIN;
+ }
+
+ rc = msm_rpc_close(chg_ep);
+ chg_ep = NULL;
+
+ if (rc < 0) {
+ printk(KERN_ERR "%s: close rpc failed! rc = %d\n",
+ __func__, rc);
+ return -EAGAIN;
+ } else
+ printk(KERN_INFO "rpc close success\n");
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_chg_rpc_close);
+
+int msm_hsusb_reset_rework_installed(void)
+{
+ int rc = 0;
+ struct hsusb_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+ struct hsusb_rpc_rep {
+ struct rpc_reply_hdr hdr;
+ uint32_t rework;
+ } rep;
+
+ memset(&rep, 0, sizeof(rep));
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ pr_err("%s: hsusb rpc connection not initialized, rc = %ld\n",
+ __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ rc = msm_rpc_call_reply(usb_ep, usb_rpc_ids.reset_rework_installed,
+ &req, sizeof(req),
+ &rep, sizeof(rep), 5 * HZ);
+
+ if (rc < 0) {
+ pr_err("%s: rpc call failed! error: (%d)"
+ "proc id: (%lx)\n",
+ __func__, rc,
+ usb_rpc_ids.reset_rework_installed);
+ return rc;
+ }
+
+ pr_info("%s: rework: (%d)\n", __func__, rep.rework);
+ return be32_to_cpu(rep.rework);
+}
+EXPORT_SYMBOL(msm_hsusb_reset_rework_installed);
+
+static int msm_hsusb_pmic_ulpidata0_config(int enable)
+{
+ int rc = 0;
+ struct hsusb_start_req {
+ struct rpc_request_hdr hdr;
+ } req;
+
+ if (!usb_ep || IS_ERR(usb_ep)) {
+ pr_err("%s: hsusb rpc connection not initialized, rc = %ld\n",
+ __func__, PTR_ERR(usb_ep));
+ return -EAGAIN;
+ }
+
+ if (enable)
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.enable_pmic_ulpi_data0,
+ &req, sizeof(req), 5 * HZ);
+ else
+ rc = msm_rpc_call(usb_ep, usb_rpc_ids.disable_pmic_ulpi_data0,
+ &req, sizeof(req), 5 * HZ);
+
+ if (rc < 0)
+ pr_err("%s: rpc call failed! error: %d\n",
+ __func__, rc);
+ return rc;
+}
+
+int msm_hsusb_enable_pmic_ulpidata0(void)
+{
+ return msm_hsusb_pmic_ulpidata0_config(1);
+}
+EXPORT_SYMBOL(msm_hsusb_enable_pmic_ulpidata0);
+
+int msm_hsusb_disable_pmic_ulpidata0(void)
+{
+ return msm_hsusb_pmic_ulpidata0_config(0);
+}
+EXPORT_SYMBOL(msm_hsusb_disable_pmic_ulpidata0);
diff --git a/arch/arm/mach-msm/rpc_server_dog_keepalive.c b/arch/arm/mach-msm/rpc_server_dog_keepalive.c
new file mode 100644
index 000000000000..5e0f46da379d
--- /dev/null
+++ b/arch/arm/mach-msm/rpc_server_dog_keepalive.c
@@ -0,0 +1,77 @@
+/* arch/arm/mach-msm/rpc_server_dog_keepalive.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Iliyan Malchev <ibm@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <mach/msm_rpcrouter.h>
+
+/* dog_keepalive server definitions */
+
+#define DOG_KEEPALIVE_PROG 0x30000015
+#if CONFIG_MSM_AMSS_VERSION==6210
+#define DOG_KEEPALIVE_VERS 0
+#define RPC_DOG_KEEPALIVE_BEACON 1
+#elif (CONFIG_MSM_AMSS_VERSION==6220) || (CONFIG_MSM_AMSS_VERSION==6225)
+#define DOG_KEEPALIVE_VERS 0x731fa727
+#define RPC_DOG_KEEPALIVE_BEACON 2
+#else
+#error "Unsupported AMSS version"
+#endif
+#define DOG_KEEPALIVE_VERS_COMP 0x00010001
+#define RPC_DOG_KEEPALIVE_NULL 0
+
+
+/* TODO: Remove server registration with _VERS when modem is upated with _COMP*/
+
+static int handle_rpc_call(struct msm_rpc_server *server,
+ struct rpc_request_hdr *req, unsigned len)
+{
+ switch (req->procedure) {
+ case RPC_DOG_KEEPALIVE_NULL:
+ return 0;
+ case RPC_DOG_KEEPALIVE_BEACON:
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static struct msm_rpc_server rpc_server[] = {
+ {
+ .prog = DOG_KEEPALIVE_PROG,
+ .vers = DOG_KEEPALIVE_VERS,
+ .rpc_call = handle_rpc_call,
+ },
+ {
+ .prog = DOG_KEEPALIVE_PROG,
+ .vers = DOG_KEEPALIVE_VERS_COMP,
+ .rpc_call = handle_rpc_call,
+ },
+};
+
+static int __init rpc_server_init(void)
+{
+ /* Dual server registration to support backwards compatibility vers */
+ int ret;
+ ret = msm_rpc_create_server(&rpc_server[1]);
+ if (ret < 0)
+ return ret;
+ return msm_rpc_create_server(&rpc_server[0]);
+}
+
+
+module_init(rpc_server_init);
diff --git a/arch/arm/mach-msm/rpc_server_handset.c b/arch/arm/mach-msm/rpc_server_handset.c
new file mode 100644
index 000000000000..f2df4aca231b
--- /dev/null
+++ b/arch/arm/mach-msm/rpc_server_handset.c
@@ -0,0 +1,380 @@
+/* arch/arm/mach-msm/rpc_server_handset.c
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <asm/mach-types.h>
+
+#include <mach/msm_handset.h>
+#include <mach/msm_rpcrouter.h>
+#include <mach/board.h>
+
+#include "keypad-surf-ffa.h"
+
+#define HS_SERVER_PROG 0x30000062
+#define HS_SERVER_VERS 0x00010001
+
+#define HS_RPC_PROG 0x30000091
+#define HS_RPC_VERS 0x00010001
+
+#define HS_RPC_CB_PROG 0x31000091
+#define HS_RPC_CB_VERS 0x00010001
+
+#define HS_SUBSCRIBE_SRVC_PROC 0x03
+#define HS_EVENT_CB_PROC 1
+
+#define RPC_KEYPAD_NULL_PROC 0
+#define RPC_KEYPAD_PASS_KEY_CODE_PROC 2
+#define RPC_KEYPAD_SET_PWR_KEY_STATE_PROC 3
+
+#define HS_PWR_K 0x6F /* Power key */
+#define HS_END_K 0x51 /* End key or Power key */
+#define HS_STEREO_HEADSET_K 0x82
+#define HS_HEADSET_SWITCH_K 0x84
+#define HS_REL_K 0xFF /* key release */
+
+#define KEY(hs_key, input_key) ((hs_key << 24) | input_key)
+
+struct hs_key_data {
+ uint32_t ver; /* Version number to track sturcture changes */
+ uint32_t code; /* which key? */
+ uint32_t parm; /* key status. Up/down or pressed/released */
+};
+
+enum hs_subs_srvc {
+ HS_SUBS_SEND_CMD = 0, /* Subscribe to send commands to HS */
+ HS_SUBS_RCV_EVNT, /* Subscribe to receive Events from HS */
+ HS_SUBS_SRVC_MAX
+};
+
+enum hs_subs_req {
+ HS_SUBS_REGISTER, /* Subscribe */
+ HS_SUBS_CANCEL, /* Unsubscribe */
+ HS_SUB_STATUS_MAX
+};
+
+enum hs_event_class {
+ HS_EVNT_CLASS_ALL = 0, /* All HS events */
+ HS_EVNT_CLASS_LAST, /* Should always be the last class type */
+ HS_EVNT_CLASS_MAX
+};
+
+enum hs_cmd_class {
+ HS_CMD_CLASS_LCD = 0, /* Send LCD related commands */
+ HS_CMD_CLASS_KPD, /* Send KPD related commands */
+ HS_CMD_CLASS_LAST, /* Should always be the last class type */
+ HS_CMD_CLASS_MAX
+};
+
+/*
+ * Receive events or send command
+ */
+union hs_subs_class {
+ enum hs_event_class evnt;
+ enum hs_cmd_class cmd;
+};
+
+struct hs_subs {
+ uint32_t ver;
+ enum hs_subs_srvc srvc; /* commands or events */
+ enum hs_subs_req req; /* subscribe or unsubscribe */
+ uint32_t host_os;
+ enum hs_subs_req disc; /* discriminator */
+ union hs_subs_class id;
+};
+
+struct hs_event_cb_recv {
+ uint32_t cb_id;
+ uint32_t hs_key_data_ptr;
+ struct hs_key_data key;
+};
+
+static const uint32_t hs_key_map[] = {
+ KEY(HS_PWR_K, KEY_POWER),
+ KEY(HS_END_K, KEY_END),
+ KEY(HS_STEREO_HEADSET_K, SW_HEADPHONE_INSERT),
+ KEY(HS_HEADSET_SWITCH_K, KEY_MEDIA),
+ 0
+};
+
+static struct input_dev *kpdev;
+static struct input_dev *hsdev;
+static struct msm_rpc_client *rpc_client;
+
+static int hs_find_key(uint32_t hscode)
+{
+ int i, key;
+
+ key = KEY(hscode, 0);
+
+ for (i = 0; hs_key_map[i] != 0; i++) {
+ if ((hs_key_map[i] & 0xff000000) == key)
+ return hs_key_map[i] & 0x00ffffff;
+ }
+ return -1;
+}
+
+static void
+report_headset_switch(struct input_dev *dev, int key, int value)
+{
+ struct msm_handset *hs = input_get_drvdata(dev);
+
+ input_report_switch(dev, key, value);
+ input_sync(dev);
+}
+
+/*
+ * tuple format: (key_code, key_param)
+ *
+ * old-architecture:
+ * key-press = (key_code, 0)
+ * key-release = (0xff, key_code)
+ *
+ * new-architecutre:
+ * key-press = (key_code, 0)
+ * key-release = (key_code, 0xff)
+ */
+static void report_hs_key(uint32_t key_code, uint32_t key_parm)
+{
+ int key, temp_key_code;
+
+ if (key_code == HS_REL_K)
+ key = hs_find_key(key_parm);
+ else
+ key = hs_find_key(key_code);
+
+ temp_key_code = key_code;
+
+ if (key_parm == HS_REL_K)
+ key_code = key_parm;
+
+ kpdev = msm_keypad_get_input_dev();
+ hsdev = msm_get_handset_input_dev();
+
+ switch (key) {
+ case KEY_POWER:
+ case KEY_END:
+ if (!kpdev) {
+ printk(KERN_ERR "%s: No input device for reporting "
+ "pwr/end key press\n", __func__);
+ return;
+ }
+ input_report_key(kpdev, key, (key_code != HS_REL_K));
+ input_sync(kpdev);
+ break;
+ case SW_HEADPHONE_INSERT:
+ if (!hsdev) {
+ printk(KERN_ERR "%s: No input device for reporting "
+ "handset events\n", __func__);
+ return;
+ }
+ report_headset_switch(hsdev, key, (key_code != HS_REL_K));
+ break;
+ case KEY_MEDIA:
+ if (!hsdev) {
+ printk(KERN_ERR "%s: No input device for reporting "
+ "handset events\n", __func__);
+ return;
+ }
+ input_report_key(hsdev, key, (key_code != HS_REL_K));
+ input_sync(hsdev);
+ break;
+ case -1:
+ printk(KERN_ERR "%s: No mapping for remote handset event %d\n",
+ __func__, temp_key_code);
+ break;
+ default:
+ printk(KERN_ERR "%s: Unhandled handset key %d\n", __func__,
+ key);
+ }
+}
+
+static int handle_hs_rpc_call(struct msm_rpc_server *server,
+ struct rpc_request_hdr *req, unsigned len)
+{
+ struct rpc_keypad_pass_key_code_args {
+ uint32_t key_code;
+ uint32_t key_parm;
+ };
+
+ switch (req->procedure) {
+ case RPC_KEYPAD_NULL_PROC:
+ return 0;
+
+ case RPC_KEYPAD_PASS_KEY_CODE_PROC: {
+ struct rpc_keypad_pass_key_code_args *args;
+
+ args = (struct rpc_keypad_pass_key_code_args *)(req + 1);
+ args->key_code = be32_to_cpu(args->key_code);
+ args->key_parm = be32_to_cpu(args->key_parm);
+
+ report_hs_key(args->key_code, args->key_parm);
+
+ return 0;
+ }
+
+ case RPC_KEYPAD_SET_PWR_KEY_STATE_PROC:
+ /* This RPC function must be available for the ARM9
+ * to function properly. This function is redundant
+ * when RPC_KEYPAD_PASS_KEY_CODE_PROC is handled. So
+ * input_report_key is not needed.
+ */
+ return 0;
+ default:
+ return -ENODEV;
+ }
+}
+
+static struct msm_rpc_server hs_rpc_server = {
+ .prog = HS_SERVER_PROG,
+ .vers = HS_SERVER_VERS,
+ .rpc_call = handle_hs_rpc_call,
+};
+
+static int process_subs_srvc_callback(struct hs_event_cb_recv *recv)
+{
+ if (!recv)
+ return -ENODATA;
+
+ report_hs_key(be32_to_cpu(recv->key.code), be32_to_cpu(recv->key.parm));
+
+ return 0;
+}
+
+static void process_hs_rpc_request(uint32_t proc, void *data)
+{
+ if (proc == HS_EVENT_CB_PROC)
+ process_subs_srvc_callback(data);
+ else
+ pr_err("%s: unknown rpc proc %d\n", __func__, proc);
+}
+
+static int hs_rpc_register_subs_arg(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ struct hs_subs_rpc_req {
+ uint32_t hs_subs_ptr;
+ struct hs_subs hs_subs;
+ uint32_t hs_cb_id;
+ uint32_t hs_handle_ptr;
+ uint32_t hs_handle_data;
+ };
+
+ struct hs_subs_rpc_req *req = buffer;
+
+ req->hs_subs_ptr = cpu_to_be32(0x1);
+ req->hs_subs.ver = cpu_to_be32(0x1);
+ req->hs_subs.srvc = cpu_to_be32(HS_SUBS_RCV_EVNT);
+ req->hs_subs.req = cpu_to_be32(HS_SUBS_REGISTER);
+ req->hs_subs.host_os = cpu_to_be32(0x4); /* linux */
+ req->hs_subs.disc = cpu_to_be32(HS_SUBS_RCV_EVNT);
+ req->hs_subs.id.evnt = cpu_to_be32(HS_EVNT_CLASS_ALL);
+
+ req->hs_cb_id = cpu_to_be32(0x1);
+
+ req->hs_handle_ptr = cpu_to_be32(0x1);
+ req->hs_handle_data = cpu_to_be32(0x0);
+
+ return sizeof(*req);
+}
+
+static int hs_rpc_register_subs_res(struct msm_rpc_client *client,
+ void *buffer, void *data)
+{
+ uint32_t result;
+
+ result = be32_to_cpu(*((uint32_t *)buffer));
+ pr_debug("%s: request completed: 0x%x\n", __func__, result);
+
+ return 0;
+}
+
+static int hs_cb_func(struct msm_rpc_client *client, void *buffer, int in_size)
+{
+ int rc = -1;
+
+ struct rpc_request_hdr *hdr = buffer;
+
+ hdr->type = be32_to_cpu(hdr->type);
+ hdr->xid = be32_to_cpu(hdr->xid);
+ hdr->rpc_vers = be32_to_cpu(hdr->rpc_vers);
+ hdr->prog = be32_to_cpu(hdr->prog);
+ hdr->vers = be32_to_cpu(hdr->vers);
+ hdr->procedure = be32_to_cpu(hdr->procedure);
+
+ if (hdr->type != 0)
+ return rc;
+ if (hdr->rpc_vers != 2)
+ return rc;
+ if (hdr->prog != HS_RPC_CB_PROG)
+ return rc;
+ if (!msm_rpc_is_compatible_version(HS_RPC_CB_VERS,
+ hdr->vers))
+ return rc;
+
+ process_hs_rpc_request(hdr->procedure,
+ (void *) (hdr + 1));
+
+ msm_rpc_start_accepted_reply(client, hdr->xid,
+ RPC_ACCEPTSTAT_SUCCESS);
+ rc = msm_rpc_send_accepted_reply(client, 0);
+ if (rc) {
+ pr_err("%s: sending reply failed: %d\n", __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int __init hs_rpc_cb_init(void)
+{
+ int rc = 0;
+
+ rpc_client = msm_rpc_register_client("hs",
+ HS_RPC_PROG, HS_RPC_VERS, 0, hs_cb_func);
+
+ if (IS_ERR(rpc_client)) {
+ pr_err("%s: couldn't open rpc client err %ld\n", __func__,
+ PTR_ERR(rpc_client));
+ return PTR_ERR(rpc_client);
+ }
+
+ rc = msm_rpc_client_req(rpc_client, HS_SUBSCRIBE_SRVC_PROC,
+ hs_rpc_register_subs_arg, NULL,
+ hs_rpc_register_subs_res, NULL, -1);
+ if (rc) {
+ pr_err("%s: couldn't send rpc client request\n", __func__);
+ msm_rpc_unregister_client(rpc_client);
+ }
+
+ return rc;
+}
+
+static int __init hs_rpc_init(void)
+{
+ int rc;
+
+ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) {
+ rc = hs_rpc_cb_init();
+ if (rc)
+ pr_err("%s: failed to initialize\n", __func__);
+ }
+
+ return msm_rpc_create_server(&hs_rpc_server);
+}
+module_init(hs_rpc_init);
diff --git a/arch/arm/mach-msm/rpc_server_time_remote.c b/arch/arm/mach-msm/rpc_server_time_remote.c
new file mode 100644
index 000000000000..5e9719a085cf
--- /dev/null
+++ b/arch/arm/mach-msm/rpc_server_time_remote.c
@@ -0,0 +1,90 @@
+/* arch/arm/mach-msm/rpc_server_time_remote.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Iliyan Malchev <ibm@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <mach/msm_rpcrouter.h>
+#include "rpc_server_time_remote.h"
+
+/* time_remote_mtoa server definitions. */
+
+#define TIME_REMOTE_MTOA_PROG 0x3000005d
+#if CONFIG_MSM_AMSS_VERSION==6210
+#define TIME_REMOTE_MTOA_VERS 0
+#elif (CONFIG_MSM_AMSS_VERSION==6220) || (CONFIG_MSM_AMSS_VERSION==6225)
+#define TIME_REMOTE_MTOA_VERS 0x9202a8e4
+#else
+#error "Unknown AMSS version"
+#endif
+#define TIME_REMOTE_MTOA_VERS_COMP 0x00010001
+#define RPC_TIME_REMOTE_MTOA_NULL 0
+#define RPC_TIME_TOD_SET_APPS_BASES 2
+
+struct rpc_time_tod_set_apps_bases_args {
+ uint32_t tick;
+ uint64_t stamp;
+};
+
+static int handle_rpc_call(struct msm_rpc_server *server,
+ struct rpc_request_hdr *req, unsigned len)
+{
+ switch (req->procedure) {
+ case RPC_TIME_REMOTE_MTOA_NULL:
+ return 0;
+
+ case RPC_TIME_TOD_SET_APPS_BASES: {
+ struct rpc_time_tod_set_apps_bases_args *args;
+ args = (struct rpc_time_tod_set_apps_bases_args *)(req + 1);
+ args->tick = be32_to_cpu(args->tick);
+ args->stamp = be64_to_cpu(args->stamp);
+ printk(KERN_INFO "RPC_TIME_TOD_SET_APPS_BASES:\n"
+ "\ttick = %d\n"
+ "\tstamp = %lld\n",
+ args->tick, args->stamp);
+ rtc_hctosys();
+ return 0;
+ }
+ default:
+ return -ENODEV;
+ }
+}
+
+static struct msm_rpc_server rpc_server[] = {
+ {
+ .prog = TIME_REMOTE_MTOA_PROG,
+ .vers = TIME_REMOTE_MTOA_VERS,
+ .rpc_call = handle_rpc_call,
+ },
+ {
+ .prog = TIME_REMOTE_MTOA_PROG,
+ .vers = TIME_REMOTE_MTOA_VERS_COMP,
+ .rpc_call = handle_rpc_call,
+ },
+};
+
+static int __init rpc_server_init(void)
+{
+ /* Dual server registration to support backwards compatibility vers */
+ int ret;
+ ret = msm_rpc_create_server(&rpc_server[1]);
+ if (ret < 0)
+ return ret;
+ return msm_rpc_create_server(&rpc_server[0]);
+}
+
+
+module_init(rpc_server_init);
diff --git a/arch/arm/mach-msm/rpc_server_time_remote.h b/arch/arm/mach-msm/rpc_server_time_remote.h
new file mode 100644
index 000000000000..056666f50013
--- /dev/null
+++ b/arch/arm/mach-msm/rpc_server_time_remote.h
@@ -0,0 +1,21 @@
+/* arch/arm/mach-msm/rpc_server_time_remote.h
+ *
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPC_SERVER_TIME_REMOTE_H
+#define __ARCH_ARM_MACH_MSM_RPC_SERVER_TIME_REMOTE_H
+
+int rtc_hctosys(void);
+
+#endif
diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c
new file mode 100644
index 000000000000..5a64aa44ddbe
--- /dev/null
+++ b/arch/arm/mach-msm/sirc.c
@@ -0,0 +1,239 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <asm/irq.h>
+
+static void sirc_irq_mask(unsigned int irq);
+static void sirc_irq_unmask(unsigned int irq);
+static void sirc_irq_ack(unsigned int irq);
+static int sirc_irq_set_wake(unsigned int irq, unsigned int on);
+static int sirc_irq_set_type(unsigned int irq, unsigned int flow_type);
+static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc);
+
+static unsigned int int_enable;
+static unsigned int wake_enable;
+
+static struct sirc_regs_t sirc_regs = {
+ .int_enable = SPSS_SIRC_INT_ENABLE,
+ .int_enable_clear = SPSS_SIRC_INT_ENABLE_CLEAR,
+ .int_enable_set = SPSS_SIRC_INT_ENABLE_SET,
+ .int_type = SPSS_SIRC_INT_TYPE,
+ .int_polarity = SPSS_SIRC_INT_POLARITY,
+ .int_clear = SPSS_SIRC_INT_CLEAR,
+};
+
+static struct sirc_cascade_regs sirc_reg_table[] = {
+ {
+ .int_status = SPSS_SIRC_IRQ_STATUS,
+ .cascade_irq = INT_SIRC_0,
+ }
+};
+
+static unsigned int save_type;
+static unsigned int save_polarity;
+
+/* Mask off the given interrupt. Keep the int_enable mask in sync with
+ the enable reg, so it can be restored after power collapse. */
+static void sirc_irq_mask(unsigned int irq)
+{
+ unsigned int mask;
+
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ writel(mask, sirc_regs.int_enable_clear);
+ int_enable &= ~mask;
+ return;
+}
+
+/* Unmask the given interrupt. Keep the int_enable mask in sync with
+ the enable reg, so it can be restored after power collapse. */
+static void sirc_irq_unmask(unsigned int irq)
+{
+ unsigned int mask;
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ writel(mask, sirc_regs.int_enable_set);
+ int_enable |= mask;
+ return;
+}
+
+static void sirc_irq_ack(unsigned int irq)
+{
+ unsigned int mask;
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ writel(mask, sirc_regs.int_clear);
+ return;
+}
+
+static int sirc_irq_set_wake(unsigned int irq, unsigned int on)
+{
+ unsigned int mask;
+
+ /* Used to set the interrupt enable mask during power collapse. */
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ if (on)
+ wake_enable |= mask;
+ else
+ wake_enable &= ~mask;
+
+ return 0;
+}
+
+static int sirc_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ unsigned int mask;
+ unsigned int val;
+
+ mask = 1 << (irq - FIRST_SIRC_IRQ);
+ val = readl(sirc_regs.int_polarity);
+
+ if (flow_type & (IRQF_TRIGGER_LOW | IRQF_TRIGGER_FALLING))
+ val |= mask;
+ else
+ val &= ~mask;
+
+ writel(val, sirc_regs.int_polarity);
+
+ val = readl(sirc_regs.int_type);
+ if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ val |= mask;
+ irq_desc[irq].handle_irq = handle_edge_irq;
+ } else {
+ val &= ~mask;
+ irq_desc[irq].handle_irq = handle_level_irq;
+ }
+
+ writel(val, sirc_regs.int_type);
+
+ return 0;
+}
+
+/* Finds the pending interrupt on the passed cascade irq and redrives it */
+static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ unsigned int reg = 0;
+ unsigned int sirq;
+ unsigned int status;
+
+ while ((reg < ARRAY_SIZE(sirc_reg_table)) &&
+ (sirc_reg_table[reg].cascade_irq != irq))
+ reg++;
+
+ status = readl(sirc_reg_table[reg].int_status);
+ status &= SIRC_MASK;
+ if (status == 0)
+ return;
+
+ for (sirq = 0;
+ (sirq < NR_SIRC_IRQS) && ((status & (1U << sirq)) == 0);
+ sirq++)
+ ;
+ generic_handle_irq(sirq+FIRST_SIRC_IRQ);
+
+ desc->chip->ack(irq);
+}
+
+void msm_sirc_enter_sleep(void)
+{
+ save_type = readl(sirc_regs.int_type);
+ save_polarity = readl(sirc_regs.int_polarity);
+ writel(wake_enable, sirc_regs.int_enable);
+ return;
+}
+
+void msm_sirc_exit_sleep(void)
+{
+ writel(save_type, sirc_regs.int_type);
+ writel(save_polarity, sirc_regs.int_polarity);
+ writel(int_enable, sirc_regs.int_enable);
+ return;
+}
+
+static struct irq_chip sirc_irq_chip = {
+ .name = "sirc",
+ .ack = sirc_irq_ack,
+ .mask = sirc_irq_mask,
+ .unmask = sirc_irq_unmask,
+ .set_wake = sirc_irq_set_wake,
+ .set_type = sirc_irq_set_type,
+};
+
+void __init msm_init_sirc(void)
+{
+ int i;
+
+ int_enable = 0;
+ wake_enable = 0;
+
+ for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) {
+ set_irq_chip(i, &sirc_irq_chip);
+ set_irq_handler(i, handle_edge_irq);
+ set_irq_flags(i, IRQF_VALID);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) {
+ set_irq_chained_handler(sirc_reg_table[i].cascade_irq,
+ sirc_irq_handler);
+ set_irq_wake(sirc_reg_table[i].cascade_irq, 1);
+ }
+ return;
+}
+
diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c
new file mode 100644
index 000000000000..82596828c7e0
--- /dev/null
+++ b/arch/arm/mach-msm/smd.c
@@ -0,0 +1,1960 @@
+/* arch/arm/mach-msm/smd.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/termios.h>
+#include <linux/ctype.h>
+#include <mach/msm_smd.h>
+#include <mach/msm_iomap.h>
+#include <mach/system.h>
+#include <linux/io.h>
+
+#include "smd_private.h"
+#include "proc_comm.h"
+#include "modem_notifier.h"
+
+#define MODULE_NAME "msm_smd"
+#define SMEM_VERSION 0x000B
+#define SMD_VERSION 0x00020000
+
+enum {
+ MSM_SMD_DEBUG = 1U << 0,
+ MSM_SMSM_DEBUG = 1U << 1,
+ MSM_SMD_INFO = 1U << 2,
+ MSM_SMSM_INFO = 1U << 3,
+};
+
+enum {
+ SMEM_APPS_Q6_SMSM = 3,
+ SMEM_Q6_APPS_SMSM = 5,
+ SMSM_NUM_INTR_MUX = 8,
+};
+
+/* Internal definitions which are not exported in some targets */
+enum {
+ SMSM_Q6_I = 2,
+};
+
+enum {
+ SMSM_APPS_DEM_I = 3,
+};
+
+enum {
+ SMD_APPS_QDSP_I = 1,
+ SMD_MODEM_QDSP_I = 2
+};
+
+static int msm_smd_debug_mask;
+module_param_named(debug_mask, msm_smd_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+void *smem_find(unsigned id, unsigned size);
+void smd_diag(void);
+
+static unsigned last_heap_free = 0xffffffff;
+
+#if defined(CONFIG_MSM_SMD_DEBUG)
+#define SMD_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_DEBUG) \
+ printk(KERN_DEBUG x); \
+ } while (0)
+
+#define SMSM_DBG(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_DEBUG) \
+ printk(KERN_DEBUG x); \
+ } while (0)
+
+#define SMD_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMD_INFO) \
+ printk(KERN_INFO x); \
+ } while (0)
+
+#define SMSM_INFO(x...) do { \
+ if (msm_smd_debug_mask & MSM_SMSM_INFO) \
+ printk(KERN_INFO x); \
+ } while (0)
+#else
+#define SMD_DBG(x...) do { } while (0)
+#define SMSM_DBG(x...) do { } while (0)
+#define SMD_INFO(x...) do { } while (0)
+#define SMSM_INFO(x...) do { } while (0)
+#endif
+
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_TRIG_A2M_INT(n) (writel(1 << n, MSM_GCC_BASE + 0x8))
+#else
+#define MSM_TRIG_A2M_INT(n) (writel(1, MSM_CSR_BASE + 0x400 + (n) * 4))
+#endif
+
+static void notify_other_smsm(uint32_t smsm_entry,
+ uint32_t old_val, uint32_t new_val)
+{
+ uint32_t *smsm_intr_mask;
+ uint32_t *smsm_intr_mux;
+
+ smsm_intr_mask = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS *
+ sizeof(uint32_t));
+
+ /* older protocol don't use smsm_intr_mask,
+ but still communicates with modem */
+ if (!smsm_intr_mask ||
+ (smsm_intr_mask[smsm_entry * SMSM_NUM_HOSTS + SMSM_MODEM] &
+ (old_val ^ new_val)))
+ MSM_TRIG_A2M_INT(5);
+
+ if (smsm_intr_mask &&
+ (smsm_intr_mask[smsm_entry * SMSM_NUM_HOSTS + SMSM_Q6_I] &
+ (old_val ^ new_val))) {
+ smsm_intr_mux = smem_alloc(SMEM_SMD_SMSM_INTR_MUX,
+ SMSM_NUM_INTR_MUX *
+ sizeof(uint32_t));
+ if (smsm_intr_mux)
+ smsm_intr_mux[SMEM_APPS_Q6_SMSM]++;
+
+ MSM_TRIG_A2M_INT(8);
+ }
+}
+
+static inline void notify_other_smd(uint32_t ch_type)
+{
+ if (ch_type == SMD_APPS_MODEM)
+ MSM_TRIG_A2M_INT(0);
+ else if (ch_type == SMD_APPS_QDSP_I)
+ MSM_TRIG_A2M_INT(8);
+}
+
+void smd_diag(void)
+{
+ char *x;
+ int size;
+
+ x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
+ if (x != 0) {
+ x[SZ_DIAG_ERR_MSG - 1] = 0;
+ SMD_INFO("smem: DIAG '%s'\n", x);
+ }
+
+ x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
+ if (x != 0) {
+ x[size - 1] = 0;
+ printk(KERN_ERR "smem: CRASH LOG\n'%s'\n", x);
+ }
+}
+
+extern int (*msm_check_for_modem_crash)(void);
+
+static int check_for_modem_crash(void)
+{
+ uint32_t *smsm;
+
+ smsm = smem_find(ID_SHARED_STATE, SMSM_NUM_ENTRIES * sizeof(uint32_t));
+
+ /* if the modem's not ready yet, we have to hope for the best */
+ if (!smsm)
+ return 0;
+
+ if (smsm[SMSM_MODEM_STATE] & SMSM_RESET) {
+ pr_err("proc_comm: ARM9 has crashed\n");
+ smd_diag();
+ } else {
+ return 0;
+ }
+
+ /* hard reboot if possible FIXME
+ if (msm_reset_hook)
+ msm_reset_hook(0);
+ */
+
+ for (;;)
+ ;
+}
+
+#define SMD_SS_CLOSED 0x00000000
+#define SMD_SS_OPENING 0x00000001
+#define SMD_SS_OPENED 0x00000002
+#define SMD_SS_FLUSHING 0x00000003
+#define SMD_SS_CLOSING 0x00000004
+#define SMD_SS_RESET 0x00000005
+#define SMD_SS_RESET_OPENING 0x00000006
+
+#define SMD_BUF_SIZE 8192
+#define SMD_CHANNELS 64
+
+#define SMD_HEADER_SIZE 20
+
+
+/* the spinlock is used to synchronize between the
+** irq handler and code that mutates the channel
+** list or fiddles with channel state
+*/
+static DEFINE_SPINLOCK(smd_lock);
+static DEFINE_SPINLOCK(smem_lock);
+
+/* the mutex is used during open() and close()
+** operations to avoid races while creating or
+** destroying smd_channel structures
+*/
+static DEFINE_MUTEX(smd_creation_mutex);
+
+static int smd_initialized;
+
+/* 'type' field of smd_alloc_elm structure
+ * has the following breakup
+ * bits 0-7 -> channel type
+ * bits 8-11 -> xfer type
+ * bits 12-31 -> reserved
+ */
+struct smd_alloc_elm {
+ char name[20];
+ uint32_t cid;
+ uint32_t type;
+ uint32_t ref_count;
+};
+
+#define SMD_CHANNEL_TYPE(x) ((x) & 0x000000FF)
+#define SMD_XFER_TYPE(x) (((x) & 0x00000F00) >> 8)
+
+struct smd_half_channel {
+ unsigned state;
+ unsigned char fDSR;
+ unsigned char fCTS;
+ unsigned char fCD;
+ unsigned char fRI;
+ unsigned char fHEAD;
+ unsigned char fTAIL;
+ unsigned char fSTATE;
+ unsigned char fUNUSED;
+ unsigned tail;
+ unsigned head;
+};
+
+struct smd_channel {
+ volatile struct smd_half_channel *send;
+ volatile struct smd_half_channel *recv;
+ unsigned char *send_buf;
+ unsigned char *recv_buf;
+ unsigned buf_size;
+ struct list_head ch_list;
+
+ unsigned current_packet;
+ unsigned n;
+ void *priv;
+ void (*notify)(void *priv, unsigned flags);
+
+ int (*read)(smd_channel_t *ch, void *data, int len);
+ int (*write)(smd_channel_t *ch, const void *data, int len);
+ int (*read_avail)(smd_channel_t *ch);
+ int (*write_avail)(smd_channel_t *ch);
+ int (*read_from_cb)(smd_channel_t *ch, void *data, int len);
+
+ void (*update_state)(smd_channel_t *ch);
+ unsigned last_state;
+
+ char name[20];
+ struct platform_device pdev;
+ unsigned type;
+};
+
+static LIST_HEAD(smd_ch_closed_list);
+static LIST_HEAD(smd_ch_list);
+
+static unsigned char smd_ch_allocated[64];
+static struct work_struct probe_work;
+
+static void smd_alloc_channel(struct smd_alloc_elm *alloc_elm);
+static void *_smem_find(unsigned id, unsigned *size);
+
+static void smd_channel_probe_worker(struct work_struct *work)
+{
+ struct smd_alloc_elm *shared;
+ unsigned n;
+
+ shared = smem_find(ID_CH_ALLOC_TBL, sizeof(*shared) * 64);
+
+ BUG_ON(!shared);
+
+ for (n = 0; n < 64; n++) {
+ if (smd_ch_allocated[n])
+ continue;
+
+ /* channel should be allocated only if APPS
+ processor is involved */
+ if (SMD_CHANNEL_TYPE(shared[n].type) == SMD_MODEM_QDSP_I)
+ continue;
+ if (!shared[n].ref_count)
+ continue;
+ if (!shared[n].name[0])
+ continue;
+
+ smd_alloc_channel(&shared[n]);
+ smd_ch_allocated[n] = 1;
+ }
+}
+
+static char *chstate(unsigned n)
+{
+ switch (n) {
+ case SMD_SS_CLOSED: return "CLOSED";
+ case SMD_SS_OPENING: return "OPENING";
+ case SMD_SS_OPENED: return "OPENED";
+ case SMD_SS_FLUSHING: return "FLUSHING";
+ case SMD_SS_CLOSING: return "CLOSING";
+ case SMD_SS_RESET: return "RESET";
+ case SMD_SS_RESET_OPENING: return "ROPENING";
+ default: return "UNKNOWN";
+ }
+}
+
+/* how many bytes are available for reading */
+static int smd_stream_read_avail(struct smd_channel *ch)
+{
+ return (ch->recv->head - ch->recv->tail) & (ch->buf_size - 1);
+}
+
+/* how many bytes we are free to write */
+static int smd_stream_write_avail(struct smd_channel *ch)
+{
+ return (ch->buf_size - 1) -
+ ((ch->send->head - ch->send->tail) & (ch->buf_size - 1));
+}
+
+static int smd_packet_read_avail(struct smd_channel *ch)
+{
+ if (ch->current_packet) {
+ int n = smd_stream_read_avail(ch);
+ if (n > ch->current_packet)
+ n = ch->current_packet;
+ return n;
+ } else {
+ return 0;
+ }
+}
+
+static int smd_packet_write_avail(struct smd_channel *ch)
+{
+ int n = smd_stream_write_avail(ch);
+ return n > SMD_HEADER_SIZE ? n - SMD_HEADER_SIZE : 0;
+}
+
+static int ch_is_open(struct smd_channel *ch)
+{
+ return (ch->recv->state == SMD_SS_OPENED ||
+ ch->recv->state == SMD_SS_FLUSHING)
+ && (ch->send->state == SMD_SS_OPENED);
+}
+
+/* provide a pointer and length to readable data in the fifo */
+static unsigned ch_read_buffer(struct smd_channel *ch, void **ptr)
+{
+ unsigned head = ch->recv->head;
+ unsigned tail = ch->recv->tail;
+ *ptr = (void *) (ch->recv_buf + tail);
+
+ if (tail <= head)
+ return head - tail;
+ else
+ return ch->buf_size - tail;
+}
+
+/* advance the fifo read pointer after data from ch_read_buffer is consumed */
+static void ch_read_done(struct smd_channel *ch, unsigned count)
+{
+ BUG_ON(count > smd_stream_read_avail(ch));
+ ch->recv->tail = (ch->recv->tail + count) & (ch->buf_size - 1);
+ ch->send->fTAIL = 1;
+}
+
+/* basic read interface to ch_read_{buffer,done} used
+** by smd_*_read() and update_packet_state()
+** will read-and-discard if the _data pointer is null
+*/
+static int ch_read(struct smd_channel *ch, void *_data, int len)
+{
+ void *ptr;
+ unsigned n;
+ unsigned char *data = _data;
+ int orig_len = len;
+
+ while (len > 0) {
+ n = ch_read_buffer(ch, &ptr);
+ if (n == 0)
+ break;
+
+ if (n > len)
+ n = len;
+ if (_data)
+ memcpy(data, ptr, n);
+
+ data += n;
+ len -= n;
+ ch_read_done(ch, n);
+ }
+
+ return orig_len - len;
+}
+
+static void update_stream_state(struct smd_channel *ch)
+{
+ /* streams have no special state requiring updating */
+}
+
+static void update_packet_state(struct smd_channel *ch)
+{
+ unsigned hdr[5];
+ int r;
+
+ /* can't do anything if we're in the middle of a packet */
+ while (ch->current_packet == 0) {
+ /* discard 0 length packets if any */
+
+ /* don't bother unless we can get the full header */
+ if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
+ return;
+
+ r = ch_read(ch, hdr, SMD_HEADER_SIZE);
+ BUG_ON(r != SMD_HEADER_SIZE);
+
+ ch->current_packet = hdr[0];
+ }
+}
+
+/* provide a pointer and length to next free space in the fifo */
+static unsigned ch_write_buffer(struct smd_channel *ch, void **ptr)
+{
+ unsigned head = ch->send->head;
+ unsigned tail = ch->send->tail;
+ *ptr = (void *) (ch->send_buf + head);
+
+ if (head < tail) {
+ return tail - head - 1;
+ } else {
+ if (tail == 0)
+ return ch->buf_size - head - 1;
+ else
+ return ch->buf_size - head;
+ }
+}
+
+/* advace the fifo write pointer after freespace from ch_write_buffer is filled */
+static void ch_write_done(struct smd_channel *ch, unsigned count)
+{
+ BUG_ON(count > smd_stream_write_avail(ch));
+ ch->send->head = (ch->send->head + count) & (ch->buf_size - 1);
+ ch->send->fHEAD = 1;
+}
+
+static void ch_set_state(struct smd_channel *ch, unsigned n)
+{
+ if (n == SMD_SS_OPENED) {
+ ch->send->fDSR = 1;
+ ch->send->fCTS = 1;
+ ch->send->fCD = 1;
+ } else {
+ ch->send->fDSR = 0;
+ ch->send->fCTS = 0;
+ ch->send->fCD = 0;
+ }
+ ch->send->state = n;
+ ch->send->fSTATE = 1;
+ notify_other_smd(ch->type);
+}
+
+static void do_smd_probe(void)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ if (shared->heap_info.free_offset != last_heap_free) {
+ last_heap_free = shared->heap_info.free_offset;
+ schedule_work(&probe_work);
+ }
+}
+
+static void smd_state_change(struct smd_channel *ch,
+ unsigned last, unsigned next)
+{
+ ch->last_state = next;
+
+ SMD_INFO("SMD: ch %d %s -> %s\n", ch->n,
+ chstate(last), chstate(next));
+
+ switch (next) {
+ case SMD_SS_OPENING:
+ if (ch->send->state == SMD_SS_CLOSING ||
+ ch->send->state == SMD_SS_CLOSED) {
+ ch->recv->tail = 0;
+ ch->send->head = 0;
+ ch_set_state(ch, SMD_SS_OPENING);
+ }
+ break;
+ case SMD_SS_OPENED:
+ if (ch->send->state == SMD_SS_OPENING) {
+ ch_set_state(ch, SMD_SS_OPENED);
+ ch->notify(ch->priv, SMD_EVENT_OPEN);
+ }
+ break;
+ case SMD_SS_FLUSHING:
+ case SMD_SS_RESET:
+ /* we should force them to close? */
+ break;
+ case SMD_SS_CLOSED:
+ if (ch->send->state == SMD_SS_OPENED) {
+ ch_set_state(ch, SMD_SS_CLOSING);
+ ch->notify(ch->priv, SMD_EVENT_CLOSE);
+ }
+ break;
+ }
+}
+
+static irqreturn_t smd_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ struct smd_channel *ch;
+ int do_notify_modem = 0;
+ int do_notify_qdsp = 0;
+ unsigned ch_flags;
+ unsigned tmp;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry(ch, &smd_ch_list, ch_list) {
+ ch_flags = 0;
+ if (ch_is_open(ch)) {
+ if (ch->recv->fHEAD) {
+ ch->recv->fHEAD = 0;
+ ch_flags |= 1;
+ if (ch->type == SMD_APPS_MODEM)
+ do_notify_modem |= 1;
+ else if (ch->type == SMD_APPS_QDSP_I)
+ do_notify_qdsp |= 1;
+ }
+ if (ch->recv->fTAIL) {
+ ch->recv->fTAIL = 0;
+ ch_flags |= 2;
+ if (ch->type == SMD_APPS_MODEM)
+ do_notify_modem |= 1;
+ else if (ch->type == SMD_APPS_QDSP_I)
+ do_notify_qdsp |= 1;
+ }
+ if (ch->recv->fSTATE) {
+ ch->recv->fSTATE = 0;
+ ch_flags |= 4;
+ if (ch->type == SMD_APPS_MODEM)
+ do_notify_modem |= 1;
+ else if (ch->type == SMD_APPS_QDSP_I)
+ do_notify_qdsp |= 1;
+ }
+ }
+ tmp = ch->recv->state;
+ if (tmp != ch->last_state)
+ smd_state_change(ch, ch->last_state, tmp);
+ if (ch_flags) {
+ ch->update_state(ch);
+ ch->notify(ch->priv, SMD_EVENT_DATA);
+ }
+ }
+ if (do_notify_modem)
+ notify_other_smd(SMD_APPS_MODEM);
+
+ if (do_notify_qdsp)
+ notify_other_smd(SMD_APPS_QDSP_I);
+
+ spin_unlock_irqrestore(&smd_lock, flags);
+ do_smd_probe();
+ return IRQ_HANDLED;
+}
+
+static void smd_fake_irq_handler(unsigned long arg)
+{
+ smd_irq_handler(0, NULL);
+}
+
+static DECLARE_TASKLET(smd_fake_irq_tasklet, smd_fake_irq_handler, 0);
+
+void smd_sleep_exit(void)
+{
+ unsigned long flags;
+ struct smd_channel *ch;
+ unsigned tmp;
+ int need_int = 0;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_for_each_entry(ch, &smd_ch_list, ch_list) {
+ if (ch_is_open(ch)) {
+ if (ch->recv->fHEAD) {
+ SMD_DBG("smd_sleep_exit ch %d fHEAD "
+ "%x %x %x\n",
+ ch->n,
+ ch->recv->fHEAD,
+ ch->recv->head, ch->recv->tail);
+ need_int = 1;
+ break;
+ }
+ if (ch->recv->fTAIL) {
+ SMD_DBG("smd_sleep_exit ch %d fTAIL "
+ "%x %x %x\n",
+ ch->n,
+ ch->recv->fTAIL,
+ ch->send->head, ch->send->tail);
+ need_int = 1;
+ break;
+ }
+ if (ch->recv->fSTATE) {
+ SMD_DBG("smd_sleep_exit ch %d fSTATE %x"
+ "\n", ch->n,
+ ch->recv->fSTATE);
+ need_int = 1;
+ break;
+ }
+ tmp = ch->recv->state;
+ if (tmp != ch->last_state) {
+ SMD_DBG("smd_sleep_exit ch %d "
+ "state %x != %x\n",
+ ch->n, tmp,
+ ch->last_state);
+ need_int = 1;
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&smd_lock, flags);
+ do_smd_probe();
+ if (need_int) {
+ SMD_DBG("smd_sleep_exit need interrupt\n");
+ tasklet_schedule(&smd_fake_irq_tasklet);
+ }
+}
+
+static int smd_is_packet(struct smd_alloc_elm *alloc_elm)
+{
+ if (SMD_XFER_TYPE(alloc_elm->type) == 1)
+ return 0;
+ else if (SMD_XFER_TYPE(alloc_elm->type) == 2)
+ return 1;
+
+ /* for cases where xfer type is 0 */
+ if (!strncmp(alloc_elm->name, "DAL", 3))
+ return 0;
+
+ if (alloc_elm->cid > 4 || alloc_elm->cid == 1)
+ return 1;
+ else
+ return 0;
+}
+
+static int smd_stream_write(smd_channel_t *ch, const void *_data, int len)
+{
+ void *ptr;
+ const unsigned char *buf = _data;
+ unsigned xfer;
+ int orig_len = len;
+
+ SMD_DBG("smd_stream_write() %d -> ch%d\n", len, ch->n);
+ if (len < 0)
+ return -EINVAL;
+ else if (len == 0)
+ return 0;
+
+ while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
+ if (!ch_is_open(ch))
+ break;
+ if (xfer > len)
+ xfer = len;
+ memcpy(ptr, buf, xfer);
+ ch_write_done(ch, xfer);
+ len -= xfer;
+ buf += xfer;
+ if (len == 0)
+ break;
+ }
+
+ if (orig_len - len)
+ notify_other_smd(ch->type);
+
+ return orig_len - len;
+}
+
+static int smd_packet_write(smd_channel_t *ch, const void *_data, int len)
+{
+ int ret;
+ unsigned hdr[5];
+
+ SMD_DBG("smd_packet_write() %d -> ch%d\n", len, ch->n);
+ if (len < 0)
+ return -EINVAL;
+ else if (len == 0)
+ return 0;
+
+ if (smd_stream_write_avail(ch) < (len + SMD_HEADER_SIZE))
+ return -ENOMEM;
+
+ hdr[0] = len;
+ hdr[1] = hdr[2] = hdr[3] = hdr[4] = 0;
+
+
+ ret = smd_stream_write(ch, hdr, sizeof(hdr));
+ if (ret < 0 || ret != sizeof(hdr)) {
+ SMD_DBG("%s failed to write pkt header: "
+ "%d returned\n", __func__, ret);
+ return -1;
+ }
+
+
+ ret = smd_stream_write(ch, _data, len);
+ if (ret < 0 || ret != len) {
+ SMD_DBG("%s failed to write pkt data: "
+ "%d returned\n", __func__, ret);
+ return ret;
+ }
+
+ return len;
+}
+
+static int smd_stream_read(smd_channel_t *ch, void *data, int len)
+{
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ r = ch_read(ch, data, len);
+ if (r > 0)
+ notify_other_smd(ch->type);
+
+ return r;
+}
+
+static int smd_packet_read(smd_channel_t *ch, void *data, int len)
+{
+ unsigned long flags;
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ if (len > ch->current_packet)
+ len = ch->current_packet;
+
+ r = ch_read(ch, data, len);
+ if (r > 0)
+ notify_other_smd(ch->type);
+
+ spin_lock_irqsave(&smd_lock, flags);
+ ch->current_packet -= r;
+ update_packet_state(ch);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return r;
+}
+
+static int smd_packet_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+ int r;
+
+ if (len < 0)
+ return -EINVAL;
+
+ if (len > ch->current_packet)
+ len = ch->current_packet;
+
+ r = ch_read(ch, data, len);
+ if (r > 0)
+ notify_other_smd(ch->type);
+
+ ch->current_packet -= r;
+ update_packet_state(ch);
+
+ return r;
+}
+
+static struct smd_channel *_smd_alloc_channel_v1(uint32_t cid)
+{
+ struct smd_channel *ch;
+ void *shared;
+
+ shared = smem_alloc(ID_SMD_CHANNELS + cid,
+ 2 * (sizeof(struct smd_half_channel) +
+ SMD_BUF_SIZE));
+ if (!shared) {
+ pr_err("smd_alloc_channel: cid %d does not exist\n", cid);
+ return NULL;
+ }
+
+ ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
+ if (ch) {
+ ch->send = shared;
+ ch->send_buf = shared + sizeof(struct smd_half_channel);
+ ch->recv = (struct smd_half_channel *)
+ (ch->send_buf + SMD_BUF_SIZE);
+ ch->recv_buf = (unsigned char *)ch->recv +
+ sizeof(struct smd_half_channel);
+ ch->buf_size = SMD_BUF_SIZE;
+ ch->n = cid;
+ } else
+ pr_err("smd_alloc_channel: out of memory\n");
+
+ return ch;
+}
+
+static struct smd_channel *_smd_alloc_channel_v2(uint32_t cid)
+{
+ struct smd_channel *ch;
+ void *shared, *shared_fifo;
+ unsigned size;
+
+ shared = smem_alloc(ID_SMD_CHANNELS + cid,
+ 2 * sizeof(struct smd_half_channel));
+ if (!shared) {
+ pr_err("smd_alloc_channel: cid %d does not exist\n", cid);
+ return NULL;
+ }
+
+ shared_fifo = _smem_find(SMEM_SMD_FIFO_BASE_ID + cid, &size);
+ if (!shared_fifo) {
+ pr_err("smd_alloc_channel: cid %d fifo do not exist\n", cid);
+ return NULL;
+ }
+ SMD_INFO("smd_alloc_channel: cid %d fifo found; size = %d\n",
+ cid, (size / 2));
+
+ ch = kzalloc(sizeof(struct smd_channel), GFP_KERNEL);
+ if (ch) {
+ ch->send = shared;
+ ch->recv = shared + sizeof(struct smd_half_channel);
+ ch->send_buf = shared_fifo;
+ ch->recv_buf = shared_fifo + (size / 2);
+ ch->buf_size = size / 2;
+ ch->n = cid;
+ } else
+ pr_err("smd_alloc_channel() out of memory\n");
+
+ return ch;
+}
+
+static void smd_alloc_channel(struct smd_alloc_elm *alloc_elm)
+{
+ struct smd_channel *ch;
+ uint32_t *smd_ver;
+
+ smd_ver = smem_alloc(SMEM_VERSION_SMD, 32 * sizeof(uint32_t));
+
+ if (smd_ver && ((smd_ver[VERSION_MODEM] >> 16) >= 1))
+ ch = _smd_alloc_channel_v2(alloc_elm->cid);
+ else
+ ch = _smd_alloc_channel_v1(alloc_elm->cid);
+
+ if (ch == 0)
+ return;
+
+ ch->type = SMD_CHANNEL_TYPE(alloc_elm->type);
+ memcpy(ch->name, alloc_elm->name, 20);
+ ch->name[19] = 0;
+
+ if (smd_is_packet(alloc_elm)) {
+ ch->read = smd_packet_read;
+ ch->write = smd_packet_write;
+ ch->read_avail = smd_packet_read_avail;
+ ch->write_avail = smd_packet_write_avail;
+ ch->update_state = update_packet_state;
+ ch->read_from_cb = smd_packet_read_from_cb;
+ } else {
+ ch->read = smd_stream_read;
+ ch->write = smd_stream_write;
+ ch->read_avail = smd_stream_read_avail;
+ ch->write_avail = smd_stream_write_avail;
+ ch->update_state = update_stream_state;
+ ch->read_from_cb = smd_stream_read;
+ }
+
+ ch->pdev.name = ch->name;
+ ch->pdev.id = ch->type;
+
+ SMD_INFO("smd_alloc_channel() '%s' cid=%d\n",
+ ch->name, ch->n);
+
+ mutex_lock(&smd_creation_mutex);
+ list_add(&ch->ch_list, &smd_ch_closed_list);
+ mutex_unlock(&smd_creation_mutex);
+
+ platform_device_register(&ch->pdev);
+}
+
+static void do_nothing_notify(void *priv, unsigned flags)
+{
+}
+
+struct smd_channel *smd_get_channel(const char *name, uint32_t type)
+{
+ struct smd_channel *ch;
+
+ mutex_lock(&smd_creation_mutex);
+ list_for_each_entry(ch, &smd_ch_closed_list, ch_list) {
+ if (!strcmp(name, ch->name) &&
+ (type == ch->type)) {
+ list_del(&ch->ch_list);
+ mutex_unlock(&smd_creation_mutex);
+ return ch;
+ }
+ }
+ mutex_unlock(&smd_creation_mutex);
+
+ return NULL;
+}
+
+int smd_named_open_on_edge(const char *name, uint32_t edge,
+ smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned))
+{
+ struct smd_channel *ch;
+ unsigned long flags;
+
+ if (smd_initialized == 0) {
+ SMD_INFO("smd_open() before smd_init()\n");
+ return -ENODEV;
+ }
+
+ SMD_DBG("smd_open('%s', %p, %p)\n", name, priv, notify);
+
+ ch = smd_get_channel(name, edge);
+ if (!ch)
+ return -ENODEV;
+
+ if (notify == 0)
+ notify = do_nothing_notify;
+
+ ch->notify = notify;
+ ch->current_packet = 0;
+ ch->last_state = SMD_SS_CLOSED;
+ ch->priv = priv;
+
+ *_ch = ch;
+
+ SMD_DBG("smd_open: opening '%s'\n", ch->name);
+
+ spin_lock_irqsave(&smd_lock, flags);
+ list_add(&ch->ch_list, &smd_ch_list);
+ SMD_DBG("%s: opening ch %d\n", __func__, ch->n);
+
+ smd_state_change(ch, ch->last_state, SMD_SS_OPENING);
+
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_named_open_on_edge);
+
+
+int smd_open(const char *name, smd_channel_t **_ch,
+ void *priv, void (*notify)(void *, unsigned))
+{
+ return smd_named_open_on_edge(name, SMD_APPS_MODEM, _ch, priv,
+ notify);
+}
+EXPORT_SYMBOL(smd_open);
+
+int smd_close(smd_channel_t *ch)
+{
+ unsigned long flags;
+
+ SMD_INFO("smd_close(%p)\n", ch);
+
+ if (ch == 0)
+ return -1;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ ch->notify = do_nothing_notify;
+ list_del(&ch->ch_list);
+ ch_set_state(ch, SMD_SS_CLOSED);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ mutex_lock(&smd_creation_mutex);
+ list_add(&ch->ch_list, &smd_ch_closed_list);
+ mutex_unlock(&smd_creation_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(smd_close);
+
+int smd_read(smd_channel_t *ch, void *data, int len)
+{
+ return ch->read(ch, data, len);
+}
+EXPORT_SYMBOL(smd_read);
+
+int smd_read_from_cb(smd_channel_t *ch, void *data, int len)
+{
+ return ch->read_from_cb(ch, data, len);
+}
+EXPORT_SYMBOL(smd_read_from_cb);
+
+int smd_write(smd_channel_t *ch, const void *data, int len)
+{
+ return ch->write(ch, data, len);
+}
+EXPORT_SYMBOL(smd_write);
+
+int smd_read_avail(smd_channel_t *ch)
+{
+ return ch->read_avail(ch);
+}
+EXPORT_SYMBOL(smd_read_avail);
+
+int smd_write_avail(smd_channel_t *ch)
+{
+ return ch->write_avail(ch);
+}
+EXPORT_SYMBOL(smd_write_avail);
+
+int smd_wait_until_readable(smd_channel_t *ch, int bytes)
+{
+ return -1;
+}
+
+int smd_wait_until_writable(smd_channel_t *ch, int bytes)
+{
+ return -1;
+}
+
+int smd_cur_packet_size(smd_channel_t *ch)
+{
+ return ch->current_packet;
+}
+
+int smd_tiocmget(smd_channel_t *ch)
+{
+ return (ch->recv->fDSR ? TIOCM_DSR : 0) |
+ (ch->recv->fCTS ? TIOCM_CTS : 0) |
+ (ch->recv->fCD ? TIOCM_CD : 0) |
+ (ch->recv->fRI ? TIOCM_RI : 0) |
+ (ch->send->fCTS ? TIOCM_RTS : 0) |
+ (ch->send->fDSR ? TIOCM_DTR : 0);
+}
+
+int smd_tiocmset(smd_channel_t *ch, unsigned int set, unsigned int clear)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&smd_lock, flags);
+ if (set & TIOCM_DTR)
+ ch->send->fDSR = 1;
+
+ if (set & TIOCM_RTS)
+ ch->send->fCTS = 1;
+
+ if (clear & TIOCM_DTR)
+ ch->send->fDSR = 0;
+
+ if (clear & TIOCM_RTS)
+ ch->send->fCTS = 0;
+
+ ch->send->fSTATE = 1;
+ barrier();
+ notify_other_smd(ch->type);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+ return 0;
+}
+
+
+/* -------------------------------------------------------------------------- */
+
+void *smem_alloc(unsigned id, unsigned size)
+{
+ return smem_find(id, size);
+}
+
+void *smem_get_entry(unsigned id, unsigned *size)
+{
+ return _smem_find(id, size);
+}
+
+static void *_smem_find(unsigned id, unsigned *size)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ struct smem_heap_entry *toc = shared->heap_toc;
+
+ if (id >= SMEM_NUM_ITEMS)
+ return 0;
+
+ if (toc[id].allocated) {
+ *size = toc[id].size;
+ return (void *) (MSM_SHARED_RAM_BASE + toc[id].offset);
+ }
+
+ return 0;
+}
+
+void *smem_find(unsigned id, unsigned size_in)
+{
+ unsigned size;
+ void *ptr;
+
+ ptr = _smem_find(id, &size);
+ if (!ptr)
+ return 0;
+
+ size_in = ALIGN(size_in, 8);
+ if (size_in != size) {
+ pr_err("smem_find(%d, %d): wrong size %d\n",
+ id, size_in, size);
+ return 0;
+ }
+
+ return ptr;
+}
+
+static int smem_init(void)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ uint32_t *smsm, i;
+
+ smsm = smem_alloc(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t));
+
+ if (smsm) {
+ smsm[SMSM_APPS_STATE] = 0;
+ if ((shared->version[VERSION_MODEM] >> 16) >= 0xB)
+ smsm[SMSM_APPS_DEM_I] = 0;
+ }
+
+ smsm = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t));
+
+ if (smsm)
+ for (i = 0; i < SMSM_NUM_ENTRIES; i++)
+ smsm[i * SMSM_NUM_HOSTS + SMSM_APPS] = 0xffffffff;
+
+ return 0;
+}
+
+void smsm_reset_modem(unsigned mode)
+{
+ if (mode == SMSM_SYSTEM_DOWNLOAD) {
+ mode = SMSM_RESET | SMSM_SYSTEM_DOWNLOAD;
+ } else if (mode == SMSM_MODEM_WAIT) {
+ mode = SMSM_RESET | SMSM_MODEM_WAIT;
+ } else { /* reset_mode is SMSM_RESET or default */
+ mode = SMSM_RESET;
+ }
+
+ smsm_change_state(SMSM_APPS_STATE, mode, mode);
+}
+EXPORT_SYMBOL(smsm_reset_modem);
+
+void smsm_reset_modem_cont(void)
+{
+ unsigned long flags;
+ uint32_t *smsm;
+
+ spin_lock_irqsave(&smem_lock, flags);
+ smsm = smem_alloc(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t));
+ smsm[SMSM_APPS_STATE] &= ~SMSM_MODEM_WAIT;
+ spin_unlock_irqrestore(&smem_lock, flags);
+}
+EXPORT_SYMBOL(smsm_reset_modem_cont);
+
+static irqreturn_t smsm_irq_handler(int irq, void *data)
+{
+ unsigned long flags;
+ uint32_t *smsm;
+ static uint32_t prev_smem_q6_apps_smsm;
+
+ if (irq == INT_ADSP_A11) {
+ smsm = smem_alloc(SMEM_SMD_SMSM_INTR_MUX,
+ SMSM_NUM_INTR_MUX * sizeof(uint32_t));
+ if (!smsm ||
+ (smsm[SMEM_Q6_APPS_SMSM] == prev_smem_q6_apps_smsm))
+ return IRQ_HANDLED;
+
+ prev_smem_q6_apps_smsm = smsm[SMEM_Q6_APPS_SMSM];
+ }
+
+ spin_lock_irqsave(&smem_lock, flags);
+ smsm = smem_alloc(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t));
+
+ if (smsm == 0) {
+ SMSM_INFO("<SM NO STATE>\n");
+ } else {
+ unsigned old_apps, apps;
+ unsigned modm = smsm[SMSM_MODEM_STATE];
+
+ old_apps = apps = smsm[SMSM_APPS_STATE];
+
+ SMSM_DBG("<SM %08x %08x>\n", apps, modm);
+ if (apps & SMSM_RESET) {
+ /* If we get an interrupt and the apps SMSM_RESET
+ bit is already set, the modem is acking the
+ app's reset ack. */
+ apps &= ~SMSM_RESET;
+
+ /* Issue a fake irq to handle any
+ * smd state changes during reset
+ */
+ smd_fake_irq_handler(0);
+
+ /* queue modem restart notify chain */
+ modem_queue_start_reset_notify();
+
+ } else if (modm & SMSM_RESET) {
+ apps |= SMSM_RESET;
+ } else {
+ apps |= SMSM_INIT;
+ if (modm & SMSM_SMDINIT)
+ apps |= SMSM_SMDINIT;
+ if (modm & SMSM_RPCINIT)
+ apps |= SMSM_RPCINIT;
+ if ((apps & (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT)) ==
+ (SMSM_INIT | SMSM_SMDINIT | SMSM_RPCINIT))
+ apps |= SMSM_RUN;
+ }
+
+ if (smsm[SMSM_APPS_STATE] != apps) {
+ SMSM_DBG("<SM %08x NOTIFY>\n", apps);
+ smsm[SMSM_APPS_STATE] = apps;
+ do_smd_probe();
+ notify_other_smsm(SMSM_APPS_STATE, old_apps, apps);
+ }
+ }
+ spin_unlock_irqrestore(&smem_lock, flags);
+ return IRQ_HANDLED;
+}
+
+int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ unsigned long flags;
+ uint32_t *smsm;
+ uint32_t old_state;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ printk(KERN_ERR "smsm_change_state: Invalid entry %d",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ smsm = smem_alloc(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t));
+
+ if (smsm) {
+ old_state = smsm[smsm_entry];
+ smsm[smsm_entry] = (smsm[smsm_entry] & ~clear_mask) | set_mask;
+ SMSM_DBG("smsm_change_state %x\n", smsm[smsm_entry]);
+ notify_other_smsm(SMSM_APPS_STATE, old_state, smsm[smsm_entry]);
+ }
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ if (smsm == NULL) {
+ printk(KERN_ERR "smsm_change_state <SM NO STATE>\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ uint32_t *smsm;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ printk(KERN_ERR "smsm_change_state: Invalid entry %d\n",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ smsm = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t));
+
+ if (smsm) {
+ smsm[smsm_entry * SMSM_NUM_HOSTS + SMSM_APPS] =
+ (smsm[smsm_entry * SMSM_NUM_HOSTS + SMSM_APPS] &
+ ~clear_mask) | set_mask;
+ SMSM_INFO("smsm_entry %d, new intr_mask %x\n", smsm_entry,
+ smsm[smsm_entry * SMSM_NUM_HOSTS + SMSM_APPS]);
+ } else {
+ printk(KERN_ERR "smsm_change_intr_mask <SM NO INTR_MASK>\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask)
+{
+ uint32_t *smsm;
+
+ if ((smsm_entry >= SMSM_NUM_ENTRIES) || (!intr_mask)) {
+ printk(KERN_ERR "smsm_change_state: Invalid input "
+ "entry %d, mask 0x%x\n",
+ smsm_entry, (unsigned int)intr_mask);
+ return -EINVAL;
+ }
+
+ smsm = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t));
+
+ if (smsm) {
+ *intr_mask = smsm[smsm_entry * SMSM_NUM_HOSTS + SMSM_APPS];
+ } else {
+ printk(KERN_ERR "smsm_change_intr_mask <SM NO INTR_MASK>\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+uint32_t smsm_get_state(uint32_t smsm_entry)
+{
+ unsigned long flags;
+ uint32_t *smsm;
+ uint32_t rv;
+
+ if (smsm_entry >= SMSM_NUM_ENTRIES) {
+ printk(KERN_ERR "smsm_change_state: Invalid entry %d",
+ smsm_entry);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ smsm = smem_alloc(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t));
+
+ if (smsm)
+ rv = smsm[smsm_entry];
+ else
+ rv = 0;
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+
+ if (smsm == NULL)
+ printk(KERN_ERR "smsm_get_state <SM NO STATE>\n");
+ return rv;
+
+}
+
+#define MAX_NUM_SLEEP_CLIENTS 64
+#define MAX_SLEEP_NAME_LEN 8
+
+#define NUM_GPIO_INT_REGISTERS 6
+#define GPIO_SMEM_NUM_GROUPS 2
+#define GPIO_SMEM_MAX_PC_INTERRUPTS 8
+struct tramp_gpio_save {
+ unsigned int enable;
+ unsigned int detect;
+ unsigned int polarity;
+};
+
+struct tramp_gpio_smem {
+ uint16_t num_fired[GPIO_SMEM_NUM_GROUPS];
+ uint16_t fired[GPIO_SMEM_NUM_GROUPS][GPIO_SMEM_MAX_PC_INTERRUPTS];
+ uint32_t enabled[NUM_GPIO_INT_REGISTERS];
+ uint32_t detection[NUM_GPIO_INT_REGISTERS];
+ uint32_t polarity[NUM_GPIO_INT_REGISTERS];
+};
+
+/*
+ * Print debug information on shared memory sleep variables
+ */
+void smsm_print_sleep_info(uint32_t sleep_delay, uint32_t sleep_limit,
+ uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending_irqs)
+{
+ unsigned long flags;
+ uint32_t *ptr;
+ struct tramp_gpio_smem *gpio;
+
+ spin_lock_irqsave(&smem_lock, flags);
+
+ printk(KERN_ERR "SMEM_SMSM_SLEEP_DELAY: %x\n", sleep_delay);
+ printk(KERN_ERR "SMEM_SMSM_LIMIT_SLEEP: %x\n", sleep_limit);
+
+ ptr = smem_alloc(SMEM_SLEEP_POWER_COLLAPSE_DISABLED, sizeof(*ptr));
+ if (ptr)
+ printk(KERN_ERR "SMEM_SLEEP_POWER_COLLAPSE_DISABLED: %x\n", *ptr);
+ else
+ printk(KERN_ERR "SMEM_SLEEP_POWER_COLLAPSE_DISABLED: missing\n");
+
+ printk(KERN_ERR "SMEM_SMSM_INT_INFO %x %x %x\n",
+ irq_mask, pending_irqs, wakeup_reason);
+
+ gpio = smem_alloc(SMEM_GPIO_INT, sizeof(*gpio));
+ if (gpio) {
+ int i;
+ for (i = 0; i < NUM_GPIO_INT_REGISTERS; i++) {
+ printk(KERN_ERR "SMEM_GPIO_INT: %d: e %x d %x p %x\n",
+ i, gpio->enabled[i], gpio->detection[i],
+ gpio->polarity[i]);
+ }
+ for (i = 0; i < GPIO_SMEM_NUM_GROUPS; i++) {
+ printk(KERN_ERR "SMEM_GPIO_INT: %d: f %d: %d %d...\n",
+ i, gpio->num_fired[i], gpio->fired[i][0],
+ gpio->fired[i][1]);
+ }
+ } else
+ printk(KERN_ERR "SMEM_GPIO_INT: missing\n");
+
+#if 0
+ ptr = smem_alloc(SMEM_SLEEP_STATIC,
+ 2 * MAX_NUM_SLEEP_CLIENTS * (MAX_SLEEP_NAME_LEN + 1));
+ if (ptr)
+ printk(KERN_ERR "SMEM_SLEEP_STATIC: %x %x %x %x\n",
+ ptr[0], ptr[1], ptr[2], ptr[3]);
+ else
+ printk(KERN_ERR "SMEM_SLEEP_STATIC: missing\n");
+#endif
+
+ spin_unlock_irqrestore(&smem_lock, flags);
+}
+
+int smd_core_init(void)
+{
+ int r;
+ SMD_INFO("smd_core_init()\n");
+
+ r = request_irq(INT_A9_M2A_0, smd_irq_handler,
+ IRQF_TRIGGER_RISING, "smd_dev", 0);
+ if (r < 0)
+ return r;
+ r = enable_irq_wake(INT_A9_M2A_0);
+ if (r < 0)
+ printk(KERN_ERR "smd_core_init: "
+ "enable_irq_wake failed for INT_A9_M2A_0\n");
+
+ r = request_irq(INT_A9_M2A_5, smsm_irq_handler,
+ IRQF_TRIGGER_RISING, "smsm_dev", 0);
+ if (r < 0) {
+ free_irq(INT_A9_M2A_0, 0);
+ return r;
+ }
+
+ r = enable_irq_wake(INT_A9_M2A_5);
+ if (r < 0)
+ printk(KERN_ERR "smd_core_init: "
+ "enable_irq_wake failed for INT_A9_M2A_5\n");
+
+ r = request_irq(INT_ADSP_A11, smd_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_SHARED, "smd_dev",
+ smd_irq_handler);
+ if (r < 0)
+ printk(KERN_ERR "smd_core_init: "
+ "request_irq failed for INT_ADSP_A11\n");
+
+ r = request_irq(INT_ADSP_A11, smsm_irq_handler,
+ IRQF_TRIGGER_RISING | IRQF_SHARED, "smsm_dev",
+ smsm_irq_handler);
+ if (r < 0)
+ printk(KERN_ERR "smd_core_init: "
+ "request_irq failed for INT_ADSP_A11\n");
+
+ r = enable_irq_wake(INT_ADSP_A11);
+ if (r < 0)
+ printk(KERN_ERR "smd_core_init: "
+ "enable_irq_wake failed for INT_ADSP_A11\n");
+
+ /* we may have missed a signal while booting -- fake
+ * an interrupt to make sure we process any existing
+ * state
+ */
+ smsm_irq_handler(0, 0);
+
+ SMD_INFO("smd_core_init() done\n");
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int debug_f3(char *buf, int max)
+{
+ char *x;
+ int size;
+ int i = 0, j = 0;
+ unsigned cols = 0;
+ char str[4*sizeof(unsigned)+1] = {0};
+
+ i += scnprintf(buf + i, max - i,
+ "Printing to log\n");
+
+ x = smem_get_entry(SMEM_ERR_F3_TRACE_LOG, &size);
+ if (x != 0) {
+ printk(KERN_ERR "smem: F3 TRACE LOG\n");
+ while (size > 0) {
+ if (size >= sizeof(unsigned)) {
+ printk(KERN_ERR "%08x", *((unsigned *) x));
+ for (j = 0; j < sizeof(unsigned); ++j)
+ if (isprint(*(x+j)))
+ str[cols*sizeof(unsigned) + j]
+ = *(x+j);
+ else
+ str[cols*sizeof(unsigned) + j]
+ = '-';
+ x += sizeof(unsigned);
+ size -= sizeof(unsigned);
+ } else {
+ while (size-- > 0)
+ printk(KERN_ERR "%02x",
+ (unsigned) *x++);
+ break;
+ }
+ if (cols == 3) {
+ cols = 0;
+ str[4*sizeof(unsigned)] = 0;
+ printk(KERN_ERR " %s\n", str);
+ str[0] = 0;
+ } else {
+ cols++;
+ printk(KERN_ERR " ");
+ }
+ }
+ printk(KERN_ERR "\n");
+ }
+
+ return max;
+}
+
+static int debug_diag(char *buf, int max)
+{
+ int i = 0;
+
+ i += scnprintf(buf + i, max - i,
+ "Printing to log\n");
+ smd_diag();
+
+ return i;
+}
+
+static int debug_modem_err_f3(char *buf, int max)
+{
+ char *x;
+ int size;
+ int i = 0, j = 0;
+ unsigned cols = 0;
+ char str[4*sizeof(unsigned)+1] = {0};
+
+ x = smem_get_entry(SMEM_ERR_F3_TRACE_LOG, &size);
+ if (x != 0) {
+ printk(KERN_ERR "smem: F3 TRACE LOG\n");
+ while (size > 0 && max - i) {
+ if (size >= sizeof(unsigned)) {
+ i += scnprintf(buf + i, max - i, "%08x",
+ *((unsigned *) x));
+ for (j = 0; j < sizeof(unsigned); ++j)
+ if (isprint(*(x+j)))
+ str[cols*sizeof(unsigned) + j]
+ = *(x+j);
+ else
+ str[cols*sizeof(unsigned) + j]
+ = '-';
+ x += sizeof(unsigned);
+ size -= sizeof(unsigned);
+ } else {
+ while (size-- > 0 && max - i)
+ i += scnprintf(buf + i, max - i,
+ "%02x",
+ (unsigned) *x++);
+ break;
+ }
+ if (cols == 3) {
+ cols = 0;
+ str[4*sizeof(unsigned)] = 0;
+ i += scnprintf(buf + i, max - i, " %s\n",
+ str);
+ str[0] = 0;
+ } else {
+ cols++;
+ i += scnprintf(buf + i, max - i, " ");
+ }
+ }
+ i += scnprintf(buf + i, max - i, "\n");
+ }
+
+ return i;
+}
+
+static int debug_modem_err(char *buf, int max)
+{
+ char *x;
+ int size;
+ int i = 0;
+
+ x = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
+ if (x != 0) {
+ x[SZ_DIAG_ERR_MSG - 1] = 0;
+ i += scnprintf(buf + i, max - i,
+ "smem: DIAG '%s'\n", x);
+ }
+
+ x = smem_get_entry(SMEM_ERR_CRASH_LOG, &size);
+ if (x != 0) {
+ x[size - 1] = 0;
+ i += scnprintf(buf + i, max - i,
+ "smem: CRASH LOG\n'%s'\n", x);
+ }
+ i += scnprintf(buf + i, max - i, "\n");
+
+ return i;
+}
+
+static int dump_ch(char *buf, int max, int n,
+ struct smd_half_channel *s,
+ struct smd_half_channel *r)
+{
+ return scnprintf(
+ buf, max,
+ "ch%02d:"
+ " %8s(%04d/%04d) %c%c%c%c%c%c%c <->"
+ " %8s(%04d/%04d) %c%c%c%c%c%c%c\n", n,
+ chstate(s->state), s->tail, s->head,
+ s->fDSR ? 'D' : 'd',
+ s->fCTS ? 'C' : 'c',
+ s->fCD ? 'C' : 'c',
+ s->fRI ? 'I' : 'i',
+ s->fHEAD ? 'W' : 'w',
+ s->fTAIL ? 'R' : 'r',
+ s->fSTATE ? 'S' : 's',
+ chstate(r->state), r->tail, r->head,
+ r->fDSR ? 'D' : 'd',
+ r->fCTS ? 'R' : 'r',
+ r->fCD ? 'C' : 'c',
+ r->fRI ? 'I' : 'i',
+ r->fHEAD ? 'W' : 'w',
+ r->fTAIL ? 'R' : 'r',
+ r->fSTATE ? 'S' : 's'
+ );
+}
+
+static int debug_read_diag_msg(char *buf, int max)
+{
+ char *msg;
+ int i = 0;
+
+ msg = smem_find(ID_DIAG_ERR_MSG, SZ_DIAG_ERR_MSG);
+
+ if (msg) {
+ msg[SZ_DIAG_ERR_MSG - 1] = 0;
+ i += scnprintf(buf + i, max - i, "diag: '%s'\n", msg);
+ }
+ return i;
+}
+
+static int debug_read_mem(char *buf, int max)
+{
+ unsigned n;
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ struct smem_heap_entry *toc = shared->heap_toc;
+ int i = 0;
+
+ i += scnprintf(buf + i, max - i,
+ "heap: init=%d free=%d remain=%d\n",
+ shared->heap_info.initialized,
+ shared->heap_info.free_offset,
+ shared->heap_info.heap_remaining);
+
+ for (n = 0; n < SMD_HEAP_SIZE; n++) {
+ if (toc[n].allocated == 0)
+ continue;
+ i += scnprintf(buf + i, max - i,
+ "%04d: offset %08x size %08x\n",
+ n, toc[n].offset, toc[n].size);
+ }
+ return i;
+}
+
+static int debug_read_ch_v1(char *buf, int max)
+{
+ void *shared;
+ int n, i = 0;
+
+ for (n = 0; n < SMD_CHANNELS; n++) {
+ shared = smem_find(ID_SMD_CHANNELS + n,
+ 2 * (sizeof(struct smd_half_channel) +
+ SMD_BUF_SIZE));
+
+ if (shared == 0)
+ continue;
+ i += dump_ch(buf + i, max - i, n, shared,
+ (shared + sizeof(struct smd_half_channel) +
+ SMD_BUF_SIZE));
+ }
+
+ return i;
+}
+
+static int debug_read_ch_v2(char *buf, int max)
+{
+ void *shared;
+ int n, i = 0;
+
+ for (n = 0; n < SMD_CHANNELS; n++) {
+ shared = smem_find(ID_SMD_CHANNELS + n,
+ 2 * sizeof(struct smd_half_channel));
+
+ if (shared == 0)
+ continue;
+ i += dump_ch(buf + i, max - i, n, shared,
+ (shared + sizeof(struct smd_half_channel)));
+ }
+
+ return i;
+}
+
+static int debug_read_smem_version(char *buf, int max)
+{
+ struct smem_shared *shared = (void *) MSM_SHARED_RAM_BASE;
+ uint32_t n, version, i = 0;
+
+ for (n = 0; n < 32; n++) {
+ version = shared->version[n];
+ i += scnprintf(buf + i, max - i,
+ "entry %d: smem = %d proc_comm = %d\n", n,
+ version >> 16,
+ version & 0xffff);
+ }
+
+ return i;
+}
+
+static int debug_read_smd_version(char *buf, int max)
+{
+ uint32_t *smd_ver;
+ uint32_t n, version, i = 0;
+
+ smd_ver = smem_alloc(SMEM_VERSION_SMD, 32 * sizeof(uint32_t));
+
+ if (smd_ver)
+ for (n = 0; n < 32; n++) {
+ version = smd_ver[n];
+ i += scnprintf(buf + i, max - i,
+ "entry %d: %d.%d\n", n,
+ version >> 16,
+ version & 0xffff);
+ }
+
+ return i;
+}
+
+static int debug_read_alloc_tbl(char *buf, int max)
+{
+ struct smd_alloc_elm *shared;
+ int n, i = 0;
+
+ shared = smem_find(ID_CH_ALLOC_TBL, sizeof(struct smd_alloc_elm[64]));
+
+ BUG_ON(!shared);
+
+ for (n = 0; n < 64; n++) {
+ i += scnprintf(buf + i, max - i,
+ "name=%s cid=%d ch type=%d "
+ "xfer type=%d ref_count=%d\n",
+ shared[n].name,
+ shared[n].cid,
+ SMD_CHANNEL_TYPE(shared[n].type),
+ SMD_XFER_TYPE(shared[n].type),
+ shared[n].ref_count);
+ }
+
+ return i;
+}
+
+static int debug_read_smsm_state(char *buf, int max)
+{
+ uint32_t *smsm;
+ int n, i = 0;
+
+ smsm = smem_find(ID_SHARED_STATE,
+ SMSM_NUM_ENTRIES * sizeof(uint32_t));
+
+ if (smsm)
+ for (n = 0; n < SMSM_NUM_ENTRIES; n++)
+ i += scnprintf(buf + i, max - i, "entry %d: 0x%08x\n",
+ n, smsm[n]);
+
+ return i;
+
+}
+
+static int debug_read_intr_mask(char *buf, int max)
+{
+ uint32_t *smsm;
+ int m, n, i = 0;
+
+ smsm = smem_alloc(SMEM_SMSM_CPU_INTR_MASK,
+ SMSM_NUM_ENTRIES * SMSM_NUM_HOSTS * sizeof(uint32_t));
+
+ if (smsm)
+ for (m = 0; m < SMSM_NUM_ENTRIES; m++) {
+ i += scnprintf(buf + i, max - i, "entry %d:", m);
+ for (n = 0; n < SMSM_NUM_HOSTS; n++)
+ i += scnprintf(buf + i, max - i,
+ " host %d: 0x%08x",
+ n, smsm[m * SMSM_NUM_HOSTS + n]);
+ i += scnprintf(buf + i, max - i, "\n");
+ }
+
+ return i;
+}
+
+static int debug_read_intr_mux(char *buf, int max)
+{
+ uint32_t *smsm;
+ int n, i = 0;
+
+ smsm = smem_alloc(SMEM_SMD_SMSM_INTR_MUX,
+ SMSM_NUM_INTR_MUX * sizeof(uint32_t));
+
+ if (smsm)
+ for (n = 0; n < SMSM_NUM_INTR_MUX; n++)
+ i += scnprintf(buf + i, max - i, "entry %d: %d\n",
+ n, smsm[n]);
+
+ return i;
+}
+
+#define DEBUG_BUFMAX 4096
+static char debug_buffer[DEBUG_BUFMAX];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int (*fill)(char *buf, int max) = file->private_data;
+ int bsize = fill(debug_buffer, DEBUG_BUFMAX);
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ int (*fill)(char *buf, int max))
+{
+ debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
+static void smd_debugfs_init(void)
+{
+ struct dentry *dent;
+ uint32_t *smd_ver;
+
+ dent = debugfs_create_dir("smd", 0);
+ if (IS_ERR(dent))
+ return;
+
+ smd_ver = smem_alloc(SMEM_VERSION_SMD, 32 * sizeof(uint32_t));
+
+ if (smd_ver && ((smd_ver[VERSION_MODEM] >> 16) >= 1))
+ debug_create("ch", 0444, dent, debug_read_ch_v2);
+ else
+ debug_create("ch", 0444, dent, debug_read_ch_v1);
+
+ debug_create("diag", 0444, dent, debug_read_diag_msg);
+ debug_create("mem", 0444, dent, debug_read_mem);
+ debug_create("version", 0444, dent, debug_read_smd_version);
+ debug_create("tbl", 0444, dent, debug_read_alloc_tbl);
+ debug_create("modem_err", 0444, dent, debug_modem_err);
+ debug_create("modem_err_f3", 0444, dent, debug_modem_err_f3);
+ debug_create("print_diag", 0444, dent, debug_diag);
+ debug_create("print_f3", 0444, dent, debug_f3);
+}
+
+static void smsm_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smsm", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debug_create("state", 0444, dent, debug_read_smsm_state);
+ debug_create("intr_mask", 0444, dent, debug_read_intr_mask);
+ debug_create("intr_mux", 0444, dent, debug_read_intr_mux);
+ debug_create("version", 0444, dent, debug_read_smem_version);
+}
+#else
+static void smd_debugfs_init(void) {}
+static void smsm_debugfs_init(void) {}
+#endif
+
+static int __init msm_smd_probe(struct platform_device *pdev)
+{
+ /* enable smd and smsm info messages */
+ msm_smd_debug_mask = 0xc;
+
+ SMD_INFO("smd probe\n");
+
+ INIT_WORK(&probe_work, smd_channel_probe_worker);
+
+ if (smem_init()) {
+ printk(KERN_ERR "smem_init() failed\n");
+ return -1;
+ }
+
+ if (smd_core_init()) {
+ printk(KERN_ERR "smd_core_init() failed\n");
+ return -1;
+ }
+
+ do_smd_probe();
+
+ msm_check_for_modem_crash = check_for_modem_crash;
+
+ smd_initialized = 1;
+
+ smd_debugfs_init();
+ smsm_debugfs_init();
+
+ return 0;
+}
+
+static struct platform_driver msm_smd_driver = {
+ .probe = msm_smd_probe,
+ .driver = {
+ .name = MODULE_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msm_smd_init(void)
+{
+ return platform_driver_register(&msm_smd_driver);
+}
+
+module_init(msm_smd_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Core");
+MODULE_AUTHOR("Brian Swetland <swetland@google.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/smd_ctl2.c b/arch/arm/mach-msm/smd_ctl2.c
new file mode 100644
index 000000000000..8348f94dae68
--- /dev/null
+++ b/arch/arm/mach-msm/smd_ctl2.c
@@ -0,0 +1,677 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * SMD Control Driver -- Provides a binary SMD non-muxed control port
+ * interface.
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <asm/ioctls.h>
+
+#include <mach/msm_smd.h>
+
+#include "modem_notifier.h"
+
+#define NUM_SMD_CTL_PORTS 3
+#define DEVICE_NAME "smdcntl"
+#define MAX_BUF_SIZE 2048
+
+struct smd_ctl_dev {
+ struct cdev cdev;
+ char name[9];
+ struct device *devicep;
+
+ struct smd_channel *ctl_ch;
+ struct mutex ctl_ch_lock;
+ struct mutex rx_lock;
+ struct mutex is_open_lock;
+ struct workqueue_struct *ctl_wq;
+ struct work_struct ctl_work;
+ wait_queue_head_t ctl_wait_queue;
+ wait_queue_head_t ctl_opened_wait_queue;
+
+ int i;
+
+ unsigned char tx_buf[MAX_BUF_SIZE];
+ unsigned char rx_buf[MAX_BUF_SIZE];
+ int bytes_read;
+ int is_open;
+
+ struct notifier_block nb;
+ int has_reset;
+ struct mutex has_reset_lock;
+
+} *smd_ctl_devp[NUM_SMD_CTL_PORTS];
+
+struct class *smd_ctl_classp;
+static dev_t smd_ctl_number;
+
+#define DEBUG
+#undef DEBUG
+
+#ifdef DEBUG
+#define D_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ int i; \
+ printk(KERN_ERR "%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ printk(KERN_ERR "%.2x", buf[i]); \
+ printk(KERN_ERR "\n"); \
+} while (0)
+#else
+#define D_DUMP_BUFFER(prestr, cnt, buf) do {} while (0)
+#endif
+
+#ifdef DEBUG
+#define D(x...) printk(x)
+#else
+#define D(x...) do {} while (0)
+#endif
+
+static void clean_and_signal(struct smd_ctl_dev *smd_ctl_devp)
+{
+ flush_workqueue(smd_ctl_devp->ctl_wq);
+
+ mutex_lock(&smd_ctl_devp->has_reset_lock);
+ smd_ctl_devp->has_reset = 1;
+ mutex_unlock(&smd_ctl_devp->has_reset_lock);
+
+ mutex_lock(&smd_ctl_devp->rx_lock);
+ smd_ctl_devp->bytes_read = 0;
+ mutex_unlock(&smd_ctl_devp->rx_lock);
+
+ mutex_lock(&smd_ctl_devp->is_open_lock);
+ smd_ctl_devp->is_open = 0;
+ mutex_unlock(&smd_ctl_devp->is_open_lock);
+
+ wake_up_interruptible(&smd_ctl_devp->ctl_wait_queue);
+ wake_up_interruptible(&smd_ctl_devp->ctl_opened_wait_queue);
+}
+
+static int modem_notifier(struct notifier_block *this,
+ unsigned long code,
+ void *_cmd)
+{
+ struct smd_ctl_dev *smd_ctl_devp =
+ container_of(this,
+ struct smd_ctl_dev,
+ nb);
+
+ if (!smd_ctl_devp)
+ return NOTIFY_DONE;
+
+ switch (code) {
+ case MODEM_NOTIFIER_START_RESET:
+ printk(KERN_ERR "Notify: start reset ch:%i\n",
+ smd_ctl_devp->i);
+ clean_and_signal(smd_ctl_devp);
+ break;
+ case MODEM_NOTIFIER_END_RESET:
+ printk(KERN_ERR "Notify: end reset\n");
+ break;
+ default:
+ printk(KERN_ERR "Notify: general\n");
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+int smd_ctl_ioctl(struct inode *inode,
+ struct file *file,
+ unsigned int cmd,
+ unsigned long arg)
+{
+ int ret;
+ struct smd_ctl_dev *smd_ctl_devp;
+
+ smd_ctl_devp = file->private_data;
+
+ switch (cmd) {
+ case TIOCMGET:
+ ret = smd_tiocmget(smd_ctl_devp->ctl_ch);
+ break;
+ case TIOCMSET:
+ ret = smd_tiocmset(smd_ctl_devp->ctl_ch, arg, ~arg);
+ break;
+ default:
+ ret = -1;
+ }
+
+ return ret;
+}
+
+ssize_t smd_ctl_read(struct file *file,
+ char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r;
+ int bytes_read;
+ struct smd_ctl_dev *smd_ctl_devp;
+
+ D(KERN_ERR "%s: read %i bytes\n",
+ __func__, count);
+
+ smd_ctl_devp = file->private_data;
+
+ if (!smd_ctl_devp->ctl_ch)
+ return -EINVAL;
+
+ r = wait_event_interruptible(smd_ctl_devp->ctl_wait_queue,
+ smd_ctl_devp->bytes_read |
+ smd_ctl_devp->has_reset);
+
+ if (smd_ctl_devp->has_reset)
+ return -ENETRESET;
+
+ if (r < 0) {
+ /* qualify error message */
+ if (r != -ERESTARTSYS) {
+ /* we get this anytime a signal comes in */
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "wait_event_interruptible ret %i\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r
+ );
+ }
+ return r;
+ }
+
+ /* Here we have a whole packet waiting for us */
+
+ mutex_lock(&smd_ctl_devp->rx_lock);
+ bytes_read = smd_ctl_devp->bytes_read;
+ smd_ctl_devp->bytes_read = 0;
+ mutex_unlock(&smd_ctl_devp->rx_lock);
+
+ D(KERN_ERR "%s: after wait_event_interruptible bytes_read = %i\n",
+ __func__, bytes_read);
+
+ if (bytes_read > count) {
+ printk(KERN_ERR "packet size %i > buffer size %i, "
+ "dropping packet!", bytes_read, count);
+ smd_read(smd_ctl_devp->ctl_ch, 0, bytes_read);
+ return -EINVAL;
+ }
+
+ /* smd_read and copy_to_user need to be merged to only do 1 copy */
+ if (smd_read(smd_ctl_devp->ctl_ch, smd_ctl_devp->rx_buf, bytes_read)
+ != bytes_read) {
+ if (smd_ctl_devp->has_reset)
+ return -ENETRESET;
+
+ printk(KERN_ERR "user read: not enough data?!\n");
+ return -EINVAL;
+ }
+ D_DUMP_BUFFER("read: ", bytes_read, smd_ctl_devp->rx_buf);
+ r = copy_to_user(buf, smd_ctl_devp->rx_buf, bytes_read);
+
+ if (r > 0) {
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "copy_to_user could not copy %i bytes.\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r);
+ return r;
+ }
+
+ D(KERN_ERR "%s: just read %i bytes\n",
+ __func__, bytes_read);
+
+ /* Not all packet events get explictly handled, this doesn't
+ matter if a constant stream of packets is streaming in, but
+ eventually a packet will be received and we'll have missed
+ the event. Queuing one more work item will catch this if
+ its happened, but do nothing if it hasn't.
+ */
+ queue_work(smd_ctl_devp->ctl_wq, &smd_ctl_devp->ctl_work);
+
+ D(KERN_ERR "%s: just queued more work\n", __func__);
+
+ return bytes_read;
+}
+
+ssize_t smd_ctl_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int r;
+ struct smd_ctl_dev *smd_ctl_devp;
+
+ if (count > MAX_BUF_SIZE)
+ return -EINVAL;
+
+ D(KERN_ERR "%s: writting %i bytes\n",
+ __func__, count);
+
+ smd_ctl_devp = file->private_data;
+
+ if (!smd_ctl_devp->ctl_ch)
+ return -EINVAL;
+
+ r = wait_event_interruptible(smd_ctl_devp->ctl_opened_wait_queue,
+ smd_ctl_devp->is_open |
+ smd_ctl_devp->has_reset);
+
+ if (smd_ctl_devp->has_reset)
+ return -ENETRESET;
+
+ if (r < 0) {
+ /* qualify error message */
+ if (r != -ERESTARTSYS) {
+ /* we get this anytime a signal comes in */
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "wait_event_interruptible ret %i\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r
+ );
+ }
+ return r;
+ }
+
+ D_DUMP_BUFFER("write: ", count, buf);
+
+ r = copy_from_user(smd_ctl_devp->tx_buf, buf, count);
+ if (r > 0) {
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "copy_from_user could not copy %i bytes.\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r);
+ return r;
+ }
+
+ D(KERN_ERR "%s: after copy_from_user. count = %i\n",
+ __func__, count);
+
+ r = smd_write(smd_ctl_devp->ctl_ch, smd_ctl_devp->tx_buf, count);
+ if (r != count) {
+ if (smd_ctl_devp->has_reset)
+ return -ENETRESET;
+
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "smd_write(ch,buf,count = %i) ret %i.\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ count,
+ r);
+ return r;
+ }
+
+ D(KERN_ERR "%s: just wrote %i bytes\n",
+ __func__, count);
+
+ return count;
+}
+
+static void ctl_work_func(struct work_struct *work)
+{
+ /* unsigned char buf[MAX_BUF_SIZE]; */
+ int sz;
+ struct smd_ctl_dev *smd_ctl_devp = container_of(work,
+ struct smd_ctl_dev,
+ ctl_work);
+
+ if (!smd_ctl_devp->ctl_ch)
+ return;
+
+ for (;;) {
+ sz = smd_cur_packet_size(smd_ctl_devp->ctl_ch);
+ if (sz == 0) {
+ D(KERN_ERR "%s: packet size is 0\n", __func__);
+ break;
+ }
+ if (sz > smd_read_avail(smd_ctl_devp->ctl_ch)) {
+ D(KERN_ERR "%s: packet size is %i - "
+ "the whole packet isn't here\n",
+ __func__, sz);
+ break;
+ }
+ if (sz > MAX_BUF_SIZE) {
+ smd_read(smd_ctl_devp->ctl_ch, 0, sz);
+ D(KERN_ERR "%s: packet size is %i - "
+ "greater than max %i, dropping\n",
+ __func__, sz, MAX_BUF_SIZE);
+ continue;
+ }
+
+ /* here we have a packet of size sz ready */
+
+ mutex_lock(&smd_ctl_devp->rx_lock);
+ smd_ctl_devp->bytes_read = sz;
+ mutex_unlock(&smd_ctl_devp->rx_lock);
+ wake_up_interruptible(&smd_ctl_devp->ctl_wait_queue);
+ D(KERN_ERR "%s: after wake_up\n", __func__);
+ break;
+ }
+}
+
+static void ctl_notify(void *priv, unsigned event)
+{
+ struct smd_ctl_dev *smd_ctl_devp = priv;
+
+ if (smd_ctl_devp->ctl_ch == 0)
+ return;
+
+ switch (event) {
+ case SMD_EVENT_DATA: {
+ int sz;
+ D(KERN_ERR "%s: data\n",
+ __func__);
+ sz = smd_cur_packet_size(smd_ctl_devp->ctl_ch);
+ D(KERN_ERR "%s: data sz = %i\n",
+ __func__, sz);
+ D(KERN_ERR "%s: smd_read_avail = %i\n",
+ __func__, smd_read_avail(smd_ctl_devp->ctl_ch));
+ if ((sz > 0) && (sz <= smd_read_avail(smd_ctl_devp->ctl_ch))) {
+ queue_work(smd_ctl_devp->ctl_wq,
+ &smd_ctl_devp->ctl_work);
+ D(KERN_ERR "%s: data just queued\n",
+ __func__);
+ }
+ D(KERN_ERR "%s: data after queueing\n",
+ __func__);
+ break;
+ }
+ case SMD_EVENT_OPEN:
+ D(KERN_ERR "%s: smd opened\n",
+ __func__);
+ smd_ctl_devp->is_open = 1;
+ wake_up_interruptible(&smd_ctl_devp->ctl_opened_wait_queue);
+ break;
+ case SMD_EVENT_CLOSE:
+ smd_ctl_devp->is_open = 0;
+ printk(KERN_ERR "%s: smd closed\n",
+ __func__);
+ break;
+ }
+}
+
+static char *smd_ctl_name[] = {
+ "DATA5_CNTL",
+ "DATA6_CNTL",
+ "DATA7_CNTL",
+};
+
+int smd_ctl_open(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_ctl_dev *smd_ctl_devp;
+
+ smd_ctl_devp = container_of(inode->i_cdev, struct smd_ctl_dev, cdev);
+
+ if (!smd_ctl_devp)
+ return -EINVAL;
+
+ file->private_data = smd_ctl_devp;
+
+ mutex_lock(&smd_ctl_devp->ctl_ch_lock);
+ if (smd_ctl_devp->ctl_ch == 0)
+ r = smd_open(smd_ctl_name[smd_ctl_devp->i],
+ &smd_ctl_devp->ctl_ch,
+ smd_ctl_devp,
+ ctl_notify);
+ mutex_unlock(&smd_ctl_devp->ctl_ch_lock);
+
+ return r;
+}
+
+int smd_ctl_release(struct inode *inode, struct file *file)
+{
+ int r = 0;
+ struct smd_ctl_dev *smd_ctl_devp = file->private_data;
+
+ if (!smd_ctl_devp)
+ return -EINVAL;
+
+ clean_and_signal(smd_ctl_devp);
+
+ mutex_lock(&smd_ctl_devp->ctl_ch_lock);
+ if (smd_ctl_devp->ctl_ch != 0) {
+ r = smd_close(smd_ctl_devp->ctl_ch);
+ smd_ctl_devp->ctl_ch = 0;
+ }
+ mutex_unlock(&smd_ctl_devp->ctl_ch_lock);
+
+ mutex_lock(&smd_ctl_devp->has_reset_lock);
+ smd_ctl_devp->has_reset = 0;
+ mutex_unlock(&smd_ctl_devp->has_reset_lock);
+
+ return r;
+}
+
+static const struct file_operations smd_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = smd_ctl_open,
+ .release = smd_ctl_release,
+ .read = smd_ctl_read,
+ .write = smd_ctl_write,
+ .ioctl = smd_ctl_ioctl,
+};
+
+static int __init smd_ctl_init(void)
+{
+ int i;
+ int r;
+ unsigned char buf[32];
+
+ r = alloc_chrdev_region(&smd_ctl_number,
+ 0,
+ NUM_SMD_CTL_PORTS,
+ DEVICE_NAME);
+ if (IS_ERR_VALUE(r)) {
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "alloc_chrdev_region() ret %i.\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r);
+ goto error0;
+ }
+
+ smd_ctl_classp = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(smd_ctl_classp)) {
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "class_create() ENOMEM\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ r = -ENOMEM;
+ goto error1;
+ }
+
+ for (i = 0; i < NUM_SMD_CTL_PORTS; ++i) {
+ smd_ctl_devp[i] = kzalloc(sizeof(struct smd_ctl_dev),
+ GFP_KERNEL);
+ if (IS_ERR(smd_ctl_devp[i])) {
+ printk(KERN_ERR "ERROR:%s:%i:%s kmalloc() ENOMEM\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ r = -ENOMEM;
+ goto error2;
+ }
+
+ smd_ctl_devp[i]->i = i;
+
+ scnprintf(buf, 32, "ctl%i", i);
+ smd_ctl_devp[i]->ctl_wq = create_singlethread_workqueue(buf);
+ if (&smd_ctl_devp[i]->ctl_wq == 0) {
+ printk(KERN_ERR
+ "%s:%i:%s: "
+ "create_singlethread_workqueue() ret 0\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ r = -ENOMEM;
+ goto error2;
+ }
+
+ init_waitqueue_head(&smd_ctl_devp[i]->ctl_wait_queue);
+ smd_ctl_devp[i]->is_open = 0;
+ init_waitqueue_head(&smd_ctl_devp[i]->ctl_opened_wait_queue);
+ INIT_WORK(&smd_ctl_devp[i]->ctl_work,
+ ctl_work_func);
+
+ mutex_init(&smd_ctl_devp[i]->ctl_ch_lock);
+ mutex_init(&smd_ctl_devp[i]->rx_lock);
+ mutex_init(&smd_ctl_devp[i]->is_open_lock);
+
+ cdev_init(&smd_ctl_devp[i]->cdev, &smd_ctl_fops);
+ smd_ctl_devp[i]->cdev.owner = THIS_MODULE;
+
+ r = cdev_add(&smd_ctl_devp[i]->cdev,
+ (smd_ctl_number + i),
+ 1);
+
+ if (IS_ERR_VALUE(r)) {
+ printk(KERN_ERR "%s:%i:%s: cdev_add() ret %i\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r);
+ destroy_workqueue(smd_ctl_devp[i]->ctl_wq);
+ kfree(smd_ctl_devp[i]);
+ goto error2;
+ }
+
+ smd_ctl_devp[i]->devicep =
+ device_create(smd_ctl_classp,
+ NULL,
+ (smd_ctl_number + i),
+ NULL,
+ DEVICE_NAME "%d",
+ i);
+
+ if (IS_ERR(smd_ctl_devp[i]->devicep)) {
+ printk(KERN_ERR "%s:%i:%s: "
+ "device_create() ENOMEM\n",
+ __FILE__,
+ __LINE__,
+ __func__);
+ r = -ENOMEM;
+ cdev_del(&smd_ctl_devp[i]->cdev);
+ destroy_workqueue(smd_ctl_devp[i]->ctl_wq);
+ kfree(smd_ctl_devp[i]);
+ goto error2;
+ }
+
+ smd_ctl_devp[i]->nb.notifier_call = modem_notifier;
+ modem_register_notifier(&smd_ctl_devp[i]->nb);
+ mutex_init(&smd_ctl_devp[i]->has_reset_lock);
+
+ }
+
+ printk(KERN_INFO "SMD Control Port Driver Initialized.\n");
+ return 0;
+
+ error2:
+ if (i > 0) {
+ while (--i >= 0) {
+ cdev_del(&smd_ctl_devp[i]->cdev);
+ destroy_workqueue(smd_ctl_devp[i]->ctl_wq);
+ kfree(smd_ctl_devp[i]);
+ device_destroy(smd_ctl_classp,
+ MKDEV(MAJOR(smd_ctl_number), i));
+ }
+ }
+
+ class_destroy(smd_ctl_classp);
+ error1:
+ unregister_chrdev_region(MAJOR(smd_ctl_number), NUM_SMD_CTL_PORTS);
+ error0:
+ return r;
+}
+
+static void __exit smd_ctl_cleanup(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_SMD_CTL_PORTS; ++i) {
+ modem_unregister_notifier(&smd_ctl_devp[i]->nb);
+ cdev_del(&smd_ctl_devp[i]->cdev);
+ kfree(smd_ctl_devp[i]);
+ device_destroy(smd_ctl_classp,
+ MKDEV(MAJOR(smd_ctl_number), i));
+ }
+
+ class_destroy(smd_ctl_classp);
+
+ unregister_chrdev_region(MAJOR(smd_ctl_number), NUM_SMD_CTL_PORTS);
+}
+
+module_init(smd_ctl_init);
+module_exit(smd_ctl_cleanup);
+
+MODULE_DESCRIPTION("MSM Shared Memory Control Port");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/smd_nmea.c b/arch/arm/mach-msm/smd_nmea.c
new file mode 100644
index 000000000000..35e1cbdca1e6
--- /dev/null
+++ b/arch/arm/mach-msm/smd_nmea.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * SMD NMEA Driver -- Provides GPS NMEA device to SMD port interface.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/miscdevice.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+
+#include <mach/msm_smd.h>
+
+#define MAX_BUF_SIZE 200
+
+static DEFINE_MUTEX(nmea_ch_lock);
+static DEFINE_MUTEX(nmea_rx_buf_lock);
+
+static DECLARE_WAIT_QUEUE_HEAD(nmea_wait_queue);
+
+struct nmea_device_t {
+ struct miscdevice misc;
+
+ struct smd_channel *ch;
+
+ unsigned char rx_buf[MAX_BUF_SIZE];
+ unsigned int bytes_read;
+};
+
+struct nmea_device_t *nmea_devp;
+
+static void nmea_work_func(struct work_struct *ws)
+{
+ int sz;
+
+ for (;;) {
+ sz = smd_cur_packet_size(nmea_devp->ch);
+ if (sz == 0)
+ break;
+ if (sz > smd_read_avail(nmea_devp->ch))
+ break;
+ if (sz > MAX_BUF_SIZE) {
+ smd_read(nmea_devp->ch, 0, sz);
+ continue;
+ }
+
+ mutex_lock(&nmea_rx_buf_lock);
+ if (smd_read(nmea_devp->ch, nmea_devp->rx_buf, sz) != sz) {
+ mutex_unlock(&nmea_rx_buf_lock);
+ printk(KERN_ERR "nmea: not enough data?!\n");
+ continue;
+ }
+ nmea_devp->bytes_read = sz;
+ mutex_unlock(&nmea_rx_buf_lock);
+ wake_up_interruptible(&nmea_wait_queue);
+ }
+}
+
+struct workqueue_struct *nmea_wq;
+static DECLARE_WORK(nmea_work, nmea_work_func);
+
+static void nmea_notify(void *priv, unsigned event)
+{
+ switch (event) {
+ case SMD_EVENT_DATA: {
+ int sz;
+ sz = smd_cur_packet_size(nmea_devp->ch);
+ if ((sz > 0) && (sz <= smd_read_avail(nmea_devp->ch)))
+ queue_work(nmea_wq, &nmea_work);
+ break;
+ }
+ case SMD_EVENT_OPEN:
+ printk(KERN_INFO "nmea: smd opened\n");
+ break;
+ case SMD_EVENT_CLOSE:
+ printk(KERN_INFO "nmea: smd closed\n");
+ break;
+ }
+}
+
+static ssize_t nmea_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int r;
+ int bytes_read;
+
+ r = wait_event_interruptible(nmea_wait_queue,
+ nmea_devp->bytes_read);
+ if (r < 0) {
+ /* qualify error message */
+ if (r != -ERESTARTSYS) {
+ /* we get this anytime a signal comes in */
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "wait_event_interruptible ret %i\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r
+ );
+ }
+ return r;
+ }
+
+ mutex_lock(&nmea_rx_buf_lock);
+ bytes_read = nmea_devp->bytes_read;
+ nmea_devp->bytes_read = 0;
+ r = copy_to_user(buf, nmea_devp->rx_buf, bytes_read);
+ mutex_unlock(&nmea_rx_buf_lock);
+
+ if (r > 0) {
+ printk(KERN_ERR "ERROR:%s:%i:%s: "
+ "copy_to_user could not copy %i bytes.\n",
+ __FILE__,
+ __LINE__,
+ __func__,
+ r);
+ return r;
+ }
+
+ return bytes_read;
+}
+
+static int nmea_open(struct inode *ip, struct file *fp)
+{
+ int r = 0;
+
+ mutex_lock(&nmea_ch_lock);
+ if (nmea_devp->ch == 0)
+ r = smd_open("GPSNMEA", &nmea_devp->ch, nmea_devp, nmea_notify);
+ mutex_unlock(&nmea_ch_lock);
+
+ return r;
+}
+
+static int nmea_release(struct inode *ip, struct file *fp)
+{
+ int r = 0;
+
+ mutex_lock(&nmea_ch_lock);
+ if (nmea_devp->ch != 0) {
+ r = smd_close(nmea_devp->ch);
+ nmea_devp->ch = 0;
+ }
+ mutex_unlock(&nmea_ch_lock);
+
+ return r;
+}
+
+static const struct file_operations nmea_fops = {
+ .owner = THIS_MODULE,
+ .read = nmea_read,
+ .open = nmea_open,
+ .release = nmea_release,
+};
+
+static struct nmea_device_t nmea_device = {
+ .misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "nmea",
+ .fops = &nmea_fops,
+ }
+};
+
+static void __exit nmea_exit(void)
+{
+ destroy_workqueue(nmea_wq);
+ misc_deregister(&nmea_device.misc);
+}
+
+static int __init nmea_init(void)
+{
+ int ret;
+
+ nmea_device.bytes_read = 0;
+ nmea_devp = &nmea_device;
+
+ nmea_wq = create_singlethread_workqueue("nmea");
+ if (nmea_wq == 0)
+ return -ENOMEM;
+
+ ret = misc_register(&nmea_device.misc);
+ return ret;
+}
+
+module_init(nmea_init);
+module_exit(nmea_exit);
+
+MODULE_DESCRIPTION("MSM Shared Memory NMEA Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/smd_private.h b/arch/arm/mach-msm/smd_private.h
new file mode 100644
index 000000000000..3dec5d8ef794
--- /dev/null
+++ b/arch/arm/mach-msm/smd_private.h
@@ -0,0 +1,256 @@
+/* arch/arm/mach-msm/smd_private.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
+
+struct smem_heap_info
+{
+ unsigned initialized;
+ unsigned free_offset;
+ unsigned heap_remaining;
+ unsigned reserved;
+};
+
+struct smem_heap_entry
+{
+ unsigned allocated;
+ unsigned offset;
+ unsigned size;
+ unsigned reserved;
+};
+
+struct smem_proc_comm
+{
+ unsigned command;
+ unsigned status;
+ unsigned data1;
+ unsigned data2;
+};
+
+#define PC_APPS 0
+#define PC_MODEM 1
+
+#define VERSION_QDSP6 4
+#define VERSION_APPS_SBL 6
+#define VERSION_MODEM_SBL 7
+#define VERSION_APPS 8
+#define VERSION_MODEM 9
+
+#define SMD_HEAP_SIZE 512
+
+struct smem_shared
+{
+ struct smem_proc_comm proc_comm[4];
+ unsigned version[32];
+ struct smem_heap_info heap_info;
+ struct smem_heap_entry heap_toc[SMD_HEAP_SIZE];
+};
+
+#if defined(CONFIG_MSM_SMD_PKG4)
+struct smsm_interrupt_info {
+ uint32_t aArm_en_mask;
+ uint32_t aArm_interrupts_pending;
+ uint32_t aArm_wakeup_reason;
+ uint32_t aArm_rpc_prog;
+ uint32_t aArm_rpc_proc;
+ char aArm_smd_port_name[20];
+ uint32_t aArm_gpio_info;
+};
+#elif defined(CONFIG_MSM_SMD_PKG3)
+struct smsm_interrupt_info {
+ uint32_t aArm_en_mask;
+ uint32_t aArm_interrupts_pending;
+ uint32_t aArm_wakeup_reason;
+};
+#else
+#error No SMD Package Specified; aborting
+#endif
+
+#if defined(CONFIG_MSM_N_WAY_SMSM)
+enum {
+ SMSM_APPS_STATE,
+ SMSM_MODEM_STATE,
+ SMSM_Q6_STATE,
+ SMSM_APPS_DEM,
+ SMSM_MODEM_DEM,
+ SMSM_Q6_DEM,
+ SMSM_POWER_MASTER_DEM,
+ SMSM_TIME_MASTER_DEM,
+ SMSM_NUM_ENTRIES,
+};
+#else
+enum {
+ SMSM_APPS_STATE = 1,
+ SMSM_MODEM_STATE = 3,
+ SMSM_NUM_ENTRIES,
+};
+#endif
+
+enum {
+ SMSM_APPS,
+ SMSM_MODEM,
+ SMSM_Q6,
+ SMSM_NUM_HOSTS,
+};
+
+#define SZ_DIAG_ERR_MSG 0xC8
+#define ID_DIAG_ERR_MSG SMEM_DIAG_ERR_MESSAGE
+#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
+#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
+#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
+
+#define SMSM_INIT 0x00000001
+#define SMSM_OSENTERED 0x00000002
+#define SMSM_SMDWAIT 0x00000004
+#define SMSM_SMDINIT 0x00000008
+#define SMSM_RPCWAIT 0x00000010
+#define SMSM_RPCINIT 0x00000020
+#define SMSM_RESET 0x00000040
+#define SMSM_RSA 0x00000080
+#define SMSM_RUN 0x00000100
+#define SMSM_PWRC 0x00000200
+#define SMSM_TIMEWAIT 0x00000400
+#define SMSM_TIMEINIT 0x00000800
+#define SMSM_PWRC_EARLY_EXIT 0x00001000
+#define SMSM_WFPI 0x00002000
+#define SMSM_SLEEP 0x00004000
+#define SMSM_SLEEPEXIT 0x00008000
+#define SMSM_OEMSBL_RELEASE 0x00010000
+#define SMSM_APPS_REBOOT 0x00020000
+#define SMSM_SYSTEM_POWER_DOWN 0x00040000
+#define SMSM_SYSTEM_REBOOT 0x00080000
+#define SMSM_SYSTEM_DOWNLOAD 0x00100000
+#define SMSM_PWRC_SUSPEND 0x00200000
+#define SMSM_APPS_SHUTDOWN 0x00400000
+#define SMSM_SMD_LOOPBACK 0x00800000
+#define SMSM_RUN_QUIET 0x01000000
+#define SMSM_MODEM_WAIT 0x02000000
+#define SMSM_MODEM_BREAK 0x04000000
+#define SMSM_MODEM_CONTINUE 0x08000000
+#define SMSM_UNKNOWN 0x80000000
+
+#define SMSM_WKUP_REASON_RPC 0x00000001
+#define SMSM_WKUP_REASON_INT 0x00000002
+#define SMSM_WKUP_REASON_GPIO 0x00000004
+#define SMSM_WKUP_REASON_TIMER 0x00000008
+#define SMSM_WKUP_REASON_ALARM 0x00000010
+#define SMSM_WKUP_REASON_RESET 0x00000020
+
+void *smem_alloc(unsigned id, unsigned size);
+void *smem_get_entry(unsigned id, unsigned *size);
+int smsm_change_state(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask);
+int smsm_change_intr_mask(uint32_t smsm_entry,
+ uint32_t clear_mask, uint32_t set_mask);
+int smsm_get_intr_mask(uint32_t smsm_entry, uint32_t *intr_mask);
+uint32_t smsm_get_state(uint32_t smsm_entry);
+void smsm_print_sleep_info(uint32_t sleep_delay, uint32_t sleep_limit,
+ uint32_t irq_mask, uint32_t wakeup_reason, uint32_t pending_irqs);
+void smsm_reset_modem(unsigned mode);
+void smsm_reset_modem_cont(void);
+void smd_sleep_exit(void);
+
+#define SMEM_NUM_SMD_STREAM_CHANNELS 64
+#define SMEM_NUM_SMD_BLOCK_CHANNELS 64
+
+enum {
+ /* fixed items */
+ SMEM_PROC_COMM = 0,
+ SMEM_HEAP_INFO,
+ SMEM_ALLOCATION_TABLE,
+ SMEM_VERSION_INFO,
+ SMEM_HW_RESET_DETECT,
+ SMEM_AARM_WARM_BOOT,
+ SMEM_DIAG_ERR_MESSAGE,
+ SMEM_SPINLOCK_ARRAY,
+ SMEM_MEMORY_BARRIER_LOCATION,
+
+ /* dynamic items */
+ SMEM_AARM_PARTITION_TABLE,
+ SMEM_AARM_BAD_BLOCK_TABLE,
+ SMEM_RESERVE_BAD_BLOCKS,
+ SMEM_WM_UUID,
+ SMEM_CHANNEL_ALLOC_TBL,
+ SMEM_SMD_BASE_ID,
+ SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_SMEM_LOG_EVENTS,
+ SMEM_SMEM_STATIC_LOG_IDX,
+ SMEM_SMEM_STATIC_LOG_EVENTS,
+ SMEM_SMEM_SLOW_CLOCK_SYNC,
+ SMEM_SMEM_SLOW_CLOCK_VALUE,
+ SMEM_BIO_LED_BUF,
+ SMEM_SMSM_SHARED_STATE,
+ SMEM_SMSM_INT_INFO,
+ SMEM_SMSM_SLEEP_DELAY,
+ SMEM_SMSM_LIMIT_SLEEP,
+ SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
+ SMEM_KEYPAD_KEYS_PRESSED,
+ SMEM_KEYPAD_STATE_UPDATED,
+ SMEM_KEYPAD_STATE_IDX,
+ SMEM_GPIO_INT,
+ SMEM_MDDI_LCD_IDX,
+ SMEM_MDDI_HOST_DRIVER_STATE,
+ SMEM_MDDI_LCD_DISP_STATE,
+ SMEM_LCD_CUR_PANEL,
+ SMEM_MARM_BOOT_SEGMENT_INFO,
+ SMEM_AARM_BOOT_SEGMENT_INFO,
+ SMEM_SLEEP_STATIC,
+ SMEM_SCORPION_FREQUENCY,
+ SMEM_SMD_PROFILES,
+ SMEM_TSSC_BUSY,
+ SMEM_HS_SUSPEND_FILTER_INFO,
+ SMEM_BATT_INFO,
+ SMEM_APPS_BOOT_MODE,
+ SMEM_VERSION_FIRST,
+ SMEM_VERSION_SMD = SMEM_VERSION_FIRST,
+ SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
+ SMEM_OSS_RRCASN1_BUF1,
+ SMEM_OSS_RRCASN1_BUF2,
+ SMEM_ID_VENDOR0,
+ SMEM_ID_VENDOR1,
+ SMEM_ID_VENDOR2,
+ SMEM_HW_SW_BUILD_ID,
+ SMEM_SMD_BLOCK_PORT_BASE_ID,
+ SMEM_SMD_BLOCK_PORT_PROC0_HEAP = SMEM_SMD_BLOCK_PORT_BASE_ID +
+ SMEM_NUM_SMD_BLOCK_CHANNELS,
+ SMEM_SMD_BLOCK_PORT_PROC1_HEAP = SMEM_SMD_BLOCK_PORT_PROC0_HEAP +
+ SMEM_NUM_SMD_BLOCK_CHANNELS,
+ SMEM_I2C_MUTEX = SMEM_SMD_BLOCK_PORT_PROC1_HEAP +
+ SMEM_NUM_SMD_BLOCK_CHANNELS,
+ SMEM_SCLK_CONVERSION,
+ SMEM_SMD_SMSM_INTR_MUX,
+ SMEM_SMSM_CPU_INTR_MASK,
+ SMEM_APPS_DEM_SLAVE_DATA,
+ SMEM_QDSP6_DEM_SLAVE_DATA,
+ SMEM_CLKREGIM_BSP,
+ SMEM_CLKREGIM_SOURCES,
+ SMEM_SMD_FIFO_BASE_ID,
+ SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID +
+ SMEM_NUM_SMD_STREAM_CHANNELS,
+ SMEM_POWER_ON_STATUS_INFO,
+ SMEM_DAL_AREA,
+ SMEM_SMEM_LOG_POWER_IDX,
+ SMEM_SMEM_LOG_POWER_WRAP,
+ SMEM_SMEM_LOG_POWER_EVENTS,
+ SMEM_ERR_CRASH_LOG,
+ SMEM_ERR_F3_TRACE_LOG,
+ SMEM_SMD_BRIDGE_ALLOC_TABLE,
+ SMEM_SMDLITE_TABLE,
+ SMEM_SD_IMG_UPGRADE_STATUS,
+ SMEM_NUM_ITEMS,
+};
+
+#endif
diff --git a/arch/arm/mach-msm/smd_qmi.c b/arch/arm/mach-msm/smd_qmi.c
new file mode 100644
index 000000000000..e6ae979d8df8
--- /dev/null
+++ b/arch/arm/mach-msm/smd_qmi.c
@@ -0,0 +1,844 @@
+/* arch/arm/mach-msm/smd_qmi.c
+ *
+ * QMI Control Driver -- Manages network data connections.
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/workqueue.h>
+
+#include <asm/uaccess.h>
+#include <mach/msm_smd.h>
+
+#define QMI_CTL 0x00
+#define QMI_WDS 0x01
+#define QMI_DMS 0x02
+#define QMI_NAS 0x03
+
+#define QMI_RESULT_SUCCESS 0x0000
+#define QMI_RESULT_FAILURE 0x0001
+
+struct qmi_msg {
+ unsigned char service;
+ unsigned char client_id;
+ unsigned short txn_id;
+ unsigned short type;
+ unsigned short size;
+ unsigned char *tlv;
+};
+
+#define qmi_ctl_client_id 0
+
+#define STATE_OFFLINE 0
+#define STATE_QUERYING 1
+#define STATE_ONLINE 2
+
+struct qmi_ctxt {
+ struct miscdevice misc;
+
+ struct mutex lock;
+
+ unsigned char ctl_txn_id;
+ unsigned char wds_client_id;
+ unsigned short wds_txn_id;
+
+ unsigned wds_busy;
+ unsigned wds_handle;
+ unsigned state_dirty;
+ unsigned state;
+
+ unsigned char addr[4];
+ unsigned char mask[4];
+ unsigned char gateway[4];
+ unsigned char dns1[4];
+ unsigned char dns2[4];
+
+ smd_channel_t *ch;
+ const char *ch_name;
+
+ struct work_struct open_work;
+ struct work_struct read_work;
+};
+
+static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n);
+
+static void qmi_read_work(struct work_struct *ws);
+static void qmi_open_work(struct work_struct *work);
+
+void qmi_ctxt_init(struct qmi_ctxt *ctxt, unsigned n)
+{
+ mutex_init(&ctxt->lock);
+ INIT_WORK(&ctxt->read_work, qmi_read_work);
+ INIT_WORK(&ctxt->open_work, qmi_open_work);
+ ctxt->ctl_txn_id = 1;
+ ctxt->wds_txn_id = 1;
+ ctxt->wds_busy = 1;
+ ctxt->state = STATE_OFFLINE;
+
+}
+
+static struct workqueue_struct *qmi_wq;
+
+static int verbose = 0;
+
+/* anyone waiting for a state change waits here */
+static DECLARE_WAIT_QUEUE_HEAD(qmi_wait_queue);
+
+
+static void qmi_dump_msg(struct qmi_msg *msg, const char *prefix)
+{
+ unsigned sz, n;
+ unsigned char *x;
+
+ if (!verbose)
+ return;
+
+ printk(KERN_INFO
+ "qmi: %s: svc=%02x cid=%02x tid=%04x type=%04x size=%04x\n",
+ prefix, msg->service, msg->client_id,
+ msg->txn_id, msg->type, msg->size);
+
+ x = msg->tlv;
+ sz = msg->size;
+
+ while (sz >= 3) {
+ sz -= 3;
+
+ n = x[1] | (x[2] << 8);
+ if (n > sz)
+ break;
+
+ printk(KERN_INFO "qmi: %s: tlv: %02x %04x { ",
+ prefix, x[0], n);
+ x += 3;
+ sz -= n;
+ while (n-- > 0)
+ printk("%02x ", *x++);
+ printk("}\n");
+ }
+}
+
+int qmi_add_tlv(struct qmi_msg *msg,
+ unsigned type, unsigned size, const void *data)
+{
+ unsigned char *x = msg->tlv + msg->size;
+
+ x[0] = type;
+ x[1] = size;
+ x[2] = size >> 8;
+
+ memcpy(x + 3, data, size);
+
+ msg->size += (size + 3);
+
+ return 0;
+}
+
+/* Extract a tagged item from a qmi message buffer,
+** taking care not to overrun the buffer.
+*/
+static int qmi_get_tlv(struct qmi_msg *msg,
+ unsigned type, unsigned size, void *data)
+{
+ unsigned char *x = msg->tlv;
+ unsigned len = msg->size;
+ unsigned n;
+
+ while (len >= 3) {
+ len -= 3;
+
+ /* size of this item */
+ n = x[1] | (x[2] << 8);
+ if (n > len)
+ break;
+
+ if (x[0] == type) {
+ if (n != size)
+ return -1;
+ memcpy(data, x + 3, size);
+ return 0;
+ }
+
+ x += (n + 3);
+ len -= n;
+ }
+
+ return -1;
+}
+
+static unsigned qmi_get_status(struct qmi_msg *msg, unsigned *error)
+{
+ unsigned short status[2];
+ if (qmi_get_tlv(msg, 0x02, sizeof(status), status)) {
+ *error = 0;
+ return QMI_RESULT_FAILURE;
+ } else {
+ *error = status[1];
+ return status[0];
+ }
+}
+
+/* 0x01 <qmux-header> <payload> */
+#define QMUX_HEADER 13
+
+/* should be >= HEADER + FOOTER */
+#define QMUX_OVERHEAD 16
+
+static int qmi_send(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
+{
+ unsigned char *data;
+ unsigned hlen;
+ unsigned len;
+ int r;
+
+ qmi_dump_msg(msg, "send");
+
+ if (msg->service == QMI_CTL) {
+ hlen = QMUX_HEADER - 1;
+ } else {
+ hlen = QMUX_HEADER;
+ }
+
+ /* QMUX length is total header + total payload - IFC selector */
+ len = hlen + msg->size - 1;
+ if (len > 0xffff)
+ return -1;
+
+ data = msg->tlv - hlen;
+
+ /* prepend encap and qmux header */
+ *data++ = 0x01; /* ifc selector */
+
+ /* qmux header */
+ *data++ = len;
+ *data++ = len >> 8;
+ *data++ = 0x00; /* flags: client */
+ *data++ = msg->service;
+ *data++ = msg->client_id;
+
+ /* qmi header */
+ *data++ = 0x00; /* flags: send */
+ *data++ = msg->txn_id;
+ if (msg->service != QMI_CTL)
+ *data++ = msg->txn_id >> 8;
+
+ *data++ = msg->type;
+ *data++ = msg->type >> 8;
+ *data++ = msg->size;
+ *data++ = msg->size >> 8;
+
+ /* len + 1 takes the interface selector into account */
+ r = smd_write(ctxt->ch, msg->tlv - hlen, len + 1);
+
+ if (r != len) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+static void qmi_process_ctl_msg(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
+{
+ unsigned err;
+ if (msg->type == 0x0022) {
+ unsigned char n[2];
+ if (qmi_get_status(msg, &err))
+ return;
+ if (qmi_get_tlv(msg, 0x01, sizeof(n), n))
+ return;
+ if (n[0] == QMI_WDS) {
+ printk(KERN_INFO
+ "qmi: ctl: wds use client_id 0x%02x\n", n[1]);
+ ctxt->wds_client_id = n[1];
+ ctxt->wds_busy = 0;
+ }
+ }
+}
+
+static int qmi_network_get_profile(struct qmi_ctxt *ctxt);
+
+static void swapaddr(unsigned char *src, unsigned char *dst)
+{
+ dst[0] = src[3];
+ dst[1] = src[2];
+ dst[2] = src[1];
+ dst[3] = src[0];
+}
+
+static unsigned char zero[4];
+static void qmi_read_runtime_profile(struct qmi_ctxt *ctxt, struct qmi_msg *msg)
+{
+ unsigned char tmp[4];
+ unsigned r;
+
+ r = qmi_get_tlv(msg, 0x1e, 4, tmp);
+ swapaddr(r ? zero : tmp, ctxt->addr);
+ r = qmi_get_tlv(msg, 0x21, 4, tmp);
+ swapaddr(r ? zero : tmp, ctxt->mask);
+ r = qmi_get_tlv(msg, 0x20, 4, tmp);
+ swapaddr(r ? zero : tmp, ctxt->gateway);
+ r = qmi_get_tlv(msg, 0x15, 4, tmp);
+ swapaddr(r ? zero : tmp, ctxt->dns1);
+ r = qmi_get_tlv(msg, 0x16, 4, tmp);
+ swapaddr(r ? zero : tmp, ctxt->dns2);
+}
+
+static void qmi_process_unicast_wds_msg(struct qmi_ctxt *ctxt,
+ struct qmi_msg *msg)
+{
+ unsigned err;
+ switch (msg->type) {
+ case 0x0021:
+ if (qmi_get_status(msg, &err)) {
+ printk(KERN_ERR
+ "qmi: wds: network stop failed (%04x)\n", err);
+ } else {
+ printk(KERN_INFO
+ "qmi: wds: network stopped\n");
+ ctxt->state = STATE_OFFLINE;
+ ctxt->state_dirty = 1;
+ }
+ break;
+ case 0x0020:
+ if (qmi_get_status(msg, &err)) {
+ printk(KERN_ERR
+ "qmi: wds: network start failed (%04x)\n", err);
+ } else if (qmi_get_tlv(msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle)) {
+ printk(KERN_INFO
+ "qmi: wds no handle?\n");
+ } else {
+ printk(KERN_INFO
+ "qmi: wds: got handle 0x%08x\n",
+ ctxt->wds_handle);
+ }
+ break;
+ case 0x002D:
+ printk("qmi: got network profile\n");
+ if (ctxt->state == STATE_QUERYING) {
+ qmi_read_runtime_profile(ctxt, msg);
+ ctxt->state = STATE_ONLINE;
+ ctxt->state_dirty = 1;
+ }
+ break;
+ default:
+ printk(KERN_ERR "qmi: unknown msg type 0x%04x\n", msg->type);
+ }
+ ctxt->wds_busy = 0;
+}
+
+static void qmi_process_broadcast_wds_msg(struct qmi_ctxt *ctxt,
+ struct qmi_msg *msg)
+{
+ if (msg->type == 0x0022) {
+ unsigned char n[2];
+ if (qmi_get_tlv(msg, 0x01, sizeof(n), n))
+ return;
+ switch (n[0]) {
+ case 1:
+ printk(KERN_INFO "qmi: wds: DISCONNECTED\n");
+ ctxt->state = STATE_OFFLINE;
+ ctxt->state_dirty = 1;
+ break;
+ case 2:
+ printk(KERN_INFO "qmi: wds: CONNECTED\n");
+ ctxt->state = STATE_QUERYING;
+ ctxt->state_dirty = 1;
+ qmi_network_get_profile(ctxt);
+ break;
+ case 3:
+ printk(KERN_INFO "qmi: wds: SUSPENDED\n");
+ ctxt->state = STATE_OFFLINE;
+ ctxt->state_dirty = 1;
+ }
+ } else {
+ printk(KERN_ERR "qmi: unknown bcast msg type 0x%04x\n", msg->type);
+ }
+}
+
+static void qmi_process_wds_msg(struct qmi_ctxt *ctxt,
+ struct qmi_msg *msg)
+{
+ printk("wds: %04x @ %02x\n", msg->type, msg->client_id);
+ if (msg->client_id == ctxt->wds_client_id) {
+ qmi_process_unicast_wds_msg(ctxt, msg);
+ } else if (msg->client_id == 0xff) {
+ qmi_process_broadcast_wds_msg(ctxt, msg);
+ } else {
+ printk(KERN_ERR
+ "qmi_process_wds_msg client id 0x%02x unknown\n",
+ msg->client_id);
+ }
+}
+
+static void qmi_process_qmux(struct qmi_ctxt *ctxt,
+ unsigned char *buf, unsigned sz)
+{
+ struct qmi_msg msg;
+
+ /* require a full header */
+ if (sz < 5)
+ return;
+
+ /* require a size that matches the buffer size */
+ if (sz != (buf[0] | (buf[1] << 8)))
+ return;
+
+ /* only messages from a service (bit7=1) are allowed */
+ if (buf[2] != 0x80)
+ return;
+
+ msg.service = buf[3];
+ msg.client_id = buf[4];
+
+ /* annoyingly, CTL messages have a shorter TID */
+ if (buf[3] == 0) {
+ if (sz < 7)
+ return;
+ msg.txn_id = buf[6];
+ buf += 7;
+ sz -= 7;
+ } else {
+ if (sz < 8)
+ return;
+ msg.txn_id = buf[6] | (buf[7] << 8);
+ buf += 8;
+ sz -= 8;
+ }
+
+ /* no type and size!? */
+ if (sz < 4)
+ return;
+ sz -= 4;
+
+ msg.type = buf[0] | (buf[1] << 8);
+ msg.size = buf[2] | (buf[3] << 8);
+ msg.tlv = buf + 4;
+
+ if (sz != msg.size)
+ return;
+
+ qmi_dump_msg(&msg, "recv");
+
+ mutex_lock(&ctxt->lock);
+ switch (msg.service) {
+ case QMI_CTL:
+ qmi_process_ctl_msg(ctxt, &msg);
+ break;
+ case QMI_WDS:
+ qmi_process_wds_msg(ctxt, &msg);
+ break;
+ default:
+ printk(KERN_ERR "qmi: msg from unknown svc 0x%02x\n",
+ msg.service);
+ break;
+ }
+ mutex_unlock(&ctxt->lock);
+
+ wake_up(&qmi_wait_queue);
+}
+
+#define QMI_MAX_PACKET (256 + QMUX_OVERHEAD)
+
+static void qmi_read_work(struct work_struct *ws)
+{
+ struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, read_work);
+ struct smd_channel *ch = ctxt->ch;
+ unsigned char buf[QMI_MAX_PACKET];
+ int sz;
+
+ for (;;) {
+ sz = smd_cur_packet_size(ch);
+ if (sz == 0)
+ break;
+ if (sz < smd_read_avail(ch))
+ break;
+ if (sz > QMI_MAX_PACKET) {
+ smd_read(ch, 0, sz);
+ continue;
+ }
+ if (smd_read(ch, buf, sz) != sz) {
+ printk(KERN_ERR "qmi: not enough data?!\n");
+ continue;
+ }
+
+ /* interface selector must be 1 */
+ if (buf[0] != 0x01)
+ continue;
+
+ qmi_process_qmux(ctxt, buf + 1, sz - 1);
+ }
+}
+
+static int qmi_request_wds_cid(struct qmi_ctxt *ctxt);
+
+static void qmi_open_work(struct work_struct *ws)
+{
+ struct qmi_ctxt *ctxt = container_of(ws, struct qmi_ctxt, open_work);
+ mutex_lock(&ctxt->lock);
+ qmi_request_wds_cid(ctxt);
+ mutex_unlock(&ctxt->lock);
+}
+
+static void qmi_notify(void *priv, unsigned event)
+{
+ struct qmi_ctxt *ctxt = priv;
+
+ switch (event) {
+ case SMD_EVENT_DATA: {
+ int sz;
+ sz = smd_cur_packet_size(ctxt->ch);
+ if ((sz > 0) && (sz <= smd_read_avail(ctxt->ch))) {
+ queue_work(qmi_wq, &ctxt->read_work);
+ }
+ break;
+ }
+ case SMD_EVENT_OPEN:
+ printk(KERN_INFO "qmi: smd opened\n");
+ queue_work(qmi_wq, &ctxt->open_work);
+ break;
+ case SMD_EVENT_CLOSE:
+ printk(KERN_INFO "qmi: smd closed\n");
+ break;
+ }
+}
+
+static int qmi_request_wds_cid(struct qmi_ctxt *ctxt)
+{
+ unsigned char data[64 + QMUX_OVERHEAD];
+ struct qmi_msg msg;
+ unsigned char n;
+
+ msg.service = QMI_CTL;
+ msg.client_id = qmi_ctl_client_id;
+ msg.txn_id = ctxt->ctl_txn_id;
+ msg.type = 0x0022;
+ msg.size = 0;
+ msg.tlv = data + QMUX_HEADER;
+
+ ctxt->ctl_txn_id += 2;
+
+ n = QMI_WDS;
+ qmi_add_tlv(&msg, 0x01, 0x01, &n);
+
+ return qmi_send(ctxt, &msg);
+}
+
+static int qmi_network_get_profile(struct qmi_ctxt *ctxt)
+{
+ unsigned char data[96 + QMUX_OVERHEAD];
+ struct qmi_msg msg;
+
+ msg.service = QMI_WDS;
+ msg.client_id = ctxt->wds_client_id;
+ msg.txn_id = ctxt->wds_txn_id;
+ msg.type = 0x002D;
+ msg.size = 0;
+ msg.tlv = data + QMUX_HEADER;
+
+ ctxt->wds_txn_id += 2;
+
+ return qmi_send(ctxt, &msg);
+}
+
+static int qmi_network_up(struct qmi_ctxt *ctxt, char *apn)
+{
+ unsigned char data[96 + QMUX_OVERHEAD];
+ struct qmi_msg msg;
+ char *user;
+ char *pass;
+
+ for (user = apn; *user; user++) {
+ if (*user == ' ') {
+ *user++ = 0;
+ break;
+ }
+ }
+ for (pass = user; *pass; pass++) {
+ if (*pass == ' ') {
+ *pass++ = 0;
+ break;
+ }
+ }
+
+ msg.service = QMI_WDS;
+ msg.client_id = ctxt->wds_client_id;
+ msg.txn_id = ctxt->wds_txn_id;
+ msg.type = 0x0020;
+ msg.size = 0;
+ msg.tlv = data + QMUX_HEADER;
+
+ ctxt->wds_txn_id += 2;
+
+ qmi_add_tlv(&msg, 0x14, strlen(apn), apn);
+ if (*user) {
+ unsigned char x;
+ x = 3;
+ qmi_add_tlv(&msg, 0x16, 1, &x);
+ qmi_add_tlv(&msg, 0x17, strlen(user), user);
+ if (*pass)
+ qmi_add_tlv(&msg, 0x18, strlen(pass), pass);
+ }
+ return qmi_send(ctxt, &msg);
+}
+
+static int qmi_network_down(struct qmi_ctxt *ctxt)
+{
+ unsigned char data[16 + QMUX_OVERHEAD];
+ struct qmi_msg msg;
+
+ msg.service = QMI_WDS;
+ msg.client_id = ctxt->wds_client_id;
+ msg.txn_id = ctxt->wds_txn_id;
+ msg.type = 0x0021;
+ msg.size = 0;
+ msg.tlv = data + QMUX_HEADER;
+
+ ctxt->wds_txn_id += 2;
+
+ qmi_add_tlv(&msg, 0x01, sizeof(ctxt->wds_handle), &ctxt->wds_handle);
+
+ return qmi_send(ctxt, &msg);
+}
+
+static int qmi_print_state(struct qmi_ctxt *ctxt, char *buf, int max)
+{
+ int i;
+ char *statename;
+
+ if (ctxt->state == STATE_ONLINE) {
+ statename = "up";
+ } else if (ctxt->state == STATE_OFFLINE) {
+ statename = "down";
+ } else {
+ statename = "busy";
+ }
+
+ i = scnprintf(buf, max, "STATE=%s\n", statename);
+ i += scnprintf(buf + i, max - i, "CID=%d\n",ctxt->wds_client_id);
+
+ if (ctxt->state != STATE_ONLINE){
+ return i;
+ }
+
+ i += scnprintf(buf + i, max - i, "ADDR=%d.%d.%d.%d\n",
+ ctxt->addr[0], ctxt->addr[1], ctxt->addr[2], ctxt->addr[3]);
+ i += scnprintf(buf + i, max - i, "MASK=%d.%d.%d.%d\n",
+ ctxt->mask[0], ctxt->mask[1], ctxt->mask[2], ctxt->mask[3]);
+ i += scnprintf(buf + i, max - i, "GATEWAY=%d.%d.%d.%d\n",
+ ctxt->gateway[0], ctxt->gateway[1], ctxt->gateway[2],
+ ctxt->gateway[3]);
+ i += scnprintf(buf + i, max - i, "DNS1=%d.%d.%d.%d\n",
+ ctxt->dns1[0], ctxt->dns1[1], ctxt->dns1[2], ctxt->dns1[3]);
+ i += scnprintf(buf + i, max - i, "DNS2=%d.%d.%d.%d\n",
+ ctxt->dns2[0], ctxt->dns2[1], ctxt->dns2[2], ctxt->dns2[3]);
+
+ return i;
+}
+
+static ssize_t qmi_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct qmi_ctxt *ctxt = fp->private_data;
+ char msg[256];
+ int len;
+ int r;
+
+ mutex_lock(&ctxt->lock);
+ for (;;) {
+ if (ctxt->state_dirty) {
+ ctxt->state_dirty = 0;
+ len = qmi_print_state(ctxt, msg, 256);
+ break;
+ }
+ mutex_unlock(&ctxt->lock);
+ r = wait_event_interruptible(qmi_wait_queue, ctxt->state_dirty);
+ if (r < 0)
+ return r;
+ mutex_lock(&ctxt->lock);
+ }
+ mutex_unlock(&ctxt->lock);
+
+ if (len > count)
+ len = count;
+
+ if (copy_to_user(buf, msg, len))
+ return -EFAULT;
+
+ return len;
+}
+
+
+static ssize_t qmi_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct qmi_ctxt *ctxt = fp->private_data;
+ unsigned char cmd[64];
+ int len;
+ int r;
+
+ if (count < 1)
+ return 0;
+
+ len = count > 63 ? 63 : count;
+
+ if (copy_from_user(cmd, buf, len))
+ return -EFAULT;
+
+ cmd[len] = 0;
+
+ /* lazy */
+ if (cmd[len-1] == '\n') {
+ cmd[len-1] = 0;
+ len--;
+ }
+
+ if (!strncmp(cmd, "verbose", 7)) {
+ verbose = 1;
+ } else if (!strncmp(cmd, "terse", 5)) {
+ verbose = 0;
+ } else if (!strncmp(cmd, "poll", 4)) {
+ ctxt->state_dirty = 1;
+ wake_up(&qmi_wait_queue);
+ } else if (!strncmp(cmd, "down", 4)) {
+retry_down:
+ mutex_lock(&ctxt->lock);
+ if (ctxt->wds_busy) {
+ mutex_unlock(&ctxt->lock);
+ r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy);
+ if (r < 0)
+ return r;
+ goto retry_down;
+ }
+ ctxt->wds_busy = 1;
+ qmi_network_down(ctxt);
+ mutex_unlock(&ctxt->lock);
+ } else if (!strncmp(cmd, "up:", 3)) {
+retry_up:
+ mutex_lock(&ctxt->lock);
+ if (ctxt->wds_busy) {
+ mutex_unlock(&ctxt->lock);
+ r = wait_event_interruptible(qmi_wait_queue, !ctxt->wds_busy);
+ if (r < 0)
+ return r;
+ goto retry_up;
+ }
+ ctxt->wds_busy = 1;
+ qmi_network_up(ctxt, cmd+3);
+ mutex_unlock(&ctxt->lock);
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static int qmi_open(struct inode *ip, struct file *fp)
+{
+ struct qmi_ctxt *ctxt = qmi_minor_to_ctxt(MINOR(ip->i_rdev));
+ int r = 0;
+
+ if (!ctxt) {
+ printk(KERN_ERR "unknown qmi misc %d\n", MINOR(ip->i_rdev));
+ return -ENODEV;
+ }
+
+ fp->private_data = ctxt;
+
+ mutex_lock(&ctxt->lock);
+ if (ctxt->ch == 0)
+ r = smd_open(ctxt->ch_name, &ctxt->ch, ctxt, qmi_notify);
+ if (r == 0)
+ wake_up(&qmi_wait_queue);
+ mutex_unlock(&ctxt->lock);
+
+ return r;
+}
+
+static int qmi_release(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+static struct file_operations qmi_fops = {
+ .owner = THIS_MODULE,
+ .read = qmi_read,
+ .write = qmi_write,
+ .open = qmi_open,
+ .release = qmi_release,
+};
+
+static struct qmi_ctxt qmi_device0 = {
+ .ch_name = "SMD_DATA5_CNTL",
+ .misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "qmi0",
+ .fops = &qmi_fops,
+ }
+};
+static struct qmi_ctxt qmi_device1 = {
+ .ch_name = "SMD_DATA6_CNTL",
+ .misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "qmi1",
+ .fops = &qmi_fops,
+ }
+};
+static struct qmi_ctxt qmi_device2 = {
+ .ch_name = "SMD_DATA7_CNTL",
+ .misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "qmi2",
+ .fops = &qmi_fops,
+ }
+};
+
+static struct qmi_ctxt *qmi_minor_to_ctxt(unsigned n)
+{
+ if (n == qmi_device0.misc.minor)
+ return &qmi_device0;
+ if (n == qmi_device1.misc.minor)
+ return &qmi_device1;
+ if (n == qmi_device2.misc.minor)
+ return &qmi_device2;
+ return 0;
+}
+
+static int __init qmi_init(void)
+{
+ int ret;
+
+ qmi_wq = create_singlethread_workqueue("qmi");
+ if (qmi_wq == 0)
+ return -ENOMEM;
+
+ qmi_ctxt_init(&qmi_device0, 0);
+ qmi_ctxt_init(&qmi_device1, 1);
+ qmi_ctxt_init(&qmi_device2, 2);
+
+ ret = misc_register(&qmi_device0.misc);
+ if (ret == 0)
+ ret = misc_register(&qmi_device1.misc);
+ if (ret == 0)
+ ret = misc_register(&qmi_device2.misc);
+ return ret;
+}
+
+module_init(qmi_init);
diff --git a/arch/arm/mach-msm/smd_rpcrouter.c b/arch/arm/mach-msm/smd_rpcrouter.c
new file mode 100644
index 000000000000..64e0bca35fcf
--- /dev/null
+++ b/arch/arm/mach-msm/smd_rpcrouter.c
@@ -0,0 +1,2199 @@
+/* arch/arm/mach-msm/smd_rpcrouter.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* TODO: handle cases where smd_write() will tempfail due to full fifo */
+/* TODO: thread priority? schedule a work to bump it? */
+/* TODO: maybe make server_list_lock a mutex */
+/* TODO: pool fragments to avoid kmalloc/kfree churn */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cdev.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+
+#include <asm/byteorder.h>
+
+#include <mach/msm_smd.h>
+#include <mach/smem_log.h>
+#include "smd_rpcrouter.h"
+#include "modem_notifier.h"
+
+enum {
+ SMEM_LOG = 1U << 0,
+ RTR_DBG = 1U << 1,
+ R2R_MSG = 1U << 2,
+ R2R_RAW = 1U << 3,
+ RPC_MSG = 1U << 4,
+ NTFY_MSG = 1U << 5,
+ RAW_PMR = 1U << 6,
+ RAW_PMW = 1U << 7,
+ R2R_RAW_HDR = 1U << 8,
+};
+static int smd_rpcrouter_debug_mask;
+module_param_named(debug_mask, smd_rpcrouter_debug_mask,
+ int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define DIAG(x...) printk(KERN_ERR "[RR] ERROR " x)
+
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+#define D(x...) do { \
+if (smd_rpcrouter_debug_mask & RTR_DBG) \
+ printk(KERN_ERR x); \
+} while (0)
+
+#define RR(x...) do { \
+if (smd_rpcrouter_debug_mask & R2R_MSG) \
+ printk(KERN_ERR "[RR] "x); \
+} while (0)
+
+#define RAW(x...) do { \
+if (smd_rpcrouter_debug_mask & R2R_RAW) \
+ printk(KERN_ERR "[RAW] "x); \
+} while (0)
+
+#define RAW_HDR(x...) do { \
+if (smd_rpcrouter_debug_mask & R2R_RAW_HDR) \
+ printk(KERN_ERR "[HDR] "x); \
+} while (0)
+
+#define RAW_PMR(x...) do { \
+if (smd_rpcrouter_debug_mask & RAW_PMR) \
+ printk(KERN_ERR "[PMR] "x); \
+} while (0)
+
+#define RAW_PMR_NOMASK(x...) do { \
+ printk(KERN_ERR "[PMR] "x); \
+} while (0)
+
+#define RAW_PMW(x...) do { \
+if (smd_rpcrouter_debug_mask & RAW_PMW) \
+ printk(KERN_ERR "[PMW] "x); \
+} while (0)
+
+#define RAW_PMW_NOMASK(x...) do { \
+ printk(KERN_ERR "[PMW] "x); \
+} while (0)
+
+#define IO(x...) do { \
+if (smd_rpcrouter_debug_mask & RPC_MSG) \
+ printk(KERN_ERR "[RPC] "x); \
+} while (0)
+
+#define NTFY(x...) do { \
+if (smd_rpcrouter_debug_mask & NTFY_MSG) \
+ printk(KERN_ERR "[NOTIFY] "x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#define RR(x...) do { } while (0)
+#define RAW(x...) do { } while (0)
+#define RAW_HDR(x...) do { } while (0)
+#define RAW_PMR(x...) do { } while (0)
+#define RAW_PMR_NO_MASK(x...) do { } while (0)
+#define RAW_PMW(x...) do { } while (0)
+#define RAW_PMW_NO_MASK(x...) do { } while (0)
+#define IO(x...) do { } while (0)
+#define NTFY(x...) do { } while (0)
+#endif
+
+
+static LIST_HEAD(local_endpoints);
+static LIST_HEAD(remote_endpoints);
+
+static LIST_HEAD(server_list);
+
+static smd_channel_t *smd_channel;
+static int initialized;
+static wait_queue_head_t newserver_wait;
+static wait_queue_head_t smd_wait;
+
+static DEFINE_SPINLOCK(local_endpoints_lock);
+static DEFINE_SPINLOCK(remote_endpoints_lock);
+static DEFINE_SPINLOCK(server_list_lock);
+static DEFINE_SPINLOCK(smd_lock);
+
+static struct workqueue_struct *rpcrouter_workqueue;
+static int rpcrouter_need_len;
+
+static atomic_t next_xid = ATOMIC_INIT(1);
+static atomic_t pm_mid = ATOMIC_INIT(1);
+
+static void do_read_data(struct work_struct *work);
+static void do_create_pdevs(struct work_struct *work);
+static void do_create_rpcrouter_pdev(struct work_struct *work);
+
+static DECLARE_WORK(work_read_data, do_read_data);
+static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
+static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
+
+#define RR_STATE_IDLE 0
+#define RR_STATE_HEADER 1
+#define RR_STATE_BODY 2
+#define RR_STATE_ERROR 3
+
+/* After restart notification, local ep keep
+ * state for server restart and for ep notify.
+ * Server restart cleared by R-R new svr msg.
+ * NTFY cleared by calling msm_rpc_clear_netreset
+*/
+
+#define RESTART_NORMAL 0
+#define RESTART_PEND_SVR 1
+#define RESTART_PEND_NTFY 2
+#define RESTART_PEND_NTFY_SVR 3
+
+/* State for remote ep following restart */
+#define RESTART_QUOTA_ABORT 1
+
+struct rr_context {
+ struct rr_packet *pkt;
+ uint8_t *ptr;
+ uint32_t state; /* current assembly state */
+ uint32_t count; /* bytes needed in this state */
+};
+
+struct rr_context the_rr_context;
+
+static struct platform_device rpcrouter_pdev = {
+ .name = "oncrpc_router",
+ .id = -1,
+};
+
+
+static int rpcrouter_send_control_msg(union rr_control_msg *msg)
+{
+ struct rr_header hdr;
+ unsigned long flags;
+ int need;
+
+ if (!(msg->cmd == RPCROUTER_CTRL_CMD_HELLO) && !initialized) {
+ printk(KERN_ERR "rpcrouter_send_control_msg(): Warning, "
+ "router not initialized\n");
+ return -EINVAL;
+ }
+
+ hdr.version = RPCROUTER_VERSION;
+ hdr.type = msg->cmd;
+ hdr.src_pid = RPCROUTER_PID_LOCAL;
+ hdr.src_cid = RPCROUTER_ROUTER_ADDRESS;
+ hdr.confirm_rx = 0;
+ hdr.size = sizeof(*msg);
+ hdr.dst_pid = 0;
+ hdr.dst_cid = RPCROUTER_ROUTER_ADDRESS;
+
+ /* TODO: what if channel is full? */
+
+ need = sizeof(hdr) + hdr.size;
+ spin_lock_irqsave(&smd_lock, flags);
+ while (smd_write_avail(smd_channel) < need) {
+ spin_unlock_irqrestore(&smd_lock, flags);
+ msleep(250);
+ spin_lock_irqsave(&smd_lock, flags);
+ }
+ smd_write(smd_channel, &hdr, sizeof(hdr));
+ smd_write(smd_channel, msg, hdr.size);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ return 0;
+}
+
+static void modem_reset_start_cleanup(void)
+{
+ struct msm_rpc_endpoint *ept;
+ struct rr_remote_endpoint *r_ept;
+ struct rr_packet *pkt, *tmp_pkt;
+ struct rr_fragment *frag, *next;
+ struct msm_rpc_reply *reply, *reply_tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local_endpoints_lock, flags);
+ /* remove all partial packets received */
+ list_for_each_entry(ept, &local_endpoints, list) {
+ RR("modem_reset_start_clenup PID %x, remotepid:%d \n",
+ ept->dst_pid, RPCROUTER_PID_REMOTE);
+ /* remove replies */
+ spin_lock(&ept->reply_q_lock);
+ list_for_each_entry_safe(reply, reply_tmp,
+ &ept->reply_pend_q, list) {
+ list_del(&reply->list);
+ kfree(reply);
+ }
+ list_for_each_entry_safe(reply, reply_tmp,
+ &ept->reply_avail_q, list) {
+ list_del(&reply->list);
+ kfree(reply);
+ }
+ spin_unlock(&ept->reply_q_lock);
+ if (ept->dst_pid == RPCROUTER_PID_REMOTE) {
+ spin_lock(&ept->incomplete_lock);
+ list_for_each_entry_safe(pkt, tmp_pkt,
+ &ept->incomplete, list) {
+ list_del(&pkt->list);
+ frag = pkt->first;
+ while (frag != NULL) {
+ next = frag->next;
+ kfree(frag);
+ frag = next;
+ }
+ kfree(pkt);
+ }
+ spin_unlock(&ept->incomplete_lock);
+ /* remove all completed packets waiting to be read*/
+ spin_lock(&ept->read_q_lock);
+ list_for_each_entry_safe(pkt, tmp_pkt, &ept->read_q,
+ list) {
+ list_del(&pkt->list);
+ frag = pkt->first;
+ while (frag != NULL) {
+ next = frag->next;
+ kfree(frag);
+ frag = next;
+ }
+ kfree(pkt);
+ }
+ spin_unlock(&ept->read_q_lock);
+ /* Set restart state for local ep */
+ RR("EPT:0x%p, State %d RESTART_PEND_NTFY_SVR "
+ "PROG:0x%08x VERS:0x%08x \n",
+ ept, ept->restart_state, be32_to_cpu(ept->dst_prog),
+ be32_to_cpu(ept->dst_vers));
+ spin_lock(&ept->restart_lock);
+ ept->restart_state = RESTART_PEND_NTFY_SVR;
+ spin_unlock(&ept->restart_lock);
+ wake_up(&ept->wait_q);
+ }
+ }
+
+ spin_unlock_irqrestore(&local_endpoints_lock, flags);
+
+ /* Unblock endpoints waiting for quota ack*/
+ spin_lock_irqsave(&remote_endpoints_lock, flags);
+ list_for_each_entry(r_ept, &remote_endpoints, list) {
+ spin_lock(&r_ept->quota_lock);
+ r_ept->quota_restart_state = RESTART_QUOTA_ABORT;
+ RR("Set STATE_PENDING PID:0x%08x CID:0x%08x \n", r_ept->pid,
+ r_ept->cid);
+ spin_unlock(&r_ept->quota_lock);
+ wake_up(&r_ept->quota_wait);
+ }
+ spin_unlock_irqrestore(&remote_endpoints_lock, flags);
+
+}
+
+
+static struct rr_server *rpcrouter_create_server(uint32_t pid,
+ uint32_t cid,
+ uint32_t prog,
+ uint32_t ver)
+{
+ struct rr_server *server;
+ unsigned long flags;
+ int rc;
+
+ server = kmalloc(sizeof(struct rr_server), GFP_KERNEL);
+ if (!server)
+ return ERR_PTR(-ENOMEM);
+
+ memset(server, 0, sizeof(struct rr_server));
+ server->pid = pid;
+ server->cid = cid;
+ server->prog = prog;
+ server->vers = ver;
+
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_add_tail(&server->list, &server_list);
+ spin_unlock_irqrestore(&server_list_lock, flags);
+
+ if (pid == RPCROUTER_PID_REMOTE) {
+ rc = msm_rpcrouter_create_server_cdev(server);
+ if (rc < 0)
+ goto out_fail;
+ }
+ return server;
+out_fail:
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_del(&server->list);
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ kfree(server);
+ return ERR_PTR(rc);
+}
+
+static void rpcrouter_destroy_server(struct rr_server *server)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_del(&server->list);
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ device_destroy(msm_rpcrouter_class, server->device_number);
+ kfree(server);
+}
+
+static struct rr_server *rpcrouter_lookup_server(uint32_t prog, uint32_t ver)
+{
+ struct rr_server *server;
+ unsigned long flags;
+
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_for_each_entry(server, &server_list, list) {
+ if (server->prog == prog
+ && server->vers == ver) {
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ return server;
+ }
+ }
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ return NULL;
+}
+
+static struct rr_server *rpcrouter_lookup_server_by_dev(dev_t dev)
+{
+ struct rr_server *server;
+ unsigned long flags;
+
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_for_each_entry(server, &server_list, list) {
+ if (server->device_number == dev) {
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ return server;
+ }
+ }
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ return NULL;
+}
+
+struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
+{
+ struct msm_rpc_endpoint *ept;
+ unsigned long flags;
+
+ ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
+ if (!ept)
+ return NULL;
+ memset(ept, 0, sizeof(struct msm_rpc_endpoint));
+ ept->cid = (uint32_t) ept;
+ ept->pid = RPCROUTER_PID_LOCAL;
+ ept->dev = dev;
+
+ if ((dev != msm_rpcrouter_devno) && (dev != MKDEV(0, 0))) {
+ struct rr_server *srv;
+ /*
+ * This is a userspace client which opened
+ * a program/ver devicenode. Bind the client
+ * to that destination
+ */
+ srv = rpcrouter_lookup_server_by_dev(dev);
+ /* TODO: bug? really? */
+ BUG_ON(!srv);
+
+ ept->dst_pid = srv->pid;
+ ept->dst_cid = srv->cid;
+ ept->dst_prog = cpu_to_be32(srv->prog);
+ ept->dst_vers = cpu_to_be32(srv->vers);
+ } else {
+ /* mark not connected */
+ ept->dst_pid = 0xffffffff;
+ }
+
+ init_waitqueue_head(&ept->wait_q);
+ INIT_LIST_HEAD(&ept->read_q);
+ spin_lock_init(&ept->read_q_lock);
+ INIT_LIST_HEAD(&ept->reply_avail_q);
+ INIT_LIST_HEAD(&ept->reply_pend_q);
+ spin_lock_init(&ept->reply_q_lock);
+ spin_lock_init(&ept->restart_lock);
+ init_waitqueue_head(&ept->restart_wait);
+ ept->restart_state = RESTART_NORMAL;
+ INIT_LIST_HEAD(&ept->incomplete);
+ spin_lock_init(&ept->incomplete_lock);
+
+ spin_lock_irqsave(&local_endpoints_lock, flags);
+ list_add_tail(&ept->list, &local_endpoints);
+ spin_unlock_irqrestore(&local_endpoints_lock, flags);
+ return ept;
+}
+
+int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept)
+{
+ int rc;
+ union rr_control_msg msg;
+ struct msm_rpc_reply *reply, *reply_tmp;
+ unsigned long flags;
+
+ msg.cmd = RPCROUTER_CTRL_CMD_REMOVE_CLIENT;
+ msg.cli.pid = ept->pid;
+ msg.cli.cid = ept->cid;
+
+ RR("x REMOVE_CLIENT id=%d:%08x\n", ept->pid, ept->cid);
+ rc = rpcrouter_send_control_msg(&msg);
+ if (rc < 0)
+ return rc;
+
+ /* Free replies */
+ spin_lock_irqsave(&ept->reply_q_lock, flags);
+ list_for_each_entry_safe(reply, reply_tmp, &ept->reply_pend_q, list) {
+ list_del(&reply->list);
+ kfree(reply);
+ }
+ list_for_each_entry_safe(reply, reply_tmp, &ept->reply_avail_q, list) {
+ list_del(&reply->list);
+ kfree(reply);
+ }
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+
+ list_del(&ept->list);
+ kfree(ept);
+ return 0;
+}
+
+static int rpcrouter_create_remote_endpoint(uint32_t cid)
+{
+ struct rr_remote_endpoint *new_c;
+ unsigned long flags;
+
+ new_c = kmalloc(sizeof(struct rr_remote_endpoint), GFP_KERNEL);
+ if (!new_c)
+ return -ENOMEM;
+ memset(new_c, 0, sizeof(struct rr_remote_endpoint));
+
+ new_c->cid = cid;
+ new_c->pid = RPCROUTER_PID_REMOTE;
+ init_waitqueue_head(&new_c->quota_wait);
+ spin_lock_init(&new_c->quota_lock);
+
+ spin_lock_irqsave(&remote_endpoints_lock, flags);
+ list_add_tail(&new_c->list, &remote_endpoints);
+ new_c->quota_restart_state = RESTART_NORMAL;
+ spin_unlock_irqrestore(&remote_endpoints_lock, flags);
+ return 0;
+}
+
+static struct msm_rpc_endpoint *rpcrouter_lookup_local_endpoint(uint32_t cid)
+{
+ struct msm_rpc_endpoint *ept;
+ unsigned long flags;
+
+ spin_lock_irqsave(&local_endpoints_lock, flags);
+ list_for_each_entry(ept, &local_endpoints, list) {
+ if (ept->cid == cid) {
+ spin_unlock_irqrestore(&local_endpoints_lock, flags);
+ return ept;
+ }
+ }
+ spin_unlock_irqrestore(&local_endpoints_lock, flags);
+ return NULL;
+}
+
+static struct rr_remote_endpoint *rpcrouter_lookup_remote_endpoint(uint32_t cid)
+{
+ struct rr_remote_endpoint *ept;
+ unsigned long flags;
+
+ spin_lock_irqsave(&remote_endpoints_lock, flags);
+ list_for_each_entry(ept, &remote_endpoints, list) {
+ if (ept->cid == cid) {
+ spin_unlock_irqrestore(&remote_endpoints_lock, flags);
+ return ept;
+ }
+ }
+ spin_unlock_irqrestore(&remote_endpoints_lock, flags);
+ return NULL;
+}
+
+static void handle_server_restart(struct rr_server *server, uint32_t cid,
+ uint32_t prog, uint32_t vers)
+{
+ struct rr_remote_endpoint *r_ept;
+ struct msm_rpc_endpoint *ept;
+ unsigned long flags;
+ r_ept = rpcrouter_lookup_remote_endpoint(cid);
+ if (r_ept && (r_ept->quota_restart_state !=
+ RESTART_NORMAL)) {
+ spin_lock_irqsave(&r_ept->quota_lock, flags);
+ r_ept->tx_quota_cntr = 0;
+ r_ept->quota_restart_state =
+ RESTART_NORMAL;
+ spin_unlock_irqrestore(&r_ept->quota_lock, flags);
+ printk(KERN_INFO "rpcrouter: Remote EP %0x Reset\n",
+ (unsigned int)r_ept);
+ wake_up(&r_ept->quota_wait);
+ }
+ spin_lock_irqsave(&local_endpoints_lock, flags);
+ list_for_each_entry(ept, &local_endpoints, list) {
+ if ((be32_to_cpu(ept->dst_prog) == prog) &&
+ (be32_to_cpu(ept->dst_vers) == vers) &&
+ (ept->restart_state & RESTART_PEND_SVR)) {
+ spin_lock(&ept->restart_lock);
+ ept->restart_state &= ~RESTART_PEND_SVR;
+ spin_unlock(&ept->restart_lock);
+ D("rpcrouter: Local EPT Reset %08x:%08x \n",
+ prog, vers);
+ wake_up(&ept->restart_wait);
+ wake_up(&ept->wait_q);
+ }
+ }
+ spin_unlock_irqrestore(&local_endpoints_lock, flags);
+}
+
+static int process_control_msg(union rr_control_msg *msg, int len)
+{
+ union rr_control_msg ctl;
+ struct rr_server *server;
+ struct rr_remote_endpoint *r_ept;
+ int rc = 0;
+ unsigned long flags;
+ static int first = 1;
+
+ if (len != sizeof(*msg)) {
+ printk(KERN_ERR "rpcrouter: r2r msg size %d != %d\n",
+ len, sizeof(*msg));
+ return -EINVAL;
+ }
+
+ switch (msg->cmd) {
+ case RPCROUTER_CTRL_CMD_HELLO:
+ RR("o HELLO\n");
+
+ RR("x HELLO\n");
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.cmd = RPCROUTER_CTRL_CMD_HELLO;
+ rpcrouter_send_control_msg(&ctl);
+
+ initialized = 1;
+
+ /* Send list of servers one at a time */
+ ctl.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
+
+ /* TODO: long time to hold a spinlock... */
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_for_each_entry(server, &server_list, list) {
+ if (server->pid != RPCROUTER_PID_LOCAL)
+ continue;
+ ctl.srv.pid = server->pid;
+ ctl.srv.cid = server->cid;
+ ctl.srv.prog = server->prog;
+ ctl.srv.vers = server->vers;
+
+ RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
+ server->pid, server->cid,
+ server->prog, server->vers);
+
+ rpcrouter_send_control_msg(&ctl);
+ }
+ spin_unlock_irqrestore(&server_list_lock, flags);
+
+ if (first) {
+ first = 0;
+ queue_work(rpcrouter_workqueue,
+ &work_create_rpcrouter_pdev);
+ }
+ break;
+
+ case RPCROUTER_CTRL_CMD_RESUME_TX:
+ RR("o RESUME_TX id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
+
+ r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
+ if (!r_ept) {
+ printk(KERN_ERR
+ "rpcrouter: Unable to resume client\n");
+ break;
+ }
+ spin_lock_irqsave(&r_ept->quota_lock, flags);
+ r_ept->tx_quota_cntr = 0;
+ spin_unlock_irqrestore(&r_ept->quota_lock, flags);
+ wake_up(&r_ept->quota_wait);
+ break;
+
+ case RPCROUTER_CTRL_CMD_NEW_SERVER:
+ if (msg->srv.vers == 0) {
+ pr_err(
+ "rpcrouter: Server create rejected, version = 0, "
+ "program = %08x\n", msg->srv.prog);
+ break;
+ }
+
+ RR("o NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
+ msg->srv.pid, msg->srv.cid, msg->srv.prog, msg->srv.vers);
+
+ server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
+
+ if (!server) {
+ server = rpcrouter_create_server(
+ msg->srv.pid, msg->srv.cid,
+ msg->srv.prog, msg->srv.vers);
+ if (!server)
+ return -ENOMEM;
+ /*
+ * XXX: Verify that its okay to add the
+ * client to our remote client list
+ * if we get a NEW_SERVER notification
+ */
+ if (!rpcrouter_lookup_remote_endpoint(msg->srv.cid)) {
+ rc = rpcrouter_create_remote_endpoint(
+ msg->srv.cid);
+ if (rc < 0)
+ printk(KERN_ERR
+ "rpcrouter:Client create"
+ "error (%d)\n", rc);
+ }
+ schedule_work(&work_create_pdevs);
+ wake_up(&newserver_wait);
+ } else {
+ if ((server->pid == msg->srv.pid) &&
+ (server->cid == msg->srv.cid)) {
+ handle_server_restart(server, msg->srv.cid,
+ msg->srv.prog,
+ msg->srv.vers);
+ } else {
+ server->pid = msg->srv.pid;
+ server->cid = msg->srv.cid;
+ }
+ }
+ break;
+
+ case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
+ RR("o REMOVE_SERVER prog=%08x:%d\n",
+ msg->srv.prog, msg->srv.vers);
+ server = rpcrouter_lookup_server(msg->srv.prog, msg->srv.vers);
+ if (server)
+ rpcrouter_destroy_server(server);
+ break;
+
+ case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
+ RR("o REMOVE_CLIENT id=%d:%08x\n", msg->cli.pid, msg->cli.cid);
+ if (msg->cli.pid != RPCROUTER_PID_REMOTE) {
+ printk(KERN_ERR
+ "rpcrouter: Denying remote removal of "
+ "local client\n");
+ break;
+ }
+ r_ept = rpcrouter_lookup_remote_endpoint(msg->cli.cid);
+ if (r_ept) {
+ spin_lock_irqsave(&remote_endpoints_lock, flags);
+ list_del(&r_ept->list);
+ spin_unlock_irqrestore(&remote_endpoints_lock, flags);
+ kfree(r_ept);
+ }
+
+ /* Notify local clients of this event */
+ printk(KERN_ERR "rpcrouter: LOCAL NOTIFICATION NOT IMP\n");
+ rc = -ENOSYS;
+
+ break;
+ case RPCROUTER_CTRL_CMD_PING:
+ /* No action needed for ping messages received */
+ RR("o PING\n");
+ break;
+ default:
+ RR("o UNKNOWN(%08x)\n", msg->cmd);
+ rc = -ENOSYS;
+ }
+
+ return rc;
+}
+
+static void do_create_rpcrouter_pdev(struct work_struct *work)
+{
+ platform_device_register(&rpcrouter_pdev);
+}
+
+static void do_create_pdevs(struct work_struct *work)
+{
+ unsigned long flags;
+ struct rr_server *server;
+
+ /* TODO: race if destroyed while being registered */
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_for_each_entry(server, &server_list, list) {
+ if (server->pid == RPCROUTER_PID_REMOTE) {
+ if (server->pdev_name[0] == 0) {
+ spin_unlock_irqrestore(&server_list_lock,
+ flags);
+ msm_rpcrouter_create_server_pdev(server);
+ schedule_work(&work_create_pdevs);
+ return;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&server_list_lock, flags);
+}
+
+static void rpcrouter_smdnotify(void *_dev, unsigned event)
+{
+ if (event != SMD_EVENT_DATA)
+ return;
+
+ wake_up(&smd_wait);
+}
+
+static void *rr_malloc(unsigned sz)
+{
+ void *ptr = kmalloc(sz, GFP_KERNEL);
+ if (ptr)
+ return ptr;
+
+ printk(KERN_ERR "rpcrouter: kmalloc of %d failed, retrying...\n", sz);
+ do {
+ ptr = kmalloc(sz, GFP_KERNEL);
+ } while (!ptr);
+
+ return ptr;
+}
+
+/* TODO: deal with channel teardown / restore */
+static int rr_read(void *data, int len)
+{
+ int rc;
+ unsigned long flags;
+// printk("rr_read() %d\n", len);
+ for(;;) {
+ spin_lock_irqsave(&smd_lock, flags);
+ if (smd_read_avail(smd_channel) >= len) {
+ rc = smd_read(smd_channel, data, len);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ if (rc == len)
+ return 0;
+ else
+ return -EIO;
+ }
+ rpcrouter_need_len = len;
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+// printk("rr_read: waiting (%d)\n", len);
+ wait_event(smd_wait, smd_read_avail(smd_channel) >= len);
+ }
+ return 0;
+}
+
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+static char *type_to_str(int i)
+{
+ switch (i) {
+ case RPCROUTER_CTRL_CMD_DATA:
+ return "data ";
+ case RPCROUTER_CTRL_CMD_HELLO:
+ return "hello ";
+ case RPCROUTER_CTRL_CMD_BYE:
+ return "bye ";
+ case RPCROUTER_CTRL_CMD_NEW_SERVER:
+ return "new_srvr";
+ case RPCROUTER_CTRL_CMD_REMOVE_SERVER:
+ return "rmv_srvr";
+ case RPCROUTER_CTRL_CMD_REMOVE_CLIENT:
+ return "rmv_clnt";
+ case RPCROUTER_CTRL_CMD_RESUME_TX:
+ return "resum_tx";
+ case RPCROUTER_CTRL_CMD_EXIT:
+ return "cmd_exit";
+ default:
+ return "invalid";
+ }
+}
+#endif
+
+static uint32_t r2r_buf[RPCROUTER_MSGSIZE_MAX];
+
+static void do_read_data(struct work_struct *work)
+{
+ struct rr_header hdr;
+ struct rr_packet *pkt;
+ struct rr_fragment *frag;
+ struct msm_rpc_endpoint *ept;
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+ struct rpc_request_hdr *rq;
+#endif
+ uint32_t pm, mid;
+ unsigned long flags;
+
+ if (rr_read(&hdr, sizeof(hdr)))
+ goto fail_io;
+
+ RR("- ver=%d type=%d src=%d:%08x crx=%d siz=%d dst=%d:%08x\n",
+ hdr.version, hdr.type, hdr.src_pid, hdr.src_cid,
+ hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
+ RAW_HDR("[r rr_h] "
+ "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
+ "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
+ hdr.version, type_to_str(hdr.type), hdr.src_pid, hdr.src_cid,
+ hdr.confirm_rx, hdr.size, hdr.dst_pid, hdr.dst_cid);
+
+ if (hdr.version != RPCROUTER_VERSION) {
+ DIAG("version %d != %d\n", hdr.version, RPCROUTER_VERSION);
+ goto fail_data;
+ }
+ if (hdr.size > RPCROUTER_MSGSIZE_MAX) {
+ DIAG("msg size %d > max %d\n", hdr.size, RPCROUTER_MSGSIZE_MAX);
+ goto fail_data;
+ }
+
+ if (hdr.dst_cid == RPCROUTER_ROUTER_ADDRESS) {
+ if (rr_read(r2r_buf, hdr.size))
+ goto fail_io;
+ process_control_msg((void*) r2r_buf, hdr.size);
+ goto done;
+ }
+
+ if (hdr.size < sizeof(pm)) {
+ DIAG("runt packet (no pacmark)\n");
+ goto fail_data;
+ }
+ if (rr_read(&pm, sizeof(pm)))
+ goto fail_io;
+
+ hdr.size -= sizeof(pm);
+
+ frag = rr_malloc(hdr.size + sizeof(*frag));
+ frag->next = NULL;
+ frag->length = hdr.size;
+ if (rr_read(frag->data, hdr.size))
+ goto fail_io;
+
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+ if ((smd_rpcrouter_debug_mask & RAW_PMR) &&
+ ((pm >> 30 & 0x1) || (pm >> 31 & 0x1))) {
+ uint32_t xid = 0;
+ if (pm >> 30 & 0x1) {
+ rq = (struct rpc_request_hdr *) frag->data;
+ xid = ntohl(rq->xid);
+ }
+ if ((pm >> 31 & 0x1) || (pm >> 30 & 0x1))
+ RAW_PMR_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
+ "len=%3i,dst_cid=%08x\n",
+ xid,
+ pm >> 30 & 0x1,
+ pm >> 31 & 0x1,
+ pm >> 16 & 0xFF,
+ pm & 0xFFFF, hdr.dst_cid);
+ }
+
+ if (smd_rpcrouter_debug_mask & SMEM_LOG) {
+ rq = (struct rpc_request_hdr *) frag->data;
+ if (rq->xid == 0)
+ smem_log_event(SMEM_LOG_PROC_ID_APPS |
+ RPC_ROUTER_LOG_EVENT_MID_READ,
+ PACMARK_MID(pm),
+ hdr.dst_cid,
+ hdr.src_cid);
+ else
+ smem_log_event(SMEM_LOG_PROC_ID_APPS |
+ RPC_ROUTER_LOG_EVENT_MSG_READ,
+ ntohl(rq->xid),
+ hdr.dst_cid,
+ hdr.src_cid);
+ }
+#endif
+
+ ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
+ if (!ept) {
+ DIAG("no local ept for cid %08x\n", hdr.dst_cid);
+ kfree(frag);
+ goto done;
+ }
+
+ /* See if there is already a partial packet that matches our mid
+ * and if so, append this fragment to that packet.
+ */
+ mid = PACMARK_MID(pm);
+ spin_lock_irqsave(&ept->incomplete_lock, flags);
+ list_for_each_entry(pkt, &ept->incomplete, list) {
+ if (pkt->mid == mid) {
+ pkt->last->next = frag;
+ pkt->last = frag;
+ pkt->length += frag->length;
+ if (PACMARK_LAST(pm)) {
+ list_del(&pkt->list);
+ spin_unlock_irqrestore(&ept->incomplete_lock,
+ flags);
+ goto packet_complete;
+ }
+ spin_unlock_irqrestore(&ept->incomplete_lock, flags);
+ goto done;
+ }
+ }
+ spin_unlock_irqrestore(&ept->incomplete_lock, flags);
+ /* This mid is new -- create a packet for it, and put it on
+ * the incomplete list if this fragment is not a last fragment,
+ * otherwise put it on the read queue.
+ */
+ pkt = rr_malloc(sizeof(struct rr_packet));
+ pkt->first = frag;
+ pkt->last = frag;
+ memcpy(&pkt->hdr, &hdr, sizeof(hdr));
+ pkt->mid = mid;
+ pkt->length = frag->length;
+ if (!PACMARK_LAST(pm)) {
+ list_add_tail(&pkt->list, &ept->incomplete);
+ goto done;
+ }
+
+packet_complete:
+ spin_lock_irqsave(&ept->read_q_lock, flags);
+ list_add_tail(&pkt->list, &ept->read_q);
+ wake_up(&ept->wait_q);
+ spin_unlock_irqrestore(&ept->read_q_lock, flags);
+done:
+
+ if (hdr.confirm_rx) {
+ union rr_control_msg msg;
+
+ msg.cmd = RPCROUTER_CTRL_CMD_RESUME_TX;
+ msg.cli.pid = hdr.dst_pid;
+ msg.cli.cid = hdr.dst_cid;
+
+ RR("x RESUME_TX id=%d:%08x\n", msg.cli.pid, msg.cli.cid);
+ rpcrouter_send_control_msg(&msg);
+
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+ if (smd_rpcrouter_debug_mask & SMEM_LOG)
+ smem_log_event(SMEM_LOG_PROC_ID_APPS |
+ RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT,
+ RPCROUTER_PID_LOCAL,
+ hdr.dst_cid,
+ hdr.src_cid);
+#endif
+
+ }
+
+ queue_work(rpcrouter_workqueue, &work_read_data);
+ return;
+
+fail_io:
+fail_data:
+ printk(KERN_ERR "rpc_router has died\n");
+}
+
+void msm_rpc_setup_req(struct rpc_request_hdr *hdr, uint32_t prog,
+ uint32_t vers, uint32_t proc)
+{
+ memset(hdr, 0, sizeof(struct rpc_request_hdr));
+ hdr->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
+ hdr->rpc_vers = cpu_to_be32(2);
+ hdr->prog = cpu_to_be32(prog);
+ hdr->vers = cpu_to_be32(vers);
+ hdr->procedure = cpu_to_be32(proc);
+}
+EXPORT_SYMBOL(msm_rpc_setup_req);
+
+struct msm_rpc_endpoint *msm_rpc_open(void)
+{
+ struct msm_rpc_endpoint *ept;
+
+ ept = msm_rpcrouter_create_local_endpoint(MKDEV(0, 0));
+ if (ept == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ return ept;
+}
+
+int msm_rpc_close(struct msm_rpc_endpoint *ept)
+{
+ return msm_rpcrouter_destroy_local_endpoint(ept);
+}
+EXPORT_SYMBOL(msm_rpc_close);
+
+static int msm_rpc_write_pkt(
+ struct rr_header *hdr,
+ struct msm_rpc_endpoint *ept,
+ struct rr_remote_endpoint *r_ept,
+ void *buffer,
+ int count,
+ int first,
+ int last,
+ uint32_t mid
+ )
+{
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+ struct rpc_request_hdr *rq = buffer;
+#endif
+ uint32_t pacmark;
+ unsigned long flags;
+ int needed;
+
+ DEFINE_WAIT(__wait);
+
+ /* Create routing header */
+ hdr->type = RPCROUTER_CTRL_CMD_DATA;
+ hdr->version = RPCROUTER_VERSION;
+ hdr->src_pid = ept->pid;
+ hdr->src_cid = ept->cid;
+ hdr->confirm_rx = 0;
+ hdr->size = count + sizeof(uint32_t);
+
+ for (;;) {
+ prepare_to_wait(&ept->restart_wait, &__wait,
+ TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&ept->restart_lock, flags);
+ if (ept->restart_state == RESTART_NORMAL) {
+ spin_unlock_irqrestore(&ept->restart_lock, flags);
+ break;
+ }
+ if (signal_pending(current) &&
+ ((!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))) {
+ spin_unlock_irqrestore(&ept->restart_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&ept->restart_lock, flags);
+ schedule();
+ }
+ finish_wait(&ept->restart_wait, &__wait);
+
+ if (signal_pending(current) &&
+ (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
+ return -ERESTARTSYS;
+ }
+
+ for (;;) {
+ prepare_to_wait(&r_ept->quota_wait, &__wait,
+ TASK_INTERRUPTIBLE);
+ spin_lock_irqsave(&r_ept->quota_lock, flags);
+ if ((r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA) ||
+ (r_ept->quota_restart_state != RESTART_NORMAL))
+ break;
+ if (signal_pending(current) &&
+ (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
+ break;
+ spin_unlock_irqrestore(&r_ept->quota_lock, flags);
+ schedule();
+ }
+ finish_wait(&r_ept->quota_wait, &__wait);
+
+ if (r_ept->quota_restart_state != RESTART_NORMAL) {
+ spin_lock(&ept->restart_lock);
+ ept->restart_state &= ~RESTART_PEND_NTFY;
+ spin_unlock(&ept->restart_lock);
+ spin_unlock_irqrestore(&r_ept->quota_lock, flags);
+ return -ENETRESET;
+ }
+
+ if (signal_pending(current) &&
+ (!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
+ spin_unlock_irqrestore(&r_ept->quota_lock, flags);
+ return -ERESTARTSYS;
+ }
+ r_ept->tx_quota_cntr++;
+ if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA) {
+ hdr->confirm_rx = 1;
+
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+ if (smd_rpcrouter_debug_mask & SMEM_LOG) {
+ if (rq->xid == 0)
+ smem_log_event(SMEM_LOG_PROC_ID_APPS |
+ RPC_ROUTER_LOG_EVENT_MID_CFM_REQ,
+ hdr->dst_pid,
+ hdr->dst_cid,
+ hdr->src_cid);
+ else
+ smem_log_event(SMEM_LOG_PROC_ID_APPS |
+ RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ,
+ hdr->dst_pid,
+ hdr->dst_cid,
+ hdr->src_cid);
+ }
+#endif
+
+ }
+ pacmark = PACMARK(count, mid, first, last);
+
+ spin_unlock_irqrestore(&r_ept->quota_lock, flags);
+
+ spin_lock_irqsave(&smd_lock, flags);
+ spin_lock(&ept->restart_lock);
+ if (ept->restart_state != RESTART_NORMAL) {
+ ept->restart_state &= ~RESTART_PEND_NTFY;
+ spin_unlock(&ept->restart_lock);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ return -ENETRESET;
+ }
+
+ needed = sizeof(*hdr) + hdr->size;
+ while ((ept->restart_state == RESTART_NORMAL) &&
+ (smd_write_avail(smd_channel) < needed)) {
+ spin_unlock(&ept->restart_lock);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ msleep(250);
+ spin_lock_irqsave(&smd_lock, flags);
+ spin_lock(&ept->restart_lock);
+ }
+ if (ept->restart_state != RESTART_NORMAL) {
+ ept->restart_state &= ~RESTART_PEND_NTFY;
+ spin_unlock(&ept->restart_lock);
+ spin_unlock_irqrestore(&smd_lock, flags);
+ return -ENETRESET;
+ }
+
+ /* TODO: deal with full fifo */
+ smd_write(smd_channel, hdr, sizeof(*hdr));
+ RAW_HDR("[w rr_h] "
+ "ver=%i,type=%s,src_pid=%08x,src_cid=%08x,"
+ "confirm_rx=%i,size=%3i,dst_pid=%08x,dst_cid=%08x\n",
+ hdr->version, type_to_str(hdr->type), hdr->src_pid, hdr->src_cid,
+ hdr->confirm_rx, hdr->size, hdr->dst_pid, hdr->dst_cid);
+ smd_write(smd_channel, &pacmark, sizeof(pacmark));
+
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+ if ((smd_rpcrouter_debug_mask & RAW_PMW) &&
+ ((pacmark >> 30 & 0x1) || (pacmark >> 31 & 0x1))) {
+ uint32_t xid = 0;
+ if (pacmark >> 30 & 0x1)
+ xid = ntohl(rq->xid);
+ if ((pacmark >> 31 & 0x1) || (pacmark >> 30 & 0x1))
+ RAW_PMW_NOMASK("xid:0x%03x first=%i,last=%i,mid=%3i,"
+ "len=%3i,src_cid=%x\n",
+ xid,
+ pacmark >> 30 & 0x1,
+ pacmark >> 31 & 0x1,
+ pacmark >> 16 & 0xFF,
+ pacmark & 0xFFFF, hdr->src_cid);
+ }
+#endif
+
+ smd_write(smd_channel, buffer, count);
+ spin_unlock(&ept->restart_lock);
+ spin_unlock_irqrestore(&smd_lock, flags);
+
+#if defined(CONFIG_MSM_ONCRPCROUTER_DEBUG)
+ if (smd_rpcrouter_debug_mask & SMEM_LOG) {
+ if (rq->xid == 0)
+ smem_log_event(SMEM_LOG_PROC_ID_APPS |
+ RPC_ROUTER_LOG_EVENT_MID_WRITTEN,
+ PACMARK_MID(pacmark),
+ hdr->dst_cid,
+ hdr->src_cid);
+ else
+ smem_log_event(SMEM_LOG_PROC_ID_APPS |
+ RPC_ROUTER_LOG_EVENT_MSG_WRITTEN,
+ ntohl(rq->xid),
+ hdr->dst_cid,
+ hdr->src_cid);
+ }
+#endif
+
+ return needed;
+}
+
+static struct msm_rpc_reply *get_pend_reply(struct msm_rpc_endpoint *ept,
+ uint32_t xid)
+{
+ unsigned long flags;
+ struct msm_rpc_reply *reply;
+ spin_lock_irqsave(&ept->reply_q_lock, flags);
+ list_for_each_entry(reply, &ept->reply_pend_q, list) {
+ if (reply->xid == xid) {
+ list_del(&reply->list);
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+ return reply;
+ }
+ }
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+ return NULL;
+}
+
+void get_requesting_client(struct msm_rpc_endpoint *ept, uint32_t xid,
+ struct msm_rpc_client_info *clnt_info)
+{
+ unsigned long flags;
+ struct msm_rpc_reply *reply;
+
+ if (!clnt_info)
+ return;
+
+ spin_lock_irqsave(&ept->reply_q_lock, flags);
+ list_for_each_entry(reply, &ept->reply_pend_q, list) {
+ if (reply->xid == xid) {
+ clnt_info->pid = reply->pid;
+ clnt_info->cid = reply->cid;
+ clnt_info->prog = reply->prog;
+ clnt_info->vers = reply->vers;
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+ return;
+}
+
+static void set_avail_reply(struct msm_rpc_endpoint *ept,
+ struct msm_rpc_reply *reply)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ept->reply_q_lock, flags);
+ list_add_tail(&reply->list, &ept->reply_avail_q);
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+}
+
+static struct msm_rpc_reply *get_avail_reply(struct msm_rpc_endpoint *ept)
+{
+ struct msm_rpc_reply *reply;
+ unsigned long flags;
+ if (list_empty(&ept->reply_avail_q)) {
+ if (ept->reply_cnt >= RPCROUTER_PEND_REPLIES_MAX) {
+ printk(KERN_ERR
+ "exceeding max replies of %d \n",
+ RPCROUTER_PEND_REPLIES_MAX);
+ return 0;
+ }
+ reply = kmalloc(sizeof(struct msm_rpc_reply), GFP_KERNEL);
+ if (!reply)
+ return 0;
+ D("Adding reply 0x%08x \n", (unsigned int)reply);
+ memset(reply, 0, sizeof(struct msm_rpc_reply));
+ spin_lock_irqsave(&ept->reply_q_lock, flags);
+ ept->reply_cnt++;
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+ } else {
+ spin_lock_irqsave(&ept->reply_q_lock, flags);
+ reply = list_first_entry(&ept->reply_avail_q,
+ struct msm_rpc_reply,
+ list);
+ list_del(&reply->list);
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+ }
+ return reply;
+}
+
+static void set_pend_reply(struct msm_rpc_endpoint *ept,
+ struct msm_rpc_reply *reply)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&ept->reply_q_lock, flags);
+ list_add_tail(&reply->list, &ept->reply_pend_q);
+ spin_unlock_irqrestore(&ept->reply_q_lock, flags);
+}
+
+int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
+{
+ struct rr_header hdr;
+ struct rpc_request_hdr *rq = buffer;
+ struct rr_remote_endpoint *r_ept;
+ struct msm_rpc_reply *reply;
+ int max_tx;
+ int tx_cnt;
+ char *tx_buf;
+ int rc;
+ int first_pkt = 1;
+ uint32_t mid;
+
+ /* snoop the RPC packet and enforce permissions */
+
+ /* has to have at least the xid and type fields */
+ if (count < (sizeof(uint32_t) * 2)) {
+ printk(KERN_ERR "rr_write: rejecting runt packet\n");
+ return -EINVAL;
+ }
+
+ if (rq->type == 0) {
+ /* RPC CALL */
+ if (count < (sizeof(uint32_t) * 6)) {
+ printk(KERN_ERR
+ "rr_write: rejecting runt call packet\n");
+ return -EINVAL;
+ }
+ if (ept->dst_pid == 0xffffffff) {
+ printk(KERN_ERR "rr_write: not connected\n");
+ return -ENOTCONN;
+ }
+ if ((ept->dst_prog != rq->prog) ||
+ ((be32_to_cpu(ept->dst_vers) & 0x0fff0000) !=
+ (be32_to_cpu(rq->vers) & 0x0fff0000))) {
+ printk(KERN_ERR
+ "rr_write: cannot write to %08x:%08x "
+ "(bound to %08x:%08x)\n",
+ be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
+ be32_to_cpu(ept->dst_prog),
+ be32_to_cpu(ept->dst_vers));
+ return -EINVAL;
+ }
+ hdr.dst_pid = ept->dst_pid;
+ hdr.dst_cid = ept->dst_cid;
+ IO("CALL to %08x:%d @ %d:%08x (%d bytes)\n",
+ be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
+ ept->dst_pid, ept->dst_cid, count);
+ } else {
+ /* RPC REPLY */
+ reply = get_pend_reply(ept, rq->xid);
+ if (!reply) {
+ printk(KERN_ERR
+ "rr_write: rejecting, reply not found \n");
+ return -EINVAL;
+ }
+ hdr.dst_pid = reply->pid;
+ hdr.dst_cid = reply->cid;
+ set_avail_reply(ept, reply);
+ IO("REPLY to xid=%d @ %d:%08x (%d bytes)\n",
+ be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
+ }
+
+ r_ept = rpcrouter_lookup_remote_endpoint(hdr.dst_cid);
+
+ if (!r_ept) {
+ printk(KERN_ERR
+ "msm_rpc_write(): No route to ept "
+ "[PID %x CID %x]\n", hdr.dst_pid, hdr.dst_cid);
+ return -EHOSTUNREACH;
+ }
+
+ tx_cnt = count;
+ tx_buf = buffer;
+ mid = atomic_add_return(1, &pm_mid) & 0xFF;
+ /* The modem's router can only take 500 bytes of data. The
+ first 8 bytes it uses on the modem side for addressing,
+ the next 4 bytes are for the pacmark header. */
+ max_tx = RPCROUTER_MSGSIZE_MAX - 8 - sizeof(uint32_t);
+ IO("Writing %d bytes, max pkt size is %d\n",
+ tx_cnt, max_tx);
+ while (tx_cnt > 0) {
+ if (tx_cnt > max_tx) {
+ rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
+ tx_buf, max_tx,
+ first_pkt, 0, mid);
+ if (rc < 0)
+ return rc;
+ IO("Wrote %d bytes First %d, Last 0 mid %d\n",
+ rc, first_pkt, mid);
+ tx_cnt -= max_tx;
+ tx_buf += max_tx;
+ } else {
+ rc = msm_rpc_write_pkt(&hdr, ept, r_ept,
+ tx_buf, tx_cnt,
+ first_pkt, 1, mid);
+ if (rc < 0)
+ return rc;
+ IO("Wrote %d bytes First %d Last 1 mid %d\n",
+ rc, first_pkt, mid);
+ break;
+ }
+ first_pkt = 0;
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(msm_rpc_write);
+
+/*
+ * NOTE: It is the responsibility of the caller to kfree buffer
+ */
+int msm_rpc_read(struct msm_rpc_endpoint *ept, void **buffer,
+ unsigned user_len, long timeout)
+{
+ struct rr_fragment *frag, *next;
+ char *buf;
+ int rc;
+
+ rc = __msm_rpc_read(ept, &frag, user_len, timeout);
+ if (rc <= 0)
+ return rc;
+
+ /* single-fragment messages conveniently can be
+ * returned as-is (the buffer is at the front)
+ */
+ if (frag->next == 0) {
+ *buffer = (void*) frag;
+ return rc;
+ }
+
+ /* multi-fragment messages, we have to do it the
+ * hard way, which is rather disgusting right now
+ */
+ buf = rr_malloc(rc);
+ *buffer = buf;
+
+ while (frag != NULL) {
+ memcpy(buf, frag->data, frag->length);
+ next = frag->next;
+ buf += frag->length;
+ kfree(frag);
+ frag = next;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpc_read);
+
+int msm_rpc_call(struct msm_rpc_endpoint *ept, uint32_t proc,
+ void *_request, int request_size,
+ long timeout)
+{
+ return msm_rpc_call_reply(ept, proc,
+ _request, request_size,
+ NULL, 0, timeout);
+}
+EXPORT_SYMBOL(msm_rpc_call);
+
+int msm_rpc_call_reply(struct msm_rpc_endpoint *ept, uint32_t proc,
+ void *_request, int request_size,
+ void *_reply, int reply_size,
+ long timeout)
+{
+ struct rpc_request_hdr *req = _request;
+ struct rpc_reply_hdr *reply;
+ int rc;
+
+ if (request_size < sizeof(*req))
+ return -ETOOSMALL;
+
+ if (ept->dst_pid == 0xffffffff)
+ return -ENOTCONN;
+
+ memset(req, 0, sizeof(*req));
+ req->xid = cpu_to_be32(atomic_add_return(1, &next_xid));
+ req->rpc_vers = cpu_to_be32(2);
+ req->prog = ept->dst_prog;
+ req->vers = ept->dst_vers;
+ req->procedure = cpu_to_be32(proc);
+
+ rc = msm_rpc_write(ept, req, request_size);
+ if (rc < 0)
+ return rc;
+
+ for (;;) {
+ rc = msm_rpc_read(ept, (void*) &reply, -1, timeout);
+ if (rc < 0)
+ return rc;
+ if (rc < (3 * sizeof(uint32_t))) {
+ rc = -EIO;
+ break;
+ }
+ /* we should not get CALL packets -- ignore them */
+ if (reply->type == 0) {
+ kfree(reply);
+ continue;
+ }
+ /* If an earlier call timed out, we could get the (no
+ * longer wanted) reply for it. Ignore replies that
+ * we don't expect
+ */
+ if (reply->xid != req->xid) {
+ kfree(reply);
+ continue;
+ }
+ if (reply->reply_stat != 0) {
+ rc = -EPERM;
+ break;
+ }
+ if (reply->data.acc_hdr.accept_stat != 0) {
+ rc = -EINVAL;
+ break;
+ }
+ if (_reply == NULL) {
+ rc = 0;
+ break;
+ }
+ if (rc > reply_size) {
+ rc = -ENOMEM;
+ } else {
+ memcpy(_reply, reply, rc);
+ }
+ break;
+ }
+ kfree(reply);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpc_call_reply);
+
+
+static inline int ept_packet_available(struct msm_rpc_endpoint *ept)
+{
+ unsigned long flags;
+ int ret;
+ spin_lock_irqsave(&ept->read_q_lock, flags);
+ ret = !list_empty(&ept->read_q);
+ spin_unlock_irqrestore(&ept->read_q_lock, flags);
+ return ret;
+}
+
+int __msm_rpc_read(struct msm_rpc_endpoint *ept,
+ struct rr_fragment **frag_ret,
+ unsigned len, long timeout)
+{
+ struct rr_packet *pkt;
+ struct rpc_request_hdr *rq;
+ struct msm_rpc_reply *reply;
+ DEFINE_WAIT(__wait);
+ unsigned long flags;
+ int rc;
+
+ IO("READ on ept %p\n", ept);
+ spin_lock_irqsave(&ept->restart_lock, flags);
+ if (ept->restart_state != RESTART_NORMAL) {
+ ept->restart_state &= ~RESTART_PEND_NTFY;
+ spin_unlock_irqrestore(&ept->restart_lock, flags);
+ return -ENETRESET;
+ }
+ spin_unlock_irqrestore(&ept->restart_lock, flags);
+
+ if (ept->flags & MSM_RPC_UNINTERRUPTIBLE) {
+ if (timeout < 0) {
+ wait_event(ept->wait_q, ept_packet_available(ept));
+ if (!msm_rpc_clear_netreset(ept))
+ return -ENETRESET;
+ } else {
+ rc = wait_event_timeout(
+ ept->wait_q, ept_packet_available(ept),
+ timeout);
+ if (!msm_rpc_clear_netreset(ept))
+ return -ENETRESET;
+ if (rc == 0)
+ return -ETIMEDOUT;
+ }
+ } else {
+ if (timeout < 0) {
+ rc = wait_event_interruptible(
+ ept->wait_q, ept_packet_available(ept));
+ if (!msm_rpc_clear_netreset(ept))
+ return -ENETRESET;
+ if (rc < 0)
+ return rc;
+ } else {
+ rc = wait_event_interruptible_timeout(
+ ept->wait_q, ept_packet_available(ept),
+ timeout);
+ if (!msm_rpc_clear_netreset(ept))
+ return -ENETRESET;
+ if (rc == 0)
+ return -ETIMEDOUT;
+ }
+ }
+
+ spin_lock_irqsave(&ept->read_q_lock, flags);
+ if (list_empty(&ept->read_q)) {
+ spin_unlock_irqrestore(&ept->read_q_lock, flags);
+ return -EAGAIN;
+ }
+ pkt = list_first_entry(&ept->read_q, struct rr_packet, list);
+ if (pkt->length > len) {
+ spin_unlock_irqrestore(&ept->read_q_lock, flags);
+ return -ETOOSMALL;
+ }
+ list_del(&pkt->list);
+ spin_unlock_irqrestore(&ept->read_q_lock, flags);
+
+ rc = pkt->length;
+
+ *frag_ret = pkt->first;
+ rq = (void*) pkt->first->data;
+ if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
+ /* RPC CALL */
+ reply = get_avail_reply(ept);
+ if (!reply)
+ return -ENOMEM;
+ reply->cid = pkt->hdr.src_cid;
+ reply->pid = pkt->hdr.src_pid;
+ reply->xid = rq->xid;
+ reply->prog = rq->prog;
+ reply->vers = rq->vers;
+ set_pend_reply(ept, reply);
+ }
+
+ kfree(pkt);
+
+ IO("READ on ept %p (%d bytes)\n", ept, rc);
+ return rc;
+}
+
+int msm_rpc_is_compatible_version(uint32_t server_version,
+ uint32_t client_version)
+{
+
+ if ((server_version & RPC_VERSION_MODE_MASK) !=
+ (client_version & RPC_VERSION_MODE_MASK))
+ return 0;
+
+ if (server_version & RPC_VERSION_MODE_MASK)
+ return server_version == client_version;
+
+ return ((server_version & RPC_VERSION_MAJOR_MASK) ==
+ (client_version & RPC_VERSION_MAJOR_MASK)) &&
+ ((server_version & RPC_VERSION_MINOR_MASK) >=
+ (client_version & RPC_VERSION_MINOR_MASK));
+}
+EXPORT_SYMBOL(msm_rpc_is_compatible_version);
+
+int msm_rpc_get_compatible_server(uint32_t prog,
+ uint32_t ver,
+ uint32_t *found_vers)
+{
+ struct rr_server *server;
+ unsigned long flags;
+ uint32_t found = -1;
+ if (found_vers == NULL)
+ return 0;
+
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_for_each_entry(server, &server_list, list) {
+ if ((server->prog == prog) &&
+ msm_rpc_is_compatible_version(server->vers, ver)) {
+ *found_vers = server->vers;
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&server_list_lock, flags);
+ return found;
+}
+EXPORT_SYMBOL(msm_rpc_get_compatible_server);
+
+struct msm_rpc_endpoint *msm_rpc_connect_compatible(uint32_t prog,
+ uint32_t vers, unsigned flags)
+{
+ uint32_t found_vers;
+ int ret;
+ ret = msm_rpc_get_compatible_server(prog, vers, &found_vers);
+ if (ret < 0)
+ return ERR_PTR(-EHOSTUNREACH);
+ if (found_vers != vers) {
+ D("RPC Using new version 0x%08x(0x%08x) prog 0x%08x",
+ vers, found_vers, prog);
+ D(" ... Continuing\n");
+ }
+ return msm_rpc_connect(prog, found_vers, flags);
+}
+EXPORT_SYMBOL(msm_rpc_connect_compatible);
+
+struct msm_rpc_endpoint *msm_rpc_connect(uint32_t prog, uint32_t vers, unsigned flags)
+{
+ struct msm_rpc_endpoint *ept;
+ struct rr_server *server;
+
+ server = rpcrouter_lookup_server(prog, vers);
+ if (!server)
+ return ERR_PTR(-EHOSTUNREACH);
+
+ ept = msm_rpc_open();
+ if (IS_ERR(ept))
+ return ept;
+
+ ept->flags = flags;
+ ept->dst_pid = server->pid;
+ ept->dst_cid = server->cid;
+ ept->dst_prog = cpu_to_be32(prog);
+ ept->dst_vers = cpu_to_be32(vers);
+
+ return ept;
+}
+EXPORT_SYMBOL(msm_rpc_connect);
+
+/* TODO: permission check? */
+int msm_rpc_register_server(struct msm_rpc_endpoint *ept,
+ uint32_t prog, uint32_t vers)
+{
+ int rc;
+ union rr_control_msg msg;
+ struct rr_server *server;
+
+ server = rpcrouter_create_server(ept->pid, ept->cid,
+ prog, vers);
+ if (!server)
+ return -ENODEV;
+
+ msg.srv.cmd = RPCROUTER_CTRL_CMD_NEW_SERVER;
+ msg.srv.pid = ept->pid;
+ msg.srv.cid = ept->cid;
+ msg.srv.prog = prog;
+ msg.srv.vers = vers;
+
+ RR("x NEW_SERVER id=%d:%08x prog=%08x:%08x\n",
+ ept->pid, ept->cid, prog, vers);
+
+ rc = rpcrouter_send_control_msg(&msg);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int msm_rpc_clear_netreset(struct msm_rpc_endpoint *ept)
+{
+ unsigned long flags;
+ int rc = 1;
+ RR("RESET RESTART FLAG for EPT:%08x \n", (unsigned int)ept);
+ spin_lock_irqsave(&ept->restart_lock, flags);
+ if (ept->restart_state != RESTART_NORMAL) {
+ ept->restart_state &= ~RESTART_PEND_NTFY;
+ rc = 0;
+ }
+ spin_unlock_irqrestore(&ept->restart_lock, flags);
+ return rc;
+}
+
+/* TODO: permission check -- disallow unreg of somebody else's server */
+int msm_rpc_unregister_server(struct msm_rpc_endpoint *ept,
+ uint32_t prog, uint32_t vers)
+{
+ struct rr_server *server;
+ server = rpcrouter_lookup_server(prog, vers);
+
+ if (!server)
+ return -ENOENT;
+ rpcrouter_destroy_server(server);
+ return 0;
+}
+
+static int msm_rpcrouter_modem_notify(struct notifier_block *this,
+ unsigned long code,
+ void *_cmd)
+{
+ switch (code) {
+ case MODEM_NOTIFIER_START_RESET:
+ NTFY("%s: MODEM_NOTIFIER_START_RESET", __func__);
+ modem_reset_start_cleanup();
+ break;
+ case MODEM_NOTIFIER_END_RESET:
+ NTFY("%s: MODEM_NOTIFIER_END_RESET", __func__);
+ break;
+ default:
+ NTFY("%s: default", __func__);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_rpcrouter_nb = {
+ .notifier_call = msm_rpcrouter_modem_notify,
+};
+
+static int msm_rpcrouter_probe(struct platform_device *pdev)
+{
+ int rc;
+
+ /* Initialize what we need to start processing */
+ INIT_LIST_HEAD(&local_endpoints);
+ INIT_LIST_HEAD(&remote_endpoints);
+
+ init_waitqueue_head(&newserver_wait);
+ init_waitqueue_head(&smd_wait);
+
+ rpcrouter_workqueue = create_singlethread_workqueue("rpcrouter");
+ if (!rpcrouter_workqueue)
+ return -ENOMEM;
+
+ rc = msm_rpcrouter_init_devices();
+ if (rc < 0)
+ goto fail_destroy_workqueue;
+
+ rc = modem_register_notifier(&msm_rpcrouter_nb);
+ if (rc < 0)
+ goto fail_remove_devices;
+
+ /* Open up SMD channel 2 */
+ initialized = 0;
+ rc = smd_open("RPCCALL", &smd_channel, NULL, rpcrouter_smdnotify);
+ if (rc < 0)
+ goto fail_remove_reset_notifier;
+
+ queue_work(rpcrouter_workqueue, &work_read_data);
+ return 0;
+
+ fail_remove_reset_notifier:
+ modem_unregister_notifier(&msm_rpcrouter_nb);
+ fail_remove_devices:
+ msm_rpcrouter_exit_devices();
+ fail_destroy_workqueue:
+ destroy_workqueue(rpcrouter_workqueue);
+ return rc;
+}
+
+static struct platform_driver msm_smd_channel2_driver = {
+ .probe = msm_rpcrouter_probe,
+ .driver = {
+ .name = "RPCCALL",
+ .owner = THIS_MODULE,
+ },
+};
+
+#if defined(CONFIG_DEBUG_FS)
+#define HSIZE 13
+
+struct sym {
+ uint32_t val;
+ char *str;
+ struct hlist_node node;
+};
+
+static struct sym oncrpc_syms[] = {
+ { 0x30000000, "CM" },
+ { 0x30000001, "DB" },
+ { 0x30000002, "SND" },
+ { 0x30000003, "WMS" },
+ { 0x30000004, "PDSM" },
+ { 0x30000005, "MISC_MODEM_APIS" },
+ { 0x30000006, "MISC_APPS_APIS" },
+ { 0x30000007, "JOYST" },
+ { 0x30000008, "VJOY" },
+ { 0x30000009, "JOYSTC" },
+ { 0x3000000a, "ADSPRTOSATOM" },
+ { 0x3000000b, "ADSPRTOSMTOA" },
+ { 0x3000000c, "I2C" },
+ { 0x3000000d, "TIME_REMOTE" },
+ { 0x3000000e, "NV" },
+ { 0x3000000f, "CLKRGM_SEC" },
+ { 0x30000010, "RDEVMAP" },
+ { 0x30000011, "FS_RAPI" },
+ { 0x30000012, "PBMLIB" },
+ { 0x30000013, "AUDMGR" },
+ { 0x30000014, "MVS" },
+ { 0x30000015, "DOG_KEEPALIVE" },
+ { 0x30000016, "GSDI_EXP" },
+ { 0x30000017, "AUTH" },
+ { 0x30000018, "NVRUIMI" },
+ { 0x30000019, "MMGSDILIB" },
+ { 0x3000001a, "CHARGER" },
+ { 0x3000001b, "UIM" },
+ { 0x3000001C, "ONCRPCTEST" },
+ { 0x3000001d, "PDSM_ATL" },
+ { 0x3000001e, "FS_XMOUNT" },
+ { 0x3000001f, "SECUTIL " },
+ { 0x30000020, "MCCMEID" },
+ { 0x30000021, "PM_STROBE_FLASH" },
+ { 0x30000022, "DS707_EXTIF" },
+ { 0x30000023, "SMD BRIDGE_MODEM" },
+ { 0x30000024, "SMD PORT_MGR" },
+ { 0x30000025, "BUS_PERF" },
+ { 0x30000026, "BUS_MON" },
+ { 0x30000027, "MC" },
+ { 0x30000028, "MCCAP" },
+ { 0x30000029, "MCCDMA" },
+ { 0x3000002a, "MCCDS" },
+ { 0x3000002b, "MCCSCH" },
+ { 0x3000002c, "MCCSRID" },
+ { 0x3000002d, "SNM" },
+ { 0x3000002e, "MCCSYOBJ" },
+ { 0x3000002f, "DS707_APIS" },
+ { 0x30000030, "DS_MP_SHIM_APPS_ASYNC" },
+ { 0x30000031, "DSRLP_APIS" },
+ { 0x30000032, "RLP_APIS" },
+ { 0x30000033, "DS_MP_SHIM_MODEM" },
+ { 0x30000034, "DSHDR_APIS" },
+ { 0x30000035, "DSHDR_MDM_APIS" },
+ { 0x30000036, "DS_MP_SHIM_APPS" },
+ { 0x30000037, "HDRMC_APIS" },
+ { 0x30000038, "SMD_BRIDGE_MTOA" },
+ { 0x30000039, "SMD_BRIDGE_ATOM" },
+ { 0x3000003a, "DPMAPP_OTG" },
+ { 0x3000003b, "DIAG" },
+ { 0x3000003c, "GSTK_EXP" },
+ { 0x3000003d, "DSBC_MDM_APIS" },
+ { 0x3000003e, "HDRMRLP_MDM_APIS" },
+ { 0x3000003f, "HDRMRLP_APPS_APIS" },
+ { 0x30000040, "HDRMC_MRLP_APIS" },
+ { 0x30000041, "PDCOMM_APP_API" },
+ { 0x30000042, "DSAT_APIS" },
+ { 0x30000043, "MISC_RF_APIS" },
+ { 0x30000044, "CMIPAPP" },
+ { 0x30000045, "DSMP_UMTS_MODEM_APIS" },
+ { 0x30000046, "DSMP_UMTS_APPS_APIS" },
+ { 0x30000047, "DSUCSDMPSHIM" },
+ { 0x30000048, "TIME_REMOTE_ATOM" },
+ { 0x3000004a, "SD" },
+ { 0x3000004b, "MMOC" },
+ { 0x3000004c, "WLAN_ADP_FTM" },
+ { 0x3000004d, "WLAN_CP_CM" },
+ { 0x3000004e, "FTM_WLAN" },
+ { 0x3000004f, "SDCC_CPRM" },
+ { 0x30000050, "CPRMINTERFACE" },
+ { 0x30000051, "DATA_ON_MODEM_MTOA_APIS" },
+ { 0x30000052, "DATA_ON_APPS_ATOM_APIS" },
+ { 0x30000053, "MISC_MODEM_APIS_NONWINMOB" },
+ { 0x30000054, "MISC_APPS_APIS_NONWINMOB" },
+ { 0x30000055, "PMEM_REMOTE" },
+ { 0x30000056, "TCXOMGR" },
+ { 0x30000057, "DSUCSDAPPIF_APIS" },
+ { 0x30000058, "BT" },
+ { 0x30000059, "PD_COMMS_API" },
+ { 0x3000005a, "PD_COMMS_CLIENT_API" },
+ { 0x3000005b, "PDAPI" },
+ { 0x3000005c, "LSA_SUPL_DSM" },
+ { 0x3000005d, "TIME_REMOTE_MTOA" },
+ { 0x3000005e, "FTM_BT" },
+ { 0X3000005f, "DSUCSDAPPIF_APIS" },
+ { 0X30000060, "PMAPP_GEN" },
+ { 0X30000061, "PM_LIB" },
+ { 0X30000062, "KEYPAD" },
+ { 0X30000063, "HSU_APP_APIS" },
+ { 0X30000064, "HSU_MDM_APIS" },
+ { 0X30000065, "ADIE_ADC_REMOTE_ATOM " },
+ { 0X30000066, "TLMM_REMOTE_ATOM" },
+ { 0X30000067, "UI_CALLCTRL" },
+ { 0X30000068, "UIUTILS" },
+ { 0X30000069, "PRL" },
+ { 0X3000006a, "HW" },
+ { 0X3000006b, "OEM_RAPI" },
+ { 0X3000006c, "WMSPM" },
+ { 0X3000006d, "BTPF" },
+ { 0X3000006e, "CLKRGM_SYNC_EVENT" },
+ { 0X3000006f, "USB_APPS_RPC" },
+ { 0X30000070, "USB_MODEM_RPC" },
+ { 0X30000071, "ADC" },
+ { 0X30000072, "CAMERAREMOTED" },
+ { 0X30000073, "SECAPIREMOTED" },
+ { 0X30000074, "DSATAPI" },
+ { 0X30000075, "CLKCTL_RPC" },
+ { 0X30000076, "BREWAPPCOORD" },
+ { 0X30000077, "ALTENVSHELL" },
+ { 0X30000078, "WLAN_TRP_UTILS" },
+ { 0X30000079, "GPIO_RPC" },
+ { 0X3000007a, "PING_RPC" },
+ { 0X3000007b, "DSC_DCM_API" },
+ { 0X3000007c, "L1_DS" },
+ { 0X3000007d, "QCHATPK_APIS" },
+ { 0X3000007e, "GPS_API" },
+ { 0X3000007f, "OSS_RRCASN_REMOTE" },
+ { 0X30000080, "PMAPP_OTG_REMOTE" },
+ { 0X30000081, "PING_MDM_RPC" },
+ { 0X30000082, "PING_KERNEL_RPC" },
+ { 0X30000083, "TIMETICK" },
+ { 0X30000084, "WM_BTHCI_FTM " },
+ { 0X30000085, "WM_BT_PF" },
+ { 0X30000086, "IPA_IPC_APIS" },
+ { 0X30000087, "UKCC_IPC_APIS" },
+ { 0X30000088, "CMIPSMS " },
+ { 0X30000089, "VBATT_REMOTE" },
+ { 0X3000008a, "MFPAL" },
+ { 0X3000008b, "DSUMTSPDPREG" },
+ { 0X3000fe00, "RESTART_DAEMON NUMBER 0" },
+ { 0X3000fe01, "RESTART_DAEMON NUMBER 1" },
+ { 0X3000feff, "RESTART_DAEMON NUMBER 255" },
+ { 0X3000fffe, "BACKWARDS_COMPATIBILITY_IN_RPC_CLNT_LOOKUP" },
+ { 0X3000ffff, "RPC_ROUTER_SERVER_PROGRAM" },
+};
+
+#define ONCRPC_SYM 0
+
+static struct sym_tbl {
+ struct sym *data;
+ int size;
+ struct hlist_head hlist[HSIZE];
+} tbl[] = {
+ { oncrpc_syms, ARRAY_SIZE(oncrpc_syms) },
+};
+
+#define hash(val) (val % HSIZE)
+
+static void init_syms(void)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+ for (j = 0; j < HSIZE; ++j)
+ INIT_HLIST_HEAD(&tbl[i].hlist[j]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+ for (j = 0; j < tbl[i].size; ++j) {
+ INIT_HLIST_NODE(&tbl[i].data[j].node);
+ hlist_add_head(&tbl[i].data[j].node,
+ &tbl[i].hlist[hash(tbl[i].data[j].val)]);
+ }
+}
+
+static char *find_sym(uint32_t id, uint32_t val)
+{
+ struct hlist_node *n;
+ struct sym *s;
+
+ hlist_for_each(n, &tbl[id].hlist[hash(val)]) {
+ s = hlist_entry(n, struct sym, node);
+ if (s->val == val)
+ return s->str;
+ }
+
+ return 0;
+}
+
+static int dump_servers(char *buf, int max)
+{
+ int i = 0;
+ unsigned long flags;
+ struct rr_server *svr;
+ char *sym;
+
+ spin_lock_irqsave(&server_list_lock, flags);
+ list_for_each_entry(svr, &server_list, list) {
+ i += scnprintf(buf + i, max - i, "pdev_name: %s\n",
+ svr->pdev_name);
+ i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", svr->pid);
+ i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", svr->cid);
+ i += scnprintf(buf + i, max - i, "prog: 0x%08x", svr->prog);
+ sym = find_sym(ONCRPC_SYM, svr->prog);
+ if (sym)
+ i += scnprintf(buf + i, max - i, " (%s)\n", sym);
+ else
+ i += scnprintf(buf + i, max - i, "\n");
+ i += scnprintf(buf + i, max - i, "vers: 0x%08x\n", svr->vers);
+ i += scnprintf(buf + i, max - i, "\n");
+ }
+ spin_unlock_irqrestore(&server_list_lock, flags);
+
+ return i;
+}
+
+static int dump_remote_endpoints(char *buf, int max)
+{
+ int i = 0;
+ unsigned long flags;
+ struct rr_remote_endpoint *ept;
+
+ spin_lock_irqsave(&remote_endpoints_lock, flags);
+ list_for_each_entry(ept, &remote_endpoints, list) {
+ i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
+ i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
+ i += scnprintf(buf + i, max - i, "tx_quota_cntr: %i\n",
+ ept->tx_quota_cntr);
+ i += scnprintf(buf + i, max - i, "quota_restart_state: %i\n",
+ ept->quota_restart_state);
+ i += scnprintf(buf + i, max - i, "\n");
+ }
+ spin_unlock_irqrestore(&remote_endpoints_lock, flags);
+
+ return i;
+}
+
+static int dump_msm_rpc_endpoint(char *buf, int max)
+{
+ int i = 0;
+ unsigned long flags;
+ struct msm_rpc_reply *reply;
+ struct msm_rpc_endpoint *ept;
+ struct rr_packet *pkt;
+ char *sym;
+
+ spin_lock_irqsave(&local_endpoints_lock, flags);
+ list_for_each_entry(ept, &local_endpoints, list) {
+ i += scnprintf(buf + i, max - i, "pid: 0x%08x\n", ept->pid);
+ i += scnprintf(buf + i, max - i, "cid: 0x%08x\n", ept->cid);
+ i += scnprintf(buf + i, max - i, "dst_pid: 0x%08x\n",
+ ept->dst_pid);
+ i += scnprintf(buf + i, max - i, "dst_cid: 0x%08x\n",
+ ept->dst_cid);
+ i += scnprintf(buf + i, max - i, "dst_prog: 0x%08x",
+ be32_to_cpu(ept->dst_prog));
+ sym = find_sym(ONCRPC_SYM, be32_to_cpu(ept->dst_prog));
+ if (sym)
+ i += scnprintf(buf + i, max - i, " (%s)\n", sym);
+ else
+ i += scnprintf(buf + i, max - i, "\n");
+ i += scnprintf(buf + i, max - i, "dst_vers: 0x%08x\n",
+ be32_to_cpu(ept->dst_vers));
+ i += scnprintf(buf + i, max - i, "reply_cnt: %i\n",
+ ept->reply_cnt);
+ i += scnprintf(buf + i, max - i, "restart_state: %i\n",
+ ept->restart_state);
+
+ i += scnprintf(buf + i, max - i, "outstanding xids:\n");
+ spin_lock(&ept->reply_q_lock);
+ list_for_each_entry(reply, &ept->reply_pend_q, list)
+ i += scnprintf(buf + i, max - i, " xid = %u\n",
+ ntohl(reply->xid));
+ spin_unlock(&ept->reply_q_lock);
+
+ i += scnprintf(buf + i, max - i, "complete unread packets:\n");
+ spin_lock(&ept->read_q_lock);
+ list_for_each_entry(pkt, &ept->read_q, list) {
+ i += scnprintf(buf + i, max - i, " mid = %i\n",
+ pkt->mid);
+ i += scnprintf(buf + i, max - i, " length = %i\n",
+ pkt->length);
+ }
+ spin_unlock(&ept->read_q_lock);
+ i += scnprintf(buf + i, max - i, "\n");
+ }
+ spin_unlock_irqrestore(&local_endpoints_lock, flags);
+
+ return i;
+}
+
+#define DEBUG_BUFMAX 4096
+static char debug_buffer[DEBUG_BUFMAX];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int (*fill)(char *buf, int max) = file->private_data;
+ int bsize = fill(debug_buffer, DEBUG_BUFMAX);
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer, bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ int (*fill)(char *buf, int max))
+{
+ debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
+static void debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smd_rpcrouter", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debug_create("dump_msm_rpc_endpoints", 0444, dent,
+ dump_msm_rpc_endpoint);
+ debug_create("dump_remote_endpoints", 0444, dent,
+ dump_remote_endpoints);
+ debug_create("dump_servers", 0444, dent,
+ dump_servers);
+
+ init_syms();
+}
+
+#else
+static void debugfs_init(void) {}
+#endif
+
+
+static int __init rpcrouter_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&msm_smd_channel2_driver);
+ if (ret)
+ return ret;
+
+ debugfs_init();
+
+ return ret;
+}
+
+module_init(rpcrouter_init);
+MODULE_DESCRIPTION("MSM RPC Router");
+MODULE_AUTHOR("San Mehat <san@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/smd_rpcrouter.h b/arch/arm/mach-msm/smd_rpcrouter.h
new file mode 100644
index 000000000000..fa5bc84267f7
--- /dev/null
+++ b/arch/arm/mach-msm/smd_rpcrouter.h
@@ -0,0 +1,210 @@
+/** arch/arm/mach-msm/smd_rpcrouter.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMD_RPCROUTER_H
+#define _ARCH_ARM_MACH_MSM_SMD_RPCROUTER_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+
+#include <mach/msm_smd.h>
+#include <mach/msm_rpcrouter.h>
+
+/* definitions for the R2R wire protcol */
+
+#define RPCROUTER_VERSION 1
+#define RPCROUTER_PROCESSORS_MAX 4
+#define RPCROUTER_MSGSIZE_MAX 512
+#define RPCROUTER_PEND_REPLIES_MAX 32
+
+#define RPCROUTER_CLIENT_BCAST_ID 0xffffffff
+#define RPCROUTER_ROUTER_ADDRESS 0xfffffffe
+
+#define RPCROUTER_PID_LOCAL 1
+#define RPCROUTER_PID_REMOTE 0
+
+#define RPCROUTER_CTRL_CMD_DATA 1
+#define RPCROUTER_CTRL_CMD_HELLO 2
+#define RPCROUTER_CTRL_CMD_BYE 3
+#define RPCROUTER_CTRL_CMD_NEW_SERVER 4
+#define RPCROUTER_CTRL_CMD_REMOVE_SERVER 5
+#define RPCROUTER_CTRL_CMD_REMOVE_CLIENT 6
+#define RPCROUTER_CTRL_CMD_RESUME_TX 7
+#define RPCROUTER_CTRL_CMD_EXIT 8
+#define RPCROUTER_CTRL_CMD_PING 9
+
+#define RPCROUTER_DEFAULT_RX_QUOTA 5
+
+union rr_control_msg {
+ uint32_t cmd;
+ struct {
+ uint32_t cmd;
+ uint32_t prog;
+ uint32_t vers;
+ uint32_t pid;
+ uint32_t cid;
+ } srv;
+ struct {
+ uint32_t cmd;
+ uint32_t pid;
+ uint32_t cid;
+ } cli;
+};
+
+struct rr_header {
+ uint32_t version;
+ uint32_t type;
+ uint32_t src_pid;
+ uint32_t src_cid;
+ uint32_t confirm_rx;
+ uint32_t size;
+ uint32_t dst_pid;
+ uint32_t dst_cid;
+};
+
+/* internals */
+
+#define RPCROUTER_MAX_REMOTE_SERVERS 100
+
+struct rr_fragment {
+ unsigned char data[RPCROUTER_MSGSIZE_MAX];
+ uint32_t length;
+ struct rr_fragment *next;
+};
+
+struct rr_packet {
+ struct list_head list;
+ struct rr_fragment *first;
+ struct rr_fragment *last;
+ struct rr_header hdr;
+ uint32_t mid;
+ uint32_t length;
+};
+
+#define PACMARK_LAST(n) ((n) & 0x80000000)
+#define PACMARK_MID(n) (((n) >> 16) & 0xFF)
+#define PACMARK_LEN(n) ((n) & 0xFFFF)
+
+static inline uint32_t PACMARK(uint32_t len, uint32_t mid, uint32_t first,
+ uint32_t last)
+{
+ return (len & 0xFFFF) |
+ ((mid & 0xFF) << 16) |
+ ((!!first) << 30) |
+ ((!!last) << 31);
+}
+
+struct rr_server {
+ struct list_head list;
+
+ uint32_t pid;
+ uint32_t cid;
+ uint32_t prog;
+ uint32_t vers;
+
+ dev_t device_number;
+ struct cdev cdev;
+ struct device *device;
+ struct rpcsvr_platform_device p_device;
+ char pdev_name[32];
+};
+
+struct rr_remote_endpoint {
+ uint32_t pid;
+ uint32_t cid;
+
+ int tx_quota_cntr;
+ int quota_restart_state;
+ spinlock_t quota_lock;
+ wait_queue_head_t quota_wait;
+
+ struct list_head list;
+};
+
+struct msm_rpc_reply {
+ struct list_head list;
+ uint32_t pid;
+ uint32_t cid;
+ uint32_t prog; /* be32 */
+ uint32_t vers; /* be32 */
+ uint32_t xid; /* be32 */
+};
+
+struct msm_rpc_endpoint {
+ struct list_head list;
+
+ /* incomplete packets waiting for assembly */
+ struct list_head incomplete;
+ spinlock_t incomplete_lock;
+
+ /* complete packets waiting to be read */
+ struct list_head read_q;
+ spinlock_t read_q_lock;
+ wait_queue_head_t wait_q;
+ unsigned flags;
+
+ /* restart handling */
+ int restart_state;
+ spinlock_t restart_lock;
+ wait_queue_head_t restart_wait;
+
+ /* endpoint address */
+ uint32_t pid;
+ uint32_t cid;
+
+ /* bound remote address
+ * if not connected (dst_pid == 0xffffffff) RPC_CALL writes fail
+ * RPC_CALLs must be to the prog/vers below or they will fail
+ */
+ uint32_t dst_pid;
+ uint32_t dst_cid;
+ uint32_t dst_prog; /* be32 */
+ uint32_t dst_vers; /* be32 */
+
+ /* reply queue for inbound messages */
+ struct list_head reply_pend_q;
+ struct list_head reply_avail_q;
+ spinlock_t reply_q_lock;
+ uint32_t reply_cnt;
+
+ /* device node if this endpoint is accessed via userspace */
+ dev_t dev;
+};
+
+/* shared between smd_rpcrouter*.c */
+
+int __msm_rpc_read(struct msm_rpc_endpoint *ept,
+ struct rr_fragment **frag,
+ unsigned len, long timeout);
+
+struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev);
+int msm_rpcrouter_destroy_local_endpoint(struct msm_rpc_endpoint *ept);
+
+int msm_rpcrouter_create_server_cdev(struct rr_server *server);
+int msm_rpcrouter_create_server_pdev(struct rr_server *server);
+
+int msm_rpcrouter_init_devices(void);
+void msm_rpcrouter_exit_devices(void);
+
+void get_requesting_client(struct msm_rpc_endpoint *ept, uint32_t xid,
+ struct msm_rpc_client_info *clnt_info);
+
+extern dev_t msm_rpcrouter_devno;
+extern struct class *msm_rpcrouter_class;
+#endif
diff --git a/arch/arm/mach-msm/smd_rpcrouter_clients.c b/arch/arm/mach-msm/smd_rpcrouter_clients.c
new file mode 100644
index 000000000000..5baf79a7ae4b
--- /dev/null
+++ b/arch/arm/mach-msm/smd_rpcrouter_clients.c
@@ -0,0 +1,607 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * SMD RPCROUTER CLIENTS module.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+
+#include <mach/msm_rpcrouter.h>
+#include "smd_rpcrouter.h"
+
+struct msm_rpc_client_cb_item {
+ struct list_head list;
+
+ void *buf;
+ int size;
+};
+
+struct msm_rpc_cb_table_item {
+ struct list_head list;
+
+ uint32_t cb_id;
+ void *cb_func;
+};
+
+static int rpc_clients_cb_thread(void *data)
+{
+ struct msm_rpc_client_cb_item *cb_item;
+ struct msm_rpc_client *client;
+ struct rpc_request_hdr *req;
+
+ client = data;
+ for (;;) {
+ wait_event(client->cb_wait, client->cb_avail);
+ if (client->exit_flag)
+ break;
+
+ client->cb_avail = 0;
+ mutex_lock(&client->cb_item_list_lock);
+ while (!list_empty(&client->cb_item_list)) {
+ cb_item = list_first_entry(
+ &client->cb_item_list,
+ struct msm_rpc_client_cb_item,
+ list);
+ list_del(&cb_item->list);
+ mutex_unlock(&client->cb_item_list_lock);
+ req = (struct rpc_request_hdr *)cb_item->buf;
+
+ if (be32_to_cpu(req->type) != 0)
+ goto bad_rpc;
+ if (be32_to_cpu(req->rpc_vers) != 2)
+ goto bad_rpc;
+ if (be32_to_cpu(req->prog) !=
+ (client->prog | 0x01000000))
+ goto bad_rpc;
+
+ client->cb_func(client,
+ cb_item->buf, cb_item->size);
+ bad_rpc:
+ kfree(cb_item->buf);
+ kfree(cb_item);
+ mutex_lock(&client->cb_item_list_lock);
+ }
+ mutex_unlock(&client->cb_item_list_lock);
+ }
+ complete_and_exit(&client->cb_complete, 0);
+}
+
+static int rpc_clients_thread(void *data)
+{
+ void *buffer;
+ uint32_t type;
+ struct msm_rpc_client *client;
+ int rc = 0;
+ struct msm_rpc_client_cb_item *cb_item;
+ struct rpc_request_hdr *req;
+
+ client = data;
+ for (;;) {
+ rc = msm_rpc_read(client->ept, &buffer, -1, HZ);
+ if (client->exit_flag)
+ break;
+ if (rc < ((int)(sizeof(uint32_t) * 2)))
+ continue;
+
+ type = be32_to_cpu(*((uint32_t *)buffer + 1));
+ if (type == 1) {
+ client->buf = buffer;
+ client->read_avail = 1;
+ wake_up(&client->reply_wait);
+ } else if (type == 0) {
+ cb_item = kmalloc(sizeof(*cb_item), GFP_KERNEL);
+ if (!cb_item) {
+ pr_err("%s: no memory for cb item\n",
+ __func__);
+ continue;
+ }
+
+ if (client->cb_thread == NULL) {
+ req = (struct rpc_request_hdr *)buffer;
+
+ if ((be32_to_cpu(req->rpc_vers) == 2) &&
+ (be32_to_cpu(req->prog) ==
+ (client->prog | 0x01000000)))
+ client->cb_func(client, buffer, rc);
+ kfree(buffer);
+ } else {
+ INIT_LIST_HEAD(&cb_item->list);
+ cb_item->buf = buffer;
+ cb_item->size = rc;
+ mutex_lock(&client->cb_item_list_lock);
+ list_add_tail(&cb_item->list,
+ &client->cb_item_list);
+ mutex_unlock(&client->cb_item_list_lock);
+ client->cb_avail = 1;
+ wake_up(&client->cb_wait);
+ }
+ }
+ }
+ complete_and_exit(&client->complete, 0);
+}
+
+static struct msm_rpc_client *msm_rpc_create_client(void)
+{
+ struct msm_rpc_client *client;
+
+ client = kmalloc(sizeof(struct msm_rpc_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ client->req = kmalloc(MSM_RPC_MSGSIZE_MAX, GFP_KERNEL);
+ if (!client->req) {
+ kfree(client);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ client->reply = kmalloc(MSM_RPC_MSGSIZE_MAX, GFP_KERNEL);
+ if (!client->reply) {
+ kfree(client->req);
+ kfree(client);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_waitqueue_head(&client->reply_wait);
+ mutex_init(&client->req_lock);
+ mutex_init(&client->reply_lock);
+ client->buf = NULL;
+ client->read_avail = 0;
+ client->cb_buf = NULL;
+ client->cb_size = 0;
+ client->exit_flag = 0;
+ init_completion(&client->complete);
+ init_completion(&client->cb_complete);
+ INIT_LIST_HEAD(&client->cb_item_list);
+ mutex_init(&client->cb_item_list_lock);
+ client->cb_avail = 0;
+ init_waitqueue_head(&client->cb_wait);
+ INIT_LIST_HEAD(&client->cb_list);
+ mutex_init(&client->cb_list_lock);
+ atomic_set(&client->next_cb_id, 1);
+
+ return client;
+}
+
+void msm_rpc_remove_all_cb_func(struct msm_rpc_client *client)
+{
+ struct msm_rpc_cb_table_item *cb_item, *tmp_cb_item;
+
+ mutex_lock(&client->cb_list_lock);
+ list_for_each_entry_safe(cb_item, tmp_cb_item,
+ &client->cb_list, list) {
+ list_del(&cb_item->list);
+ kfree(cb_item);
+ }
+ mutex_unlock(&client->cb_list_lock);
+}
+
+/*
+ * Interface to be used to register the client.
+ *
+ * name: string representing the client
+ *
+ * prog: program number of the client
+ *
+ * ver: version number of the client
+ *
+ * create_cb_thread: if set calls the callback function from a seprate thread
+ * which helps the client requests to be processed without
+ * getting loaded by callback handling.
+ *
+ * cb_func: function to be called if callback request is received.
+ * unmarshaling should be handled by the user in callback function
+ *
+ * Return Value:
+ * Pointer to initialized client data sturcture
+ * Or, the error code if registration fails.
+ *
+ */
+struct msm_rpc_client *msm_rpc_register_client(
+ const char *name,
+ uint32_t prog, uint32_t ver,
+ uint32_t create_cb_thread,
+ int (*cb_func)(struct msm_rpc_client *, void *, int))
+{
+ struct msm_rpc_client *client;
+ struct msm_rpc_endpoint *ept;
+ int rc;
+
+ client = msm_rpc_create_client();
+ if (IS_ERR(client))
+ return client;
+
+ ept = msm_rpc_connect_compatible(prog, ver, MSM_RPC_UNINTERRUPTIBLE);
+ if (IS_ERR(ept)) {
+ kfree(client);
+ return (struct msm_rpc_client *)ept;
+ }
+
+ client->prog = prog;
+ client->ver = ver;
+ client->ept = ept;
+ client->cb_func = cb_func;
+
+ /* start the read thread */
+ client->read_thread = kthread_run(rpc_clients_thread, client,
+ "k%sclntd", name);
+ if (IS_ERR(client->read_thread)) {
+ rc = PTR_ERR(client->read_thread);
+ msm_rpc_close(client->ept);
+ kfree(client);
+ return ERR_PTR(rc);
+ }
+
+ if (!create_cb_thread || (cb_func == NULL)) {
+ client->cb_thread = NULL;
+ return client;
+ }
+
+ /* start the callback thread */
+ client->cb_thread = kthread_run(rpc_clients_cb_thread, client,
+ "k%sclntcbd", name);
+ if (IS_ERR(client->cb_thread)) {
+ rc = PTR_ERR(client->cb_thread);
+ client->exit_flag = 1;
+ wait_for_completion(&client->complete);
+ msm_rpc_close(client->ept);
+ kfree(client);
+ return ERR_PTR(rc);
+ }
+
+ return client;
+}
+EXPORT_SYMBOL(msm_rpc_register_client);
+
+/*
+ * Interface to be used to unregister the client
+ * No client operations should be done once the unregister function
+ * is called.
+ *
+ * client: pointer to client data structure.
+ *
+ * Return Value:
+ * Always returns 0 (success).
+ */
+int msm_rpc_unregister_client(struct msm_rpc_client *client)
+{
+ pr_info("%s: stopping client...\n", __func__);
+ client->exit_flag = 1;
+ if (client->cb_thread) {
+ client->cb_avail = 1;
+ wake_up(&client->cb_wait);
+ wait_for_completion(&client->cb_complete);
+ }
+
+ wait_for_completion(&client->complete);
+
+ msm_rpc_close(client->ept);
+ msm_rpc_remove_all_cb_func(client);
+ kfree(client->req);
+ kfree(client->reply);
+ kfree(client);
+ return 0;
+}
+EXPORT_SYMBOL(msm_rpc_unregister_client);
+
+/*
+ * Interface to be used to send a client request.
+ * If the request takes any arguments or expects any return, the user
+ * should handle it in 'arg_func' and 'ret_func' respectively.
+ * Marshaling and Unmarshaling should be handled by the user in argument
+ * and return functions.
+ *
+ * client: pointer to client data sturcture
+ *
+ * proc: procedure being requested
+ *
+ * arg_func: argument function pointer. 'buf' is where arguments needs to
+ * be filled. 'data' is arg_data.
+ *
+ * ret_func: return function pointer. 'buf' is where returned data should
+ * be read from. 'data' is ret_data.
+ *
+ * arg_data: passed as an input parameter to argument function.
+ *
+ * ret_data: passed as an input parameter to return function.
+ *
+ * timeout: timeout for reply wait in jiffies. If negative timeout is
+ * specified a default timeout of 10s is used.
+ *
+ * Return Value:
+ * 0 on success, otherwise an error code is returned.
+ */
+int msm_rpc_client_req(struct msm_rpc_client *client, uint32_t proc,
+ int (*arg_func)(struct msm_rpc_client *client,
+ void *buf, void *data),
+ void *arg_data,
+ int (*ret_func)(struct msm_rpc_client *client,
+ void *buf, void *data),
+ void *ret_data, long timeout)
+{
+ int size = 0;
+ struct rpc_reply_hdr *rpc_rsp;
+ int rc = 0;
+
+ mutex_lock(&client->req_lock);
+
+ msm_rpc_setup_req((struct rpc_request_hdr *)client->req, client->prog,
+ client->ver, proc);
+ size = sizeof(struct rpc_request_hdr);
+
+ if (arg_func) {
+ rc = arg_func(client, (void *)((struct rpc_request_hdr *)
+ client->req + 1), arg_data);
+ if (rc < 0)
+ goto release_locks;
+ else
+ size += rc;
+ }
+
+ rc = msm_rpc_write(client->ept, client->req, size);
+ if (rc < 0) {
+ pr_err("%s: couldn't send RPC request:%d\n", __func__, rc);
+ goto release_locks;
+ } else
+ rc = 0;
+
+ if (timeout < 0)
+ timeout = msecs_to_jiffies(10000);
+
+ rc = wait_event_timeout(client->reply_wait,
+ client->read_avail, timeout);
+ if (rc == 0) {
+ rc = -ETIMEDOUT;
+ goto release_locks;
+ } else
+ rc = 0;
+
+ client->read_avail = 0;
+
+ rpc_rsp = (struct rpc_reply_hdr *)client->buf;
+ if (be32_to_cpu(rpc_rsp->reply_stat) != RPCMSG_REPLYSTAT_ACCEPTED) {
+ pr_err("%s: RPC call was denied! %d\n", __func__,
+ be32_to_cpu(rpc_rsp->reply_stat));
+ rc = -EPERM;
+ goto free_and_release;
+ }
+
+ if (be32_to_cpu(rpc_rsp->data.acc_hdr.accept_stat) !=
+ RPC_ACCEPTSTAT_SUCCESS) {
+ pr_err("%s: RPC call was not successful (%d)\n", __func__,
+ be32_to_cpu(rpc_rsp->data.acc_hdr.accept_stat));
+ rc = -EINVAL;
+ goto free_and_release;
+ }
+
+ if (ret_func)
+ rc = ret_func(client, (void *)(rpc_rsp + 1), ret_data);
+
+ free_and_release:
+ kfree(client->buf);
+ release_locks:
+ mutex_unlock(&client->req_lock);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpc_client_req);
+
+/*
+ * Interface to be used to start accepted reply message required in
+ * callback handling. Returns the buffer pointer to attach any
+ * payload. Should call msm_rpc_send_accepted_reply to complete
+ * sending reply. Marshaling should be handled by user for the payload.
+ *
+ * client: pointer to client data structure
+ *
+ * xid: transaction id. Has to be same as the one in callback request.
+ *
+ * accept_status: acceptance status
+ *
+ * Return Value:
+ * pointer to buffer to attach the payload.
+ */
+void *msm_rpc_start_accepted_reply(struct msm_rpc_client *client,
+ uint32_t xid, uint32_t accept_status)
+{
+ struct rpc_reply_hdr *reply;
+
+ mutex_lock(&client->reply_lock);
+
+ reply = (struct rpc_reply_hdr *)client->reply;
+
+ reply->xid = cpu_to_be32(xid);
+ reply->type = cpu_to_be32(1); /* reply */
+ reply->reply_stat = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED);
+
+ reply->data.acc_hdr.accept_stat = cpu_to_be32(accept_status);
+ reply->data.acc_hdr.verf_flavor = 0;
+ reply->data.acc_hdr.verf_length = 0;
+
+ return reply + 1;
+}
+EXPORT_SYMBOL(msm_rpc_start_accepted_reply);
+
+/*
+ * Interface to be used to send accepted reply required in callback handling.
+ * msm_rpc_start_accepted_reply should have been called before.
+ * Marshaling should be handled by user for the payload.
+ *
+ * client: pointer to client data structure
+ *
+ * size: additional payload size
+ *
+ * Return Value:
+ * 0 on success, otherwise returns an error code.
+ */
+int msm_rpc_send_accepted_reply(struct msm_rpc_client *client, uint32_t size)
+{
+ int rc = 0;
+
+ size += sizeof(struct rpc_reply_hdr);
+ rc = msm_rpc_write(client->ept, client->reply, size);
+ if (rc > 0)
+ rc = 0;
+
+ mutex_unlock(&client->reply_lock);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpc_send_accepted_reply);
+
+/*
+ * Interface to be used to add a callback function.
+ * If the call back function is already in client's 'cb_id - cb_func'
+ * table, then that cb_id is returned. otherwise, new entry
+ * is added to the above table and corresponding cb_id is returned.
+ *
+ * client: pointer to client data structure
+ *
+ * cb_func: callback function
+ *
+ * Return Value:
+ * callback ID on success, otherwise returns an error code.
+ * If cb_func is NULL, the callback Id returned is 0xffffffff.
+ * This tells the other processor that no callback is reqested.
+ */
+int msm_rpc_add_cb_func(struct msm_rpc_client *client, void *cb_func)
+{
+ struct msm_rpc_cb_table_item *cb_item;
+
+ if (cb_func == NULL)
+ return MSM_RPC_CLIENT_NULL_CB_ID;
+
+ mutex_lock(&client->cb_list_lock);
+ list_for_each_entry(cb_item, &client->cb_list, list) {
+ if (cb_item->cb_func == cb_func) {
+ mutex_unlock(&client->cb_list_lock);
+ return cb_item->cb_id;
+ }
+ }
+ mutex_unlock(&client->cb_list_lock);
+
+ cb_item = kmalloc(sizeof(struct msm_rpc_cb_table_item), GFP_KERNEL);
+ if (!cb_item)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cb_item->list);
+ cb_item->cb_id = atomic_add_return(1, &client->next_cb_id);
+ cb_item->cb_func = cb_func;
+
+ mutex_lock(&client->cb_list_lock);
+ list_add_tail(&cb_item->list, &client->cb_list);
+ mutex_unlock(&client->cb_list_lock);
+
+ return cb_item->cb_id;
+}
+EXPORT_SYMBOL(msm_rpc_add_cb_func);
+
+/*
+ * Interface to be used to get a callback function from a callback ID.
+ * If no entry is found, NULL is returned.
+ *
+ * client: pointer to client data structure
+ *
+ * cb_id: callback ID
+ *
+ * Return Value:
+ * callback function pointer if entry with given cb_id is found,
+ * otherwise returns NULL.
+ */
+void *msm_rpc_get_cb_func(struct msm_rpc_client *client, uint32_t cb_id)
+{
+ struct msm_rpc_cb_table_item *cb_item;
+
+ mutex_lock(&client->cb_list_lock);
+ list_for_each_entry(cb_item, &client->cb_list, list) {
+ if (cb_item->cb_id == cb_id) {
+ mutex_unlock(&client->cb_list_lock);
+ return cb_item->cb_func;
+ }
+ }
+ mutex_unlock(&client->cb_list_lock);
+ return NULL;
+}
+EXPORT_SYMBOL(msm_rpc_get_cb_func);
+
+/*
+ * Interface to be used to remove a callback function.
+ *
+ * client: pointer to client data structure
+ *
+ * cb_func: callback function
+ *
+ */
+void msm_rpc_remove_cb_func(struct msm_rpc_client *client, void *cb_func)
+{
+ struct msm_rpc_cb_table_item *cb_item, *tmp_cb_item;
+
+ if (cb_func == NULL)
+ return;
+
+ mutex_lock(&client->cb_list_lock);
+ list_for_each_entry_safe(cb_item, tmp_cb_item,
+ &client->cb_list, list) {
+ if (cb_item->cb_func == cb_func) {
+ list_del(&cb_item->list);
+ kfree(cb_item);
+ mutex_unlock(&client->cb_list_lock);
+ return;
+ }
+ }
+ mutex_unlock(&client->cb_list_lock);
+}
+EXPORT_SYMBOL(msm_rpc_remove_cb_func);
diff --git a/arch/arm/mach-msm/smd_rpcrouter_device.c b/arch/arm/mach-msm/smd_rpcrouter_device.c
new file mode 100644
index 000000000000..b57746cbf392
--- /dev/null
+++ b/arch/arm/mach-msm/smd_rpcrouter_device.c
@@ -0,0 +1,380 @@
+/* arch/arm/mach-msm/smd_rpcrouter_device.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
+ * Author: San Mehat <san@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cdev.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+#include "smd_rpcrouter.h"
+
+#define SAFETY_MEM_SIZE 65536
+
+/* Next minor # available for a remote server */
+static int next_minor = 1;
+
+struct class *msm_rpcrouter_class;
+dev_t msm_rpcrouter_devno;
+
+static struct cdev rpcrouter_cdev;
+static struct device *rpcrouter_device;
+
+static int rpcrouter_open(struct inode *inode, struct file *filp)
+{
+ int rc;
+ struct msm_rpc_endpoint *ept;
+
+ rc = nonseekable_open(inode, filp);
+ if (rc < 0)
+ return rc;
+
+ ept = msm_rpcrouter_create_local_endpoint(inode->i_rdev);
+ if (!ept)
+ return -ENOMEM;
+
+ filp->private_data = ept;
+ return 0;
+}
+
+static int rpcrouter_release(struct inode *inode, struct file *filp)
+{
+ struct msm_rpc_endpoint *ept;
+ static unsigned int rpcrouter_release_cnt;
+
+ ept = (struct msm_rpc_endpoint *) filp->private_data;
+
+ /* A user program with many files open when ends abruptly,
+ * will cause a flood of REMOVE_CLIENT messages to the
+ * remote processor. This will cause remote processors
+ * internal queue to overflow. Inserting a sleep here
+ * regularly is the effecient option.
+ */
+ if (rpcrouter_release_cnt++ % 2)
+ msleep(1);
+
+ return msm_rpcrouter_destroy_local_endpoint(ept);
+}
+
+static ssize_t rpcrouter_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct msm_rpc_endpoint *ept;
+ struct rr_fragment *frag, *next;
+ int rc;
+
+ ept = (struct msm_rpc_endpoint *) filp->private_data;
+
+ rc = __msm_rpc_read(ept, &frag, count, -1);
+ if (rc < 0)
+ return rc;
+
+ count = rc;
+
+ while (frag != NULL) {
+ if (copy_to_user(buf, frag->data, frag->length)) {
+ printk(KERN_ERR
+ "rpcrouter: could not copy all read data to user!\n");
+ rc = -EFAULT;
+ }
+ buf += frag->length;
+ next = frag->next;
+ kfree(frag);
+ frag = next;
+ }
+
+ return rc;
+}
+
+static ssize_t rpcrouter_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct msm_rpc_endpoint *ept;
+ int rc = 0;
+ void *k_buffer;
+
+ ept = (struct msm_rpc_endpoint *) filp->private_data;
+
+ /* A check for safety, this seems non-standard */
+ if (count > SAFETY_MEM_SIZE)
+ return -EINVAL;
+
+ k_buffer = kmalloc(count, GFP_KERNEL);
+ if (!k_buffer)
+ return -ENOMEM;
+
+ if (copy_from_user(k_buffer, buf, count)) {
+ rc = -EFAULT;
+ goto write_out_free;
+ }
+
+ rc = msm_rpc_write(ept, k_buffer, count);
+ if (rc < 0)
+ goto write_out_free;
+
+ rc = count;
+write_out_free:
+ kfree(k_buffer);
+ return rc;
+}
+
+static unsigned int rpcrouter_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct msm_rpc_endpoint *ept;
+ unsigned mask = 0;
+ ept = (struct msm_rpc_endpoint *) filp->private_data;
+
+ /* If there's data already in the read queue, return POLLIN.
+ * Else, wait for the requested amount of time, and check again.
+ */
+
+ if (!list_empty(&ept->read_q))
+ mask |= POLLIN;
+ if (ept->restart_state != 0)
+ mask |= POLLERR;
+
+ if (!mask) {
+ poll_wait(filp, &ept->wait_q, wait);
+ if (!list_empty(&ept->read_q))
+ mask |= POLLIN;
+ if (ept->restart_state != 0)
+ mask |= POLLERR;
+ }
+
+ return mask;
+}
+
+static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct msm_rpc_endpoint *ept;
+ struct rpcrouter_ioctl_server_args server_args;
+ int rc = 0;
+ uint32_t n;
+
+ ept = (struct msm_rpc_endpoint *) filp->private_data;
+ switch (cmd) {
+
+ case RPC_ROUTER_IOCTL_GET_VERSION:
+ n = RPC_ROUTER_VERSION_V1;
+ rc = put_user(n, (unsigned int *) arg);
+ break;
+
+ case RPC_ROUTER_IOCTL_GET_MTU:
+ /* the pacmark word reduces the actual payload
+ * possible per message
+ */
+ n = RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t);
+ rc = put_user(n, (unsigned int *) arg);
+ break;
+
+ case RPC_ROUTER_IOCTL_REGISTER_SERVER:
+ rc = copy_from_user(&server_args, (void *) arg,
+ sizeof(server_args));
+ if (rc < 0)
+ break;
+ msm_rpc_register_server(ept,
+ server_args.prog,
+ server_args.vers);
+ break;
+
+ case RPC_ROUTER_IOCTL_UNREGISTER_SERVER:
+ rc = copy_from_user(&server_args, (void *) arg,
+ sizeof(server_args));
+ if (rc < 0)
+ break;
+
+ msm_rpc_unregister_server(ept,
+ server_args.prog,
+ server_args.vers);
+ break;
+
+ case RPC_ROUTER_IOCTL_CLEAR_NETRESET:
+ msm_rpc_clear_netreset(ept);
+ break;
+
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ return rc;
+}
+
+static struct file_operations rpcrouter_server_fops = {
+ .owner = THIS_MODULE,
+ .open = rpcrouter_open,
+ .release = rpcrouter_release,
+ .read = rpcrouter_read,
+ .write = rpcrouter_write,
+ .poll = rpcrouter_poll,
+ .unlocked_ioctl = rpcrouter_ioctl,
+};
+
+static struct file_operations rpcrouter_router_fops = {
+ .owner = THIS_MODULE,
+ .open = rpcrouter_open,
+ .release = rpcrouter_release,
+ .read = rpcrouter_read,
+ .write = rpcrouter_write,
+ .poll = rpcrouter_poll,
+ .unlocked_ioctl = rpcrouter_ioctl,
+};
+
+int msm_rpcrouter_create_server_cdev(struct rr_server *server)
+{
+ int rc;
+ uint32_t dev_vers;
+
+ if (next_minor == RPCROUTER_MAX_REMOTE_SERVERS) {
+ printk(KERN_ERR
+ "rpcrouter: Minor numbers exhausted - Increase "
+ "RPCROUTER_MAX_REMOTE_SERVERS\n");
+ return -ENOBUFS;
+ }
+
+ /* Servers with bit 31 set are remote msm servers with hashkey version.
+ * Servers with bit 31 not set are remote msm servers with
+ * backwards compatible version type in which case the minor number
+ * (lower 16 bits) is set to zero.
+ *
+ */
+ if ((server->vers & 0x80000000))
+ dev_vers = server->vers;
+ else
+ dev_vers = server->vers & 0xffff0000;
+
+ server->device_number =
+ MKDEV(MAJOR(msm_rpcrouter_devno), next_minor++);
+
+ server->device =
+ device_create(msm_rpcrouter_class, rpcrouter_device,
+ server->device_number, NULL, "%.8x:%.8x",
+ server->prog, dev_vers);
+ if (IS_ERR(server->device)) {
+ printk(KERN_ERR
+ "rpcrouter: Unable to create device (%ld)\n",
+ PTR_ERR(server->device));
+ return PTR_ERR(server->device);;
+ }
+
+ cdev_init(&server->cdev, &rpcrouter_server_fops);
+ server->cdev.owner = THIS_MODULE;
+
+ rc = cdev_add(&server->cdev, server->device_number, 1);
+ if (rc < 0) {
+ printk(KERN_ERR
+ "rpcrouter: Unable to add chrdev (%d)\n", rc);
+ device_destroy(msm_rpcrouter_class, server->device_number);
+ return rc;
+ }
+ return 0;
+}
+
+/* for backward compatible version type (31st bit cleared)
+ * clearing minor number (lower 16 bits) in device name
+ * is neccessary for driver binding
+ */
+int msm_rpcrouter_create_server_pdev(struct rr_server *server)
+{
+ sprintf(server->pdev_name, "rs%.8x:%.8x",
+ server->prog,
+ (server->vers & RPC_VERSION_MODE_MASK) ? server->vers :
+ (server->vers & RPC_VERSION_MAJOR_MASK));
+
+ server->p_device.base.id = -1;
+ server->p_device.base.name = server->pdev_name;
+
+ server->p_device.prog = server->prog;
+ server->p_device.vers = server->vers;
+
+ platform_device_register(&server->p_device.base);
+ return 0;
+}
+
+int msm_rpcrouter_init_devices(void)
+{
+ int rc;
+ int major;
+
+ /* Create the device nodes */
+ msm_rpcrouter_class = class_create(THIS_MODULE, "oncrpc");
+ if (IS_ERR(msm_rpcrouter_class)) {
+ rc = -ENOMEM;
+ printk(KERN_ERR
+ "rpcrouter: failed to create oncrpc class\n");
+ goto fail;
+ }
+
+ rc = alloc_chrdev_region(&msm_rpcrouter_devno, 0,
+ RPCROUTER_MAX_REMOTE_SERVERS + 1,
+ "oncrpc");
+ if (rc < 0) {
+ printk(KERN_ERR
+ "rpcrouter: Failed to alloc chardev region (%d)\n", rc);
+ goto fail_destroy_class;
+ }
+
+ major = MAJOR(msm_rpcrouter_devno);
+ rpcrouter_device = device_create(msm_rpcrouter_class, NULL,
+ msm_rpcrouter_devno, NULL, "%.8x:%d",
+ 0, 0);
+ if (IS_ERR(rpcrouter_device)) {
+ rc = -ENOMEM;
+ goto fail_unregister_cdev_region;
+ }
+
+ cdev_init(&rpcrouter_cdev, &rpcrouter_router_fops);
+ rpcrouter_cdev.owner = THIS_MODULE;
+
+ rc = cdev_add(&rpcrouter_cdev, msm_rpcrouter_devno, 1);
+ if (rc < 0)
+ goto fail_destroy_device;
+
+ return 0;
+
+fail_destroy_device:
+ device_destroy(msm_rpcrouter_class, msm_rpcrouter_devno);
+fail_unregister_cdev_region:
+ unregister_chrdev_region(msm_rpcrouter_devno,
+ RPCROUTER_MAX_REMOTE_SERVERS + 1);
+fail_destroy_class:
+ class_destroy(msm_rpcrouter_class);
+fail:
+ return rc;
+}
+
+void msm_rpcrouter_exit_devices(void)
+{
+ cdev_del(&rpcrouter_cdev);
+ device_destroy(msm_rpcrouter_class, msm_rpcrouter_devno);
+ unregister_chrdev_region(msm_rpcrouter_devno,
+ RPCROUTER_MAX_REMOTE_SERVERS + 1);
+ class_destroy(msm_rpcrouter_class);
+}
+
diff --git a/arch/arm/mach-msm/smd_rpcrouter_servers.c b/arch/arm/mach-msm/smd_rpcrouter_servers.c
new file mode 100644
index 000000000000..eb74f50c79b4
--- /dev/null
+++ b/arch/arm/mach-msm/smd_rpcrouter_servers.c
@@ -0,0 +1,434 @@
+/* arch/arm/mach-msm/rpc_servers.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Iliyan Malchev <ibm@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/cdev.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+
+#include <linux/uaccess.h>
+
+#include <mach/msm_rpcrouter.h>
+#include "smd_rpcrouter.h"
+
+static struct msm_rpc_endpoint *endpoint;
+
+#define FLAG_REGISTERED 0x0001
+
+static LIST_HEAD(rpc_server_list);
+static DEFINE_MUTEX(rpc_server_list_lock);
+static int rpc_servers_active;
+static uint32_t current_xid;
+
+static void rpc_server_register(struct msm_rpc_server *server)
+{
+ int rc;
+ rc = msm_rpc_register_server(endpoint, server->prog, server->vers);
+ if (rc < 0)
+ printk(KERN_ERR "[rpcserver] error registering %p @ %08x:%d\n",
+ server, server->prog, server->vers);
+}
+
+static struct msm_rpc_server *rpc_server_find(uint32_t prog, uint32_t vers)
+{
+ struct msm_rpc_server *server;
+
+ mutex_lock(&rpc_server_list_lock);
+ list_for_each_entry(server, &rpc_server_list, list) {
+ if ((server->prog == prog) &&
+ msm_rpc_is_compatible_version(server->vers, vers)) {
+ mutex_unlock(&rpc_server_list_lock);
+ return server;
+ }
+ }
+ mutex_unlock(&rpc_server_list_lock);
+ return NULL;
+}
+
+static void rpc_server_register_all(void)
+{
+ struct msm_rpc_server *server;
+
+ mutex_lock(&rpc_server_list_lock);
+ list_for_each_entry(server, &rpc_server_list, list) {
+ if (!(server->flags & FLAG_REGISTERED)) {
+ rpc_server_register(server);
+ server->flags |= FLAG_REGISTERED;
+ }
+ }
+ mutex_unlock(&rpc_server_list_lock);
+}
+
+int msm_rpc_create_server(struct msm_rpc_server *server)
+{
+ /* make sure we're in a sane state first */
+ server->flags = 0;
+ INIT_LIST_HEAD(&server->list);
+ mutex_init(&server->cb_req_lock);
+ mutex_init(&server->reply_lock);
+
+ server->reply = kmalloc(MSM_RPC_MSGSIZE_MAX, GFP_KERNEL);
+ if (!server->reply)
+ return -ENOMEM;
+
+ server->cb_req = kmalloc(MSM_RPC_MSGSIZE_MAX, GFP_KERNEL);
+ if (!server->cb_req) {
+ kfree(server->reply);
+ return -ENOMEM;
+ }
+
+ server->cb_ept = msm_rpc_open();
+ if (IS_ERR(server->cb_ept)) {
+ kfree(server->reply);
+ kfree(server->cb_req);
+ return PTR_ERR(server->cb_ept);
+ }
+
+ server->cb_ept->flags = MSM_RPC_UNINTERRUPTIBLE;
+ server->cb_ept->dst_prog = cpu_to_be32(server->prog | 0x01000000);
+ server->cb_ept->dst_vers = cpu_to_be32(server->vers);
+
+ mutex_lock(&rpc_server_list_lock);
+ list_add(&server->list, &rpc_server_list);
+ if (rpc_servers_active) {
+ rpc_server_register(server);
+ server->flags |= FLAG_REGISTERED;
+ }
+ mutex_unlock(&rpc_server_list_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_rpc_create_server);
+
+static int rpc_send_accepted_void_reply(struct msm_rpc_endpoint *client,
+ uint32_t xid, uint32_t accept_status)
+{
+ int rc = 0;
+ uint8_t reply_buf[sizeof(struct rpc_reply_hdr)];
+ struct rpc_reply_hdr *reply = (struct rpc_reply_hdr *)reply_buf;
+
+ reply->xid = cpu_to_be32(xid);
+ reply->type = cpu_to_be32(1); /* reply */
+ reply->reply_stat = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED);
+
+ reply->data.acc_hdr.accept_stat = cpu_to_be32(accept_status);
+ reply->data.acc_hdr.verf_flavor = 0;
+ reply->data.acc_hdr.verf_length = 0;
+
+ rc = msm_rpc_write(client, reply_buf, sizeof(reply_buf));
+ if (rc == -ENETRESET) {
+ /* Modem restarted, drop reply, clear state */
+ msm_rpc_clear_netreset(client);
+ }
+ if (rc < 0)
+ printk(KERN_ERR
+ "%s: could not write response: %d\n",
+ __FUNCTION__, rc);
+
+ return rc;
+}
+
+/*
+ * Interface to be used to start accepted reply message for a
+ * request. Returns the buffer pointer to attach any payload.
+ * Should call msm_rpc_server_send_accepted_reply to complete sending
+ * reply. Marshaling should be handled by user for the payload.
+ *
+ * server: pointer to server data structure
+ *
+ * xid: transaction id. Has to be same as the one in request.
+ *
+ * accept_status: acceptance status
+ *
+ * Return Value:
+ * pointer to buffer to attach the payload.
+ */
+void *msm_rpc_server_start_accepted_reply(struct msm_rpc_server *server,
+ uint32_t xid, uint32_t accept_status)
+{
+ struct rpc_reply_hdr *reply;
+
+ mutex_lock(&server->reply_lock);
+
+ reply = (struct rpc_reply_hdr *)server->reply;
+
+ reply->xid = cpu_to_be32(xid);
+ reply->type = cpu_to_be32(1); /* reply */
+ reply->reply_stat = cpu_to_be32(RPCMSG_REPLYSTAT_ACCEPTED);
+
+ reply->data.acc_hdr.accept_stat = cpu_to_be32(accept_status);
+ reply->data.acc_hdr.verf_flavor = 0;
+ reply->data.acc_hdr.verf_length = 0;
+
+ return reply + 1;
+}
+EXPORT_SYMBOL(msm_rpc_server_start_accepted_reply);
+
+/*
+ * Interface to be used to send accepted reply for a request.
+ * msm_rpc_server_start_accepted_reply should have been called before.
+ * Marshaling should be handled by user for the payload.
+ *
+ * server: pointer to server data structure
+ *
+ * size: additional payload size
+ *
+ * Return Value:
+ * 0 on success, otherwise returns an error code.
+ */
+int msm_rpc_server_send_accepted_reply(struct msm_rpc_server *server,
+ uint32_t size)
+{
+ int rc = 0;
+
+ size += sizeof(struct rpc_reply_hdr);
+ rc = msm_rpc_write(endpoint, server->reply, size);
+ if (rc > 0)
+ rc = 0;
+
+ mutex_unlock(&server->reply_lock);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpc_server_send_accepted_reply);
+
+/*
+ * Interface to be used to send a server callback request.
+ * If the request takes any arguments or expects any return, the user
+ * should handle it in 'arg_func' and 'ret_func' respectively.
+ * Marshaling and Unmarshaling should be handled by the user in argument
+ * and return functions.
+ *
+ * server: pointer to server data sturcture
+ *
+ * clnt_info: pointer to client information data structure.
+ * callback will be sent to this client.
+ *
+ * cb_proc: callback procedure being requested
+ *
+ * arg_func: argument function pointer. 'buf' is where arguments needs to
+ * be filled. 'data' is arg_data.
+ *
+ * ret_func: return function pointer. 'buf' is where returned data should
+ * be read from. 'data' is ret_data.
+ *
+ * arg_data: passed as an input parameter to argument function.
+ *
+ * ret_data: passed as an input parameter to return function.
+ *
+ * timeout: timeout for reply wait in jiffies. If negative timeout is
+ * specified a default timeout of 10s is used.
+ *
+ * Return Value:
+ * 0 on success, otherwise an error code is returned.
+ */
+int msm_rpc_server_cb_req(struct msm_rpc_server *server,
+ struct msm_rpc_client_info *clnt_info,
+ uint32_t cb_proc,
+ int (*arg_func)(struct msm_rpc_server *server,
+ void *buf, void *data),
+ void *arg_data,
+ int (*ret_func)(struct msm_rpc_server *server,
+ void *buf, void *data),
+ void *ret_data, long timeout)
+{
+ int size = 0;
+ struct rpc_reply_hdr *rpc_rsp;
+ void *buffer;
+ int rc = 0;
+
+ if (!clnt_info)
+ return -EINVAL;
+
+ mutex_lock(&server->cb_req_lock);
+
+ msm_rpc_setup_req((struct rpc_request_hdr *)server->cb_req,
+ (server->prog | 0x01000000),
+ be32_to_cpu(clnt_info->vers), cb_proc);
+ size = sizeof(struct rpc_request_hdr);
+
+ if (arg_func) {
+ rc = arg_func(server, (void *)((struct rpc_request_hdr *)
+ server->cb_req + 1), arg_data);
+ if (rc < 0)
+ goto release_locks;
+ else
+ size += rc;
+ }
+
+ server->cb_ept->dst_pid = clnt_info->pid;
+ server->cb_ept->dst_cid = clnt_info->cid;
+ rc = msm_rpc_write(server->cb_ept, server->cb_req, size);
+ if (rc < 0) {
+ pr_err("%s: couldn't send RPC CB request:%d\n", __func__, rc);
+ goto release_locks;
+ } else
+ rc = 0;
+
+ if (timeout < 0)
+ timeout = msecs_to_jiffies(10000);
+
+ rc = msm_rpc_read(server->cb_ept, &buffer, -1, timeout);
+ if ((rc < ((int)(sizeof(uint32_t) * 2))) ||
+ (be32_to_cpu(*((uint32_t *)buffer + 1)) != 1)) {
+ printk(KERN_ERR "%s: could not read: %d\n", __func__, rc);
+ kfree(buffer);
+ goto free_and_release;
+ } else
+ rc = 0;
+
+ rpc_rsp = (struct rpc_reply_hdr *)buffer;
+
+ if (be32_to_cpu(rpc_rsp->reply_stat) != RPCMSG_REPLYSTAT_ACCEPTED) {
+ pr_err("%s: RPC cb req was denied! %d\n", __func__,
+ be32_to_cpu(rpc_rsp->reply_stat));
+ rc = -EPERM;
+ goto free_and_release;
+ }
+
+ if (be32_to_cpu(rpc_rsp->data.acc_hdr.accept_stat) !=
+ RPC_ACCEPTSTAT_SUCCESS) {
+ pr_err("%s: RPC cb req was not successful (%d)\n", __func__,
+ be32_to_cpu(rpc_rsp->data.acc_hdr.accept_stat));
+ rc = -EINVAL;
+ goto free_and_release;
+ }
+
+ if (ret_func)
+ rc = ret_func(server, (void *)(rpc_rsp + 1), ret_data);
+
+free_and_release:
+ kfree(buffer);
+release_locks:
+ mutex_unlock(&server->cb_req_lock);
+ return rc;
+}
+EXPORT_SYMBOL(msm_rpc_server_cb_req);
+
+void msm_rpc_server_get_requesting_client(struct msm_rpc_client_info *clnt_info)
+{
+ if (!clnt_info)
+ return;
+
+ get_requesting_client(endpoint, current_xid, clnt_info);
+}
+
+static int rpc_servers_thread(void *data)
+{
+ void *buffer;
+ struct rpc_request_hdr *req;
+ struct msm_rpc_server *server;
+ int rc;
+
+ for (;;) {
+ rc = wait_event_interruptible(endpoint->wait_q,
+ !list_empty(&endpoint->read_q));
+ rc = msm_rpc_read(endpoint, &buffer, -1, -1);
+ if (rc < 0) {
+ printk(KERN_ERR "%s: could not read: %d\n",
+ __FUNCTION__, rc);
+ break;
+ }
+
+ req = (struct rpc_request_hdr *)buffer;
+
+ current_xid = req->xid;
+
+ req->type = be32_to_cpu(req->type);
+ req->xid = be32_to_cpu(req->xid);
+ req->rpc_vers = be32_to_cpu(req->rpc_vers);
+ req->prog = be32_to_cpu(req->prog);
+ req->vers = be32_to_cpu(req->vers);
+ req->procedure = be32_to_cpu(req->procedure);
+
+ server = rpc_server_find(req->prog, req->vers);
+
+ if (req->rpc_vers != 2)
+ continue;
+ if (req->type != 0)
+ continue;
+ if (!server) {
+ rpc_send_accepted_void_reply(
+ endpoint, req->xid,
+ RPC_ACCEPTSTAT_PROG_UNAVAIL);
+ continue;
+ }
+
+ rc = server->rpc_call(server, req, rc);
+
+ if (rc == 0) {
+ msm_rpc_server_start_accepted_reply(
+ server, req->xid,
+ RPC_ACCEPTSTAT_SUCCESS);
+ msm_rpc_server_send_accepted_reply(server, 0);
+ } else if (rc < 0) {
+ msm_rpc_server_start_accepted_reply(
+ server, req->xid,
+ RPC_ACCEPTSTAT_PROC_UNAVAIL);
+ msm_rpc_server_send_accepted_reply(server, 0);
+ }
+ kfree(buffer);
+ }
+ do_exit(0);
+}
+
+static int rpcservers_probe(struct platform_device *pdev)
+{
+ struct task_struct *server_thread;
+
+ endpoint = msm_rpc_open();
+ if (IS_ERR(endpoint))
+ return PTR_ERR(endpoint);
+
+ /* we're online -- register any servers installed beforehand */
+ rpc_servers_active = 1;
+ current_xid = 0;
+ rpc_server_register_all();
+
+ /* start the kernel thread */
+ server_thread = kthread_run(rpc_servers_thread, NULL, "krpcserversd");
+ if (IS_ERR(server_thread))
+ return PTR_ERR(server_thread);
+
+ return 0;
+}
+
+static struct platform_driver rpcservers_driver = {
+ .probe = rpcservers_probe,
+ .driver = {
+ .name = "oncrpc_router",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init rpc_servers_init(void)
+{
+ return platform_driver_register(&rpcservers_driver);
+}
+
+module_init(rpc_servers_init);
+
+MODULE_DESCRIPTION("MSM RPC Servers");
+MODULE_AUTHOR("Iliyan Malchev <ibm@android.com>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-msm/smd_tty.c b/arch/arm/mach-msm/smd_tty.c
new file mode 100644
index 000000000000..d4272faff974
--- /dev/null
+++ b/arch/arm/mach-msm/smd_tty.c
@@ -0,0 +1,284 @@
+/* arch/arm/mach-msm/smd_tty.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <mach/msm_smd.h>
+#include "smd_private.h"
+
+#define MAX_SMD_TTYS 37
+
+static DEFINE_MUTEX(smd_tty_lock);
+
+struct smd_tty_info {
+ smd_channel_t *ch;
+ struct tty_struct *tty;
+ int open_count;
+ struct work_struct tty_work;
+};
+
+static struct smd_tty_info smd_tty[MAX_SMD_TTYS];
+static struct workqueue_struct *smd_tty_wq;
+
+static void smd_tty_work_func(struct work_struct *work)
+{
+ unsigned char *ptr;
+ int avail;
+ struct smd_tty_info *info = container_of(work,
+ struct smd_tty_info,
+ tty_work);
+ struct tty_struct *tty = info->tty;
+
+ if (!tty)
+ return;
+
+ for (;;) {
+ if (test_bit(TTY_THROTTLED, &tty->flags)) break;
+
+ mutex_lock(&smd_tty_lock);
+ if (info->ch == 0) {
+ mutex_unlock(&smd_tty_lock);
+ break;
+ }
+
+ avail = smd_read_avail(info->ch);
+ if (avail == 0) {
+ mutex_unlock(&smd_tty_lock);
+ break;
+ }
+
+ avail = tty_prepare_flip_string(tty, &ptr, avail);
+
+ if (smd_read(info->ch, ptr, avail) != avail) {
+ /* shouldn't be possible since we're in interrupt
+ ** context here and nobody else could 'steal' our
+ ** characters.
+ */
+ printk(KERN_ERR "OOPS - smd_tty_buffer mismatch?!");
+ }
+ mutex_unlock(&smd_tty_lock);
+
+ tty_flip_buffer_push(tty);
+ }
+
+ /* XXX only when writable and necessary */
+ tty_wakeup(tty);
+}
+
+static void smd_tty_notify(void *priv, unsigned event)
+{
+ struct smd_tty_info *info = priv;
+
+ if (event != SMD_EVENT_DATA)
+ return;
+
+ queue_work(smd_tty_wq, &info->tty_work);
+}
+
+static int smd_tty_open(struct tty_struct *tty, struct file *f)
+{
+ int res = 0;
+ int n = tty->index;
+ struct smd_tty_info *info;
+ const char *name;
+
+ if (n == 0)
+ name = "DS";
+ else if (n == 7)
+ name = "DATA1";
+ else if (n == 21)
+ name = "DATA21";
+ else if (n == 27)
+ name = "GPSNMEA";
+ else if (n == 36)
+ name = "LOOPBACK";
+ else
+ return -ENODEV;
+
+ info = smd_tty + n;
+
+ mutex_lock(&smd_tty_lock);
+ tty->driver_data = info;
+
+ if (info->open_count++ == 0) {
+ info->tty = tty;
+ if (!info->ch) {
+ if (n == 36) {
+ /* set smsm state to SMSM_SMD_LOOPBACK state
+ ** and wait allowing enough time for Modem side
+ ** to open the loopback port (Currently, this is
+ ** this is effecient than polling).
+ */
+ smsm_change_state(SMSM_APPS_STATE,
+ 0, SMSM_SMD_LOOPBACK);
+ msleep(100);
+ } else if ((n == 0) || (n == 7))
+ tty->low_latency = 1;
+
+ res = smd_open(name, &info->ch, info,
+ smd_tty_notify);
+ }
+ }
+ mutex_unlock(&smd_tty_lock);
+
+ return res;
+}
+
+static void smd_tty_close(struct tty_struct *tty, struct file *f)
+{
+ struct smd_tty_info *info = tty->driver_data;
+
+ if (info == 0)
+ return;
+
+ mutex_lock(&smd_tty_lock);
+ if (--info->open_count == 0) {
+ info->tty = 0;
+ tty->driver_data = 0;
+ if (info->ch) {
+ smd_close(info->ch);
+ info->ch = 0;
+ }
+ }
+ mutex_unlock(&smd_tty_lock);
+}
+
+static int smd_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ int avail;
+
+ /* if we're writing to a packet channel we will
+ ** never be able to write more data than there
+ ** is currently space for
+ */
+ avail = smd_write_avail(info->ch);
+ if (len > avail)
+ len = avail;
+
+ return smd_write(info->ch, buf, len);
+}
+
+static int smd_tty_write_room(struct tty_struct *tty)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ return smd_write_avail(info->ch);
+}
+
+static int smd_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ return smd_read_avail(info->ch);
+}
+
+static void smd_tty_unthrottle(struct tty_struct *tty)
+{
+ struct smd_tty_info *info = tty->driver_data;
+ queue_work(smd_tty_wq, &info->tty_work);
+ return;
+}
+
+static int smd_tty_tiocmget(struct tty_struct *tty, struct file *file)
+{
+ struct smd_tty_info *info = tty->driver_data;
+
+ return smd_tiocmget(info->ch);
+}
+
+static int smd_tty_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+{
+ struct smd_tty_info *info = tty->driver_data;
+
+ return smd_tiocmset(info->ch, set, clear);
+}
+
+static struct tty_operations smd_tty_ops = {
+ .open = smd_tty_open,
+ .close = smd_tty_close,
+ .write = smd_tty_write,
+ .write_room = smd_tty_write_room,
+ .chars_in_buffer = smd_tty_chars_in_buffer,
+ .unthrottle = smd_tty_unthrottle,
+ .tiocmget = smd_tty_tiocmget,
+ .tiocmset = smd_tty_tiocmset,
+};
+
+static struct tty_driver *smd_tty_driver;
+
+static int __init smd_tty_init(void)
+{
+ int ret;
+
+ smd_tty_wq = create_singlethread_workqueue("smd_tty");
+ if (smd_tty_wq == 0)
+ return -ENOMEM;
+
+ smd_tty_driver = alloc_tty_driver(MAX_SMD_TTYS);
+ if (smd_tty_driver == 0) {
+ destroy_workqueue(smd_tty_wq);
+ return -ENOMEM;
+ }
+
+ smd_tty_driver->owner = THIS_MODULE;
+ smd_tty_driver->driver_name = "smd_tty_driver";
+ smd_tty_driver->name = "smd";
+ smd_tty_driver->major = 0;
+ smd_tty_driver->minor_start = 0;
+ smd_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ smd_tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ smd_tty_driver->init_termios = tty_std_termios;
+ smd_tty_driver->init_termios.c_iflag = 0;
+ smd_tty_driver->init_termios.c_oflag = 0;
+ smd_tty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD;
+ smd_tty_driver->init_termios.c_lflag = 0;
+ smd_tty_driver->flags = TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_set_operations(smd_tty_driver, &smd_tty_ops);
+
+ ret = tty_register_driver(smd_tty_driver);
+ if (ret) return ret;
+
+ /* this should be dynamic */
+ tty_register_device(smd_tty_driver, 0, 0);
+ INIT_WORK(&smd_tty[0].tty_work, smd_tty_work_func);
+
+ tty_register_device(smd_tty_driver, 7, 0);
+ INIT_WORK(&smd_tty[7].tty_work, smd_tty_work_func);
+
+ tty_register_device(smd_tty_driver, 27, 0);
+ INIT_WORK(&smd_tty[27].tty_work, smd_tty_work_func);
+
+ tty_register_device(smd_tty_driver, 36, 0);
+ INIT_WORK(&smd_tty[36].tty_work, smd_tty_work_func);
+
+ tty_register_device(smd_tty_driver, 21, 0);
+ INIT_WORK(&smd_tty[21].tty_work, smd_tty_work_func);
+
+ return 0;
+}
+
+module_init(smd_tty_init);
diff --git a/arch/arm/mach-msm/smem_log.c b/arch/arm/mach-msm/smem_log.c
new file mode 100644
index 000000000000..c2809109fc62
--- /dev/null
+++ b/arch/arm/mach-msm/smem_log.c
@@ -0,0 +1,2024 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * Shared memory logging implementation.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/remote_spinlock.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/string.h>
+
+#include <mach/msm_iomap.h>
+#include <mach/smem_log.h>
+
+#include "smd_private.h"
+
+#define DEBUG
+#undef DEBUG
+
+#ifdef DEBUG
+#define D_DUMP_BUFFER(prestr, cnt, buf) \
+do { \
+ int i; \
+ printk(KERN_ERR "%s", prestr); \
+ for (i = 0; i < cnt; i++) \
+ printk(KERN_ERR "%.2x", buf[i]); \
+ printk(KERN_ERR "\n"); \
+} while (0)
+#else
+#define D_DUMP_BUFFER(prestr, cnt, buf)
+#endif
+
+#ifdef DEBUG
+#define D(x...) printk(x)
+#else
+#define D(x...) do {} while (0)
+#endif
+
+#define TIMESTAMP_ADDR (MSM_CSR_BASE + 0x04)
+
+struct smem_log_item {
+ uint32_t identifier;
+ uint32_t timetick;
+ uint32_t data1;
+ uint32_t data2;
+ uint32_t data3;
+};
+
+#define SMEM_LOG_NUM_ENTRIES 2000
+#define SMEM_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \
+ SMEM_LOG_NUM_ENTRIES)
+
+#define SMEM_LOG_NUM_STATIC_ENTRIES 150
+#define SMEM_STATIC_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \
+ SMEM_LOG_NUM_STATIC_ENTRIES)
+
+#define SMEM_LOG_NUM_POWER_ENTRIES 2000
+#define SMEM_POWER_LOG_EVENTS_SIZE (sizeof(struct smem_log_item) * \
+ SMEM_LOG_NUM_POWER_ENTRIES)
+
+#define SMEM_SPINLOCK_SMEM_LOG 2
+#define SMEM_SPINLOCK_STATIC_LOG 5
+/* POWER shares with SMEM_SPINLOCK_SMEM_LOG */
+
+static remote_spinlock_t remote_spinlock;
+static remote_spinlock_t remote_spinlock_static;
+
+struct smem_log_inst {
+ int which_log;
+ struct smem_log_item __iomem *events;
+ uint32_t __iomem *idx;
+ int num;
+ remote_spinlock_t *remote_spinlock;
+};
+
+enum smem_logs {
+ GEN = 0,
+ STA,
+ POW,
+ NUM
+};
+
+static struct smem_log_inst inst[NUM];
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define HSIZE 13
+
+struct sym {
+ uint32_t val;
+ char *str;
+ struct hlist_node node;
+};
+
+struct sym id_syms[] = {
+ { SMEM_LOG_PROC_ID_MODEM, "MODM" },
+ { SMEM_LOG_PROC_ID_Q6, "QDSP" },
+ { SMEM_LOG_PROC_ID_APPS, "APPS" },
+};
+
+struct sym base_syms[] = {
+ { SMEM_LOG_ONCRPC_EVENT_BASE, "ONCRPC" },
+ { SMEM_LOG_SMEM_EVENT_BASE, "SMEM" },
+ { SMEM_LOG_TMC_EVENT_BASE, "TMC" },
+ { SMEM_LOG_TIMETICK_EVENT_BASE, "TIMETICK" },
+ { SMEM_LOG_DEM_EVENT_BASE, "DEM" },
+ { SMEM_LOG_ERROR_EVENT_BASE, "ERROR" },
+ { SMEM_LOG_DCVS_EVENT_BASE, "DCVS" },
+ { SMEM_LOG_SLEEP_EVENT_BASE, "SLEEP" },
+ { SMEM_LOG_RPC_ROUTER_EVENT_BASE, "ROUTER" },
+};
+
+struct sym event_syms[] = {
+#if defined(CONFIG_MSM_N_WAY_SMSM)
+ { DEM_SMSM_ISR, "SMSM_ISR" },
+ { DEM_STATE_CHANGE, "STATE_CHANGE" },
+ { DEM_STATE_MACHINE_ENTER, "STATE_MACHINE_ENTER" },
+ { DEM_ENTER_SLEEP, "ENTER_SLEEP" },
+ { DEM_END_SLEEP, "END_SLEEP" },
+ { DEM_SETUP_SLEEP, "SETUP_SLEEP" },
+ { DEM_SETUP_POWER_COLLAPSE, "SETUP_POWER_COLLAPSE" },
+ { DEM_SETUP_SUSPEND, "SETUP_SUSPEND" },
+ { DEM_EARLY_EXIT, "EARLY_EXIT" },
+ { DEM_WAKEUP_REASON, "WAKEUP_REASON" },
+ { DEM_DETECT_WAKEUP, "DETECT_WAKEUP" },
+ { DEM_DETECT_RESET, "DETECT_RESET" },
+ { DEM_DETECT_SLEEPEXIT, "DETECT_SLEEPEXIT" },
+ { DEM_DETECT_RUN, "DETECT_RUN" },
+ { DEM_APPS_SWFI, "APPS_SWFI" },
+ { DEM_SEND_WAKEUP, "SEND_WAKEUP" },
+ { DEM_ASSERT_OKTS, "ASSERT_OKTS" },
+ { DEM_NEGATE_OKTS, "NEGATE_OKTS" },
+ { DEM_PROC_COMM_CMD, "PROC_COMM_CMD" },
+ { DEM_REMOVE_PROC_PWR, "REMOVE_PROC_PWR" },
+ { DEM_RESTORE_PROC_PWR, "RESTORE_PROC_PWR" },
+ { DEM_SMI_CLK_DISABLED, "SMI_CLK_DISABLED" },
+ { DEM_SMI_CLK_ENABLED, "SMI_CLK_ENABLED" },
+ { DEM_MAO_INTS, "MAO_INTS" },
+ { DEM_APPS_WAKEUP_INT, "APPS_WAKEUP_INT" },
+ { DEM_PROC_WAKEUP, "PROC_WAKEUP" },
+ { DEM_PROC_POWERUP, "PROC_POWERUP" },
+ { DEM_TIMER_EXPIRED, "TIMER_EXPIRED" },
+ { DEM_SEND_BATTERY_INFO, "SEND_BATTERY_INFO" },
+ { DEM_REMOTE_PWR_CB, "REMOTE_PWR_CB" },
+ { DEM_TIME_SYNC_START, "TIME_SYNC_START" },
+ { DEM_TIME_SYNC_SEND_VALUE, "TIME_SYNC_SEND_VALUE" },
+ { DEM_TIME_SYNC_DONE, "TIME_SYNC_DONE" },
+ { DEM_TIME_SYNC_REQUEST, "TIME_SYNC_REQUEST" },
+ { DEM_TIME_SYNC_POLL, "TIME_SYNC_POLL" },
+ { DEM_TIME_SYNC_INIT, "TIME_SYNC_INIT" },
+ { DEM_INIT, "INIT" },
+#else
+
+ { DEM_NO_SLEEP, "NO_SLEEP" },
+ { DEM_INSUF_TIME, "INSUF_TIME" },
+ { DEMAPPS_ENTER_SLEEP, "APPS_ENTER_SLEEP" },
+ { DEMAPPS_DETECT_WAKEUP, "APPS_DETECT_WAKEUP" },
+ { DEMAPPS_END_APPS_TCXO, "APPS_END_APPS_TCXO" },
+ { DEMAPPS_ENTER_SLEEPEXIT, "APPS_ENTER_SLEEPEXIT" },
+ { DEMAPPS_END_APPS_SLEEP, "APPS_END_APPS_SLEEP" },
+ { DEMAPPS_SETUP_APPS_PWRCLPS, "APPS_SETUP_APPS_PWRCLPS" },
+ { DEMAPPS_PWRCLPS_EARLY_EXIT, "APPS_PWRCLPS_EARLY_EXIT" },
+ { DEMMOD_SEND_WAKEUP, "MOD_SEND_WAKEUP" },
+ { DEMMOD_NO_APPS_VOTE, "MOD_NO_APPS_VOTE" },
+ { DEMMOD_NO_TCXO_SLEEP, "MOD_NO_TCXO_SLEEP" },
+ { DEMMOD_BT_CLOCK, "MOD_BT_CLOCK" },
+ { DEMMOD_UART_CLOCK, "MOD_UART_CLOCK" },
+ { DEMMOD_OKTS, "MOD_OKTS" },
+ { DEM_SLEEP_INFO, "SLEEP_INFO" },
+ { DEMMOD_TCXO_END, "MOD_TCXO_END" },
+ { DEMMOD_END_SLEEP_SIG, "MOD_END_SLEEP_SIG" },
+ { DEMMOD_SETUP_APPSSLEEP, "MOD_SETUP_APPSSLEEP" },
+ { DEMMOD_ENTER_TCXO, "MOD_ENTER_TCXO" },
+ { DEMMOD_WAKE_APPS, "MOD_WAKE_APPS" },
+ { DEMMOD_POWER_COLLAPSE_APPS, "MOD_POWER_COLLAPSE_APPS" },
+ { DEMMOD_RESTORE_APPS_PWR, "MOD_RESTORE_APPS_PWR" },
+ { DEMAPPS_ASSERT_OKTS, "APPS_ASSERT_OKTS" },
+ { DEMAPPS_RESTART_START_TIMER, "APPS_RESTART_START_TIMER" },
+ { DEMAPPS_ENTER_RUN, "APPS_ENTER_RUN" },
+ { DEMMOD_MAO_INTS, "MOD_MAO_INTS" },
+ { DEMMOD_POWERUP_APPS_CALLED, "MOD_POWERUP_APPS_CALLED" },
+ { DEMMOD_PC_TIMER_EXPIRED, "MOD_PC_TIMER_EXPIRED" },
+ { DEM_DETECT_SLEEPEXIT, "_DETECT_SLEEPEXIT" },
+ { DEM_DETECT_RUN, "DETECT_RUN" },
+ { DEM_SET_APPS_TIMER, "SET_APPS_TIMER" },
+ { DEM_NEGATE_OKTS, "NEGATE_OKTS" },
+ { DEMMOD_APPS_WAKEUP_INT, "MOD_APPS_WAKEUP_INT" },
+ { DEMMOD_APPS_SWFI, "MOD_APPS_SWFI" },
+ { DEM_SEND_BATTERY_INFO, "SEND_BATTERY_INFO" },
+ { DEM_SMI_CLK_DISABLED, "SMI_CLK_DISABLED" },
+ { DEM_SMI_CLK_ENABLED, "SMI_CLK_ENABLED" },
+ { DEMAPPS_SETUP_APPS_SUSPEND, "APPS_SETUP_APPS_SUSPEND" },
+ { DEM_RPC_EARLY_EXIT, "RPC_EARLY_EXIT" },
+ { DEMAPPS_WAKEUP_REASON, "APPS_WAKEUP_REASON" },
+ { DEM_INIT, "INIT" },
+#endif
+ { DEMMOD_UMTS_BASE, "MOD_UMTS_BASE" },
+ { DEMMOD_GL1_GO_TO_SLEEP, "GL1_GO_TO_SLEEP" },
+ { DEMMOD_GL1_SLEEP_START, "GL1_SLEEP_START" },
+ { DEMMOD_GL1_AFTER_GSM_CLK_ON, "GL1_AFTER_GSM_CLK_ON" },
+ { DEMMOD_GL1_BEFORE_RF_ON, "GL1_BEFORE_RF_ON" },
+ { DEMMOD_GL1_AFTER_RF_ON, "GL1_AFTER_RF_ON" },
+ { DEMMOD_GL1_FRAME_TICK, "GL1_FRAME_TICK" },
+ { DEMMOD_GL1_WCDMA_START, "GL1_WCDMA_START" },
+ { DEMMOD_GL1_WCDMA_ENDING, "GL1_WCDMA_ENDING" },
+ { DEMMOD_UMTS_NOT_OKTS, "UMTS_NOT_OKTS" },
+ { DEMMOD_UMTS_START_TCXO_SHUTDOWN, "UMTS_START_TCXO_SHUTDOWN" },
+ { DEMMOD_UMTS_END_TCXO_SHUTDOWN, "UMTS_END_TCXO_SHUTDOWN" },
+ { DEMMOD_UMTS_START_ARM_HALT, "UMTS_START_ARM_HALT" },
+ { DEMMOD_UMTS_END_ARM_HALT, "UMTS_END_ARM_HALT" },
+ { DEMMOD_UMTS_NEXT_WAKEUP_SCLK, "UMTS_NEXT_WAKEUP_SCLK" },
+ { TIME_REMOTE_LOG_EVENT_START, "START" },
+ { TIME_REMOTE_LOG_EVENT_GOTO_WAIT,
+ "GOTO_WAIT" },
+ { TIME_REMOTE_LOG_EVENT_GOTO_INIT,
+ "GOTO_INIT" },
+ { ERR_ERROR_FATAL, "ERR_ERROR_FATAL" },
+ { ERR_ERROR_FATAL_TASK, "ERR_ERROR_FATAL_TASK" },
+ { DCVSAPPS_LOG_IDLE, "DCVSAPPS_LOG_IDLE" },
+ { DCVSAPPS_LOG_ERR, "DCVSAPPS_LOG_ERR" },
+ { DCVSAPPS_LOG_CHG, "DCVSAPPS_LOG_CHG" },
+ { DCVSAPPS_LOG_REG, "DCVSAPPS_LOG_REG" },
+ { DCVSAPPS_LOG_DEREG, "DCVSAPPS_LOG_DEREG" },
+ { SMEM_LOG_EVENT_CB, "CB" },
+ { SMEM_LOG_EVENT_START, "START" },
+ { SMEM_LOG_EVENT_INIT, "INIT" },
+ { SMEM_LOG_EVENT_RUNNING, "RUNNING" },
+ { SMEM_LOG_EVENT_STOP, "STOP" },
+ { SMEM_LOG_EVENT_RESTART, "RESTART" },
+ { SMEM_LOG_EVENT_SS, "SS" },
+ { SMEM_LOG_EVENT_READ, "READ" },
+ { SMEM_LOG_EVENT_WRITE, "WRITE" },
+ { SMEM_LOG_EVENT_SIGS1, "SIGS1" },
+ { SMEM_LOG_EVENT_SIGS2, "SIGS2" },
+ { SMEM_LOG_EVENT_WRITE_DM, "WRITE_DM" },
+ { SMEM_LOG_EVENT_READ_DM, "READ_DM" },
+ { SMEM_LOG_EVENT_SKIP_DM, "SKIP_DM" },
+ { SMEM_LOG_EVENT_STOP_DM, "STOP_DM" },
+ { SMEM_LOG_EVENT_ISR, "ISR" },
+ { SMEM_LOG_EVENT_TASK, "TASK" },
+ { SMEM_LOG_EVENT_RS, "RS" },
+ { ONCRPC_LOG_EVENT_SMD_WAIT, "SMD_WAIT" },
+ { ONCRPC_LOG_EVENT_RPC_WAIT, "RPC_WAIT" },
+ { ONCRPC_LOG_EVENT_RPC_BOTH_WAIT, "RPC_BOTH_WAIT" },
+ { ONCRPC_LOG_EVENT_RPC_INIT, "RPC_INIT" },
+ { ONCRPC_LOG_EVENT_RUNNING, "RUNNING" },
+ { ONCRPC_LOG_EVENT_APIS_INITED, "APIS_INITED" },
+ { ONCRPC_LOG_EVENT_AMSS_RESET, "AMSS_RESET" },
+ { ONCRPC_LOG_EVENT_SMD_RESET, "SMD_RESET" },
+ { ONCRPC_LOG_EVENT_ONCRPC_RESET, "ONCRPC_RESET" },
+ { ONCRPC_LOG_EVENT_CB, "CB" },
+ { ONCRPC_LOG_EVENT_STD_CALL, "STD_CALL" },
+ { ONCRPC_LOG_EVENT_STD_REPLY, "STD_REPLY" },
+ { ONCRPC_LOG_EVENT_STD_CALL_ASYNC, "STD_CALL_ASYNC" },
+ { NO_SLEEP_OLD, "NO_SLEEP_OLD" },
+ { INSUF_TIME, "INSUF_TIME" },
+ { MOD_UART_CLOCK, "MOD_UART_CLOCK" },
+ { SLEEP_INFO, "SLEEP_INFO" },
+ { MOD_TCXO_END, "MOD_TCXO_END" },
+ { MOD_ENTER_TCXO, "MOD_ENTER_TCXO" },
+ { NO_SLEEP_NEW, "NO_SLEEP_NEW" },
+ { RPC_ROUTER_LOG_EVENT_UNKNOWN, "UNKNOWN" },
+ { RPC_ROUTER_LOG_EVENT_MSG_READ, "MSG_READ" },
+ { RPC_ROUTER_LOG_EVENT_MSG_WRITTEN, "MSG_WRITTEN" },
+ { RPC_ROUTER_LOG_EVENT_MSG_CFM_REQ, "MSG_CFM_REQ" },
+ { RPC_ROUTER_LOG_EVENT_MSG_CFM_SNT, "MSG_CFM_SNT" },
+ { RPC_ROUTER_LOG_EVENT_MID_READ, "MID_READ" },
+ { RPC_ROUTER_LOG_EVENT_MID_WRITTEN, "MID_WRITTEN" },
+ { RPC_ROUTER_LOG_EVENT_MID_CFM_REQ, "MID_CFM_REQ" },
+
+};
+
+struct sym oncrpc_syms[] = {
+ { 0x30000000, "CM" },
+ { 0x30000001, "DB" },
+ { 0x30000002, "SND" },
+ { 0x30000003, "WMS" },
+ { 0x30000004, "PDSM" },
+ { 0x30000005, "MISC_MODEM_APIS" },
+ { 0x30000006, "MISC_APPS_APIS" },
+ { 0x30000007, "JOYST" },
+ { 0x30000008, "VJOY" },
+ { 0x30000009, "JOYSTC" },
+ { 0x3000000a, "ADSPRTOSATOM" },
+ { 0x3000000b, "ADSPRTOSMTOA" },
+ { 0x3000000c, "I2C" },
+ { 0x3000000d, "TIME_REMOTE" },
+ { 0x3000000e, "NV" },
+ { 0x3000000f, "CLKRGM_SEC" },
+ { 0x30000010, "RDEVMAP" },
+ { 0x30000011, "FS_RAPI" },
+ { 0x30000012, "PBMLIB" },
+ { 0x30000013, "AUDMGR" },
+ { 0x30000014, "MVS" },
+ { 0x30000015, "DOG_KEEPALIVE" },
+ { 0x30000016, "GSDI_EXP" },
+ { 0x30000017, "AUTH" },
+ { 0x30000018, "NVRUIMI" },
+ { 0x30000019, "MMGSDILIB" },
+ { 0x3000001a, "CHARGER" },
+ { 0x3000001b, "UIM" },
+ { 0x3000001C, "ONCRPCTEST" },
+ { 0x3000001d, "PDSM_ATL" },
+ { 0x3000001e, "FS_XMOUNT" },
+ { 0x3000001f, "SECUTIL " },
+ { 0x30000020, "MCCMEID" },
+ { 0x30000021, "PM_STROBE_FLASH" },
+ { 0x30000022, "DS707_EXTIF" },
+ { 0x30000023, "SMD BRIDGE_MODEM" },
+ { 0x30000024, "SMD PORT_MGR" },
+ { 0x30000025, "BUS_PERF" },
+ { 0x30000026, "BUS_MON" },
+ { 0x30000027, "MC" },
+ { 0x30000028, "MCCAP" },
+ { 0x30000029, "MCCDMA" },
+ { 0x3000002a, "MCCDS" },
+ { 0x3000002b, "MCCSCH" },
+ { 0x3000002c, "MCCSRID" },
+ { 0x3000002d, "SNM" },
+ { 0x3000002e, "MCCSYOBJ" },
+ { 0x3000002f, "DS707_APIS" },
+ { 0x30000030, "DS_MP_SHIM_APPS_ASYNC" },
+ { 0x30000031, "DSRLP_APIS" },
+ { 0x30000032, "RLP_APIS" },
+ { 0x30000033, "DS_MP_SHIM_MODEM" },
+ { 0x30000034, "DSHDR_APIS" },
+ { 0x30000035, "DSHDR_MDM_APIS" },
+ { 0x30000036, "DS_MP_SHIM_APPS" },
+ { 0x30000037, "HDRMC_APIS" },
+ { 0x30000038, "SMD_BRIDGE_MTOA" },
+ { 0x30000039, "SMD_BRIDGE_ATOM" },
+ { 0x3000003a, "DPMAPP_OTG" },
+ { 0x3000003b, "DIAG" },
+ { 0x3000003c, "GSTK_EXP" },
+ { 0x3000003d, "DSBC_MDM_APIS" },
+ { 0x3000003e, "HDRMRLP_MDM_APIS" },
+ { 0x3000003f, "HDRMRLP_APPS_APIS" },
+ { 0x30000040, "HDRMC_MRLP_APIS" },
+ { 0x30000041, "PDCOMM_APP_API" },
+ { 0x30000042, "DSAT_APIS" },
+ { 0x30000043, "MISC_RF_APIS" },
+ { 0x30000044, "CMIPAPP" },
+ { 0x30000045, "DSMP_UMTS_MODEM_APIS" },
+ { 0x30000046, "DSMP_UMTS_APPS_APIS" },
+ { 0x30000047, "DSUCSDMPSHIM" },
+ { 0x30000048, "TIME_REMOTE_ATOM" },
+ { 0x3000004a, "SD" },
+ { 0x3000004b, "MMOC" },
+ { 0x3000004c, "WLAN_ADP_FTM" },
+ { 0x3000004d, "WLAN_CP_CM" },
+ { 0x3000004e, "FTM_WLAN" },
+ { 0x3000004f, "SDCC_CPRM" },
+ { 0x30000050, "CPRMINTERFACE" },
+ { 0x30000051, "DATA_ON_MODEM_MTOA_APIS" },
+ { 0x30000052, "DATA_ON_APPS_ATOM_APIS" },
+ { 0x30000053, "MISC_MODEM_APIS_NONWINMOB" },
+ { 0x30000054, "MISC_APPS_APIS_NONWINMOB" },
+ { 0x30000055, "PMEM_REMOTE" },
+ { 0x30000056, "TCXOMGR" },
+ { 0x30000057, "DSUCSDAPPIF_APIS" },
+ { 0x30000058, "BT" },
+ { 0x30000059, "PD_COMMS_API" },
+ { 0x3000005a, "PD_COMMS_CLIENT_API" },
+ { 0x3000005b, "PDAPI" },
+ { 0x3000005c, "LSA_SUPL_DSM" },
+ { 0x3000005d, "TIME_REMOTE_MTOA" },
+ { 0x3000005e, "FTM_BT" },
+ { 0X3000005f, "DSUCSDAPPIF_APIS" },
+ { 0X30000060, "PMAPP_GEN" },
+ { 0X30000061, "PM_LIB" },
+ { 0X30000062, "KEYPAD" },
+ { 0X30000063, "HSU_APP_APIS" },
+ { 0X30000064, "HSU_MDM_APIS" },
+ { 0X30000065, "ADIE_ADC_REMOTE_ATOM " },
+ { 0X30000066, "TLMM_REMOTE_ATOM" },
+ { 0X30000067, "UI_CALLCTRL" },
+ { 0X30000068, "UIUTILS" },
+ { 0X30000069, "PRL" },
+ { 0X3000006a, "HW" },
+ { 0X3000006b, "OEM_RAPI" },
+ { 0X3000006c, "WMSPM" },
+ { 0X3000006d, "BTPF" },
+ { 0X3000006e, "CLKRGM_SYNC_EVENT" },
+ { 0X3000006f, "USB_APPS_RPC" },
+ { 0X30000070, "USB_MODEM_RPC" },
+ { 0X30000071, "ADC" },
+ { 0X30000072, "CAMERAREMOTED" },
+ { 0X30000073, "SECAPIREMOTED" },
+ { 0X30000074, "DSATAPI" },
+ { 0X30000075, "CLKCTL_RPC" },
+ { 0X30000076, "BREWAPPCOORD" },
+ { 0X30000077, "ALTENVSHELL" },
+ { 0X30000078, "WLAN_TRP_UTILS" },
+ { 0X30000079, "GPIO_RPC" },
+ { 0X3000007a, "PING_RPC" },
+ { 0X3000007b, "DSC_DCM_API" },
+ { 0X3000007c, "L1_DS" },
+ { 0X3000007d, "QCHATPK_APIS" },
+ { 0X3000007e, "GPS_API" },
+ { 0X3000007f, "OSS_RRCASN_REMOTE" },
+ { 0X30000080, "PMAPP_OTG_REMOTE" },
+ { 0X30000081, "PING_MDM_RPC" },
+ { 0X30000082, "PING_KERNEL_RPC" },
+ { 0X30000083, "TIMETICK" },
+ { 0X30000084, "WM_BTHCI_FTM " },
+ { 0X30000085, "WM_BT_PF" },
+ { 0X30000086, "IPA_IPC_APIS" },
+ { 0X30000087, "UKCC_IPC_APIS" },
+ { 0X30000088, "CMIPSMS " },
+ { 0X30000089, "VBATT_REMOTE" },
+ { 0X3000008a, "MFPAL" },
+ { 0X3000008b, "DSUMTSPDPREG" },
+ { 0X3000fe00, "RESTART_DAEMON NUMBER 0" },
+ { 0X3000fe01, "RESTART_DAEMON NUMBER 1" },
+ { 0X3000feff, "RESTART_DAEMON NUMBER 255" },
+ { 0X3000fffe, "BACKWARDS_COMPATIBILITY_IN_RPC_CLNT_LOOKUP" },
+ { 0X3000ffff, "RPC_ROUTER_SERVER_PROGRAM" },
+ { 0x31000000, "CM CB" },
+ { 0x31000001, "DB CB" },
+ { 0x31000002, "SND CB" },
+ { 0x31000003, "WMS CB" },
+ { 0x31000004, "PDSM CB" },
+ { 0x31000005, "MISC_MODEM_APIS CB" },
+ { 0x31000006, "MISC_APPS_APIS CB" },
+ { 0x31000007, "JOYST CB" },
+ { 0x31000008, "VJOY CB" },
+ { 0x31000009, "JOYSTC CB" },
+ { 0x3100000a, "ADSPRTOSATOM CB" },
+ { 0x3100000b, "ADSPRTOSMTOA CB" },
+ { 0x3100000c, "I2C CB" },
+ { 0x3100000d, "TIME_REMOTE CB" },
+ { 0x3100000e, "NV CB" },
+ { 0x3100000f, "CLKRGM_SEC CB" },
+ { 0x31000010, "RDEVMAP CB" },
+ { 0x31000011, "FS_RAPI CB" },
+ { 0x31000012, "PBMLIB CB" },
+ { 0x31000013, "AUDMGR CB" },
+ { 0x31000014, "MVS CB" },
+ { 0x31000015, "DOG_KEEPALIVE CB" },
+ { 0x31000016, "GSDI_EXP CB" },
+ { 0x31000017, "AUTH CB" },
+ { 0x31000018, "NVRUIMI CB" },
+ { 0x31000019, "MMGSDILIB CB" },
+ { 0x3100001a, "CHARGER CB" },
+ { 0x3100001b, "UIM CB" },
+ { 0x3100001C, "ONCRPCTEST CB" },
+ { 0x3100001d, "PDSM_ATL CB" },
+ { 0x3100001e, "FS_XMOUNT CB" },
+ { 0x3100001f, "SECUTIL CB" },
+ { 0x31000020, "MCCMEID" },
+ { 0x31000021, "PM_STROBE_FLASH CB" },
+ { 0x31000022, "DS707_EXTIF CB" },
+ { 0x31000023, "SMD BRIDGE_MODEM CB" },
+ { 0x31000024, "SMD PORT_MGR CB" },
+ { 0x31000025, "BUS_PERF CB" },
+ { 0x31000026, "BUS_MON CB" },
+ { 0x31000027, "MC CB" },
+ { 0x31000028, "MCCAP CB" },
+ { 0x31000029, "MCCDMA CB" },
+ { 0x3100002a, "MCCDS CB" },
+ { 0x3100002b, "MCCSCH CB" },
+ { 0x3100002c, "MCCSRID CB" },
+ { 0x3100002d, "SNM CB" },
+ { 0x3100002e, "MCCSYOBJ CB" },
+ { 0x3100002f, "DS707_APIS CB" },
+ { 0x31000030, "DS_MP_SHIM_APPS_ASYNC CB" },
+ { 0x31000031, "DSRLP_APIS CB" },
+ { 0x31000032, "RLP_APIS CB" },
+ { 0x31000033, "DS_MP_SHIM_MODEM CB" },
+ { 0x31000034, "DSHDR_APIS CB" },
+ { 0x31000035, "DSHDR_MDM_APIS CB" },
+ { 0x31000036, "DS_MP_SHIM_APPS CB" },
+ { 0x31000037, "HDRMC_APIS CB" },
+ { 0x31000038, "SMD_BRIDGE_MTOA CB" },
+ { 0x31000039, "SMD_BRIDGE_ATOM CB" },
+ { 0x3100003a, "DPMAPP_OTG CB" },
+ { 0x3100003b, "DIAG CB" },
+ { 0x3100003c, "GSTK_EXP CB" },
+ { 0x3100003d, "DSBC_MDM_APIS CB" },
+ { 0x3100003e, "HDRMRLP_MDM_APIS CB" },
+ { 0x3100003f, "HDRMRLP_APPS_APIS CB" },
+ { 0x31000040, "HDRMC_MRLP_APIS CB" },
+ { 0x31000041, "PDCOMM_APP_API CB" },
+ { 0x31000042, "DSAT_APIS CB" },
+ { 0x31000043, "MISC_RF_APIS CB" },
+ { 0x31000044, "CMIPAPP CB" },
+ { 0x31000045, "DSMP_UMTS_MODEM_APIS CB" },
+ { 0x31000046, "DSMP_UMTS_APPS_APIS CB" },
+ { 0x31000047, "DSUCSDMPSHIM CB" },
+ { 0x31000048, "TIME_REMOTE_ATOM CB" },
+ { 0x3100004a, "SD CB" },
+ { 0x3100004b, "MMOC CB" },
+ { 0x3100004c, "WLAN_ADP_FTM CB" },
+ { 0x3100004d, "WLAN_CP_CM CB" },
+ { 0x3100004e, "FTM_WLAN CB" },
+ { 0x3100004f, "SDCC_CPRM CB" },
+ { 0x31000050, "CPRMINTERFACE CB" },
+ { 0x31000051, "DATA_ON_MODEM_MTOA_APIS CB" },
+ { 0x31000052, "DATA_ON_APPS_ATOM_APIS CB" },
+ { 0x31000053, "MISC_APIS_NONWINMOB CB" },
+ { 0x31000054, "MISC_APPS_APIS_NONWINMOB CB" },
+ { 0x31000055, "PMEM_REMOTE CB" },
+ { 0x31000056, "TCXOMGR CB" },
+ { 0x31000057, "DSUCSDAPPIF_APIS CB" },
+ { 0x31000058, "BT CB" },
+ { 0x31000059, "PD_COMMS_API CB" },
+ { 0x3100005a, "PD_COMMS_CLIENT_API CB" },
+ { 0x3100005b, "PDAPI CB" },
+ { 0x3100005c, "LSA_SUPL_DSM CB" },
+ { 0x3100005d, "TIME_REMOTE_MTOA CB" },
+ { 0x3100005e, "FTM_BT CB" },
+ { 0X3100005f, "DSUCSDAPPIF_APIS CB" },
+ { 0X31000060, "PMAPP_GEN CB" },
+ { 0X31000061, "PM_LIB CB" },
+ { 0X31000062, "KEYPAD CB" },
+ { 0X31000063, "HSU_APP_APIS CB" },
+ { 0X31000064, "HSU_MDM_APIS CB" },
+ { 0X31000065, "ADIE_ADC_REMOTE_ATOM CB" },
+ { 0X31000066, "TLMM_REMOTE_ATOM CB" },
+ { 0X31000067, "UI_CALLCTRL CB" },
+ { 0X31000068, "UIUTILS CB" },
+ { 0X31000069, "PRL CB" },
+ { 0X3100006a, "HW CB" },
+ { 0X3100006b, "OEM_RAPI CB" },
+ { 0X3100006c, "WMSPM CB" },
+ { 0X3100006d, "BTPF CB" },
+ { 0X3100006e, "CLKRGM_SYNC_EVENT CB" },
+ { 0X3100006f, "USB_APPS_RPC CB" },
+ { 0X31000070, "USB_MODEM_RPC CB" },
+ { 0X31000071, "ADC CB" },
+ { 0X31000072, "CAMERAREMOTED CB" },
+ { 0X31000073, "SECAPIREMOTED CB" },
+ { 0X31000074, "DSATAPI CB" },
+ { 0X31000075, "CLKCTL_RPC CB" },
+ { 0X31000076, "BREWAPPCOORD CB" },
+ { 0X31000077, "ALTENVSHELL CB" },
+ { 0X31000078, "WLAN_TRP_UTILS CB" },
+ { 0X31000079, "GPIO_RPC CB" },
+ { 0X3100007a, "PING_RPC CB" },
+ { 0X3100007b, "DSC_DCM_API CB" },
+ { 0X3100007c, "L1_DS CB" },
+ { 0X3100007d, "QCHATPK_APIS CB" },
+ { 0X3100007e, "GPS_API CB" },
+ { 0X3100007f, "OSS_RRCASN_REMOTE CB" },
+ { 0X31000080, "PMAPP_OTG_REMOTE CB" },
+ { 0X31000081, "PING_MDM_RPC CB" },
+ { 0X31000082, "PING_KERNEL_RPC CB" },
+ { 0X31000083, "TIMETICK CB" },
+ { 0X31000084, "WM_BTHCI_FTM CB" },
+ { 0X31000085, "WM_BT_PF CB" },
+ { 0X31000086, "IPA_IPC_APIS CB" },
+ { 0X31000087, "UKCC_IPC_APIS CB" },
+ { 0X31000088, "CMIPSMS CB" },
+ { 0X31000089, "VBATT_REMOTE CB" },
+ { 0X3100008a, "MFPAL CB" },
+ { 0X3100008b, "DSUMTSPDPREG CB" },
+ { 0X3100fe00, "RESTART_DAEMON NUMBER 0 CB" },
+ { 0X3100fe01, "RESTART_DAEMON NUMBER 1 CB" },
+ { 0X3100feff, "RESTART_DAEMON NUMBER 255 CB" },
+ { 0X3100fffe, "BACKWARDS_COMPATIBILITY_IN_RPC_CLNT_LOOKUP CB" },
+ { 0X3100ffff, "RPC_ROUTER_SERVER_PROGRAM CB" },
+};
+
+struct sym wakeup_syms[] = {
+ { 0x00000040, "OTHER" },
+ { 0x00000020, "RESET" },
+ { 0x00000010, "ALARM" },
+ { 0x00000008, "TIMER" },
+ { 0x00000004, "GPIO" },
+ { 0x00000002, "INT" },
+ { 0x00000001, "RPC" },
+ { 0x00000000, "NONE" },
+};
+
+struct sym wakeup_int_syms[] = {
+ { 0, "MDDI_EXT" },
+ { 1, "MDDI_PRI" },
+ { 2, "MDDI_CLIENT"},
+ { 3, "USB_OTG" },
+ { 4, "I2CC" },
+ { 5, "SDC1_0" },
+ { 6, "SDC1_1" },
+ { 7, "SDC2_0" },
+ { 8, "SDC2_1" },
+ { 9, "ADSP_A9A11" },
+ { 10, "UART1" },
+ { 11, "UART2" },
+ { 12, "UART3" },
+ { 13, "DP_RX_DATA" },
+ { 14, "DP_RX_DATA2" },
+ { 15, "DP_RX_DATA3" },
+ { 16, "DM_UART" },
+ { 17, "DM_DP_RX_DATA" },
+ { 18, "KEYSENSE" },
+ { 19, "HSSD" },
+ { 20, "NAND_WR_ER_DONE" },
+ { 21, "NAND_OP_DONE" },
+ { 22, "TCHSCRN1" },
+ { 23, "TCHSCRN2" },
+ { 24, "TCHSCRN_SSBI" },
+ { 25, "USB_HS" },
+ { 26, "UART2_DM_RX" },
+ { 27, "UART2_DM" },
+ { 28, "SDC4_1" },
+ { 29, "SDC4_0" },
+ { 30, "SDC3_1" },
+ { 31, "SDC3_0" },
+};
+
+struct sym smsm_syms[] = {
+ { 0x80000000, "UN" },
+ { 0x7F000000, "ERR" },
+ { 0x00800000, "SMLP" },
+ { 0x00400000, "ADWN" },
+ { 0x00200000, "PWRS" },
+ { 0x00100000, "DWLD" },
+ { 0x00080000, "SRBT" },
+ { 0x00040000, "SDWN" },
+ { 0x00020000, "ARBT" },
+ { 0x00010000, "REL" },
+ { 0x00008000, "SLE" },
+ { 0x00004000, "SLP" },
+ { 0x00002000, "WFPI" },
+ { 0x00001000, "EEX" },
+ { 0x00000800, "TIN" },
+ { 0x00000400, "TWT" },
+ { 0x00000200, "PWRC" },
+ { 0x00000100, "RUN" },
+ { 0x00000080, "SA" },
+ { 0x00000040, "RES" },
+ { 0x00000020, "RIN" },
+ { 0x00000010, "RWT" },
+ { 0x00000008, "SIN" },
+ { 0x00000004, "SWT" },
+ { 0x00000002, "OE" },
+ { 0x00000001, "I" },
+};
+
+/* never reorder */
+struct sym voter_d2_syms[] = {
+ { 0x00000001, NULL },
+ { 0x00000002, NULL },
+ { 0x00000004, NULL },
+ { 0x00000008, NULL },
+ { 0x00000010, NULL },
+ { 0x00000020, NULL },
+ { 0x00000040, NULL },
+ { 0x00000080, NULL },
+ { 0x00000100, NULL },
+ { 0x00000200, NULL },
+ { 0x00000400, NULL },
+ { 0x00000800, NULL },
+ { 0x00001000, NULL },
+ { 0x00002000, NULL },
+ { 0x00004000, NULL },
+ { 0x00008000, NULL },
+ { 0x00010000, NULL },
+ { 0x00020000, NULL },
+ { 0x00040000, NULL },
+ { 0x00080000, NULL },
+ { 0x00100000, NULL },
+ { 0x00200000, NULL },
+ { 0x00400000, NULL },
+ { 0x00800000, NULL },
+ { 0x01000000, NULL },
+ { 0x02000000, NULL },
+ { 0x04000000, NULL },
+ { 0x08000000, NULL },
+ { 0x10000000, NULL },
+ { 0x20000000, NULL },
+ { 0x40000000, NULL },
+ { 0x80000000, NULL },
+};
+
+/* never reorder */
+struct sym voter_d3_syms[] = {
+ { 0x00000001, NULL },
+ { 0x00000002, NULL },
+ { 0x00000004, NULL },
+ { 0x00000008, NULL },
+ { 0x00000010, NULL },
+ { 0x00000020, NULL },
+ { 0x00000040, NULL },
+ { 0x00000080, NULL },
+ { 0x00000100, NULL },
+ { 0x00000200, NULL },
+ { 0x00000400, NULL },
+ { 0x00000800, NULL },
+ { 0x00001000, NULL },
+ { 0x00002000, NULL },
+ { 0x00004000, NULL },
+ { 0x00008000, NULL },
+ { 0x00010000, NULL },
+ { 0x00020000, NULL },
+ { 0x00040000, NULL },
+ { 0x00080000, NULL },
+ { 0x00100000, NULL },
+ { 0x00200000, NULL },
+ { 0x00400000, NULL },
+ { 0x00800000, NULL },
+ { 0x01000000, NULL },
+ { 0x02000000, NULL },
+ { 0x04000000, NULL },
+ { 0x08000000, NULL },
+ { 0x10000000, NULL },
+ { 0x20000000, NULL },
+ { 0x40000000, NULL },
+ { 0x80000000, NULL },
+};
+
+struct sym dem_state_master_syms[] = {
+ { 0, "INIT" },
+ { 1, "RUN" },
+ { 2, "SLEEP_WAIT" },
+ { 3, "SLEEP_CONFIRMED" },
+ { 4, "SLEEP_EXIT" },
+ { 5, "RSA" },
+ { 6, "EARLY_EXIT" },
+ { 7, "RSA_DELAYED" },
+ { 8, "RSA_CHECK_INTS" },
+ { 9, "RSA_CONFIRMED" },
+ { 10, "RSA_WAKING" },
+ { 11, "RSA_RESTORE" },
+ { 12, "RESET" },
+};
+
+struct sym dem_state_slave_syms[] = {
+ { 0, "INIT" },
+ { 1, "RUN" },
+ { 2, "SLEEP_WAIT" },
+ { 3, "SLEEP_EXIT" },
+ { 4, "SLEEP_RUN_PENDING" },
+ { 5, "POWER_COLLAPSE" },
+ { 6, "CHECK_INTERRUPTS" },
+ { 7, "SWFI" },
+ { 8, "WFPI" },
+ { 9, "EARLY_EXIT" },
+ { 10, "RESET_RECOVER" },
+ { 11, "RESET_ACKNOWLEDGE" },
+ { 12, "ERROR" },
+};
+
+struct sym smsm_entry_type_syms[] = {
+ { 0, "SMSM_APPS_STATE" },
+ { 1, "SMSM_MODEM_STATE" },
+ { 2, "SMSM_Q6_STATE" },
+ { 3, "SMSM_APPS_DEM" },
+ { 4, "SMSM_MODEM_DEM" },
+ { 5, "SMSM_Q6_DEM" },
+ { 6, "SMSM_POWER_MASTER_DEM" },
+ { 7, "SMSM_TIME_MASTER_DEM" },
+};
+
+struct sym smsm_state_syms[] = {
+ { 0x00000001, "INIT" },
+ { 0x00000002, "OSENTERED" },
+ { 0x00000004, "SMDWAIT" },
+ { 0x00000008, "SMDINIT" },
+ { 0x00000010, "RPCWAIT" },
+ { 0x00000020, "RPCINIT" },
+ { 0x00000040, "RESET" },
+ { 0x00000080, "RSA" },
+ { 0x00000100, "RUN" },
+ { 0x00000200, "PWRC" },
+ { 0x00000400, "TIMEWAIT" },
+ { 0x00000800, "TIMEINIT" },
+ { 0x00001000, "PWRC_EARLY_EXIT" },
+ { 0x00002000, "WFPI" },
+ { 0x00004000, "SLEEP" },
+ { 0x00008000, "SLEEPEXIT" },
+ { 0x00010000, "OEMSBL_RELEASE" },
+ { 0x00020000, "APPS_REBOOT" },
+ { 0x00040000, "SYSTEM_POWER_DOWN" },
+ { 0x00080000, "SYSTEM_REBOOT" },
+ { 0x00100000, "SYSTEM_DOWNLOAD" },
+ { 0x00200000, "PWRC_SUSPEND" },
+ { 0x00400000, "APPS_SHUTDOWN" },
+ { 0x00800000, "SMD_LOOPBACK" },
+ { 0x01000000, "RUN_QUIET" },
+ { 0x02000000, "MODEM_WAIT" },
+ { 0x04000000, "MODEM_BREAK" },
+ { 0x08000000, "MODEM_CONTINUE" },
+ { 0x80000000, "UNKNOWN" },
+};
+
+#define ID_SYM 0
+#define BASE_SYM 1
+#define EVENT_SYM 2
+#define ONCRPC_SYM 3
+#define WAKEUP_SYM 4
+#define WAKEUP_INT_SYM 5
+#define SMSM_SYM 6
+#define VOTER_D2_SYM 7
+#define VOTER_D3_SYM 8
+#define DEM_STATE_MASTER_SYM 9
+#define DEM_STATE_SLAVE_SYM 10
+#define SMSM_ENTRY_TYPE_SYM 11
+#define SMSM_STATE_SYM 12
+
+static struct sym_tbl {
+ struct sym *data;
+ int size;
+ struct hlist_head hlist[HSIZE];
+} tbl[] = {
+ { id_syms, ARRAY_SIZE(id_syms) },
+ { base_syms, ARRAY_SIZE(base_syms) },
+ { event_syms, ARRAY_SIZE(event_syms) },
+ { oncrpc_syms, ARRAY_SIZE(oncrpc_syms) },
+ { wakeup_syms, ARRAY_SIZE(wakeup_syms) },
+ { wakeup_int_syms, ARRAY_SIZE(wakeup_int_syms) },
+ { smsm_syms, ARRAY_SIZE(smsm_syms) },
+ { voter_d2_syms, ARRAY_SIZE(voter_d2_syms) },
+ { voter_d3_syms, ARRAY_SIZE(voter_d3_syms) },
+ { dem_state_master_syms, ARRAY_SIZE(dem_state_master_syms) },
+ { dem_state_slave_syms, ARRAY_SIZE(dem_state_slave_syms) },
+ { smsm_entry_type_syms, ARRAY_SIZE(smsm_entry_type_syms) },
+ { smsm_state_syms, ARRAY_SIZE(smsm_state_syms) },
+};
+
+static void find_voters(void)
+{
+ void *x, *next;
+ unsigned size;
+ int i = 0, j = 0;
+
+ x = smem_get_entry(SMEM_SLEEP_STATIC, &size);
+ next = x;
+ while (next && (next < (x + size)) &&
+ ((i + j) < (ARRAY_SIZE(voter_d3_syms) +
+ ARRAY_SIZE(voter_d2_syms)))) {
+
+ if (i < ARRAY_SIZE(voter_d3_syms)) {
+ voter_d3_syms[i].str = (char *) next;
+ i++;
+ } else if (i >= ARRAY_SIZE(voter_d3_syms) &&
+ j < ARRAY_SIZE(voter_d2_syms)) {
+ voter_d2_syms[j].str = (char *) next;
+ j++;
+ }
+
+ next += 9;
+ }
+}
+
+#define hash(val) (val % HSIZE)
+
+static void init_syms(void)
+{
+ int i;
+ int j;
+
+ for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+ for (j = 0; j < HSIZE; ++j)
+ INIT_HLIST_HEAD(&tbl[i].hlist[j]);
+
+ for (i = 0; i < ARRAY_SIZE(tbl); ++i)
+ for (j = 0; j < tbl[i].size; ++j) {
+ INIT_HLIST_NODE(&tbl[i].data[j].node);
+ hlist_add_head(&tbl[i].data[j].node,
+ &tbl[i].hlist[hash(tbl[i].data[j].val)]);
+ }
+}
+
+static char *find_sym(uint32_t id, uint32_t val)
+{
+ struct hlist_node *n;
+ struct sym *s;
+
+ hlist_for_each(n, &tbl[id].hlist[hash(val)]) {
+ s = hlist_entry(n, struct sym, node);
+ if (s->val == val)
+ return s->str;
+ }
+
+ return 0;
+}
+
+#else
+static void init_syms(void) {}
+#endif
+
+static inline unsigned int read_timestamp(void)
+{
+ unsigned int tick;
+
+ do {
+ tick = readl(TIMESTAMP_ADDR);
+ } while (tick != (tick = readl(TIMESTAMP_ADDR)));
+
+ return tick;
+}
+
+static void smem_log_event_from_user(struct smem_log_inst *inst,
+ const char __user *buf, int size, int num)
+{
+ uint32_t idx;
+ uint32_t next_idx;
+ unsigned long flags;
+ uint32_t identifier = 0;
+ uint32_t timetick = 0;
+ int first = 1;
+ int ret;
+
+ remote_spin_lock_irqsave(inst->remote_spinlock, flags);
+
+ while (num--) {
+ idx = *inst->idx;
+
+ if (idx < inst->num) {
+ ret = copy_from_user(&inst->events[idx],
+ buf, size);
+ if (ret) {
+ printk("ERROR %s:%i tried to write "
+ "%i got ret %i",
+ __func__, __LINE__,
+ size, size - ret);
+ goto out;
+ }
+
+ if (first) {
+ identifier =
+ inst->events[idx].
+ identifier;
+ timetick = read_timestamp();
+ first = 0;
+ } else {
+ identifier |= SMEM_LOG_CONT;
+ }
+ inst->events[idx].identifier =
+ identifier;
+ inst->events[idx].timetick =
+ timetick;
+ }
+
+ next_idx = idx + 1;
+ if (next_idx >= inst->num)
+ next_idx = 0;
+ *inst->idx = next_idx;
+
+ buf += sizeof(struct smem_log_item);
+ }
+
+ out:
+ remote_spin_unlock_irqrestore(inst->remote_spinlock, flags);
+}
+
+static void _smem_log_event(
+ struct smem_log_item __iomem *events,
+ uint32_t __iomem *_idx,
+ remote_spinlock_t *lock,
+ int num,
+ uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3)
+{
+ struct smem_log_item item;
+ uint32_t idx;
+ uint32_t next_idx;
+ unsigned long flags;
+
+ item.timetick = read_timestamp();
+ item.identifier = id;
+ item.data1 = data1;
+ item.data2 = data2;
+ item.data3 = data3;
+
+ remote_spin_lock_irqsave(lock, flags);
+
+ idx = *_idx;
+
+ if (idx < num) {
+ memcpy(&events[idx],
+ &item, sizeof(item));
+ }
+
+ next_idx = idx + 1;
+ if (next_idx >= num)
+ next_idx = 0;
+ *_idx = next_idx;
+
+ remote_spin_unlock_irqrestore(lock, flags);
+}
+
+static void _smem_log_event6(
+ struct smem_log_item __iomem *events,
+ uint32_t __iomem *_idx,
+ remote_spinlock_t *lock,
+ int num,
+ uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6)
+{
+ struct smem_log_item item[2];
+ uint32_t idx;
+ uint32_t next_idx;
+ unsigned long flags;
+
+ item[0].timetick = read_timestamp();
+ item[0].identifier = id;
+ item[0].data1 = data1;
+ item[0].data2 = data2;
+ item[0].data3 = data3;
+ item[1].identifier = item[0].identifier;
+ item[1].timetick = item[0].timetick;
+ item[1].data1 = data4;
+ item[1].data2 = data5;
+ item[1].data3 = data6;
+
+ remote_spin_lock_irqsave(lock, flags);
+
+ idx = *_idx;
+
+ if (idx < (num-1)) {
+ memcpy(&events[idx],
+ &item, sizeof(item));
+ }
+
+ next_idx = idx + 2;
+ if (next_idx >= num)
+ next_idx = 0;
+ *_idx = next_idx;
+
+ remote_spin_unlock_irqrestore(lock, flags);
+}
+
+void smem_log_event(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3)
+{
+ _smem_log_event(inst[GEN].events, inst[GEN].idx,
+ inst[GEN].remote_spinlock, SMEM_LOG_NUM_ENTRIES,
+ id, data1, data2, data3);
+}
+
+void smem_log_event6(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6)
+{
+ _smem_log_event6(inst[GEN].events, inst[GEN].idx,
+ inst[GEN].remote_spinlock, SMEM_LOG_NUM_ENTRIES,
+ id, data1, data2, data3, data4, data5, data6);
+}
+
+void smem_log_event_to_static(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3)
+{
+ _smem_log_event(inst[STA].events, inst[STA].idx,
+ inst[STA].remote_spinlock, SMEM_LOG_NUM_STATIC_ENTRIES,
+ id, data1, data2, data3);
+}
+
+void smem_log_event6_to_static(uint32_t id, uint32_t data1, uint32_t data2,
+ uint32_t data3, uint32_t data4, uint32_t data5,
+ uint32_t data6)
+{
+ _smem_log_event6(inst[STA].events, inst[STA].idx,
+ inst[STA].remote_spinlock, SMEM_LOG_NUM_STATIC_ENTRIES,
+ id, data1, data2, data3, data4, data5, data6);
+}
+
+static int _smem_log_init(void)
+{
+ inst[GEN].which_log = GEN;
+ inst[GEN].events =
+ (struct smem_log_item *)smem_alloc(SMEM_SMEM_LOG_EVENTS,
+ SMEM_LOG_EVENTS_SIZE);
+ inst[GEN].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_IDX,
+ sizeof(uint32_t));
+ if (!inst[GEN].events || !inst[GEN].idx) {
+ pr_err("%s: no log or log_idx allocated, "
+ "smem_log disabled\n", __func__);
+ }
+ inst[GEN].num = SMEM_LOG_NUM_ENTRIES;
+ inst[GEN].remote_spinlock = &remote_spinlock;
+
+ inst[STA].which_log = STA;
+ inst[STA].events =
+ (struct smem_log_item *)
+ smem_alloc(SMEM_SMEM_STATIC_LOG_EVENTS,
+ SMEM_STATIC_LOG_EVENTS_SIZE);
+ inst[STA].idx = (uint32_t *)smem_alloc(SMEM_SMEM_STATIC_LOG_IDX,
+ sizeof(uint32_t));
+ if (!inst[STA].events || !inst[STA].idx) {
+ pr_err("%s: no static log or log_idx "
+ "allocated, smem_log disabled\n", __func__);
+ }
+ inst[STA].num = SMEM_LOG_NUM_STATIC_ENTRIES;
+ inst[STA].remote_spinlock = &remote_spinlock_static;
+
+ inst[POW].which_log = POW;
+ inst[POW].events =
+ (struct smem_log_item *)
+ smem_alloc(SMEM_SMEM_LOG_POWER_EVENTS,
+ SMEM_POWER_LOG_EVENTS_SIZE);
+ inst[POW].idx = (uint32_t *)smem_alloc(SMEM_SMEM_LOG_POWER_IDX,
+ sizeof(uint32_t));
+ if (!inst[POW].events || !inst[POW].idx) {
+ pr_err("%s: no power log or log_idx "
+ "allocated, smem_log disabled\n", __func__);
+ }
+ inst[POW].num = SMEM_LOG_NUM_POWER_ENTRIES;
+ inst[POW].remote_spinlock = &remote_spinlock;
+
+ remote_spin_lock_init(&remote_spinlock,
+ SMEM_SPINLOCK_SMEM_LOG);
+ remote_spin_lock_init(&remote_spinlock_static,
+ SMEM_SPINLOCK_STATIC_LOG);
+
+ init_syms();
+
+ return 0;
+}
+
+static ssize_t smem_log_read_bin(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int idx;
+ int orig_idx;
+ unsigned long flags;
+ int ret;
+ int tot_bytes = 0;
+ struct smem_log_inst *inst;
+
+ inst = fp->private_data;
+
+ remote_spin_lock_irqsave(inst->remote_spinlock, flags);
+
+ orig_idx = *inst->idx;
+ idx = orig_idx;
+
+ while (1) {
+ idx--;
+ if (idx < 0)
+ idx = inst->num - 1;
+ if (idx == orig_idx) {
+ ret = tot_bytes;
+ break;
+ }
+
+ if ((tot_bytes + sizeof(struct smem_log_item)) > count) {
+ ret = tot_bytes;
+ break;
+ }
+
+ ret = copy_to_user(buf, &inst[GEN].events[idx],
+ sizeof(struct smem_log_item));
+ if (ret) {
+ ret = -EIO;
+ break;
+ }
+
+ tot_bytes += sizeof(struct smem_log_item);
+
+ buf += sizeof(struct smem_log_item);
+ }
+
+ remote_spin_unlock_irqrestore(inst->remote_spinlock, flags);
+
+ return ret;
+}
+
+static ssize_t smem_log_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char loc_buf[128];
+ int i;
+ int idx;
+ int orig_idx;
+ unsigned long flags;
+ int ret;
+ int tot_bytes = 0;
+ struct smem_log_inst *inst;
+
+ inst = fp->private_data;
+
+ remote_spin_lock_irqsave(inst->remote_spinlock, flags);
+
+ orig_idx = *inst->idx;
+ idx = orig_idx;
+
+ while (1) {
+ idx--;
+ if (idx < 0)
+ idx = inst->num - 1;
+ if (idx == orig_idx) {
+ ret = tot_bytes;
+ break;
+ }
+
+ i = scnprintf(loc_buf, 128,
+ "0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ inst->events[idx].identifier,
+ inst->events[idx].timetick,
+ inst->events[idx].data1,
+ inst->events[idx].data2,
+ inst->events[idx].data3);
+ if (i == 0) {
+ ret = -EIO;
+ break;
+ }
+
+ if ((tot_bytes + i) > count) {
+ ret = tot_bytes;
+ break;
+ }
+
+ tot_bytes += i;
+
+ ret = copy_to_user(buf, loc_buf, i);
+ if (ret) {
+ ret = -EIO;
+ break;
+ }
+
+ buf += i;
+ }
+
+ remote_spin_unlock_irqrestore(inst->remote_spinlock, flags);
+
+ return ret;
+}
+
+static ssize_t smem_log_write_bin(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ if (count < sizeof(struct smem_log_item))
+ return -EINVAL;
+
+ smem_log_event_from_user(fp->private_data, buf,
+ sizeof(struct smem_log_item),
+ count / sizeof(struct smem_log_item));
+
+ return count;
+}
+
+static ssize_t smem_log_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ int ret;
+ const char delimiters[] = " ,;";
+ char locbuf[256] = {0};
+ uint32_t val[10];
+ int vals = 0;
+ char *token;
+ char *running;
+ struct smem_log_inst *inst;
+ unsigned long res;
+
+ inst = fp->private_data;
+
+ if (count < 0) {
+ printk(KERN_ERR "ERROR: %s passed neg count = %i\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
+ count = count > 255 ? 255 : count;
+
+ locbuf[count] = '\0';
+
+ ret = copy_from_user(locbuf, buf, count);
+ if (ret != 0) {
+ printk(KERN_ERR "ERROR: %s could not copy %i bytes\n",
+ __func__, ret);
+ return -EINVAL;
+ }
+
+ D(KERN_ERR "%s: ", __func__);
+ D_DUMP_BUFFER("We got", len, locbuf);
+
+ running = locbuf;
+
+ token = strsep(&running, delimiters);
+ while (token && vals < ARRAY_SIZE(val)) {
+ if (*token != '\0') {
+ D(KERN_ERR "%s: ", __func__);
+ D_DUMP_BUFFER("", strlen(token), token);
+ ret = strict_strtoul(token, 0, &res);
+ if (ret) {
+ printk(KERN_ERR "ERROR: %s:%i got bad char "
+ "at strict_strtoul\n",
+ __func__, __LINE__-4);
+ return -EINVAL;
+ }
+ val[vals++] = res;
+ }
+ token = strsep(&running, delimiters);
+ }
+
+ if (vals > 5) {
+ if (inst->which_log == GEN)
+ smem_log_event6(val[0], val[2], val[3], val[4],
+ val[7], val[8], val[9]);
+ else if (inst->which_log == STA)
+ smem_log_event6_to_static(val[0],
+ val[2], val[3], val[4],
+ val[7], val[8], val[9]);
+ else
+ return -1;
+ } else {
+ if (inst->which_log == GEN)
+ smem_log_event(val[0], val[2], val[3], val[4]);
+ else if (inst->which_log == STA)
+ smem_log_event_to_static(val[0],
+ val[2], val[3], val[4]);
+ else
+ return -1;
+ }
+
+ return count;
+}
+
+static int smem_log_open(struct inode *ip, struct file *fp)
+{
+ fp->private_data = &inst[GEN];
+
+ return 0;
+}
+
+
+static int smem_log_release(struct inode *ip, struct file *fp)
+{
+ return 0;
+}
+
+static int smem_log_ioctl(struct inode *ip, struct file *fp,
+ unsigned int cmd, unsigned long arg);
+
+static const struct file_operations smem_log_fops = {
+ .owner = THIS_MODULE,
+ .read = smem_log_read,
+ .write = smem_log_write,
+ .open = smem_log_open,
+ .release = smem_log_release,
+ .ioctl = smem_log_ioctl,
+};
+
+static const struct file_operations smem_log_bin_fops = {
+ .owner = THIS_MODULE,
+ .read = smem_log_read_bin,
+ .write = smem_log_write_bin,
+ .open = smem_log_open,
+ .release = smem_log_release,
+ .ioctl = smem_log_ioctl,
+};
+
+static int smem_log_ioctl(struct inode *ip, struct file *fp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct smem_log_inst *inst;
+
+ inst = fp->private_data;
+
+ switch (cmd) {
+ default:
+ return -ENOTTY;
+
+ case SMIOC_SETMODE:
+ if (arg == SMIOC_TEXT) {
+ D("%s set text mode\n", __func__);
+ fp->f_op = &smem_log_fops;
+ } else if (arg == SMIOC_BINARY) {
+ D("%s set bin mode\n", __func__);
+ fp->f_op = &smem_log_bin_fops;
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case SMIOC_SETLOG:
+ if (arg == SMIOC_LOG)
+ fp->private_data = &inst[GEN];
+ else if (arg == SMIOC_STATIC_LOG)
+ fp->private_data = &inst[STA];
+ else
+ return -EINVAL;
+ break;
+ }
+
+ return 0;
+}
+
+static struct miscdevice smem_log_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "smem_log",
+ .fops = &smem_log_fops,
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int _debug_dump(int log, char *buf, int max)
+{
+ unsigned int idx;
+ int orig_idx;
+ unsigned long flags;
+ int i = 0;
+
+ if (!inst[log].events)
+ return 0;
+
+ remote_spin_lock_irqsave(inst[log].remote_spinlock, flags);
+
+ orig_idx = *inst[log].idx;
+ idx = orig_idx;
+
+ while (1) {
+ idx++;
+ if (idx > inst[log].num - 1)
+ idx = 0;
+ if (idx == orig_idx)
+ break;
+
+ if (idx < inst[log].num) {
+ if (!inst[log].events[idx].identifier)
+ continue;
+
+ i += scnprintf(buf + i, max - i,
+ "%08x %08x %08x %08x %08x\n",
+ inst[log].events[idx].identifier,
+ inst[log].events[idx].timetick,
+ inst[log].events[idx].data1,
+ inst[log].events[idx].data2,
+ inst[log].events[idx].data3);
+ }
+ }
+
+ remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags);
+
+ return i;
+}
+
+static int _debug_dump_sym(int log, char *buf, int max)
+{
+ unsigned int idx;
+ int orig_idx;
+ unsigned long flags;
+ int i = 0;
+
+ char *proc;
+ char *sub;
+ char *id;
+ char *sym = NULL;
+
+ uint32_t data[3];
+
+ uint32_t proc_val = 0;
+ uint32_t sub_val = 0;
+ uint32_t id_val = 0;
+ uint32_t id_only_val = 0;
+ uint32_t data1 = 0;
+ uint32_t data2 = 0;
+ uint32_t data3 = 0;
+
+ int k;
+
+ if (!inst[log].events)
+ return 0;
+
+ find_voters(); /* need to call each time in case voters come and go */
+
+ i += scnprintf(buf + i, max - i, "Voters:\n");
+ for (k = 0; k < ARRAY_SIZE(voter_d3_syms); ++k)
+ if (voter_d3_syms[k].str)
+ i += scnprintf(buf + i, max - i, "%s ",
+ voter_d3_syms[k].str);
+ for (k = 0; k < ARRAY_SIZE(voter_d2_syms); ++k)
+ if (voter_d2_syms[k].str)
+ i += scnprintf(buf + i, max - i, "%s ",
+ voter_d2_syms[k].str);
+ i += scnprintf(buf + i, max - i, "\n");
+
+ remote_spin_lock_irqsave(inst[log].remote_spinlock, flags);
+
+ orig_idx = *inst[log].idx;
+ idx = orig_idx;
+
+ while (1) {
+ idx++;
+ if (idx > inst[log].num - 1)
+ idx = 0;
+ if (idx == orig_idx) {
+ i += scnprintf(buf + i, max - i, "\n");
+ break;
+ }
+ if (idx < inst[log].num) {
+ if (!inst[log].events[idx].identifier)
+ continue;
+
+ proc_val = PROC & inst[log].events[idx].identifier;
+ sub_val = SUB & inst[log].events[idx].identifier;
+ id_val = (SUB | ID) & inst[log].events[idx].identifier;
+ id_only_val = ID & inst[log].events[idx].identifier;
+ data1 = inst[log].events[idx].data1;
+ data2 = inst[log].events[idx].data2;
+ data3 = inst[log].events[idx].data3;
+
+ if (!(proc_val & SMEM_LOG_CONT)) {
+ i += scnprintf(buf + i, max - i, "\n");
+
+ proc = find_sym(ID_SYM, proc_val);
+
+ if (proc)
+ i += scnprintf(buf + i, max - i,
+ "%4s: ",
+ proc);
+ else
+ i += scnprintf(buf + i, max - i,
+ "%04x: ",
+ PROC &
+ inst[log].events[idx].
+ identifier);
+
+ i += scnprintf(buf + i, max - i,
+ "%10u ",
+ inst[log].events[idx].timetick);
+
+ sub = find_sym(BASE_SYM, sub_val);
+
+ if (sub)
+ i += scnprintf(buf + i, max - i,
+ "%9s: ",
+ sub);
+ else
+ i += scnprintf(buf + i, max - i,
+ "%08x: ",
+ sub_val);
+
+ id = find_sym(EVENT_SYM, id_val);
+
+ if (id)
+ i += scnprintf(buf + i, max - i,
+ "%11s: ",
+ id);
+ else
+ i += scnprintf(buf + i, max - i,
+ "%08x: ",
+ id_only_val);
+ }
+
+ if ((proc_val & SMEM_LOG_CONT) &&
+ (id_val == ONCRPC_LOG_EVENT_STD_CALL ||
+ id_val == ONCRPC_LOG_EVENT_STD_REPLY)) {
+ data[0] = data1;
+ data[1] = data2;
+ data[2] = data3;
+ i += scnprintf(buf + i, max - i,
+ " %.16s",
+ (char *) data);
+ } else if (proc_val & SMEM_LOG_CONT) {
+ i += scnprintf(buf + i, max - i,
+ " %08x %08x %08x",
+ data1,
+ data2,
+ data3);
+ } else if (id_val == ONCRPC_LOG_EVENT_STD_CALL) {
+ sym = find_sym(ONCRPC_SYM, data2);
+
+ if (sym)
+ i += scnprintf(buf + i, max - i,
+ "xid:%4i %8s proc:%3i",
+ data1,
+ sym,
+ data3);
+ else
+ i += scnprintf(buf + i, max - i,
+ "xid:%4i %08x proc:%3i",
+ data1,
+ data2,
+ data3);
+#if defined(CONFIG_MSM_N_WAY_SMSM)
+ } else if (id_val == DEM_STATE_CHANGE) {
+ if (data1 == 1) {
+ i += scnprintf(buf + i,
+ max - i,
+ "MASTER: ");
+ sym = find_sym(DEM_STATE_MASTER_SYM,
+ data2);
+ } else if (data1 == 0) {
+ i += scnprintf(buf + i,
+ max - i,
+ " SLAVE: ");
+ sym = find_sym(DEM_STATE_SLAVE_SYM,
+ data2);
+ } else {
+ i += scnprintf(buf + i,
+ max - i,
+ "%x: ",
+ data1);
+ sym = NULL;
+ }
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "from:%s ",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "from:0x%x ",
+ data2);
+
+ if (data1 == 1)
+ sym = find_sym(DEM_STATE_MASTER_SYM,
+ data3);
+ else if (data1 == 0)
+ sym = find_sym(DEM_STATE_SLAVE_SYM,
+ data3);
+ else
+ sym = NULL;
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "to:%s ",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "to:0x%x ",
+ data3);
+
+ } else if (id_val == DEM_STATE_MACHINE_ENTER) {
+ i += scnprintf(buf + i,
+ max - i,
+ "swfi:%i timer:%i manexit:%i",
+ data1, data2, data3);
+
+ } else if (id_val == DEM_TIME_SYNC_REQUEST ||
+ id_val == DEM_TIME_SYNC_POLL ||
+ id_val == DEM_TIME_SYNC_INIT) {
+ sym = find_sym(SMSM_ENTRY_TYPE_SYM,
+ data1);
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "hostid:%s",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "hostid:%x",
+ data1);
+
+ } else if (id_val == DEM_TIME_SYNC_START ||
+ id_val == DEM_TIME_SYNC_SEND_VALUE) {
+ unsigned mask = 0x1;
+ unsigned tmp = 0;
+ if (id_val == DEM_TIME_SYNC_START)
+ i += scnprintf(buf + i,
+ max - i,
+ "req:");
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "pol:");
+ while (mask) {
+ if (mask & data1) {
+ sym = find_sym(
+ SMSM_ENTRY_TYPE_SYM,
+ tmp);
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "%s ",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "%i ",
+ tmp);
+ }
+ mask <<= 1;
+ tmp++;
+ }
+ if (id_val == DEM_TIME_SYNC_SEND_VALUE)
+ i += scnprintf(buf + i,
+ max - i,
+ "tick:%x",
+ data2);
+ } else if (id_val == DEM_SMSM_ISR) {
+ unsigned vals[] = {data2, data3};
+ unsigned j;
+ unsigned mask;
+ unsigned tmp;
+ unsigned once;
+ sym = find_sym(SMSM_ENTRY_TYPE_SYM,
+ data1);
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "%s ",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "%x ",
+ data1);
+
+ for (j = 0; j < ARRAY_SIZE(vals); ++j) {
+ i += scnprintf(buf + i, max - i, "[");
+ mask = 0x80000000;
+ once = 0;
+ while (mask) {
+ tmp = vals[j] & mask;
+ mask >>= 1;
+ if (!tmp)
+ continue;
+ sym = find_sym(SMSM_STATE_SYM,
+ tmp);
+
+ if (once)
+ i += scnprintf(buf + i,
+ max - i,
+ " ");
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "%s",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "0x%x",
+ tmp);
+ once = 1;
+ }
+ i += scnprintf(buf + i, max - i, "] ");
+ }
+#else
+ } else if (id_val == DEMAPPS_WAKEUP_REASON) {
+ unsigned mask = 0x80000000;
+ unsigned tmp = 0;
+ while (mask) {
+ tmp = data1 & mask;
+ mask >>= 1;
+ if (!tmp)
+ continue;
+ sym = find_sym(WAKEUP_SYM, tmp);
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "%s ",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "%08x ",
+ tmp);
+ }
+ i += scnprintf(buf + i, max - i,
+ "%08x %08x",
+ data2,
+ data3);
+ } else if (id_val == DEMMOD_APPS_WAKEUP_INT) {
+ sym = find_sym(WAKEUP_INT_SYM, data1);
+
+ if (sym)
+ i += scnprintf(buf + i, max - i,
+ "%s %08x %08x",
+ sym,
+ data2,
+ data3);
+ else
+ i += scnprintf(buf + i, max - i,
+ "%08x %08x %08x",
+ data1,
+ data2,
+ data3);
+ } else if (id_val == DEM_NO_SLEEP ||
+ id_val == NO_SLEEP_NEW) {
+ unsigned vals[] = {data3, data2};
+ unsigned j;
+ unsigned mask;
+ unsigned tmp;
+ unsigned once;
+ i += scnprintf(buf + i, max - i, "%08x ",
+ data1);
+ i += scnprintf(buf + i, max - i, "[");
+ once = 0;
+ for (j = 0; j < ARRAY_SIZE(vals); ++j) {
+ mask = 0x00000001;
+ while (mask) {
+ tmp = vals[j] & mask;
+ mask <<= 1;
+ if (!tmp)
+ continue;
+ if (j == 0)
+ sym = find_sym(
+ VOTER_D3_SYM,
+ tmp);
+ else
+ sym = find_sym(
+ VOTER_D2_SYM,
+ tmp);
+
+ if (once)
+ i += scnprintf(buf + i,
+ max - i,
+ " ");
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "%s",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "%08x",
+ tmp);
+ once = 1;
+ }
+ }
+ i += scnprintf(buf + i, max - i, "] ");
+#endif
+ } else if (id_val == SMEM_LOG_EVENT_CB) {
+ unsigned vals[] = {data2, data3};
+ unsigned j;
+ unsigned mask;
+ unsigned tmp;
+ unsigned once;
+ i += scnprintf(buf + i, max - i, "%08x ",
+ data1);
+ for (j = 0; j < ARRAY_SIZE(vals); ++j) {
+ i += scnprintf(buf + i, max - i, "[");
+ mask = 0x80000000;
+ once = 0;
+ while (mask) {
+ tmp = vals[j] & mask;
+ mask >>= 1;
+ if (!tmp)
+ continue;
+ sym = find_sym(SMSM_SYM, tmp);
+
+ if (once)
+ i += scnprintf(buf + i,
+ max - i,
+ " ");
+ if (sym)
+ i += scnprintf(buf + i,
+ max - i,
+ "%s",
+ sym);
+ else
+ i += scnprintf(buf + i,
+ max - i,
+ "%08x",
+ tmp);
+ once = 1;
+ }
+ i += scnprintf(buf + i, max - i, "] ");
+ }
+ } else {
+ i += scnprintf(buf + i, max - i,
+ "%08x %08x %08x",
+ data1,
+ data2,
+ data3);
+ }
+ }
+ }
+
+ remote_spin_unlock_irqrestore(inst[log].remote_spinlock, flags);
+
+ return i;
+}
+
+static int debug_dump(char *buf, int max)
+{
+ return _debug_dump(GEN, buf, max);
+}
+
+static int debug_dump_sym(char *buf, int max)
+{
+ return _debug_dump_sym(GEN, buf, max);
+}
+
+static int debug_dump_static(char *buf, int max)
+{
+ return _debug_dump(STA, buf, max);
+}
+
+static int debug_dump_static_sym(char *buf, int max)
+{
+ return _debug_dump_sym(STA, buf, max);
+}
+
+static int debug_dump_power(char *buf, int max)
+{
+ return _debug_dump(POW, buf, max);
+}
+
+static int debug_dump_power_sym(char *buf, int max)
+{
+ return _debug_dump_sym(POW, buf, max);
+}
+
+#define SMEM_LOG_ITEM_PRINT_SIZE 160
+
+#define EVENTS_PRINT_SIZE \
+(SMEM_LOG_ITEM_PRINT_SIZE * SMEM_LOG_NUM_ENTRIES)
+
+static char debug_buffer[EVENTS_PRINT_SIZE];
+
+static ssize_t debug_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int (*fill)(char *buf, int max) = file->private_data;
+ int bsize = fill(debug_buffer, EVENTS_PRINT_SIZE);
+ return simple_read_from_buffer(buf, count, ppos, debug_buffer,
+ bsize);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations debug_ops = {
+ .read = debug_read,
+ .open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+ struct dentry *dent,
+ int (*fill)(char *buf, int max))
+{
+ debugfs_create_file(name, mode, dent, fill, &debug_ops);
+}
+
+static void smem_log_debugfs_init(void)
+{
+ struct dentry *dent;
+
+ dent = debugfs_create_dir("smem_log", 0);
+ if (IS_ERR(dent))
+ return;
+
+ debug_create("dump", 0444, dent, debug_dump);
+ debug_create("dump_sym", 0444, dent, debug_dump_sym);
+ debug_create("dump_static", 0444, dent, debug_dump_static);
+ debug_create("dump_static_sym", 0444, dent, debug_dump_static_sym);
+ debug_create("dump_power", 0444, dent, debug_dump_power);
+ debug_create("dump_power_sym", 0444, dent, debug_dump_power_sym);
+}
+#else
+static void smem_log_debugfs_init(void) {}
+#endif
+
+static int __init smem_log_init(void)
+{
+ int ret;
+
+ ret = _smem_log_init();
+ if (ret < 0)
+ return ret;
+
+ smem_log_debugfs_init();
+
+ return misc_register(&smem_log_dev);
+}
+
+
+module_init(smem_log_init);
+
+MODULE_DESCRIPTION("smem log");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/mach-msm/socinfo.c b/arch/arm/mach-msm/socinfo.c
new file mode 100644
index 000000000000..0c65e25c9c66
--- /dev/null
+++ b/arch/arm/mach-msm/socinfo.c
@@ -0,0 +1,430 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora Forum nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this software
+ * may be relicensed by the recipient under the terms of the GNU General Public
+ * License version 2 ("GPL") and only version 2, in which case the provisions of
+ * the GPL apply INSTEAD OF those given above. If the recipient relicenses the
+ * software under the GPL, then the identification text in the MODULE_LICENSE
+ * macro must be changed to reflect "GPLv2" instead of "Dual BSD/GPL". Once a
+ * recipient changes the license terms to the GPL, subsequent recipients shall
+ * not relicense under alternate licensing terms, including the BSD or dual
+ * BSD/GPL terms. In addition, the following license statement immediately
+ * below and between the words START and END shall also then apply when this
+ * software is relicensed under the GPL:
+ *
+ * START
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 and only version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * END
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * SOC Info Routines
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/sysdev.h>
+#include "socinfo.h"
+#include "smd_private.h"
+
+enum {
+ HW_PLATFORM_UNKNOWN = 0,
+ HW_PLATFORM_SURF = 1,
+ HW_PLATFORM_FFA = 2,
+ HW_PLATFORM_FLUID = 3,
+ HW_PLATFORM_INVALID
+};
+
+char *hw_platform[] = {
+ "Unknown",
+ "Surf",
+ "FFA",
+ "Fluid"
+};
+
+/* Used to parse shared memory. Must match the modem. */
+struct socinfo_v1 {
+ uint32_t format;
+ uint32_t id;
+ uint32_t version;
+ char build_id[32];
+};
+
+struct socinfo_v2 {
+ struct socinfo_v1 v1;
+
+ /* only valid when format==2 */
+ uint32_t raw_id;
+ uint32_t raw_version;
+};
+
+struct socinfo_v3 {
+ struct socinfo_v2 v2;
+
+ /* only valid when format==3 */
+ uint32_t hw_platform;
+};
+
+static union {
+ struct socinfo_v1 v1;
+ struct socinfo_v2 v2;
+ struct socinfo_v3 v3;
+} *socinfo;
+
+static enum msm_cpu cpu_of_id[] = {
+
+ /* 7x01 IDs */
+ [1] = MSM_CPU_7X01,
+ [16] = MSM_CPU_7X01,
+ [17] = MSM_CPU_7X01,
+ [18] = MSM_CPU_7X01,
+ [19] = MSM_CPU_7X01,
+ [23] = MSM_CPU_7X01,
+ [25] = MSM_CPU_7X01,
+ [26] = MSM_CPU_7X01,
+ [32] = MSM_CPU_7X01,
+ [33] = MSM_CPU_7X01,
+ [34] = MSM_CPU_7X01,
+ [35] = MSM_CPU_7X01,
+
+ /* 7x25 IDs */
+ [20] = MSM_CPU_7X25,
+ [21] = MSM_CPU_7X25,
+ [24] = MSM_CPU_7X25,
+ [27] = MSM_CPU_7X25,
+ [39] = MSM_CPU_7X25,
+ [40] = MSM_CPU_7X25,
+ [41] = MSM_CPU_7X25,
+ [42] = MSM_CPU_7X25,
+
+ /* 7x27 IDs */
+ [43] = MSM_CPU_7X27,
+ [44] = MSM_CPU_7X27,
+ [61] = MSM_CPU_7X27,
+
+ /* 8x50 IDs */
+ [30] = MSM_CPU_8X50,
+ [36] = MSM_CPU_8X50,
+ [37] = MSM_CPU_8X50,
+ [38] = MSM_CPU_8X50,
+
+ /* 7x30 IDs */
+ [59] = MSM_CPU_7X30,
+ [60] = MSM_CPU_7X30,
+
+ /* Uninitialized IDs are not known to run Linux.
+ MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
+ considered as unknown CPU. Any ID > 60 is invalid. */
+};
+
+static enum msm_cpu cur_cpu;
+
+uint32_t socinfo_get_id(void)
+{
+ return (socinfo) ? socinfo->v1.id : 0;
+}
+
+uint32_t socinfo_get_version(void)
+{
+ return (socinfo) ? socinfo->v1.version : 0;
+}
+
+char *socinfo_get_build_id(void)
+{
+ return (socinfo) ? socinfo->v1.build_id : NULL;
+}
+
+uint32_t socinfo_get_raw_id(void)
+{
+ return socinfo ?
+ (socinfo->v1.format == 2 ? socinfo->v2.raw_id : 0)
+ : 0;
+}
+
+uint32_t socinfo_get_raw_version(void)
+{
+ return socinfo ?
+ (socinfo->v1.format == 2 ? socinfo->v2.raw_version : 0)
+ : 0;
+}
+
+uint32_t socinfo_get_platform_type(void)
+{
+ return socinfo ?
+ (socinfo->v1.format == 3 ? socinfo->v3.hw_platform : 0)
+ : 0;
+}
+
+enum msm_cpu socinfo_get_msm_cpu(void)
+{
+ return cur_cpu;
+}
+
+static ssize_t
+socinfo_show_id(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ if (!socinfo) {
+ pr_err("%s: No socinfo found!\n", __func__);
+ return 0;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", socinfo_get_id());
+}
+
+static ssize_t
+socinfo_show_version(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ uint32_t version;
+
+ if (!socinfo) {
+ pr_err("%s: No socinfo found!\n", __func__);
+ return 0;
+ }
+
+ version = socinfo_get_version();
+ return snprintf(buf, PAGE_SIZE, "%u.%u\n",
+ SOCINFO_VERSION_MAJOR(version),
+ SOCINFO_VERSION_MINOR(version));
+}
+
+static ssize_t
+socinfo_show_build_id(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ if (!socinfo) {
+ pr_err("%s: No socinfo found!\n", __func__);
+ return 0;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%-.32s\n", socinfo_get_build_id());
+}
+
+static ssize_t
+socinfo_show_raw_id(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ if (!socinfo) {
+ pr_err("%s: No socinfo found!\n", __func__);
+ return 0;
+ }
+ if (socinfo->v1.format != 2) {
+ pr_err("%s: Raw ID not available!\n", __func__);
+ return 0;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", socinfo_get_raw_id());
+}
+
+static ssize_t
+socinfo_show_raw_version(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ if (!socinfo) {
+ pr_err("%s: No socinfo found!\n", __func__);
+ return 0;
+ }
+ if (socinfo->v1.format != 2) {
+ pr_err("%s: Raw version not available!\n", __func__);
+ return 0;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", socinfo_get_raw_version());
+}
+
+static ssize_t
+socinfo_show_platform_type(struct sys_device *dev,
+ struct sysdev_attribute *attr,
+ char *buf)
+{
+ uint32_t hw_type;
+
+ if (!socinfo) {
+ pr_err("%s: No socinfo found!\n", __func__);
+ return 0;
+ }
+ if (socinfo->v1.format != 3) {
+ pr_err("%s: platform type not available!\n", __func__);
+ return 0;
+ }
+
+ hw_type = socinfo_get_platform_type();
+ if (hw_type >= HW_PLATFORM_INVALID) {
+ pr_err("%s: Invalid hardware platform type found\n",
+ __func__);
+ hw_type = HW_PLATFORM_UNKNOWN;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%-.32s\n", hw_platform[hw_type]);
+}
+
+static struct sysdev_attribute socinfo_v1_files[] = {
+ _SYSDEV_ATTR(id, 0444, socinfo_show_id, NULL),
+ _SYSDEV_ATTR(version, 0444, socinfo_show_version, NULL),
+ _SYSDEV_ATTR(build_id, 0444, socinfo_show_build_id, NULL),
+};
+
+static struct sysdev_attribute socinfo_v2_files[] = {
+ _SYSDEV_ATTR(raw_id, 0444, socinfo_show_raw_id, NULL),
+ _SYSDEV_ATTR(raw_version, 0444, socinfo_show_raw_version, NULL),
+};
+
+static struct sysdev_attribute socinfo_v3_files[] = {
+ _SYSDEV_ATTR(hw_platform, 0444, socinfo_show_platform_type, NULL),
+};
+
+static struct sysdev_class soc_sysdev_class = {
+ .name = "soc",
+};
+
+static struct sys_device soc_sys_device = {
+ .id = 0,
+ .cls = &soc_sysdev_class,
+};
+
+static void __init socinfo_create_files(struct sys_device *dev,
+ struct sysdev_attribute files[],
+ int size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ int err = sysdev_create_file(dev, &files[i]);
+ if (err) {
+ pr_err("%s: sysdev_create_file(%s)=%d\n",
+ __func__, files[i].attr.name, err);
+ return;
+ }
+ }
+}
+
+static void __init socinfo_init_sysdev(void)
+{
+ int err;
+
+ err = sysdev_class_register(&soc_sysdev_class);
+ if (err) {
+ pr_err("%s: sysdev_class_register fail (%d)\n",
+ __func__, err);
+ return;
+ }
+ err = sysdev_register(&soc_sys_device);
+ if (err) {
+ pr_err("%s: sysdev_register fail (%d)\n",
+ __func__, err);
+ return;
+ }
+ socinfo_create_files(&soc_sys_device, socinfo_v1_files,
+ ARRAY_SIZE(socinfo_v1_files));
+ if (socinfo->v1.format < 2)
+ return;
+ socinfo_create_files(&soc_sys_device, socinfo_v2_files,
+ ARRAY_SIZE(socinfo_v2_files));
+
+ if (socinfo->v1.format < 3)
+ return;
+
+ socinfo_create_files(&soc_sys_device, socinfo_v3_files,
+ ARRAY_SIZE(socinfo_v3_files));
+
+}
+
+int __init socinfo_init(void)
+{
+ socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID, sizeof(struct socinfo_v3));
+ if (!socinfo)
+ socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v2));
+
+ if (!socinfo)
+ socinfo = smem_alloc(SMEM_HW_SW_BUILD_ID,
+ sizeof(struct socinfo_v1));
+
+ if (!socinfo) {
+ pr_err("%s: Can't find SMEM_HW_SW_BUILD_ID\n",
+ __func__);
+ return -EIO;
+ }
+
+ WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
+ WARN(socinfo_get_id() >= ARRAY_SIZE(cpu_of_id),
+ "New IDs added! ID => CPU mapping might need an update.\n");
+
+ if (socinfo->v1.id < ARRAY_SIZE(cpu_of_id))
+ cur_cpu = cpu_of_id[socinfo->v1.id];
+
+ socinfo_init_sysdev();
+
+ switch (socinfo->v1.format) {
+ case 1:
+ pr_info("%s: v%u, id=%u, ver=%u.%u\n",
+ __func__, socinfo->v1.format, socinfo->v1.id,
+ SOCINFO_VERSION_MAJOR(socinfo->v1.version),
+ SOCINFO_VERSION_MINOR(socinfo->v1.version));
+ break;
+ case 2:
+ pr_info("%s: v%u, id=%u, ver=%u.%u, "
+ "raw_id=%u, raw_ver=%u\n",
+ __func__, socinfo->v1.format, socinfo->v1.id,
+ SOCINFO_VERSION_MAJOR(socinfo->v1.version),
+ SOCINFO_VERSION_MINOR(socinfo->v1.version),
+ socinfo->v2.raw_id, socinfo->v2.raw_version);
+ break;
+ case 3:
+ pr_info("%s: v%u, id=%u, ver=%u.%u, "
+ "raw_id=%u, raw_ver=%u, hw_plat=%u\n",
+ __func__, socinfo->v1.format, socinfo->v1.id,
+ SOCINFO_VERSION_MAJOR(socinfo->v1.version),
+ SOCINFO_VERSION_MINOR(socinfo->v1.version),
+ socinfo->v2.raw_id, socinfo->v2.raw_version,
+ socinfo->v3.hw_platform);
+ break;
+ default:
+ pr_err("%s: Unknown format found\n", __func__);
+ break;
+ }
+
+ return 0;
+}
diff --git a/arch/arm/mach-msm/socinfo.h b/arch/arm/mach-msm/socinfo.h
new file mode 100644
index 000000000000..a3c04b19f4ba
--- /dev/null
+++ b/arch/arm/mach-msm/socinfo.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits. For example:
+ * 1.0 -> 0x00010000
+ * 2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) ((ver & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
+
+enum msm_cpu {
+ MSM_CPU_UNKNOWN = 0,
+ MSM_CPU_7X01,
+ MSM_CPU_7X25,
+ MSM_CPU_7X27,
+ MSM_CPU_8X50,
+ MSM_CPU_7X30,
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+int __init socinfo_init(void) __must_check;
+
+static inline int cpu_is_msm7x01(void)
+{
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X01;
+}
+
+static inline int cpu_is_msm7x25(void)
+{
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25;
+}
+
+static inline int cpu_is_msm7x27(void)
+{
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27;
+}
+
+static inline int cpu_is_qsd8x50(void)
+{
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X50;
+}
+
+static inline int cpu_is_msm7x30(void)
+{
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X30;
+}
+
+#endif
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 4855b8ca5101..02fbdca60525 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -1,6 +1,7 @@
/* linux/arch/arm/mach-msm/timer.c
*
* Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -25,21 +26,62 @@
#include <asm/mach/time.h>
#include <mach/msm_iomap.h>
-#define MSM_DGT_BASE (MSM_GPT_BASE + 0x10)
+#include "smd_private.h"
+#include "timer.h"
+
+enum {
+ MSM_TIMER_DEBUG_SYNC = 1U << 0,
+};
+static int msm_timer_debug_mask;
+module_param_named(debug_mask, msm_timer_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#if defined(CONFIG_ARCH_MSM7X30)
+#define MSM_GPT_BASE (MSM_TMR_BASE + 0x4)
+#define MSM_DGT_BASE (MSM_TMR_BASE + 0x24)
+#else
+#define MSM_GPT_BASE MSM_TMR_BASE
+#define MSM_DGT_BASE (MSM_TMR_BASE + 0x10)
+#endif
+
+#if defined(CONFIG_ARCH_MSM_ARM11)
#define MSM_DGT_SHIFT (5)
+#else
+#define MSM_DGT_SHIFT (0)
+#endif
#define TIMER_MATCH_VAL 0x0000
#define TIMER_COUNT_VAL 0x0004
#define TIMER_ENABLE 0x0008
-#define TIMER_ENABLE_CLR_ON_MATCH_EN 2
-#define TIMER_ENABLE_EN 1
-#define TIMER_CLEAR 0x000C
+#define TIMER_ENABLE_EN 1
-#define CSR_PROTECTION 0x0020
-#define CSR_PROTECTION_EN 1
+#if defined(CONFIG_ARCH_QSD8X50)
+#define DGT_HZ 4800000 /* Uses TCXO/4 (19.2 MHz / 4) */
+#elif defined(CONFIG_ARCH_MSM7X30)
+#define DGT_HZ 6144000 /* Uses LPXO/4 (24.576 MHz / 4) */
+#else
+#define DGT_HZ 19200000 /* Uses TCXO (19.2 MHz) */
+#endif
#define GPT_HZ 32768
-#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
+#define SCLK_HZ 32768
+
+#if defined(CONFIG_MSM_N_WAY_SMSM)
+/* Time Master State Bits */
+#define MASTER_BITS_PER_CPU 1
+#define MASTER_TIME_PENDING \
+ (0x01UL << (MASTER_BITS_PER_CPU * SMSM_APPS_STATE))
+
+/* Time Slave State Bits */
+#define SLAVE_TIME_REQUEST 0x0400
+#define SLAVE_TIME_POLL 0x0800
+#define SLAVE_TIME_INIT 0x1000
+#endif
+
+enum {
+ MSM_CLOCK_FLAGS_UNSTABLE_COUNT = 1U << 0,
+ MSM_CLOCK_FLAGS_ODD_MATCH_WRITE = 1U << 1,
+ MSM_CLOCK_FLAGS_DELAYED_WRITE_POST = 1U << 2,
+};
struct msm_clock {
struct clock_event_device clockevent;
@@ -48,6 +90,30 @@ struct msm_clock {
void __iomem *regbase;
uint32_t freq;
uint32_t shift;
+ uint32_t flags;
+ uint32_t write_delay;
+ uint32_t last_set;
+ uint32_t sleep_offset;
+ uint32_t alarm_vtime;
+ uint32_t non_sleep_offset;
+ uint32_t in_sync;
+ cycle_t stopped_tick;
+ int stopped;
+ uint32_t rollover_offset;
+ uint32_t last_sync_gpt;
+ u64 last_sync_jiffies;
+};
+enum {
+ MSM_CLOCK_GPT,
+ MSM_CLOCK_DGT,
+};
+static struct msm_clock msm_clocks[];
+static struct msm_clock *msm_active_clock;
+
+struct msm_timer_sync_data_t {
+ struct msm_clock *clock;
+ uint32_t timeout;
+ int exit_sleep;
};
static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
@@ -57,31 +123,82 @@ static irqreturn_t msm_timer_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static uint32_t msm_read_timer_count(struct msm_clock *clock)
+{
+ uint32_t t1, t2;
+ int loop_count = 0;
+
+ t1 = readl(clock->regbase + TIMER_COUNT_VAL);
+ if (!(clock->flags & MSM_CLOCK_FLAGS_UNSTABLE_COUNT))
+ return t1;
+ while (1) {
+ t2 = readl(clock->regbase + TIMER_COUNT_VAL);
+ if (t1 == t2)
+ return t1;
+ if (loop_count++ > 10) {
+ printk(KERN_ERR "msm_read_timer_count timer %s did not"
+ "stabilize %u != %u\n", clock->clockevent.name,
+ t2, t1);
+ return t2;
+ }
+ t1 = t2;
+ }
+}
+
static cycle_t msm_gpt_read(struct clocksource *cs)
{
- return readl(MSM_GPT_BASE + TIMER_COUNT_VAL);
+ struct msm_clock *clock = &msm_clocks[MSM_CLOCK_GPT];
+ if (clock->stopped)
+ return clock->stopped_tick;
+ else
+ return msm_read_timer_count(clock) + clock->sleep_offset;
}
static cycle_t msm_dgt_read(struct clocksource *cs)
{
- return readl(MSM_DGT_BASE + TIMER_COUNT_VAL) >> MSM_DGT_SHIFT;
+ struct msm_clock *clock = &msm_clocks[MSM_CLOCK_DGT];
+ if (clock->stopped)
+ return clock->stopped_tick >> MSM_DGT_SHIFT;
+ return (msm_read_timer_count(clock) + clock->sleep_offset)
+ >> MSM_DGT_SHIFT;
}
static int msm_timer_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
- struct msm_clock *clock = container_of(evt, struct msm_clock, clockevent);
- uint32_t now = readl(clock->regbase + TIMER_COUNT_VAL);
- uint32_t alarm = now + (cycles << clock->shift);
+ int i;
+ struct msm_clock *clock;
+ uint32_t now;
+ uint32_t alarm;
int late;
+ clock = container_of(evt, struct msm_clock, clockevent);
+ now = msm_read_timer_count(clock);
+ alarm = now + (cycles << clock->shift);
+ if (clock->flags & MSM_CLOCK_FLAGS_ODD_MATCH_WRITE)
+ while (now == clock->last_set)
+ now = msm_read_timer_count(clock);
writel(alarm, clock->regbase + TIMER_MATCH_VAL);
- now = readl(clock->regbase + TIMER_COUNT_VAL);
+ if (clock->flags & MSM_CLOCK_FLAGS_DELAYED_WRITE_POST) {
+ /* read the counter four extra times to make sure write posts
+ before reading the time */
+ for (i = 0; i < 4; i++)
+ readl(clock->regbase + TIMER_COUNT_VAL);
+ }
+ now = msm_read_timer_count(clock);
+ clock->last_set = now;
+ clock->alarm_vtime = alarm + clock->sleep_offset;
late = now - alarm;
- if (late >= (-2 << clock->shift) && late < DGT_HZ*5) {
- printk(KERN_NOTICE "msm_timer_set_next_event(%lu) clock %s, "
- "alarm already expired, now %x, alarm %x, late %d\n",
- cycles, clock->clockevent.name, now, alarm, late);
+ if (late >= (int)(-clock->write_delay << clock->shift) && late < DGT_HZ*5) {
+ static int print_limit = 10;
+ if (print_limit > 0) {
+ print_limit--;
+ printk(KERN_NOTICE "msm_timer_set_next_event(%lu) "
+ "clock %s, alarm already expired, now %x, "
+ "alarm %x, late %d%s\n",
+ cycles, clock->clockevent.name, now, alarm, late,
+ print_limit ? "" : " stop printing");
+ }
return -ETIME;
}
return 0;
@@ -90,23 +207,538 @@ static int msm_timer_set_next_event(unsigned long cycles,
static void msm_timer_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
- struct msm_clock *clock = container_of(evt, struct msm_clock, clockevent);
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_clock *clock;
+ unsigned long irq_flags;
+
+ clock = container_of(evt, struct msm_clock, clockevent);
+ local_irq_save(irq_flags);
+
switch (mode) {
case CLOCK_EVT_MODE_RESUME:
case CLOCK_EVT_MODE_PERIODIC:
break;
case CLOCK_EVT_MODE_ONESHOT:
+ clock->stopped = 0;
+ clock->sleep_offset = -msm_read_timer_count(clock) +
+ clock->stopped_tick;
+ msm_active_clock = clock;
writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
+ if (clock != gpt_clk)
+ writel(TIMER_ENABLE_EN,
+ gpt_clk->regbase + TIMER_ENABLE);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
+ msm_active_clock = NULL;
+ clock->in_sync = 0;
+ clock->stopped = 1;
+ clock->stopped_tick = msm_read_timer_count(clock) +
+ clock->sleep_offset;
writel(0, clock->regbase + TIMER_ENABLE);
+ if (clock != gpt_clk) {
+ gpt_clk->in_sync = 0;
+ writel(0, gpt_clk->regbase + TIMER_ENABLE);
+ }
break;
}
+ local_irq_restore(irq_flags);
}
+/*
+ * Retrieve the cycle count from sclk and optionally synchronize local clock
+ * with the sclk value.
+ *
+ * time_start and time_expired are callbacks that must be specified. The
+ * protocol uses them to detect timeout. The update callback is optional.
+ * If not NULL, update will be called so that it can update local clock.
+ *
+ * The function does not use the argument data directly; it passes data to
+ * the callbacks.
+ *
+ * Return value:
+ * 0: the operation failed
+ * >0: the slow clock value after time-sync
+ */
+#if defined(CONFIG_MSM_N_WAY_SMSM)
+static uint32_t msm_timer_do_sync_to_sclk(
+ void (*time_start)(struct msm_timer_sync_data_t *data),
+ bool (*time_expired)(struct msm_timer_sync_data_t *data),
+ void (*update)(struct msm_timer_sync_data_t *, uint32_t, uint32_t),
+ struct msm_timer_sync_data_t *data)
+{
+ uint32_t *smem_clock;
+ uint32_t smem_clock_val;
+ uint32_t state;
+
+ smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE, sizeof(uint32_t));
+ if (smem_clock == NULL) {
+ printk(KERN_ERR "no smem clock\n");
+ return 0;
+ }
+
+ state = smsm_get_state(SMSM_MODEM_STATE);
+ if ((state & SMSM_INIT) == 0) {
+ printk(KERN_ERR "smsm not initialized\n");
+ return 0;
+ }
+
+ time_start(data);
+ while ((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
+ MASTER_TIME_PENDING) {
+ if (time_expired(data)) {
+ printk(KERN_INFO "get_smem_clock: timeout 1 still "
+ "invalid state %x\n", state);
+ return 0;
+ }
+ }
+
+ smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_POLL | SLAVE_TIME_INIT,
+ SLAVE_TIME_REQUEST);
+
+ time_start(data);
+ while (!((state = smsm_get_state(SMSM_TIME_MASTER_DEM)) &
+ MASTER_TIME_PENDING)) {
+ if (time_expired(data)) {
+ printk(KERN_INFO "get_smem_clock: timeout 2 still "
+ "invalid state %x\n", state);
+ smem_clock_val = 0;
+ goto sync_sclk_exit;
+ }
+ }
+
+ smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST, SLAVE_TIME_POLL);
+
+ time_start(data);
+ do {
+ smem_clock_val = *smem_clock;
+ } while (smem_clock_val == 0 && !time_expired(data));
+
+ state = smsm_get_state(SMSM_TIME_MASTER_DEM);
+
+ if (smem_clock_val) {
+ if (update != NULL)
+ update(data, smem_clock_val, SCLK_HZ);
+
+ if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
+ printk(KERN_INFO
+ "get_smem_clock: state %x clock %u\n",
+ state, smem_clock_val);
+ } else {
+ printk(KERN_INFO "get_smem_clock: timeout state %x clock %u\n",
+ state, smem_clock_val);
+ }
+
+sync_sclk_exit:
+ smsm_change_state(SMSM_APPS_DEM, SLAVE_TIME_REQUEST | SLAVE_TIME_POLL,
+ SLAVE_TIME_INIT);
+ return smem_clock_val;
+}
+#else /* CONFIG_MSM_N_WAY_SMSM */
+static uint32_t msm_timer_do_sync_to_sclk(
+ void (*time_start)(struct msm_timer_sync_data_t *data),
+ bool (*time_expired)(struct msm_timer_sync_data_t *data),
+ void (*update)(struct msm_timer_sync_data_t *, uint32_t, uint32_t),
+ struct msm_timer_sync_data_t *data)
+{
+ uint32_t *smem_clock;
+ uint32_t smem_clock_val;
+ uint32_t last_state;
+ uint32_t state;
+
+ smem_clock = smem_alloc(SMEM_SMEM_SLOW_CLOCK_VALUE,
+ sizeof(uint32_t));
+
+ if (smem_clock == NULL) {
+ printk(KERN_ERR "no smem clock\n");
+ return 0;
+ }
+
+ last_state = state = smsm_get_state(SMSM_MODEM_STATE);
+ smem_clock_val = *smem_clock;
+ if (smem_clock_val) {
+ printk(KERN_INFO "get_smem_clock: invalid start state %x "
+ "clock %u\n", state, smem_clock_val);
+ smsm_change_state(SMSM_APPS_STATE,
+ SMSM_TIMEWAIT, SMSM_TIMEINIT);
+
+ time_start(data);
+ while (*smem_clock != 0 && !time_expired(data))
+ ;
+
+ smem_clock_val = *smem_clock;
+ if (smem_clock_val) {
+ printk(KERN_INFO "get_smem_clock: timeout still "
+ "invalid state %x clock %u\n",
+ state, smem_clock_val);
+ return 0;
+ }
+ }
+
+ time_start(data);
+ smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEINIT, SMSM_TIMEWAIT);
+ do {
+ smem_clock_val = *smem_clock;
+ state = smsm_get_state(SMSM_MODEM_STATE);
+ if (state != last_state) {
+ last_state = state;
+ if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
+ printk(KERN_INFO
+ "get_smem_clock: state %x clock %u\n",
+ state, smem_clock_val);
+ }
+ } while (smem_clock_val == 0 && !time_expired(data));
+
+ if (smem_clock_val) {
+ if (update != NULL)
+ update(data, smem_clock_val, SCLK_HZ);
+ } else {
+ printk(KERN_INFO "get_smem_clock: timeout state %x clock %u\n",
+ state, smem_clock_val);
+ }
+
+ smsm_change_state(SMSM_APPS_STATE, SMSM_TIMEWAIT, SMSM_TIMEINIT);
+ time_start(data);
+ while (*smem_clock != 0 && !time_expired(data))
+ ;
+
+ if (*smem_clock)
+ printk(KERN_INFO "get_smem_clock: exit timeout state %x "
+ "clock %u\n", state, *smem_clock);
+ return smem_clock_val;
+}
+#endif /* CONFIG_MSM_N_WAY_SMSM */
+
+/*
+ * Callback function that initializes the timeout value.
+ */
+static void msm_timer_sync_to_sclk_time_start(
+ struct msm_timer_sync_data_t *data)
+{
+ /* approx 1/128th of a second */
+ uint32_t delta = data->clock->freq >> 7 << data->clock->shift;
+ data->timeout = msm_read_timer_count(data->clock) + delta;
+}
+
+/*
+ * Callback function that checks the timeout.
+ */
+static bool msm_timer_sync_to_sclk_time_expired(
+ struct msm_timer_sync_data_t *data)
+{
+ uint32_t delta = msm_read_timer_count(data->clock) - data->timeout;
+ return ((int32_t) delta) > 0;
+}
+
+/*
+ * Callback function that updates local clock from the specified source clock
+ * value and frequency.
+ */
+static void msm_timer_sync_update(struct msm_timer_sync_data_t *data,
+ uint32_t src_clk_val, uint32_t src_clk_freq)
+{
+ struct msm_clock *dst_clk = data->clock;
+ uint32_t dst_clk_val = msm_read_timer_count(dst_clk);
+ uint32_t new_offset;
+
+ if ((dst_clk->freq << dst_clk->shift) == src_clk_freq) {
+ new_offset = src_clk_val - dst_clk_val;
+ } else {
+ uint64_t temp;
+
+ /* separate multiplication and division steps to reduce
+ rounding error */
+ temp = src_clk_val;
+ temp *= dst_clk->freq << dst_clk->shift;
+ do_div(temp, src_clk_freq);
+
+ new_offset = (uint32_t)(temp) - dst_clk_val;
+ }
+
+ if (dst_clk->sleep_offset + dst_clk->non_sleep_offset != new_offset) {
+ if (data->exit_sleep)
+ dst_clk->sleep_offset =
+ new_offset - dst_clk->non_sleep_offset;
+ else
+ dst_clk->non_sleep_offset =
+ new_offset - dst_clk->sleep_offset;
+
+ if (msm_timer_debug_mask & MSM_TIMER_DEBUG_SYNC)
+ printk(KERN_INFO "sync clock %s: "
+ "src %u, new offset %u + %u\n",
+ dst_clk->clocksource.name, src_clk_val,
+ dst_clk->sleep_offset,
+ dst_clk->non_sleep_offset);
+ }
+}
+
+/*
+ * Synchronize GPT clock with sclk.
+ */
+static void msm_timer_sync_gpt_to_sclk(int exit_sleep)
+{
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_timer_sync_data_t data;
+ uint32_t ret;
+
+ if (gpt_clk->in_sync)
+ return;
+
+ data.clock = gpt_clk;
+ data.timeout = 0;
+ data.exit_sleep = exit_sleep;
+
+ ret = msm_timer_do_sync_to_sclk(
+ msm_timer_sync_to_sclk_time_start,
+ msm_timer_sync_to_sclk_time_expired,
+ msm_timer_sync_update,
+ &data);
+
+ if (ret)
+ gpt_clk->in_sync = 1;
+}
+
+/*
+ * Synchronize clock with GPT clock.
+ */
+static void msm_timer_sync_to_gpt(struct msm_clock *clock, int exit_sleep)
+{
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_timer_sync_data_t data;
+ uint32_t gpt_clk_val;
+ u64 gpt_period = (1ULL << 32) * HZ / GPT_HZ;
+ u64 now = get_jiffies_64();
+
+ BUG_ON(clock == gpt_clk);
+
+ if (clock->in_sync &&
+ (now - clock->last_sync_jiffies < (gpt_period >> 1)))
+ return;
+
+ gpt_clk_val = msm_read_timer_count(gpt_clk)
+ + gpt_clk->sleep_offset + gpt_clk->non_sleep_offset;
+
+ if (exit_sleep && gpt_clk_val < clock->last_sync_gpt)
+ clock->non_sleep_offset -= clock->rollover_offset;
+
+ data.clock = clock;
+ data.timeout = 0;
+ data.exit_sleep = exit_sleep;
+
+ msm_timer_sync_update(&data, gpt_clk_val, GPT_HZ);
+
+ clock->in_sync = 1;
+ clock->last_sync_gpt = gpt_clk_val;
+ clock->last_sync_jiffies = now;
+}
+
+static void msm_timer_reactivate_alarm(struct msm_clock *clock)
+{
+ long alarm_delta = clock->alarm_vtime - clock->sleep_offset -
+ msm_read_timer_count(clock);
+ alarm_delta >>= clock->shift;
+ if (alarm_delta < (long)clock->write_delay + 4)
+ alarm_delta = clock->write_delay + 4;
+ while (msm_timer_set_next_event(alarm_delta, &clock->clockevent))
+ ;
+}
+
+int64_t msm_timer_enter_idle(void)
+{
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_clock *clock = msm_active_clock;
+ uint32_t alarm;
+ uint32_t count;
+ int32_t delta;
+
+ BUG_ON(clock != &msm_clocks[MSM_CLOCK_GPT] &&
+ clock != &msm_clocks[MSM_CLOCK_DGT]);
+
+ msm_timer_sync_gpt_to_sclk(0);
+ if (clock != gpt_clk)
+ msm_timer_sync_to_gpt(clock, 0);
+
+ count = msm_read_timer_count(clock);
+ if (clock->stopped++ == 0)
+ clock->stopped_tick = count + clock->sleep_offset;
+ alarm = readl(clock->regbase + TIMER_MATCH_VAL);
+ delta = alarm - count;
+ if (delta <= -(int32_t)((clock->freq << clock->shift) >> 10)) {
+ /* timer should have triggered 1ms ago */
+ printk(KERN_ERR "msm_timer_enter_idle: timer late %d, "
+ "reprogram it\n", delta);
+ msm_timer_reactivate_alarm(clock);
+ }
+ if (delta <= 0)
+ return 0;
+ return clocksource_cyc2ns((alarm - count) >> clock->shift , clock->clocksource.mult, clock->clocksource.shift);
+}
+
+void msm_timer_exit_idle(int low_power)
+{
+ struct msm_clock *gpt_clk = &msm_clocks[MSM_CLOCK_GPT];
+ struct msm_clock *clock = msm_active_clock;
+ uint32_t enabled;
+
+ BUG_ON(clock != &msm_clocks[MSM_CLOCK_GPT] &&
+ clock != &msm_clocks[MSM_CLOCK_DGT]);
+
+ if (!low_power)
+ goto exit_idle_exit;
+
+ enabled = readl(gpt_clk->regbase + TIMER_ENABLE) & TIMER_ENABLE_EN;
+ if (!enabled)
+ writel(TIMER_ENABLE_EN, gpt_clk->regbase + TIMER_ENABLE);
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+ gpt_clk->in_sync = 0;
+#else
+ gpt_clk->in_sync = gpt_clk->in_sync && enabled;
+#endif
+ msm_timer_sync_gpt_to_sclk(1);
+
+ if (clock == gpt_clk)
+ goto exit_idle_alarm;
+
+ enabled = readl(clock->regbase + TIMER_ENABLE) & TIMER_ENABLE_EN;
+ if (!enabled)
+ writel(TIMER_ENABLE_EN, clock->regbase + TIMER_ENABLE);
+
+#if defined(CONFIG_ARCH_MSM_SCORPION)
+ clock->in_sync = 0;
+#else
+ clock->in_sync = clock->in_sync && enabled;
+#endif
+ msm_timer_sync_to_gpt(clock, 1);
+
+exit_idle_alarm:
+ msm_timer_reactivate_alarm(clock);
+
+exit_idle_exit:
+ clock->stopped--;
+}
+
+/*
+ * Callback function that initializes the timeout value.
+ */
+static void msm_timer_get_sclk_time_start(
+ struct msm_timer_sync_data_t *data)
+{
+ data->timeout = 10000;
+}
+
+/*
+ * Callback function that checks the timeout.
+ */
+static bool msm_timer_get_sclk_time_expired(
+ struct msm_timer_sync_data_t *data)
+{
+ return --data->timeout <= 0;
+}
+
+/*
+ * Retrieve the cycle count from the sclk and convert it into
+ * nanoseconds.
+ *
+ * On exit, if period is not NULL, it contains the period of the
+ * sclk in nanoseconds, i.e. how long the cycle count wraps around.
+ *
+ * Return value:
+ * 0: the operation failed; period is not set either
+ * >0: time in nanoseconds
+ */
+int64_t msm_timer_get_sclk_time(int64_t *period)
+{
+ struct msm_timer_sync_data_t data;
+ uint32_t clock_value;
+ int64_t tmp;
+
+ memset(&data, 0, sizeof(data));
+
+ clock_value = msm_timer_do_sync_to_sclk(
+ msm_timer_get_sclk_time_start,
+ msm_timer_get_sclk_time_expired,
+ NULL,
+ &data);
+
+ if (!clock_value)
+ return 0;
+
+ if (period) {
+ tmp = 1LL << 32;
+ tmp = tmp * NSEC_PER_SEC / SCLK_HZ;
+ *period = tmp;
+ }
+
+ tmp = (int64_t)clock_value;
+ tmp = tmp * NSEC_PER_SEC / SCLK_HZ;
+ return tmp;
+}
+
+int __init msm_timer_init_time_sync(void)
+{
+#if defined(CONFIG_MSM_N_WAY_SMSM)
+ int ret = smsm_change_intr_mask(SMSM_TIME_MASTER_DEM, 0xFFFFFFFF, 0);
+
+ if (ret) {
+ printk(KERN_ERR "%s: failed to clear interrupt mask, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ smsm_change_state(SMSM_APPS_DEM,
+ SLAVE_TIME_REQUEST | SLAVE_TIME_POLL, SLAVE_TIME_INIT);
+#endif
+
+ return 0;
+}
+
+unsigned long long sched_clock(void)
+{
+ static cycle_t saved_ticks;
+ static int saved_ticks_valid;
+ static unsigned long long base;
+ static unsigned long long last_result;
+
+ unsigned long irq_flags;
+ static cycle_t last_ticks;
+ cycle_t ticks;
+ static unsigned long long result;
+ struct clocksource *cs;
+ struct msm_clock *clock = msm_active_clock;
+
+ local_irq_save(irq_flags);
+ if (clock) {
+ cs = &clock->clocksource;
+
+ last_ticks = saved_ticks;
+ saved_ticks = ticks = cs->read(cs);
+ if (!saved_ticks_valid) {
+ saved_ticks_valid = 1;
+ last_ticks = ticks;
+ base -= clocksource_cyc2ns(ticks, cs->mult, cs->shift);
+ }
+ if (ticks < last_ticks) {
+ base += clocksource_cyc2ns(cs->mask, cs->mult, cs->shift);
+ base += clocksource_cyc2ns(1, cs->mult, cs->shift);
+ }
+ last_result = result = clocksource_cyc2ns(ticks, cs->mult, cs->shift) + base;
+ } else {
+ base = result = last_result;
+ saved_ticks_valid = 0;
+ }
+ local_irq_restore(irq_flags);
+ return result;
+}
+
+#ifdef CONFIG_MSM7X00A_USE_GP_TIMER
+ #define DG_TIMER_RATING 100
+#else
+ #define DG_TIMER_RATING 300
+#endif
+
static struct msm_clock msm_clocks[] = {
- {
+ [MSM_CLOCK_GPT] = {
.clockevent = {
.name = "gp_timer",
.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -120,46 +752,54 @@ static struct msm_clock msm_clocks[] = {
.rating = 200,
.read = msm_gpt_read,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 24,
+ .shift = 17,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
.irq = {
.name = "gp_timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING,
+ .flags = IRQF_DISABLED | IRQF_TIMER |
+ IRQF_TRIGGER_RISING,
.handler = msm_timer_interrupt,
.dev_id = &msm_clocks[0].clockevent,
.irq = INT_GP_TIMER_EXP
},
.regbase = MSM_GPT_BASE,
- .freq = GPT_HZ
+ .freq = GPT_HZ,
+ .flags =
+ MSM_CLOCK_FLAGS_UNSTABLE_COUNT |
+ MSM_CLOCK_FLAGS_ODD_MATCH_WRITE |
+ MSM_CLOCK_FLAGS_DELAYED_WRITE_POST,
+ .write_delay = 9,
},
- {
+ [MSM_CLOCK_DGT] = {
.clockevent = {
.name = "dg_timer",
.features = CLOCK_EVT_FEAT_ONESHOT,
.shift = 32 + MSM_DGT_SHIFT,
- .rating = 300,
+ .rating = DG_TIMER_RATING,
.set_next_event = msm_timer_set_next_event,
.set_mode = msm_timer_set_mode,
},
.clocksource = {
.name = "dg_timer",
- .rating = 300,
+ .rating = DG_TIMER_RATING,
.read = msm_dgt_read,
- .mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT)),
+ .mask = CLOCKSOURCE_MASK((32-MSM_DGT_SHIFT)),
.shift = 24 - MSM_DGT_SHIFT,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
},
.irq = {
.name = "dg_timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_TRIGGER_RISING,
+ .flags = IRQF_DISABLED | IRQF_TIMER |
+ IRQF_TRIGGER_RISING,
.handler = msm_timer_interrupt,
.dev_id = &msm_clocks[1].clockevent,
.irq = INT_DEBUG_TIMER_EXP
},
.regbase = MSM_DGT_BASE,
.freq = DGT_HZ >> MSM_DGT_SHIFT,
- .shift = MSM_DGT_SHIFT
+ .shift = MSM_DGT_SHIFT,
+ .write_delay = 2,
}
};
@@ -173,15 +813,27 @@ static void __init msm_timer_init(void)
struct clock_event_device *ce = &clock->clockevent;
struct clocksource *cs = &clock->clocksource;
writel(0, clock->regbase + TIMER_ENABLE);
- writel(0, clock->regbase + TIMER_CLEAR);
writel(~0, clock->regbase + TIMER_MATCH_VAL);
+ if ((clock->freq << clock->shift) == GPT_HZ) {
+ clock->rollover_offset = 0;
+ } else {
+ uint64_t temp;
+
+ temp = clock->freq << clock->shift;
+ temp <<= 32;
+ temp /= GPT_HZ;
+
+ clock->rollover_offset = (uint32_t) temp;
+ }
+
ce->mult = div_sc(clock->freq, NSEC_PER_SEC, ce->shift);
/* allow at least 10 seconds to notice that the timer wrapped */
ce->max_delta_ns =
clockevent_delta2ns(0xf0000000 >> clock->shift, ce);
- /* 4 gets rounded down to 3 */
- ce->min_delta_ns = clockevent_delta2ns(4, ce);
+ /* ticks gets rounded down by one */
+ ce->min_delta_ns =
+ clockevent_delta2ns(clock->write_delay + 4, ce);
ce->cpumask = cpumask_of(0);
cs->mult = clocksource_hz2mult(clock->freq, cs->shift);
diff --git a/arch/arm/mach-msm/timer.h b/arch/arm/mach-msm/timer.h
new file mode 100644
index 000000000000..161751e99c36
--- /dev/null
+++ b/arch/arm/mach-msm/timer.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Code Aurora nor
+ * the names of its contributors may be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_TIMER_H_
+#define _ARCH_ARM_MACH_MSM_TIMER_H_
+
+extern struct sys_timer msm_timer;
+
+int64_t msm_timer_enter_idle(void);
+void msm_timer_exit_idle(int low_power);
+int64_t msm_timer_get_sclk_time(int64_t *period);
+int msm_timer_init_time_sync(void);
+#endif
diff --git a/arch/arm/mach-msm/vreg.c b/arch/arm/mach-msm/vreg.c
index fcb0b9f25684..cb7a71b55f43 100644
--- a/arch/arm/mach-msm/vreg.c
+++ b/arch/arm/mach-msm/vreg.c
@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/vreg.c
*
* Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -18,49 +19,78 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/debugfs.h>
+#include <linux/string.h>
#include <mach/vreg.h>
#include "proc_comm.h"
+#if defined(CONFIG_MSM_VREG_SWITCH_INVERTED)
+#define VREG_SWITCH_ENABLE 0
+#define VREG_SWITCH_DISABLE 1
+#else
+#define VREG_SWITCH_ENABLE 1
+#define VREG_SWITCH_DISABLE 0
+#endif
+
struct vreg {
const char *name;
unsigned id;
+ int status;
+ unsigned refcnt;
};
-#define VREG(_name, _id) { .name = _name, .id = _id, }
+#define VREG(_name, _id, _status, _refcnt) \
+ { .name = _name, .id = _id, .status = _status, .refcnt = _refcnt }
static struct vreg vregs[] = {
- VREG("msma", 0),
- VREG("msmp", 1),
- VREG("msme1", 2),
- VREG("msmc1", 3),
- VREG("msmc2", 4),
- VREG("gp3", 5),
- VREG("msme2", 6),
- VREG("gp4", 7),
- VREG("gp1", 8),
- VREG("tcxo", 9),
- VREG("pa", 10),
- VREG("rftx", 11),
- VREG("rfrx1", 12),
- VREG("rfrx2", 13),
- VREG("synt", 14),
- VREG("wlan", 15),
- VREG("usb", 16),
- VREG("boost", 17),
- VREG("mmc", 18),
- VREG("ruim", 19),
- VREG("msmc0", 20),
- VREG("gp2", 21),
- VREG("gp5", 22),
- VREG("gp6", 23),
- VREG("rf", 24),
- VREG("rf_vco", 26),
- VREG("mpll", 27),
- VREG("s2", 28),
- VREG("s3", 29),
- VREG("rfubm", 30),
- VREG("ncp", 31),
+ VREG("msma", 0, 0, 0),
+ VREG("msmp", 1, 0, 0),
+ VREG("msme1", 2, 0, 0),
+ VREG("msmc1", 3, 0, 0),
+ VREG("msmc2", 4, 0, 0),
+ VREG("gp3", 5, 0, 0),
+ VREG("msme2", 6, 0, 0),
+ VREG("gp4", 7, 0, 0),
+ VREG("gp1", 8, 0, 0),
+ VREG("tcxo", 9, 0, 0),
+ VREG("pa", 10, 0, 0),
+ VREG("rftx", 11, 0, 0),
+ VREG("rfrx1", 12, 0, 0),
+ VREG("rfrx2", 13, 0, 0),
+ VREG("synt", 14, 0, 0),
+ VREG("wlan", 15, 0, 0),
+ VREG("usb", 16, 0, 0),
+ VREG("boost", 17, 0, 0),
+ VREG("mmc", 18, 0, 0),
+ VREG("ruim", 19, 0, 0),
+ VREG("msmc0", 20, 0, 0),
+ VREG("gp2", 21, 0, 0),
+ VREG("gp5", 22, 0, 0),
+ VREG("gp6", 23, 0, 0),
+ VREG("rf", 24, 0, 0),
+ VREG("rf_vco", 26, 0, 0),
+ VREG("mpll", 27, 0, 0),
+ VREG("s2", 28, 0, 0),
+ VREG("s3", 29, 0, 0),
+ VREG("rfubm", 30, 0, 0),
+ VREG("ncp", 31, 0, 0),
+ VREG("gp7", 32, 0, 0),
+ VREG("gp8", 33, 0, 0),
+ VREG("gp9", 34, 0, 0),
+ VREG("gp10", 35, 0, 0),
+ VREG("gp11", 36, 0, 0),
+ VREG("gp12", 37, 0, 0),
+ VREG("gp13", 38, 0, 0),
+ VREG("gp14", 39, 0, 0),
+ VREG("gp15", 40, 0, 0),
+ VREG("gp16", 41, 0, 0),
+ VREG("gp17", 42, 0, 0),
+ VREG("s4", 43, 0, 0),
+ VREG("usb2", 44, 0, 0),
+ VREG("wlan2", 45, 0, 0),
+ VREG("xo_out", 46, 0, 0),
+ VREG("lvsw0", 47, 0, 0),
+ VREG("lvsw1", 48, 0, 0),
};
struct vreg *vreg_get(struct device *dev, const char *id)
@@ -70,8 +100,9 @@ struct vreg *vreg_get(struct device *dev, const char *id)
if (!strcmp(vregs[n].name, id))
return vregs + n;
}
- return 0;
+ return ERR_PTR(-ENOENT);
}
+EXPORT_SYMBOL(vreg_get);
void vreg_put(struct vreg *vreg)
{
@@ -80,22 +111,44 @@ void vreg_put(struct vreg *vreg)
int vreg_enable(struct vreg *vreg)
{
unsigned id = vreg->id;
- unsigned enable = 1;
- return msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+ int enable = VREG_SWITCH_ENABLE;
+
+ if (vreg->refcnt == 0)
+ vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+
+ if ((vreg->refcnt < UINT_MAX) && (!vreg->status))
+ vreg->refcnt++;
+
+ return vreg->status;
}
+EXPORT_SYMBOL(vreg_enable);
-void vreg_disable(struct vreg *vreg)
+int vreg_disable(struct vreg *vreg)
{
unsigned id = vreg->id;
- unsigned enable = 0;
- msm_proc_comm(PCOM_VREG_SWITCH, &id, &enable);
+ int disable = VREG_SWITCH_DISABLE;
+
+ if (!vreg->refcnt)
+ return 0;
+
+ if (vreg->refcnt == 1)
+ vreg->status = msm_proc_comm(PCOM_VREG_SWITCH, &id, &disable);
+
+ if (!vreg->status)
+ vreg->refcnt--;
+
+ return vreg->status;
}
+EXPORT_SYMBOL(vreg_disable);
int vreg_set_level(struct vreg *vreg, unsigned mv)
{
unsigned id = vreg->id;
- return msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
+
+ vreg->status = msm_proc_comm(PCOM_VREG_SET_LEVEL, &id, &mv);
+ return vreg->status;
}
+EXPORT_SYMBOL(vreg_set_level);
#if defined(CONFIG_DEBUG_FS)
@@ -118,24 +171,59 @@ static int vreg_debug_set(void *data, u64 val)
static int vreg_debug_get(void *data, u64 *val)
{
- return -ENOSYS;
+ struct vreg *vreg = data;
+
+ if (!vreg->status)
+ *val = 0;
+ else
+ *val = 1;
+
+ return 0;
+}
+
+static int vreg_debug_count_set(void *data, u64 val)
+{
+ struct vreg *vreg = data;
+ if (val > UINT_MAX)
+ val = UINT_MAX;
+ vreg->refcnt = val;
+ return 0;
+}
+
+static int vreg_debug_count_get(void *data, u64 *val)
+{
+ struct vreg *vreg = data;
+
+ *val = vreg->refcnt;
+
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vreg_fops, vreg_debug_get, vreg_debug_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(vreg_count_fops, vreg_debug_count_get,
+ vreg_debug_count_set, "%llu\n");
static int __init vreg_debug_init(void)
{
struct dentry *dent;
int n;
+ char name[32];
+ const char *refcnt_name = "_refcnt";
dent = debugfs_create_dir("vreg", 0);
if (IS_ERR(dent))
return 0;
- for (n = 0; n < ARRAY_SIZE(vregs); n++)
+ for (n = 0; n < ARRAY_SIZE(vregs); n++) {
(void) debugfs_create_file(vregs[n].name, 0644,
dent, vregs + n, &vreg_fops);
+ strlcpy(name, vregs[n].name, sizeof(name));
+ strlcat(name, refcnt_name, sizeof(name));
+ (void) debugfs_create_file(name, 0644,
+ dent, vregs + n, &vreg_count_fops);
+ }
+
return 0;
}
diff --git a/arch/arm/mach-mx1/clock.c b/arch/arm/mach-mx1/clock.c
index d1b588519ad2..6cf2d4a7511d 100644
--- a/arch/arm/mach-mx1/clock.c
+++ b/arch/arm/mach-mx1/clock.c
@@ -570,7 +570,6 @@ static struct clk_lookup lookups[] __initdata = {
int __init mx1_clocks_init(unsigned long fref)
{
unsigned int reg;
- int i;
/* disable clocks we are able to */
__raw_writel(0, SCM_GCCR);
@@ -592,8 +591,7 @@ int __init mx1_clocks_init(unsigned long fref)
reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
clko_clk.parent = (struct clk *)clko_clocks[reg];
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
clk_enable(&hclk);
clk_enable(&fclk);
diff --git a/arch/arm/mach-mx2/clock_imx21.c b/arch/arm/mach-mx2/clock_imx21.c
index 91901b5d56c2..e82b489d1215 100644
--- a/arch/arm/mach-mx2/clock_imx21.c
+++ b/arch/arm/mach-mx2/clock_imx21.c
@@ -968,7 +968,6 @@ static struct clk_lookup lookups[] = {
*/
int __init mx21_clocks_init(unsigned long lref, unsigned long href)
{
- int i;
u32 cscr;
external_low_reference = lref;
@@ -986,8 +985,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href)
else
spll_clk.parent = &fpm_clk;
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
/* Turn off all clock gates */
__raw_writel(0, CCM_PCCR0);
diff --git a/arch/arm/mach-mx2/clock_imx27.c b/arch/arm/mach-mx2/clock_imx27.c
index b010bf9ceaab..18c53a6487fa 100644
--- a/arch/arm/mach-mx2/clock_imx27.c
+++ b/arch/arm/mach-mx2/clock_imx27.c
@@ -719,7 +719,6 @@ static void __init to2_adjust_clocks(void)
int __init mx27_clocks_init(unsigned long fref)
{
u32 cscr = __raw_readl(CCM_CSCR);
- int i;
external_high_reference = fref;
@@ -736,8 +735,7 @@ int __init mx27_clocks_init(unsigned long fref)
to2_adjust_clocks();
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
/* Turn off all clocks we do not need */
__raw_writel(0, CCM_PCCR0);
diff --git a/arch/arm/mach-mx2/mxt_td60.c b/arch/arm/mach-mx2/mxt_td60.c
index 03dbbdc98955..8bcc1a5b8829 100644
--- a/arch/arm/mach-mx2/mxt_td60.c
+++ b/arch/arm/mach-mx2/mxt_td60.c
@@ -58,21 +58,6 @@ static unsigned int mxt_td60_pins[] __initdata = {
PE9_PF_UART3_RXD,
PE10_PF_UART3_CTS,
PE11_PF_UART3_RTS,
- /* UART3 */
- PB26_AF_UART4_RTS,
- PB28_AF_UART4_TXD,
- PB29_AF_UART4_CTS,
- PB31_AF_UART4_RXD,
- /* UART4 */
- PB18_AF_UART5_TXD,
- PB19_AF_UART5_RXD,
- PB20_AF_UART5_CTS,
- PB21_AF_UART5_RTS,
- /* UART5 */
- PB10_AF_UART6_TXD,
- PB12_AF_UART6_CTS,
- PB11_AF_UART6_RXD,
- PB13_AF_UART6_RTS,
/* FEC */
PD0_AIN_FEC_TXD0,
PD1_AIN_FEC_TXD1,
@@ -261,12 +246,6 @@ static struct imxuart_platform_data uart_pdata[] = {
.flags = IMXUART_HAVE_RTSCTS,
}, {
.flags = IMXUART_HAVE_RTSCTS,
- }, {
- .flags = IMXUART_HAVE_RTSCTS,
- }, {
- .flags = IMXUART_HAVE_RTSCTS,
- }, {
- .flags = IMXUART_HAVE_RTSCTS,
},
};
@@ -278,9 +257,6 @@ static void __init mxt_td60_board_init(void)
mxc_register_device(&mxc_uart_device0, &uart_pdata[0]);
mxc_register_device(&mxc_uart_device1, &uart_pdata[1]);
mxc_register_device(&mxc_uart_device2, &uart_pdata[2]);
- mxc_register_device(&mxc_uart_device3, &uart_pdata[3]);
- mxc_register_device(&mxc_uart_device4, &uart_pdata[4]);
- mxc_register_device(&mxc_uart_device5, &uart_pdata[5]);
mxc_register_device(&mxc_nand_device, &mxt_td60_nand_board_info);
i2c_register_board_info(0, mxt_td60_i2c_devices,
diff --git a/arch/arm/mach-mx25/clock.c b/arch/arm/mach-mx25/clock.c
index ef26951a5275..66916f104812 100644
--- a/arch/arm/mach-mx25/clock.c
+++ b/arch/arm/mach-mx25/clock.c
@@ -173,6 +173,7 @@ DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL);
DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL);
DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL);
DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL);
+DEFINE_CLOCK(fec_clk, 0, CCM_CGCR0, 23, get_rate_ipg, NULL);
#define _REGISTER_CLOCK(d, n, c) \
{ \
@@ -204,15 +205,12 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
_REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
_REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
+ _REGISTER_CLOCK("fec.0", NULL, fec_clk)
};
int __init mx25_clocks_init(unsigned long fref)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
-
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
return 0;
diff --git a/arch/arm/mach-mx25/devices.c b/arch/arm/mach-mx25/devices.c
index 63511de3a559..9fdeea1c083b 100644
--- a/arch/arm/mach-mx25/devices.c
+++ b/arch/arm/mach-mx25/devices.c
@@ -419,3 +419,22 @@ int __init mxc_register_gpios(void)
return mxc_gpio_init(imx_gpio_ports, ARRAY_SIZE(imx_gpio_ports));
}
+static struct resource mx25_fec_resources[] = {
+ {
+ .start = MX25_FEC_BASE_ADDR,
+ .end = MX25_FEC_BASE_ADDR + 0xfff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MX25_INT_FEC,
+ .end = MX25_INT_FEC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device mx25_fec_device = {
+ .name = "fec",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(mx25_fec_resources),
+ .resource = mx25_fec_resources,
+};
diff --git a/arch/arm/mach-mx25/devices.h b/arch/arm/mach-mx25/devices.h
index fe6bf88ad1dd..fe5420fcd11f 100644
--- a/arch/arm/mach-mx25/devices.h
+++ b/arch/arm/mach-mx25/devices.h
@@ -17,3 +17,4 @@ extern struct platform_device mxc_keypad_device;
extern struct platform_device mxc_i2c_device0;
extern struct platform_device mxc_i2c_device1;
extern struct platform_device mxc_i2c_device2;
+extern struct platform_device mx25_fec_device;
diff --git a/arch/arm/mach-mx25/mx25pdk.c b/arch/arm/mach-mx25/mx25pdk.c
index d23ae571c03f..921bc99ea231 100644
--- a/arch/arm/mach-mx25/mx25pdk.c
+++ b/arch/arm/mach-mx25/mx25pdk.c
@@ -18,10 +18,11 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/gpio.h>
-#include <linux/smsc911x.h>
+#include <linux/fec.h>
#include <linux/platform_device.h>
#include <mach/hardware.h>
@@ -35,16 +36,57 @@
#include <mach/mx25.h>
#include <mach/mxc_nand.h>
#include "devices.h"
-#include <mach/iomux-v3.h>
+#include <mach/iomux.h>
static struct imxuart_platform_data uart_pdata = {
.flags = IMXUART_HAVE_RTSCTS,
};
+static struct pad_desc mx25pdk_pads[] = {
+ MX25_PAD_FEC_MDC__FEC_MDC,
+ MX25_PAD_FEC_MDIO__FEC_MDIO,
+ MX25_PAD_FEC_TDATA0__FEC_TDATA0,
+ MX25_PAD_FEC_TDATA1__FEC_TDATA1,
+ MX25_PAD_FEC_TX_EN__FEC_TX_EN,
+ MX25_PAD_FEC_RDATA0__FEC_RDATA0,
+ MX25_PAD_FEC_RDATA1__FEC_RDATA1,
+ MX25_PAD_FEC_RX_DV__FEC_RX_DV,
+ MX25_PAD_FEC_TX_CLK__FEC_TX_CLK,
+ MX25_PAD_A17__GPIO_2_3, /* FEC_EN, GPIO 35 */
+ MX25_PAD_D12__GPIO_4_8, /* FEC_RESET_B, GPIO 104 */
+};
+
+static struct fec_platform_data mx25_fec_pdata = {
+ .phy = PHY_INTERFACE_MODE_RMII,
+};
+
+#define FEC_ENABLE_GPIO 35
+#define FEC_RESET_B_GPIO 104
+
+static void __init mx25pdk_fec_reset(void)
+{
+ gpio_request(FEC_ENABLE_GPIO, "FEC PHY enable");
+ gpio_request(FEC_RESET_B_GPIO, "FEC PHY reset");
+
+ gpio_direction_output(FEC_ENABLE_GPIO, 0); /* drop PHY power */
+ gpio_direction_output(FEC_RESET_B_GPIO, 0); /* assert reset */
+ udelay(2);
+
+ /* turn on PHY power and lift reset */
+ gpio_set_value(FEC_ENABLE_GPIO, 1);
+ gpio_set_value(FEC_RESET_B_GPIO, 1);
+}
+
static void __init mx25pdk_init(void)
{
+ mxc_iomux_v3_setup_multiple_pads(mx25pdk_pads,
+ ARRAY_SIZE(mx25pdk_pads));
+
mxc_register_device(&mxc_uart_device0, &uart_pdata);
mxc_register_device(&mxc_usbh2, NULL);
+
+ mx25pdk_fec_reset();
+ mxc_register_device(&mx25_fec_device, &mx25_fec_pdata);
}
static void __init mx25pdk_timer_init(void)
diff --git a/arch/arm/mach-mx3/Kconfig b/arch/arm/mach-mx3/Kconfig
index ea8ed109a7c2..28294416b0af 100644
--- a/arch/arm/mach-mx3/Kconfig
+++ b/arch/arm/mach-mx3/Kconfig
@@ -49,6 +49,7 @@ config MACH_PCM037_EET
config MACH_MX31LITE
bool "Support MX31 LITEKIT (LogicPD)"
select ARCH_MX31
+ select MXC_ULPI if USB_ULPI
help
Include support for MX31 LITEKIT platform. This includes specific
configurations for the board and its peripherals.
@@ -63,7 +64,7 @@ config MACH_MX31_3DS
config MACH_MX31MOBOARD
bool "Support mx31moboard platforms (EPFL Mobots group)"
select ARCH_MX31
- select MXC_ULPI
+ select MXC_ULPI if USB_ULPI
help
Include support for mx31moboard platform. This includes specific
configurations for the board and its peripherals.
diff --git a/arch/arm/mach-mx3/clock-imx35.c b/arch/arm/mach-mx3/clock-imx35.c
index 7584b4c6c556..f3f41fa4f21b 100644
--- a/arch/arm/mach-mx3/clock-imx35.c
+++ b/arch/arm/mach-mx3/clock-imx35.c
@@ -485,15 +485,13 @@ static struct clk_lookup lookups[] = {
int __init mx35_clocks_init()
{
- int i;
unsigned int ll = 0;
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
ll = (3 << 16);
#endif
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
/* Turn off all clocks except the ones we need to survive, namely:
* EMI, GPIO1/2/3, GPT, IOMUX, MAX and eventually uart
diff --git a/arch/arm/mach-mx3/clock.c b/arch/arm/mach-mx3/clock.c
index 27a318af0d20..b5c39a016db7 100644
--- a/arch/arm/mach-mx3/clock.c
+++ b/arch/arm/mach-mx3/clock.c
@@ -578,12 +578,10 @@ static struct clk_lookup lookups[] = {
int __init mx31_clocks_init(unsigned long fref)
{
u32 reg;
- int i;
ckih_rate = fref;
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
/* change the csi_clk parent if necessary */
reg = __raw_readl(MXC_CCM_CCMR);
diff --git a/arch/arm/mach-mx3/mm.c b/arch/arm/mach-mx3/mm.c
index bedf5b8d976a..6858a4f9806c 100644
--- a/arch/arm/mach-mx3/mm.c
+++ b/arch/arm/mach-mx3/mm.c
@@ -65,6 +65,11 @@ static struct map_desc mxc_io_desc[] __initdata = {
.pfn = __phys_to_pfn(AIPS2_BASE_ADDR),
.length = AIPS2_SIZE,
.type = MT_DEVICE_NONSHARED
+ }, {
+ .virtual = SPBA0_BASE_ADDR_VIRT,
+ .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
+ .length = SPBA0_SIZE,
+ .type = MT_DEVICE_NONSHARED
},
};
diff --git a/arch/arm/mach-mx3/mx31ads.c b/arch/arm/mach-mx3/mx31ads.c
index 0497c152be18..3e7bafa2ddbb 100644
--- a/arch/arm/mach-mx3/mx31ads.c
+++ b/arch/arm/mach-mx3/mx31ads.c
@@ -494,11 +494,6 @@ static void mxc_init_i2c(void)
*/
static struct map_desc mx31ads_io_desc[] __initdata = {
{
- .virtual = SPBA0_BASE_ADDR_VIRT,
- .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
- .length = SPBA0_SIZE,
- .type = MT_DEVICE_NONSHARED
- }, {
.virtual = CS4_BASE_ADDR_VIRT,
.pfn = __phys_to_pfn(CS4_BASE_ADDR),
.length = CS4_SIZE / 2,
diff --git a/arch/arm/mach-mx3/mx31lite.c b/arch/arm/mach-mx3/mx31lite.c
index def6b6736594..789b20d1730f 100644
--- a/arch/arm/mach-mx3/mx31lite.c
+++ b/arch/arm/mach-mx3/mx31lite.c
@@ -135,6 +135,7 @@ static struct spi_board_info mc13783_spi_dev __initdata = {
* USB
*/
+#if defined(CONFIG_USB_ULPI)
#define USB_PAD_CFG (PAD_CTL_DRV_MAX | PAD_CTL_SRE_FAST | PAD_CTL_HYS_CMOS | \
PAD_CTL_ODE_CMOS | PAD_CTL_100K_PU)
@@ -180,6 +181,7 @@ static struct mxc_usbh_platform_data usbh2_pdata = {
.portsc = MXC_EHCI_MODE_ULPI | MXC_EHCI_UTMI_8BIT,
.flags = MXC_EHCI_POWER_PINS_ENABLED,
};
+#endif
/*
* NOR flash
@@ -212,11 +214,6 @@ static struct platform_device physmap_flash_device = {
*/
static struct map_desc mx31lite_io_desc[] __initdata = {
{
- .virtual = SPBA0_BASE_ADDR_VIRT,
- .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
- .length = SPBA0_SIZE,
- .type = MT_DEVICE_NONSHARED
- }, {
.virtual = CS4_BASE_ADDR_VIRT,
.pfn = __phys_to_pfn(CS4_BASE_ADDR),
.length = CS4_SIZE,
@@ -261,11 +258,13 @@ static void __init mxc_board_init(void)
mxc_register_device(&mxc_spi_device1, &spi1_pdata);
spi_register_board_info(&mc13783_spi_dev, 1);
+#if defined(CONFIG_USB_ULPI)
/* USB */
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
mxc_register_device(&mxc_usbh2, &usbh2_pdata);
+#endif
/* SMSC9117 IRQ pin */
ret = gpio_request(IOMUX_TO_GPIO(MX31_PIN_SFS6), "sms9117-irq");
diff --git a/arch/arm/mach-mx3/mx31moboard-devboard.c b/arch/arm/mach-mx3/mx31moboard-devboard.c
index 8fc624f141cb..438428eaf769 100644
--- a/arch/arm/mach-mx3/mx31moboard-devboard.c
+++ b/arch/arm/mach-mx3/mx31moboard-devboard.c
@@ -179,7 +179,7 @@ static int __init devboard_usbh1_init(void)
usbh1_pdata.otg = otg;
- return mxc_register_device(&mx31_usbh1, &usbh1_pdata);
+ return mxc_register_device(&mxc_usbh1, &usbh1_pdata);
}
/*
diff --git a/arch/arm/mach-mx3/mx31moboard-marxbot.c b/arch/arm/mach-mx3/mx31moboard-marxbot.c
index 85184a35e674..1f44b9ccbb0f 100644
--- a/arch/arm/mach-mx3/mx31moboard-marxbot.c
+++ b/arch/arm/mach-mx3/mx31moboard-marxbot.c
@@ -294,7 +294,7 @@ static int __init marxbot_usbh1_init(void)
usbh1_pdata.otg = otg;
- return mxc_register_device(&mx31_usbh1, &usbh1_pdata);
+ return mxc_register_device(&mxc_usbh1, &usbh1_pdata);
}
/*
diff --git a/arch/arm/mach-mx3/mx31moboard.c b/arch/arm/mach-mx3/mx31moboard.c
index b70529145936..cfd605d078ec 100644
--- a/arch/arm/mach-mx3/mx31moboard.c
+++ b/arch/arm/mach-mx3/mx31moboard.c
@@ -346,6 +346,8 @@ static struct fsl_usb2_platform_data usb_pdata = {
.phy_mode = FSL_USB2_PHY_ULPI,
};
+#if defined(CONFIG_USB_ULPI)
+
#define USBH2_EN_B IOMUX_TO_GPIO(MX31_PIN_SCK6)
static int moboard_usbh2_hw_init(struct platform_device *pdev)
@@ -392,8 +394,11 @@ static int __init moboard_usbh2_init(void)
usbh2_pdata.otg = otg_ulpi_create(&mxc_ulpi_access_ops,
USB_OTG_DRV_VBUS | USB_OTG_DRV_VBUS_EXT);
- return mxc_register_device(&mx31_usbh2, &usbh2_pdata);
+ return mxc_register_device(&mxc_usbh2, &usbh2_pdata);
}
+#else
+static inline int moboard_usbh2_init(void) { return 0; }
+#endif
static struct gpio_led mx31moboard_leds[] = {
diff --git a/arch/arm/mach-mx3/mx31pdk.c b/arch/arm/mach-mx3/mx31pdk.c
index 0f7a2f06bc2d..18715f1aa7eb 100644
--- a/arch/arm/mach-mx3/mx31pdk.c
+++ b/arch/arm/mach-mx3/mx31pdk.c
@@ -211,11 +211,6 @@ static int __init mx31pdk_init_expio(void)
*/
static struct map_desc mx31pdk_io_desc[] __initdata = {
{
- .virtual = SPBA0_BASE_ADDR_VIRT,
- .pfn = __phys_to_pfn(SPBA0_BASE_ADDR),
- .length = SPBA0_SIZE,
- .type = MT_DEVICE_NONSHARED,
- }, {
.virtual = CS5_BASE_ADDR_VIRT,
.pfn = __phys_to_pfn(CS5_BASE_ADDR),
.length = CS5_SIZE,
diff --git a/arch/arm/mach-mx3/pcm037.c b/arch/arm/mach-mx3/pcm037.c
index 6cbaabedf386..5be396917c99 100644
--- a/arch/arm/mach-mx3/pcm037.c
+++ b/arch/arm/mach-mx3/pcm037.c
@@ -322,16 +322,25 @@ static int pcm037_camera_power(struct device *dev, int on)
return 0;
}
-static struct i2c_board_info pcm037_i2c_2_devices[] = {
+static struct i2c_board_info pcm037_i2c_camera[] = {
{
I2C_BOARD_INFO("mt9t031", 0x5d),
+ }, {
+ I2C_BOARD_INFO("mt9v022", 0x48),
},
};
-static struct soc_camera_link iclink = {
+static struct soc_camera_link iclink_mt9v022 = {
+ .bus_id = 0, /* Must match with the camera ID */
+ .board_info = &pcm037_i2c_camera[1],
+ .i2c_adapter_id = 2,
+ .module_name = "mt9v022",
+};
+
+static struct soc_camera_link iclink_mt9t031 = {
.bus_id = 0, /* Must match with the camera ID */
.power = pcm037_camera_power,
- .board_info = &pcm037_i2c_2_devices[0],
+ .board_info = &pcm037_i2c_camera[0],
.i2c_adapter_id = 2,
.module_name = "mt9t031",
};
@@ -345,11 +354,19 @@ static struct i2c_board_info pcm037_i2c_devices[] = {
}
};
-static struct platform_device pcm037_camera = {
+static struct platform_device pcm037_mt9t031 = {
.name = "soc-camera-pdrv",
.id = 0,
.dev = {
- .platform_data = &iclink,
+ .platform_data = &iclink_mt9t031,
+ },
+};
+
+static struct platform_device pcm037_mt9v022 = {
+ .name = "soc-camera-pdrv",
+ .id = 1,
+ .dev = {
+ .platform_data = &iclink_mt9v022,
},
};
@@ -449,7 +466,8 @@ static int __init pcm037_camera_alloc_dma(const size_t buf_size)
static struct platform_device *devices[] __initdata = {
&pcm037_flash,
&pcm037_sram_device,
- &pcm037_camera,
+ &pcm037_mt9t031,
+ &pcm037_mt9v022,
};
static struct ipu_platform_data mx3_ipu_data = {
@@ -599,7 +617,7 @@ static void __init mxc_board_init(void)
if (!ret)
gpio_direction_output(IOMUX_TO_GPIO(MX31_PIN_CSI_D5), 1);
else
- iclink.power = NULL;
+ iclink_mt9t031.power = NULL;
if (!pcm037_camera_alloc_dma(4 * 1024 * 1024))
mxc_register_device(&mx3_camera, &camera_pdata);
diff --git a/arch/arm/mach-mxc91231/clock.c b/arch/arm/mach-mxc91231/clock.c
index ecfa37fef8ad..5c85075d8a56 100644
--- a/arch/arm/mach-mxc91231/clock.c
+++ b/arch/arm/mach-mxc91231/clock.c
@@ -624,7 +624,6 @@ static struct clk_lookup lookups[] = {
int __init mxc91231_clocks_init(unsigned long fref)
{
void __iomem *gpt_base;
- int i;
ckih_rate = fref;
@@ -632,8 +631,7 @@ int __init mxc91231_clocks_init(unsigned long fref)
sdhc_clk[0].parent = clk_sdhc_parent(&sdhc_clk[0]);
sdhc_clk[1].parent = clk_sdhc_parent(&sdhc_clk[1]);
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
gpt_base = MXC91231_IO_ADDRESS(MXC91231_GPT1_BASE_ADDR);
mxc_timer_init(&gpt_clk, gpt_base, MXC91231_INT_GPT);
diff --git a/arch/arm/mach-nomadik/cpu-8815.c b/arch/arm/mach-nomadik/cpu-8815.c
index f93c59634191..9bf33b30a025 100644
--- a/arch/arm/mach-nomadik/cpu-8815.c
+++ b/arch/arm/mach-nomadik/cpu-8815.c
@@ -86,11 +86,19 @@ static struct amba_device cpu8815_amba_gpio[] = {
},
};
+static struct amba_device cpu8815_amba_rng = {
+ .dev = {
+ .init_name = "rng",
+ },
+ __MEM_4K_RESOURCE(NOMADIK_RNG_BASE),
+};
+
static struct amba_device *amba_devs[] __initdata = {
cpu8815_amba_gpio + 0,
cpu8815_amba_gpio + 1,
cpu8815_amba_gpio + 2,
cpu8815_amba_gpio + 3,
+ &cpu8815_amba_rng
};
static int __init cpu8815_init(void)
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 9ce17f13d3f1..b6a537c875b8 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := io.o id.o sram.o irq.o mux.o serial.o devices.o
+obj-y := io.o id.o sram.o irq.o mux.o flash.o serial.o devices.o
obj-y += clock.o clock_data.o opp_data.o
obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 7e70c3c08da6..096f2ed102cb 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -18,18 +18,19 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <plat/tc.h>
#include <mach/gpio.h>
#include <plat/mux.h>
+#include <plat/flash.h>
#include <plat/fpga.h>
#include <plat/keypad.h>
#include <plat/common.h>
@@ -150,9 +151,9 @@ static struct mtd_partition nor_partitions[] = {
},
};
-static struct flash_platform_data nor_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data nor_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = nor_partitions,
.nr_parts = ARRAY_SIZE(nor_partitions),
};
@@ -164,7 +165,7 @@ static struct resource nor_resource = {
};
static struct platform_device nor_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &nor_data,
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index fa7cecea19f9..d1100e4f65ac 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -26,6 +26,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/i2c/tps65010.h>
#include <linux/smc91x.h>
@@ -35,7 +36,6 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <plat/mux.h>
@@ -45,6 +45,7 @@
#include <plat/usb.h>
#include <plat/keypad.h>
#include <plat/common.h>
+#include <plat/flash.h>
#include "board-h2.h"
@@ -121,9 +122,9 @@ static struct mtd_partition h2_nor_partitions[] = {
}
};
-static struct flash_platform_data h2_nor_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data h2_nor_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = h2_nor_partitions,
.nr_parts = ARRAY_SIZE(h2_nor_partitions),
};
@@ -134,7 +135,7 @@ static struct resource h2_nor_resource = {
};
static struct platform_device h2_nor_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &h2_nor_data,
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 6a7f9c391cf1..a53ab8297d25 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -25,6 +25,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/spi/spi.h>
#include <linux/i2c/tps65010.h>
@@ -37,7 +38,6 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <mach/irqs.h>
@@ -47,6 +47,7 @@
#include <plat/keypad.h>
#include <plat/dma.h>
#include <plat/common.h>
+#include <plat/flash.h>
#include "board-h3.h"
@@ -126,9 +127,9 @@ static struct mtd_partition nor_partitions[] = {
}
};
-static struct flash_platform_data nor_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data nor_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = nor_partitions,
.nr_parts = ARRAY_SIZE(nor_partitions),
};
@@ -139,7 +140,7 @@ static struct resource nor_resource = {
};
static struct platform_device nor_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &nor_data,
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 2133b006f6a3..5d12fd35681b 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -22,16 +22,17 @@
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <plat/mux.h>
+#include <plat/flash.h>
#include <plat/fpga.h>
#include <mach/gpio.h>
#include <plat/tc.h>
@@ -94,9 +95,9 @@ static struct mtd_partition innovator_partitions[] = {
}
};
-static struct flash_platform_data innovator_flash_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data innovator_flash_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = innovator_partitions,
.nr_parts = ARRAY_SIZE(innovator_partitions),
};
@@ -108,7 +109,7 @@ static struct resource innovator_flash_resource = {
};
static struct platform_device innovator_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &innovator_flash_data,
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index ccea4f448e9a..80d862001def 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -37,6 +37,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/i2c/tps65010.h>
@@ -46,8 +47,8 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
+#include <plat/flash.h>
#include <plat/usb.h>
#include <plat/mux.h>
#include <plat/tc.h>
@@ -94,9 +95,9 @@ static struct mtd_partition osk_partitions[] = {
}
};
-static struct flash_platform_data osk_flash_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data osk_flash_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = osk_partitions,
.nr_parts = ARRAY_SIZE(osk_partitions),
};
@@ -107,7 +108,7 @@ static struct resource osk_flash_resource = {
};
static struct platform_device osk5912_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &osk_flash_data,
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index 9fe887262bdf..569b4c9085cd 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include <linux/apm-emulation.h>
@@ -31,9 +32,9 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
#include <mach/gpio.h>
+#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/tc.h>
@@ -126,9 +127,9 @@ static struct mtd_partition palmte_rom_partitions[] = {
},
};
-static struct flash_platform_data palmte_rom_data = {
- .map_name = "map_rom",
+static struct physmap_flash_data palmte_rom_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = palmte_rom_partitions,
.nr_parts = ARRAY_SIZE(palmte_rom_partitions),
};
@@ -140,7 +141,7 @@ static struct resource palmte_rom_resource = {
};
static struct platform_device palmte_rom_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = -1,
.dev = {
.platform_data = &palmte_rom_data,
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index af068e3e0fe7..6ad49a2cc1a0 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -21,16 +21,17 @@
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/leds.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
#include <plat/led.h>
#include <mach/gpio.h>
+#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/dma.h>
@@ -104,9 +105,9 @@ static struct mtd_partition palmtt_partitions[] = {
}
};
-static struct flash_platform_data palmtt_flash_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data palmtt_flash_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = palmtt_partitions,
.nr_parts = ARRAY_SIZE(palmtt_partitions),
};
@@ -118,7 +119,7 @@ static struct resource palmtt_flash_resource = {
};
static struct platform_device palmtt_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &palmtt_flash_data,
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index c7a3b6f36500..6641de9257ef 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -25,14 +25,15 @@
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
#include <mach/gpio.h>
+#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/usb.h>
#include <plat/dma.h>
@@ -126,10 +127,9 @@ static struct mtd_partition palmz71_rom_partitions[] = {
},
};
-static struct flash_platform_data palmz71_rom_data = {
- .map_name = "map_rom",
- .name = "onboardrom",
+static struct physmap_flash_data palmz71_rom_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = palmz71_rom_partitions,
.nr_parts = ARRAY_SIZE(palmz71_rom_partitions),
};
@@ -141,7 +141,7 @@ static struct resource palmz71_rom_resource = {
};
static struct platform_device palmz71_rom_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = -1,
.dev = {
.platform_data = &palmz71_rom_data,
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index 1387a4f15da9..e854d5741c88 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -18,19 +18,20 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/input.h>
#include <linux/smc91x.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <plat/tc.h>
#include <mach/gpio.h>
#include <plat/mux.h>
#include <plat/fpga.h>
+#include <plat/flash.h>
#include <plat/keypad.h>
#include <plat/common.h>
#include <plat/board.h>
@@ -117,9 +118,9 @@ static struct mtd_partition nor_partitions[] = {
},
};
-static struct flash_platform_data nor_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data nor_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = nor_partitions,
.nr_parts = ARRAY_SIZE(nor_partitions),
};
@@ -131,7 +132,7 @@ static struct resource nor_resource = {
};
static struct platform_device nor_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &nor_data,
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index 7a97fac83d8d..2fb1e5f8e2ec 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -22,6 +22,7 @@
#include <linux/notifier.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/types.h>
#include <linux/i2c.h>
#include <linux/errno.h>
@@ -29,10 +30,10 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <mach/gpio.h>
+#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/dma.h>
#include <plat/irda.h>
@@ -287,9 +288,9 @@ static struct mtd_partition sx1_partitions[] = {
}
};
-static struct flash_platform_data sx1_flash_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data sx1_flash_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
.parts = sx1_partitions,
.nr_parts = ARRAY_SIZE(sx1_partitions),
};
@@ -310,7 +311,7 @@ static struct resource sx1_old_flash_resource[] = {
};
static struct platform_device sx1_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &sx1_flash_data,
@@ -327,7 +328,7 @@ static struct resource sx1_new_flash_resource = {
};
static struct platform_device sx1_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &sx1_flash_data,
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 169183537997..87b9436fe7c0 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -18,6 +18,7 @@
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/mtd/physmap.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/serial_8250.h>
@@ -27,11 +28,11 @@
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
-#include <asm/mach/flash.h>
#include <asm/mach/map.h>
#include <plat/common.h>
#include <mach/gpio.h>
+#include <plat/flash.h>
#include <plat/mux.h>
#include <plat/tc.h>
#include <plat/usb.h>
@@ -86,9 +87,9 @@ static int __init ext_uart_init(void)
}
arch_initcall(ext_uart_init);
-static struct flash_platform_data voiceblue_flash_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data voiceblue_flash_data = {
.width = 2,
+ .set_vpp = omap1_set_vpp,
};
static struct resource voiceblue_flash_resource = {
@@ -98,7 +99,7 @@ static struct resource voiceblue_flash_resource = {
};
static struct platform_device voiceblue_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &voiceblue_flash_data,
diff --git a/arch/arm/mach-omap1/clock_data.c b/arch/arm/mach-omap1/clock_data.c
index ab995a9c606c..65e7b5b85d83 100644
--- a/arch/arm/mach-omap1/clock_data.c
+++ b/arch/arm/mach-omap1/clock_data.c
@@ -599,7 +599,7 @@ static struct clk i2c_ick = {
static struct omap_clk omap_clks[] = {
/* non-ULPD clocks */
CLK(NULL, "ck_ref", &ck_ref, CK_16XX | CK_1510 | CK_310 | CK_7XX),
- CLK(NULL, "ck_dpll1", &ck_dpll1, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "ck_dpll1", &ck_dpll1, CK_16XX | CK_1510 | CK_310 | CK_7XX),
/* CK_GEN1 clocks */
CLK(NULL, "ck_dpll1out", &ck_dpll1out.clk, CK_16XX),
CLK(NULL, "ck_sossi", &sossi_ck, CK_16XX),
@@ -627,7 +627,7 @@ static struct omap_clk omap_clks[] = {
CLK(NULL, "tc2_ck", &tc2_ck, CK_16XX),
CLK(NULL, "dma_ck", &dma_ck, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "dma_lcdfree_ck", &dma_lcdfree_ck, CK_16XX),
- CLK(NULL, "api_ck", &api_ck.clk, CK_16XX | CK_1510 | CK_310),
+ CLK(NULL, "api_ck", &api_ck.clk, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK(NULL, "lb_ck", &lb_ck.clk, CK_1510 | CK_310),
CLK(NULL, "rhea1_ck", &rhea1_ck, CK_16XX),
CLK(NULL, "rhea2_ck", &rhea2_ck, CK_16XX),
@@ -658,6 +658,10 @@ static struct omap_clk omap_clks[] = {
CLK("i2c_omap.1", "fck", &i2c_fck, CK_16XX | CK_1510 | CK_310 | CK_7XX),
CLK("i2c_omap.1", "ick", &i2c_ick, CK_16XX),
CLK("i2c_omap.1", "ick", &dummy_ck, CK_1510 | CK_310 | CK_7XX),
+ CLK("omap1_spi100k.1", "fck", &dummy_ck, CK_7XX),
+ CLK("omap1_spi100k.1", "ick", &dummy_ck, CK_7XX),
+ CLK("omap1_spi100k.2", "fck", &dummy_ck, CK_7XX),
+ CLK("omap1_spi100k.2", "ick", &dummy_ck, CK_7XX),
CLK("omap_uwire", "fck", &armxor_ck.clk, CK_16XX | CK_1510 | CK_310),
CLK("omap-mcbsp.1", "ick", &dspper_ck, CK_16XX),
CLK("omap-mcbsp.1", "ick", &dummy_ck, CK_1510 | CK_310),
@@ -674,7 +678,7 @@ static struct omap_clk omap_clks[] = {
* init
*/
-static struct clk_functions omap1_clk_functions __initdata = {
+static struct clk_functions omap1_clk_functions = {
.clk_enable = omap1_clk_enable,
.clk_disable = omap1_clk_disable,
.clk_round_rate = omap1_clk_round_rate,
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 23ded2d49600..a2d07aa75c9e 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/spi/spi.h>
#include <mach/hardware.h>
#include <asm/mach/map.h>
@@ -23,6 +24,7 @@
#include <plat/mux.h>
#include <mach/gpio.h>
#include <plat/mmc.h>
+#include <plat/omap7xx.h>
/*-------------------------------------------------------------------------*/
@@ -196,6 +198,38 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
/*-------------------------------------------------------------------------*/
+/* OMAP7xx SPI support */
+#if defined(CONFIG_SPI_OMAP_100K) || defined(CONFIG_SPI_OMAP_100K_MODULE)
+
+struct platform_device omap_spi1 = {
+ .name = "omap1_spi100k",
+ .id = 1,
+};
+
+struct platform_device omap_spi2 = {
+ .name = "omap1_spi100k",
+ .id = 2,
+};
+
+static void omap_init_spi100k(void)
+{
+ omap_spi1.dev.platform_data = ioremap(OMAP7XX_SPI1_BASE, 0x7ff);
+ if (omap_spi1.dev.platform_data)
+ platform_device_register(&omap_spi1);
+
+ omap_spi2.dev.platform_data = ioremap(OMAP7XX_SPI2_BASE, 0x7ff);
+ if (omap_spi2.dev.platform_data)
+ platform_device_register(&omap_spi2);
+}
+
+#else
+static inline void omap_init_spi100k(void)
+{
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
#if defined(CONFIG_OMAP_STI)
#define OMAP1_STI_BASE 0xfffea000
@@ -263,6 +297,7 @@ static int __init omap1_init_devices(void)
omap_init_mbox();
omap_init_rtc();
+ omap_init_spi100k();
omap_init_sti();
return 0;
diff --git a/arch/arm/mach-omap1/flash.c b/arch/arm/mach-omap1/flash.c
new file mode 100644
index 000000000000..0b07a78eeaa7
--- /dev/null
+++ b/arch/arm/mach-omap1/flash.c
@@ -0,0 +1,33 @@
+/*
+ * Flash support for OMAP1
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+
+#include <plat/io.h>
+#include <plat/tc.h>
+
+void omap1_set_vpp(struct map_info *map, int enable)
+{
+ static int count;
+ u32 l;
+
+ if (enable) {
+ if (count++ == 0) {
+ l = omap_readl(EMIFS_CONFIG);
+ l |= OMAP_EMIFS_CONFIG_WP;
+ omap_writel(l, EMIFS_CONFIG);
+ }
+ } else {
+ if (count && (--count == 0)) {
+ l = omap_readl(EMIFS_CONFIG);
+ l &= ~OMAP_EMIFS_CONFIG_WP;
+ omap_writel(l, EMIFS_CONFIG);
+ }
+ }
+}
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
index 07212cc621ae..84341377232d 100644
--- a/arch/arm/mach-omap1/mux.c
+++ b/arch/arm/mach-omap1/mux.c
@@ -62,6 +62,14 @@ MUX_CFG_7XX("MMC_7XX_DAT0", 2, 17, 0, 16, 1, 0)
/* I2C interface */
MUX_CFG_7XX("I2C_7XX_SCL", 5, 1, 0, 0, 1, 0)
MUX_CFG_7XX("I2C_7XX_SDA", 5, 5, 0, 0, 1, 0)
+
+/* SPI pins */
+MUX_CFG_7XX("SPI_7XX_1", 6, 5, 4, 4, 1, 0)
+MUX_CFG_7XX("SPI_7XX_2", 6, 9, 4, 8, 1, 0)
+MUX_CFG_7XX("SPI_7XX_3", 6, 13, 4, 12, 1, 0)
+MUX_CFG_7XX("SPI_7XX_4", 6, 17, 4, 16, 1, 0)
+MUX_CFG_7XX("SPI_7XX_5", 8, 25, 0, 24, 0, 0)
+MUX_CFG_7XX("SPI_7XX_6", 9, 5, 0, 4, 0, 0)
};
#define OMAP7XX_PINS_SZ ARRAY_SIZE(omap7xx_pins)
#else
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 10eafa70a909..606bf04f51b6 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -80,6 +80,7 @@ config MACH_OVERO
config MACH_OMAP3EVM
bool "OMAP 3530 EVM board"
depends on ARCH_OMAP3 && ARCH_OMAP34XX
+ select OMAP_PACKAGE_CBB
config MACH_OMAP3517EVM
bool "OMAP3517/ AM3517 EVM board"
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index e508904fb67e..31042ee7e772 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -18,6 +18,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/i2c/twl.h>
#include <linux/err.h>
@@ -28,7 +29,6 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
#include <mach/gpio.h>
#include <plat/mux.h>
@@ -74,8 +74,7 @@ static struct mtd_partition sdp2430_partitions[] = {
}
};
-static struct flash_platform_data sdp2430_flash_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data sdp2430_flash_data = {
.width = 2,
.parts = sdp2430_partitions,
.nr_parts = ARRAY_SIZE(sdp2430_partitions),
@@ -88,7 +87,7 @@ static struct resource sdp2430_flash_resource = {
};
static struct platform_device sdp2430_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &sdp2430_flash_data,
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 739059632811..739059632811 100755..100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index cfb7f1257d20..ffb005101fa8 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/mtd/physmap.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/i2c.h>
@@ -29,7 +30,6 @@
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
-#include <asm/mach/flash.h>
#include <plat/control.h>
#include <mach/gpio.h>
@@ -115,8 +115,7 @@ static struct mtd_partition h4_partitions[] = {
}
};
-static struct flash_platform_data h4_flash_data = {
- .map_name = "cfi_probe",
+static struct physmap_flash_data h4_flash_data = {
.width = 2,
.parts = h4_partitions,
.nr_parts = ARRAY_SIZE(h4_partitions),
@@ -127,7 +126,7 @@ static struct resource h4_flash_resource = {
};
static struct platform_device h4_flash_device = {
- .name = "omapflash",
+ .name = "physmap-flash",
.id = 0,
.dev = {
.platform_data = &h4_flash_data,
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c
index 8dd277c36661..5c8474c75a34 100755..100644
--- a/arch/arm/mach-omap2/board-zoom-peripherals.c
+++ b/arch/arm/mach-omap2/board-zoom-peripherals.c
@@ -24,6 +24,7 @@
#include <plat/common.h>
#include <plat/usb.h>
+#include "mux.h"
#include "mmc-twl4030.h"
/* Zoom2 has Qwerty keyboard*/
@@ -63,21 +64,21 @@ static int board_keymap[] = {
KEY(5, 1, KEY_H),
KEY(5, 2, KEY_J),
KEY(5, 3, KEY_F3),
+ KEY(5, 4, KEY_UNKNOWN),
KEY(5, 5, KEY_VOLUMEDOWN),
KEY(5, 6, KEY_M),
- KEY(5, 7, KEY_ENTER),
+ KEY(5, 7, KEY_RIGHT),
KEY(6, 0, KEY_Q),
KEY(6, 1, KEY_A),
KEY(6, 2, KEY_N),
KEY(6, 3, KEY_BACKSPACE),
KEY(6, 6, KEY_P),
- KEY(6, 7, KEY_SELECT),
+ KEY(6, 7, KEY_UP),
KEY(7, 0, KEY_PROG1), /*MACRO 1 <User defined> */
KEY(7, 1, KEY_PROG2), /*MACRO 2 <User defined> */
KEY(7, 2, KEY_PROG3), /*MACRO 3 <User defined> */
KEY(7, 3, KEY_PROG4), /*MACRO 4 <User defined> */
- KEY(7, 5, KEY_RIGHT),
- KEY(7, 6, KEY_UP),
+ KEY(7, 6, KEY_SELECT),
KEY(7, 7, KEY_DOWN)
};
@@ -263,9 +264,17 @@ static int __init omap_i2c_init(void)
return 0;
}
+static void enable_board_wakeup_source(void)
+{
+ /* T2 interrupt line (keypad) */
+ omap_mux_init_signal("sys_nirq",
+ OMAP_WAKEUP_EN | OMAP_PIN_INPUT_PULLUP);
+}
+
void __init zoom_peripherals_init(void)
{
omap_i2c_init();
omap_serial_init();
usb_musb_init();
+ enable_board_wakeup_source();
}
diff --git a/arch/arm/mach-omap2/board-zoom3.c b/arch/arm/mach-omap2/board-zoom3.c
index a9fe9181b010..6512b2143f32 100644
--- a/arch/arm/mach-omap2/board-zoom3.c
+++ b/arch/arm/mach-omap2/board-zoom3.c
@@ -20,6 +20,7 @@
#include <plat/common.h>
#include <plat/board.h>
+#include <plat/usb.h>
#include "mux.h"
#include "sdram-hynix-h8mbx00u0mer-0em.h"
@@ -51,11 +52,24 @@ static struct omap_board_mux board_mux[] __initdata = {
#define board_mux NULL
#endif
+static struct ehci_hcd_omap_platform_data ehci_pdata __initconst = {
+ .port_mode[0] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+ .port_mode[1] = EHCI_HCD_OMAP_MODE_PHY,
+ .port_mode[2] = EHCI_HCD_OMAP_MODE_UNKNOWN,
+ .phy_reset = true,
+ .reset_gpio_port[0] = -EINVAL,
+ .reset_gpio_port[1] = 64,
+ .reset_gpio_port[2] = -EINVAL,
+};
+
static void __init omap_zoom_init(void)
{
omap3_mux_init(board_mux, OMAP_PACKAGE_CBP);
zoom_peripherals_init();
zoom_debugboard_init();
+
+ omap_mux_init_gpio(64, OMAP_PIN_OUTPUT);
+ usb_ehci_init(&ehci_pdata);
}
MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
diff --git a/arch/arm/mach-omap2/clock2xxx.c b/arch/arm/mach-omap2/clock2xxx.c
index d0e3fb7f9298..5420356eb407 100644
--- a/arch/arm/mach-omap2/clock2xxx.c
+++ b/arch/arm/mach-omap2/clock2xxx.c
@@ -449,40 +449,78 @@ int omap2_select_table_rate(struct clk *clk, unsigned long rate)
#ifdef CONFIG_CPU_FREQ
/*
* Walk PRCM rate table and fillout cpufreq freq_table
+ * XXX This should be replaced by an OPP layer in the near future
*/
-static struct cpufreq_frequency_table freq_table[ARRAY_SIZE(rate_table)];
+static struct cpufreq_frequency_table *freq_table;
void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
{
- struct prcm_config *prcm;
+ const struct prcm_config *prcm;
+ long sys_ck_rate;
int i = 0;
+ int tbl_sz = 0;
+
+ sys_ck_rate = clk_get_rate(sclk);
for (prcm = rate_table; prcm->mpu_speed; prcm++) {
if (!(prcm->flags & cpu_mask))
continue;
- if (prcm->xtal_speed != sys_ck.rate)
+ if (prcm->xtal_speed != sys_ck_rate)
continue;
/* don't put bypass rates in table */
if (prcm->dpll_speed == prcm->xtal_speed)
continue;
- freq_table[i].index = i;
- freq_table[i].frequency = prcm->mpu_speed / 1000;
- i++;
+ tbl_sz++;
}
- if (i == 0) {
- printk(KERN_WARNING "%s: failed to initialize frequency "
- "table\n", __func__);
+ /*
+ * XXX Ensure that we're doing what CPUFreq expects for this error
+ * case and the following one
+ */
+ if (tbl_sz == 0) {
+ pr_warning("%s: no matching entries in rate_table\n",
+ __func__);
+ return;
+ }
+
+ /* Include the CPUFREQ_TABLE_END terminator entry */
+ tbl_sz++;
+
+ freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * tbl_sz,
+ GFP_ATOMIC);
+ if (!freq_table) {
+ pr_err("%s: could not kzalloc frequency table\n", __func__);
return;
}
+ for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+ if (!(prcm->flags & cpu_mask))
+ continue;
+ if (prcm->xtal_speed != sys_ck_rate)
+ continue;
+
+ /* don't put bypass rates in table */
+ if (prcm->dpll_speed == prcm->xtal_speed)
+ continue;
+
+ freq_table[i].index = i;
+ freq_table[i].frequency = prcm->mpu_speed / 1000;
+ i++;
+ }
+
freq_table[i].index = i;
freq_table[i].frequency = CPUFREQ_TABLE_END;
*table = &freq_table[0];
}
+
+void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
+{
+ kfree(freq_table);
+}
+
#endif
struct clk_functions omap2_clk_functions = {
@@ -494,6 +532,7 @@ struct clk_functions omap2_clk_functions = {
.clk_disable_unused = omap2_clk_disable_unused,
#ifdef CONFIG_CPU_FREQ
.clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
+ .clk_exit_cpufreq_table = omap2_clk_exit_cpufreq_table,
#endif
};
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index ded32364f32b..d4217b93e10b 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -34,7 +34,6 @@
#include <asm/div64.h>
#include <asm/clkdev.h>
-#include <plat/sdrc.h>
#include "clock.h"
#include "clock34xx.h"
#include "sdrc.h"
diff --git a/arch/arm/mach-omap2/clock34xx_data.c b/arch/arm/mach-omap2/clock34xx_data.c
index 8bdcc9cc7f9a..c6031d74d6f6 100644
--- a/arch/arm/mach-omap2/clock34xx_data.c
+++ b/arch/arm/mach-omap2/clock34xx_data.c
@@ -776,6 +776,8 @@ static struct clk dpll4_m5_ck = {
.clksel_mask = OMAP3430_CLKSEL_CAM_MASK,
.clksel = div16_dpll4_clksel,
.clkdm_name = "dpll4_clkdm",
+ .set_rate = &omap2_clksel_set_rate,
+ .round_rate = &omap2_clksel_round_rate,
.recalc = &omap2_clksel_recalc,
};
@@ -1500,6 +1502,7 @@ static struct clk uart2_fck = {
.parent = &core_48m_fck,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP3430_EN_UART2_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
.recalc = &followparent_recalc,
};
@@ -1509,6 +1512,7 @@ static struct clk uart1_fck = {
.parent = &core_48m_fck,
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP3430_EN_UART1_SHIFT,
+ .clkdm_name = "core_l4_clkdm",
.recalc = &followparent_recalc,
};
@@ -2745,7 +2749,7 @@ static struct clk mcbsp4_ick = {
};
static const struct clksel mcbsp_234_clksel[] = {
- { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
+ { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
{ .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
{ .parent = NULL }
};
diff --git a/arch/arm/mach-omap2/clockdomain.c b/arch/arm/mach-omap2/clockdomain.c
index 1a45ed1e8ba1..dd285f001467 100644
--- a/arch/arm/mach-omap2/clockdomain.c
+++ b/arch/arm/mach-omap2/clockdomain.c
@@ -559,7 +559,7 @@ int omap2_clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
* downstream clocks for debugging purposes?
*/
- if (!clkdm || !clk)
+ if (!clkdm || !clk || !clkdm->clktrctrl_mask)
return -EINVAL;
if (atomic_inc_return(&clkdm->usecount) > 1)
@@ -610,7 +610,7 @@ int omap2_clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
* downstream clocks for debugging purposes?
*/
- if (!clkdm || !clk)
+ if (!clkdm || !clk || !clkdm->clktrctrl_mask)
return -EINVAL;
#ifdef DEBUG
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index a8749e8017b9..5a7996402c53 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -33,7 +33,6 @@
#include <plat/sdrc.h>
#include <plat/gpmc.h>
#include <plat/serial.h>
-#include <plat/mux.h>
#include <plat/vram.h>
#include "clock.h"
@@ -73,21 +72,21 @@ static struct map_desc omap24xx_io_desc[] __initdata = {
#ifdef CONFIG_ARCH_OMAP2420
static struct map_desc omap242x_io_desc[] __initdata = {
{
- .virtual = DSP_MEM_24XX_VIRT,
- .pfn = __phys_to_pfn(DSP_MEM_24XX_PHYS),
- .length = DSP_MEM_24XX_SIZE,
+ .virtual = DSP_MEM_2420_VIRT,
+ .pfn = __phys_to_pfn(DSP_MEM_2420_PHYS),
+ .length = DSP_MEM_2420_SIZE,
.type = MT_DEVICE
},
{
- .virtual = DSP_IPI_24XX_VIRT,
- .pfn = __phys_to_pfn(DSP_IPI_24XX_PHYS),
- .length = DSP_IPI_24XX_SIZE,
+ .virtual = DSP_IPI_2420_VIRT,
+ .pfn = __phys_to_pfn(DSP_IPI_2420_PHYS),
+ .length = DSP_IPI_2420_SIZE,
.type = MT_DEVICE
},
{
- .virtual = DSP_MMU_24XX_VIRT,
- .pfn = __phys_to_pfn(DSP_MMU_24XX_PHYS),
- .length = DSP_MMU_24XX_SIZE,
+ .virtual = DSP_MMU_2420_VIRT,
+ .pfn = __phys_to_pfn(DSP_MMU_2420_PHYS),
+ .length = DSP_MMU_2420_SIZE,
.type = MT_DEVICE
},
};
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index e071b3fd1878..a8febd3cea17 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -977,6 +977,38 @@ static void __init omap_mux_init_list(struct omap_mux *superset)
}
}
+#ifdef CONFIG_OMAP_MUX
+
+static void omap_mux_init_package(struct omap_mux *superset,
+ struct omap_mux *package_subset,
+ struct omap_ball *package_balls)
+{
+ if (package_subset)
+ omap_mux_package_fixup(package_subset, superset);
+ if (package_balls)
+ omap_mux_package_init_balls(package_balls, superset);
+}
+
+static void omap_mux_init_signals(struct omap_board_mux *board_mux)
+{
+ omap_mux_set_cmdline_signals();
+ omap_mux_set_board_signals(board_mux);
+}
+
+#else
+
+static void omap_mux_init_package(struct omap_mux *superset,
+ struct omap_mux *package_subset,
+ struct omap_ball *package_balls)
+{
+}
+
+static void omap_mux_init_signals(struct omap_board_mux *board_mux)
+{
+}
+
+#endif
+
int __init omap_mux_init(u32 mux_pbase, u32 mux_size,
struct omap_mux *superset,
struct omap_mux *package_subset,
@@ -993,14 +1025,9 @@ int __init omap_mux_init(u32 mux_pbase, u32 mux_size,
return -ENODEV;
}
-#ifdef CONFIG_OMAP_MUX
- omap_mux_package_fixup(package_subset, superset);
- omap_mux_package_init_balls(package_balls, superset);
- omap_mux_set_cmdline_signals();
- omap_mux_set_board_signals(board_mux);
-#endif
-
+ omap_mux_init_package(superset, package_subset, package_balls);
omap_mux_init_list(superset);
+ omap_mux_init_signals(board_mux);
return 0;
}
diff --git a/arch/arm/mach-omap2/opp2420_data.c b/arch/arm/mach-omap2/opp2420_data.c
index 126a9396b3a8..e6dda694fd5c 100644
--- a/arch/arm/mach-omap2/opp2420_data.c
+++ b/arch/arm/mach-omap2/opp2420_data.c
@@ -9,45 +9,47 @@
* The OMAP2 processor can be run at several discrete 'PRCM configurations'.
* These configurations are characterized by voltage and speed for clocks.
* The device is only validated for certain combinations. One way to express
- * these combinations is via the 'ratio's' which the clocks operate with
+ * these combinations is via the 'ratios' which the clocks operate with
* respect to each other. These ratio sets are for a given voltage/DPLL
- * setting. All configurations can be described by a DPLL setting and a ratio
- * There are 3 ratio sets for the 2430 and X ratio sets for 2420.
- *
- * 2430 differs from 2420 in that there are no more phase synchronizers used.
- * They both have a slightly different clock domain setup. 2420(iva1,dsp) vs
- * 2430 (iva2.1, NOdsp, mdm)
+ * setting. All configurations can be described by a DPLL setting and a ratio.
*
* XXX Missing voltage data.
+ * XXX Missing 19.2MHz sys_clk rate sets (needed for N800/N810)
*
* THe format described in this file is deprecated. Once a reasonable
* OPP API exists, the data in this file should be converted to use it.
*
* This is technically part of the OMAP2xxx clock code.
+ *
+ * Considerable work is still needed to fully support dynamic frequency
+ * changes on OMAP2xxx-series chips. Readers interested in such a
+ * project are encouraged to review the Maemo Diablo RX-34 and RX-44
+ * kernel source at:
+ * http://repository.maemo.org/pool/diablo/free/k/kernel-source-diablo/
*/
#include "opp2xxx.h"
#include "sdrc.h"
#include "clock.h"
-/*-------------------------------------------------------------------------
- * Key dividers which make up a PRCM set. Ratio's for a PRCM are mandated.
+/*
+ * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
* xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
* CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
* CM_CLKSEL2_PLL, CM_CLKSEL_MDM
*
- * Filling in table based on H4 boards and 2430-SDPs variants available.
- * There are quite a few more rates combinations which could be defined.
+ * Filling in table based on H4 boards available. There are quite a
+ * few more rate combinations which could be defined.
*
- * When multiple values are defined the start up will try and choose the
- * fastest one. If a 'fast' value is defined, then automatically, the /2
- * one should be included as it can be used. Generally having more that
- * one fast set does not make sense, as static timings need to be changed
- * to change the set. The exception is the bypass setting which is
- * availble for low power bypass.
+ * When multiple values are defined the start up will try and choose
+ * the fastest one. If a 'fast' value is defined, then automatically,
+ * the /2 one should be included as it can be used. Generally having
+ * more than one fast set does not make sense, as static timings need
+ * to be changed to change the set. The exception is the bypass
+ * setting which is available for low power bypass.
*
* Note: This table needs to be sorted, fastest to slowest.
- *-------------------------------------------------------------------------*/
+ **/
const struct prcm_config omap2420_rate_table[] = {
/* PRCM I - FAST */
{S12M, S660M, S330M, RI_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */
diff --git a/arch/arm/mach-omap2/opp2430_data.c b/arch/arm/mach-omap2/opp2430_data.c
index edb81672c844..1b9596ae201e 100644
--- a/arch/arm/mach-omap2/opp2430_data.c
+++ b/arch/arm/mach-omap2/opp2430_data.c
@@ -1,5 +1,5 @@
/*
- * opp2420_data.c - old-style "OPP" table for OMAP2420
+ * opp2430_data.c - old-style "OPP" table for OMAP2430
*
* Copyright (C) 2005-2009 Texas Instruments, Inc.
* Copyright (C) 2004-2009 Nokia Corporation
@@ -9,16 +9,16 @@
* The OMAP2 processor can be run at several discrete 'PRCM configurations'.
* These configurations are characterized by voltage and speed for clocks.
* The device is only validated for certain combinations. One way to express
- * these combinations is via the 'ratio's' which the clocks operate with
+ * these combinations is via the 'ratios' which the clocks operate with
* respect to each other. These ratio sets are for a given voltage/DPLL
- * setting. All configurations can be described by a DPLL setting and a ratio
- * There are 3 ratio sets for the 2430 and X ratio sets for 2420.
+ * setting. All configurations can be described by a DPLL setting and a ratio.
*
* 2430 differs from 2420 in that there are no more phase synchronizers used.
* They both have a slightly different clock domain setup. 2420(iva1,dsp) vs
* 2430 (iva2.1, NOdsp, mdm)
*
* XXX Missing voltage data.
+ * XXX Missing 19.2MHz sys_clk rate sets.
*
* THe format described in this file is deprecated. Once a reasonable
* OPP API exists, the data in this file should be converted to use it.
@@ -30,24 +30,24 @@
#include "sdrc.h"
#include "clock.h"
-/*-------------------------------------------------------------------------
- * Key dividers which make up a PRCM set. Ratio's for a PRCM are mandated.
+/*
+ * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated.
* xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
* CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
* CM_CLKSEL2_PLL, CM_CLKSEL_MDM
*
- * Filling in table based on H4 boards and 2430-SDPs variants available.
- * There are quite a few more rates combinations which could be defined.
+ * Filling in table based on 2430-SDPs variants available. There are
+ * quite a few more rate combinations which could be defined.
*
- * When multiple values are defined the start up will try and choose the
- * fastest one. If a 'fast' value is defined, then automatically, the /2
- * one should be included as it can be used. Generally having more that
- * one fast set does not make sense, as static timings need to be changed
- * to change the set. The exception is the bypass setting which is
- * availble for low power bypass.
+ * When multiple values are defined the start up will try and choose
+ * the fastest one. If a 'fast' value is defined, then automatically,
+ * the /2 one should be included as it can be used. Generally having
+ * more than one fast set does not make sense, as static timings need
+ * to be changed to change the set. The exception is the bypass
+ * setting which is available for low power bypass.
*
* Note: This table needs to be sorted, fastest to slowest.
- *-------------------------------------------------------------------------*/
+ */
const struct prcm_config omap2430_rate_table[] = {
/* PRCM #4 - ratio2 (ES2.1) - FAST */
{S13M, S798M, S399M, R2_CM_CLKSEL_MPU_VAL, /* 399MHz ARM */
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 81ed252a0f8a..c6cc809afb79 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -124,8 +124,8 @@ static void omap3_core_save_context(void)
control_padconf_off |= START_PADCONF_SAVE;
omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
/* wait for the save to complete */
- while (!omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
- & PADCONF_SAVE_DONE)
+ while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
+ & PADCONF_SAVE_DONE))
;
/* Save the Interrupt controller context */
omap_intc_save_context();
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 19805a7de06c..837b34757ffc 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -125,6 +125,13 @@ static struct plat_serial8250_port serial_platform_data3[] = {
}
};
#endif
+static inline unsigned int __serial_read_reg(struct uart_port *up,
+ int offset)
+{
+ offset <<= up->regshift;
+ return (unsigned int)__raw_readb(up->membase + offset);
+}
+
static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
int offset)
{
@@ -583,11 +590,12 @@ static unsigned int serial_in_override(struct uart_port *up, int offset)
{
if (UART_RX == offset) {
unsigned int lsr;
- lsr = serial_read_reg(omap_uart[up->line].p, UART_LSR);
+ lsr = __serial_read_reg(up, UART_LSR);
if (!(lsr & UART_LSR_DR))
return -EPERM;
}
- return serial_read_reg(omap_uart[up->line].p, offset);
+
+ return __serial_read_reg(up, offset);
}
void __init omap_serial_early_init(void)
@@ -686,15 +694,16 @@ void __init omap_serial_init_port(int port)
DEV_CREATE_FILE(dev, &dev_attr_sleep_timeout);
}
- /* omap44xx: Never read empty UART fifo
- * omap3xxx: Never read empty UART fifo on UARTs
- * with IP rev >=0x52
- */
- if (cpu_is_omap44xx())
- uart->p->serial_in = serial_in_override;
- else if ((serial_read_reg(uart->p, UART_OMAP_MVER) & 0xFF)
- >= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
- uart->p->serial_in = serial_in_override;
+ /*
+ * omap44xx: Never read empty UART fifo
+ * omap3xxx: Never read empty UART fifo on UARTs
+ * with IP rev >=0x52
+ */
+ if (cpu_is_omap44xx())
+ uart->p->serial_in = serial_in_override;
+ else if ((serial_read_reg(uart->p, UART_OMAP_MVER) & 0xFF)
+ >= UART_OMAP_NO_EMPTY_FIFO_READ_IP_REV)
+ uart->p->serial_in = serial_in_override;
}
/**
diff --git a/arch/arm/mach-pnx4008/clock.c b/arch/arm/mach-pnx4008/clock.c
index 898c0e88acbc..9d1975fa4d9f 100644
--- a/arch/arm/mach-pnx4008/clock.c
+++ b/arch/arm/mach-pnx4008/clock.c
@@ -22,8 +22,9 @@
#include <linux/delay.h>
#include <linux/io.h>
-#include <mach/hardware.h>
+#include <asm/clkdev.h>
+#include <mach/hardware.h>
#include <mach/clock.h>
#include "clock.h"
@@ -56,18 +57,19 @@ static void propagate_rate(struct clk *clk)
}
}
-static inline void clk_reg_disable(struct clk *clk)
+static void clk_reg_disable(struct clk *clk)
{
if (clk->enable_reg)
__raw_writel(__raw_readl(clk->enable_reg) &
~(1 << clk->enable_shift), clk->enable_reg);
}
-static inline void clk_reg_enable(struct clk *clk)
+static int clk_reg_enable(struct clk *clk)
{
if (clk->enable_reg)
__raw_writel(__raw_readl(clk->enable_reg) |
(1 << clk->enable_shift), clk->enable_reg);
+ return 0;
}
static inline void clk_reg_disable1(struct clk *clk)
@@ -636,31 +638,34 @@ static struct clk flash_ck = {
static struct clk i2c0_ck = {
.name = "i2c0_ck",
.parent = &per_ck,
- .flags = NEEDS_INITIALIZATION,
- .round_rate = &on_off_round_rate,
- .set_rate = &on_off_set_rate,
+ .flags = NEEDS_INITIALIZATION | FIXED_RATE,
.enable_shift = 0,
.enable_reg = I2CCLKCTRL_REG,
+ .rate = 13000000,
+ .enable = clk_reg_enable,
+ .disable = clk_reg_disable,
};
static struct clk i2c1_ck = {
.name = "i2c1_ck",
.parent = &per_ck,
- .flags = NEEDS_INITIALIZATION,
- .round_rate = &on_off_round_rate,
- .set_rate = &on_off_set_rate,
+ .flags = NEEDS_INITIALIZATION | FIXED_RATE,
.enable_shift = 1,
.enable_reg = I2CCLKCTRL_REG,
+ .rate = 13000000,
+ .enable = clk_reg_enable,
+ .disable = clk_reg_disable,
};
static struct clk i2c2_ck = {
.name = "i2c2_ck",
.parent = &per_ck,
- .flags = NEEDS_INITIALIZATION,
- .round_rate = &on_off_round_rate,
- .set_rate = &on_off_set_rate,
+ .flags = NEEDS_INITIALIZATION | FIXED_RATE,
.enable_shift = 2,
.enable_reg = USB_OTG_CLKCTRL_REG,
+ .rate = 13000000,
+ .enable = clk_reg_enable,
+ .disable = clk_reg_disable,
};
static struct clk spi0_ck = {
@@ -738,16 +743,16 @@ static struct clk wdt_ck = {
.name = "wdt_ck",
.parent = &per_ck,
.flags = NEEDS_INITIALIZATION,
- .round_rate = &on_off_round_rate,
- .set_rate = &on_off_set_rate,
.enable_shift = 0,
.enable_reg = TIMCLKCTRL_REG,
+ .enable = clk_reg_enable,
+ .disable = clk_reg_disable,
};
/* These clocks are visible outside this module
* and can be initialized
*/
-static struct clk *onchip_clks[] = {
+static struct clk *onchip_clks[] __initdata = {
&ck_13MHz,
&ck_pll1,
&ck_pll4,
@@ -777,49 +782,74 @@ static struct clk *onchip_clks[] = {
&wdt_ck,
};
-static int local_clk_enable(struct clk *clk)
-{
- int ret = 0;
-
- if (!(clk->flags & FIXED_RATE) && !clk->rate && clk->set_rate
- && clk->user_rate)
- ret = clk->set_rate(clk, clk->user_rate);
- return ret;
-}
+static struct clk_lookup onchip_clkreg[] = {
+ { .clk = &ck_13MHz, .con_id = "ck_13MHz" },
+ { .clk = &ck_pll1, .con_id = "ck_pll1" },
+ { .clk = &ck_pll4, .con_id = "ck_pll4" },
+ { .clk = &ck_pll5, .con_id = "ck_pll5" },
+ { .clk = &ck_pll3, .con_id = "ck_pll3" },
+ { .clk = &vfp9_ck, .con_id = "vfp9_ck" },
+ { .clk = &m2hclk_ck, .con_id = "m2hclk_ck" },
+ { .clk = &hclk_ck, .con_id = "hclk_ck" },
+ { .clk = &dma_ck, .con_id = "dma_ck" },
+ { .clk = &flash_ck, .con_id = "flash_ck" },
+ { .clk = &dum_ck, .con_id = "dum_ck" },
+ { .clk = &keyscan_ck, .con_id = "keyscan_ck" },
+ { .clk = &pwm1_ck, .con_id = "pwm1_ck" },
+ { .clk = &pwm2_ck, .con_id = "pwm2_ck" },
+ { .clk = &jpeg_ck, .con_id = "jpeg_ck" },
+ { .clk = &ms_ck, .con_id = "ms_ck" },
+ { .clk = &touch_ck, .con_id = "touch_ck" },
+ { .clk = &i2c0_ck, .dev_id = "pnx-i2c.0" },
+ { .clk = &i2c1_ck, .dev_id = "pnx-i2c.1" },
+ { .clk = &i2c2_ck, .dev_id = "pnx-i2c.2" },
+ { .clk = &spi0_ck, .con_id = "spi0_ck" },
+ { .clk = &spi1_ck, .con_id = "spi1_ck" },
+ { .clk = &uart3_ck, .con_id = "uart3_ck" },
+ { .clk = &uart4_ck, .con_id = "uart4_ck" },
+ { .clk = &uart5_ck, .con_id = "uart5_ck" },
+ { .clk = &uart6_ck, .con_id = "uart6_ck" },
+ { .clk = &wdt_ck, .dev_id = "pnx4008-watchdog" },
+};
static void local_clk_disable(struct clk *clk)
{
- if (!(clk->flags & FIXED_RATE) && clk->rate && clk->set_rate)
- clk->set_rate(clk, 0);
-}
+ if (WARN_ON(clk->usecount == 0))
+ return;
-static void local_clk_unuse(struct clk *clk)
-{
- if (clk->usecount > 0 && !(--clk->usecount)) {
- local_clk_disable(clk);
+ if (!(--clk->usecount)) {
+ if (clk->disable)
+ clk->disable(clk);
+ else if (!(clk->flags & FIXED_RATE) && clk->rate && clk->set_rate)
+ clk->set_rate(clk, 0);
if (clk->parent)
- local_clk_unuse(clk->parent);
+ local_clk_disable(clk->parent);
}
}
-static int local_clk_use(struct clk *clk)
+static int local_clk_enable(struct clk *clk)
{
int ret = 0;
- if (clk->usecount++ == 0) {
- if (clk->parent)
- ret = local_clk_use(clk->parent);
- if (ret != 0) {
- clk->usecount--;
- goto out;
+ if (clk->usecount == 0) {
+ if (clk->parent) {
+ ret = local_clk_enable(clk->parent);
+ if (ret != 0)
+ goto out;
}
- ret = local_clk_enable(clk);
+ if (clk->enable)
+ ret = clk->enable(clk);
+ else if (!(clk->flags & FIXED_RATE) && !clk->rate && clk->set_rate
+ && clk->user_rate)
+ ret = clk->set_rate(clk, clk->user_rate);
if (ret != 0 && clk->parent) {
- local_clk_unuse(clk->parent);
- clk->usecount--;
+ local_clk_disable(clk->parent);
+ goto out;
}
+
+ clk->usecount++;
}
out:
return ret;
@@ -866,35 +896,6 @@ out:
EXPORT_SYMBOL(clk_set_rate);
-struct clk *clk_get(struct device *dev, const char *id)
-{
- struct clk *clk = ERR_PTR(-ENOENT);
- struct clk **clkp;
-
- clock_lock();
- for (clkp = onchip_clks; clkp < onchip_clks + ARRAY_SIZE(onchip_clks);
- clkp++) {
- if (strcmp(id, (*clkp)->name) == 0
- && try_module_get((*clkp)->owner)) {
- clk = (*clkp);
- break;
- }
- }
- clock_unlock();
-
- return clk;
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- clock_lock();
- if (clk && !IS_ERR(clk))
- module_put(clk->owner);
- clock_unlock();
-}
-EXPORT_SYMBOL(clk_put);
-
unsigned long clk_get_rate(struct clk *clk)
{
unsigned long ret;
@@ -907,10 +908,10 @@ EXPORT_SYMBOL(clk_get_rate);
int clk_enable(struct clk *clk)
{
- int ret = 0;
+ int ret;
clock_lock();
- ret = local_clk_use(clk);
+ ret = local_clk_enable(clk);
clock_unlock();
return ret;
}
@@ -920,7 +921,7 @@ EXPORT_SYMBOL(clk_enable);
void clk_disable(struct clk *clk)
{
clock_lock();
- local_clk_unuse(clk);
+ local_clk_disable(clk);
clock_unlock();
}
@@ -967,18 +968,24 @@ static int __init clk_init(void)
for (clkp = onchip_clks; clkp < onchip_clks + ARRAY_SIZE(onchip_clks);
clkp++) {
- if (((*clkp)->flags & NEEDS_INITIALIZATION)
- && ((*clkp)->set_rate)) {
- (*clkp)->user_rate = (*clkp)->rate;
- local_set_rate((*clkp), (*clkp)->user_rate);
- if ((*clkp)->set_parent)
- (*clkp)->set_parent((*clkp), (*clkp)->parent);
+ struct clk *clk = *clkp;
+ if (clk->flags & NEEDS_INITIALIZATION) {
+ if (clk->set_rate) {
+ clk->user_rate = clk->rate;
+ local_set_rate(clk, clk->user_rate);
+ if (clk->set_parent)
+ clk->set_parent(clk, clk->parent);
+ }
+ if (clk->enable && clk->usecount)
+ clk->enable(clk);
+ if (clk->disable && !clk->usecount)
+ clk->disable(clk);
}
pr_debug("%s: clock %s, rate %ld\n",
- __func__, (*clkp)->name, (*clkp)->rate);
+ __func__, clk->name, clk->rate);
}
- local_clk_use(&ck_pll4);
+ local_clk_enable(&ck_pll4);
/* if ck_13MHz is not used, disable it. */
if (ck_13MHz.usecount == 0)
@@ -987,6 +994,8 @@ static int __init clk_init(void)
/* Disable autoclocking */
__raw_writeb(0xff, AUTOCLK_CTRL);
+ clkdev_add_table(onchip_clkreg, ARRAY_SIZE(onchip_clkreg));
+
return 0;
}
diff --git a/arch/arm/mach-pnx4008/clock.h b/arch/arm/mach-pnx4008/clock.h
index cd58f372cfd0..39720d6c0d01 100644
--- a/arch/arm/mach-pnx4008/clock.h
+++ b/arch/arm/mach-pnx4008/clock.h
@@ -14,8 +14,6 @@
#define __ARCH_ARM_PNX4008_CLOCK_H__
struct clk {
- struct list_head node;
- struct module *owner;
const char *name;
struct clk *parent;
struct clk *propagate_next;
@@ -29,9 +27,11 @@ struct clk {
u8 enable_shift1;
u32 enable_reg1;
u32 parent_switch_reg;
- u32(*round_rate) (struct clk *, u32);
+ u32(*round_rate) (struct clk *, u32);
int (*set_rate) (struct clk *, u32);
int (*set_parent) (struct clk * clk, struct clk * parent);
+ int (*enable)(struct clk *);
+ void (*disable)(struct clk *);
};
/* Flags */
diff --git a/arch/arm/mach-pnx4008/i2c.c b/arch/arm/mach-pnx4008/i2c.c
index f3fea29c00d3..8103f9644e2d 100644
--- a/arch/arm/mach-pnx4008/i2c.c
+++ b/arch/arm/mach-pnx4008/i2c.c
@@ -18,120 +18,24 @@
#include <mach/irqs.h>
#include <mach/i2c.h>
-static int set_clock_run(struct platform_device *pdev)
-{
- struct clk *clk;
- char name[10];
- int retval = 0;
-
- snprintf(name, 10, "i2c%d_ck", pdev->id);
- clk = clk_get(&pdev->dev, name);
- if (!IS_ERR(clk)) {
- clk_set_rate(clk, 1);
- clk_put(clk);
- } else
- retval = -ENOENT;
-
- return retval;
-}
-
-static int set_clock_stop(struct platform_device *pdev)
-{
- struct clk *clk;
- char name[10];
- int retval = 0;
-
- snprintf(name, 10, "i2c%d_ck", pdev->id);
- clk = clk_get(&pdev->dev, name);
- if (!IS_ERR(clk)) {
- clk_set_rate(clk, 0);
- clk_put(clk);
- } else
- retval = -ENOENT;
-
- return retval;
-}
-
-static int i2c_pnx_suspend(struct platform_device *pdev, pm_message_t state)
-{
- int retval = 0;
-#ifdef CONFIG_PM
- retval = set_clock_run(pdev);
-#endif
- return retval;
-}
-
-static int i2c_pnx_resume(struct platform_device *pdev)
-{
- int retval = 0;
-#ifdef CONFIG_PM
- retval = set_clock_run(pdev);
-#endif
- return retval;
-}
-
-static u32 calculate_input_freq(struct platform_device *pdev)
-{
- return HCLK_MHZ;
-}
-
-
-static struct i2c_pnx_algo_data pnx_algo_data0 = {
+static struct i2c_pnx_data i2c0_data = {
+ .name = I2C_CHIP_NAME "0",
.base = PNX4008_I2C1_BASE,
.irq = I2C_1_INT,
};
-static struct i2c_pnx_algo_data pnx_algo_data1 = {
+static struct i2c_pnx_data i2c1_data = {
+ .name = I2C_CHIP_NAME "1",
.base = PNX4008_I2C2_BASE,
.irq = I2C_2_INT,
};
-static struct i2c_pnx_algo_data pnx_algo_data2 = {
+static struct i2c_pnx_data i2c2_data = {
+ .name = "USB-I2C",
.base = (PNX4008_USB_CONFIG_BASE + 0x300),
.irq = USB_I2C_INT,
};
-static struct i2c_adapter pnx_adapter0 = {
- .name = I2C_CHIP_NAME "0",
- .algo_data = &pnx_algo_data0,
-};
-static struct i2c_adapter pnx_adapter1 = {
- .name = I2C_CHIP_NAME "1",
- .algo_data = &pnx_algo_data1,
-};
-
-static struct i2c_adapter pnx_adapter2 = {
- .name = "USB-I2C",
- .algo_data = &pnx_algo_data2,
-};
-
-static struct i2c_pnx_data i2c0_data = {
- .suspend = i2c_pnx_suspend,
- .resume = i2c_pnx_resume,
- .calculate_input_freq = calculate_input_freq,
- .set_clock_run = set_clock_run,
- .set_clock_stop = set_clock_stop,
- .adapter = &pnx_adapter0,
-};
-
-static struct i2c_pnx_data i2c1_data = {
- .suspend = i2c_pnx_suspend,
- .resume = i2c_pnx_resume,
- .calculate_input_freq = calculate_input_freq,
- .set_clock_run = set_clock_run,
- .set_clock_stop = set_clock_stop,
- .adapter = &pnx_adapter1,
-};
-
-static struct i2c_pnx_data i2c2_data = {
- .suspend = i2c_pnx_suspend,
- .resume = i2c_pnx_resume,
- .calculate_input_freq = calculate_input_freq,
- .set_clock_run = set_clock_run,
- .set_clock_stop = set_clock_stop,
- .adapter = &pnx_adapter2,
-};
-
static struct platform_device i2c0_device = {
.name = "pnx-i2c",
.id = 0,
diff --git a/arch/arm/mach-pnx4008/include/mach/clkdev.h b/arch/arm/mach-pnx4008/include/mach/clkdev.h
new file mode 100644
index 000000000000..04b37a89801c
--- /dev/null
+++ b/arch/arm/mach-pnx4008/include/mach/clkdev.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_MACH_CLKDEV_H
+#define __ASM_MACH_CLKDEV_H
+
+#define __clk_get(clk) ({ 1; })
+#define __clk_put(clk) do { } while (0)
+
+#endif
diff --git a/arch/arm/mach-pnx4008/include/mach/timex.h b/arch/arm/mach-pnx4008/include/mach/timex.h
index 5ff0196c0f16..b383c7de7ab4 100644
--- a/arch/arm/mach-pnx4008/include/mach/timex.h
+++ b/arch/arm/mach-pnx4008/include/mach/timex.h
@@ -14,60 +14,6 @@
#ifndef __PNX4008_TIMEX_H
#define __PNX4008_TIMEX_H
-#include <linux/io.h>
-#include <mach/hardware.h>
-
#define CLOCK_TICK_RATE 1000000
-#define TICKS2USECS(x) (x)
-
-/* MilliSecond Timer - Chapter 21 Page 202 */
-
-#define MSTIM_INT IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x0))
-#define MSTIM_CTRL IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x4))
-#define MSTIM_COUNTER IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x8))
-#define MSTIM_MCTRL IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x14))
-#define MSTIM_MATCH0 IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x18))
-#define MSTIM_MATCH1 IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x1c))
-
-/* High Speed Timer - Chpater 22, Page 205 */
-
-#define HSTIM_INT IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x0))
-#define HSTIM_CTRL IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x4))
-#define HSTIM_COUNTER IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x8))
-#define HSTIM_PMATCH IO_ADDRESS((PNX4008_HSTIMER_BASE + 0xC))
-#define HSTIM_PCOUNT IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x10))
-#define HSTIM_MCTRL IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x14))
-#define HSTIM_MATCH0 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x18))
-#define HSTIM_MATCH1 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x1c))
-#define HSTIM_MATCH2 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x20))
-#define HSTIM_CCR IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x28))
-#define HSTIM_CR0 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x2C))
-#define HSTIM_CR1 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x30))
-
-/* IMPORTANT: both timers are UPCOUNTING */
-
-/* xSTIM_MCTRL bit definitions */
-#define MR0_INT 1
-#define RESET_COUNT0 (1<<1)
-#define STOP_COUNT0 (1<<2)
-#define MR1_INT (1<<3)
-#define RESET_COUNT1 (1<<4)
-#define STOP_COUNT1 (1<<5)
-#define MR2_INT (1<<6)
-#define RESET_COUNT2 (1<<7)
-#define STOP_COUNT2 (1<<8)
-
-/* xSTIM_CTRL bit definitions */
-#define COUNT_ENAB 1
-#define RESET_COUNT (1<<1)
-#define DEBUG_EN (1<<2)
-
-/* xSTIM_INT bit definitions */
-#define MATCH0_INT 1
-#define MATCH1_INT (1<<1)
-#define MATCH2_INT (1<<2)
-#define RTC_TICK0 (1<<4)
-#define RTC_TICK1 (1<<5)
-
#endif
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index b3d8d53e32ef..1f0585329be4 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -21,6 +21,8 @@
#include <linux/io.h>
#include <asm/cacheflush.h>
+
+#include <mach/hardware.h>
#include <mach/pm.h>
#include <mach/clock.h>
diff --git a/arch/arm/mach-pnx4008/time.c b/arch/arm/mach-pnx4008/time.c
index fc0ba183fe12..0c8aad4bb0dc 100644
--- a/arch/arm/mach-pnx4008/time.c
+++ b/arch/arm/mach-pnx4008/time.c
@@ -30,6 +30,8 @@
#include <asm/mach/time.h>
#include <asm/errno.h>
+#include "time.h"
+
/*! Note: all timers are UPCOUNTING */
/*!
diff --git a/arch/arm/mach-pnx4008/time.h b/arch/arm/mach-pnx4008/time.h
new file mode 100644
index 000000000000..75e88c570aa7
--- /dev/null
+++ b/arch/arm/mach-pnx4008/time.h
@@ -0,0 +1,70 @@
+/*
+ * arch/arm/mach-pnx4008/include/mach/timex.h
+ *
+ * PNX4008 timers header file
+ *
+ * Author: Dmitry Chigirev <source@mvista.com>
+ *
+ * 2005 (c) MontaVista Software, Inc. This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ */
+#ifndef PNX_TIME_H
+#define PNX_TIME_H
+
+#include <linux/io.h>
+#include <mach/hardware.h>
+
+#define TICKS2USECS(x) (x)
+
+/* MilliSecond Timer - Chapter 21 Page 202 */
+
+#define MSTIM_INT IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x0))
+#define MSTIM_CTRL IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x4))
+#define MSTIM_COUNTER IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x8))
+#define MSTIM_MCTRL IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x14))
+#define MSTIM_MATCH0 IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x18))
+#define MSTIM_MATCH1 IO_ADDRESS((PNX4008_MSTIMER_BASE + 0x1c))
+
+/* High Speed Timer - Chpater 22, Page 205 */
+
+#define HSTIM_INT IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x0))
+#define HSTIM_CTRL IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x4))
+#define HSTIM_COUNTER IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x8))
+#define HSTIM_PMATCH IO_ADDRESS((PNX4008_HSTIMER_BASE + 0xC))
+#define HSTIM_PCOUNT IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x10))
+#define HSTIM_MCTRL IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x14))
+#define HSTIM_MATCH0 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x18))
+#define HSTIM_MATCH1 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x1c))
+#define HSTIM_MATCH2 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x20))
+#define HSTIM_CCR IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x28))
+#define HSTIM_CR0 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x2C))
+#define HSTIM_CR1 IO_ADDRESS((PNX4008_HSTIMER_BASE + 0x30))
+
+/* IMPORTANT: both timers are UPCOUNTING */
+
+/* xSTIM_MCTRL bit definitions */
+#define MR0_INT 1
+#define RESET_COUNT0 (1<<1)
+#define STOP_COUNT0 (1<<2)
+#define MR1_INT (1<<3)
+#define RESET_COUNT1 (1<<4)
+#define STOP_COUNT1 (1<<5)
+#define MR2_INT (1<<6)
+#define RESET_COUNT2 (1<<7)
+#define STOP_COUNT2 (1<<8)
+
+/* xSTIM_CTRL bit definitions */
+#define COUNT_ENAB 1
+#define RESET_COUNT (1<<1)
+#define DEBUG_EN (1<<2)
+
+/* xSTIM_INT bit definitions */
+#define MATCH0_INT 1
+#define MATCH1_INT (1<<1)
+#define MATCH2_INT (1<<2)
+#define RTC_TICK0 (1<<4)
+#define RTC_TICK1 (1<<5)
+
+#endif
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 8a0837ea0294..385c30ee3f23 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -37,6 +37,8 @@ config MACH_ZYLONITE320
config MACH_LITTLETON
bool "PXA3xx Form Factor Platform (aka Littleton)"
select PXA3xx
+ select CPU_PXA300
+ select CPU_PXA310
select PXA_SSP
config MACH_TAVOREVB
@@ -415,6 +417,24 @@ config MACH_TREO680
Say Y here if you intend to run this kernel on Palm Treo 680
smartphone.
+config MACH_RAUMFELD_RC
+ bool "Raumfeld Controller"
+ select PXA3xx
+ select CPU_PXA300
+ select HAVE_PWM
+
+config MACH_RAUMFELD_CONNECTOR
+ bool "Raumfeld Connector"
+ select PXA3xx
+ select CPU_PXA300
+ select PXA_SSP
+
+config MACH_RAUMFELD_SPEAKER
+ bool "Raumfeld Speaker"
+ select PXA3xx
+ select CPU_PXA300
+ select PXA_SSP
+
config PXA_SHARPSL
bool "SHARP Zaurus SL-5600, SL-C7xx and SL-Cxx00 Models"
select SHARP_SCOOP
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index f64afda7e6f6..9d831939b3c5 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -89,6 +89,9 @@ obj-$(CONFIG_MACH_E740) += e740.o
obj-$(CONFIG_MACH_E750) += e750.o
obj-$(CONFIG_MACH_E400) += e400.o
obj-$(CONFIG_MACH_E800) += e800.o
+obj-$(CONFIG_MACH_RAUMFELD_RC) += raumfeld.o
+obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o
+obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o
# Support for blinky lights
led-y := leds.o
diff --git a/arch/arm/mach-pxa/clock.c b/arch/arm/mach-pxa/clock.c
index 49ae38292310..abba0089a2ae 100644
--- a/arch/arm/mach-pxa/clock.c
+++ b/arch/arm/mach-pxa/clock.c
@@ -78,11 +78,3 @@ const struct clkops clk_cken_ops = {
.enable = clk_cken_enable,
.disable = clk_cken_disable,
};
-
-void clks_register(struct clk_lookup *clks, size_t num)
-{
- int i;
-
- for (i = 0; i < num; i++)
- clkdev_add(&clks[i]);
-}
diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h
index 978a3667e90d..d8488742b807 100644
--- a/arch/arm/mach-pxa/clock.h
+++ b/arch/arm/mach-pxa/clock.h
@@ -67,7 +67,3 @@ extern void clk_pxa3xx_cken_enable(struct clk *);
extern void clk_pxa3xx_cken_disable(struct clk *);
#endif
-void clks_register(struct clk_lookup *clks, size_t num);
-int clk_add_alias(const char *alias, const char *alias_name, char *id,
- struct device *dev);
-
diff --git a/arch/arm/mach-pxa/eseries.c b/arch/arm/mach-pxa/eseries.c
index 91417f035069..96ed13081639 100644
--- a/arch/arm/mach-pxa/eseries.c
+++ b/arch/arm/mach-pxa/eseries.c
@@ -128,6 +128,6 @@ static struct clk_lookup eseries_clkregs[] = {
void eseries_register_clks(void)
{
- clks_register(eseries_clkregs, ARRAY_SIZE(eseries_clkregs));
+ clkdev_add_table(eseries_clkregs, ARRAY_SIZE(eseries_clkregs));
}
diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h
index 50f1297bf5ac..e741bf1bfb2d 100644
--- a/arch/arm/mach-pxa/include/mach/hardware.h
+++ b/arch/arm/mach-pxa/include/mach/hardware.h
@@ -250,20 +250,17 @@
#define cpu_is_pxa930() \
({ \
- unsigned int id = read_cpuid(CPUID_ID); \
- __cpu_is_pxa930(id); \
+ __cpu_is_pxa930(read_cpuid_id()); \
})
#define cpu_is_pxa935() \
({ \
- unsigned int id = read_cpuid(CPUID_ID); \
- __cpu_is_pxa935(id); \
+ __cpu_is_pxa935(read_cpuid_id()); \
})
#define cpu_is_pxa950() \
({ \
- unsigned int id = read_cpuid(CPUID_ID); \
- __cpu_is_pxa950(id); \
+ __cpu_is_pxa950(read_cpuid_id()); \
})
diff --git a/arch/arm/mach-pxa/include/mach/zylonite.h b/arch/arm/mach-pxa/include/mach/zylonite.h
index bf6785adccf4..9edf645368d6 100644
--- a/arch/arm/mach-pxa/include/mach/zylonite.h
+++ b/arch/arm/mach-pxa/include/mach/zylonite.h
@@ -8,13 +8,6 @@
/* the following variables are processor specific and initialized
* by the corresponding zylonite_pxa3xx_init()
*/
-struct platform_mmc_slot {
- int gpio_cd;
- int gpio_wp;
-};
-
-extern struct platform_mmc_slot zylonite_mmc_slot[];
-
extern int gpio_eth_irq;
extern int gpio_debug_led1;
extern int gpio_debug_led2;
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index f28c1715b910..fa527b258d61 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -110,6 +110,12 @@ static mfp_cfg_t littleton_mfp_cfg[] __initdata = {
GPIO7_MMC1_CLK,
GPIO8_MMC1_CMD,
GPIO15_GPIO, /* card detect */
+
+ /* UART3 */
+ GPIO107_UART3_CTS,
+ GPIO108_UART3_RTS,
+ GPIO109_UART3_TXD,
+ GPIO110_UART3_RXD,
};
static struct resource smc91x_resources[] = {
diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c
index 8a38d604dc77..189f330719a2 100644
--- a/arch/arm/mach-pxa/magician.c
+++ b/arch/arm/mach-pxa/magician.c
@@ -381,7 +381,7 @@ err:
return ret;
}
-static int magician_backlight_notify(int brightness)
+static int magician_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(EGPIO_MAGICIAN_BL_POWER, brightness);
if (brightness >= 200) {
diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c
index 59140217890a..e100af78b166 100644
--- a/arch/arm/mach-pxa/palmld.c
+++ b/arch/arm/mach-pxa/palmld.c
@@ -270,7 +270,7 @@ err:
return ret;
}
-static int palmld_backlight_notify(int brightness)
+static int palmld_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(GPIO_NR_PALMLD_BL_POWER, brightness);
gpio_set_value(GPIO_NR_PALMLD_LCD_POWER, brightness);
diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c
index 7f89ca20f13a..8fe3ec27568f 100644
--- a/arch/arm/mach-pxa/palmt5.c
+++ b/arch/arm/mach-pxa/palmt5.c
@@ -209,7 +209,7 @@ err:
return ret;
}
-static int palmt5_backlight_notify(int brightness)
+static int palmt5_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(GPIO_NR_PALMT5_BL_POWER, brightness);
gpio_set_value(GPIO_NR_PALMT5_LCD_POWER, brightness);
diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
index 308417592007..b992f07ece21 100644
--- a/arch/arm/mach-pxa/palmtc.c
+++ b/arch/arm/mach-pxa/palmtc.c
@@ -185,7 +185,7 @@ err:
return ret;
}
-static int palmtc_backlight_notify(int brightness)
+static int palmtc_backlight_notify(struct device *dev, int brightness)
{
/* backlight is on when GPIO16 AF0 is high */
gpio_set_value(GPIO_NR_PALMTC_BL_POWER, brightness);
diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c
index 265d62bae7de..dc728d6ab94e 100644
--- a/arch/arm/mach-pxa/palmte2.c
+++ b/arch/arm/mach-pxa/palmte2.c
@@ -181,7 +181,7 @@ err:
return ret;
}
-static int palmte2_backlight_notify(int brightness)
+static int palmte2_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(GPIO_NR_PALMTE2_BL_POWER, brightness);
gpio_set_value(GPIO_NR_PALMTE2_LCD_POWER, brightness);
diff --git a/arch/arm/mach-pxa/palmtreo.c b/arch/arm/mach-pxa/palmtreo.c
index 606eb7e8a17e..b433bb496711 100644
--- a/arch/arm/mach-pxa/palmtreo.c
+++ b/arch/arm/mach-pxa/palmtreo.c
@@ -375,7 +375,7 @@ err:
return ret;
}
-static int treo_backlight_notify(int brightness)
+static int treo_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(GPIO_NR_TREO_BL_POWER, brightness);
return TREO_MAX_INTENSITY - brightness;
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 7bf18c2f002f..b37a025c0b7b 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -269,7 +269,7 @@ err:
return ret;
}
-static int palmtx_backlight_notify(int brightness)
+static int palmtx_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(GPIO_NR_PALMTX_BL_POWER, brightness);
gpio_set_value(GPIO_NR_PALMTX_LCD_POWER, brightness);
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index d787ac7cfdd8..1c5d68a94511 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -196,7 +196,7 @@ err:
return ret;
}
-static int palmz72_backlight_notify(int brightness)
+static int palmz72_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(GPIO_NR_PALMZ72_BL_POWER, brightness);
gpio_set_value(GPIO_NR_PALMZ72_LCD_POWER, brightness);
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index e5eeb3a62d01..c2b938a4d5c9 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -293,7 +293,7 @@ static struct pxamci_platform_data poodle_mci_platform_data = {
.init = poodle_mci_init,
.setpower = poodle_mci_setpower,
.exit = poodle_mci_exit,
- .gpio_card_detect = POODLE_IRQ_GPIO_nSD_DETECT,
+ .gpio_card_detect = POODLE_GPIO_nSD_DETECT,
.gpio_card_ro = POODLE_GPIO_nSD_WP,
.gpio_power = -1,
};
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index 2c1b0b70d01d..0b9ad30bfd51 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -349,7 +349,7 @@ static int __init pxa25x_init(void)
reset_status = RCSR;
- clks_register(pxa25x_clkregs, ARRAY_SIZE(pxa25x_clkregs));
+ clkdev_add_table(pxa25x_clkregs, ARRAY_SIZE(pxa25x_clkregs));
if ((ret = pxa_init_dma(IRQ_DMA, 16)))
return ret;
@@ -370,7 +370,7 @@ static int __init pxa25x_init(void)
/* Only add HWUART for PXA255/26x; PXA210/250 do not have it. */
if (cpu_is_pxa255())
- clks_register(&pxa25x_hwuart_clkreg, 1);
+ clkdev_add(&pxa25x_hwuart_clkreg);
return ret;
}
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 6a0b73167e03..d783123e2d48 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -392,7 +392,7 @@ static int __init pxa27x_init(void)
reset_status = RCSR;
- clks_register(pxa27x_clkregs, ARRAY_SIZE(pxa27x_clkregs));
+ clkdev_add_table(pxa27x_clkregs, ARRAY_SIZE(pxa27x_clkregs));
if ((ret = pxa_init_dma(IRQ_DMA, 32)))
return ret;
diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c
index f4af6e2bef89..40bb16501d86 100644
--- a/arch/arm/mach-pxa/pxa300.c
+++ b/arch/arm/mach-pxa/pxa300.c
@@ -102,12 +102,12 @@ static int __init pxa300_init(void)
if (cpu_is_pxa300() || cpu_is_pxa310()) {
mfp_init_base(io_p2v(MFPR_BASE));
mfp_init_addr(pxa300_mfp_addr_map);
- clks_register(ARRAY_AND_SIZE(common_clkregs));
+ clkdev_add_table(ARRAY_AND_SIZE(common_clkregs));
}
if (cpu_is_pxa310()) {
mfp_init_addr(pxa310_mfp_addr_map);
- clks_register(ARRAY_AND_SIZE(pxa310_clkregs));
+ clkdev_add_table(ARRAY_AND_SIZE(pxa310_clkregs));
}
return 0;
diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c
index c7373e74a109..8d614ecd8e99 100644
--- a/arch/arm/mach-pxa/pxa320.c
+++ b/arch/arm/mach-pxa/pxa320.c
@@ -90,7 +90,7 @@ static int __init pxa320_init(void)
if (cpu_is_pxa320()) {
mfp_init_base(io_p2v(MFPR_BASE));
mfp_init_addr(pxa320_mfp_addr_map);
- clks_register(ARRAY_AND_SIZE(pxa320_clkregs));
+ clkdev_add_table(ARRAY_AND_SIZE(pxa320_clkregs));
}
return 0;
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index fcb0721f4669..4d7c03e72504 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -634,7 +634,7 @@ static int __init pxa3xx_init(void)
*/
ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);
- clks_register(pxa3xx_clkregs, ARRAY_SIZE(pxa3xx_clkregs));
+ clkdev_add_table(pxa3xx_clkregs, ARRAY_SIZE(pxa3xx_clkregs));
if ((ret = pxa_init_dma(IRQ_DMA, 32)))
return ret;
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
new file mode 100644
index 000000000000..06717d7995cb
--- /dev/null
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -0,0 +1,1100 @@
+/*
+ * arch/arm/mach-pxa/raumfeld.c
+ *
+ * Support for the following Raumfeld devices:
+ *
+ * * Controller
+ * * Connector
+ * * Speaker S/M
+ *
+ * See http://www.raumfeld.com for details.
+ *
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sysdev.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/smsc911x.h>
+#include <linux/input.h>
+#include <linux/rotary_encoder.h>
+#include <linux/gpio_keys.h>
+#include <linux/input/eeti_ts.h>
+#include <linux/leds.h>
+#include <linux/w1-gpio.h>
+#include <linux/sched.h>
+#include <linux/pwm_backlight.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_gpio.h>
+#include <linux/lis3lv02d.h>
+#include <linux/pda_power.h>
+#include <linux/power_supply.h>
+#include <linux/pda_power.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/max8660.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+
+#include <mach/hardware.h>
+#include <mach/pxa3xx-regs.h>
+#include <mach/mfp-pxa3xx.h>
+#include <mach/mfp-pxa300.h>
+#include <mach/ohci.h>
+#include <mach/pxafb.h>
+#include <mach/mmc.h>
+#include <plat/i2c.h>
+#include <plat/pxa3xx_nand.h>
+
+#include "generic.h"
+#include "devices.h"
+#include "clock.h"
+
+/* common GPIO definitions */
+
+/* inputs */
+#define GPIO_ON_OFF (14)
+#define GPIO_VOLENC_A (19)
+#define GPIO_VOLENC_B (20)
+#define GPIO_CHARGE_DONE (23)
+#define GPIO_CHARGE_IND (27)
+#define GPIO_TOUCH_IRQ (32)
+#define GPIO_ETH_IRQ (40)
+#define GPIO_SPI_MISO (98)
+#define GPIO_ACCEL_IRQ (104)
+#define GPIO_RESCUE_BOOT (115)
+#define GPIO_DOCK_DETECT (116)
+#define GPIO_KEY1 (117)
+#define GPIO_KEY2 (118)
+#define GPIO_KEY3 (119)
+#define GPIO_CHARGE_USB_OK (112)
+#define GPIO_CHARGE_DC_OK (101)
+#define GPIO_CHARGE_USB_SUSP (102)
+
+/* outputs */
+#define GPIO_SHUTDOWN_SUPPLY (16)
+#define GPIO_SHUTDOWN_BATT (18)
+#define GPIO_CHRG_PEN2 (31)
+#define GPIO_TFT_VA_EN (33)
+#define GPIO_SPDIF_CS (34)
+#define GPIO_LED2 (35)
+#define GPIO_LED1 (36)
+#define GPIO_SPDIF_RESET (38)
+#define GPIO_SPI_CLK (95)
+#define GPIO_MCLK_DAC_CS (96)
+#define GPIO_SPI_MOSI (97)
+#define GPIO_W1_PULLUP_ENABLE (105)
+#define GPIO_DISPLAY_ENABLE (106)
+#define GPIO_MCLK_RESET (111)
+#define GPIO_W2W_RESET (113)
+#define GPIO_W2W_PDN (114)
+#define GPIO_CODEC_RESET (120)
+#define GPIO_AUDIO_VA_ENABLE (124)
+#define GPIO_ACCEL_CS (125)
+#define GPIO_ONE_WIRE (126)
+
+/*
+ * GPIO configurations
+ */
+static mfp_cfg_t raumfeld_controller_pin_config[] __initdata = {
+ /* UART1 */
+ GPIO77_UART1_RXD,
+ GPIO78_UART1_TXD,
+ GPIO79_UART1_CTS,
+ GPIO81_UART1_DSR,
+ GPIO83_UART1_DTR,
+ GPIO84_UART1_RTS,
+
+ /* UART3 */
+ GPIO110_UART3_RXD,
+
+ /* USB Host */
+ GPIO0_2_USBH_PEN,
+ GPIO1_2_USBH_PWR,
+
+ /* I2C */
+ GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
+ GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
+
+ /* SPI */
+ GPIO34_GPIO, /* SPDIF_CS */
+ GPIO96_GPIO, /* MCLK_CS */
+ GPIO125_GPIO, /* ACCEL_CS */
+
+ /* MMC */
+ GPIO3_MMC1_DAT0,
+ GPIO4_MMC1_DAT1,
+ GPIO5_MMC1_DAT2,
+ GPIO6_MMC1_DAT3,
+ GPIO7_MMC1_CLK,
+ GPIO8_MMC1_CMD,
+
+ /* One-wire */
+ GPIO126_GPIO | MFP_LPM_FLOAT,
+ GPIO105_GPIO | MFP_PULL_LOW | MFP_LPM_PULL_LOW,
+
+ /* CHRG_USB_OK */
+ GPIO101_GPIO | MFP_PULL_HIGH,
+ /* CHRG_USB_OK */
+ GPIO112_GPIO | MFP_PULL_HIGH,
+ /* CHRG_USB_SUSP */
+ GPIO102_GPIO,
+ /* DISPLAY_ENABLE */
+ GPIO106_GPIO,
+ /* DOCK_DETECT */
+ GPIO116_GPIO | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
+
+ /* LCD */
+ GPIO54_LCD_LDD_0,
+ GPIO55_LCD_LDD_1,
+ GPIO56_LCD_LDD_2,
+ GPIO57_LCD_LDD_3,
+ GPIO58_LCD_LDD_4,
+ GPIO59_LCD_LDD_5,
+ GPIO60_LCD_LDD_6,
+ GPIO61_LCD_LDD_7,
+ GPIO62_LCD_LDD_8,
+ GPIO63_LCD_LDD_9,
+ GPIO64_LCD_LDD_10,
+ GPIO65_LCD_LDD_11,
+ GPIO66_LCD_LDD_12,
+ GPIO67_LCD_LDD_13,
+ GPIO68_LCD_LDD_14,
+ GPIO69_LCD_LDD_15,
+ GPIO70_LCD_LDD_16,
+ GPIO71_LCD_LDD_17,
+ GPIO72_LCD_FCLK,
+ GPIO73_LCD_LCLK,
+ GPIO74_LCD_PCLK,
+ GPIO75_LCD_BIAS,
+};
+
+static mfp_cfg_t raumfeld_connector_pin_config[] __initdata = {
+ /* UART1 */
+ GPIO77_UART1_RXD,
+ GPIO78_UART1_TXD,
+ GPIO79_UART1_CTS,
+ GPIO81_UART1_DSR,
+ GPIO83_UART1_DTR,
+ GPIO84_UART1_RTS,
+
+ /* UART3 */
+ GPIO110_UART3_RXD,
+
+ /* USB Host */
+ GPIO0_2_USBH_PEN,
+ GPIO1_2_USBH_PWR,
+
+ /* I2C */
+ GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
+ GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
+
+ /* SPI */
+ GPIO34_GPIO, /* SPDIF_CS */
+ GPIO96_GPIO, /* MCLK_CS */
+ GPIO125_GPIO, /* ACCEL_CS */
+
+ /* MMC */
+ GPIO3_MMC1_DAT0,
+ GPIO4_MMC1_DAT1,
+ GPIO5_MMC1_DAT2,
+ GPIO6_MMC1_DAT3,
+ GPIO7_MMC1_CLK,
+ GPIO8_MMC1_CMD,
+
+ /* Ethernet */
+ GPIO1_nCS2, /* CS */
+ GPIO40_GPIO | MFP_PULL_HIGH, /* IRQ */
+
+ /* SSP for I2S */
+ GPIO85_SSP1_SCLK,
+ GPIO89_SSP1_EXTCLK,
+ GPIO86_SSP1_FRM,
+ GPIO87_SSP1_TXD,
+ GPIO88_SSP1_RXD,
+ GPIO90_SSP1_SYSCLK,
+
+ /* SSP2 for S/PDIF */
+ GPIO25_SSP2_SCLK,
+ GPIO26_SSP2_FRM,
+ GPIO27_SSP2_TXD,
+ GPIO29_SSP2_EXTCLK,
+};
+
+static mfp_cfg_t raumfeld_speaker_pin_config[] __initdata = {
+ /* UART1 */
+ GPIO77_UART1_RXD,
+ GPIO78_UART1_TXD,
+ GPIO79_UART1_CTS,
+ GPIO81_UART1_DSR,
+ GPIO83_UART1_DTR,
+ GPIO84_UART1_RTS,
+
+ /* UART3 */
+ GPIO110_UART3_RXD,
+
+ /* USB Host */
+ GPIO0_2_USBH_PEN,
+ GPIO1_2_USBH_PWR,
+
+ /* I2C */
+ GPIO21_I2C_SCL | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
+ GPIO22_I2C_SDA | MFP_LPM_FLOAT | MFP_PULL_FLOAT,
+
+ /* SPI */
+ GPIO34_GPIO, /* SPDIF_CS */
+ GPIO96_GPIO, /* MCLK_CS */
+ GPIO125_GPIO, /* ACCEL_CS */
+
+ /* MMC */
+ GPIO3_MMC1_DAT0,
+ GPIO4_MMC1_DAT1,
+ GPIO5_MMC1_DAT2,
+ GPIO6_MMC1_DAT3,
+ GPIO7_MMC1_CLK,
+ GPIO8_MMC1_CMD,
+
+ /* Ethernet */
+ GPIO1_nCS2, /* CS */
+ GPIO40_GPIO | MFP_PULL_HIGH, /* IRQ */
+
+ /* SSP for I2S */
+ GPIO85_SSP1_SCLK,
+ GPIO89_SSP1_EXTCLK,
+ GPIO86_SSP1_FRM,
+ GPIO87_SSP1_TXD,
+ GPIO88_SSP1_RXD,
+ GPIO90_SSP1_SYSCLK,
+};
+
+/*
+ * SMSC LAN9220 Ethernet
+ */
+
+static struct resource smc91x_resources[] = {
+ {
+ .start = PXA3xx_CS2_PHYS,
+ .end = PXA3xx_CS2_PHYS + 0xfffff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = gpio_to_irq(GPIO_ETH_IRQ),
+ .end = gpio_to_irq(GPIO_ETH_IRQ),
+ .flags = IORESOURCE_IRQ | IRQF_TRIGGER_FALLING,
+ }
+};
+
+static struct smsc911x_platform_config raumfeld_smsc911x_config = {
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+ .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+ .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+ .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
+};
+
+static struct platform_device smc91x_device = {
+ .name = "smsc911x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(smc91x_resources),
+ .resource = smc91x_resources,
+ .dev = {
+ .platform_data = &raumfeld_smsc911x_config,
+ }
+};
+
+/**
+ * NAND
+ */
+
+static struct mtd_partition raumfeld_nand_partitions[] = {
+ {
+ .name = "Bootloader",
+ .offset = 0,
+ .size = 0xa0000,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ {
+ .name = "BootloaderEnvironment",
+ .offset = 0xa0000,
+ .size = 0x20000,
+ },
+ {
+ .name = "BootloaderSplashScreen",
+ .offset = 0xc0000,
+ .size = 0x60000,
+ },
+ {
+ .name = "UBI",
+ .offset = 0x120000,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct pxa3xx_nand_platform_data raumfeld_nand_info = {
+ .enable_arbiter = 1,
+ .keep_config = 1,
+ .parts = raumfeld_nand_partitions,
+ .nr_parts = ARRAY_SIZE(raumfeld_nand_partitions),
+};
+
+/**
+ * USB (OHCI) support
+ */
+
+static struct pxaohci_platform_data raumfeld_ohci_info = {
+ .port_mode = PMM_GLOBAL_MODE,
+ .flags = ENABLE_PORT1,
+};
+
+/**
+ * Rotary encoder input device
+ */
+
+static struct rotary_encoder_platform_data raumfeld_rotary_encoder_info = {
+ .steps = 24,
+ .axis = REL_X,
+ .relative_axis = 1,
+ .gpio_a = GPIO_VOLENC_A,
+ .gpio_b = GPIO_VOLENC_B,
+ .inverted_a = 1,
+ .inverted_b = 0,
+};
+
+static struct platform_device rotary_encoder_device = {
+ .name = "rotary-encoder",
+ .id = 0,
+ .dev = {
+ .platform_data = &raumfeld_rotary_encoder_info,
+ }
+};
+
+/**
+ * GPIO buttons
+ */
+
+static struct gpio_keys_button gpio_keys_button[] = {
+ {
+ .code = KEY_F1,
+ .type = EV_KEY,
+ .gpio = GPIO_KEY1,
+ .active_low = 1,
+ .wakeup = 0,
+ .debounce_interval = 5, /* ms */
+ .desc = "Button 1",
+ },
+ {
+ .code = KEY_F2,
+ .type = EV_KEY,
+ .gpio = GPIO_KEY2,
+ .active_low = 1,
+ .wakeup = 0,
+ .debounce_interval = 5, /* ms */
+ .desc = "Button 2",
+ },
+ {
+ .code = KEY_F3,
+ .type = EV_KEY,
+ .gpio = GPIO_KEY3,
+ .active_low = 1,
+ .wakeup = 0,
+ .debounce_interval = 5, /* ms */
+ .desc = "Button 3",
+ },
+ {
+ .code = KEY_F4,
+ .type = EV_KEY,
+ .gpio = GPIO_RESCUE_BOOT,
+ .active_low = 0,
+ .wakeup = 0,
+ .debounce_interval = 5, /* ms */
+ .desc = "rescue boot button",
+ },
+ {
+ .code = KEY_F5,
+ .type = EV_KEY,
+ .gpio = GPIO_DOCK_DETECT,
+ .active_low = 1,
+ .wakeup = 0,
+ .debounce_interval = 5, /* ms */
+ .desc = "dock detect",
+ },
+ {
+ .code = KEY_F6,
+ .type = EV_KEY,
+ .gpio = GPIO_ON_OFF,
+ .active_low = 0,
+ .wakeup = 0,
+ .debounce_interval = 5, /* ms */
+ .desc = "on/off button",
+ },
+};
+
+static struct gpio_keys_platform_data gpio_keys_platform_data = {
+ .buttons = gpio_keys_button,
+ .nbuttons = ARRAY_SIZE(gpio_keys_button),
+ .rep = 0,
+};
+
+static struct platform_device raumfeld_gpio_keys_device = {
+ .name = "gpio-keys",
+ .id = -1,
+ .dev = {
+ .platform_data = &gpio_keys_platform_data,
+ }
+};
+
+/**
+ * GPIO LEDs
+ */
+
+static struct gpio_led raumfeld_leds[] = {
+ {
+ .name = "raumfeld:1",
+ .gpio = GPIO_LED1,
+ .active_low = 1,
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+ },
+ {
+ .name = "raumfeld:2",
+ .gpio = GPIO_LED2,
+ .active_low = 0,
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ }
+};
+
+static struct gpio_led_platform_data raumfeld_led_platform_data = {
+ .leds = raumfeld_leds,
+ .num_leds = ARRAY_SIZE(raumfeld_leds),
+};
+
+static struct platform_device raumfeld_led_device = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev = {
+ .platform_data = &raumfeld_led_platform_data,
+ },
+};
+
+/**
+ * One-wire (W1 bus) support
+ */
+
+static void w1_enable_external_pullup(int enable)
+{
+ gpio_set_value(GPIO_W1_PULLUP_ENABLE, enable);
+ msleep(100);
+}
+
+static struct w1_gpio_platform_data w1_gpio_platform_data = {
+ .pin = GPIO_ONE_WIRE,
+ .is_open_drain = 0,
+ .enable_external_pullup = w1_enable_external_pullup,
+};
+
+struct platform_device raumfeld_w1_gpio_device = {
+ .name = "w1-gpio",
+ .dev = {
+ .platform_data = &w1_gpio_platform_data
+ }
+};
+
+static void __init raumfeld_w1_init(void)
+{
+ int ret = gpio_request(GPIO_W1_PULLUP_ENABLE,
+ "W1 external pullup enable");
+
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_W1_PULLUP_ENABLE\n");
+ else
+ gpio_direction_output(GPIO_W1_PULLUP_ENABLE, 0);
+
+ platform_device_register(&raumfeld_w1_gpio_device);
+}
+
+/**
+ * Framebuffer device
+ */
+
+/* PWM controlled backlight */
+static struct platform_pwm_backlight_data raumfeld_pwm_backlight_data = {
+ .pwm_id = 0,
+ .max_brightness = 100,
+ .dft_brightness = 100,
+ /* 10000 ns = 10 ms ^= 100 kHz */
+ .pwm_period_ns = 10000,
+};
+
+static struct platform_device raumfeld_pwm_backlight_device = {
+ .name = "pwm-backlight",
+ .dev = {
+ .parent = &pxa27x_device_pwm0.dev,
+ .platform_data = &raumfeld_pwm_backlight_data,
+ }
+};
+
+/* LT3593 controlled backlight */
+static struct gpio_led raumfeld_lt3593_led = {
+ .name = "backlight",
+ .gpio = mfp_to_gpio(MFP_PIN_GPIO17),
+ .default_state = LEDS_GPIO_DEFSTATE_ON,
+};
+
+static struct gpio_led_platform_data raumfeld_lt3593_platform_data = {
+ .leds = &raumfeld_lt3593_led,
+ .num_leds = 1,
+};
+
+static struct platform_device raumfeld_lt3593_device = {
+ .name = "leds-lt3593",
+ .id = -1,
+ .dev = {
+ .platform_data = &raumfeld_lt3593_platform_data,
+ },
+};
+
+static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
+ .pixclock = 111000,
+ .xres = 480,
+ .yres = 272,
+ .bpp = 16,
+ .hsync_len = 4,
+ .left_margin = 2,
+ .right_margin = 1,
+ .vsync_len = 1,
+ .upper_margin = 3,
+ .lower_margin = 1,
+ .sync = 0,
+};
+
+static struct pxafb_mach_info raumfeld_sharp_lcd_info = {
+ .modes = &sharp_lq043t3dx02_mode,
+ .num_modes = 1,
+ .video_mem_size = 0x400000,
+ .lcd_conn = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
+};
+
+static void __init raumfeld_lcd_init(void)
+{
+ int ret;
+
+ set_pxa_fb_info(&raumfeld_sharp_lcd_info);
+
+ /* Earlier devices had the backlight regulator controlled
+ * via PWM, later versions use another controller for that */
+ if ((system_rev & 0xff) < 2) {
+ mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
+ pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
+ platform_device_register(&raumfeld_pwm_backlight_device);
+ } else
+ platform_device_register(&raumfeld_lt3593_device);
+
+ ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_TFT_VA_EN\n");
+ else
+ gpio_direction_output(GPIO_TFT_VA_EN, 1);
+
+ ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
+ else
+ gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
+}
+
+/**
+ * SPI devices
+ */
+
+struct spi_gpio_platform_data raumfeld_spi_platform_data = {
+ .sck = GPIO_SPI_CLK,
+ .mosi = GPIO_SPI_MOSI,
+ .miso = GPIO_SPI_MISO,
+ .num_chipselect = 3,
+};
+
+static struct platform_device raumfeld_spi_device = {
+ .name = "spi_gpio",
+ .id = 0,
+ .dev = {
+ .platform_data = &raumfeld_spi_platform_data,
+ }
+};
+
+static struct lis3lv02d_platform_data lis3_pdata = {
+ .click_flags = LIS3_CLICK_SINGLE_X |
+ LIS3_CLICK_SINGLE_Y |
+ LIS3_CLICK_SINGLE_Z,
+ .irq_cfg = LIS3_IRQ1_CLICK | LIS3_IRQ2_CLICK,
+ .wakeup_flags = LIS3_WAKEUP_X_LO | LIS3_WAKEUP_X_HI |
+ LIS3_WAKEUP_Y_LO | LIS3_WAKEUP_Y_HI |
+ LIS3_WAKEUP_Z_LO | LIS3_WAKEUP_Z_HI,
+ .wakeup_thresh = 10,
+ .click_thresh_x = 10,
+ .click_thresh_y = 10,
+ .click_thresh_z = 10,
+};
+
+#define SPI_AK4104 \
+{ \
+ .modalias = "ak4104", \
+ .max_speed_hz = 10000, \
+ .bus_num = 0, \
+ .chip_select = 0, \
+ .controller_data = (void *) GPIO_SPDIF_CS, \
+}
+
+#define SPI_LIS3 \
+{ \
+ .modalias = "lis3lv02d_spi", \
+ .max_speed_hz = 1000000, \
+ .bus_num = 0, \
+ .chip_select = 1, \
+ .controller_data = (void *) GPIO_ACCEL_CS, \
+ .platform_data = &lis3_pdata, \
+ .irq = gpio_to_irq(GPIO_ACCEL_IRQ), \
+}
+
+#define SPI_DAC7512 \
+{ \
+ .modalias = "dac7512", \
+ .max_speed_hz = 1000000, \
+ .bus_num = 0, \
+ .chip_select = 2, \
+ .controller_data = (void *) GPIO_MCLK_DAC_CS, \
+}
+
+static struct spi_board_info connector_spi_devices[] __initdata = {
+ SPI_AK4104,
+ SPI_DAC7512,
+};
+
+static struct spi_board_info speaker_spi_devices[] __initdata = {
+ SPI_DAC7512,
+};
+
+static struct spi_board_info controller_spi_devices[] __initdata = {
+ SPI_LIS3,
+};
+
+/**
+ * MMC for Marvell Libertas 8688 via SDIO
+ */
+
+static int raumfeld_mci_init(struct device *dev, irq_handler_t isr, void *data)
+{
+ gpio_set_value(GPIO_W2W_RESET, 1);
+ gpio_set_value(GPIO_W2W_PDN, 1);
+
+ return 0;
+}
+
+static void raumfeld_mci_exit(struct device *dev, void *data)
+{
+ gpio_set_value(GPIO_W2W_RESET, 0);
+ gpio_set_value(GPIO_W2W_PDN, 0);
+}
+
+static struct pxamci_platform_data raumfeld_mci_platform_data = {
+ .init = raumfeld_mci_init,
+ .exit = raumfeld_mci_exit,
+ .detect_delay = 20,
+ .gpio_card_detect = -1,
+ .gpio_card_ro = -1,
+ .gpio_power = -1,
+};
+
+/*
+ * External power / charge logic
+ */
+
+static int power_supply_init(struct device *dev)
+{
+ return 0;
+}
+
+static void power_supply_exit(struct device *dev)
+{
+}
+
+static int raumfeld_is_ac_online(void)
+{
+ return !gpio_get_value(GPIO_CHARGE_DC_OK);
+}
+
+static int raumfeld_is_usb_online(void)
+{
+ return 0;
+}
+
+static char *raumfeld_power_supplicants[] = { "ds2760-battery.0" };
+
+static struct pda_power_pdata power_supply_info = {
+ .init = power_supply_init,
+ .is_ac_online = raumfeld_is_ac_online,
+ .is_usb_online = raumfeld_is_usb_online,
+ .exit = power_supply_exit,
+ .supplied_to = raumfeld_power_supplicants,
+ .num_supplicants = ARRAY_SIZE(raumfeld_power_supplicants)
+};
+
+static struct resource power_supply_resources[] = {
+ {
+ .name = "ac",
+ .flags = IORESOURCE_IRQ |
+ IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+ .start = GPIO_CHARGE_DC_OK,
+ .end = GPIO_CHARGE_DC_OK,
+ },
+};
+
+static irqreturn_t charge_done_irq(int irq, void *dev_id)
+{
+ struct power_supply *psy;
+
+ psy = power_supply_get_by_name("ds2760-battery.0");
+
+ if (psy)
+ power_supply_set_battery_charged(psy);
+
+ return IRQ_HANDLED;
+}
+
+static struct platform_device raumfeld_power_supply = {
+ .name = "pda-power",
+ .id = -1,
+ .dev = {
+ .platform_data = &power_supply_info,
+ },
+ .resource = power_supply_resources,
+ .num_resources = ARRAY_SIZE(power_supply_resources),
+};
+
+static void __init raumfeld_power_init(void)
+{
+ int ret;
+
+ /* Set PEN2 high to enable maximum charge current */
+ ret = gpio_request(GPIO_CHRG_PEN2, "CHRG_PEN2");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_CHRG_PEN2\n");
+ else
+ gpio_direction_output(GPIO_CHRG_PEN2, 1);
+
+ ret = gpio_request(GPIO_CHARGE_DC_OK, "CABLE_DC_OK");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_CHARGE_DC_OK\n");
+
+ ret = gpio_request(GPIO_CHARGE_USB_SUSP, "CHARGE_USB_SUSP");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_CHARGE_USB_SUSP\n");
+ else
+ gpio_direction_output(GPIO_CHARGE_USB_SUSP, 0);
+
+ power_supply_resources[0].start = gpio_to_irq(GPIO_CHARGE_DC_OK);
+ power_supply_resources[0].end = gpio_to_irq(GPIO_CHARGE_DC_OK);
+
+ ret = request_irq(gpio_to_irq(GPIO_CHARGE_DONE),
+ &charge_done_irq, IORESOURCE_IRQ_LOWEDGE,
+ "charge_done", NULL);
+
+ if (ret < 0)
+ printk(KERN_ERR "%s: unable to register irq %d\n", __func__,
+ GPIO_CHARGE_DONE);
+ else
+ platform_device_register(&raumfeld_power_supply);
+}
+
+/* Fixed regulator for AUDIO_VA, 0-0048 maps to the cs4270 codec device */
+
+static struct regulator_consumer_supply audio_va_consumer_supply =
+ REGULATOR_SUPPLY("va", "0-0048");
+
+struct regulator_init_data audio_va_initdata = {
+ .consumer_supplies = &audio_va_consumer_supply,
+ .num_consumer_supplies = 1,
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct fixed_voltage_config audio_va_config = {
+ .supply_name = "audio_va",
+ .microvolts = 5000000,
+ .gpio = GPIO_AUDIO_VA_ENABLE,
+ .enable_high = 1,
+ .enabled_at_boot = 0,
+ .init_data = &audio_va_initdata,
+};
+
+static struct platform_device audio_va_device = {
+ .name = "reg-fixed-voltage",
+ .id = 0,
+ .dev = {
+ .platform_data = &audio_va_config,
+ },
+};
+
+/* Dummy supplies for Codec's VD/VLC */
+
+static struct regulator_consumer_supply audio_dummy_supplies[] = {
+ REGULATOR_SUPPLY("vd", "0-0048"),
+ REGULATOR_SUPPLY("vlc", "0-0048"),
+};
+
+struct regulator_init_data audio_dummy_initdata = {
+ .consumer_supplies = audio_dummy_supplies,
+ .num_consumer_supplies = ARRAY_SIZE(audio_dummy_supplies),
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+};
+
+static struct fixed_voltage_config audio_dummy_config = {
+ .supply_name = "audio_vd",
+ .microvolts = 3300000,
+ .gpio = -1,
+ .init_data = &audio_dummy_initdata,
+};
+
+static struct platform_device audio_supply_dummy_device = {
+ .name = "reg-fixed-voltage",
+ .id = 1,
+ .dev = {
+ .platform_data = &audio_dummy_config,
+ },
+};
+
+static struct platform_device *audio_regulator_devices[] = {
+ &audio_va_device,
+ &audio_supply_dummy_device,
+};
+
+/**
+ * Regulator support via MAX8660
+ */
+
+static struct regulator_consumer_supply vcc_mmc_supply =
+ REGULATOR_SUPPLY("vmmc", "pxa2xx-mci.0");
+
+static struct regulator_init_data vcc_mmc_init_data = {
+ .constraints = {
+ .min_uV = 3300000,
+ .max_uV = 3300000,
+ .valid_modes_mask = REGULATOR_MODE_NORMAL,
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS |
+ REGULATOR_CHANGE_VOLTAGE |
+ REGULATOR_CHANGE_MODE,
+ },
+ .consumer_supplies = &vcc_mmc_supply,
+ .num_consumer_supplies = 1,
+};
+
+struct max8660_subdev_data max8660_v6_subdev_data = {
+ .id = MAX8660_V6,
+ .name = "vmmc",
+ .platform_data = &vcc_mmc_init_data,
+};
+
+static struct max8660_platform_data max8660_pdata = {
+ .subdevs = &max8660_v6_subdev_data,
+ .num_subdevs = 1,
+};
+
+/**
+ * I2C devices
+ */
+
+static struct i2c_board_info raumfeld_pwri2c_board_info = {
+ .type = "max8660",
+ .addr = 0x34,
+ .platform_data = &max8660_pdata,
+};
+
+static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = {
+ .type = "cs4270",
+ .addr = 0x48,
+};
+
+static struct eeti_ts_platform_data eeti_ts_pdata = {
+ .irq_active_high = 1,
+};
+
+static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = {
+ .type = "eeti_ts",
+ .addr = 0x0a,
+ .irq = gpio_to_irq(GPIO_TOUCH_IRQ),
+ .platform_data = &eeti_ts_pdata,
+};
+
+static struct platform_device *raumfeld_common_devices[] = {
+ &raumfeld_gpio_keys_device,
+ &raumfeld_led_device,
+ &raumfeld_spi_device,
+};
+
+static void __init raumfeld_audio_init(void)
+{
+ int ret;
+
+ ret = gpio_request(GPIO_CODEC_RESET, "cs4270 reset");
+ if (ret < 0)
+ pr_warning("unable to request GPIO_CODEC_RESET\n");
+ else
+ gpio_direction_output(GPIO_CODEC_RESET, 1);
+
+ ret = gpio_request(GPIO_SPDIF_RESET, "ak4104 s/pdif reset");
+ if (ret < 0)
+ pr_warning("unable to request GPIO_SPDIF_RESET\n");
+ else
+ gpio_direction_output(GPIO_SPDIF_RESET, 1);
+
+ ret = gpio_request(GPIO_MCLK_RESET, "MCLK reset");
+ if (ret < 0)
+ pr_warning("unable to request GPIO_MCLK_RESET\n");
+ else
+ gpio_direction_output(GPIO_MCLK_RESET, 1);
+
+ platform_add_devices(ARRAY_AND_SIZE(audio_regulator_devices));
+}
+
+static void __init raumfeld_common_init(void)
+{
+ int ret;
+
+ /* The on/off button polarity has changed after revision 1 */
+ if ((system_rev & 0xff) > 1) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gpio_keys_button); i++)
+ if (!strcmp(gpio_keys_button[i].desc, "on/off button"))
+ gpio_keys_button[i].active_low = 1;
+ }
+
+ enable_irq_wake(IRQ_WAKEUP0);
+
+ pxa3xx_set_nand_info(&raumfeld_nand_info);
+ pxa3xx_set_i2c_power_info(NULL);
+ pxa_set_ohci_info(&raumfeld_ohci_info);
+ pxa_set_mci_info(&raumfeld_mci_platform_data);
+ pxa_set_i2c_info(NULL);
+ pxa_set_ffuart_info(NULL);
+
+ ret = gpio_request(GPIO_W2W_RESET, "Wi2Wi reset");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_W2W_RESET\n");
+ else
+ gpio_direction_output(GPIO_W2W_RESET, 0);
+
+ ret = gpio_request(GPIO_W2W_PDN, "Wi2Wi powerup");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_W2W_PDN\n");
+ else
+ gpio_direction_output(GPIO_W2W_PDN, 0);
+
+ /* this can be used to switch off the device */
+ ret = gpio_request(GPIO_SHUTDOWN_SUPPLY,
+ "supply shutdown");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_SHUTDOWN_SUPPLY\n");
+ else
+ gpio_direction_output(GPIO_SHUTDOWN_SUPPLY, 0);
+
+ platform_add_devices(ARRAY_AND_SIZE(raumfeld_common_devices));
+ i2c_register_board_info(1, &raumfeld_pwri2c_board_info, 1);
+}
+
+static void __init raumfeld_controller_init(void)
+{
+ int ret;
+
+ pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_controller_pin_config));
+ platform_device_register(&rotary_encoder_device);
+ spi_register_board_info(ARRAY_AND_SIZE(controller_spi_devices));
+ i2c_register_board_info(0, &raumfeld_controller_i2c_board_info, 1);
+
+ ret = gpio_request(GPIO_SHUTDOWN_BATT, "battery shutdown");
+ if (ret < 0)
+ pr_warning("Unable to request GPIO_SHUTDOWN_BATT\n");
+ else
+ gpio_direction_output(GPIO_SHUTDOWN_BATT, 0);
+
+ raumfeld_common_init();
+ raumfeld_power_init();
+ raumfeld_lcd_init();
+ raumfeld_w1_init();
+}
+
+static void __init raumfeld_connector_init(void)
+{
+ pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_connector_pin_config));
+ spi_register_board_info(ARRAY_AND_SIZE(connector_spi_devices));
+ i2c_register_board_info(0, &raumfeld_connector_i2c_board_info, 1);
+
+ platform_device_register(&smc91x_device);
+
+ raumfeld_audio_init();
+ raumfeld_common_init();
+}
+
+static void __init raumfeld_speaker_init(void)
+{
+ pxa3xx_mfp_config(ARRAY_AND_SIZE(raumfeld_speaker_pin_config));
+ spi_register_board_info(ARRAY_AND_SIZE(speaker_spi_devices));
+ i2c_register_board_info(0, &raumfeld_connector_i2c_board_info, 1);
+
+ platform_device_register(&smc91x_device);
+ platform_device_register(&rotary_encoder_device);
+
+ raumfeld_audio_init();
+ raumfeld_common_init();
+}
+
+/* physical memory regions */
+#define RAUMFELD_SDRAM_BASE 0xa0000000 /* SDRAM region */
+
+#ifdef CONFIG_MACH_RAUMFELD_RC
+MACHINE_START(RAUMFELD_RC, "Raumfeld Controller")
+ .phys_io = 0x40000000,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .boot_params = RAUMFELD_SDRAM_BASE + 0x100,
+ .init_machine = raumfeld_controller_init,
+ .map_io = pxa_map_io,
+ .init_irq = pxa3xx_init_irq,
+ .timer = &pxa_timer,
+MACHINE_END
+#endif
+
+#ifdef CONFIG_MACH_RAUMFELD_CONNECTOR
+MACHINE_START(RAUMFELD_CONNECTOR, "Raumfeld Connector")
+ .phys_io = 0x40000000,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .boot_params = RAUMFELD_SDRAM_BASE + 0x100,
+ .init_machine = raumfeld_connector_init,
+ .map_io = pxa_map_io,
+ .init_irq = pxa3xx_init_irq,
+ .timer = &pxa_timer,
+MACHINE_END
+#endif
+
+#ifdef CONFIG_MACH_RAUMFELD_SPEAKER
+MACHINE_START(RAUMFELD_SPEAKER, "Raumfeld Speaker")
+ .phys_io = 0x40000000,
+ .io_pg_offst = (io_p2v(0x40000000) >> 18) & 0xfffc,
+ .boot_params = RAUMFELD_SDRAM_BASE + 0x100,
+ .init_machine = raumfeld_speaker_init,
+ .map_io = pxa_map_io,
+ .init_irq = pxa3xx_init_irq,
+ .timer = &pxa_timer,
+MACHINE_END
+#endif
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 4b50f144fa48..28352c0b8c34 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -389,13 +389,13 @@ static struct gpio_keys_button spitz_gpio_keys[] = {
.type = EV_SW,
.code = 0,
.gpio = SPITZ_GPIO_SWA,
- .desc = "Display Down",
+ .desc = "Display Down",
},
{
.type = EV_SW,
.code = 1,
.gpio = SPITZ_GPIO_SWB,
- .desc = "Lid Closed",
+ .desc = "Lid Closed",
},
};
diff --git a/arch/arm/mach-pxa/viper.c b/arch/arm/mach-pxa/viper.c
index 5352b4e5a7dd..89f258c9e126 100644
--- a/arch/arm/mach-pxa/viper.c
+++ b/arch/arm/mach-pxa/viper.c
@@ -379,7 +379,7 @@ err_request_bckl:
return ret;
}
-static int viper_backlight_notify(int brightness)
+static int viper_backlight_notify(struct device *dev, int brightness)
{
gpio_set_value(VIPER_LCD_EN_GPIO, !!brightness);
gpio_set_value(VIPER_BCKLIGHT_EN_GPIO, !!brightness);
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 5b986a8bd9e6..75f2a37f945d 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -25,6 +25,7 @@
#include <linux/mtd/physmap.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
+#include <linux/apm-emulation.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@@ -626,8 +627,27 @@ static void zeus_power_off(void)
pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP);
}
-int zeus_get_pcb_info(struct i2c_client *client, unsigned gpio,
- unsigned ngpio, void *context)
+#ifdef CONFIG_APM_EMULATION
+static void zeus_get_power_status(struct apm_power_info *info)
+{
+ /* Power supply is always present */
+ info->ac_line_status = APM_AC_ONLINE;
+ info->battery_status = APM_BATTERY_STATUS_NOT_PRESENT;
+ info->battery_flag = APM_BATTERY_FLAG_NOT_PRESENT;
+}
+
+static inline void zeus_setup_apm(void)
+{
+ apm_get_power_status = zeus_get_power_status;
+}
+#else
+static inline void zeus_setup_apm(void)
+{
+}
+#endif
+
+static int zeus_get_pcb_info(struct i2c_client *client, unsigned gpio,
+ unsigned ngpio, void *context)
{
int i;
u8 pcb_info = 0;
@@ -726,9 +746,18 @@ static mfp_cfg_t zeus_pin_config[] __initdata = {
GPIO99_GPIO, /* CF RDY */
};
+/*
+ * DM9k MSCx settings: SRAM, 16 bits
+ * 17 cycles delay first access
+ * 5 cycles delay next access
+ * 13 cycles recovery time
+ * faster device
+ */
+#define DM9K_MSC_VALUE 0xe4c9
+
static void __init zeus_init(void)
{
- u16 dm9000_msc = 0xe279;
+ u16 dm9000_msc = DM9K_MSC_VALUE;
system_rev = __raw_readw(ZEUS_CPLD_VERSION);
pr_info("Zeus CPLD V%dI%d\n", (system_rev & 0xf0) >> 4, (system_rev & 0x0f));
@@ -738,6 +767,7 @@ static void __init zeus_init(void)
MSC1 = (MSC1 & 0xffff0000) | dm9000_msc;
pm_power_off = zeus_power_off;
+ zeus_setup_apm();
pxa2xx_mfp_config(ARRAY_AND_SIZE(zeus_pin_config));
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index b66e9e2d06e7..2b4043c04d0c 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -36,9 +36,6 @@
#include "devices.h"
#include "generic.h"
-#define MAX_SLOTS 3
-struct platform_mmc_slot zylonite_mmc_slot[MAX_SLOTS];
-
int gpio_eth_irq;
int gpio_debug_led1;
int gpio_debug_led2;
@@ -220,84 +217,28 @@ static inline void zylonite_init_lcd(void) {}
#endif
#if defined(CONFIG_MMC)
-static int zylonite_mci_ro(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- return gpio_get_value(zylonite_mmc_slot[pdev->id].gpio_wp);
-}
-
-static int zylonite_mci_init(struct device *dev,
- irq_handler_t zylonite_detect_int,
- void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int err, cd_irq, gpio_cd, gpio_wp;
-
- cd_irq = gpio_to_irq(zylonite_mmc_slot[pdev->id].gpio_cd);
- gpio_cd = zylonite_mmc_slot[pdev->id].gpio_cd;
- gpio_wp = zylonite_mmc_slot[pdev->id].gpio_wp;
-
- /*
- * setup GPIO for Zylonite MMC controller
- */
- err = gpio_request(gpio_cd, "mmc card detect");
- if (err)
- goto err_request_cd;
- gpio_direction_input(gpio_cd);
-
- err = gpio_request(gpio_wp, "mmc write protect");
- if (err)
- goto err_request_wp;
- gpio_direction_input(gpio_wp);
-
- err = request_irq(cd_irq, zylonite_detect_int,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "MMC card detect", data);
- if (err) {
- printk(KERN_ERR "%s: MMC/SD/SDIO: "
- "can't request card detect IRQ\n", __func__);
- goto err_request_irq;
- }
-
- return 0;
-
-err_request_irq:
- gpio_free(gpio_wp);
-err_request_wp:
- gpio_free(gpio_cd);
-err_request_cd:
- return err;
-}
-
-static void zylonite_mci_exit(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- int cd_irq, gpio_cd, gpio_wp;
-
- cd_irq = gpio_to_irq(zylonite_mmc_slot[pdev->id].gpio_cd);
- gpio_cd = zylonite_mmc_slot[pdev->id].gpio_cd;
- gpio_wp = zylonite_mmc_slot[pdev->id].gpio_wp;
-
- free_irq(cd_irq, data);
- gpio_free(gpio_cd);
- gpio_free(gpio_wp);
-}
-
static struct pxamci_platform_data zylonite_mci_platform_data = {
.detect_delay = 20,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
- .init = zylonite_mci_init,
- .exit = zylonite_mci_exit,
- .get_ro = zylonite_mci_ro,
- .gpio_card_detect = -1,
- .gpio_card_ro = -1,
+ .gpio_card_detect = EXT_GPIO(0),
+ .gpio_card_ro = EXT_GPIO(2),
.gpio_power = -1,
};
static struct pxamci_platform_data zylonite_mci2_platform_data = {
.detect_delay = 20,
.ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .gpio_card_detect = EXT_GPIO(1),
+ .gpio_card_ro = EXT_GPIO(3),
+ .gpio_power = -1,
+};
+
+static struct pxamci_platform_data zylonite_mci3_platform_data = {
+ .detect_delay = 20,
+ .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34,
+ .gpio_card_detect = EXT_GPIO(30),
+ .gpio_card_ro = EXT_GPIO(31),
+ .gpio_power = -1,
};
static void __init zylonite_init_mmc(void)
@@ -305,7 +246,7 @@ static void __init zylonite_init_mmc(void)
pxa_set_mci_info(&zylonite_mci_platform_data);
pxa3xx_set_mci2_info(&zylonite_mci2_platform_data);
if (cpu_is_pxa310())
- pxa3xx_set_mci3_info(&zylonite_mci_platform_data);
+ pxa3xx_set_mci3_info(&zylonite_mci3_platform_data);
}
#else
static inline void zylonite_init_mmc(void) {}
diff --git a/arch/arm/mach-pxa/zylonite_pxa300.c b/arch/arm/mach-pxa/zylonite_pxa300.c
index 84095440a878..3aa73b3e33f2 100644
--- a/arch/arm/mach-pxa/zylonite_pxa300.c
+++ b/arch/arm/mach-pxa/zylonite_pxa300.c
@@ -129,8 +129,8 @@ static mfp_cfg_t common_mfp_cfg[] __initdata = {
GPIO22_I2C_SDA,
/* GPIO */
- GPIO18_GPIO, /* GPIO Expander #0 INT_N */
- GPIO19_GPIO, /* GPIO Expander #1 INT_N */
+ GPIO18_GPIO | MFP_PULL_HIGH, /* GPIO Expander #0 INT_N */
+ GPIO19_GPIO | MFP_PULL_HIGH, /* GPIO Expander #1 INT_N */
};
static mfp_cfg_t pxa300_mfp_cfg[] __initdata = {
@@ -258,10 +258,6 @@ void __init zylonite_pxa300_init(void)
/* detect LCD panel */
zylonite_detect_lcd_panel();
- /* MMC card detect & write protect for controller 0 */
- zylonite_mmc_slot[0].gpio_cd = EXT_GPIO(0);
- zylonite_mmc_slot[0].gpio_wp = EXT_GPIO(2);
-
/* WM9713 IRQ */
wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO26);
@@ -276,10 +272,6 @@ void __init zylonite_pxa300_init(void)
if (cpu_is_pxa310()) {
pxa3xx_mfp_config(ARRAY_AND_SIZE(pxa310_mfp_cfg));
gpio_eth_irq = mfp_to_gpio(MFP_PIN_GPIO102);
-
- /* MMC card detect & write protect for controller 2 */
- zylonite_mmc_slot[2].gpio_cd = EXT_GPIO(30);
- zylonite_mmc_slot[2].gpio_wp = EXT_GPIO(31);
}
/* GPIOs for Debug LEDs */
diff --git a/arch/arm/mach-pxa/zylonite_pxa320.c b/arch/arm/mach-pxa/zylonite_pxa320.c
index 60d08f23f5e4..9942bac4cf7d 100644
--- a/arch/arm/mach-pxa/zylonite_pxa320.c
+++ b/arch/arm/mach-pxa/zylonite_pxa320.c
@@ -209,10 +209,6 @@ void __init zylonite_pxa320_init(void)
gpio_debug_led1 = mfp_to_gpio(MFP_PIN_GPIO1_2);
gpio_debug_led2 = mfp_to_gpio(MFP_PIN_GPIO4_2);
- /* MMC card detect & write protect for controller 0 */
- zylonite_mmc_slot[0].gpio_cd = mfp_to_gpio(MFP_PIN_GPIO1);
- zylonite_mmc_slot[0].gpio_wp = mfp_to_gpio(MFP_PIN_GPIO5);
-
/* WM9713 IRQ */
wm9713_irq = mfp_to_gpio(MFP_PIN_GPIO15);
}
diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c
index 9f293438e020..90bd4ef71b2c 100644
--- a/arch/arm/mach-realview/core.c
+++ b/arch/arm/mach-realview/core.c
@@ -346,10 +346,7 @@ static struct clk_lookup lookups[] = {
static int __init clk_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return 0;
}
arch_initcall(clk_init);
diff --git a/arch/arm/mach-realview/include/mach/board-pb1176.h b/arch/arm/mach-realview/include/mach/board-pb1176.h
index 34b80b7d40b8..2f5ccb298858 100644
--- a/arch/arm/mach-realview/include/mach/board-pb1176.h
+++ b/arch/arm/mach-realview/include/mach/board-pb1176.h
@@ -74,8 +74,8 @@
#define REALVIEW_PB1176_L220_BASE 0x10110000 /* L220 registers */
/*
- * Control register SYS_RESETCTL is set to 1 to force a soft reset
+ * Control register SYS_RESETCTL Bit 8 is set to 1 to force a soft reset
*/
-#define REALVIEW_PB1176_SYS_LOCKVAL_RSTCTL 0x0100
+#define REALVIEW_PB1176_SYS_SOFT_RESET 0x0100
#endif /* __ASM_ARCH_BOARD_PB1176_H */
diff --git a/arch/arm/mach-realview/include/mach/platform.h b/arch/arm/mach-realview/include/mach/platform.h
index 4f46bf71e752..86c0c4435a46 100644
--- a/arch/arm/mach-realview/include/mach/platform.h
+++ b/arch/arm/mach-realview/include/mach/platform.h
@@ -140,7 +140,7 @@
* SYS_CLD, SYS_BOOTCS
*/
#define REALVIEW_SYS_LOCK_LOCKED (1 << 16)
-#define REALVIEW_SYS_LOCKVAL_MASK 0xA05F /* Enable write access */
+#define REALVIEW_SYS_LOCK_VAL 0xA05F /* Enable write access */
/*
* REALVIEW_SYS_FLASH
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 917f8ca3abff..7d857d300558 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -381,6 +381,20 @@ static struct sys_timer realview_eb_timer = {
.init = realview_eb_timer_init,
};
+static void realview_eb_reset(char mode)
+{
+ void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
+ void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
+
+ /*
+ * To reset, we hit the on-board reset register
+ * in the system FPGA
+ */
+ __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
+ if (core_tile_eb11mp())
+ __raw_writel(0x0008, reset_ctrl);
+}
+
static void __init realview_eb_init(void)
{
int i;
@@ -408,6 +422,7 @@ static void __init realview_eb_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
+ realview_reset = realview_eb_reset;
}
MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index 7fb726d5f8b9..44392e51dd50 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -292,12 +292,10 @@ static struct sys_timer realview_pb1176_timer = {
static void realview_pb1176_reset(char mode)
{
- void __iomem *hdr_ctrl = __io_address(REALVIEW_SYS_BASE) +
- REALVIEW_SYS_RESETCTL_OFFSET;
- void __iomem *rst_hdr_ctrl = __io_address(REALVIEW_SYS_BASE) +
- REALVIEW_SYS_LOCK_OFFSET;
- __raw_writel(REALVIEW_SYS_LOCKVAL_MASK, rst_hdr_ctrl);
- __raw_writel(REALVIEW_PB1176_SYS_LOCKVAL_RSTCTL, hdr_ctrl);
+ void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
+ void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
+ __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
+ __raw_writel(REALVIEW_PB1176_SYS_SOFT_RESET, reset_ctrl);
}
static void realview_pb1176_fixup(struct machine_desc *mdesc,
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index 9bbbfc05f225..3e02731af959 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -301,17 +301,16 @@ static struct sys_timer realview_pb11mp_timer = {
static void realview_pb11mp_reset(char mode)
{
- void __iomem *hdr_ctrl = __io_address(REALVIEW_SYS_BASE) +
- REALVIEW_SYS_RESETCTL_OFFSET;
- unsigned int val;
+ void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
+ void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
/*
* To reset, we hit the on-board reset register
* in the system FPGA
*/
- val = __raw_readl(hdr_ctrl);
- val |= REALVIEW_PB11MP_SYS_CTRL_RESET_CONFIGCLR;
- __raw_writel(val, hdr_ctrl);
+ __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
+ __raw_writel(0x0000, reset_ctrl);
+ __raw_writel(0x0004, reset_ctrl);
}
static void __init realview_pb11mp_init(void)
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index fe861e96c566..fe4e25c4201a 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -272,6 +272,20 @@ static struct sys_timer realview_pba8_timer = {
.init = realview_pba8_timer_init,
};
+static void realview_pba8_reset(char mode)
+{
+ void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
+ void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
+
+ /*
+ * To reset, we hit the on-board reset register
+ * in the system FPGA
+ */
+ __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
+ __raw_writel(0x0000, reset_ctrl);
+ __raw_writel(0x0004, reset_ctrl);
+}
+
static void __init realview_pba8_init(void)
{
int i;
@@ -291,6 +305,7 @@ static void __init realview_pba8_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
+ realview_reset = realview_pba8_reset;
}
MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index ec39488e2b42..a21a4b395f73 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -324,6 +324,20 @@ static void realview_pbx_fixup(struct machine_desc *mdesc, struct tag *tags,
#endif
}
+static void realview_pbx_reset(char mode)
+{
+ void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL);
+ void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK);
+
+ /*
+ * To reset, we hit the on-board reset register
+ * in the system FPGA
+ */
+ __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl);
+ __raw_writel(0x0000, reset_ctrl);
+ __raw_writel(0x0004, reset_ctrl);
+}
+
static void __init realview_pbx_init(void)
{
int i;
@@ -358,6 +372,7 @@ static void __init realview_pbx_init(void)
#ifdef CONFIG_LEDS
leds_event = realview_leds_event;
#endif
+ realview_reset = realview_pbx_reset;
}
MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
diff --git a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
index 980a099e209c..dcef2287cb38 100644
--- a/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
+++ b/arch/arm/mach-s3c2410/include/mach/spi-gpio.h
@@ -3,7 +3,7 @@
* Copyright (c) 2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
- * S3C2410 - SPI Controller platfrom_device info
+ * S3C2410 - SPI Controller platform_device info
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/mach-s3c2412/clock.c b/arch/arm/mach-s3c2412/clock.c
index a037df5e1c2d..0c0505b025cb 100644
--- a/arch/arm/mach-s3c2412/clock.c
+++ b/arch/arm/mach-s3c2412/clock.c
@@ -124,7 +124,9 @@ static struct clk clk_usysclk = {
.name = "usysclk",
.id = -1,
.parent = &clk_xtal,
- .set_parent = s3c2412_setparent_usysclk,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2412_setparent_usysclk,
+ },
};
static struct clk clk_mrefclk = {
@@ -199,10 +201,12 @@ static int s3c2412_setrate_usbsrc(struct clk *clk, unsigned long rate)
static struct clk clk_usbsrc = {
.name = "usbsrc",
.id = -1,
- .get_rate = s3c2412_getrate_usbsrc,
- .set_rate = s3c2412_setrate_usbsrc,
- .round_rate = s3c2412_roundrate_usbsrc,
- .set_parent = s3c2412_setparent_usbsrc,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2412_getrate_usbsrc,
+ .set_rate = s3c2412_setrate_usbsrc,
+ .round_rate = s3c2412_roundrate_usbsrc,
+ .set_parent = s3c2412_setparent_usbsrc,
+ },
};
static int s3c2412_setparent_msysclk(struct clk *clk, struct clk *parent)
@@ -225,7 +229,9 @@ static int s3c2412_setparent_msysclk(struct clk *clk, struct clk *parent)
static struct clk clk_msysclk = {
.name = "msysclk",
.id = -1,
- .set_parent = s3c2412_setparent_msysclk,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2412_setparent_msysclk,
+ },
};
static int s3c2412_setparent_armclk(struct clk *clk, struct clk *parent)
@@ -264,7 +270,9 @@ static struct clk clk_armclk = {
.name = "armclk",
.id = -1,
.parent = &clk_msysclk,
- .set_parent = s3c2412_setparent_armclk,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2412_setparent_armclk,
+ },
};
/* these next clocks have an divider immediately after them,
@@ -337,10 +345,12 @@ static int s3c2412_setrate_uart(struct clk *clk, unsigned long rate)
static struct clk clk_uart = {
.name = "uartclk",
.id = -1,
- .get_rate = s3c2412_getrate_uart,
- .set_rate = s3c2412_setrate_uart,
- .set_parent = s3c2412_setparent_uart,
- .round_rate = s3c2412_roundrate_clksrc,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2412_getrate_uart,
+ .set_rate = s3c2412_setrate_uart,
+ .set_parent = s3c2412_setparent_uart,
+ .round_rate = s3c2412_roundrate_clksrc,
+ },
};
static int s3c2412_setparent_i2s(struct clk *clk, struct clk *parent)
@@ -388,10 +398,12 @@ static int s3c2412_setrate_i2s(struct clk *clk, unsigned long rate)
static struct clk clk_i2s = {
.name = "i2sclk",
.id = -1,
- .get_rate = s3c2412_getrate_i2s,
- .set_rate = s3c2412_setrate_i2s,
- .set_parent = s3c2412_setparent_i2s,
- .round_rate = s3c2412_roundrate_clksrc,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2412_getrate_i2s,
+ .set_rate = s3c2412_setrate_i2s,
+ .set_parent = s3c2412_setparent_i2s,
+ .round_rate = s3c2412_roundrate_clksrc,
+ },
};
static int s3c2412_setparent_cam(struct clk *clk, struct clk *parent)
@@ -438,10 +450,12 @@ static int s3c2412_setrate_cam(struct clk *clk, unsigned long rate)
static struct clk clk_cam = {
.name = "camif-upll", /* same as 2440 name */
.id = -1,
- .get_rate = s3c2412_getrate_cam,
- .set_rate = s3c2412_setrate_cam,
- .set_parent = s3c2412_setparent_cam,
- .round_rate = s3c2412_roundrate_clksrc,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2412_getrate_cam,
+ .set_rate = s3c2412_setrate_cam,
+ .set_parent = s3c2412_setparent_cam,
+ .round_rate = s3c2412_roundrate_clksrc,
+ },
};
/* standard clock definitions */
diff --git a/arch/arm/mach-s3c2440/clock.c b/arch/arm/mach-s3c2440/clock.c
index d1c29b2537cd..3dc2426e2345 100644
--- a/arch/arm/mach-s3c2440/clock.c
+++ b/arch/arm/mach-s3c2440/clock.c
@@ -98,8 +98,10 @@ static struct clk s3c2440_clk_cam = {
static struct clk s3c2440_clk_cam_upll = {
.name = "camif-upll",
.id = -1,
- .set_rate = s3c2440_camif_upll_setrate,
- .round_rate = s3c2440_camif_upll_round,
+ .ops = &(struct clk_ops) {
+ .set_rate = s3c2440_camif_upll_setrate,
+ .round_rate = s3c2440_camif_upll_round,
+ },
};
static struct clk s3c2440_clk_ac97 = {
diff --git a/arch/arm/mach-s3c2442/clock.c b/arch/arm/mach-s3c2442/clock.c
index ea1aa1f5157a..d9b692a12480 100644
--- a/arch/arm/mach-s3c2442/clock.c
+++ b/arch/arm/mach-s3c2442/clock.c
@@ -109,8 +109,10 @@ static struct clk s3c2442_clk_cam = {
static struct clk s3c2442_clk_cam_upll = {
.name = "camif-upll",
.id = -1,
- .set_rate = s3c2442_camif_upll_setrate,
- .round_rate = s3c2442_camif_upll_round,
+ .ops = &(struct clk_ops) {
+ .set_rate = s3c2442_camif_upll_setrate,
+ .round_rate = s3c2442_camif_upll_round,
+ },
};
static int s3c2442_clk_add(struct sys_device *sysdev)
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c
index 2785d69c95b0..3eb8b935d64c 100644
--- a/arch/arm/mach-s3c2443/clock.c
+++ b/arch/arm/mach-s3c2443/clock.c
@@ -187,7 +187,9 @@ static int s3c2443_setparent_epllref(struct clk *clk, struct clk *parent)
static struct clk clk_epllref = {
.name = "epllref",
.id = -1,
- .set_parent = s3c2443_setparent_epllref,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2443_setparent_epllref,
+ },
};
static unsigned long s3c2443_getrate_mdivclk(struct clk *clk)
@@ -205,7 +207,9 @@ static struct clk clk_mdivclk = {
.name = "mdivclk",
.parent = &clk_mpllref,
.id = -1,
- .get_rate = s3c2443_getrate_mdivclk,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_mdivclk,
+ },
};
static int s3c2443_setparent_msysclk(struct clk *clk, struct clk *parent)
@@ -232,7 +236,9 @@ static struct clk clk_msysclk = {
.name = "msysclk",
.parent = &clk_xtal,
.id = -1,
- .set_parent = s3c2443_setparent_msysclk,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2443_setparent_msysclk,
+ },
};
/* armdiv
@@ -273,7 +279,9 @@ static int s3c2443_setparent_armclk(struct clk *clk, struct clk *parent)
static struct clk clk_arm = {
.name = "armclk",
.id = -1,
- .set_parent = s3c2443_setparent_armclk,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2443_setparent_armclk,
+ },
};
/* esysclk
@@ -302,7 +310,9 @@ static struct clk clk_esysclk = {
.name = "esysclk",
.parent = &clk_epll,
.id = -1,
- .set_parent = s3c2443_setparent_esysclk,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2443_setparent_esysclk,
+ },
};
/* uartclk
@@ -341,9 +351,11 @@ static struct clk clk_uart = {
.name = "uartclk",
.id = -1,
.parent = &clk_esysclk,
- .get_rate = s3c2443_getrate_uart,
- .set_rate = s3c2443_setrate_uart,
- .round_rate = s3c2443_roundrate_clksrc16,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_uart,
+ .set_rate = s3c2443_setrate_uart,
+ .round_rate = s3c2443_roundrate_clksrc16,
+ },
};
/* hsspi
@@ -384,9 +396,11 @@ static struct clk clk_hsspi = {
.parent = &clk_esysclk,
.ctrlbit = S3C2443_SCLKCON_HSSPICLK,
.enable = s3c2443_clkcon_enable_s,
- .get_rate = s3c2443_getrate_hsspi,
- .set_rate = s3c2443_setrate_hsspi,
- .round_rate = s3c2443_roundrate_clksrc4,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_hsspi,
+ .set_rate = s3c2443_setrate_hsspi,
+ .round_rate = s3c2443_roundrate_clksrc4,
+ },
};
/* usbhost
@@ -426,9 +440,11 @@ static struct clk clk_usb_bus_host = {
.parent = &clk_esysclk,
.ctrlbit = S3C2443_SCLKCON_USBHOST,
.enable = s3c2443_clkcon_enable_s,
- .get_rate = s3c2443_getrate_usbhost,
- .set_rate = s3c2443_setrate_usbhost,
- .round_rate = s3c2443_roundrate_clksrc4,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_usbhost,
+ .set_rate = s3c2443_setrate_usbhost,
+ .round_rate = s3c2443_roundrate_clksrc4,
+ },
};
/* clk_hsmcc_div
@@ -468,9 +484,11 @@ static struct clk clk_hsmmc_div = {
.name = "hsmmc-div",
.id = -1,
.parent = &clk_esysclk,
- .get_rate = s3c2443_getrate_hsmmc_div,
- .set_rate = s3c2443_setrate_hsmmc_div,
- .round_rate = s3c2443_roundrate_clksrc4,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_hsmmc_div,
+ .set_rate = s3c2443_setrate_hsmmc_div,
+ .round_rate = s3c2443_roundrate_clksrc4,
+ },
};
static int s3c2443_setparent_hsmmc(struct clk *clk, struct clk *parent)
@@ -505,7 +523,9 @@ static struct clk clk_hsmmc = {
.id = -1,
.parent = &clk_hsmmc_div,
.enable = s3c2443_enable_hsmmc,
- .set_parent = s3c2443_setparent_hsmmc,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2443_setparent_hsmmc,
+ },
};
/* i2s_eplldiv
@@ -543,9 +563,11 @@ static struct clk clk_i2s_eplldiv = {
.name = "i2s-eplldiv",
.id = -1,
.parent = &clk_esysclk,
- .get_rate = s3c2443_getrate_i2s_eplldiv,
- .set_rate = s3c2443_setrate_i2s_eplldiv,
- .round_rate = s3c2443_roundrate_clksrc16,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_i2s_eplldiv,
+ .set_rate = s3c2443_setrate_i2s_eplldiv,
+ .round_rate = s3c2443_roundrate_clksrc16,
+ },
};
/* i2s-ref
@@ -578,7 +600,9 @@ static struct clk clk_i2s = {
.parent = &clk_i2s_eplldiv,
.ctrlbit = S3C2443_SCLKCON_I2SCLK,
.enable = s3c2443_clkcon_enable_s,
- .set_parent = s3c2443_setparent_i2s,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2443_setparent_i2s,
+ },
};
/* cam-if
@@ -618,9 +642,11 @@ static struct clk clk_cam = {
.parent = &clk_esysclk,
.ctrlbit = S3C2443_SCLKCON_CAMCLK,
.enable = s3c2443_clkcon_enable_s,
- .get_rate = s3c2443_getrate_cam,
- .set_rate = s3c2443_setrate_cam,
- .round_rate = s3c2443_roundrate_clksrc16,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_cam,
+ .set_rate = s3c2443_setrate_cam,
+ .round_rate = s3c2443_roundrate_clksrc16,
+ },
};
/* display-if
@@ -660,9 +686,11 @@ static struct clk clk_display = {
.parent = &clk_esysclk,
.ctrlbit = S3C2443_SCLKCON_DISPCLK,
.enable = s3c2443_clkcon_enable_s,
- .get_rate = s3c2443_getrate_display,
- .set_rate = s3c2443_setrate_display,
- .round_rate = s3c2443_roundrate_clksrc256,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_getrate_display,
+ .set_rate = s3c2443_setrate_display,
+ .round_rate = s3c2443_roundrate_clksrc256,
+ },
};
/* prediv
@@ -685,7 +713,9 @@ static struct clk clk_prediv = {
.name = "prediv",
.id = -1,
.parent = &clk_msysclk,
- .get_rate = s3c2443_prediv_getrate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c2443_prediv_getrate,
+ },
};
/* standard clock definitions */
@@ -1074,14 +1104,7 @@ void __init s3c2443_init_clocks(int xtal)
/* register clocks from clock array */
- clkp = init_clocks;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
- }
+ s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
/* We must be careful disabling the clocks we are not intending to
* be using at boot time, as subsystems such as the LCD which do
diff --git a/arch/arm/mach-s3c6400/include/mach/entry-macro.S b/arch/arm/mach-s3c6400/include/mach/entry-macro.S
index fbd90d2cf355..33a8fe240882 100644
--- a/arch/arm/mach-s3c6400/include/mach/entry-macro.S
+++ b/arch/arm/mach-s3c6400/include/mach/entry-macro.S
@@ -12,33 +12,7 @@
* warranty of any kind, whether express or implied.
*/
-#include <asm/hardware/vic.h>
#include <mach/map.h>
#include <plat/irqs.h>
- .macro disable_fiq
- .endm
-
- .macro get_irqnr_preamble, base, tmp
- ldr \base, =S3C_VA_VIC0
- .endm
-
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
-
- .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
-
- @ check the vic0
- mov \irqnr, # S3C_IRQ_OFFSET + 31
- ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
- teq \irqstat, #0
-
- @ otherwise try vic1
- addeq \tmp, \base, #(S3C_VA_VIC1 - S3C_VA_VIC0)
- addeq \irqnr, \irqnr, #32
- ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
- teqeq \irqstat, #0
-
- clzne \irqstat, \irqstat
- subne \irqnr, \irqnr, \irqstat
- .endm
+#include <asm/entry-macro-vic2.S>
diff --git a/arch/arm/mach-s3c6400/include/mach/map.h b/arch/arm/mach-s3c6400/include/mach/map.h
index 106ee13581e2..d4cd3abe3cba 100644
--- a/arch/arm/mach-s3c6400/include/mach/map.h
+++ b/arch/arm/mach-s3c6400/include/mach/map.h
@@ -70,8 +70,8 @@
#define S3C64XX_VA_USB_HSPHY S3C_ADDR_CPU(0x00200000)
/* place VICs close together */
-#define S3C_VA_VIC0 (S3C_VA_IRQ + 0x00)
-#define S3C_VA_VIC1 (S3C_VA_IRQ + 0x10000)
+#define VA_VIC0 (S3C_VA_IRQ + 0x00)
+#define VA_VIC1 (S3C_VA_IRQ + 0x10000)
/* compatibiltiy defines. */
#define S3C_PA_TIMER S3C64XX_PA_TIMER
diff --git a/arch/arm/mach-s3c6400/include/mach/tick.h b/arch/arm/mach-s3c6400/include/mach/tick.h
index d9c0dc7014ec..ebe18a9469b8 100644
--- a/arch/arm/mach-s3c6400/include/mach/tick.h
+++ b/arch/arm/mach-s3c6400/include/mach/tick.h
@@ -20,7 +20,7 @@
*/
static inline u32 s3c24xx_ostimer_pending(void)
{
- u32 pend = __raw_readl(S3C_VA_VIC0 + VIC_RAW_STATUS);
+ u32 pend = __raw_readl(VA_VIC0 + VIC_RAW_STATUS);
return pend & 1 << (IRQ_TIMER4_VIC - S3C64XX_IRQ_VIC0(0));
}
diff --git a/arch/arm/mach-s5pc100/setup-sdhci.c b/arch/arm/mach-s5pc100/setup-sdhci.c
index 4385986a3da0..ea7ff19adb95 100644
--- a/arch/arm/mach-s5pc100/setup-sdhci.c
+++ b/arch/arm/mach-s5pc100/setup-sdhci.c
@@ -28,8 +28,8 @@
char *s5pc100_hsmmc_clksrcs[4] = {
[0] = "hsmmc",
[1] = "hsmmc",
- /* [2] = "mmc_bus", not yet succesfuuly used yet */
- /* [3] = "48m", - note not succesfully used yet */
+ /* [2] = "mmc_bus", not yet successfully used yet */
+ /* [3] = "48m", - note not successfully used yet */
};
diff --git a/arch/arm/mach-u300/clock.c b/arch/arm/mach-u300/clock.c
index 111f7ea32b38..c174ed1f3691 100644
--- a/arch/arm/mach-u300/clock.c
+++ b/arch/arm/mach-u300/clock.c
@@ -1276,11 +1276,8 @@ static struct clk_lookup lookups[] = {
static void __init clk_register(void)
{
- int i;
-
/* Register the lookups */
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
}
/*
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 20b6ebb6783a..8359a73d0041 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -85,11 +85,8 @@ static struct clk_lookup lookups[] = {
static int __init clk_init(void)
{
- int i;
-
/* register the clock lookups */
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
return 0;
}
arch_initcall(clk_init);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index e13be7c444ca..9ddb49b1cb71 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -851,8 +851,7 @@ void __init versatile_init(void)
{
int i;
- for (i = 0; i < ARRAY_SIZE(lookups); i++)
- clkdev_add(&lookups[i]);
+ clkdev_add_table(lookups, ARRAY_SIZE(lookups));
platform_device_register(&versatile_flash_device);
platform_device_register(&versatile_i2c_device);
diff --git a/arch/arm/mach-w90x900/clock.c b/arch/arm/mach-w90x900/clock.c
index b785994bab0a..2c371ff22e51 100644
--- a/arch/arm/mach-w90x900/clock.c
+++ b/arch/arm/mach-w90x900/clock.c
@@ -90,12 +90,3 @@ void nuc900_subclk_enable(struct clk *clk, int enable)
__raw_writel(clken, W90X900_VA_CLKPWR + SUBCLK);
}
-
-
-void clks_register(struct clk_lookup *clks, size_t num)
-{
- int i;
-
- for (i = 0; i < num; i++)
- clkdev_add(&clks[i]);
-}
diff --git a/arch/arm/mach-w90x900/clock.h b/arch/arm/mach-w90x900/clock.h
index f5816a06eed6..c56ddab3d912 100644
--- a/arch/arm/mach-w90x900/clock.h
+++ b/arch/arm/mach-w90x900/clock.h
@@ -14,7 +14,6 @@
void nuc900_clk_enable(struct clk *clk, int enable);
void nuc900_subclk_enable(struct clk *clk, int enable);
-void clks_register(struct clk_lookup *clks, size_t num);
struct clk {
unsigned long cken;
diff --git a/arch/arm/mach-w90x900/cpu.c b/arch/arm/mach-w90x900/cpu.c
index 20dc0c96214d..6f5ca532883f 100644
--- a/arch/arm/mach-w90x900/cpu.c
+++ b/arch/arm/mach-w90x900/cpu.c
@@ -208,6 +208,6 @@ void __init nuc900_map_io(struct map_desc *mach_desc, int mach_size)
void __init nuc900_init_clocks(void)
{
- clks_register(nuc900_clkregs, ARRAY_SIZE(nuc900_clkregs));
+ clkdev_add_table(nuc900_clkregs, ARRAY_SIZE(nuc900_clkregs));
}
diff --git a/arch/arm/mach-w90x900/include/mach/system.h b/arch/arm/mach-w90x900/include/mach/system.h
index 940640066857..ce228bdc66dd 100644
--- a/arch/arm/mach-w90x900/include/mach/system.h
+++ b/arch/arm/mach-w90x900/include/mach/system.h
@@ -15,7 +15,15 @@
*
*/
+#include <linux/io.h>
#include <asm/proc-fns.h>
+#include <mach/map.h>
+#include <mach/regs-timer.h>
+
+#define WTCR (TMR_BA + 0x1C)
+#define WTCLK (1 << 10)
+#define WTE (1 << 7)
+#define WTRE (1 << 1)
static void arch_idle(void)
{
@@ -23,6 +31,11 @@ static void arch_idle(void)
static void arch_reset(char mode, const char *cmd)
{
- cpu_reset(0);
+ if (mode == 's') {
+ /* Jump into ROM at address 0 */
+ cpu_reset(0);
+ } else {
+ __raw_writel(WTE | WTRE | WTCLK, WTCR);
+ }
}
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index 4128af870b41..b80f769bc135 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -42,7 +42,10 @@
#define TICKS_PER_SEC 100
#define PRESCALE 0x63 /* Divider = prescale + 1 */
-unsigned int timer0_load;
+#define TDR_SHIFT 24
+#define TDR_MASK ((1 << TDR_SHIFT) - 1)
+
+static unsigned int timer0_load;
static void nuc900_clockevent_setmode(enum clock_event_mode mode,
struct clock_event_device *clk)
@@ -88,7 +91,7 @@ static int nuc900_clockevent_setnextevent(unsigned long evt,
static struct clock_event_device nuc900_clockevent_device = {
.name = "nuc900-timer0",
.shift = 32,
- .features = CLOCK_EVT_MODE_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = nuc900_clockevent_setmode,
.set_next_event = nuc900_clockevent_setnextevent,
.rating = 300,
@@ -112,8 +115,23 @@ static struct irqaction nuc900_timer0_irq = {
.handler = nuc900_timer0_interrupt,
};
-static void __init nuc900_clockevents_init(unsigned int rate)
+static void __init nuc900_clockevents_init(void)
{
+ unsigned int rate;
+ struct clk *clk = clk_get(NULL, "timer0");
+
+ BUG_ON(IS_ERR(clk));
+
+ __raw_writel(0x00, REG_TCSR0);
+
+ clk_enable(clk);
+ rate = clk_get_rate(clk) / (PRESCALE + 1);
+
+ timer0_load = (rate / TICKS_PER_SEC);
+
+ __raw_writel(RESETINT, REG_TISR);
+ setup_irq(IRQ_TIMER0, &nuc900_timer0_irq);
+
nuc900_clockevent_device.mult = div_sc(rate, NSEC_PER_SEC,
nuc900_clockevent_device.shift);
nuc900_clockevent_device.max_delta_ns = clockevent_delta2ns(0xffffffff,
@@ -127,26 +145,35 @@ static void __init nuc900_clockevents_init(unsigned int rate)
static cycle_t nuc900_get_cycles(struct clocksource *cs)
{
- return ~__raw_readl(REG_TDR1);
+ return (~__raw_readl(REG_TDR1)) & TDR_MASK;
}
static struct clocksource clocksource_nuc900 = {
.name = "nuc900-timer1",
.rating = 200,
.read = nuc900_get_cycles,
- .mask = CLOCKSOURCE_MASK(32),
- .shift = 20,
+ .mask = CLOCKSOURCE_MASK(TDR_SHIFT),
+ .shift = 10,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static void __init nuc900_clocksource_init(unsigned int rate)
+static void __init nuc900_clocksource_init(void)
{
unsigned int val;
+ unsigned int rate;
+ struct clk *clk = clk_get(NULL, "timer1");
+
+ BUG_ON(IS_ERR(clk));
+
+ __raw_writel(0x00, REG_TCSR1);
+
+ clk_enable(clk);
+ rate = clk_get_rate(clk) / (PRESCALE + 1);
__raw_writel(0xffffffff, REG_TICR1);
val = __raw_readl(REG_TCSR1);
- val |= (COUNTEN | PERIOD);
+ val |= (COUNTEN | PERIOD | PRESCALE);
__raw_writel(val, REG_TCSR1);
clocksource_nuc900.mult =
@@ -156,25 +183,8 @@ static void __init nuc900_clocksource_init(unsigned int rate)
static void __init nuc900_timer_init(void)
{
- struct clk *ck_ext = clk_get(NULL, "ext");
- unsigned int rate;
-
- BUG_ON(IS_ERR(ck_ext));
-
- rate = clk_get_rate(ck_ext);
- clk_put(ck_ext);
- rate = rate / (PRESCALE + 0x01);
-
- /* set a known state */
- __raw_writel(0x00, REG_TCSR0);
- __raw_writel(0x00, REG_TCSR1);
- __raw_writel(RESETINT, REG_TISR);
- timer0_load = (rate / TICKS_PER_SEC);
-
- setup_irq(IRQ_TIMER0, &nuc900_timer0_irq);
-
- nuc900_clocksource_init(rate);
- nuc900_clockevents_init(rate);
+ nuc900_clocksource_init();
+ nuc900_clockevents_init();
}
struct sys_timer nuc900_timer = {
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 827e238e5d4a..e8d34a80851c 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -27,6 +27,9 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ) += abort-ev5tj.o
obj-$(CONFIG_CPU_ABRT_EV6) += abort-ev6.o
obj-$(CONFIG_CPU_ABRT_EV7) += abort-ev7.o
+AFLAGS_abort-ev6.o :=-Wa,-march=armv6k
+AFLAGS_abort-ev7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
@@ -39,6 +42,9 @@ obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
obj-$(CONFIG_CPU_CACHE_V7) += cache-v7.o
obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o
+AFLAGS_cache-v6.o :=-Wa,-march=armv6
+AFLAGS_cache-v7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
@@ -58,6 +64,9 @@ obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
obj-$(CONFIG_CPU_TLB_V7) += tlb-v7.o
obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o
+AFLAGS_tlb-v6.o :=-Wa,-march=armv6
+AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
@@ -84,6 +93,9 @@ obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o
obj-$(CONFIG_CPU_V6) += proc-v6.o
obj-$(CONFIG_CPU_V7) += proc-v7.o
+AFLAGS_proc-v6.o :=-Wa,-march=armv6
+AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
+
obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b270d6228fe2..0c5eb6983cef 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -898,11 +898,7 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
- res = proc_mkdir("cpu", NULL);
- if (!res)
- return -ENOMEM;
-
- res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
+ res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
if (!res)
return -ENOMEM;
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index a89444a3c016..7148e53e6078 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -157,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_dma_inv_range)
+fa_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D entry
@@ -180,7 +180,7 @@ ENTRY(fa_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(fa_dma_clean_range)
+fa_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -205,6 +205,30 @@ ENTRY(fa_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(fa_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq fa_dma_clean_range
+ bcs fa_dma_inv_range
+ b fa_dma_flush_range
+ENDPROC(fa_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(fa_dma_unmap_area)
+ mov pc, lr
+ENDPROC(fa_dma_unmap_area)
+
__INITDATA
.type fa_cache_fns, #object
@@ -215,7 +239,7 @@ ENTRY(fa_cache_fns)
.long fa_coherent_kern_range
.long fa_coherent_user_range
.long fa_flush_kern_dcache_area
- .long fa_dma_inv_range
- .long fa_dma_clean_range
+ .long fa_dma_map_area
+ .long fa_dma_unmap_area
.long fa_dma_flush_range
.size fa_cache_fns, . - fa_cache_fns
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 2a482731ea36..c2ff3c599fee 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -84,20 +84,6 @@ ENTRY(v3_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_inv_range)
- /* FALLTHROUGH */
-
-/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -108,18 +94,29 @@ ENTRY(v3_dma_inv_range)
ENTRY(v3_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
+ mov pc, lr
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v3_dma_unmap_area)
+ teq r2, #DMA_TO_DEVICE
+ bne v3_dma_flush_range
/* FALLTHROUGH */
/*
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
*/
-ENTRY(v3_dma_clean_range)
+ENTRY(v3_dma_map_area)
mov pc, lr
+ENDPROC(v3_dma_unmap_area)
+ENDPROC(v3_dma_map_area)
__INITDATA
@@ -131,7 +128,7 @@ ENTRY(v3_cache_fns)
.long v3_coherent_kern_range
.long v3_coherent_user_range
.long v3_flush_kern_dcache_area
- .long v3_dma_inv_range
- .long v3_dma_clean_range
+ .long v3_dma_map_area
+ .long v3_dma_unmap_area
.long v3_dma_flush_range
.size v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 5c7da3e372e9..4810f7e3e813 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -94,20 +94,6 @@ ENTRY(v4_flush_kern_dcache_area)
/* FALLTHROUGH */
/*
- * dma_inv_range(start, end)
- *
- * Invalidate (discard) the specified virtual address range.
- * May not write back any entries. If 'start' or 'end'
- * are not cache line aligned, those lines must be written
- * back.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v4_dma_inv_range)
- /* FALLTHROUGH */
-
-/*
* dma_flush_range(start, end)
*
* Clean and invalidate the specified virtual address range.
@@ -120,18 +106,29 @@ ENTRY(v4_dma_flush_range)
mov r0, #0
mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
#endif
+ mov pc, lr
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4_dma_unmap_area)
+ teq r2, #DMA_TO_DEVICE
+ bne v4_dma_flush_range
/* FALLTHROUGH */
/*
- * dma_clean_range(start, end)
- *
- * Clean (write back) the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
*/
-ENTRY(v4_dma_clean_range)
+ENTRY(v4_dma_map_area)
mov pc, lr
+ENDPROC(v4_dma_unmap_area)
+ENDPROC(v4_dma_map_area)
__INITDATA
@@ -143,7 +140,7 @@ ENTRY(v4_cache_fns)
.long v4_coherent_kern_range
.long v4_coherent_user_range
.long v4_flush_kern_dcache_area
- .long v4_dma_inv_range
- .long v4_dma_clean_range
+ .long v4_dma_map_area
+ .long v4_dma_unmap_area
.long v4_dma_flush_range
.size v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 3dbedf1ec0e7..df8368afa102 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -173,7 +173,7 @@ ENTRY(v4wb_coherent_user_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_inv_range)
+v4wb_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -194,7 +194,7 @@ ENTRY(v4wb_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wb_dma_clean_range)
+v4wb_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -216,6 +216,30 @@ ENTRY(v4wb_dma_clean_range)
.globl v4wb_dma_flush_range
.set v4wb_dma_flush_range, v4wb_coherent_kern_range
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wb_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq v4wb_dma_clean_range
+ bcs v4wb_dma_inv_range
+ b v4wb_dma_flush_range
+ENDPROC(v4wb_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wb_dma_unmap_area)
+ mov pc, lr
+ENDPROC(v4wb_dma_unmap_area)
+
__INITDATA
.type v4wb_cache_fns, #object
@@ -226,7 +250,7 @@ ENTRY(v4wb_cache_fns)
.long v4wb_coherent_kern_range
.long v4wb_coherent_user_range
.long v4wb_flush_kern_dcache_area
- .long v4wb_dma_inv_range
- .long v4wb_dma_clean_range
+ .long v4wb_dma_map_area
+ .long v4wb_dma_unmap_area
.long v4wb_dma_flush_range
.size v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index b3b7410270b4..45c70312f43b 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -142,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(v4wt_dma_inv_range)
+v4wt_dma_inv_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
- /* FALLTHROUGH */
-
-/*
- * dma_clean_range(start, end)
- *
- * Clean the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v4wt_dma_clean_range)
mov pc, lr
/*
@@ -172,6 +161,29 @@ ENTRY(v4wt_dma_clean_range)
.globl v4wt_dma_flush_range
.equ v4wt_dma_flush_range, v4wt_dma_inv_range
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wt_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v4wt_dma_inv_range
+ /* FALLTHROUGH */
+
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v4wt_dma_map_area)
+ mov pc, lr
+ENDPROC(v4wt_dma_unmap_area)
+ENDPROC(v4wt_dma_map_area)
+
__INITDATA
.type v4wt_cache_fns, #object
@@ -182,7 +194,7 @@ ENTRY(v4wt_cache_fns)
.long v4wt_coherent_kern_range
.long v4wt_coherent_user_range
.long v4wt_flush_kern_dcache_area
- .long v4wt_dma_inv_range
- .long v4wt_dma_clean_range
+ .long v4wt_dma_map_area
+ .long v4wt_dma_unmap_area
.long v4wt_dma_flush_range
.size v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 4ba0a24ce6f5..9d89c67a1cc3 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -195,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_inv_range)
+v6_dma_inv_range:
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -228,7 +228,7 @@ ENTRY(v6_dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v6_dma_clean_range)
+v6_dma_clean_range:
bic r0, r0, #D_CACHE_LINE_SIZE - 1
1:
#ifdef HARVARD_CACHE
@@ -263,6 +263,32 @@ ENTRY(v6_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v6_dma_map_area)
+ add r1, r1, r0
+ teq r2, #DMA_FROM_DEVICE
+ beq v6_dma_inv_range
+ b v6_dma_clean_range
+ENDPROC(v6_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v6_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v6_dma_inv_range
+ mov pc, lr
+ENDPROC(v6_dma_unmap_area)
+
__INITDATA
.type v6_cache_fns, #object
@@ -273,7 +299,7 @@ ENTRY(v6_cache_fns)
.long v6_coherent_kern_range
.long v6_coherent_user_range
.long v6_flush_kern_dcache_area
- .long v6_dma_inv_range
- .long v6_dma_clean_range
+ .long v6_dma_map_area
+ .long v6_dma_unmap_area
.long v6_dma_flush_range
.size v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 9073db849fb4..bcd64f265870 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -216,7 +216,7 @@ ENDPROC(v7_flush_kern_dcache_area)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_inv_range)
+v7_dma_inv_range:
dcache_line_size r2, r3
sub r3, r2, #1
tst r0, r3
@@ -240,7 +240,7 @@ ENDPROC(v7_dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-ENTRY(v7_dma_clean_range)
+v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
@@ -271,6 +271,32 @@ ENTRY(v7_dma_flush_range)
mov pc, lr
ENDPROC(v7_dma_flush_range)
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v7_dma_map_area)
+ add r1, r1, r0
+ teq r2, #DMA_FROM_DEVICE
+ beq v7_dma_inv_range
+ b v7_dma_clean_range
+ENDPROC(v7_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v7_dma_unmap_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ bne v7_dma_inv_range
+ mov pc, lr
+ENDPROC(v7_dma_unmap_area)
+
__INITDATA
.type v7_cache_fns, #object
@@ -281,7 +307,7 @@ ENTRY(v7_cache_fns)
.long v7_coherent_kern_range
.long v7_coherent_user_range
.long v7_flush_kern_dcache_area
- .long v7_dma_inv_range
- .long v7_dma_clean_range
+ .long v7_dma_map_area
+ .long v7_dma_unmap_area
.long v7_dma_flush_range
.size v7_cache_fns, . - v7_cache_fns
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 5d180cb0bd94..c3154928bccd 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -221,15 +221,14 @@ static int __init xsc3_l2_init(void)
if (!cpu_is_xsc3() || !xsc3_l2_present())
return 0;
- if (!(get_cr() & CR_L2)) {
+ if (get_cr() & CR_L2) {
pr_info("XScale3 L2 cache enabled.\n");
- adjust_cr(CR_L2, CR_L2);
xsc3_l2_inv_all();
- }
- outer_cache.inv_range = xsc3_l2_inv_range;
- outer_cache.clean_range = xsc3_l2_clean_range;
- outer_cache.flush_range = xsc3_l2_flush_range;
+ outer_cache.inv_range = xsc3_l2_inv_range;
+ outer_cache.clean_range = xsc3_l2_clean_range;
+ outer_cache.flush_range = xsc3_l2_flush_range;
+ }
return 0;
}
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index 70997d5bee2d..5eb4fd93893d 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -68,12 +68,13 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
}
void feroceon_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
+ flush_cache_page(vma, vaddr, page_to_pfn(from));
feroceon_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index de9c06854ad7..f72303e1d804 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom)
}
void v3_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7370a7142b04..598c51ad5071 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to)
}
void v4_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 9ab098414227..7c2eb55cd4a9 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -48,12 +48,13 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
}
void v4wb_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
+ flush_cache_page(vma, vaddr, page_to_pfn(from));
v4wb_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 300efafd6643..172e6a55458e 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
}
void v4wt_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0fa1319273de..8bca4dea6dfa 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock);
* attack the kernel's existing mapping of these pages.
*/
static void v6_copy_user_highpage_nonaliasing(struct page *to,
- struct page *from, unsigned long vaddr)
+ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
@@ -81,7 +81,7 @@ static void discard_old_kernel_data(void *kto)
* Copy the page, taking account of the cache colour.
*/
static void v6_copy_user_highpage_aliasing(struct page *to,
- struct page *from, unsigned long vaddr)
+ struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long kfrom, kto;
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index bc4525f5ab23..747ad4140fc7 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -71,12 +71,13 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
}
void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto, *kfrom;
kto = kmap_atomic(to, KM_USER0);
kfrom = kmap_atomic(from, KM_USER1);
+ flush_cache_page(vma, vaddr, page_to_pfn(from));
xsc3_mc_copy_user_page(kto, kfrom);
kunmap_atomic(kfrom, KM_USER1);
kunmap_atomic(kto, KM_USER0);
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 76824d3e966a..9920c0ae2096 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to)
}
void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr)
+ unsigned long vaddr, struct vm_area_struct *vma)
{
void *kto = kmap_atomic(to, KM_USER1);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 26325cb5d368..64daef2173bd 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -404,78 +404,44 @@ EXPORT_SYMBOL(dma_free_coherent);
* platforms with CONFIG_DMABOUNCE.
* Use the driver DMA support - see dma-mapping.h (dma_sync_*)
*/
-void dma_cache_maint(const void *start, size_t size, int direction)
+void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
{
- void (*inner_op)(const void *, const void *);
- void (*outer_op)(unsigned long, unsigned long);
-
- BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
-
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = dmac_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = dmac_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = dmac_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
- }
+ unsigned long paddr;
+
+ BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+
+ dmac_map_area(kaddr, size, dir);
- inner_op(start, start + size);
- outer_op(__pa(start), __pa(start) + size);
+ paddr = __pa(kaddr);
+ if (dir == DMA_FROM_DEVICE) {
+ outer_inv_range(paddr, paddr + size);
+ } else {
+ outer_clean_range(paddr, paddr + size);
+ }
+ /* FIXME: non-speculating: flush on bidirectional mappings? */
}
-EXPORT_SYMBOL(dma_cache_maint);
+EXPORT_SYMBOL(___dma_single_cpu_to_dev);
-static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
- size_t size, int direction)
+void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+ enum dma_data_direction dir)
{
- void *vaddr;
- unsigned long paddr;
- void (*inner_op)(const void *, const void *);
- void (*outer_op)(unsigned long, unsigned long);
-
- switch (direction) {
- case DMA_FROM_DEVICE: /* invalidate only */
- inner_op = dmac_inv_range;
- outer_op = outer_inv_range;
- break;
- case DMA_TO_DEVICE: /* writeback only */
- inner_op = dmac_clean_range;
- outer_op = outer_clean_range;
- break;
- case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- inner_op = dmac_flush_range;
- outer_op = outer_flush_range;
- break;
- default:
- BUG();
- }
+ BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
- if (!PageHighMem(page)) {
- vaddr = page_address(page) + offset;
- inner_op(vaddr, vaddr + size);
- } else {
- vaddr = kmap_high_get(page);
- if (vaddr) {
- vaddr += offset;
- inner_op(vaddr, vaddr + size);
- kunmap_high(page);
- }
+ /* FIXME: non-speculating: not required */
+ /* don't bother invalidating if DMA to device */
+ if (dir != DMA_TO_DEVICE) {
+ unsigned long paddr = __pa(kaddr);
+ outer_inv_range(paddr, paddr + size);
}
- paddr = page_to_phys(page) + offset;
- outer_op(paddr, paddr + size);
+ dmac_unmap_area(kaddr, size, dir);
}
+EXPORT_SYMBOL(___dma_single_dev_to_cpu);
-void dma_cache_maint_page(struct page *page, unsigned long offset,
- size_t size, int dir)
+static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
{
/*
* A single sg entry may refer to multiple physically contiguous
@@ -486,20 +452,62 @@ void dma_cache_maint_page(struct page *page, unsigned long offset,
size_t left = size;
do {
size_t len = left;
- if (PageHighMem(page) && len + offset > PAGE_SIZE) {
- if (offset >= PAGE_SIZE) {
- page += offset / PAGE_SIZE;
- offset %= PAGE_SIZE;
+ void *vaddr;
+
+ if (PageHighMem(page)) {
+ if (len + offset > PAGE_SIZE) {
+ if (offset >= PAGE_SIZE) {
+ page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+ }
+ len = PAGE_SIZE - offset;
}
- len = PAGE_SIZE - offset;
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ vaddr += offset;
+ op(vaddr, len, dir);
+ kunmap_high(page);
+ }
+ } else {
+ vaddr = page_address(page) + offset;
+ op(vaddr, len, dir);
}
- dma_cache_maint_contiguous(page, offset, len, dir);
offset = 0;
page++;
left -= len;
} while (left);
}
-EXPORT_SYMBOL(dma_cache_maint_page);
+
+void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ unsigned long paddr;
+
+ dma_cache_maint_page(page, off, size, dir, dmac_map_area);
+
+ paddr = page_to_phys(page) + off;
+ if (dir == DMA_FROM_DEVICE) {
+ outer_inv_range(paddr, paddr + size);
+ } else {
+ outer_clean_range(paddr, paddr + size);
+ }
+ /* FIXME: non-speculating: flush on bidirectional mappings? */
+}
+EXPORT_SYMBOL(___dma_page_cpu_to_dev);
+
+void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ unsigned long paddr = page_to_phys(page) + off;
+
+ /* FIXME: non-speculating: not required */
+ /* don't bother invalidating if DMA to device */
+ if (dir != DMA_TO_DEVICE)
+ outer_inv_range(paddr, paddr + size);
+
+ dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
+}
+EXPORT_SYMBOL(___dma_page_dev_to_cpu);
/**
* dma_map_sg - map a set of SG buffers for streaming mode DMA
@@ -573,8 +581,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i) {
- dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
- sg_dma_len(s), dir);
+ if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+ sg_dma_len(s), dir))
+ continue;
+
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -597,9 +609,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
sg_dma_len(s), dir))
continue;
- if (!arch_is_coherent())
- dma_cache_maint_page(sg_page(s), s->offset,
- s->length, dir);
+ __dma_page_cpu_to_dev(sg_page(s), s->offset,
+ s->length, dir);
}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6f3a4b7a3b82..e34f095e2090 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
+#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
@@ -87,13 +88,26 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig
if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
__flush_icache_all();
}
+#else
+#define flush_pfn_alias(pfn,vaddr) do { } while (0)
+#endif
+#ifdef CONFIG_SMP
+static void flush_ptrace_access_other(void *args)
+{
+ __flush_icache_all();
+}
+#endif
+
+static
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
- unsigned long uaddr, void *kaddr,
- unsigned long len, int write)
+ unsigned long uaddr, void *kaddr, unsigned long len)
{
if (cache_is_vivt()) {
- vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+ unsigned long addr = (unsigned long)kaddr;
+ __cpuc_coherent_kern_range(addr, addr + len);
+ }
return;
}
@@ -104,16 +118,37 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
}
/* VIPT non-aliasing cache */
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
- vma->vm_flags & VM_EXEC) {
+ if (vma->vm_flags & VM_EXEC) {
unsigned long addr = (unsigned long)kaddr;
- /* only flushing the kernel mapping on non-aliasing VIPT */
__cpuc_coherent_kern_range(addr, addr + len);
+#ifdef CONFIG_SMP
+ if (cache_ops_need_broadcast())
+ smp_call_function(flush_ptrace_access_other,
+ NULL, 1);
+#endif
}
}
-#else
-#define flush_pfn_alias(pfn,vaddr) do { } while (0)
+
+/*
+ * Copy user data from/to a page which is mapped into a different
+ * processes address space. Really, we want to allow our "user
+ * space" model to handle this.
+ *
+ * Note that this code needs to run on the current CPU.
+ */
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+ unsigned long uaddr, void *dst, const void *src,
+ unsigned long len)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
#endif
+ memcpy(dst, src, len);
+ flush_ptrace_access(vma, page, uaddr, dst, len);
+#ifdef CONFIG_SMP
+ preempt_enable();
+#endif
+}
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 52c40d155672..a340569b991e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -32,19 +32,21 @@
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
-static void __init early_initrd(char **p)
+static int __init early_initrd(char *p)
{
unsigned long start, size;
+ char *endp;
- start = memparse(*p, p);
- if (**p == ',') {
- size = memparse((*p) + 1, p);
+ start = memparse(p, &endp);
+ if (*endp == ',') {
+ size = memparse(endp + 1, NULL);
phys_initrd_start = start;
phys_initrd_size = size;
}
+ return 0;
}
-__early_param("initrd=", early_initrd);
+early_param("initrd", early_initrd);
static int __init parse_tag_initrd(const struct tag *tag)
{
@@ -616,7 +618,7 @@ void __init mem_init(void)
"%dK data, %dK init, %luK highmem)\n",
nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10,
datasize >> 10, initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+ totalhigh_pages << (PAGE_SHIFT-10));
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
extern int sysctl_overcommit_memory;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ab75c60f7cf..28c8b950ef04 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
* which requires the new ioremap'd region to be referenced, the CPU will
* reference the _old_ region.
*
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
}
#endif
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
- unsigned int mtype)
+void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+ unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
int err;
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
*/
size = PAGE_ALIGN(offset + size);
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
flush_cache_vmap(addr, addr + size);
return (void __iomem *) (offset + addr);
}
-EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
unsigned long last_addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
if (!size || last_addr < phys_addr)
return NULL;
- return __arm_ioremap_pfn(pfn, offset, size, mtype);
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
+{
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 1708da82da96..88f5d71248d9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = {
* writebuffer to be turned off. (Note: the write
* buffer should not be on and the cache off).
*/
-static void __init early_cachepolicy(char **p)
+static int __init early_cachepolicy(char *p)
{
int i;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
int len = strlen(cache_policies[i].policy);
- if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+ if (memcmp(p, cache_policies[i].policy, len) == 0) {
cachepolicy = i;
cr_alignment &= ~cache_policies[i].cr_mask;
cr_no_alignment &= ~cache_policies[i].cr_mask;
- *p += len;
break;
}
}
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p)
}
flush_cache_all();
set_cr(cr_alignment);
+ return 0;
}
-__early_param("cachepolicy=", early_cachepolicy);
+early_param("cachepolicy", early_cachepolicy);
-static void __init early_nocache(char **__unused)
+static int __init early_nocache(char *__unused)
{
char *p = "buffered";
printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
- early_cachepolicy(&p);
+ early_cachepolicy(p);
+ return 0;
}
-__early_param("nocache", early_nocache);
+early_param("nocache", early_nocache);
-static void __init early_nowrite(char **__unused)
+static int __init early_nowrite(char *__unused)
{
char *p = "uncached";
printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
- early_cachepolicy(&p);
+ early_cachepolicy(p);
+ return 0;
}
-__early_param("nowb", early_nowrite);
+early_param("nowb", early_nowrite);
-static void __init early_ecc(char **p)
+static int __init early_ecc(char *p)
{
- if (memcmp(*p, "on", 2) == 0) {
+ if (memcmp(p, "on", 2) == 0)
ecc_mask = PMD_PROTECTION;
- *p += 2;
- } else if (memcmp(*p, "off", 3) == 0) {
+ else if (memcmp(p, "off", 3) == 0)
ecc_mask = 0;
- *p += 3;
- }
+ return 0;
}
-__early_param("ecc=", early_ecc);
+early_param("ecc", early_ecc);
static int __init noalign_setup(char *__unused)
{
@@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
* bytes. This can be used to increase (or decrease) the vmalloc
* area - the default is 128m.
*/
-static void __init early_vmalloc(char **arg)
+static int __init early_vmalloc(char *arg)
{
- vmalloc_reserve = memparse(*arg, arg);
+ vmalloc_reserve = memparse(arg, NULL);
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
@@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg)
"vmalloc area is too big, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
+ return 0;
}
-__early_param("vmalloc=", early_vmalloc);
+early_param("vmalloc", early_vmalloc);
#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 374a8311bc84..9bfeb6b9509a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+ size_t size, unsigned int mtype, void *caller)
+{
+ return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
unsigned int mtype)
{
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
+void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ return __arm_ioremap(phys_addr, size, mtype);
+}
+
void __iounmap(volatile void __iomem *addr)
{
}
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 8012e24282b2..72507c630ceb 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -265,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm1020_dma_inv_range)
+arm1020_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -295,7 +295,7 @@ ENTRY(arm1020_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm1020_dma_clean_range)
+arm1020_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -330,6 +330,30 @@ ENTRY(arm1020_dma_flush_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1020_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm1020_dma_clean_range
+ bcs arm1020_dma_inv_range
+ b arm1020_dma_flush_range
+ENDPROC(arm1020_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1020_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm1020_dma_unmap_area)
+
ENTRY(arm1020_cache_fns)
.long arm1020_flush_kern_cache_all
.long arm1020_flush_user_cache_all
@@ -337,8 +361,8 @@ ENTRY(arm1020_cache_fns)
.long arm1020_coherent_kern_range
.long arm1020_coherent_user_range
.long arm1020_flush_kern_dcache_area
- .long arm1020_dma_inv_range
- .long arm1020_dma_clean_range
+ .long arm1020_dma_map_area
+ .long arm1020_dma_unmap_area
.long arm1020_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 41fe25d234f5..d27829805609 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -258,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm1020e_dma_inv_range)
+arm1020e_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -284,7 +284,7 @@ ENTRY(arm1020e_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm1020e_dma_clean_range)
+arm1020e_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -316,6 +316,30 @@ ENTRY(arm1020e_dma_flush_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1020e_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm1020e_dma_clean_range
+ bcs arm1020e_dma_inv_range
+ b arm1020e_dma_flush_range
+ENDPROC(arm1020e_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1020e_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm1020e_dma_unmap_area)
+
ENTRY(arm1020e_cache_fns)
.long arm1020e_flush_kern_cache_all
.long arm1020e_flush_user_cache_all
@@ -323,8 +347,8 @@ ENTRY(arm1020e_cache_fns)
.long arm1020e_coherent_kern_range
.long arm1020e_coherent_user_range
.long arm1020e_flush_kern_dcache_area
- .long arm1020e_dma_inv_range
- .long arm1020e_dma_clean_range
+ .long arm1020e_dma_map_area
+ .long arm1020e_dma_unmap_area
.long arm1020e_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 20a5b1b31a70..ce13e4a827de 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -247,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm1022_dma_inv_range)
+arm1022_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -273,7 +273,7 @@ ENTRY(arm1022_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm1022_dma_clean_range)
+arm1022_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -305,6 +305,30 @@ ENTRY(arm1022_dma_flush_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1022_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm1022_dma_clean_range
+ bcs arm1022_dma_inv_range
+ b arm1022_dma_flush_range
+ENDPROC(arm1022_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1022_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm1022_dma_unmap_area)
+
ENTRY(arm1022_cache_fns)
.long arm1022_flush_kern_cache_all
.long arm1022_flush_user_cache_all
@@ -312,8 +336,8 @@ ENTRY(arm1022_cache_fns)
.long arm1022_coherent_kern_range
.long arm1022_coherent_user_range
.long arm1022_flush_kern_dcache_area
- .long arm1022_dma_inv_range
- .long arm1022_dma_clean_range
+ .long arm1022_dma_map_area
+ .long arm1022_dma_unmap_area
.long arm1022_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 96aedb10fcc4..636672a29c6d 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -241,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm1026_dma_inv_range)
+arm1026_dma_inv_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
tst r0, #CACHE_DLINESIZE - 1
@@ -267,7 +267,7 @@ ENTRY(arm1026_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm1026_dma_clean_range)
+arm1026_dma_clean_range:
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CACHE_DLINESIZE - 1
@@ -299,6 +299,30 @@ ENTRY(arm1026_dma_flush_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1026_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm1026_dma_clean_range
+ bcs arm1026_dma_inv_range
+ b arm1026_dma_flush_range
+ENDPROC(arm1026_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm1026_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm1026_dma_unmap_area)
+
ENTRY(arm1026_cache_fns)
.long arm1026_flush_kern_cache_all
.long arm1026_flush_user_cache_all
@@ -306,8 +330,8 @@ ENTRY(arm1026_cache_fns)
.long arm1026_coherent_kern_range
.long arm1026_coherent_user_range
.long arm1026_flush_kern_dcache_area
- .long arm1026_dma_inv_range
- .long arm1026_dma_clean_range
+ .long arm1026_dma_map_area
+ .long arm1026_dma_unmap_area
.long arm1026_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 471669e2d7cb..8be81992645d 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -239,7 +239,7 @@ ENTRY(arm920_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm920_dma_inv_range)
+arm920_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -262,7 +262,7 @@ ENTRY(arm920_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm920_dma_clean_range)
+arm920_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -288,6 +288,30 @@ ENTRY(arm920_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm920_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm920_dma_clean_range
+ bcs arm920_dma_inv_range
+ b arm920_dma_flush_range
+ENDPROC(arm920_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm920_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm920_dma_unmap_area)
+
ENTRY(arm920_cache_fns)
.long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
@@ -295,8 +319,8 @@ ENTRY(arm920_cache_fns)
.long arm920_coherent_kern_range
.long arm920_coherent_user_range
.long arm920_flush_kern_dcache_area
- .long arm920_dma_inv_range
- .long arm920_dma_clean_range
+ .long arm920_dma_map_area
+ .long arm920_dma_unmap_area
.long arm920_dma_flush_range
#endif
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index ee111b00fa41..c0ff8e4b1074 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -241,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm922_dma_inv_range)
+arm922_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -264,7 +264,7 @@ ENTRY(arm922_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm922_dma_clean_range)
+arm922_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -290,6 +290,30 @@ ENTRY(arm922_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm922_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm922_dma_clean_range
+ bcs arm922_dma_inv_range
+ b arm922_dma_flush_range
+ENDPROC(arm922_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm922_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm922_dma_unmap_area)
+
ENTRY(arm922_cache_fns)
.long arm922_flush_kern_cache_all
.long arm922_flush_user_cache_all
@@ -297,8 +321,8 @@ ENTRY(arm922_cache_fns)
.long arm922_coherent_kern_range
.long arm922_coherent_user_range
.long arm922_flush_kern_dcache_area
- .long arm922_dma_inv_range
- .long arm922_dma_clean_range
+ .long arm922_dma_map_area
+ .long arm922_dma_unmap_area
.long arm922_dma_flush_range
#endif
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 8deb5bde58e4..3c6cffe400f6 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -283,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm925_dma_inv_range)
+arm925_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -308,7 +308,7 @@ ENTRY(arm925_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm925_dma_clean_range)
+arm925_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -341,6 +341,30 @@ ENTRY(arm925_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm925_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm925_dma_clean_range
+ bcs arm925_dma_inv_range
+ b arm925_dma_flush_range
+ENDPROC(arm925_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm925_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm925_dma_unmap_area)
+
ENTRY(arm925_cache_fns)
.long arm925_flush_kern_cache_all
.long arm925_flush_user_cache_all
@@ -348,8 +372,8 @@ ENTRY(arm925_cache_fns)
.long arm925_coherent_kern_range
.long arm925_coherent_user_range
.long arm925_flush_kern_dcache_area
- .long arm925_dma_inv_range
- .long arm925_dma_clean_range
+ .long arm925_dma_map_area
+ .long arm925_dma_unmap_area
.long arm925_dma_flush_range
ENTRY(cpu_arm925_dcache_clean_area)
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 64db6e275a44..75b707c9cce1 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -246,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(arm926_dma_inv_range)
+arm926_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -271,7 +271,7 @@ ENTRY(arm926_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(arm926_dma_clean_range)
+arm926_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -304,6 +304,30 @@ ENTRY(arm926_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm926_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm926_dma_clean_range
+ bcs arm926_dma_inv_range
+ b arm926_dma_flush_range
+ENDPROC(arm926_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm926_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm926_dma_unmap_area)
+
ENTRY(arm926_cache_fns)
.long arm926_flush_kern_cache_all
.long arm926_flush_user_cache_all
@@ -311,8 +335,8 @@ ENTRY(arm926_cache_fns)
.long arm926_coherent_kern_range
.long arm926_coherent_user_range
.long arm926_flush_kern_dcache_area
- .long arm926_dma_inv_range
- .long arm926_dma_clean_range
+ .long arm926_dma_map_area
+ .long arm926_dma_unmap_area
.long arm926_dma_flush_range
ENTRY(cpu_arm926_dcache_clean_area)
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 8196b9f401fb..1af1657819eb 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -171,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_dma_inv_range)
+arm940_dma_inv_range:
mov ip, #0
mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
@@ -192,7 +192,7 @@ ENTRY(arm940_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(arm940_dma_clean_range)
+arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
mov ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -233,6 +233,30 @@ ENTRY(arm940_dma_flush_range)
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm940_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm940_dma_clean_range
+ bcs arm940_dma_inv_range
+ b arm940_dma_flush_range
+ENDPROC(arm940_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm940_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm940_dma_unmap_area)
+
ENTRY(arm940_cache_fns)
.long arm940_flush_kern_cache_all
.long arm940_flush_user_cache_all
@@ -240,8 +264,8 @@ ENTRY(arm940_cache_fns)
.long arm940_coherent_kern_range
.long arm940_coherent_user_range
.long arm940_flush_kern_dcache_area
- .long arm940_dma_inv_range
- .long arm940_dma_clean_range
+ .long arm940_dma_map_area
+ .long arm940_dma_unmap_area
.long arm940_dma_flush_range
__INIT
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 9a951239c86c..1664b6aaff79 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -215,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_area)
* - end - virtual end address
* (same as arm926)
*/
-ENTRY(arm946_dma_inv_range)
+arm946_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -240,7 +240,7 @@ ENTRY(arm946_dma_inv_range)
*
* (same as arm926)
*/
-ENTRY(arm946_dma_clean_range)
+arm946_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -275,6 +275,30 @@ ENTRY(arm946_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm946_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq arm946_dma_clean_range
+ bcs arm946_dma_inv_range
+ b arm946_dma_flush_range
+ENDPROC(arm946_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(arm946_dma_unmap_area)
+ mov pc, lr
+ENDPROC(arm946_dma_unmap_area)
+
ENTRY(arm946_cache_fns)
.long arm946_flush_kern_cache_all
.long arm946_flush_user_cache_all
@@ -282,8 +306,8 @@ ENTRY(arm946_cache_fns)
.long arm946_coherent_kern_range
.long arm946_coherent_user_range
.long arm946_flush_kern_dcache_area
- .long arm946_dma_inv_range
- .long arm946_dma_clean_range
+ .long arm946_dma_map_area
+ .long arm946_dma_unmap_area
.long arm946_dma_flush_range
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index dbc39383e66a..53e632343849 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -274,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_area)
* (same as v4wb)
*/
.align 5
-ENTRY(feroceon_dma_inv_range)
+feroceon_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -288,7 +288,7 @@ ENTRY(feroceon_dma_inv_range)
mov pc, lr
.align 5
-ENTRY(feroceon_range_dma_inv_range)
+feroceon_range_dma_inv_range:
mrs r2, cpsr
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -314,7 +314,7 @@ ENTRY(feroceon_range_dma_inv_range)
* (same as v4wb)
*/
.align 5
-ENTRY(feroceon_dma_clean_range)
+feroceon_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -324,7 +324,7 @@ ENTRY(feroceon_dma_clean_range)
mov pc, lr
.align 5
-ENTRY(feroceon_range_dma_clean_range)
+feroceon_range_dma_clean_range:
mrs r2, cpsr
cmp r1, r0
subne r1, r1, #1 @ top address is inclusive
@@ -367,6 +367,44 @@ ENTRY(feroceon_range_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(feroceon_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq feroceon_dma_clean_range
+ bcs feroceon_dma_inv_range
+ b feroceon_dma_flush_range
+ENDPROC(feroceon_dma_map_area)
+
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(feroceon_range_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq feroceon_range_dma_clean_range
+ bcs feroceon_range_dma_inv_range
+ b feroceon_range_dma_flush_range
+ENDPROC(feroceon_range_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(feroceon_dma_unmap_area)
+ mov pc, lr
+ENDPROC(feroceon_dma_unmap_area)
+
ENTRY(feroceon_cache_fns)
.long feroceon_flush_kern_cache_all
.long feroceon_flush_user_cache_all
@@ -374,8 +412,8 @@ ENTRY(feroceon_cache_fns)
.long feroceon_coherent_kern_range
.long feroceon_coherent_user_range
.long feroceon_flush_kern_dcache_area
- .long feroceon_dma_inv_range
- .long feroceon_dma_clean_range
+ .long feroceon_dma_map_area
+ .long feroceon_dma_unmap_area
.long feroceon_dma_flush_range
ENTRY(feroceon_range_cache_fns)
@@ -385,8 +423,8 @@ ENTRY(feroceon_range_cache_fns)
.long feroceon_coherent_kern_range
.long feroceon_coherent_user_range
.long feroceon_range_flush_kern_dcache_area
- .long feroceon_range_dma_inv_range
- .long feroceon_range_dma_clean_range
+ .long feroceon_range_dma_map_area
+ .long feroceon_dma_unmap_area
.long feroceon_range_dma_flush_range
.align 5
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 9674d36cc97d..caa31154e7db 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -218,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_area)
*
* (same as v4wb)
*/
-ENTRY(mohawk_dma_inv_range)
+mohawk_dma_inv_range:
tst r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
@@ -241,7 +241,7 @@ ENTRY(mohawk_dma_inv_range)
*
* (same as v4wb)
*/
-ENTRY(mohawk_dma_clean_range)
+mohawk_dma_clean_range:
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
@@ -268,6 +268,30 @@ ENTRY(mohawk_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(mohawk_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq mohawk_dma_clean_range
+ bcs mohawk_dma_inv_range
+ b mohawk_dma_flush_range
+ENDPROC(mohawk_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(mohawk_dma_unmap_area)
+ mov pc, lr
+ENDPROC(mohawk_dma_unmap_area)
+
ENTRY(mohawk_cache_fns)
.long mohawk_flush_kern_cache_all
.long mohawk_flush_user_cache_all
@@ -275,8 +299,8 @@ ENTRY(mohawk_cache_fns)
.long mohawk_coherent_kern_range
.long mohawk_coherent_user_range
.long mohawk_flush_kern_dcache_area
- .long mohawk_dma_inv_range
- .long mohawk_dma_clean_range
+ .long mohawk_dma_map_area
+ .long mohawk_dma_unmap_area
.long mohawk_dma_flush_range
ENTRY(cpu_mohawk_dcache_clean_area)
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 96456f548798..e5797f1c1db7 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -257,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xsc3_dma_inv_range)
+xsc3_dma_inv_range:
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line
@@ -278,7 +278,7 @@ ENTRY(xsc3_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xsc3_dma_clean_range)
+xsc3_dma_clean_range:
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
add r0, r0, #CACHELINESIZE
@@ -304,6 +304,30 @@ ENTRY(xsc3_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ data write barrier
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(xsc3_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq xsc3_dma_clean_range
+ bcs xsc3_dma_inv_range
+ b xsc3_dma_flush_range
+ENDPROC(xsc3_dma_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(xsc3_dma_unmap_area)
+ mov pc, lr
+ENDPROC(xsc3_dma_unmap_area)
+
ENTRY(xsc3_cache_fns)
.long xsc3_flush_kern_cache_all
.long xsc3_flush_user_cache_all
@@ -311,8 +335,8 @@ ENTRY(xsc3_cache_fns)
.long xsc3_coherent_kern_range
.long xsc3_coherent_user_range
.long xsc3_flush_kern_dcache_area
- .long xsc3_dma_inv_range
- .long xsc3_dma_clean_range
+ .long xsc3_dma_map_area
+ .long xsc3_dma_unmap_area
.long xsc3_dma_flush_range
ENTRY(cpu_xsc3_dcache_clean_area)
@@ -407,6 +431,13 @@ __xsc3_setup:
adr r5, xsc3_crval
ldmia r5, {r5, r6}
+
+#ifdef CONFIG_CACHE_XSC3L2
+ mrc p15, 1, r0, c0, c0, 1 @ get L2 present information
+ ands r0, r0, #0xf8
+ orrne r6, r6, #(1 << 26) @ enable L2 if present
+#endif
+
mrc p15, 0, r0, c1, c0, 0 @ get control register
bic r0, r0, r5 @ ..V. ..R. .... ..A.
orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu)
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 93df47265f2d..63037e2162f2 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -315,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_area)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_dma_inv_range)
+xscale_dma_inv_range:
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -336,7 +336,7 @@ ENTRY(xscale_dma_inv_range)
* - start - virtual start address
* - end - virtual end address
*/
-ENTRY(xscale_dma_clean_range)
+xscale_dma_clean_range:
bic r0, r0, #CACHELINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
@@ -363,6 +363,43 @@ ENTRY(xscale_dma_flush_range)
mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer
mov pc, lr
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(xscale_dma_map_area)
+ add r1, r1, r0
+ cmp r2, #DMA_TO_DEVICE
+ beq xscale_dma_clean_range
+ bcs xscale_dma_inv_range
+ b xscale_dma_flush_range
+ENDPROC(xscale_dma_map_area)
+
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(xscale_dma_a0_map_area)
+ add r1, r1, r0
+ teq r2, #DMA_TO_DEVICE
+ beq xscale_dma_clean_range
+ b xscale_dma_flush_range
+ENDPROC(xscsale_dma_a0_map_area)
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(xscale_dma_unmap_area)
+ mov pc, lr
+ENDPROC(xscale_dma_unmap_area)
+
ENTRY(xscale_cache_fns)
.long xscale_flush_kern_cache_all
.long xscale_flush_user_cache_all
@@ -370,8 +407,8 @@ ENTRY(xscale_cache_fns)
.long xscale_coherent_kern_range
.long xscale_coherent_user_range
.long xscale_flush_kern_dcache_area
- .long xscale_dma_inv_range
- .long xscale_dma_clean_range
+ .long xscale_dma_map_area
+ .long xscale_dma_unmap_area
.long xscale_dma_flush_range
/*
@@ -394,8 +431,8 @@ ENTRY(xscale_80200_A0_A1_cache_fns)
.long xscale_coherent_kern_range
.long xscale_coherent_user_range
.long xscale_flush_kern_dcache_area
- .long xscale_dma_flush_range
- .long xscale_dma_clean_range
+ .long xscale_dma_a0_map_area
+ .long xscale_dma_unmap_area
.long xscale_dma_flush_range
ENTRY(cpu_xscale_dcache_clean_area)
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index a26a605b73bd..0cb1848bd876 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -40,7 +40,6 @@ ENTRY(v7wbi_flush_user_tlb_range)
asid r3, r3 @ mask ASID
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
- vma_vm_flags r2, r2 @ get vma->vm_flags
1:
#ifdef CONFIG_SMP
mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable)
diff --git a/arch/arm/plat-iop/io.c b/arch/arm/plat-iop/io.c
index ed0bbece0d61..e15bc17db90b 100644
--- a/arch/arm/plat-iop/io.c
+++ b/arch/arm/plat-iop/io.c
@@ -34,7 +34,8 @@ void * __iomem __iop3xx_ioremap(unsigned long cookie, size_t size,
retval = (void *) IOP3XX_PMMR_PHYS_TO_VIRT(cookie);
break;
default:
- retval = __arm_ioremap(cookie, size, mtype);
+ retval = __arm_ioremap_caller(cookie, size, mtype,
+ __builtin_return_address(0));
}
return retval;
diff --git a/arch/arm/plat-mxc/include/mach/iomux-mx25.h b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
index 810c47f56e77..9af494f0ab3d 100644
--- a/arch/arm/plat-mxc/include/mach/iomux-mx25.h
+++ b/arch/arm/plat-mxc/include/mach/iomux-mx25.h
@@ -58,19 +58,19 @@
#define MX25_PAD_A18__A18 IOMUX_PAD(0x23c, 0x020, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_A18__GPIO_2_4 IOMUX_PAD(0x23c, 0x020, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A18__FEC_COL IOMUX_PAD(0x23c, 0x020, 0x17, 0x504, 0, NO_PAD_CTL)
+#define MX25_PAD_A18__FEC_COL IOMUX_PAD(0x23c, 0x020, 0x17, 0x504, 0, NO_PAD_CTRL)
#define MX25_PAD_A19__A19 IOMUX_PAD(0x240, 0x024, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A19__FEC_RX_ER IOMUX_PAD(0x240, 0x024, 0x17, 0x518, 0, NO_PAD_CTL)
+#define MX25_PAD_A19__FEC_RX_ER IOMUX_PAD(0x240, 0x024, 0x17, 0x518, 0, NO_PAD_CTRL)
#define MX25_PAD_A19__GPIO_2_5 IOMUX_PAD(0x240, 0x024, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_A20__A20 IOMUX_PAD(0x244, 0x028, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_A20__GPIO_2_6 IOMUX_PAD(0x244, 0x028, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A20__FEC_RDATA2 IOMUX_PAD(0x244, 0x028, 0x17, 0x50c, 0, NO_PAD_CTL)
+#define MX25_PAD_A20__FEC_RDATA2 IOMUX_PAD(0x244, 0x028, 0x17, 0x50c, 0, NO_PAD_CTRL)
#define MX25_PAD_A21__A21 IOMUX_PAD(0x248, 0x02c, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_A21__GPIO_2_7 IOMUX_PAD(0x248, 0x02c, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A21__FEC_RDATA3 IOMUX_PAD(0x248, 0x02c, 0x17, 0x510, 0, NO_PAD_CTL)
+#define MX25_PAD_A21__FEC_RDATA3 IOMUX_PAD(0x248, 0x02c, 0x17, 0x510, 0, NO_PAD_CTRL)
#define MX25_PAD_A22__A22 IOMUX_PAD(0x000, 0x030, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_A22__GPIO_2_8 IOMUX_PAD(0x000, 0x030, 0x15, 0, 0, NO_PAD_CTRL)
@@ -80,11 +80,11 @@
#define MX25_PAD_A24__A24 IOMUX_PAD(0x250, 0x038, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_A24__GPIO_2_10 IOMUX_PAD(0x250, 0x038, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A24__FEC_RX_CLK IOMUX_PAD(0x250, 0x038, 0x17, 0x514, 0, NO_PAD_CTL)
+#define MX25_PAD_A24__FEC_RX_CLK IOMUX_PAD(0x250, 0x038, 0x17, 0x514, 0, NO_PAD_CTRL)
#define MX25_PAD_A25__A25 IOMUX_PAD(0x254, 0x03c, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_A25__GPIO_2_11 IOMUX_PAD(0x254, 0x03c, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_A25__FEC_CRS IOMUX_PAD(0x254, 0x03c, 0x17, 0x508, 0, NO_PAD_CTL)
+#define MX25_PAD_A25__FEC_CRS IOMUX_PAD(0x254, 0x03c, 0x17, 0x508, 0, NO_PAD_CTRL)
#define MX25_PAD_EB0__EB0 IOMUX_PAD(0x258, 0x040, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_EB0__AUD4_TXD IOMUX_PAD(0x258, 0x040, 0x14, 0x464, 0, NO_PAD_CTRL)
@@ -112,7 +112,7 @@
#define MX25_PAD_CS5__UART5_RTS IOMUX_PAD(0x268, 0x058, 0x13, 0x574, 0, NO_PAD_CTRL)
#define MX25_PAD_CS5__GPIO_3_21 IOMUX_PAD(0x268, 0x058, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_NF_CE0__NF_CE0 IOMUX_PAD(0x26c, 0x05c, 0x10, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_NF_CE0__NF_CE0 IOMUX_PAD(0x26c, 0x05c, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_NF_CE0__GPIO_3_22 IOMUX_PAD(0x26c, 0x05c, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_ECB__ECB IOMUX_PAD(0x270, 0x060, 0x10, 0, 0, NO_PAD_CTRL)
@@ -229,28 +229,28 @@
#define MX25_PAD_LD7__GPIO_1_21 IOMUX_PAD(0x2dc, 0x0e4, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_LD8__LD8 IOMUX_PAD(0x2e0, 0x0e8, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD8__FEC_TX_ERR IOMUX_PAD(0x2e0, 0x0e8, 0x15, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_LD8__FEC_TX_ERR IOMUX_PAD(0x2e0, 0x0e8, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_LD9__LD9 IOMUX_PAD(0x2e4, 0x0ec, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD9__FEC_COL IOMUX_PAD(0x2e4, 0x0ec, 0x15, 0x504, 1, NO_PAD_CTL)
+#define MX25_PAD_LD9__FEC_COL IOMUX_PAD(0x2e4, 0x0ec, 0x15, 0x504, 1, NO_PAD_CTRL)
#define MX25_PAD_LD10__LD10 IOMUX_PAD(0x2e8, 0x0f0, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD10__FEC_RX_ER IOMUX_PAD(0x2e8, 0x0f0, 0x15, 0x518, 1, NO_PAD_CTL)
+#define MX25_PAD_LD10__FEC_RX_ER IOMUX_PAD(0x2e8, 0x0f0, 0x15, 0x518, 1, NO_PAD_CTRL)
#define MX25_PAD_LD11__LD11 IOMUX_PAD(0x2ec, 0x0f4, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD11__FEC_RDATA2 IOMUX_PAD(0x2ec, 0x0f4, 0x15, 0x50c, 1, NO_PAD_CTL)
+#define MX25_PAD_LD11__FEC_RDATA2 IOMUX_PAD(0x2ec, 0x0f4, 0x15, 0x50c, 1, NO_PAD_CTRL)
#define MX25_PAD_LD12__LD12 IOMUX_PAD(0x2f0, 0x0f8, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD12__FEC_RDATA3 IOMUX_PAD(0x2f0, 0x0f8, 0x15, 0x510, 1, NO_PAD_CTL)
+#define MX25_PAD_LD12__FEC_RDATA3 IOMUX_PAD(0x2f0, 0x0f8, 0x15, 0x510, 1, NO_PAD_CTRL)
#define MX25_PAD_LD13__LD13 IOMUX_PAD(0x2f4, 0x0fc, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD13__FEC_TDATA2 IOMUX_PAD(0x2f4, 0x0fc, 0x15, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_LD13__FEC_TDATA2 IOMUX_PAD(0x2f4, 0x0fc, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_LD14__LD14 IOMUX_PAD(0x2f8, 0x100, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD14__FEC_TDATA3 IOMUX_PAD(0x2f8, 0x100, 0x15, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_LD14__FEC_TDATA3 IOMUX_PAD(0x2f8, 0x100, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_LD15__LD15 IOMUX_PAD(0x2fc, 0x104, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_LD15__FEC_RX_CLK IOMUX_PAD(0x2fc, 0x104, 0x15, 0x514, 1, NO_PAD_CTL)
+#define MX25_PAD_LD15__FEC_RX_CLK IOMUX_PAD(0x2fc, 0x104, 0x15, 0x514, 1, NO_PAD_CTRL)
#define MX25_PAD_HSYNC__HSYNC IOMUX_PAD(0x300, 0x108, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_HSYNC__GPIO_1_22 IOMUX_PAD(0x300, 0x108, 0x15, 0, 0, NO_PAD_CTRL)
@@ -265,7 +265,7 @@
#define MX25_PAD_OE_ACD__GPIO_1_25 IOMUX_PAD(0x30c, 0x114, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_CONTRAST__CONTRAST IOMUX_PAD(0x310, 0x118, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_CONTRAST__FEC_CRS IOMUX_PAD(0x310, 0x118, 0x15, 0x508, 1, NO_PAD_CTL)
+#define MX25_PAD_CONTRAST__FEC_CRS IOMUX_PAD(0x310, 0x118, 0x15, 0x508, 1, NO_PAD_CTRL)
#define MX25_PAD_PWM__PWM IOMUX_PAD(0x314, 0x11c, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_PWM__GPIO_1_26 IOMUX_PAD(0x314, 0x11c, 0x15, 0, 0, NO_PAD_CTRL)
@@ -354,19 +354,19 @@
#define MX25_PAD_UART2_TXD__GPIO_4_27 IOMUX_PAD(0x37c, 0x184, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_UART2_RTS__UART2_RTS IOMUX_PAD(0x380, 0x188, 0x10, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UART2_RTS__FEC_COL IOMUX_PAD(0x380, 0x188, 0x12, 0x504, 2, NO_PAD_CTL)
+#define MX25_PAD_UART2_RTS__FEC_COL IOMUX_PAD(0x380, 0x188, 0x12, 0x504, 2, NO_PAD_CTRL)
#define MX25_PAD_UART2_RTS__GPIO_4_28 IOMUX_PAD(0x380, 0x188, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_UART2_CTS__FEC_RX_ER IOMUX_PAD(0x384, 0x18c, 0x12, 0x518, 2, NO_PAD_CTL)
+#define MX25_PAD_UART2_CTS__FEC_RX_ER IOMUX_PAD(0x384, 0x18c, 0x12, 0x518, 2, NO_PAD_CTRL)
#define MX25_PAD_UART2_CTS__UART2_CTS IOMUX_PAD(0x384, 0x18c, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_UART2_CTS__GPIO_4_29 IOMUX_PAD(0x384, 0x18c, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_SD1_CMD__SD1_CMD IOMUX_PAD(0x388, 0x190, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_CMD__FEC_RDATA2 IOMUX_PAD(0x388, 0x190, 0x12, 0x50c, 2, NO_PAD_CTL)
+#define MX25_PAD_SD1_CMD__FEC_RDATA2 IOMUX_PAD(0x388, 0x190, 0x12, 0x50c, 2, NO_PAD_CTRL)
#define MX25_PAD_SD1_CMD__GPIO_2_23 IOMUX_PAD(0x388, 0x190, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_SD1_CLK__SD1_CLK IOMUX_PAD(0x38c, 0x194, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_CLK__FEC_RDATA3 IOMUX_PAD(0x38c, 0x194, 0x12, 0x510, 2, NO_PAD_CTL)
+#define MX25_PAD_SD1_CLK__FEC_RDATA3 IOMUX_PAD(0x38c, 0x194, 0x12, 0x510, 2, NO_PAD_CTRL)
#define MX25_PAD_SD1_CLK__GPIO_2_24 IOMUX_PAD(0x38c, 0x194, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_SD1_DATA0__SD1_DATA0 IOMUX_PAD(0x390, 0x198, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
@@ -377,11 +377,11 @@
#define MX25_PAD_SD1_DATA1__GPIO_2_26 IOMUX_PAD(0x394, 0x19c, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_SD1_DATA2__SD1_DATA2 IOMUX_PAD(0x398, 0x1a0, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_DATA2__FEC_RX_CLK IOMUX_PAD(0x398, 0x1a0, 0x15, 0x514, 2, NO_PAD_CTL)
+#define MX25_PAD_SD1_DATA2__FEC_RX_CLK IOMUX_PAD(0x398, 0x1a0, 0x15, 0x514, 2, NO_PAD_CTRL)
#define MX25_PAD_SD1_DATA2__GPIO_2_27 IOMUX_PAD(0x398, 0x1a0, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_SD1_DATA3__SD1_DATA3 IOMUX_PAD(0x39c, 0x1a4, 0x10, 0, 0, PAD_CTL_PUS_47K_UP)
-#define MX25_PAD_SD1_DATA3__FEC_CRS IOMUX_PAD(0x39c, 0x1a4, 0x10, 0x508, 2, NO_PAD_CTL)
+#define MX25_PAD_SD1_DATA3__FEC_CRS IOMUX_PAD(0x39c, 0x1a4, 0x10, 0x508, 2, NO_PAD_CTRL)
#define MX25_PAD_SD1_DATA3__GPIO_2_28 IOMUX_PAD(0x39c, 0x1a4, 0x15, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_KPP_ROW0__KPP_ROW0 IOMUX_PAD(0x3a0, 0x1a8, 0x10, 0, 0, PAD_CTL_PKE)
@@ -410,7 +410,7 @@
#define MX25_PAD_KPP_COL3__KPP_COL3 IOMUX_PAD(0x3bc, 0x1c4, 0x10, 0, 0, PAD_CTL_PKE | PAD_CTL_ODE)
#define MX25_PAD_KPP_COL3__GPIO_3_4 IOMUX_PAD(0x3bc, 0x1c4, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_MDC__FEC_MDC IOMUX_PAD(0x3c0, 0x1c8, 0x10, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_FEC_MDC__FEC_MDC IOMUX_PAD(0x3c0, 0x1c8, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_FEC_MDC__AUD4_TXD IOMUX_PAD(0x3c0, 0x1c8, 0x12, 0x464, 1, NO_PAD_CTRL)
#define MX25_PAD_FEC_MDC__GPIO_3_5 IOMUX_PAD(0x3c0, 0x1c8, 0x15, 0, 0, NO_PAD_CTRL)
@@ -418,23 +418,23 @@
#define MX25_PAD_FEC_MDIO__AUD4_RXD IOMUX_PAD(0x3c4, 0x1cc, 0x12, 0x460, 1, NO_PAD_CTRL)
#define MX25_PAD_FEC_MDIO__GPIO_3_6 IOMUX_PAD(0x3c4, 0x1cc, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_TDATA0__FEC_TDATA0 IOMUX_PAD(0x3c8, 0x1d0, 0x10, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_FEC_TDATA0__FEC_TDATA0 IOMUX_PAD(0x3c8, 0x1d0, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_FEC_TDATA0__GPIO_3_7 IOMUX_PAD(0x3c8, 0x1d0, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_TDATA1__FEC_TDATA1 IOMUX_PAD(0x3cc, 0x1d4, 0x10, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_FEC_TDATA1__FEC_TDATA1 IOMUX_PAD(0x3cc, 0x1d4, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_FEC_TDATA1__AUD4_TXFS IOMUX_PAD(0x3cc, 0x1d4, 0x12, 0x474, 1, NO_PAD_CTRL)
#define MX25_PAD_FEC_TDATA1__GPIO_3_8 IOMUX_PAD(0x3cc, 0x1d4, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_TX_EN__FEC_TX_EN IOMUX_PAD(0x3d0, 0x1d8, 0x10, 0, 0, NO_PAD_CTL)
+#define MX25_PAD_FEC_TX_EN__FEC_TX_EN IOMUX_PAD(0x3d0, 0x1d8, 0x10, 0, 0, NO_PAD_CTRL)
#define MX25_PAD_FEC_TX_EN__GPIO_3_9 IOMUX_PAD(0x3d0, 0x1d8, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 IOMUX_PAD(0x3d4, 0x1dc, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTL)
+#define MX25_PAD_FEC_RDATA0__FEC_RDATA0 IOMUX_PAD(0x3d4, 0x1dc, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
#define MX25_PAD_FEC_RDATA0__GPIO_3_10 IOMUX_PAD(0x3d4, 0x1dc, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 IOMUX_PAD(0x3d8, 0x1e0, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTL)
+#define MX25_PAD_FEC_RDATA1__FEC_RDATA1 IOMUX_PAD(0x3d8, 0x1e0, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
#define MX25_PAD_FEC_RDATA1__GPIO_3_11 IOMUX_PAD(0x3d8, 0x1e0, 0x15, 0, 0, NO_PAD_CTRL)
-#define MX25_PAD_FEC_RX_DV__FEC_RX_DV IOMUX_PAD(0x3dc, 0x1e4, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTL)
+#define MX25_PAD_FEC_RX_DV__FEC_RX_DV IOMUX_PAD(0x3dc, 0x1e4, 0x10, 0, 0, PAD_CTL_PUS_100K_DOWN | NO_PAD_CTRL)
#define MX25_PAD_FEC_RX_DV__CAN2_RX IOMUX_PAD(0x3dc, 0x1e4, 0x14, 0x484, 0, PAD_CTL_PUS_22K_UP)
#define MX25_PAD_FEC_RX_DV__GPIO_3_12 IOMUX_PAD(0x3dc, 0x1e4, 0x15, 0, 0, NO_PAD_CTRL)
diff --git a/arch/arm/plat-mxc/include/mach/mx25.h b/arch/arm/plat-mxc/include/mach/mx25.h
index 91e738144804..854e2dc58481 100644
--- a/arch/arm/plat-mxc/include/mach/mx25.h
+++ b/arch/arm/plat-mxc/include/mach/mx25.h
@@ -41,4 +41,8 @@
#define UART1_BASE_ADDR 0x43f90000
#define UART2_BASE_ADDR 0x43f94000
+#define MX25_FEC_BASE_ADDR 0x50038000
+
+#define MX25_INT_FEC 57
+
#endif /* __MACH_MX25_H__ */
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 89cafc937249..d9f8c844c385 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -36,10 +36,6 @@ static struct clk_functions *arch_clock;
* Standard clock functions defined in include/linux/clk.h
*-------------------------------------------------------------------------*/
-/* This functions is moved to arch/arm/common/clkdev.c. For OMAP4 since
- * clock framework is not up , it is defined here to avoid rework in
- * every driver. Also dummy prcm reset function is added */
-
int clk_enable(struct clk *clk)
{
unsigned long flags;
@@ -305,7 +301,6 @@ void clk_enable_init_clocks(void)
clk_enable(clkp);
}
}
-EXPORT_SYMBOL(clk_enable_init_clocks);
/*
* Low level helpers
@@ -334,7 +329,16 @@ void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
arch_clock->clk_init_cpufreq_table(table);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
-EXPORT_SYMBOL(clk_init_cpufreq_table);
+
+void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_exit_cpufreq_table)
+ arch_clock->clk_exit_cpufreq_table(table);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
+}
#endif
/*-------------------------------------------------------------------------*/
diff --git a/arch/arm/plat-omap/cpu-omap.c b/arch/arm/plat-omap/cpu-omap.c
index f8ddbdd8b076..6d3d33360056 100644
--- a/arch/arm/plat-omap/cpu-omap.c
+++ b/arch/arm/plat-omap/cpu-omap.c
@@ -134,6 +134,7 @@ static int __init omap_cpu_init(struct cpufreq_policy *policy)
static int omap_cpu_exit(struct cpufreq_policy *policy)
{
+ clk_exit_cpufreq_table(&freq_table);
clk_put(mpu_clk);
return 0;
}
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 09d82b3c66ce..728c64204184 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -1183,7 +1183,7 @@ void omap_dma_unlink_lch(int lch_head, int lch_queue)
}
if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
- (dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
+ (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
"before unlinking\n");
dump_stack();
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 04846811d0aa..d17620c50c28 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -192,6 +192,7 @@ struct gpio_bank {
u32 saved_risingdetect;
#endif
u32 level_mask;
+ u32 toggle_mask;
spinlock_t lock;
struct gpio_chip chip;
struct clk *dbck;
@@ -749,6 +750,44 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
}
#endif
+/*
+ * This only applies to chips that can't do both rising and falling edge
+ * detection at once. For all other chips, this function is a noop.
+ */
+static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
+{
+ void __iomem *reg = bank->base;
+ u32 l = 0;
+
+ switch (bank->method) {
+#ifdef CONFIG_ARCH_OMAP1
+ case METHOD_MPUIO:
+ reg += OMAP_MPUIO_GPIO_INT_EDGE;
+ break;
+#endif
+#ifdef CONFIG_ARCH_OMAP15XX
+ case METHOD_GPIO_1510:
+ reg += OMAP1510_GPIO_INT_CONTROL;
+ break;
+#endif
+#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
+ case METHOD_GPIO_7XX:
+ reg += OMAP7XX_GPIO_INT_CONTROL;
+ break;
+#endif
+ default:
+ return;
+ }
+
+ l = __raw_readl(reg);
+ if ((l >> gpio) & 1)
+ l &= ~(1 << gpio);
+ else
+ l |= 1 << gpio;
+
+ __raw_writel(l, reg);
+}
+
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
{
void __iomem *reg = bank->base;
@@ -759,6 +798,8 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_MPUIO:
reg += OMAP_MPUIO_GPIO_INT_EDGE;
l = __raw_readl(reg);
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
else if (trigger & IRQ_TYPE_EDGE_FALLING)
@@ -771,6 +812,8 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_GPIO_1510:
reg += OMAP1510_GPIO_INT_CONTROL;
l = __raw_readl(reg);
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
else if (trigger & IRQ_TYPE_EDGE_FALLING)
@@ -803,6 +846,8 @@ static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
case METHOD_GPIO_7XX:
reg += OMAP7XX_GPIO_INT_CONTROL;
l = __raw_readl(reg);
+ if (trigger & IRQ_TYPE_EDGE_BOTH)
+ bank->toggle_mask |= 1 << gpio;
if (trigger & IRQ_TYPE_EDGE_RISING)
l |= 1 << gpio;
else if (trigger & IRQ_TYPE_EDGE_FALLING)
@@ -1072,7 +1117,7 @@ static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int ena
*/
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
- unsigned long flags;
+ unsigned long uninitialized_var(flags);
switch (bank->method) {
#ifdef CONFIG_ARCH_OMAP16XX
@@ -1217,7 +1262,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
void __iomem *isr_reg = NULL;
u32 isr;
- unsigned int gpio_irq;
+ unsigned int gpio_irq, gpio_index;
struct gpio_bank *bank;
u32 retrigger = 0;
int unmasked = 0;
@@ -1284,9 +1329,23 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
gpio_irq = bank->virtual_irq_start;
for (; isr != 0; isr >>= 1, gpio_irq++) {
+ gpio_index = get_gpio_index(irq_to_gpio(gpio_irq));
+
if (!(isr & 1))
continue;
+#ifdef CONFIG_ARCH_OMAP1
+ /*
+ * Some chips can't respond to both rising and falling
+ * at the same time. If this irq was requested with
+ * both flags, we need to flip the ICR data for the IRQ
+ * to respond to the IRQ for the opposite direction.
+ * This will be indicated in the bank toggle_mask.
+ */
+ if (bank->toggle_mask & (1 << gpio_index))
+ _toggle_gpio_edge_triggering(bank, gpio_index);
+#endif
+
generic_handle_irq(gpio_irq);
}
}
diff --git a/arch/arm/plat-omap/include/plat/board.h b/arch/arm/plat-omap/include/plat/board.h
index 376ce18216ff..5cd622039da0 100644
--- a/arch/arm/plat-omap/include/plat/board.h
+++ b/arch/arm/plat-omap/include/plat/board.h
@@ -99,7 +99,6 @@ struct fb_info;
struct omap_backlight_config {
int default_intensity;
int (*set_power)(struct device *dev, int state);
- int (*check_fb)(struct fb_info *fb);
};
struct omap_fbmem_config {
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 309b6d1dccdb..94fe2a0ce40a 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -119,6 +119,7 @@ struct clk_functions {
void (*clk_disable_unused)(struct clk *clk);
#ifdef CONFIG_CPU_FREQ
void (*clk_init_cpufreq_table)(struct cpufreq_frequency_table **);
+ void (*clk_exit_cpufreq_table)(struct cpufreq_frequency_table **);
#endif
};
@@ -135,6 +136,7 @@ extern unsigned long followparent_recalc(struct clk *clk);
extern void clk_enable_init_clocks(void);
#ifdef CONFIG_CPU_FREQ
extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
+extern void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
#endif
extern const struct clkops clkops_null;
diff --git a/arch/arm/plat-omap/include/plat/control.h b/arch/arm/plat-omap/include/plat/control.h
index 2ae884378638..a745d62fad0d 100644
--- a/arch/arm/plat-omap/include/plat/control.h
+++ b/arch/arm/plat-omap/include/plat/control.h
@@ -147,7 +147,7 @@
#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
#define OMAP343X_CONTROL_DEBOBS(i) (OMAP2_CONTROL_GENERAL + 0x01B0 \
- + ((i) >> 1) * 4 + (!(i) & 1) * 2)
+ + ((i) >> 1) * 4 + (!((i) & 1)) * 2)
#define OMAP343X_CONTROL_PROG_IO0 (OMAP2_CONTROL_GENERAL + 0x01D4)
#define OMAP343X_CONTROL_PROG_IO1 (OMAP2_CONTROL_GENERAL + 0x01D8)
#define OMAP343X_CONTROL_DSS_DPLL_SPREADING (OMAP2_CONTROL_GENERAL + 0x01E0)
diff --git a/arch/arm/plat-omap/include/plat/flash.h b/arch/arm/plat-omap/include/plat/flash.h
new file mode 100644
index 000000000000..3e6327016b40
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/flash.h
@@ -0,0 +1,16 @@
+/*
+ * Flash support for OMAP1
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __OMAP_FLASH_H
+#define __OMAP_FLASH_H
+
+#include <linux/mtd/map.h>
+
+extern void omap1_set_vpp(struct map_info *map, int enable);
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/io.h b/arch/arm/plat-omap/include/plat/io.h
index 7e5319f907d1..a3e7b471bcba 100644
--- a/arch/arm/plat-omap/include/plat/io.h
+++ b/arch/arm/plat-omap/include/plat/io.h
@@ -122,16 +122,21 @@
#define OMAP243X_SMS_VIRT (OMAP243X_SMS_PHYS + OMAP2_L3_IO_OFFSET)
#define OMAP243X_SMS_SIZE SZ_1M
-/* DSP */
-#define DSP_MEM_24XX_PHYS OMAP2420_DSP_MEM_BASE /* 0x58000000 */
-#define DSP_MEM_24XX_VIRT 0xe0000000
-#define DSP_MEM_24XX_SIZE 0x28000
-#define DSP_IPI_24XX_PHYS OMAP2420_DSP_IPI_BASE /* 0x59000000 */
-#define DSP_IPI_24XX_VIRT 0xe1000000
-#define DSP_IPI_24XX_SIZE SZ_4K
-#define DSP_MMU_24XX_PHYS OMAP2420_DSP_MMU_BASE /* 0x5a000000 */
-#define DSP_MMU_24XX_VIRT 0xe2000000
-#define DSP_MMU_24XX_SIZE SZ_4K
+/* 2420 IVA */
+#define DSP_MEM_2420_PHYS OMAP2420_DSP_MEM_BASE
+ /* 0x58000000 --> 0xfc100000 */
+#define DSP_MEM_2420_VIRT 0xfc100000
+#define DSP_MEM_2420_SIZE 0x28000
+#define DSP_IPI_2420_PHYS OMAP2420_DSP_IPI_BASE
+ /* 0x59000000 --> 0xfc128000 */
+#define DSP_IPI_2420_VIRT 0xfc128000
+#define DSP_IPI_2420_SIZE SZ_4K
+#define DSP_MMU_2420_PHYS OMAP2420_DSP_MMU_BASE
+ /* 0x5a000000 --> 0xfc129000 */
+#define DSP_MMU_2420_VIRT 0xfc129000
+#define DSP_MMU_2420_SIZE SZ_4K
+
+/* 2430 IVA2.1 - currently unmapped */
/*
* ----------------------------------------------------------------------------
@@ -182,16 +187,7 @@
#define OMAP343X_SDRC_VIRT (OMAP343X_SDRC_PHYS + OMAP2_L3_IO_OFFSET)
#define OMAP343X_SDRC_SIZE SZ_1M
-/* DSP */
-#define DSP_MEM_34XX_PHYS OMAP34XX_DSP_MEM_BASE /* 0x58000000 */
-#define DSP_MEM_34XX_VIRT 0xe0000000
-#define DSP_MEM_34XX_SIZE 0x28000
-#define DSP_IPI_34XX_PHYS OMAP34XX_DSP_IPI_BASE /* 0x59000000 */
-#define DSP_IPI_34XX_VIRT 0xe1000000
-#define DSP_IPI_34XX_SIZE SZ_4K
-#define DSP_MMU_34XX_PHYS OMAP34XX_DSP_MMU_BASE /* 0x5a000000 */
-#define DSP_MMU_34XX_VIRT 0xe2000000
-#define DSP_MMU_34XX_SIZE SZ_4K
+/* 3430 IVA - currently unmapped */
/*
* ----------------------------------------------------------------------------
diff --git a/arch/arm/plat-omap/include/plat/mux.h b/arch/arm/plat-omap/include/plat/mux.h
index 8f069cc80350..692c90e89ac3 100644
--- a/arch/arm/plat-omap/include/plat/mux.h
+++ b/arch/arm/plat-omap/include/plat/mux.h
@@ -183,6 +183,14 @@ enum omap7xx_index {
/* I2C */
I2C_7XX_SCL,
I2C_7XX_SDA,
+
+ /* SPI */
+ SPI_7XX_1,
+ SPI_7XX_2,
+ SPI_7XX_3,
+ SPI_7XX_4,
+ SPI_7XX_5,
+ SPI_7XX_6,
};
enum omap1xxx_index {
diff --git a/arch/arm/plat-omap/include/plat/omap7xx.h b/arch/arm/plat-omap/include/plat/omap7xx.h
index 53f52414b0e9..48e4757e1e30 100644
--- a/arch/arm/plat-omap/include/plat/omap7xx.h
+++ b/arch/arm/plat-omap/include/plat/omap7xx.h
@@ -46,6 +46,9 @@
#define OMAP7XX_DSPREG_SIZE SZ_128K
#define OMAP7XX_DSPREG_START 0xE1000000
+#define OMAP7XX_SPI1_BASE 0xfffc0800
+#define OMAP7XX_SPI2_BASE 0xfffc1000
+
/*
* ----------------------------------------------------------------------------
* OMAP7XX specific configuration registers
diff --git a/arch/arm/plat-omap/io.c b/arch/arm/plat-omap/io.c
index 11f5d7961c73..4cbd4fb3232c 100644
--- a/arch/arm/plat-omap/io.c
+++ b/arch/arm/plat-omap/io.c
@@ -66,12 +66,12 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
}
if (cpu_is_omap2420()) {
- if (BETWEEN(p, DSP_MEM_24XX_PHYS, DSP_MEM_24XX_SIZE))
- return XLATE(p, DSP_MEM_24XX_PHYS, DSP_MEM_24XX_VIRT);
- if (BETWEEN(p, DSP_IPI_24XX_PHYS, DSP_IPI_24XX_SIZE))
- return XLATE(p, DSP_IPI_24XX_PHYS, DSP_IPI_24XX_SIZE);
- if (BETWEEN(p, DSP_MMU_24XX_PHYS, DSP_MMU_24XX_SIZE))
- return XLATE(p, DSP_MMU_24XX_PHYS, DSP_MMU_24XX_VIRT);
+ if (BETWEEN(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_SIZE))
+ return XLATE(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_VIRT);
+ if (BETWEEN(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE))
+ return XLATE(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE);
+ if (BETWEEN(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_SIZE))
+ return XLATE(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_VIRT);
}
if (cpu_is_omap2430()) {
if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
@@ -128,7 +128,7 @@ void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
}
#endif
- return __arm_ioremap(p, size, type);
+ return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
}
EXPORT_SYMBOL(omap_ioremap);
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index c0ff1e39d893..463d6386aff2 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -827,7 +827,7 @@ EXPORT_SYMBOL_GPL(iommu_get);
**/
void iommu_put(struct iommu *obj)
{
- if (!obj && IS_ERR(obj))
+ if (!obj || IS_ERR(obj))
return;
mutex_lock(&obj->iommu_lock);
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 2cc1cc328bac..f75767278fc3 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -436,7 +436,7 @@ int omap_mcbsp_request(unsigned int id)
dev_err(mcbsp->dev, "Unable to request TX IRQ %d "
"for McBSP%d\n", mcbsp->tx_irq,
mcbsp->id);
- return err;
+ goto error;
}
init_completion(&mcbsp->rx_irq_completion);
@@ -446,12 +446,26 @@ int omap_mcbsp_request(unsigned int id)
dev_err(mcbsp->dev, "Unable to request RX IRQ %d "
"for McBSP%d\n", mcbsp->rx_irq,
mcbsp->id);
- free_irq(mcbsp->tx_irq, (void *)mcbsp);
- return err;
+ goto tx_irq;
}
}
return 0;
+tx_irq:
+ free_irq(mcbsp->tx_irq, (void *)mcbsp);
+error:
+ if (mcbsp->pdata && mcbsp->pdata->ops && mcbsp->pdata->ops->free)
+ mcbsp->pdata->ops->free(id);
+
+ /* Do procedure specific to omap34xx arch, if applicable */
+ omap34xx_mcbsp_free(mcbsp);
+
+ clk_disable(mcbsp->fclk);
+ clk_disable(mcbsp->iclk);
+
+ mcbsp->free = 1;
+
+ return err;
}
EXPORT_SYMBOL(omap_mcbsp_request);
diff --git a/arch/arm/plat-pxa/pwm.c b/arch/arm/plat-pxa/pwm.c
index a9eabdcfa163..51dc5c8106c0 100644
--- a/arch/arm/plat-pxa/pwm.c
+++ b/arch/arm/plat-pxa/pwm.c
@@ -204,14 +204,14 @@ static int __devinit pwm_probe(struct platform_device *pdev)
goto err_free_clk;
}
- r = request_mem_region(r->start, r->end - r->start + 1, pdev->name);
+ r = request_mem_region(r->start, resource_size(r), pdev->name);
if (r == NULL) {
dev_err(&pdev->dev, "failed to request memory resource\n");
ret = -EBUSY;
goto err_free_clk;
}
- pwm->mmio_base = ioremap(r->start, r->end - r->start + 1);
+ pwm->mmio_base = ioremap(r->start, resource_size(r));
if (pwm->mmio_base == NULL) {
dev_err(&pdev->dev, "failed to ioremap() registers\n");
ret = -ENODEV;
@@ -241,7 +241,7 @@ static int __devinit pwm_probe(struct platform_device *pdev)
return 0;
err_free_mem:
- release_mem_region(r->start, r->end - r->start + 1);
+ release_mem_region(r->start, resource_size(r));
err_free_clk:
clk_put(pwm->clk);
err_free:
@@ -271,7 +271,7 @@ static int __devexit pwm_remove(struct platform_device *pdev)
iounmap(pwm->mmio_base);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, r->end - r->start + 1);
+ release_mem_region(r->start, resource_size(r));
clk_put(pwm->clk);
kfree(pwm);
diff --git a/arch/arm/plat-s3c/Kconfig b/arch/arm/plat-s3c/Kconfig
index 9e9d0286e48f..454cc39b7adc 100644
--- a/arch/arm/plat-s3c/Kconfig
+++ b/arch/arm/plat-s3c/Kconfig
@@ -130,39 +130,6 @@ config S3C_GPIO_TRACK
Internal configuration option to enable the s3c specific gpio
chip tracking if the platform requires it.
-config S3C_GPIO_PULL_UPDOWN
- bool
- help
- Internal configuration to enable the correct GPIO pull helper
-
-config S3C_GPIO_PULL_DOWN
- bool
- help
- Internal configuration to enable the correct GPIO pull helper
-
-config S3C_GPIO_PULL_UP
- bool
- help
- Internal configuration to enable the correct GPIO pull helper
-
-config S3C_GPIO_CFG_S3C24XX
- bool
- help
- Internal configuration to enable S3C24XX style GPIO configuration
- functions.
-
-config S3C_GPIO_CFG_S3C64XX
- bool
- help
- Internal configuration to enable S3C64XX style GPIO configuration
- functions.
-
-config S5P_GPIO_CFG_S5PC1XX
- bool
- help
- Internal configuration to enable S5PC1XX style GPIO configuration
- functions.
-
# DMA
config S3C_DMA
@@ -170,46 +137,4 @@ config S3C_DMA
help
Internal configuration for S3C DMA core
-# device definitions to compile in
-
-config S3C_DEV_HSMMC
- bool
- help
- Compile in platform device definitions for HSMMC code
-
-config S3C_DEV_HSMMC1
- bool
- help
- Compile in platform device definitions for HSMMC channel 1
-
-config S3C_DEV_HSMMC2
- bool
- help
- Compile in platform device definitions for HSMMC channel 2
-
-config S3C_DEV_I2C1
- bool
- help
- Compile in platform device definitions for I2C channel 1
-
-config S3C_DEV_FB
- bool
- help
- Compile in platform device definition for framebuffer
-
-config S3C_DEV_USB_HOST
- bool
- help
- Compile in platform device definition for USB host.
-
-config S3C_DEV_USB_HSOTG
- bool
- help
- Compile in platform device definition for USB high-speed OtG
-
-config S3C_DEV_NAND
- bool
- help
- Compile in platform device definition for NAND controller
-
endif
diff --git a/arch/arm/plat-s3c/Makefile b/arch/arm/plat-s3c/Makefile
index 50444da98425..ea4a001f6793 100644
--- a/arch/arm/plat-s3c/Makefile
+++ b/arch/arm/plat-s3c/Makefile
@@ -11,12 +11,9 @@ obj- :=
# Core support for all Samsung SoCs
-obj-y += init.o
+obj-y += init.o
obj-y += time.o
-obj-y += clock.o
-obj-y += pwm-clock.o
obj-y += gpio.o
-obj-y += gpio-config.o
# DMA support
@@ -31,15 +28,3 @@ obj-$(CONFIG_S3C2410_PM_CHECK) += pm-check.o
# PWM support
obj-$(CONFIG_HAVE_PWM) += pwm.o
-
-# devices
-
-obj-$(CONFIG_S3C_DEV_HSMMC) += dev-hsmmc.o
-obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
-obj-$(CONFIG_S3C_DEV_HSMMC2) += dev-hsmmc2.o
-obj-y += dev-i2c0.o
-obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
-obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
-obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
-obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
-obj-$(CONFIG_S3C_DEV_NAND) += dev-nand.o
diff --git a/arch/arm/plat-s3c/include/plat/nand.h b/arch/arm/plat-s3c/include/plat/nand.h
index 226147b7e026..b64115fa93a4 100644
--- a/arch/arm/plat-s3c/include/plat/nand.h
+++ b/arch/arm/plat-s3c/include/plat/nand.h
@@ -3,7 +3,7 @@
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
- * S3C2410 - NAND device controller platfrom_device info
+ * S3C2410 - NAND device controller platform_device info
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/arch/arm/plat-s3c24xx/clock-dclk.c b/arch/arm/plat-s3c24xx/clock-dclk.c
index ac061a1bcb37..cf97caafe56b 100644
--- a/arch/arm/plat-s3c24xx/clock-dclk.c
+++ b/arch/arm/plat-s3c24xx/clock-dclk.c
@@ -161,14 +161,18 @@ static int s3c24xx_clkout_setparent(struct clk *clk, struct clk *parent)
/* external clock definitions */
+static struct clk_ops dclk_ops = {
+ .set_parent = s3c24xx_dclk_setparent,
+ .set_rate = s3c24xx_set_dclk_rate,
+ .round_rate = s3c24xx_round_dclk_rate,
+};
+
struct clk s3c24xx_dclk0 = {
.name = "dclk0",
.id = -1,
.ctrlbit = S3C2410_DCLKCON_DCLK0EN,
.enable = s3c24xx_dclk_enable,
- .set_parent = s3c24xx_dclk_setparent,
- .set_rate = s3c24xx_set_dclk_rate,
- .round_rate = s3c24xx_round_dclk_rate,
+ .ops = &dclk_ops,
};
struct clk s3c24xx_dclk1 = {
@@ -176,19 +180,21 @@ struct clk s3c24xx_dclk1 = {
.id = -1,
.ctrlbit = S3C2410_DCLKCON_DCLK1EN,
.enable = s3c24xx_dclk_enable,
- .set_parent = s3c24xx_dclk_setparent,
- .set_rate = s3c24xx_set_dclk_rate,
- .round_rate = s3c24xx_round_dclk_rate,
+ .ops = &dclk_ops,
+};
+
+static struct clk_ops clkout_ops = {
+ .set_parent = s3c24xx_clkout_setparent,
};
struct clk s3c24xx_clkout0 = {
.name = "clkout0",
.id = -1,
- .set_parent = s3c24xx_clkout_setparent,
+ .ops = &clkout_ops,
};
struct clk s3c24xx_clkout1 = {
.name = "clkout1",
.id = -1,
- .set_parent = s3c24xx_clkout_setparent,
+ .ops = &clkout_ops,
};
diff --git a/arch/arm/plat-s3c24xx/s3c244x-clock.c b/arch/arm/plat-s3c24xx/s3c244x-clock.c
index 79371091aa38..f8d96130d1d1 100644
--- a/arch/arm/plat-s3c24xx/s3c244x-clock.c
+++ b/arch/arm/plat-s3c24xx/s3c244x-clock.c
@@ -68,7 +68,9 @@ static int s3c2440_setparent_armclk(struct clk *clk, struct clk *parent)
static struct clk clk_arm = {
.name = "armclk",
.id = -1,
- .set_parent = s3c2440_setparent_armclk,
+ .ops = &(struct clk_ops) {
+ .set_parent = s3c2440_setparent_armclk,
+ },
};
static int s3c244x_clk_add(struct sys_device *sysdev)
diff --git a/arch/arm/plat-s3c64xx/Kconfig b/arch/arm/plat-s3c64xx/Kconfig
index e6da87a5885c..0fba1f956b8a 100644
--- a/arch/arm/plat-s3c64xx/Kconfig
+++ b/arch/arm/plat-s3c64xx/Kconfig
@@ -13,6 +13,9 @@ config PLAT_S3C64XX
select ARM_VIC
select NO_IOPORT
select ARCH_REQUIRE_GPIOLIB
+ select SAMSUNG_CLKSRC
+ select SAMSUNG_IRQ_VIC_TIMER
+ select SAMSUNG_IRQ_UART
select S3C_GPIO_TRACK
select S3C_GPIO_PULL_UPDOWN
select S3C_GPIO_CFG_S3C24XX
diff --git a/arch/arm/plat-s3c64xx/clock.c b/arch/arm/plat-s3c64xx/clock.c
index 7a36e899360d..ae5883c00e7a 100644
--- a/arch/arm/plat-s3c64xx/clock.c
+++ b/arch/arm/plat-s3c64xx/clock.c
@@ -274,15 +274,7 @@ void __init s3c64xx_register_clocks(void)
int ptr;
s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
-
- clkp = init_clocks;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
- }
+ s3c_register_clocks(init_clocks, ARRAY_SIZE(init_clocks));
clkp = init_clocks_disable;
for (ptr = 0; ptr < ARRAY_SIZE(init_clocks_disable); ptr++, clkp++) {
diff --git a/arch/arm/plat-s3c64xx/cpu.c b/arch/arm/plat-s3c64xx/cpu.c
index 49796d2db86d..c0e6f2a45154 100644
--- a/arch/arm/plat-s3c64xx/cpu.c
+++ b/arch/arm/plat-s3c64xx/cpu.c
@@ -78,12 +78,12 @@ static struct map_desc s3c_iodesc[] __initdata = {
.length = SZ_4K,
.type = MT_DEVICE,
}, {
- .virtual = (unsigned long)S3C_VA_VIC0,
+ .virtual = (unsigned long)VA_VIC0,
.pfn = __phys_to_pfn(S3C64XX_PA_VIC0),
.length = SZ_16K,
.type = MT_DEVICE,
}, {
- .virtual = (unsigned long)S3C_VA_VIC1,
+ .virtual = (unsigned long)VA_VIC1,
.pfn = __phys_to_pfn(S3C64XX_PA_VIC1),
.length = SZ_16K,
.type = MT_DEVICE,
diff --git a/arch/arm/plat-s3c64xx/include/plat/irqs.h b/arch/arm/plat-s3c64xx/include/plat/irqs.h
index 7956fd3bb194..176fe15a61d6 100644
--- a/arch/arm/plat-s3c64xx/include/plat/irqs.h
+++ b/arch/arm/plat-s3c64xx/include/plat/irqs.h
@@ -24,8 +24,8 @@
#define S3C_IRQ(x) ((x) + S3C_IRQ_OFFSET)
-#define S3C_VIC0_BASE S3C_IRQ(0)
-#define S3C_VIC1_BASE S3C_IRQ(32)
+#define IRQ_VIC0_BASE S3C_IRQ(0)
+#define IRQ_VIC1_BASE S3C_IRQ(32)
/* UART interrupts, each UART has 4 intterupts per channel so
* use the space between the ISA and S3C main interrupts. Note, these
@@ -59,8 +59,8 @@
/* VIC based IRQs */
-#define S3C64XX_IRQ_VIC0(x) (S3C_VIC0_BASE + (x))
-#define S3C64XX_IRQ_VIC1(x) (S3C_VIC1_BASE + (x))
+#define S3C64XX_IRQ_VIC0(x) (IRQ_VIC0_BASE + (x))
+#define S3C64XX_IRQ_VIC1(x) (IRQ_VIC1_BASE + (x))
/* VIC0 */
diff --git a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
index ff46e7fa957a..3ef62741e5d1 100644
--- a/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
+++ b/arch/arm/plat-s3c64xx/include/plat/regs-clock.h
@@ -35,14 +35,6 @@
#define S3C_MEM0_GATE S3C_CLKREG(0x3C)
/* CLKDIV0 */
-#define S3C6400_CLKDIV0_MFC_MASK (0xf << 28)
-#define S3C6400_CLKDIV0_MFC_SHIFT (28)
-#define S3C6400_CLKDIV0_JPEG_MASK (0xf << 24)
-#define S3C6400_CLKDIV0_JPEG_SHIFT (24)
-#define S3C6400_CLKDIV0_CAM_MASK (0xf << 20)
-#define S3C6400_CLKDIV0_CAM_SHIFT (20)
-#define S3C6400_CLKDIV0_SECURITY_MASK (0x3 << 18)
-#define S3C6400_CLKDIV0_SECURITY_SHIFT (18)
#define S3C6400_CLKDIV0_PCLK_MASK (0xf << 12)
#define S3C6400_CLKDIV0_PCLK_SHIFT (12)
#define S3C6400_CLKDIV0_HCLK2_MASK (0x7 << 9)
@@ -51,42 +43,11 @@
#define S3C6400_CLKDIV0_HCLK_SHIFT (8)
#define S3C6400_CLKDIV0_MPLL_MASK (0x1 << 4)
#define S3C6400_CLKDIV0_MPLL_SHIFT (4)
+
#define S3C6400_CLKDIV0_ARM_MASK (0x7 << 0)
#define S3C6410_CLKDIV0_ARM_MASK (0xf << 0)
#define S3C6400_CLKDIV0_ARM_SHIFT (0)
-/* CLKDIV1 */
-#define S3C6410_CLKDIV1_FIMC_MASK (0xf << 24)
-#define S3C6410_CLKDIV1_FIMC_SHIFT (24)
-#define S3C6400_CLKDIV1_UHOST_MASK (0xf << 20)
-#define S3C6400_CLKDIV1_UHOST_SHIFT (20)
-#define S3C6400_CLKDIV1_SCALER_MASK (0xf << 16)
-#define S3C6400_CLKDIV1_SCALER_SHIFT (16)
-#define S3C6400_CLKDIV1_LCD_MASK (0xf << 12)
-#define S3C6400_CLKDIV1_LCD_SHIFT (12)
-#define S3C6400_CLKDIV1_MMC2_MASK (0xf << 8)
-#define S3C6400_CLKDIV1_MMC2_SHIFT (8)
-#define S3C6400_CLKDIV1_MMC1_MASK (0xf << 4)
-#define S3C6400_CLKDIV1_MMC1_SHIFT (4)
-#define S3C6400_CLKDIV1_MMC0_MASK (0xf << 0)
-#define S3C6400_CLKDIV1_MMC0_SHIFT (0)
-
-/* CLKDIV2 */
-#define S3C6410_CLKDIV2_AUDIO2_MASK (0xf << 24)
-#define S3C6410_CLKDIV2_AUDIO2_SHIFT (24)
-#define S3C6400_CLKDIV2_IRDA_MASK (0xf << 20)
-#define S3C6400_CLKDIV2_IRDA_SHIFT (20)
-#define S3C6400_CLKDIV2_UART_MASK (0xf << 16)
-#define S3C6400_CLKDIV2_UART_SHIFT (16)
-#define S3C6400_CLKDIV2_AUDIO1_MASK (0xf << 12)
-#define S3C6400_CLKDIV2_AUDIO1_SHIFT (12)
-#define S3C6400_CLKDIV2_AUDIO0_MASK (0xf << 8)
-#define S3C6400_CLKDIV2_AUDIO0_SHIFT (8)
-#define S3C6400_CLKDIV2_SPI1_MASK (0xf << 4)
-#define S3C6400_CLKDIV2_SPI1_SHIFT (4)
-#define S3C6400_CLKDIV2_SPI0_MASK (0xf << 0)
-#define S3C6400_CLKDIV2_SPI0_SHIFT (0)
-
/* HCLK GATE Registers */
#define S3C_CLKCON_HCLK_3DSE (1<<31)
#define S3C_CLKCON_HCLK_UHOST (1<<29)
@@ -192,34 +153,4 @@
#define S3C6400_CLKSRC_EPLL_MOUT_SHIFT (2)
#define S3C6400_CLKSRC_MFC (1 << 4)
-#define S3C6410_CLKSRC_TV27_MASK (0x1 << 31)
-#define S3C6410_CLKSRC_TV27_SHIFT (31)
-#define S3C6410_CLKSRC_DAC27_MASK (0x1 << 30)
-#define S3C6410_CLKSRC_DAC27_SHIFT (30)
-#define S3C6400_CLKSRC_SCALER_MASK (0x3 << 28)
-#define S3C6400_CLKSRC_SCALER_SHIFT (28)
-#define S3C6400_CLKSRC_LCD_MASK (0x3 << 26)
-#define S3C6400_CLKSRC_LCD_SHIFT (26)
-#define S3C6400_CLKSRC_IRDA_MASK (0x3 << 24)
-#define S3C6400_CLKSRC_IRDA_SHIFT (24)
-#define S3C6400_CLKSRC_MMC2_MASK (0x3 << 22)
-#define S3C6400_CLKSRC_MMC2_SHIFT (22)
-#define S3C6400_CLKSRC_MMC1_MASK (0x3 << 20)
-#define S3C6400_CLKSRC_MMC1_SHIFT (20)
-#define S3C6400_CLKSRC_MMC0_MASK (0x3 << 18)
-#define S3C6400_CLKSRC_MMC0_SHIFT (18)
-#define S3C6400_CLKSRC_SPI1_MASK (0x3 << 16)
-#define S3C6400_CLKSRC_SPI1_SHIFT (16)
-#define S3C6400_CLKSRC_SPI0_MASK (0x3 << 14)
-#define S3C6400_CLKSRC_SPI0_SHIFT (14)
-#define S3C6400_CLKSRC_UART_MASK (0x1 << 13)
-#define S3C6400_CLKSRC_UART_SHIFT (13)
-#define S3C6400_CLKSRC_AUDIO1_MASK (0x7 << 10)
-#define S3C6400_CLKSRC_AUDIO1_SHIFT (10)
-#define S3C6400_CLKSRC_AUDIO0_MASK (0x7 << 7)
-#define S3C6400_CLKSRC_AUDIO0_SHIFT (7)
-#define S3C6400_CLKSRC_UHOST_MASK (0x3 << 5)
-#define S3C6400_CLKSRC_UHOST_SHIFT (5)
-
-
#endif /* _PLAT_REGS_CLOCK_H */
diff --git a/arch/arm/plat-s3c64xx/irq.c b/arch/arm/plat-s3c64xx/irq.c
index 8dc5b6da9789..67a145d440f3 100644
--- a/arch/arm/plat-s3c64xx/irq.c
+++ b/arch/arm/plat-s3c64xx/irq.c
@@ -21,88 +21,11 @@
#include <asm/hardware/vic.h>
#include <mach/map.h>
-#include <plat/regs-serial.h>
-#include <plat/regs-timer.h>
+#include <plat/irq-vic-timer.h>
+#include <plat/irq-uart.h>
#include <plat/cpu.h>
-/* Timer interrupt handling */
-
-static void s3c_irq_demux_timer(unsigned int base_irq, unsigned int sub_irq)
-{
- generic_handle_irq(sub_irq);
-}
-
-static void s3c_irq_demux_timer0(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_timer(irq, IRQ_TIMER0);
-}
-
-static void s3c_irq_demux_timer1(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_timer(irq, IRQ_TIMER1);
-}
-
-static void s3c_irq_demux_timer2(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_timer(irq, IRQ_TIMER2);
-}
-
-static void s3c_irq_demux_timer3(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_timer(irq, IRQ_TIMER3);
-}
-
-static void s3c_irq_demux_timer4(unsigned int irq, struct irq_desc *desc)
-{
- s3c_irq_demux_timer(irq, IRQ_TIMER4);
-}
-
-/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
-
-static void s3c_irq_timer_mask(unsigned int irq)
-{
- u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
-
- reg &= 0x1f; /* mask out pending interrupts */
- reg &= ~(1 << (irq - IRQ_TIMER0));
- __raw_writel(reg, S3C64XX_TINT_CSTAT);
-}
-
-static void s3c_irq_timer_unmask(unsigned int irq)
-{
- u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
-
- reg &= 0x1f; /* mask out pending interrupts */
- reg |= 1 << (irq - IRQ_TIMER0);
- __raw_writel(reg, S3C64XX_TINT_CSTAT);
-}
-
-static void s3c_irq_timer_ack(unsigned int irq)
-{
- u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
-
- reg &= 0x1f;
- reg |= (1 << 5) << (irq - IRQ_TIMER0);
- __raw_writel(reg, S3C64XX_TINT_CSTAT);
-}
-
-static struct irq_chip s3c_irq_timer = {
- .name = "s3c-timer",
- .mask = s3c_irq_timer_mask,
- .unmask = s3c_irq_timer_unmask,
- .ack = s3c_irq_timer_ack,
-};
-
-struct uart_irq {
- void __iomem *regs;
- unsigned int base_irq;
- unsigned int parent_irq;
-};
-
-/* Note, we make use of the fact that the parent IRQs, IRQ_UART[0..3]
- * are consecutive when looking up the interrupt in the demux routines.
- */
-static struct uart_irq uart_irqs[] = {
+static struct s3c_uart_irq uart_irqs[] = {
[0] = {
.regs = S3C_VA_UART0,
.base_irq = IRQ_S3CUART_BASE0,
@@ -125,132 +48,22 @@ static struct uart_irq uart_irqs[] = {
},
};
-static inline void __iomem *s3c_irq_uart_base(unsigned int irq)
-{
- struct uart_irq *uirq = get_irq_chip_data(irq);
- return uirq->regs;
-}
-
-static inline unsigned int s3c_irq_uart_bit(unsigned int irq)
-{
- return irq & 3;
-}
-
-/* UART interrupt registers, not worth adding to seperate include header */
-
-static void s3c_irq_uart_mask(unsigned int irq)
-{
- void __iomem *regs = s3c_irq_uart_base(irq);
- unsigned int bit = s3c_irq_uart_bit(irq);
- u32 reg;
-
- reg = __raw_readl(regs + S3C64XX_UINTM);
- reg |= (1 << bit);
- __raw_writel(reg, regs + S3C64XX_UINTM);
-}
-
-static void s3c_irq_uart_maskack(unsigned int irq)
-{
- void __iomem *regs = s3c_irq_uart_base(irq);
- unsigned int bit = s3c_irq_uart_bit(irq);
- u32 reg;
-
- reg = __raw_readl(regs + S3C64XX_UINTM);
- reg |= (1 << bit);
- __raw_writel(reg, regs + S3C64XX_UINTM);
- __raw_writel(1 << bit, regs + S3C64XX_UINTP);
-}
-
-static void s3c_irq_uart_unmask(unsigned int irq)
-{
- void __iomem *regs = s3c_irq_uart_base(irq);
- unsigned int bit = s3c_irq_uart_bit(irq);
- u32 reg;
-
- reg = __raw_readl(regs + S3C64XX_UINTM);
- reg &= ~(1 << bit);
- __raw_writel(reg, regs + S3C64XX_UINTM);
-}
-
-static void s3c_irq_uart_ack(unsigned int irq)
-{
- void __iomem *regs = s3c_irq_uart_base(irq);
- unsigned int bit = s3c_irq_uart_bit(irq);
-
- __raw_writel(1 << bit, regs + S3C64XX_UINTP);
-}
-
-static void s3c_irq_demux_uart(unsigned int irq, struct irq_desc *desc)
-{
- struct uart_irq *uirq = &uart_irqs[irq - IRQ_UART0];
- u32 pend = __raw_readl(uirq->regs + S3C64XX_UINTP);
- int base = uirq->base_irq;
-
- if (pend & (1 << 0))
- generic_handle_irq(base);
- if (pend & (1 << 1))
- generic_handle_irq(base + 1);
- if (pend & (1 << 2))
- generic_handle_irq(base + 2);
- if (pend & (1 << 3))
- generic_handle_irq(base + 3);
-}
-
-static struct irq_chip s3c_irq_uart = {
- .name = "s3c-uart",
- .mask = s3c_irq_uart_mask,
- .unmask = s3c_irq_uart_unmask,
- .mask_ack = s3c_irq_uart_maskack,
- .ack = s3c_irq_uart_ack,
-};
-
-static void __init s3c64xx_uart_irq(struct uart_irq *uirq)
-{
- void __iomem *reg_base = uirq->regs;
- unsigned int irq;
- int offs;
-
- /* mask all interrupts at the start. */
- __raw_writel(0xf, reg_base + S3C64XX_UINTM);
-
- for (offs = 0; offs < 3; offs++) {
- irq = uirq->base_irq + offs;
-
- set_irq_chip(irq, &s3c_irq_uart);
- set_irq_chip_data(irq, uirq);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
-
- set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
-}
void __init s3c64xx_init_irq(u32 vic0_valid, u32 vic1_valid)
{
- int uart, irq;
-
printk(KERN_DEBUG "%s: initialising interrupts\n", __func__);
/* initialise the pair of VICs */
- vic_init(S3C_VA_VIC0, S3C_VIC0_BASE, vic0_valid, 0);
- vic_init(S3C_VA_VIC1, S3C_VIC1_BASE, vic1_valid, 0);
+ vic_init(VA_VIC0, IRQ_VIC0_BASE, vic0_valid, 0);
+ vic_init(VA_VIC1, IRQ_VIC1_BASE, vic1_valid, 0);
/* add the timer sub-irqs */
- set_irq_chained_handler(IRQ_TIMER0_VIC, s3c_irq_demux_timer0);
- set_irq_chained_handler(IRQ_TIMER1_VIC, s3c_irq_demux_timer1);
- set_irq_chained_handler(IRQ_TIMER2_VIC, s3c_irq_demux_timer2);
- set_irq_chained_handler(IRQ_TIMER3_VIC, s3c_irq_demux_timer3);
- set_irq_chained_handler(IRQ_TIMER4_VIC, s3c_irq_demux_timer4);
-
- for (irq = IRQ_TIMER0; irq <= IRQ_TIMER4; irq++) {
- set_irq_chip(irq, &s3c_irq_timer);
- set_irq_handler(irq, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
- }
+ s3c_init_vic_timer_irq(IRQ_TIMER0_VIC, IRQ_TIMER0);
+ s3c_init_vic_timer_irq(IRQ_TIMER1_VIC, IRQ_TIMER1);
+ s3c_init_vic_timer_irq(IRQ_TIMER2_VIC, IRQ_TIMER2);
+ s3c_init_vic_timer_irq(IRQ_TIMER3_VIC, IRQ_TIMER3);
+ s3c_init_vic_timer_irq(IRQ_TIMER4_VIC, IRQ_TIMER4);
- for (uart = 0; uart < ARRAY_SIZE(uart_irqs); uart++)
- s3c64xx_uart_irq(&uart_irqs[uart]);
+ s3c_init_uart_irqs(uart_irqs, ARRAY_SIZE(uart_irqs));
}
-
-
diff --git a/arch/arm/plat-s3c64xx/s3c6400-clock.c b/arch/arm/plat-s3c64xx/s3c6400-clock.c
index 6ffa21eb1b91..cb2bf4bff051 100644
--- a/arch/arm/plat-s3c64xx/s3c6400-clock.c
+++ b/arch/arm/plat-s3c64xx/s3c6400-clock.c
@@ -29,6 +29,7 @@
#include <plat/regs-clock.h>
#include <plat/clock.h>
+#include <plat/clock-clksrc.h>
#include <plat/cpu.h>
#include <plat/pll.h>
@@ -46,22 +47,7 @@ static struct clk clk_ext_xtal_mux = {
#define clk_fin_epll clk_ext_xtal_mux
#define clk_fout_mpll clk_mpll
-
-struct clk_sources {
- unsigned int nr_sources;
- struct clk **sources;
-};
-
-struct clksrc_clk {
- struct clk clk;
- unsigned int mask;
- unsigned int shift;
-
- struct clk_sources *sources;
-
- unsigned int divider_shift;
- void __iomem *reg_divider;
-};
+#define clk_fout_epll clk_epll
static struct clk clk_fout_apll = {
.name = "fout_apll",
@@ -73,7 +59,7 @@ static struct clk *clk_src_apll_list[] = {
[1] = &clk_fout_apll,
};
-static struct clk_sources clk_src_apll = {
+static struct clksrc_sources clk_src_apll = {
.sources = clk_src_apll_list,
.nr_sources = ARRAY_SIZE(clk_src_apll_list),
};
@@ -83,22 +69,16 @@ static struct clksrc_clk clk_mout_apll = {
.name = "mout_apll",
.id = -1,
},
- .shift = S3C6400_CLKSRC_APLL_MOUT_SHIFT,
- .mask = S3C6400_CLKSRC_APLL_MOUT,
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 0, .size = 1 },
.sources = &clk_src_apll,
};
-static struct clk clk_fout_epll = {
- .name = "fout_epll",
- .id = -1,
-};
-
static struct clk *clk_src_epll_list[] = {
[0] = &clk_fin_epll,
[1] = &clk_fout_epll,
};
-static struct clk_sources clk_src_epll = {
+static struct clksrc_sources clk_src_epll = {
.sources = clk_src_epll_list,
.nr_sources = ARRAY_SIZE(clk_src_epll_list),
};
@@ -108,8 +88,7 @@ static struct clksrc_clk clk_mout_epll = {
.name = "mout_epll",
.id = -1,
},
- .shift = S3C6400_CLKSRC_EPLL_MOUT_SHIFT,
- .mask = S3C6400_CLKSRC_EPLL_MOUT,
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 2, .size = 1 },
.sources = &clk_src_epll,
};
@@ -118,7 +97,7 @@ static struct clk *clk_src_mpll_list[] = {
[1] = &clk_fout_mpll,
};
-static struct clk_sources clk_src_mpll = {
+static struct clksrc_sources clk_src_mpll = {
.sources = clk_src_mpll_list,
.nr_sources = ARRAY_SIZE(clk_src_mpll_list),
};
@@ -128,8 +107,7 @@ static struct clksrc_clk clk_mout_mpll = {
.name = "mout_mpll",
.id = -1,
},
- .shift = S3C6400_CLKSRC_MPLL_MOUT_SHIFT,
- .mask = S3C6400_CLKSRC_MPLL_MOUT,
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 1, .size = 1 },
.sources = &clk_src_mpll,
};
@@ -187,9 +165,11 @@ static struct clk clk_arm = {
.name = "armclk",
.id = -1,
.parent = &clk_mout_apll.clk,
- .get_rate = s3c64xx_clk_arm_get_rate,
- .set_rate = s3c64xx_clk_arm_set_rate,
- .round_rate = s3c64xx_clk_arm_round_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s3c64xx_clk_arm_get_rate,
+ .set_rate = s3c64xx_clk_arm_set_rate,
+ .round_rate = s3c64xx_clk_arm_round_rate,
+ },
};
static unsigned long s3c64xx_clk_doutmpll_get_rate(struct clk *clk)
@@ -204,11 +184,15 @@ static unsigned long s3c64xx_clk_doutmpll_get_rate(struct clk *clk)
return rate;
}
+static struct clk_ops clk_dout_ops = {
+ .get_rate = s3c64xx_clk_doutmpll_get_rate,
+};
+
static struct clk clk_dout_mpll = {
.name = "dout_mpll",
.id = -1,
.parent = &clk_mout_mpll.clk,
- .get_rate = s3c64xx_clk_doutmpll_get_rate,
+ .ops = &clk_dout_ops,
};
static struct clk *clkset_spi_mmc_list[] = {
@@ -218,7 +202,7 @@ static struct clk *clkset_spi_mmc_list[] = {
&clk_27m,
};
-static struct clk_sources clkset_spi_mmc = {
+static struct clksrc_sources clkset_spi_mmc = {
.sources = clkset_spi_mmc_list,
.nr_sources = ARRAY_SIZE(clkset_spi_mmc_list),
};
@@ -230,7 +214,7 @@ static struct clk *clkset_irda_list[] = {
&clk_27m,
};
-static struct clk_sources clkset_irda = {
+static struct clksrc_sources clkset_irda = {
.sources = clkset_irda_list,
.nr_sources = ARRAY_SIZE(clkset_irda_list),
};
@@ -242,7 +226,7 @@ static struct clk *clkset_uart_list[] = {
NULL
};
-static struct clk_sources clkset_uart = {
+static struct clksrc_sources clkset_uart = {
.sources = clkset_uart_list,
.nr_sources = ARRAY_SIZE(clkset_uart_list),
};
@@ -254,12 +238,11 @@ static struct clk *clkset_uhost_list[] = {
&clk_fin_epll,
};
-static struct clk_sources clkset_uhost = {
+static struct clksrc_sources clkset_uhost = {
.sources = clkset_uhost_list,
.nr_sources = ARRAY_SIZE(clkset_uhost_list),
};
-
/* The peripheral clocks are all controlled via clocksource followed
* by an optional divider and gate stage. We currently roll this into
* one clock which hides the intermediate clock from the mux.
@@ -270,221 +253,7 @@ static struct clk_sources clkset_uhost = {
* have a common parent divisor so are not included here.
*/
-static inline struct clksrc_clk *to_clksrc(struct clk *clk)
-{
- return container_of(clk, struct clksrc_clk, clk);
-}
-
-static unsigned long s3c64xx_getrate_clksrc(struct clk *clk)
-{
- struct clksrc_clk *sclk = to_clksrc(clk);
- unsigned long rate = clk_get_rate(clk->parent);
- u32 clkdiv = __raw_readl(sclk->reg_divider);
-
- clkdiv >>= sclk->divider_shift;
- clkdiv &= 0xf;
- clkdiv++;
-
- rate /= clkdiv;
- return rate;
-}
-
-static int s3c64xx_setrate_clksrc(struct clk *clk, unsigned long rate)
-{
- struct clksrc_clk *sclk = to_clksrc(clk);
- void __iomem *reg = sclk->reg_divider;
- unsigned int div;
- u32 val;
-
- rate = clk_round_rate(clk, rate);
- div = clk_get_rate(clk->parent) / rate;
- if (div > 16)
- return -EINVAL;
-
- val = __raw_readl(reg);
- val &= ~(0xf << sclk->divider_shift);
- val |= (div - 1) << sclk->divider_shift;
- __raw_writel(val, reg);
-
- return 0;
-}
-
-static int s3c64xx_setparent_clksrc(struct clk *clk, struct clk *parent)
-{
- struct clksrc_clk *sclk = to_clksrc(clk);
- struct clk_sources *srcs = sclk->sources;
- u32 clksrc = __raw_readl(S3C_CLK_SRC);
- int src_nr = -1;
- int ptr;
-
- for (ptr = 0; ptr < srcs->nr_sources; ptr++)
- if (srcs->sources[ptr] == parent) {
- src_nr = ptr;
- break;
- }
-
- if (src_nr >= 0) {
- clksrc &= ~sclk->mask;
- clksrc |= src_nr << sclk->shift;
-
- __raw_writel(clksrc, S3C_CLK_SRC);
-
- clk->parent = parent;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static unsigned long s3c64xx_roundrate_clksrc(struct clk *clk,
- unsigned long rate)
-{
- unsigned long parent_rate = clk_get_rate(clk->parent);
- int div;
-
- if (rate > parent_rate)
- rate = parent_rate;
- else {
- div = parent_rate / rate;
-
- if (div == 0)
- div = 1;
- if (div > 16)
- div = 16;
-
- rate = parent_rate / div;
- }
-
- return rate;
-}
-
-static struct clksrc_clk clk_mmc0 = {
- .clk = {
- .name = "mmc_bus",
- .id = 0,
- .ctrlbit = S3C_CLKCON_SCLK_MMC0,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_MMC0_SHIFT,
- .mask = S3C6400_CLKSRC_MMC0_MASK,
- .sources = &clkset_spi_mmc,
- .divider_shift = S3C6400_CLKDIV1_MMC0_SHIFT,
- .reg_divider = S3C_CLK_DIV1,
-};
-
-static struct clksrc_clk clk_mmc1 = {
- .clk = {
- .name = "mmc_bus",
- .id = 1,
- .ctrlbit = S3C_CLKCON_SCLK_MMC1,
- .enable = s3c64xx_sclk_ctrl,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .set_parent = s3c64xx_setparent_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_MMC1_SHIFT,
- .mask = S3C6400_CLKSRC_MMC1_MASK,
- .sources = &clkset_spi_mmc,
- .divider_shift = S3C6400_CLKDIV1_MMC1_SHIFT,
- .reg_divider = S3C_CLK_DIV1,
-};
-
-static struct clksrc_clk clk_mmc2 = {
- .clk = {
- .name = "mmc_bus",
- .id = 2,
- .ctrlbit = S3C_CLKCON_SCLK_MMC2,
- .enable = s3c64xx_sclk_ctrl,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .set_parent = s3c64xx_setparent_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_MMC2_SHIFT,
- .mask = S3C6400_CLKSRC_MMC2_MASK,
- .sources = &clkset_spi_mmc,
- .divider_shift = S3C6400_CLKDIV1_MMC2_SHIFT,
- .reg_divider = S3C_CLK_DIV1,
-};
-
-static struct clksrc_clk clk_usbhost = {
- .clk = {
- .name = "usb-bus-host",
- .id = -1,
- .ctrlbit = S3C_CLKCON_SCLK_UHOST,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_UHOST_SHIFT,
- .mask = S3C6400_CLKSRC_UHOST_MASK,
- .sources = &clkset_uhost,
- .divider_shift = S3C6400_CLKDIV1_UHOST_SHIFT,
- .reg_divider = S3C_CLK_DIV1,
-};
-
-static struct clksrc_clk clk_uart_uclk1 = {
- .clk = {
- .name = "uclk1",
- .id = -1,
- .ctrlbit = S3C_CLKCON_SCLK_UART,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_UART_SHIFT,
- .mask = S3C6400_CLKSRC_UART_MASK,
- .sources = &clkset_uart,
- .divider_shift = S3C6400_CLKDIV2_UART_SHIFT,
- .reg_divider = S3C_CLK_DIV2,
-};
-
-/* Where does UCLK0 come from? */
-
-static struct clksrc_clk clk_spi0 = {
- .clk = {
- .name = "spi-bus",
- .id = 0,
- .ctrlbit = S3C_CLKCON_SCLK_SPI0,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_SPI0_SHIFT,
- .mask = S3C6400_CLKSRC_SPI0_MASK,
- .sources = &clkset_spi_mmc,
- .divider_shift = S3C6400_CLKDIV2_SPI0_SHIFT,
- .reg_divider = S3C_CLK_DIV2,
-};
-
-static struct clksrc_clk clk_spi1 = {
- .clk = {
- .name = "spi-bus",
- .id = 1,
- .ctrlbit = S3C_CLKCON_SCLK_SPI1,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_SPI1_SHIFT,
- .mask = S3C6400_CLKSRC_SPI1_MASK,
- .sources = &clkset_spi_mmc,
- .divider_shift = S3C6400_CLKDIV2_SPI1_SHIFT,
- .reg_divider = S3C_CLK_DIV2,
-};
+/* clocks that feed other parts of the clock source tree */
static struct clk clk_iis_cd0 = {
.name = "iis_cdclk0",
@@ -509,29 +278,11 @@ static struct clk *clkset_audio0_list[] = {
[4] = &clk_pcm_cd,
};
-static struct clk_sources clkset_audio0 = {
+static struct clksrc_sources clkset_audio0 = {
.sources = clkset_audio0_list,
.nr_sources = ARRAY_SIZE(clkset_audio0_list),
};
-static struct clksrc_clk clk_audio0 = {
- .clk = {
- .name = "audio-bus",
- .id = 0,
- .ctrlbit = S3C_CLKCON_SCLK_AUDIO0,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_AUDIO0_SHIFT,
- .mask = S3C6400_CLKSRC_AUDIO0_MASK,
- .sources = &clkset_audio0,
- .divider_shift = S3C6400_CLKDIV2_AUDIO0_SHIFT,
- .reg_divider = S3C_CLK_DIV2,
-};
-
static struct clk *clkset_audio1_list[] = {
[0] = &clk_mout_epll.clk,
[1] = &clk_dout_mpll,
@@ -540,72 +291,133 @@ static struct clk *clkset_audio1_list[] = {
[4] = &clk_pcm_cd,
};
-static struct clk_sources clkset_audio1 = {
+static struct clksrc_sources clkset_audio1 = {
.sources = clkset_audio1_list,
.nr_sources = ARRAY_SIZE(clkset_audio1_list),
};
-static struct clksrc_clk clk_audio1 = {
- .clk = {
- .name = "audio-bus",
- .id = 1,
- .ctrlbit = S3C_CLKCON_SCLK_AUDIO1,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_AUDIO1_SHIFT,
- .mask = S3C6400_CLKSRC_AUDIO1_MASK,
- .sources = &clkset_audio1,
- .divider_shift = S3C6400_CLKDIV2_AUDIO1_SHIFT,
- .reg_divider = S3C_CLK_DIV2,
-};
-
-static struct clksrc_clk clk_irda = {
- .clk = {
- .name = "irda-bus",
- .id = 0,
- .ctrlbit = S3C_CLKCON_SCLK_IRDA,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
- },
- .shift = S3C6400_CLKSRC_IRDA_SHIFT,
- .mask = S3C6400_CLKSRC_IRDA_MASK,
- .sources = &clkset_irda,
- .divider_shift = S3C6400_CLKDIV2_IRDA_SHIFT,
- .reg_divider = S3C_CLK_DIV2,
-};
-
static struct clk *clkset_camif_list[] = {
&clk_h2,
};
-static struct clk_sources clkset_camif = {
+static struct clksrc_sources clkset_camif = {
.sources = clkset_camif_list,
.nr_sources = ARRAY_SIZE(clkset_camif_list),
};
-static struct clksrc_clk clk_camif = {
- .clk = {
- .name = "camera",
- .id = -1,
- .ctrlbit = S3C_CLKCON_SCLK_CAM,
- .enable = s3c64xx_sclk_ctrl,
- .set_parent = s3c64xx_setparent_clksrc,
- .get_rate = s3c64xx_getrate_clksrc,
- .set_rate = s3c64xx_setrate_clksrc,
- .round_rate = s3c64xx_roundrate_clksrc,
+static struct clksrc_clk clksrcs[] = {
+ {
+ .clk = {
+ .name = "mmc_bus",
+ .id = 0,
+ .ctrlbit = S3C_CLKCON_SCLK_MMC0,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 18, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV1, .shift = 0, .size = 4 },
+ .sources = &clkset_spi_mmc,
+ }, {
+ .clk = {
+ .name = "mmc_bus",
+ .id = 1,
+ .ctrlbit = S3C_CLKCON_SCLK_MMC1,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 20, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV1, .shift = 4, .size = 4 },
+ .sources = &clkset_spi_mmc,
+ }, {
+ .clk = {
+ .name = "mmc_bus",
+ .id = 2,
+ .ctrlbit = S3C_CLKCON_SCLK_MMC2,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 22, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV1, .shift = 8, .size = 4 },
+ .sources = &clkset_spi_mmc,
+ }, {
+ .clk = {
+ .name = "usb-bus-host",
+ .id = -1,
+ .ctrlbit = S3C_CLKCON_SCLK_UHOST,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 5, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV1, .shift = 20, .size = 4 },
+ .sources = &clkset_uhost,
+ }, {
+ .clk = {
+ .name = "uclk1",
+ .id = -1,
+ .ctrlbit = S3C_CLKCON_SCLK_UART,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 13, .size = 1 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 16, .size = 4 },
+ .sources = &clkset_uart,
+ }, {
+/* Where does UCLK0 come from? */
+ .clk = {
+ .name = "spi-bus",
+ .id = 0,
+ .ctrlbit = S3C_CLKCON_SCLK_SPI0,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 14, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 0, .size = 4 },
+ .sources = &clkset_spi_mmc,
+ }, {
+ .clk = {
+ .name = "spi-bus",
+ .id = 1,
+ .ctrlbit = S3C_CLKCON_SCLK_SPI1,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 16, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 4, .size = 4 },
+ .sources = &clkset_spi_mmc,
+ }, {
+ .clk = {
+ .name = "audio-bus",
+ .id = 0,
+ .ctrlbit = S3C_CLKCON_SCLK_AUDIO0,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 7, .size = 3 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 8, .size = 4 },
+ .sources = &clkset_audio0,
+ }, {
+ .clk = {
+ .name = "audio-bus",
+ .id = 1,
+ .ctrlbit = S3C_CLKCON_SCLK_AUDIO1,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 10, .size = 3 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 12, .size = 4 },
+ .sources = &clkset_audio1,
+ }, {
+ .clk = {
+ .name = "irda-bus",
+ .id = 0,
+ .ctrlbit = S3C_CLKCON_SCLK_IRDA,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_src = { .reg = S3C_CLK_SRC, .shift = 24, .size = 2 },
+ .reg_div = { .reg = S3C_CLK_DIV2, .shift = 20, .size = 4 },
+ .sources = &clkset_irda,
+ }, {
+ .clk = {
+ .name = "camera",
+ .id = -1,
+ .ctrlbit = S3C_CLKCON_SCLK_CAM,
+ .enable = s3c64xx_sclk_ctrl,
+ },
+ .reg_div = { .reg = S3C_CLK_DIV0, .shift = 20, .size = 4 },
+ .reg_src = { .reg = NULL, .shift = 0, .size = 0 },
+ .sources = &clkset_camif,
},
- .shift = 0,
- .mask = 0,
- .sources = &clkset_camif,
- .divider_shift = S3C6400_CLKDIV0_CAM_SHIFT,
- .reg_divider = S3C_CLK_DIV0,
};
/* Clock initialisation code */
@@ -614,39 +426,7 @@ static struct clksrc_clk *init_parents[] = {
&clk_mout_apll,
&clk_mout_epll,
&clk_mout_mpll,
- &clk_mmc0,
- &clk_mmc1,
- &clk_mmc2,
- &clk_usbhost,
- &clk_uart_uclk1,
- &clk_spi0,
- &clk_spi1,
- &clk_audio0,
- &clk_audio1,
- &clk_irda,
- &clk_camif,
-};
-
-static void __init_or_cpufreq s3c6400_set_clksrc(struct clksrc_clk *clk)
-{
- struct clk_sources *srcs = clk->sources;
- u32 clksrc = __raw_readl(S3C_CLK_SRC);
-
- clksrc &= clk->mask;
- clksrc >>= clk->shift;
-
- if (clksrc > srcs->nr_sources || !srcs->sources[clksrc]) {
- printk(KERN_ERR "%s: bad source %d\n",
- clk->clk.name, clksrc);
- return;
- }
-
- clk->clk.parent = srcs->sources[clksrc];
-
- printk(KERN_INFO "%s: source is %s (%d), rate is %ld\n",
- clk->clk.name, clk->clk.parent->name, clksrc,
- clk_get_rate(&clk->clk));
-}
+};
#define GET_DIV(clk, field) ((((clk) & field##_MASK) >> field##_SHIFT) + 1)
@@ -706,7 +486,10 @@ void __init_or_cpufreq s3c6400_setup_clocks(void)
clk_f.rate = fclk;
for (ptr = 0; ptr < ARRAY_SIZE(init_parents); ptr++)
- s3c6400_set_clksrc(init_parents[ptr]);
+ s3c_set_clksrc(init_parents[ptr], true);
+
+ for (ptr = 0; ptr < ARRAY_SIZE(clksrcs); ptr++)
+ s3c_set_clksrc(&clksrcs[ptr], true);
}
static struct clk *clks[] __initdata = {
@@ -715,20 +498,8 @@ static struct clk *clks[] __initdata = {
&clk_iis_cd1,
&clk_pcm_cd,
&clk_mout_epll.clk,
- &clk_fout_epll,
&clk_mout_mpll.clk,
&clk_dout_mpll,
- &clk_mmc0.clk,
- &clk_mmc1.clk,
- &clk_mmc2.clk,
- &clk_usbhost.clk,
- &clk_uart_uclk1.clk,
- &clk_spi0.clk,
- &clk_spi1.clk,
- &clk_audio0.clk,
- &clk_audio1.clk,
- &clk_irda.clk,
- &clk_camif.clk,
&clk_arm,
};
@@ -761,6 +532,5 @@ void __init s3c6400_register_clocks(unsigned armclk_divlimit)
}
}
- clk_mpll.parent = &clk_mout_mpll.clk;
- clk_epll.parent = &clk_mout_epll.clk;
+ s3c_register_clksrc(clksrcs, ARRAY_SIZE(clksrcs));
}
diff --git a/arch/arm/plat-s5pc1xx/clock.c b/arch/arm/plat-s5pc1xx/clock.c
index 26c21d849790..cc21a8bc884e 100644
--- a/arch/arm/plat-s5pc1xx/clock.c
+++ b/arch/arm/plat-s5pc1xx/clock.c
@@ -70,6 +70,10 @@ static int clk_default_setrate(struct clk *clk, unsigned long rate)
return 0;
}
+static struct clk_ops clk_ops_default_setrate = {
+ .set_rate = clk_default_setrate,
+};
+
static int clk_dummy_enable(struct clk *clk, int enable)
{
return 0;
@@ -81,8 +85,8 @@ struct clk clk_hd0 = {
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
- .set_rate = clk_default_setrate,
.enable = clk_dummy_enable,
+ .ops = &clk_ops_default_setrate,
};
struct clk clk_pd0 = {
@@ -91,7 +95,7 @@ struct clk clk_pd0 = {
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
- .set_rate = clk_default_setrate,
+ .ops = &clk_ops_default_setrate,
.enable = clk_dummy_enable,
};
@@ -700,16 +704,8 @@ void __init s5pc1xx_register_clocks(void)
s3c24xx_register_clocks(clks, ARRAY_SIZE(clks));
- clkp = s5pc100_init_clocks;
- size = ARRAY_SIZE(s5pc100_init_clocks);
-
- for (ptr = 0; ptr < size; ptr++, clkp++) {
- ret = s3c24xx_register_clock(clkp);
- if (ret < 0) {
- printk(KERN_ERR "Failed to register clock %s (%d)\n",
- clkp->name, ret);
- }
- }
+ s3c_register_clocks(s5pc100_init_clocks,
+ ARRAY_SIZE(s5pc100_init_clocks);
clkp = s5pc100_init_clocks_disable;
size = ARRAY_SIZE(s5pc100_init_clocks_disable);
diff --git a/arch/arm/plat-s5pc1xx/s5pc100-clock.c b/arch/arm/plat-s5pc1xx/s5pc100-clock.c
index b436d44510c8..16f0b9077390 100644
--- a/arch/arm/plat-s5pc1xx/s5pc100-clock.c
+++ b/arch/arm/plat-s5pc1xx/s5pc100-clock.c
@@ -111,7 +111,9 @@ static struct clk clk_dout_apll = {
.name = "dout_apll",
.id = -1,
.parent = &clk_mout_apll.clk,
- .get_rate = s5pc100_clk_dout_apll_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_apll_get_rate,
+ },
};
static unsigned long s5pc100_clk_arm_get_rate(struct clk *clk)
@@ -165,9 +167,11 @@ static struct clk clk_arm = {
.name = "armclk",
.id = -1,
.parent = &clk_dout_apll,
- .get_rate = s5pc100_clk_arm_get_rate,
- .set_rate = s5pc100_clk_arm_set_rate,
- .round_rate = s5pc100_clk_arm_round_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_arm_get_rate,
+ .set_rate = s5pc100_clk_arm_set_rate,
+ .round_rate = s5pc100_clk_arm_round_rate,
+ },
};
static unsigned long s5pc100_clk_dout_d0_bus_get_rate(struct clk *clk)
@@ -185,7 +189,9 @@ static struct clk clk_dout_d0_bus = {
.name = "dout_d0_bus",
.id = -1,
.parent = &clk_arm,
- .get_rate = s5pc100_clk_dout_d0_bus_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_d0_bus_get_rate,
+ },
};
static unsigned long s5pc100_clk_dout_pclkd0_get_rate(struct clk *clk)
@@ -203,7 +209,9 @@ static struct clk clk_dout_pclkd0 = {
.name = "dout_pclkd0",
.id = -1,
.parent = &clk_dout_d0_bus,
- .get_rate = s5pc100_clk_dout_pclkd0_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_pclkd0_get_rate,
+ },
};
static unsigned long s5pc100_clk_dout_apll2_get_rate(struct clk *clk)
@@ -221,7 +229,9 @@ static struct clk clk_dout_apll2 = {
.name = "dout_apll2",
.id = -1,
.parent = &clk_mout_apll.clk,
- .get_rate = s5pc100_clk_dout_apll2_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_apll2_get_rate,
+ },
};
/* MPLL */
@@ -284,7 +294,9 @@ static struct clk clk_dout_d1_bus = {
.name = "dout_d1_bus",
.id = -1,
.parent = &clk_mout_am.clk,
- .get_rate = s5pc100_clk_dout_d1_bus_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_d1_bus_get_rate,
+ },
};
static struct clk *clkset_onenand_list[] = {
@@ -325,7 +337,9 @@ static struct clk clk_dout_pclkd1 = {
.name = "dout_pclkd1",
.id = -1,
.parent = &clk_dout_d1_bus,
- .get_rate = s5pc100_clk_dout_pclkd1_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_pclkd1_get_rate,
+ },
};
static unsigned long s5pc100_clk_dout_mpll2_get_rate(struct clk *clk)
@@ -345,7 +359,9 @@ static struct clk clk_dout_mpll2 = {
.name = "dout_mpll2",
.id = -1,
.parent = &clk_mout_am.clk,
- .get_rate = s5pc100_clk_dout_mpll2_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_mpll2_get_rate,
+ },
};
static unsigned long s5pc100_clk_dout_cam_get_rate(struct clk *clk)
@@ -365,7 +381,9 @@ static struct clk clk_dout_cam = {
.name = "dout_cam",
.id = -1,
.parent = &clk_dout_mpll2,
- .get_rate = s5pc100_clk_dout_cam_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_cam_get_rate,
+ },
};
static unsigned long s5pc100_clk_dout_mpll_get_rate(struct clk *clk)
@@ -385,7 +403,9 @@ static struct clk clk_dout_mpll = {
.name = "dout_mpll",
.id = -1,
.parent = &clk_mout_am.clk,
- .get_rate = s5pc100_clk_dout_mpll_get_rate,
+ .ops = &(struct clk_ops) {
+ .get_rate = s5pc100_clk_dout_mpll_get_rate,
+ },
};
/* EPLL */
@@ -540,6 +560,13 @@ static unsigned long s5pc100_roundrate_clksrc(struct clk *clk,
return rate;
}
+static struct clk_ops s5pc100_clksrc_ops = {
+ .set_parent = s5pc100_setparent_clksrc,
+ .get_rate = s5pc100_getrate_clksrc,
+ .set_rate = s5pc100_setrate_clksrc,
+ .round_rate = s5pc100_roundrate_clksrc,
+};
+
static struct clk *clkset_spi_list[] = {
&clk_mout_epll.clk,
&clk_dout_mpll2,
@@ -558,10 +585,7 @@ static struct clksrc_clk clk_spi0 = {
.id = 0,
.ctrlbit = S5PC100_CLKGATE_SCLK0_SPI0,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+
},
.shift = S5PC100_CLKSRC1_SPI0_SHIFT,
.mask = S5PC100_CLKSRC1_SPI0_MASK,
@@ -577,10 +601,7 @@ static struct clksrc_clk clk_spi1 = {
.id = 1,
.ctrlbit = S5PC100_CLKGATE_SCLK0_SPI1,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC1_SPI1_SHIFT,
.mask = S5PC100_CLKSRC1_SPI1_MASK,
@@ -596,10 +617,7 @@ static struct clksrc_clk clk_spi2 = {
.id = 2,
.ctrlbit = S5PC100_CLKGATE_SCLK0_SPI2,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC1_SPI2_SHIFT,
.mask = S5PC100_CLKSRC1_SPI2_MASK,
@@ -625,10 +643,7 @@ static struct clksrc_clk clk_uart_uclk1 = {
.id = -1,
.ctrlbit = S5PC100_CLKGATE_SCLK0_UART,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC1_UART_SHIFT,
.mask = S5PC100_CLKSRC1_UART_MASK,
@@ -683,10 +698,7 @@ static struct clksrc_clk clk_audio0 = {
.id = 0,
.ctrlbit = S5PC100_CLKGATE_SCLK1_AUDIO0,
.enable = s5pc100_sclk1_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC3_AUDIO0_SHIFT,
.mask = S5PC100_CLKSRC3_AUDIO0_MASK,
@@ -716,10 +728,7 @@ static struct clksrc_clk clk_audio1 = {
.id = 1,
.ctrlbit = S5PC100_CLKGATE_SCLK1_AUDIO1,
.enable = s5pc100_sclk1_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC3_AUDIO1_SHIFT,
.mask = S5PC100_CLKSRC3_AUDIO1_MASK,
@@ -748,10 +757,7 @@ static struct clksrc_clk clk_audio2 = {
.id = 2,
.ctrlbit = S5PC100_CLKGATE_SCLK1_AUDIO2,
.enable = s5pc100_sclk1_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC3_AUDIO2_SHIFT,
.mask = S5PC100_CLKSRC3_AUDIO2_MASK,
@@ -801,10 +807,7 @@ static struct clksrc_clk clk_lcd = {
.id = -1,
.ctrlbit = S5PC100_CLKGATE_SCLK1_LCD,
.enable = s5pc100_sclk1_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC2_LCD_SHIFT,
.mask = S5PC100_CLKSRC2_LCD_MASK,
@@ -820,10 +823,7 @@ static struct clksrc_clk clk_fimc0 = {
.id = 0,
.ctrlbit = S5PC100_CLKGATE_SCLK1_FIMC0,
.enable = s5pc100_sclk1_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC2_FIMC0_SHIFT,
.mask = S5PC100_CLKSRC2_FIMC0_MASK,
@@ -839,10 +839,7 @@ static struct clksrc_clk clk_fimc1 = {
.id = 1,
.ctrlbit = S5PC100_CLKGATE_SCLK1_FIMC1,
.enable = s5pc100_sclk1_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC2_FIMC1_SHIFT,
.mask = S5PC100_CLKSRC2_FIMC1_MASK,
@@ -858,10 +855,7 @@ static struct clksrc_clk clk_fimc2 = {
.id = 2,
.ctrlbit = S5PC100_CLKGATE_SCLK1_FIMC2,
.enable = s5pc100_sclk1_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC2_FIMC2_SHIFT,
.mask = S5PC100_CLKSRC2_FIMC2_MASK,
@@ -889,10 +883,7 @@ static struct clksrc_clk clk_mmc0 = {
.id = 0,
.ctrlbit = S5PC100_CLKGATE_SCLK0_MMC0,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC2_MMC0_SHIFT,
.mask = S5PC100_CLKSRC2_MMC0_MASK,
@@ -908,10 +899,7 @@ static struct clksrc_clk clk_mmc1 = {
.id = 1,
.ctrlbit = S5PC100_CLKGATE_SCLK0_MMC1,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC2_MMC1_SHIFT,
.mask = S5PC100_CLKSRC2_MMC1_MASK,
@@ -927,10 +915,7 @@ static struct clksrc_clk clk_mmc2 = {
.id = 2,
.ctrlbit = S5PC100_CLKGATE_SCLK0_MMC2,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC2_MMC2_SHIFT,
.mask = S5PC100_CLKSRC2_MMC2_MASK,
@@ -959,10 +944,7 @@ static struct clksrc_clk clk_usbhost = {
.id = -1,
.ctrlbit = S5PC100_CLKGATE_SCLK0_USBHOST,
.enable = s5pc100_sclk0_ctrl,
- .set_parent = s5pc100_setparent_clksrc,
- .get_rate = s5pc100_getrate_clksrc,
- .set_rate = s5pc100_setrate_clksrc,
- .round_rate = s5pc100_roundrate_clksrc,
+ .ops = &s5pc100_clksrc_ops,
},
.shift = S5PC100_CLKSRC1_UHOST_SHIFT,
.mask = S5PC100_CLKSRC1_UHOST_MASK,
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 486a0d6301e7..faec4b8c626c 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -13,5 +13,107 @@ config PLAT_SAMSUNG
if PLAT_SAMSUNG
+config SAMSUNG_CLKSRC
+ bool
+ help
+ Select the clock code for the clksrc implementation
+ used by newer systems such as the S3C64XX.
+
+# options for IRQ support
+
+config SAMSUNG_IRQ_VIC_TIMER
+ bool
+ help
+ Internal configuration to build the VIC timer interrupt code.
+
+config SAMSUNG_IRQ_UART
+ bool
+ help
+ Internal configuration to build the IRQ UART demux code.
+
+# options for gpio configuration support
+
+config S3C_GPIO_CFG_S3C24XX
+ bool
+ help
+ Internal configuration to enable S3C24XX style GPIO configuration
+ functions.
+
+config S3C_GPIO_CFG_S3C64XX
+ bool
+ help
+ Internal configuration to enable S3C64XX style GPIO configuration
+ functions.
+
+config S5P_GPIO_CFG_S5PC1XX
+ bool
+ help
+ Internal configuration to enable S5PC1XX style GPIO configuration
+ functions.
+
+config S3C_GPIO_PULL_UPDOWN
+ bool
+ help
+ Internal configuration to enable the correct GPIO pull helper
+
+config S3C_GPIO_PULL_DOWN
+ bool
+ help
+ Internal configuration to enable the correct GPIO pull helper
+
+config S3C_GPIO_PULL_UP
+ bool
+ help
+ Internal configuration to enable the correct GPIO pull helper
+
+config SAMSUNG_GPIO_EXTRA
+ int "Number of additional GPIO pins"
+ default 0
+ help
+ Use additional GPIO space in addition to the GPIO's the SOC
+ provides. This allows expanding the GPIO space for use with
+ GPIO expanders.
+
+# device definitions to compile in
+
+config S3C_DEV_HSMMC
+ bool
+ help
+ Compile in platform device definitions for HSMMC code
+
+config S3C_DEV_HSMMC1
+ bool
+ help
+ Compile in platform device definitions for HSMMC channel 1
+
+config S3C_DEV_HSMMC2
+ bool
+ help
+ Compile in platform device definitions for HSMMC channel 2
+
+config S3C_DEV_I2C1
+ bool
+ help
+ Compile in platform device definitions for I2C channel 1
+
+config S3C_DEV_FB
+ bool
+ help
+ Compile in platform device definition for framebuffer
+
+config S3C_DEV_USB_HOST
+ bool
+ help
+ Compile in platform device definition for USB host.
+
+config S3C_DEV_USB_HSOTG
+ bool
+ help
+ Compile in platform device definition for USB high-speed OtG
+
+config S3C_DEV_NAND
+ bool
+ help
+ Compile in platform device definition for NAND controller
endif
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 4478b9f7dc34..aeb7e12d1f63 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -9,3 +9,25 @@ obj-m :=
obj-n := dummy.o
obj- :=
+# Objects we always build independent of SoC choice
+
+obj-y += clock.o
+obj-y += pwm-clock.o
+obj-y += gpio-config.o
+
+obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
+
+obj-$(CONFIG_SAMSUNG_IRQ_UART) += irq-uart.o
+obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o
+
+# devices
+
+obj-$(CONFIG_S3C_DEV_HSMMC) += dev-hsmmc.o
+obj-$(CONFIG_S3C_DEV_HSMMC1) += dev-hsmmc1.o
+obj-$(CONFIG_S3C_DEV_HSMMC2) += dev-hsmmc2.o
+obj-y += dev-i2c0.o
+obj-$(CONFIG_S3C_DEV_I2C1) += dev-i2c1.o
+obj-$(CONFIG_S3C_DEV_FB) += dev-fb.o
+obj-$(CONFIG_S3C_DEV_USB_HOST) += dev-usb.o
+obj-$(CONFIG_S3C_DEV_USB_HSOTG) += dev-usb-hsotg.o
+obj-$(CONFIG_S3C_DEV_NAND) += dev-nand.o
diff --git a/arch/arm/plat-samsung/clock-clksrc.c b/arch/arm/plat-samsung/clock-clksrc.c
new file mode 100644
index 000000000000..33c633a8be8d
--- /dev/null
+++ b/arch/arm/plat-samsung/clock-clksrc.c
@@ -0,0 +1,203 @@
+/* linux/arch/arm/plat-samsung/clock-clksrc.c
+ *
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/sysdev.h>
+#include <linux/io.h>
+
+#include <plat/clock.h>
+#include <plat/clock-clksrc.h>
+#include <plat/cpu-freq.h>
+
+static inline struct clksrc_clk *to_clksrc(struct clk *clk)
+{
+ return container_of(clk, struct clksrc_clk, clk);
+}
+
+static inline u32 bit_mask(u32 shift, u32 nr_bits)
+{
+ u32 mask = 0xffffffff >> (32 - nr_bits);
+
+ return mask << shift;
+}
+
+static unsigned long s3c_getrate_clksrc(struct clk *clk)
+{
+ struct clksrc_clk *sclk = to_clksrc(clk);
+ unsigned long rate = clk_get_rate(clk->parent);
+ u32 clkdiv = __raw_readl(sclk->reg_div.reg);
+ u32 mask = bit_mask(sclk->reg_div.shift, sclk->reg_div.size);
+
+ clkdiv &= mask;
+ clkdiv >>= sclk->reg_div.shift;
+ clkdiv++;
+
+ rate /= clkdiv;
+ return rate;
+}
+
+static int s3c_setrate_clksrc(struct clk *clk, unsigned long rate)
+{
+ struct clksrc_clk *sclk = to_clksrc(clk);
+ void __iomem *reg = sclk->reg_div.reg;
+ unsigned int div;
+ u32 mask = bit_mask(sclk->reg_div.shift, sclk->reg_div.size);
+ u32 val;
+
+ rate = clk_round_rate(clk, rate);
+ div = clk_get_rate(clk->parent) / rate;
+ if (div > 16)
+ return -EINVAL;
+
+ val = __raw_readl(reg);
+ val &= ~mask;
+ val |= (div - 1) << sclk->reg_div.shift;
+ __raw_writel(val, reg);
+
+ return 0;
+}
+
+static int s3c_setparent_clksrc(struct clk *clk, struct clk *parent)
+{
+ struct clksrc_clk *sclk = to_clksrc(clk);
+ struct clksrc_sources *srcs = sclk->sources;
+ u32 clksrc = __raw_readl(sclk->reg_src.reg);
+ u32 mask = bit_mask(sclk->reg_src.shift, sclk->reg_src.size);
+ int src_nr = -1;
+ int ptr;
+
+ for (ptr = 0; ptr < srcs->nr_sources; ptr++)
+ if (srcs->sources[ptr] == parent) {
+ src_nr = ptr;
+ break;
+ }
+
+ if (src_nr >= 0) {
+ clk->parent = parent;
+
+ clksrc &= ~mask;
+ clksrc |= src_nr << sclk->reg_src.shift;
+
+ __raw_writel(clksrc, sclk->reg_src.reg);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static unsigned long s3c_roundrate_clksrc(struct clk *clk,
+ unsigned long rate)
+{
+ unsigned long parent_rate = clk_get_rate(clk->parent);
+ int div;
+
+ if (rate >= parent_rate)
+ rate = parent_rate;
+ else {
+ div = parent_rate / rate;
+ if (parent_rate % rate)
+ div++;
+
+ if (div == 0)
+ div = 1;
+ if (div > 16)
+ div = 16;
+
+ rate = parent_rate / div;
+ }
+
+ return rate;
+}
+
+/* Clock initialisation code */
+
+void __init_or_cpufreq s3c_set_clksrc(struct clksrc_clk *clk, bool announce)
+{
+ struct clksrc_sources *srcs = clk->sources;
+ u32 mask = bit_mask(clk->reg_src.shift, clk->reg_src.size);
+ u32 clksrc = 0;
+
+ if (clk->reg_src.reg)
+ clksrc = __raw_readl(clk->reg_src.reg);
+
+ clksrc &= mask;
+ clksrc >>= clk->reg_src.shift;
+
+ if (clksrc > srcs->nr_sources || !srcs->sources[clksrc]) {
+ printk(KERN_ERR "%s: bad source %d\n",
+ clk->clk.name, clksrc);
+ return;
+ }
+
+ clk->clk.parent = srcs->sources[clksrc];
+
+ if (announce)
+ printk(KERN_INFO "%s: source is %s (%d), rate is %ld\n",
+ clk->clk.name, clk->clk.parent->name, clksrc,
+ clk_get_rate(&clk->clk));
+}
+
+static struct clk_ops clksrc_ops = {
+ .set_parent = s3c_setparent_clksrc,
+ .get_rate = s3c_getrate_clksrc,
+ .set_rate = s3c_setrate_clksrc,
+ .round_rate = s3c_roundrate_clksrc,
+};
+
+static struct clk_ops clksrc_ops_nodiv = {
+ .set_parent = s3c_setparent_clksrc,
+};
+
+static struct clk_ops clksrc_ops_nosrc = {
+ .get_rate = s3c_getrate_clksrc,
+ .set_rate = s3c_setrate_clksrc,
+ .round_rate = s3c_roundrate_clksrc,
+};
+
+void __init s3c_register_clksrc(struct clksrc_clk *clksrc, int size)
+{
+ int ret;
+
+ WARN_ON(!clksrc->reg_div.reg && !clksrc->reg_src.reg);
+
+ for (; size > 0; size--, clksrc++) {
+ /* fill in the default functions */
+
+ if (!clksrc->clk.ops) {
+ if (!clksrc->reg_div.reg)
+ clksrc->clk.ops = &clksrc_ops_nodiv;
+ else if (!clksrc->reg_src.reg)
+ clksrc->clk.ops = &clksrc_ops_nosrc;
+ else
+ clksrc->clk.ops = &clksrc_ops;
+ }
+
+ /* setup the clocksource, but do not announce it
+ * as it may be re-set by the setup routines
+ * called after the rest of the clocks have been
+ * registered
+ */
+ s3c_set_clksrc(clksrc, false);
+
+ ret = s3c24xx_register_clock(&clksrc->clk);
+
+ if (ret < 0) {
+ printk(KERN_ERR "%s: failed to register %s (%d)\n",
+ __func__, clksrc->clk.name, ret);
+ }
+ }
+}
diff --git a/arch/arm/plat-s3c/clock.c b/arch/arm/plat-samsung/clock.c
index 619cfa82dcab..0c746ae7b2a6 100644
--- a/arch/arm/plat-s3c/clock.c
+++ b/arch/arm/plat-samsung/clock.c
@@ -150,8 +150,8 @@ unsigned long clk_get_rate(struct clk *clk)
if (clk->rate != 0)
return clk->rate;
- if (clk->get_rate != NULL)
- return (clk->get_rate)(clk);
+ if (clk->ops != NULL && clk->ops->get_rate != NULL)
+ return (clk->ops->get_rate)(clk);
if (clk->parent != NULL)
return clk_get_rate(clk->parent);
@@ -161,8 +161,8 @@ unsigned long clk_get_rate(struct clk *clk)
long clk_round_rate(struct clk *clk, unsigned long rate)
{
- if (!IS_ERR(clk) && clk->round_rate)
- return (clk->round_rate)(clk, rate);
+ if (!IS_ERR(clk) && clk->ops && clk->ops->round_rate)
+ return (clk->ops->round_rate)(clk, rate);
return rate;
}
@@ -178,13 +178,14 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
* the clock may have been made this way by choice.
*/
- WARN_ON(clk->set_rate == NULL);
+ WARN_ON(clk->ops == NULL);
+ WARN_ON(clk->ops && clk->ops->set_rate == NULL);
- if (clk->set_rate == NULL)
+ if (clk->ops == NULL || clk->ops->set_rate == NULL)
return -EINVAL;
spin_lock(&clocks_lock);
- ret = (clk->set_rate)(clk, rate);
+ ret = (clk->ops->set_rate)(clk, rate);
spin_unlock(&clocks_lock);
return ret;
@@ -204,8 +205,8 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
spin_lock(&clocks_lock);
- if (clk->set_parent)
- ret = (clk->set_parent)(clk, parent);
+ if (clk->ops && clk->ops->set_parent)
+ ret = (clk->ops->set_parent)(clk, parent);
spin_unlock(&clocks_lock);
@@ -224,12 +225,16 @@ EXPORT_SYMBOL(clk_set_parent);
/* base clocks */
-static int clk_default_setrate(struct clk *clk, unsigned long rate)
+int clk_default_setrate(struct clk *clk, unsigned long rate)
{
clk->rate = rate;
return 0;
}
+struct clk_ops clk_ops_def_setrate = {
+ .set_rate = clk_default_setrate,
+};
+
struct clk clk_xtal = {
.name = "xtal",
.id = -1,
@@ -251,7 +256,7 @@ struct clk clk_epll = {
struct clk clk_mpll = {
.name = "mpll",
.id = -1,
- .set_rate = clk_default_setrate,
+ .ops = &clk_ops_def_setrate,
};
struct clk clk_upll = {
@@ -267,7 +272,6 @@ struct clk clk_f = {
.rate = 0,
.parent = &clk_mpll,
.ctrlbit = 0,
- .set_rate = clk_default_setrate,
};
struct clk clk_h = {
@@ -276,7 +280,7 @@ struct clk clk_h = {
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
- .set_rate = clk_default_setrate,
+ .ops = &clk_ops_def_setrate,
};
struct clk clk_p = {
@@ -285,7 +289,7 @@ struct clk clk_p = {
.rate = 0,
.parent = NULL,
.ctrlbit = 0,
- .set_rate = clk_default_setrate,
+ .ops = &clk_ops_def_setrate,
};
struct clk clk_usb_bus = {
@@ -296,7 +300,6 @@ struct clk clk_usb_bus = {
};
-
struct clk s3c24xx_uclk = {
.name = "uclk",
.id = -1,
@@ -333,6 +336,28 @@ int s3c24xx_register_clocks(struct clk **clks, int nr_clks)
return fails;
}
+/**
+ * s3c_register_clocks() - register an array of clocks
+ * @clkp: Pointer to the first clock in the array.
+ * @nr_clks: Number of clocks to register.
+ *
+ * Call s3c24xx_register_clock() on the @clkp array given, printing an
+ * error if it fails to register the clock (unlikely).
+ */
+void __initdata s3c_register_clocks(struct clk *clkp, int nr_clks)
+{
+ int ret;
+
+ for (; nr_clks > 0; nr_clks--, clkp++) {
+ ret = s3c24xx_register_clock(clkp);
+
+ if (ret < 0) {
+ printk(KERN_ERR "Failed to register clock %s (%d)\n",
+ clkp->name, ret);
+ }
+ }
+}
+
/* initalise all the clocks */
int __init s3c24xx_register_baseclocks(unsigned long xtal)
diff --git a/arch/arm/plat-s3c/dev-fb.c b/arch/arm/plat-samsung/dev-fb.c
index a90198fc4b0f..a90198fc4b0f 100644
--- a/arch/arm/plat-s3c/dev-fb.c
+++ b/arch/arm/plat-samsung/dev-fb.c
diff --git a/arch/arm/plat-s3c/dev-hsmmc.c b/arch/arm/plat-samsung/dev-hsmmc.c
index 4c05b39810e2..4c05b39810e2 100644
--- a/arch/arm/plat-s3c/dev-hsmmc.c
+++ b/arch/arm/plat-samsung/dev-hsmmc.c
diff --git a/arch/arm/plat-s3c/dev-hsmmc1.c b/arch/arm/plat-samsung/dev-hsmmc1.c
index e49bc4cd0ee6..e49bc4cd0ee6 100644
--- a/arch/arm/plat-s3c/dev-hsmmc1.c
+++ b/arch/arm/plat-samsung/dev-hsmmc1.c
diff --git a/arch/arm/plat-s3c/dev-hsmmc2.c b/arch/arm/plat-samsung/dev-hsmmc2.c
index 824580bc0e06..824580bc0e06 100644
--- a/arch/arm/plat-s3c/dev-hsmmc2.c
+++ b/arch/arm/plat-samsung/dev-hsmmc2.c
diff --git a/arch/arm/plat-s3c/dev-i2c0.c b/arch/arm/plat-samsung/dev-i2c0.c
index 4c761529b949..4c761529b949 100644
--- a/arch/arm/plat-s3c/dev-i2c0.c
+++ b/arch/arm/plat-samsung/dev-i2c0.c
diff --git a/arch/arm/plat-s3c/dev-i2c1.c b/arch/arm/plat-samsung/dev-i2c1.c
index d44f79110506..d44f79110506 100644
--- a/arch/arm/plat-s3c/dev-i2c1.c
+++ b/arch/arm/plat-samsung/dev-i2c1.c
diff --git a/arch/arm/plat-s3c/dev-nand.c b/arch/arm/plat-samsung/dev-nand.c
index 84808ccda70e..84808ccda70e 100644
--- a/arch/arm/plat-s3c/dev-nand.c
+++ b/arch/arm/plat-samsung/dev-nand.c
diff --git a/arch/arm/plat-s3c/dev-usb-hsotg.c b/arch/arm/plat-samsung/dev-usb-hsotg.c
index e2f604b51c86..e2f604b51c86 100644
--- a/arch/arm/plat-s3c/dev-usb-hsotg.c
+++ b/arch/arm/plat-samsung/dev-usb-hsotg.c
diff --git a/arch/arm/plat-s3c/dev-usb.c b/arch/arm/plat-samsung/dev-usb.c
index 2ee85abed6d9..2ee85abed6d9 100644
--- a/arch/arm/plat-s3c/dev-usb.c
+++ b/arch/arm/plat-samsung/dev-usb.c
diff --git a/arch/arm/plat-s3c/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index 456969b6fa0d..456969b6fa0d 100644
--- a/arch/arm/plat-s3c/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
diff --git a/arch/arm/plat-samsung/include/plat/clock-clksrc.h b/arch/arm/plat-samsung/include/plat/clock-clksrc.h
new file mode 100644
index 000000000000..50a8ca7c3760
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/clock-clksrc.h
@@ -0,0 +1,83 @@
+/* linux/arch/arm/plat-samsung/include/plat/clock-clksrc.h
+ *
+ * Parts taken from arch/arm/plat-s3c64xx/clock.c
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Copyright 2009 Ben Dooks <ben-linux@fluff.org>
+ * Copyright 2009 Harald Welte
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/**
+ * struct clksrc_sources - list of sources for a given clock
+ * @sources: array of pointers to clocks
+ * @nr_sources: The size of @sources
+ */
+struct clksrc_sources {
+ unsigned int nr_sources;
+ struct clk **sources;
+};
+
+/**
+ * struct clksrc_reg - register definition for clock control bits
+ * @reg: pointer to the register in virtual memory.
+ * @shift: the shift in bits to where the bitfield is.
+ * @size: the size in bits of the bitfield.
+ *
+ * This specifies the size and position of the bits we are interested
+ * in within the register specified by @reg.
+ */
+struct clksrc_reg {
+ void __iomem *reg;
+ unsigned short shift;
+ unsigned short size;
+};
+
+/**
+ * struct clksrc_clk - class of clock for newer style samsung devices.
+ * @clk: the standard clock representation
+ * @sources: the sources for this clock
+ * @reg_src: the register definition for selecting the clock's source
+ * @reg_div: the register definition for the clock's output divisor
+ *
+ * This clock implements the features required by the newer SoCs where
+ * the standard clock block provides an input mux and a post-mux divisor
+ * to provide the periperhal's clock.
+ *
+ * The array of @sources provides the mapping of mux position to the
+ * clock, and @reg_src shows the code where to modify to change the mux
+ * position. The @reg_div defines how to change the divider settings on
+ * the output.
+ */
+struct clksrc_clk {
+ struct clk clk;
+ struct clksrc_sources *sources;
+
+ struct clksrc_reg reg_src;
+ struct clksrc_reg reg_div;
+};
+
+/**
+ * s3c_set_clksrc() - setup the clock from the register settings
+ * @clk: The clock to setup.
+ * @announce: true to announce the setting to printk().
+ *
+ * Setup the clock from the current register settings, for when the
+ * kernel boots or if it is resuming from a possibly unknown state.
+ */
+extern void s3c_set_clksrc(struct clksrc_clk *clk, bool announce);
+
+/**
+ * s3c_register_clksrc() register clocks from an array of clksrc clocks
+ * @srcs: The array of clocks to register
+ * @size: The size of the @srcs array.
+ *
+ * Initialise and register the array of clocks described by @srcs.
+ */
+extern void s3c_register_clksrc(struct clksrc_clk *srcs, int size);
diff --git a/arch/arm/plat-s3c/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index d86af84b5b8c..22e011497502 100644
--- a/arch/arm/plat-s3c/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -11,6 +11,30 @@
#include <linux/spinlock.h>
+struct clk;
+
+/**
+ * struct clk_ops - standard clock operations
+ * @set_rate: set the clock rate, see clk_set_rate().
+ * @get_rate: get the clock rate, see clk_get_rate().
+ * @round_rate: round a given clock rate, see clk_round_rate().
+ * @set_parent: set the clock's parent, see clk_set_parent().
+ *
+ * Group the common clock implementations together so that we
+ * don't have to keep setting the same fiels again. We leave
+ * enable in struct clk.
+ *
+ * Adding an extra layer of indirection into the process should
+ * not be a problem as it is unlikely these operations are going
+ * to need to be called quickly.
+ */
+struct clk_ops {
+ int (*set_rate)(struct clk *c, unsigned long rate);
+ unsigned long (*get_rate)(struct clk *c);
+ unsigned long (*round_rate)(struct clk *c, unsigned long rate);
+ int (*set_parent)(struct clk *c, struct clk *parent);
+};
+
struct clk {
struct list_head list;
struct module *owner;
@@ -21,11 +45,8 @@ struct clk {
unsigned long rate;
unsigned long ctrlbit;
+ struct clk_ops *ops;
int (*enable)(struct clk *, int enable);
- int (*set_rate)(struct clk *c, unsigned long rate);
- unsigned long (*get_rate)(struct clk *c);
- unsigned long (*round_rate)(struct clk *c, unsigned long rate);
- int (*set_parent)(struct clk *c, struct clk *parent);
};
/* other clocks which may be registered by board support */
@@ -54,6 +75,9 @@ extern struct clk clk_h2;
extern struct clk clk_27m;
extern struct clk clk_48m;
+extern int clk_default_setrate(struct clk *clk, unsigned long rate);
+extern struct clk_ops clk_ops_def_setrate;
+
/* exports for arch/arm/mach-s3c2410
*
* Please DO NOT use these outside of arch/arm/mach-s3c2410
@@ -66,6 +90,8 @@ extern int s3c2410_clkcon_enable(struct clk *clk, int enable);
extern int s3c24xx_register_clock(struct clk *clk);
extern int s3c24xx_register_clocks(struct clk **clk, int nr_clks);
+extern void s3c_register_clocks(struct clk *clk, int nr_clks);
+
extern int s3c24xx_register_baseclocks(unsigned long xtal);
extern void s3c64xx_register_clocks(void);
diff --git a/arch/arm/plat-samsung/include/plat/irq-uart.h b/arch/arm/plat-samsung/include/plat/irq-uart.h
new file mode 100644
index 000000000000..a9331e49bea3
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/irq-uart.h
@@ -0,0 +1,20 @@
+/* arch/arm/plat-samsung/include/plat/irq-uart.h
+ *
+ * Copyright (c) 2010 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Header file for Samsung SoC UART IRQ demux for S3C64XX and later
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+struct s3c_uart_irq {
+ void __iomem *regs;
+ unsigned int base_irq;
+ unsigned int parent_irq;
+};
+
+extern void s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs);
+
diff --git a/arch/arm/plat-samsung/include/plat/irq-vic-timer.h b/arch/arm/plat-samsung/include/plat/irq-vic-timer.h
new file mode 100644
index 000000000000..a90b53431b5b
--- /dev/null
+++ b/arch/arm/plat-samsung/include/plat/irq-vic-timer.h
@@ -0,0 +1,13 @@
+/* arch/arm/plat-samsung/include/plat/irq-vic-timer.h
+ *
+ * Copyright (c) 2010 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * Header file for Samsung SoC IRQ VIC timer
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+extern void s3c_init_vic_timer_irq(unsigned int vic, unsigned int timer);
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
new file mode 100644
index 000000000000..4f8c102674ae
--- /dev/null
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -0,0 +1,143 @@
+/* arch/arm/plat-samsung/irq-uart.c
+ * originally part of arch/arm/plat-s3c64xx/irq.c
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * Samsung- UART Interrupt handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/serial_core.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <mach/map.h>
+#include <plat/irq-uart.h>
+#include <plat/regs-serial.h>
+#include <plat/cpu.h>
+
+/* Note, we make use of the fact that the parent IRQs, IRQ_UART[0..3]
+ * are consecutive when looking up the interrupt in the demux routines.
+ */
+
+static inline void __iomem *s3c_irq_uart_base(unsigned int irq)
+{
+ struct s3c_uart_irq *uirq = get_irq_chip_data(irq);
+ return uirq->regs;
+}
+
+static inline unsigned int s3c_irq_uart_bit(unsigned int irq)
+{
+ return irq & 3;
+}
+
+static void s3c_irq_uart_mask(unsigned int irq)
+{
+ void __iomem *regs = s3c_irq_uart_base(irq);
+ unsigned int bit = s3c_irq_uart_bit(irq);
+ u32 reg;
+
+ reg = __raw_readl(regs + S3C64XX_UINTM);
+ reg |= (1 << bit);
+ __raw_writel(reg, regs + S3C64XX_UINTM);
+}
+
+static void s3c_irq_uart_maskack(unsigned int irq)
+{
+ void __iomem *regs = s3c_irq_uart_base(irq);
+ unsigned int bit = s3c_irq_uart_bit(irq);
+ u32 reg;
+
+ reg = __raw_readl(regs + S3C64XX_UINTM);
+ reg |= (1 << bit);
+ __raw_writel(reg, regs + S3C64XX_UINTM);
+ __raw_writel(1 << bit, regs + S3C64XX_UINTP);
+}
+
+static void s3c_irq_uart_unmask(unsigned int irq)
+{
+ void __iomem *regs = s3c_irq_uart_base(irq);
+ unsigned int bit = s3c_irq_uart_bit(irq);
+ u32 reg;
+
+ reg = __raw_readl(regs + S3C64XX_UINTM);
+ reg &= ~(1 << bit);
+ __raw_writel(reg, regs + S3C64XX_UINTM);
+}
+
+static void s3c_irq_uart_ack(unsigned int irq)
+{
+ void __iomem *regs = s3c_irq_uart_base(irq);
+ unsigned int bit = s3c_irq_uart_bit(irq);
+
+ __raw_writel(1 << bit, regs + S3C64XX_UINTP);
+}
+
+static void s3c_irq_demux_uart(unsigned int irq, struct irq_desc *desc)
+{
+ struct s3c_uart_irq *uirq = desc->handler_data;
+ u32 pend = __raw_readl(uirq->regs + S3C64XX_UINTP);
+ int base = uirq->base_irq;
+
+ if (pend & (1 << 0))
+ generic_handle_irq(base);
+ if (pend & (1 << 1))
+ generic_handle_irq(base + 1);
+ if (pend & (1 << 2))
+ generic_handle_irq(base + 2);
+ if (pend & (1 << 3))
+ generic_handle_irq(base + 3);
+}
+
+static struct irq_chip s3c_irq_uart = {
+ .name = "s3c-uart",
+ .mask = s3c_irq_uart_mask,
+ .unmask = s3c_irq_uart_unmask,
+ .mask_ack = s3c_irq_uart_maskack,
+ .ack = s3c_irq_uart_ack,
+};
+
+static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
+{
+ struct irq_desc *desc = irq_to_desc(uirq->parent_irq);
+ void __iomem *reg_base = uirq->regs;
+ unsigned int irq;
+ int offs;
+
+ /* mask all interrupts at the start. */
+ __raw_writel(0xf, reg_base + S3C64XX_UINTM);
+
+ for (offs = 0; offs < 3; offs++) {
+ irq = uirq->base_irq + offs;
+
+ set_irq_chip(irq, &s3c_irq_uart);
+ set_irq_chip_data(irq, uirq);
+ set_irq_handler(irq, handle_level_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+
+ desc->handler_data = uirq;
+ set_irq_chained_handler(uirq->parent_irq, s3c_irq_demux_uart);
+}
+
+/**
+ * s3c_init_uart_irqs() - initialise UART IRQs and the necessary demuxing
+ * @irq: The interrupt data for registering
+ * @nr_irqs: The number of interrupt descriptions in @irq.
+ *
+ * Register the UART interrupts specified by @irq including the demuxing
+ * routines. This supports the S3C6400 and newer style of devices.
+ */
+void __init s3c_init_uart_irqs(struct s3c_uart_irq *irq, unsigned int nr_irqs)
+{
+ for (; nr_irqs > 0; nr_irqs--, irq++)
+ s3c_init_uart_irq(irq);
+}
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
new file mode 100644
index 000000000000..0270519fcabc
--- /dev/null
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -0,0 +1,86 @@
+/* arch/arm/plat-samsung/irq-vic-timer.c
+ * originally part of arch/arm/plat-s3c64xx/irq.c
+ *
+ * Copyright 2008 Openmoko, Inc.
+ * Copyright 2008 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ * http://armlinux.simtec.co.uk/
+ *
+ * S3C64XX - Interrupt handling
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+
+#include <mach/map.h>
+#include <plat/irq-vic-timer.h>
+#include <plat/regs-timer.h>
+
+static void s3c_irq_demux_vic_timer(unsigned int irq, struct irq_desc *desc)
+{
+ generic_handle_irq((int)desc->handler_data);
+}
+
+/* We assume the IRQ_TIMER0..IRQ_TIMER4 range is continuous. */
+
+static void s3c_irq_timer_mask(unsigned int irq)
+{
+ u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+
+ reg &= 0x1f; /* mask out pending interrupts */
+ reg &= ~(1 << (irq - IRQ_TIMER0));
+ __raw_writel(reg, S3C64XX_TINT_CSTAT);
+}
+
+static void s3c_irq_timer_unmask(unsigned int irq)
+{
+ u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+
+ reg &= 0x1f; /* mask out pending interrupts */
+ reg |= 1 << (irq - IRQ_TIMER0);
+ __raw_writel(reg, S3C64XX_TINT_CSTAT);
+}
+
+static void s3c_irq_timer_ack(unsigned int irq)
+{
+ u32 reg = __raw_readl(S3C64XX_TINT_CSTAT);
+
+ reg &= 0x1f;
+ reg |= (1 << 5) << (irq - IRQ_TIMER0);
+ __raw_writel(reg, S3C64XX_TINT_CSTAT);
+}
+
+static struct irq_chip s3c_irq_timer = {
+ .name = "s3c-timer",
+ .mask = s3c_irq_timer_mask,
+ .unmask = s3c_irq_timer_unmask,
+ .ack = s3c_irq_timer_ack,
+};
+
+/**
+ * s3c_init_vic_timer_irq() - initialise timer irq chanined off VIC.\
+ * @parent_irq: The parent IRQ on the VIC for the timer.
+ * @timer_irq: The IRQ to be used for the timer.
+ *
+ * Register the necessary IRQ chaining and support for the timer IRQs
+ * chained of the VIC.
+ */
+void __init s3c_init_vic_timer_irq(unsigned int parent_irq,
+ unsigned int timer_irq)
+{
+ struct irq_desc *desc = irq_to_desc(parent_irq);
+
+ set_irq_chained_handler(parent_irq, s3c_irq_demux_vic_timer);
+
+ set_irq_chip(timer_irq, &s3c_irq_timer);
+ set_irq_handler(timer_irq, handle_level_irq);
+ set_irq_flags(timer_irq, IRQF_VALID);
+
+ desc->handler_data = (void *)timer_irq;
+}
diff --git a/arch/arm/plat-s3c/pwm-clock.c b/arch/arm/plat-samsung/pwm-clock.c
index a318215ab535..46c9381e083b 100644
--- a/arch/arm/plat-s3c/pwm-clock.c
+++ b/arch/arm/plat-samsung/pwm-clock.c
@@ -130,20 +130,22 @@ static int clk_pwm_scaler_set_rate(struct clk *clk, unsigned long rate)
return 0;
}
+static struct clk_ops clk_pwm_scaler_ops = {
+ .get_rate = clk_pwm_scaler_get_rate,
+ .set_rate = clk_pwm_scaler_set_rate,
+ .round_rate = clk_pwm_scaler_round_rate,
+};
+
static struct clk clk_timer_scaler[] = {
[0] = {
.name = "pwm-scaler0",
.id = -1,
- .get_rate = clk_pwm_scaler_get_rate,
- .set_rate = clk_pwm_scaler_set_rate,
- .round_rate = clk_pwm_scaler_round_rate,
+ .ops = &clk_pwm_scaler_ops,
},
[1] = {
.name = "pwm-scaler1",
.id = -1,
- .get_rate = clk_pwm_scaler_get_rate,
- .set_rate = clk_pwm_scaler_set_rate,
- .round_rate = clk_pwm_scaler_round_rate,
+ .ops = &clk_pwm_scaler_ops,
},
};
@@ -256,50 +258,46 @@ static int clk_pwm_tdiv_set_rate(struct clk *clk, unsigned long rate)
return 0;
}
+static struct clk_ops clk_tdiv_ops = {
+ .get_rate = clk_pwm_tdiv_get_rate,
+ .set_rate = clk_pwm_tdiv_set_rate,
+ .round_rate = clk_pwm_tdiv_round_rate,
+};
+
static struct pwm_tdiv_clk clk_timer_tdiv[] = {
[0] = {
.clk = {
- .name = "pwm-tdiv",
- .parent = &clk_timer_scaler[0],
- .get_rate = clk_pwm_tdiv_get_rate,
- .set_rate = clk_pwm_tdiv_set_rate,
- .round_rate = clk_pwm_tdiv_round_rate,
+ .name = "pwm-tdiv",
+ .ops = &clk_tdiv_ops,
+ .parent = &clk_timer_scaler[0],
},
},
[1] = {
.clk = {
- .name = "pwm-tdiv",
- .parent = &clk_timer_scaler[0],
- .get_rate = clk_pwm_tdiv_get_rate,
- .set_rate = clk_pwm_tdiv_set_rate,
- .round_rate = clk_pwm_tdiv_round_rate,
+ .name = "pwm-tdiv",
+ .ops = &clk_tdiv_ops,
+ .parent = &clk_timer_scaler[0],
}
},
[2] = {
.clk = {
- .name = "pwm-tdiv",
- .parent = &clk_timer_scaler[1],
- .get_rate = clk_pwm_tdiv_get_rate,
- .set_rate = clk_pwm_tdiv_set_rate,
- .round_rate = clk_pwm_tdiv_round_rate,
+ .name = "pwm-tdiv",
+ .ops = &clk_tdiv_ops,
+ .parent = &clk_timer_scaler[1],
},
},
[3] = {
.clk = {
- .name = "pwm-tdiv",
- .parent = &clk_timer_scaler[1],
- .get_rate = clk_pwm_tdiv_get_rate,
- .set_rate = clk_pwm_tdiv_set_rate,
- .round_rate = clk_pwm_tdiv_round_rate,
+ .name = "pwm-tdiv",
+ .ops = &clk_tdiv_ops,
+ .parent = &clk_timer_scaler[1],
},
},
[4] = {
.clk = {
- .name = "pwm-tdiv",
- .parent = &clk_timer_scaler[1],
- .get_rate = clk_pwm_tdiv_get_rate,
- .set_rate = clk_pwm_tdiv_set_rate,
- .round_rate = clk_pwm_tdiv_round_rate,
+ .name = "pwm-tdiv",
+ .ops = &clk_tdiv_ops,
+ .parent = &clk_timer_scaler[1],
},
},
};
@@ -356,31 +354,35 @@ static int clk_pwm_tin_set_parent(struct clk *clk, struct clk *parent)
return 0;
}
+static struct clk_ops clk_tin_ops = {
+ .set_parent = clk_pwm_tin_set_parent,
+};
+
static struct clk clk_tin[] = {
[0] = {
- .name = "pwm-tin",
- .id = 0,
- .set_parent = clk_pwm_tin_set_parent,
+ .name = "pwm-tin",
+ .id = 0,
+ .ops = &clk_tin_ops,
},
[1] = {
- .name = "pwm-tin",
- .id = 1,
- .set_parent = clk_pwm_tin_set_parent,
+ .name = "pwm-tin",
+ .id = 1,
+ .ops = &clk_tin_ops,
},
[2] = {
- .name = "pwm-tin",
- .id = 2,
- .set_parent = clk_pwm_tin_set_parent,
+ .name = "pwm-tin",
+ .id = 2,
+ .ops = &clk_tin_ops,
},
[3] = {
- .name = "pwm-tin",
- .id = 3,
- .set_parent = clk_pwm_tin_set_parent,
+ .name = "pwm-tin",
+ .id = 3,
+ .ops = &clk_tin_ops,
},
[4] = {
- .name = "pwm-tin",
- .id = 4,
- .set_parent = clk_pwm_tin_set_parent,
+ .name = "pwm-tin",
+ .id = 4,
+ .ops = &clk_tin_ops,
},
};
@@ -428,25 +430,15 @@ __init void s3c_pwmclk_init(void)
return;
}
- for (clk = 0; clk < ARRAY_SIZE(clk_timer_scaler); clk++) {
+ for (clk = 0; clk < ARRAY_SIZE(clk_timer_scaler); clk++)
clk_timer_scaler[clk].parent = clk_timers;
- ret = s3c24xx_register_clock(&clk_timer_scaler[clk]);
- if (ret < 0) {
- printk(KERN_ERR "error adding pwm scaler%d clock\n", clk);
- return;
- }
- }
- for (clk = 0; clk < ARRAY_SIZE(clk_timer_tclk); clk++) {
- ret = s3c24xx_register_clock(&clk_timer_tclk[clk]);
- if (ret < 0) {
- printk(KERN_ERR "error adding pww tclk%d\n", clk);
- return;
- }
- }
+ s3c_register_clocks(clk_timer_scaler, ARRAY_SIZE(clk_timer_scaler));
+ s3c_register_clocks(clk_timer_tclk, ARRAY_SIZE(clk_timer_tclk));
for (clk = 0; clk < ARRAY_SIZE(clk_timer_tdiv); clk++) {
ret = clk_pwm_tdiv_register(clk);
+
if (ret < 0) {
printk(KERN_ERR "error adding pwm%d tdiv clock\n", clk);
return;
diff --git a/arch/arm/plat-stmp3xxx/clock.c b/arch/arm/plat-stmp3xxx/clock.c
index 5d2f19a09e44..e593a2a801c6 100644
--- a/arch/arm/plat-stmp3xxx/clock.c
+++ b/arch/arm/plat-stmp3xxx/clock.c
@@ -1126,9 +1126,8 @@ static int __init clk_init(void)
if (ops && ops->set_parent)
ops->set_parent(cl->clk, cl->clk->parent);
}
-
- clkdev_add(cl);
}
+ clkdev_add_table(onchip_clks, ARRAY_SIZE(onchip_clks));
return 0;
}
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index 1aa1ea5e9212..b13d1879e51b 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1325,7 +1325,7 @@ struct platform_device *__init
at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
{
struct platform_device *pdev;
- struct mci_dma_slave *slave;
+ struct mci_dma_data *slave;
u32 pioa_mask;
u32 piob_mask;
@@ -1344,7 +1344,9 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
ARRAY_SIZE(atmel_mci0_resource)))
goto fail;
- slave = kzalloc(sizeof(struct mci_dma_slave), GFP_KERNEL);
+ slave = kzalloc(sizeof(struct mci_dma_data), GFP_KERNEL);
+ if (!slave)
+ goto fail;
slave->sdata.dma_dev = &dw_dmac0_device.dev;
slave->sdata.reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
@@ -1357,7 +1359,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
if (platform_device_add_data(pdev, data,
sizeof(struct mci_platform_data)))
- goto fail;
+ goto fail_free;
/* CLK line is common to both slots */
pioa_mask = 1 << 10;
@@ -1381,7 +1383,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
/* Slot is unused */
break;
default:
- goto fail;
+ goto fail_free;
}
select_peripheral(PIOA, pioa_mask, PERIPH_A, 0);
@@ -1408,7 +1410,7 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
break;
default:
if (!data->slot[0].bus_width)
- goto fail;
+ goto fail_free;
data->slot[1].bus_width = 0;
break;
@@ -1419,9 +1421,10 @@ at32_add_device_mci(unsigned int id, struct mci_platform_data *data)
platform_device_add(pdev);
return pdev;
+fail_free:
+ kfree(slave);
fail:
data->dma_slave = NULL;
- kfree(slave);
platform_device_put(pdev);
return NULL;
}
diff --git a/arch/blackfin/include/asm/nand.h b/arch/blackfin/include/asm/nand.h
index 3ae8b569edfc..3a1e79dfc8d9 100644
--- a/arch/blackfin/include/asm/nand.h
+++ b/arch/blackfin/include/asm/nand.h
@@ -1,5 +1,5 @@
/*
- * BF5XX - NAND flash controller platfrom_device info
+ * BF5XX - NAND flash controller platform_device info
*
* Copyright 2007-2008 Analog Devices, Inc.
*
@@ -8,7 +8,7 @@
/* struct bf5xx_nand_platform
*
- * define a interface between platfrom board specific code and
+ * define a interface between platform board specific code and
* bf54x NFC driver.
*
* nr_partitions = number of partitions pointed to be partitoons (or zero)
diff --git a/arch/blackfin/include/asm/page.h b/arch/blackfin/include/asm/page.h
index 944a07c6cfd6..1d04e4078340 100644
--- a/arch/blackfin/include/asm/page.h
+++ b/arch/blackfin/include/asm/page.h
@@ -10,4 +10,9 @@
#include <asm-generic/page.h>
#define MAP_NR(addr) (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT)
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
#endif
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index f1036b6b9293..8cb6204ddcb8 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -6,23 +6,9 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
#include <linux/ptrace.h> /* for linux pt_regs struct */
#include <linux/kgdb.h>
-#include <linux/console.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/irq.h>
#include <linux/uaccess.h>
-#include <asm/system.h>
-#include <asm/traps.h>
-#include <asm/blackfin.h>
-#include <asm/dma.h>
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
@@ -147,7 +133,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
regs->lb1 = gdb_regs[BFIN_LB1];
regs->usp = gdb_regs[BFIN_USP];
regs->syscfg = gdb_regs[BFIN_SYSCFG];
- regs->retx = gdb_regs[BFIN_PC];
+ regs->retx = gdb_regs[BFIN_RETX];
regs->retn = gdb_regs[BFIN_RETN];
regs->rete = gdb_regs[BFIN_RETE];
regs->pc = gdb_regs[BFIN_PC];
@@ -424,182 +410,6 @@ struct kgdb_arch arch_kgdb_ops = {
.correct_hw_break = bfin_correct_hw_break,
};
-static int hex(char ch)
-{
- if ((ch >= 'a') && (ch <= 'f'))
- return ch - 'a' + 10;
- if ((ch >= '0') && (ch <= '9'))
- return ch - '0';
- if ((ch >= 'A') && (ch <= 'F'))
- return ch - 'A' + 10;
- return -1;
-}
-
-static int validate_memory_access_address(unsigned long addr, int size)
-{
- if (size < 0 || addr == 0)
- return -EFAULT;
- return bfin_mem_access_type(addr, size);
-}
-
-static int bfin_probe_kernel_read(char *dst, char *src, int size)
-{
- unsigned long lsrc = (unsigned long)src;
- int mem_type;
-
- mem_type = validate_memory_access_address(lsrc, size);
- if (mem_type < 0)
- return mem_type;
-
- if (lsrc >= SYSMMR_BASE) {
- if (size == 2 && lsrc % 2 == 0) {
- u16 mmr = bfin_read16(src);
- memcpy(dst, &mmr, sizeof(mmr));
- return 0;
- } else if (size == 4 && lsrc % 4 == 0) {
- u32 mmr = bfin_read32(src);
- memcpy(dst, &mmr, sizeof(mmr));
- return 0;
- }
- } else {
- switch (mem_type) {
- case BFIN_MEM_ACCESS_CORE:
- case BFIN_MEM_ACCESS_CORE_ONLY:
- return probe_kernel_read(dst, src, size);
- /* XXX: should support IDMA here with SMP */
- case BFIN_MEM_ACCESS_DMA:
- if (dma_memcpy(dst, src, size))
- return 0;
- break;
- case BFIN_MEM_ACCESS_ITEST:
- if (isram_memcpy(dst, src, size))
- return 0;
- break;
- }
- }
-
- return -EFAULT;
-}
-
-static int bfin_probe_kernel_write(char *dst, char *src, int size)
-{
- unsigned long ldst = (unsigned long)dst;
- int mem_type;
-
- mem_type = validate_memory_access_address(ldst, size);
- if (mem_type < 0)
- return mem_type;
-
- if (ldst >= SYSMMR_BASE) {
- if (size == 2 && ldst % 2 == 0) {
- u16 mmr;
- memcpy(&mmr, src, sizeof(mmr));
- bfin_write16(dst, mmr);
- return 0;
- } else if (size == 4 && ldst % 4 == 0) {
- u32 mmr;
- memcpy(&mmr, src, sizeof(mmr));
- bfin_write32(dst, mmr);
- return 0;
- }
- } else {
- switch (mem_type) {
- case BFIN_MEM_ACCESS_CORE:
- case BFIN_MEM_ACCESS_CORE_ONLY:
- return probe_kernel_write(dst, src, size);
- /* XXX: should support IDMA here with SMP */
- case BFIN_MEM_ACCESS_DMA:
- if (dma_memcpy(dst, src, size))
- return 0;
- break;
- case BFIN_MEM_ACCESS_ITEST:
- if (isram_memcpy(dst, src, size))
- return 0;
- break;
- }
- }
-
- return -EFAULT;
-}
-
-/*
- * Convert the memory pointed to by mem into hex, placing result in buf.
- * Return a pointer to the last char put in buf (null). May return an error.
- */
-int kgdb_mem2hex(char *mem, char *buf, int count)
-{
- char *tmp;
- int err;
-
- /*
- * We use the upper half of buf as an intermediate buffer for the
- * raw memory copy. Hex conversion will work against this one.
- */
- tmp = buf + count;
-
- err = bfin_probe_kernel_read(tmp, mem, count);
- if (!err) {
- while (count > 0) {
- buf = pack_hex_byte(buf, *tmp);
- tmp++;
- count--;
- }
-
- *buf = 0;
- }
-
- return err;
-}
-
-/*
- * Copy the binary array pointed to by buf into mem. Fix $, #, and
- * 0x7d escaped with 0x7d. Return a pointer to the character after
- * the last byte written.
- */
-int kgdb_ebin2mem(char *buf, char *mem, int count)
-{
- char *tmp_old, *tmp_new;
- int size;
-
- tmp_old = tmp_new = buf;
-
- for (size = 0; size < count; ++size) {
- if (*tmp_old == 0x7d)
- *tmp_new = *(++tmp_old) ^ 0x20;
- else
- *tmp_new = *tmp_old;
- tmp_new++;
- tmp_old++;
- }
-
- return bfin_probe_kernel_write(mem, buf, count);
-}
-
-/*
- * Convert the hex array pointed to by buf into binary to be placed in mem.
- * Return a pointer to the character AFTER the last byte written.
- * May return an error.
- */
-int kgdb_hex2mem(char *buf, char *mem, int count)
-{
- char *tmp_raw, *tmp_hex;
-
- /*
- * We use the upper half of buf as an intermediate buffer for the
- * raw memory that is converted from hex.
- */
- tmp_raw = buf + count * 2;
-
- tmp_hex = tmp_raw - 1;
- while (tmp_hex >= buf) {
- tmp_raw--;
- *tmp_raw = hex(*tmp_hex--);
- *tmp_raw |= hex(*tmp_hex--) << 4;
- }
-
- return bfin_probe_kernel_write(mem, tmp_raw, count);
-}
-
#define IN_MEM(addr, size, l1_addr, l1_size) \
({ \
unsigned long __addr = (unsigned long)(addr); \
@@ -629,19 +439,9 @@ int kgdb_validate_break_address(unsigned long addr)
return -EFAULT;
}
-int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
-{
- int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
- BREAK_INSTR_SIZE);
- if (err)
- return err;
- return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
-}
-
-int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
- return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
+ regs->retx = ip;
}
int kgdb_arch_init(void)
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d3cbcd6bd985..870d74b1b407 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -712,7 +712,7 @@ static void decode_instruction(unsigned short *address)
verbose_printk("RTE");
else if (opcode == 0x0025)
verbose_printk("EMUEXCPT");
- else if (opcode == 0x0040 && opcode <= 0x0047)
+ else if (opcode >= 0x0040 && opcode <= 0x0047)
verbose_printk("STI R%i", opcode & 7);
else if (opcode >= 0x0050 && opcode <= 0x0057)
verbose_printk("JUMP (P%i)", opcode & 7);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index b0ed0b487ff2..01b2f58dfb95 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -816,8 +816,8 @@ ENDPROC(_resume)
ENTRY(_ret_from_exception)
#ifdef CONFIG_IPIPE
- p2.l = _per_cpu__ipipe_percpu_domain;
- p2.h = _per_cpu__ipipe_percpu_domain;
+ p2.l = _ipipe_percpu_domain;
+ p2.h = _ipipe_percpu_domain;
r0.l = _ipipe_root;
r0.h = _ipipe_root;
r2 = [p2];
diff --git a/arch/blackfin/mm/Makefile b/arch/blackfin/mm/Makefile
index d489f894f4b1..4c011b1f661f 100644
--- a/arch/blackfin/mm/Makefile
+++ b/arch/blackfin/mm/Makefile
@@ -2,4 +2,4 @@
# arch/blackfin/mm/Makefile
#
-obj-y := sram-alloc.o isram-driver.o init.o
+obj-y := sram-alloc.o isram-driver.o init.o maccess.o
diff --git a/arch/blackfin/mm/maccess.c b/arch/blackfin/mm/maccess.c
new file mode 100644
index 000000000000..b71cebc1f8a3
--- /dev/null
+++ b/arch/blackfin/mm/maccess.c
@@ -0,0 +1,97 @@
+/*
+ * safe read and write memory routines callable while atomic
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/uaccess.h>
+#include <asm/dma.h>
+
+static int validate_memory_access_address(unsigned long addr, int size)
+{
+ if (size < 0 || addr == 0)
+ return -EFAULT;
+ return bfin_mem_access_type(addr, size);
+}
+
+long probe_kernel_read(void *dst, void *src, size_t size)
+{
+ unsigned long lsrc = (unsigned long)src;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(lsrc, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (lsrc >= SYSMMR_BASE) {
+ if (size == 2 && lsrc % 2 == 0) {
+ u16 mmr = bfin_read16(src);
+ memcpy(dst, &mmr, sizeof(mmr));
+ return 0;
+ } else if (size == 4 && lsrc % 4 == 0) {
+ u32 mmr = bfin_read32(src);
+ memcpy(dst, &mmr, sizeof(mmr));
+ return 0;
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return __probe_kernel_read(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
+ }
+
+ return -EFAULT;
+}
+
+long probe_kernel_write(void *dst, void *src, size_t size)
+{
+ unsigned long ldst = (unsigned long)dst;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(ldst, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (ldst >= SYSMMR_BASE) {
+ if (size == 2 && ldst % 2 == 0) {
+ u16 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write16(dst, mmr);
+ return 0;
+ } else if (size == 4 && ldst % 4 == 0) {
+ u32 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write32(dst, mmr);
+ return 0;
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return __probe_kernel_write(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
+ }
+
+ return -EFAULT;
+}
diff --git a/arch/cris/arch-v10/kernel/entry.S b/arch/cris/arch-v10/kernel/entry.S
index 2c18d08cd913..c52bef39e250 100644
--- a/arch/cris/arch-v10/kernel/entry.S
+++ b/arch/cris/arch-v10/kernel/entry.S
@@ -358,7 +358,7 @@ mmu_bus_fault:
1: btstq 12, $r1 ; Refill?
bpl 2f
lsrq 24, $r1 ; Get PGD index (bit 24-31)
- move.d [per_cpu__current_pgd], $r0 ; PGD for the current process
+ move.d [current_pgd], $r0 ; PGD for the current process
move.d [$r0+$r1.d], $r0 ; Get PMD
beq 2f
nop
diff --git a/arch/cris/arch-v10/kernel/irq.c b/arch/cris/arch-v10/kernel/irq.c
index 5d75f77f9c73..1a61efc13982 100644
--- a/arch/cris/arch-v10/kernel/irq.c
+++ b/arch/cris/arch-v10/kernel/irq.c
@@ -133,7 +133,7 @@ static void end_crisv10_irq(unsigned int irq)
}
static struct irq_chip crisv10_irq_type = {
- .typename = "CRISv10",
+ .name = "CRISv10",
.startup = startup_crisv10_irq,
.shutdown = shutdown_crisv10_irq,
.enable = enable_crisv10_irq,
diff --git a/arch/cris/arch-v32/kernel/irq.c b/arch/cris/arch-v32/kernel/irq.c
index 57668db25031..b6241198fb98 100644
--- a/arch/cris/arch-v32/kernel/irq.c
+++ b/arch/cris/arch-v32/kernel/irq.c
@@ -336,7 +336,7 @@ int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
}
static struct irq_chip crisv32_irq_type = {
- .typename = "CRISv32",
+ .name = "CRISv32",
.startup = startup_crisv32_irq,
.shutdown = shutdown_crisv32_irq,
.enable = enable_crisv32_irq,
diff --git a/arch/cris/arch-v32/kernel/pinmux.c b/arch/cris/arch-v32/kernel/pinmux.c
index 6eb54ea1c976..f6f3637a4194 100644
--- a/arch/cris/arch-v32/kernel/pinmux.c
+++ b/arch/cris/arch-v32/kernel/pinmux.c
@@ -54,7 +54,7 @@ crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
crisv32_pinmux_init();
- if (port > PORTS)
+ if (port > PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
@@ -197,7 +197,7 @@ crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
crisv32_pinmux_init();
- if (port > PORTS)
+ if (port > PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
diff --git a/arch/cris/arch-v32/mach-a3/pinmux.c b/arch/cris/arch-v32/mach-a3/pinmux.c
index 0a28c9bedfb7..18648ef2d874 100644
--- a/arch/cris/arch-v32/mach-a3/pinmux.c
+++ b/arch/cris/arch-v32/mach-a3/pinmux.c
@@ -242,7 +242,7 @@ crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
crisv32_pinmux_init();
- if (port > PORTS)
+ if (port > PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
diff --git a/arch/cris/arch-v32/mach-fs/pinmux.c b/arch/cris/arch-v32/mach-fs/pinmux.c
index d722ad9ae626..38f29eec14a6 100644
--- a/arch/cris/arch-v32/mach-fs/pinmux.c
+++ b/arch/cris/arch-v32/mach-fs/pinmux.c
@@ -54,7 +54,7 @@ crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
crisv32_pinmux_init();
- if (port > PORTS)
+ if (port > PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
@@ -195,7 +195,7 @@ int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
crisv32_pinmux_init();
- if (port > PORTS)
+ if (port > PORTS || port < 0)
return -EINVAL;
spin_lock_irqsave(&pinmux_lock, flags);
diff --git a/arch/cris/arch-v32/mm/mmu.S b/arch/cris/arch-v32/mm/mmu.S
index 2238d154bde3..f125d912e140 100644
--- a/arch/cris/arch-v32/mm/mmu.S
+++ b/arch/cris/arch-v32/mm/mmu.S
@@ -115,7 +115,7 @@
#ifdef CONFIG_SMP
move $s7, $acr ; PGD
#else
- move.d per_cpu__current_pgd, $acr ; PGD
+ move.d current_pgd, $acr ; PGD
#endif
; Look up PMD in PGD
lsrq 24, $r0 ; Get PMD index into PGD (bit 24-31)
diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c
index b5ce0724a88f..6d7b9eda4036 100644
--- a/arch/cris/kernel/irq.c
+++ b/arch/cris/kernel/irq.c
@@ -63,7 +63,7 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
- seq_printf(p, " %14s", irq_desc[i].chip->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
diff --git a/arch/frv/include/asm/page.h b/arch/frv/include/asm/page.h
index 25c6a5002355..8c97068ac8fc 100644
--- a/arch/frv/include/asm/page.h
+++ b/arch/frv/include/asm/page.h
@@ -63,12 +63,10 @@ extern unsigned long max_pfn;
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-#ifdef CONFIG_MMU
#define VM_DATA_DEFAULT_FLAGS \
(VM_READ | VM_WRITE | \
((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index d20db3c2a656..fbd1a2470cae 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -8,7 +8,6 @@
extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
#define mcount _mcount
-#include <asm/kprobes.h>
/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index dbf83fb28db3..d5505d6f2382 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
bundle_t bundle;
} kprobe_opcode_t;
-struct fnptr {
- unsigned long ip;
- unsigned long gp;
-};
-
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
/* copy of the instruction to be emulated */
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 30cf46534dd2..f7c00a5e0e2b 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -9,7 +9,7 @@
#define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
#ifdef __ASSEMBLY__
-# define THIS_CPU(var) (per_cpu__##var) /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var) (var) /* use this to mark accesses to per-CPU variables... */
#else /* !__ASSEMBLY__ */
@@ -39,7 +39,7 @@ extern void *per_cpu_init(void);
* On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
* more efficient.
*/
-#define __ia64_per_cpu_var(var) per_cpu__##var
+#define __ia64_per_cpu_var(var) var
#include <asm-generic/percpu.h>
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a0..23cce999eb1c 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
region register macros
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ddb4e709dba..d323071d0f91 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,9 @@
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define cpumask_of_node(node) (&node_to_cpu_mask[node])
+#define cpumask_of_node(node) ((node) == -1 ? \
+ cpu_all_mask : \
+ &node_to_cpu_mask[node])
/*
* Returns the number of the node containing Node 'nid'.
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index bcd260e597de..93773fd37be0 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -35,6 +35,11 @@ typedef unsigned int umode_t;
*/
# ifdef __KERNEL__
+struct fnptr {
+ unsigned long ip;
+ unsigned long gp;
+};
+
/* DMA addresses are 64-bits wide, in general. */
typedef u64 dma_addr_t;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 461b99902bf6..7f4a0ed24152 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
+EXPORT_SYMBOL(ia64_cpu_info);
#ifdef CONFIG_SMP
-EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
+EXPORT_SYMBOL(local_per_cpu_offset);
#endif
#include <asm/uaccess.h>
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 32f2639e9b0a..378b4833024f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1225,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
unsigned long psr;
int cpu = smp_processor_id();
+ if (!ia64_idtrs[cpu])
+ return;
+
psr = ia64_clear_ic();
for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
- p = &__per_cpu_idtrs[cpu][iord-1][i];
+ p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
if (p->pte & 0x1) {
old_rr = ia64_get_rr(p->ifa);
if (old_rr != p->rr) {
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 5246285a95fb..6bcbe215b9a4 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2293,7 +2293,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
* if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
* return -ENOMEM;
*/
- if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ if (size > task_rlimit(task, RLIMIT_MEMLOCK))
return -ENOMEM;
/*
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index ef3e7be29caf..bf82e47c98bb 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
select ANON_INODES
select HAVE_KVM_IRQCHIP
select KVM_APIC_ARCHITECTURE
+ select KVM_MMIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5fddcf..e6ac549f8d55 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -241,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 0;
mmio:
if (p->dir)
- r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+ r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
p->size, &p->data);
else
- r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+ r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
p->size, &p->data);
if (r)
printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
@@ -636,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
union context *host_ctx, *guest_ctx;
- int r;
+ int r, idx;
- /*
- * down_read() may sleep and return with interrupts enabled
- */
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
again:
if (signal_pending(current)) {
@@ -663,7 +660,7 @@ again:
if (r < 0)
goto vcpu_run_fail;
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
kvm_guest_enter();
/*
@@ -687,7 +684,7 @@ again:
kvm_guest_exit();
preempt_enable();
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_handle_exit(kvm_run, vcpu);
@@ -697,10 +694,10 @@ again:
}
out:
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (r > 0) {
kvm_resched(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
goto again;
}
@@ -1377,12 +1374,14 @@ static void free_kvm(struct kvm *kvm)
static void kvm_release_vm_pages(struct kvm *kvm)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i, j;
unsigned long base_gfn;
- for (i = 0; i < kvm->nmemslots; i++) {
- memslot = &kvm->memslots[i];
+ slots = rcu_dereference(kvm->memslots);
+ for (i = 0; i < slots->nmemslots; i++) {
+ memslot = &slots->memslots[i];
base_gfn = memslot->base_gfn;
for (j = 0; j < memslot->npages; j++) {
@@ -1576,15 +1575,15 @@ out:
return r;
}
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
int user_alloc)
{
unsigned long i;
unsigned long pfn;
- int npages = mem->memory_size >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ int npages = memslot->npages;
unsigned long base_gfn = memslot->base_gfn;
if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1608,6 +1607,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
return 0;
}
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return;
+}
+
void kvm_arch_flush_shadow(struct kvm *kvm)
{
kvm_flush_remote_tlbs(kvm);
@@ -1802,7 +1809,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -1827,6 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int is_dirty = 0;
+ mutex_lock(&kvm->slots_lock);
spin_lock(&kvm->arch.dirty_log_lock);
r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1840,12 +1848,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
kvm_flush_remote_tlbs(kvm);
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
memset(memslot->dirty_bitmap, 0, n);
}
r = 0;
out:
+ mutex_unlock(&kvm->slots_lock);
spin_unlock(&kvm->arch.dirty_log_lock);
return r;
}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 19c4b2195dce..8d586d1e2515 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -459,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
+ ((char *)&ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b9609c69343a..7c0d4814a68d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -91,7 +91,7 @@ dma_mark_clean(void *addr, size_t size)
inline void
ia64_set_rbs_bot (void)
{
- unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+ unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6..f3de9d7a98b4 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num); /*Number of TR slots in current processor*/
DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
* Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
struct ia64_tr_entry *p;
int cpu = smp_processor_id();
+ if (!ia64_idtrs[cpu]) {
+ ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+ sizeof (struct ia64_tr_entry), GFP_KERNEL);
+ if (!ia64_idtrs[cpu])
+ return -ENOMEM;
+ }
r = -EINVAL;
/*Check overlap with existing TR entries*/
if (target_mask & 0x1) {
- p = &__per_cpu_idtrs[cpu][0][0];
+ p = ia64_idtrs[cpu];
for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
i++, p++) {
if (p->pte & 0x1)
@@ -444,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
}
}
if (target_mask & 0x2) {
- p = &__per_cpu_idtrs[cpu][1][0];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
i++, p++) {
if (p->pte & 0x1)
@@ -459,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
switch (target_mask & 0x3) {
case 1:
- if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
goto found;
continue;
case 2:
- if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
goto found;
continue;
case 3:
- if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
- !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+ !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
goto found;
continue;
default:
@@ -488,7 +494,7 @@ found:
if (target_mask & 0x1) {
ia64_itr(0x1, i, va, pte, log_size);
ia64_srlz_i();
- p = &__per_cpu_idtrs[cpu][0][i];
+ p = ia64_idtrs[cpu] + i;
p->ifa = va;
p->pte = pte;
p->itir = log_size << 2;
@@ -497,7 +503,7 @@ found:
if (target_mask & 0x2) {
ia64_itr(0x2, i, va, pte, log_size);
ia64_srlz_i();
- p = &__per_cpu_idtrs[cpu][1][i];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
p->ifa = va;
p->pte = pte;
p->itir = log_size << 2;
@@ -528,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
return;
if (target_mask & 0x1) {
- p = &__per_cpu_idtrs[cpu][0][slot];
+ p = ia64_idtrs[cpu] + slot;
if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p->pte = 0;
ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
}
if (target_mask & 0x2) {
- p = &__per_cpu_idtrs[cpu][1][slot];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p->pte = 0;
ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
}
for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
- if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
- (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+ ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
break;
}
per_cpu(ia64_tr_used, cpu) = i;
diff --git a/arch/m32r/include/asm/local.h b/arch/m32r/include/asm/local.h
index 22256d138630..734bca87018a 100644
--- a/arch/m32r/include/asm/local.h
+++ b/arch/m32r/include/asm/local.h
@@ -338,29 +338,4 @@ static inline void local_set_mask(unsigned long mask, local_t *addr)
* a variable, not an address.
*/
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non local way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* __M32R_LOCAL_H */
diff --git a/arch/m68k/include/asm/m5206sim.h b/arch/m68k/include/asm/m5206sim.h
index 9c384e294af9..97809c696fd3 100644
--- a/arch/m68k/include/asm/m5206sim.h
+++ b/arch/m68k/include/asm/m5206sim.h
@@ -91,6 +91,7 @@
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_I2C 29 /* I2C, Level 5 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
@@ -117,6 +118,7 @@
#define MCFSIM_SWDICR MCFSIM_ICR8 /* Watchdog timer ICR */
#define MCFSIM_TIMER1ICR MCFSIM_ICR9 /* Timer 1 ICR */
#define MCFSIM_TIMER2ICR MCFSIM_ICR10 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR11 /* I2C ICR */
#define MCFSIM_UART1ICR MCFSIM_ICR12 /* UART 1 ICR */
#define MCFSIM_UART2ICR MCFSIM_ICR13 /* UART 2 ICR */
#ifdef CONFIG_M5206e
diff --git a/arch/m68k/include/asm/m520xsim.h b/arch/m68k/include/asm/m520xsim.h
index ed2b69b96805..132e49d2d9e2 100644
--- a/arch/m68k/include/asm/m520xsim.h
+++ b/arch/m68k/include/asm/m520xsim.h
@@ -41,6 +41,7 @@
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
#define MCFINT_UART2 28 /* Interrupt number for UART2 */
+#define MCFINT_I2C 30 /* Interrupt number for I2C */
#define MCFINT_QSPI 31 /* Interrupt number for QSPI */
#define MCFINT_PIT1 4 /* Interrupt number for PIT1 (PIT0 in processor) */
@@ -54,9 +55,12 @@
#define MCFSIM_SDCS0 0x000a8110 /* SDRAM Chip Select 0 Configuration */
#define MCFSIM_SDCS1 0x000a8114 /* SDRAM Chip Select 1 Configuration */
+#define MCFEPORT_EPPAR 0xFC088000
#define MCFEPORT_EPDDR 0xFC088002
+#define MCFEPORT_EPIER 0xFC088003
#define MCFEPORT_EPDR 0xFC088004
#define MCFEPORT_EPPDR 0xFC088005
+#define MCFEPORT_EPFR 0xFC088006
#define MCFGPIO_PODR_BUSCTL 0xFC0A4000
#define MCFGPIO_PODR_BE 0xFC0A4001
diff --git a/arch/m68k/include/asm/m523xsim.h b/arch/m68k/include/asm/m523xsim.h
index a34894cf8e6f..cb0d5986c0f3 100644
--- a/arch/m68k/include/asm/m523xsim.h
+++ b/arch/m68k/include/asm/m523xsim.h
@@ -30,6 +30,7 @@
#define MCFINT_VECBASE 64 /* Vector base number */
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
#define MCFINT_PIT1 36 /* Interrupt number for PIT1 */
+#define MCFINT_I2C 17 /* Interrupt number for I2C */
#define MCFINT_QSPI 18 /* Interrupt number for QSPI */
/*
@@ -110,9 +111,12 @@
* EPort
*/
+#define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000)
#define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002)
+#define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003)
#define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004)
#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005)
+#define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006)
/*
* Generic GPIO support
@@ -127,5 +131,9 @@
#define MCFGPIO_IRQ_MAX 8
#define MCFGPIO_IRQ_VECBASE MCFINT_VECBASE
+/*
+ * Pinmux
+ */
+#define MCFGPIO_PAR_FECI2C 0x100047
/****************************************************************************/
#endif /* m523xsim_h */
diff --git a/arch/m68k/include/asm/m5249sim.h b/arch/m68k/include/asm/m5249sim.h
index 14bce877ed88..d9d3308dfad0 100644
--- a/arch/m68k/include/asm/m5249sim.h
+++ b/arch/m68k/include/asm/m5249sim.h
@@ -63,6 +63,7 @@
#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR3 /* I2C ICR */
#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
@@ -73,6 +74,7 @@
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_I2C 29 /* I2C, Level 5 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
@@ -106,6 +108,8 @@
#define MCFSIM2_IDECONFIG1 0x18c /* IDEconfig1 */
#define MCFSIM2_IDECONFIG2 0x190 /* IDEconfig2 */
+#define MCFSIM2_INTPRI_62 0x0f000000 /* INT62 in MCFSIM2_LEVEL8 */
+#define MCFSIM2_INTPRI_I2C 0x05000000 /* INT62 (I2C) priority 5 */
/*
* Define the base interrupt for the second interrupt controller.
* We set it to 128, out of the way of the base interrupts, and plenty
@@ -122,12 +126,13 @@
#define MCFINTC2_GPIOIRQ6 (MCFINTC2_VECBASE + 38)
#define MCFINTC2_GPIOIRQ7 (MCFINTC2_VECBASE + 39)
+#define MCFINTC2_I2C (MCFINTC2_VECBASE + 62)
/*
* Generic GPIO support
*/
#define MCFGPIO_PIN_MAX 64
-#define MCFGPIO_IRQ_MAX -1
-#define MCFGPIO_IRQ_VECBASE -1
+#define MCFGPIO_IRQ_MAX MCFINTC2_GPIOIRQ7
+#define MCFGPIO_IRQ_VECBASE MCFINTC2_GPIOIRQ0
/****************************************************************************/
diff --git a/arch/m68k/include/asm/m527xsim.h b/arch/m68k/include/asm/m527xsim.h
index 453356d72d80..8e58fa7c1883 100644
--- a/arch/m68k/include/asm/m527xsim.h
+++ b/arch/m68k/include/asm/m527xsim.h
@@ -31,6 +31,7 @@
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
#define MCFINT_UART1 14 /* Interrupt number for UART1 */
#define MCFINT_UART2 15 /* Interrupt number for UART2 */
+#define MCFINT_I2C 17 /* Interrupt number for I2C */
#define MCFINT_PIT1 36 /* Interrupt number for PIT1 */
/*
@@ -218,9 +219,12 @@
* EPort
*/
+#define MCFEPORT_EPPAR (MCF_IPSBAR + 0x130000)
#define MCFEPORT_EPDDR (MCF_IPSBAR + 0x130002)
+#define MCFEPORT_EPIER (MCF_IPSBAR + 0x130003)
#define MCFEPORT_EPDR (MCF_IPSBAR + 0x130004)
#define MCFEPORT_EPPDR (MCF_IPSBAR + 0x130005)
+#define MCFEPORT_EPFR (MCF_IPSBAR + 0x130006)
/*
@@ -239,6 +243,12 @@
#define UART2_ENABLE_MASK 0x3f00
#endif
+#ifdef CONFIG_M5271
+#define MCF_GPIO_PAR_FECI2C 0x100047
+#endif
+#ifdef CONFIG_M5275
+#define MCF_GPIO_PAR_FECI2C 0x100082
+#endif
/*
* Reset Controll Unit (relative to IPSBAR).
*/
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index e2ad1f42b657..8455c856ec00 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -29,6 +29,7 @@
#define MCFINT_VECBASE 64 /* Vector base number */
#define MCFINT_UART0 13 /* Interrupt number for UART0 */
+#define MCFINT_I2C 17 /* Interrupt number for I2C */
#define MCFINT_PIT1 55 /* Interrupt number for PIT1 */
/*
@@ -195,8 +196,8 @@
* Derek Cheung - 6 Feb 2005
* add I2C and QSPI register definition using Freescale's MCF5282
*/
-/* set Port AS pin for I2C or UART */
-#define MCF5282_GPIO_PASPAR (volatile u16 *) (MCF_IPSBAR + 0x00100056)
+/* Port AS Pin Assignment Register (16 Bit) */
+#define MCF5282_GPIO_PASPAR 0x100056
/* Port UA Pin Assignment Register (8 Bit) */
#define MCF5282_GPIO_PUAPAR 0x10005C
@@ -216,39 +217,6 @@
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
-/*********************************************************************
-*
-* Inter-IC (I2C) Module
-*
-*********************************************************************/
-/* Read/Write access macros for general use */
-#define MCF5282_I2C_I2ADR (volatile u8 *) (MCF_IPSBAR + 0x0300) // Address
-#define MCF5282_I2C_I2FDR (volatile u8 *) (MCF_IPSBAR + 0x0304) // Freq Divider
-#define MCF5282_I2C_I2CR (volatile u8 *) (MCF_IPSBAR + 0x0308) // Control
-#define MCF5282_I2C_I2SR (volatile u8 *) (MCF_IPSBAR + 0x030C) // Status
-#define MCF5282_I2C_I2DR (volatile u8 *) (MCF_IPSBAR + 0x0310) // Data I/O
-
-/* Bit level definitions and macros */
-#define MCF5282_I2C_I2ADR_ADDR(x) (((x)&0x7F)<<0x01)
-
-#define MCF5282_I2C_I2FDR_IC(x) (((x)&0x3F))
-
-#define MCF5282_I2C_I2CR_IEN (0x80) // I2C enable
-#define MCF5282_I2C_I2CR_IIEN (0x40) // interrupt enable
-#define MCF5282_I2C_I2CR_MSTA (0x20) // master/slave mode
-#define MCF5282_I2C_I2CR_MTX (0x10) // transmit/receive mode
-#define MCF5282_I2C_I2CR_TXAK (0x08) // transmit acknowledge enable
-#define MCF5282_I2C_I2CR_RSTA (0x04) // repeat start
-
-#define MCF5282_I2C_I2SR_ICF (0x80) // data transfer bit
-#define MCF5282_I2C_I2SR_IAAS (0x40) // I2C addressed as a slave
-#define MCF5282_I2C_I2SR_IBB (0x20) // I2C bus busy
-#define MCF5282_I2C_I2SR_IAL (0x10) // aribitration lost
-#define MCF5282_I2C_I2SR_SRW (0x04) // slave read/write
-#define MCF5282_I2C_I2SR_IIF (0x02) // I2C interrupt
-#define MCF5282_I2C_I2SR_RXAK (0x01) // received acknowledge
-
-
/*********************************************************************
*
diff --git a/arch/m68k/include/asm/m5307sim.h b/arch/m68k/include/asm/m5307sim.h
index c6830e5b54ce..5c8cf96287c2 100644
--- a/arch/m68k/include/asm/m5307sim.h
+++ b/arch/m68k/include/asm/m5307sim.h
@@ -117,6 +117,7 @@
#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR3 /* I2C ICR */
#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
@@ -124,7 +125,6 @@
#define MCFSIM_DMA2ICR MCFSIM_ICR8 /* DMA 2 ICR */
#define MCFSIM_DMA3ICR MCFSIM_ICR9 /* DMA 3 ICR */
-
/*
* Some symbol defines for the Parallel Port Pin Assignment Register
*/
@@ -143,6 +143,7 @@
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_I2C 29 /* I2C, Level 5 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
diff --git a/arch/m68k/include/asm/m532xsim.h b/arch/m68k/include/asm/m532xsim.h
index 36bf15aec9ae..402a4e05004b 100644
--- a/arch/m68k/include/asm/m532xsim.h
+++ b/arch/m68k/include/asm/m532xsim.h
@@ -17,6 +17,7 @@
#define MCFINT_UART0 26 /* Interrupt number for UART0 */
#define MCFINT_UART1 27 /* Interrupt number for UART1 */
#define MCFINT_UART2 28 /* Interrupt number for UART2 */
+#define MCFINT_I2C 30 /* Interrupt number for I2C */
#define MCF_WTM_WCR MCF_REG16(0xFC098000)
@@ -111,42 +112,6 @@
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
-/*********************************************************************
- *
- * Inter-IC (I2C) Module
- *
- *********************************************************************/
-
-/* Read/Write access macros for general use */
-#define MCF532x_I2C_I2ADR (volatile u8 *) (0xFC058000) // Address
-#define MCF532x_I2C_I2FDR (volatile u8 *) (0xFC058004) // Freq Divider
-#define MCF532x_I2C_I2CR (volatile u8 *) (0xFC058008) // Control
-#define MCF532x_I2C_I2SR (volatile u8 *) (0xFC05800C) // Status
-#define MCF532x_I2C_I2DR (volatile u8 *) (0xFC058010) // Data I/O
-
-/* Bit level definitions and macros */
-#define MCF532x_I2C_I2ADR_ADDR(x) (((x)&0x7F)<<0x01)
-
-#define MCF532x_I2C_I2FDR_IC(x) (((x)&0x3F))
-
-#define MCF532x_I2C_I2CR_IEN (0x80) // I2C enable
-#define MCF532x_I2C_I2CR_IIEN (0x40) // interrupt enable
-#define MCF532x_I2C_I2CR_MSTA (0x20) // master/slave mode
-#define MCF532x_I2C_I2CR_MTX (0x10) // transmit/receive mode
-#define MCF532x_I2C_I2CR_TXAK (0x08) // transmit acknowledge enable
-#define MCF532x_I2C_I2CR_RSTA (0x04) // repeat start
-
-#define MCF532x_I2C_I2SR_ICF (0x80) // data transfer bit
-#define MCF532x_I2C_I2SR_IAAS (0x40) // I2C addressed as a slave
-#define MCF532x_I2C_I2SR_IBB (0x20) // I2C bus busy
-#define MCF532x_I2C_I2SR_IAL (0x10) // aribitration lost
-#define MCF532x_I2C_I2SR_SRW (0x04) // slave read/write
-#define MCF532x_I2C_I2SR_IIF (0x02) // I2C interrupt
-#define MCF532x_I2C_I2SR_RXAK (0x01) // received acknowledge
-
-#define MCF532x_PAR_FECI2C (volatile u8 *) (0xFC0A4053)
-
-
/*
* The M5329EVB board needs a help getting its devices initialized
* at kernel start time if dBUG doesn't set it up (for example
diff --git a/arch/m68k/include/asm/m5407sim.h b/arch/m68k/include/asm/m5407sim.h
index c399abbf953c..5aa86189da5f 100644
--- a/arch/m68k/include/asm/m5407sim.h
+++ b/arch/m68k/include/asm/m5407sim.h
@@ -89,6 +89,7 @@
#define MCFSIM_SWDICR MCFSIM_ICR0 /* Watchdog timer ICR */
#define MCFSIM_TIMER1ICR MCFSIM_ICR1 /* Timer 1 ICR */
#define MCFSIM_TIMER2ICR MCFSIM_ICR2 /* Timer 2 ICR */
+#define MCFSIM_I2CICR MCFSIM_ICR3 /* I2C ICR */
#define MCFSIM_UART1ICR MCFSIM_ICR4 /* UART 1 ICR */
#define MCFSIM_UART2ICR MCFSIM_ICR5 /* UART 2 ICR */
#define MCFSIM_DMA0ICR MCFSIM_ICR6 /* DMA 0 ICR */
@@ -114,6 +115,7 @@
/*
* Define system peripheral IRQ usage.
*/
+#define MCF_IRQ_I2C 29 /* I2C, Level 5 */
#define MCF_IRQ_TIMER 30 /* Timer0, Level 6 */
#define MCF_IRQ_PROFILER 31 /* Timer1, Level 7 */
diff --git a/arch/m68k/include/asm/mcfi2c.h b/arch/m68k/include/asm/mcfi2c.h
new file mode 100644
index 000000000000..5713981dac36
--- /dev/null
+++ b/arch/m68k/include/asm/mcfi2c.h
@@ -0,0 +1,29 @@
+/*
+ * Definitions for Coldfire I2C interface
+*/
+#ifndef mcfi2c_h
+#define mcfi2c_h
+
+#if defined(CONFIG_M5206) || defined(CONFIG_M5206e)
+#define MCFI2C_IOBASE (MCF_MBAR + 0x1e0)
+#elif defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
+#define MCFI2C_IOBASE (MCF_IPSBAR + 0x300)
+#elif defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
+#define MCFI2C_IOBASE (MCF_MBAR + 0x280)
+#ifdef CONFIG_M5249
+#define MCFI2C_IOBASE2 (MCF_MBAR2 + 0x440)
+#endif
+#elif defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_M5445x)
+#define MCFI2C_IOBASE 0xFC058000
+#endif
+#define MCFI2C_IOSIZE 0x40
+
+/**
+ * struct mcfi2c_platform_data - platform data for the coldfire i2c driver
+ * @bitrate: bitrate to use for this i2c controller.
+*/
+struct mcfi2c_platform_data {
+ u32 bitrate;
+};
+
+#endif /* mcfi2c_h */
diff --git a/arch/m68k/include/asm/mcfmbus.h b/arch/m68k/include/asm/mcfmbus.h
deleted file mode 100644
index 319899c47a2c..000000000000
--- a/arch/m68k/include/asm/mcfmbus.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/****************************************************************************/
-
-/*
- * mcfmbus.h -- Coldfire MBUS support defines.
- *
- * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de)
- */
-
-/****************************************************************************/
-
-
-#ifndef mcfmbus_h
-#define mcfmbus_h
-
-
-#define MCFMBUS_BASE 0x280
-#define MCFMBUS_IRQ_VECTOR 0x19
-#define MCFMBUS_IRQ 0x1
-#define MCFMBUS_CLK 0x3f
-#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/
-#define MCFMBUS_ADDRESS 0x01
-
-
-/*
-* Define the 5307 MBUS register set addresses
-*/
-
-#define MCFMBUS_MADR 0x00
-#define MCFMBUS_MFDR 0x04
-#define MCFMBUS_MBCR 0x08
-#define MCFMBUS_MBSR 0x0C
-#define MCFMBUS_MBDR 0x10
-
-
-#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/
-
-#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/
-
-/*
-* Define bit flags in Control Register
-*/
-
-#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */
-#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */
-#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */
-#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */
-#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */
-#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */
-
-/*
-* Define bit flags in Status Register
-*/
-
-#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */
-#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */
-#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */
-#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */
-#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */
-#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */
-#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */
-
-/*
-* Define bit flags in DATA I/O Register
-*/
-
-#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */
-
-#define MBUSIOCSCLOCK 1
-#define MBUSIOCGCLOCK 2
-#define MBUSIOCSADDR 3
-#define MBUSIOCGADDR 4
-#define MBUSIOCSSLADDR 5
-#define MBUSIOCGSLADDR 6
-#define MBUSIOCSSUBADDR 7
-#define MBUSIOCGSUBADDR 8
-
-#endif
diff --git a/arch/m68k/include/asm/thread_info_no.h b/arch/m68k/include/asm/thread_info_no.h
index a6512bfdd01d..884776f686ca 100644
--- a/arch/m68k/include/asm/thread_info_no.h
+++ b/arch/m68k/include/asm/thread_info_no.h
@@ -37,6 +37,7 @@ struct thread_info {
unsigned long flags; /* low level flags */
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable, <0 => BUG */
+ unsigned long tp_value; /* thread pointer */
struct restart_block restart_block;
};
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 064f5913db1a..63be09e58d4a 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -59,6 +59,10 @@ config GENERIC_HARDIRQS
bool
default y
+config GENERIC_HARDIRQS_NO__DO_IRQ
+ bool
+ default y
+
config GENERIC_CALIBRATE_DELAY
bool
default y
diff --git a/arch/m68knommu/kernel/process.c b/arch/m68knommu/kernel/process.c
index 5c9ecd427090..959cb249c759 100644
--- a/arch/m68knommu/kernel/process.c
+++ b/arch/m68knommu/kernel/process.c
@@ -221,6 +221,10 @@ int copy_thread(unsigned long clone_flags,
p->thread.usp = usp;
p->thread.ksp = (unsigned long)childstack;
+
+ if (clone_flags & CLONE_SETTLS)
+ task_thread_info(p)->tp_value = regs->d5;
+
/*
* Must save the current SFC/DFC value, NOT the value when
* the parent was last descheduled - RGH 10-08-96
diff --git a/arch/m68knommu/kernel/ptrace.c b/arch/m68knommu/kernel/ptrace.c
index 4d3828959fb0..85ed2f988f98 100644
--- a/arch/m68knommu/kernel/ptrace.c
+++ b/arch/m68knommu/kernel/ptrace.c
@@ -319,6 +319,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
#endif
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->tp_value,
+ (unsigned long __user *)data);
+ break;
+
default:
ret = -EIO;
break;
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index b67cbc735a9b..923dd4aab875 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -190,3 +190,39 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[])
: "d" (__a), "d" (__b), "d" (__c));
return __res;
}
+
+asmlinkage unsigned long sys_get_thread_area(void)
+{
+ return current_thread_info()->tp_value;
+}
+
+asmlinkage int sys_set_thread_area(unsigned long tp)
+{
+ current_thread_info()->tp_value = tp;
+ return 0;
+}
+
+/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
+ D1 (newval). */
+asmlinkage int
+sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+ unsigned long __user * mem)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long mem_value;
+
+ down_read(&mm->mmap_sem);
+
+ mem_value = *mem;
+ if (mem_value == oldval)
+ *mem = newval;
+
+ up_read(&mm->mmap_sem);
+ return mem_value;
+}
+
+asmlinkage int sys_atomic_barrier(void)
+{
+ /* no code needed for uniprocs */
+ return 0;
+}
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 486837efa3d7..56dd01ded148 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -351,6 +351,10 @@ ENTRY(sys_call_table)
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
+ .long sys_get_thread_area
+ .long sys_set_thread_area
+ .long sys_atomic_cmpxchg_32 /* 335 */
+ .long sys_atomic_barrier
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/m68knommu/platform/5206/config.c b/arch/m68knommu/platform/5206/config.c
index 9c335465e66d..bb31b3448c3d 100644
--- a/arch/m68knommu/platform/5206/config.c
+++ b/arch/m68knommu/platform/5206/config.c
@@ -17,6 +17,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -38,8 +39,45 @@ static struct platform_device m5206_uart = {
.dev.platform_data = m5206_uart_platform,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m5206_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C,
+ .end = MCF_IRQ_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m5206_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m5206_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5206_i2c_resources),
+ .resource = m5206_i2c_resources,
+ .dev.platform_data = &m5206_i2c_platform_data,
+};
+
+static void __init m5206_i2c_init(void)
+{
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C, MCFINTC_I2C);
+}
+#endif
+
static struct platform_device *m5206_devices[] __initdata = {
&m5206_uart,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m5206_i2c,
+#endif
};
/***************************************************************************/
@@ -101,6 +139,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m5206_cpu_reset;
m5206_timers_init();
m5206_uarts_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m5206_i2c_init();
+#endif
/* Only support the external interrupts on their primary level */
mcf_mapirq2imr(25, MCFINTC_EINT1);
diff --git a/arch/m68knommu/platform/5206e/config.c b/arch/m68knommu/platform/5206e/config.c
index 942397984c66..a10d9d130387 100644
--- a/arch/m68knommu/platform/5206e/config.c
+++ b/arch/m68knommu/platform/5206e/config.c
@@ -17,6 +17,7 @@
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -38,8 +39,45 @@ static struct platform_device m5206e_uart = {
.dev.platform_data = m5206e_uart_platform,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m5206e_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C,
+ .end = MCF_IRQ_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m5206e_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m5206e_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5206e_i2c_resources),
+ .resource = m5206e_i2c_resources,
+ .dev.platform_data = &m5206e_i2c_platform_data,
+};
+
+static void __init m5206e_i2c_init(void)
+{
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C, MCFINTC_I2C);
+}
+#endif
+
static struct platform_device *m5206e_devices[] __initdata = {
&m5206e_uart,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m5206e_i2c,
+#endif
};
/***************************************************************************/
@@ -107,6 +145,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m5206e_cpu_reset;
m5206e_timers_init();
m5206e_uarts_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m5206e_i2c_init();
+#endif
/* Only support the external interrupts on their primary level */
mcf_mapirq2imr(25, MCFINTC_EINT1);
diff --git a/arch/m68knommu/platform/520x/config.c b/arch/m68knommu/platform/520x/config.c
index 92614de42cd3..3206f7ecf27e 100644
--- a/arch/m68knommu/platform/520x/config.c
+++ b/arch/m68knommu/platform/520x/config.c
@@ -19,6 +19,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -74,9 +75,50 @@ static struct platform_device m520x_fec = {
.resource = m520x_fec_resources,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m520x_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_I2C,
+ .end = MCFINT_VECBASE + MCFINT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m520x_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m520x_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m520x_i2c_resources),
+ .resource = m520x_i2c_resources,
+ .dev.platform_data = &m520x_i2c_platform_data,
+};
+
+static void __init m520x_i2c_init(void)
+{
+ u8 par;
+
+ /* setup Port FECI2C Pin Assignment Register for I2C */
+ /* set PAR_SCL to SCL and PAR_SDA to SDA */
+ par = readb(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writeb(par, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C);
+}
+#endif
+
static struct platform_device *m520x_devices[] __initdata = {
&m520x_uart,
&m520x_fec,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m520x_i2c,
+#endif
};
/***************************************************************************/
@@ -147,6 +189,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m520x_cpu_reset;
m520x_uarts_init();
m520x_fec_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m520x_i2c_init();
+#endif
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/523x/config.c b/arch/m68knommu/platform/523x/config.c
index 6ba84f2aa397..05b4ab9996d7 100644
--- a/arch/m68knommu/platform/523x/config.c
+++ b/arch/m68knommu/platform/523x/config.c
@@ -20,6 +20,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -75,9 +76,51 @@ static struct platform_device m523x_fec = {
.resource = m523x_fec_resources,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m523x_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_I2C,
+ .end = MCFINT_VECBASE + MCFINT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m523x_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m523x_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m523x_i2c_resources),
+ .resource = m523x_i2c_resources,
+ .dev.platform_data = &m523x_i2c_platform_data,
+};
+
+static void __init m523x_i2c_init(void)
+{
+ u8 par;
+
+ /* setup Port AS Pin Assignment Register for I2C */
+ /* set PASPA0 to SCL and PASPA1 to SDA */
+ par = readb(MCF_IPSBAR + MCFGPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writeb(par, MCF_IPSBAR + MCFGPIO_PAR_FECI2C);
+}
+
+#endif
+
static struct platform_device *m523x_devices[] __initdata = {
&m523x_uart,
&m523x_fec,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m523x_i2c,
+#endif
};
/***************************************************************************/
@@ -114,6 +157,9 @@ void __init config_BSP(char *commandp, int size)
static int __init init_BSP(void)
{
m523x_fec_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m523x_i2c_init();
+#endif
platform_add_devices(m523x_devices, ARRAY_SIZE(m523x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5249/config.c b/arch/m68knommu/platform/5249/config.c
index 646f5ba462fc..bd085427c43a 100644
--- a/arch/m68knommu/platform/5249/config.c
+++ b/arch/m68knommu/platform/5249/config.c
@@ -16,6 +16,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -37,8 +38,81 @@ static struct platform_device m5249_uart = {
.dev.platform_data = m5249_uart_platform,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m5249_i2c0_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C,
+ .end = MCF_IRQ_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource m5249_i2c1_resources[] = {
+ {
+ .start = MCFI2C_IOBASE2,
+ .end = MCFI2C_IOBASE2 + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINTC2_I2C,
+ .end = MCFINTC2_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m5249_i2c0_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct mcfi2c_platform_data m5249_i2c1_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m5249_i2c[] = {
+ {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5249_i2c0_resources),
+ .resource = m5249_i2c0_resources,
+ .dev.platform_data = &m5249_i2c0_platform_data,
+ },
+ {
+ .name = "i2c-mcf",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(m5249_i2c1_resources),
+ .resource = m5249_i2c1_resources,
+ .dev.platform_data = &m5249_i2c1_platform_data,
+ },
+};
+
+static void __init m5249_i2c_init(void)
+{
+ u32 pri;
+
+ /* first I2C controller uses regular irq setup */
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C, MCFINTC_I2C);
+
+ /* second I2C controller is completely different */
+ pri = readl(MCF_MBAR2 + MCFSIM2_INTLEVEL8);
+ pri &= ~MCFSIM2_INTPRI_62;
+ pri |= MCFSIM2_INTPRI_I2C;
+ writel(pri, MCF_MBAR2 + MCFSIM2_INTLEVEL8);
+}
+#endif
+
static struct platform_device *m5249_devices[] __initdata = {
&m5249_uart,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m5249_i2c[0],
+ &m5249_i2c[1],
+#endif
};
/***************************************************************************/
@@ -100,6 +174,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m5249_cpu_reset;
m5249_timers_init();
m5249_uarts_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m5249_i2c_init();
+#endif
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/5272/intc.c b/arch/m68knommu/platform/5272/intc.c
index 7081e0a9720e..2889f7d7cf70 100644
--- a/arch/m68knommu/platform/5272/intc.c
+++ b/arch/m68knommu/platform/5272/intc.c
@@ -103,8 +103,26 @@ static void intc_irq_ack(unsigned int irq)
static int intc_irq_set_type(unsigned int irq, unsigned int type)
{
- /* We can set the edge type here for external interrupts */
- return 0;
+ /* set the edge type for external interrupts */
+ u32 pitr;
+
+ if ((type != IRQF_TRIGGER_RISING) || (type != IRQF_TRIGGER_FALLING))
+ return -EINVAL;
+
+ switch (irq) {
+ case MCF_IRQ_EINT1 ... MCF_IRQ_EINT4:
+ case MCF_IRQ_EINT5 ... MCF_IRQ_EINT6:
+ pitr = __raw_readl(MCFSIM_PITR);
+ if (type & IRQF_TRIGGER_RISING)
+ pitr |= 1 << (96 - irq);
+ else
+ pitr &= ~(1 << (96 - irq));
+ __raw_writel(pitr, MCFSIM_PITR);
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct irq_chip intc_irq_chip = {
@@ -128,11 +146,16 @@ void __init init_IRQ(void)
writel(0x88888888, MCF_MBAR + MCFSIM_ICR4);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
- intc_irq_set_type(irq, 0);
+ switch (irq) {
+ case MCF_IRQ_EINT1 ... MCF_IRQ_EINT4:
+ case MCF_IRQ_EINT5 ... MCF_IRQ_EINT6:
+ set_irq_chip_and_handler(irq, &intc_irq_chip,
+ handle_edge_irq);
+ break;
+ default:
+ set_irq_chip_and_handler(irq, &intc_irq_chip,
+ handle_level_irq);
+ break;
+ }
}
}
-
diff --git a/arch/m68knommu/platform/527x/config.c b/arch/m68knommu/platform/527x/config.c
index fa51be172830..001fa362021c 100644
--- a/arch/m68knommu/platform/527x/config.c
+++ b/arch/m68knommu/platform/527x/config.c
@@ -20,6 +20,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -106,12 +107,67 @@ static struct platform_device m527x_fec[] = {
},
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m527x_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_I2C,
+ .end = MCFINT_VECBASE + MCFINT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m527x_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m527x_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m527x_i2c_resources),
+ .resource = m527x_i2c_resources,
+ .dev.platform_data = &m527x_i2c_platform_data,
+};
+
+#if defined(CONFIG_M5271)
+static void __init m527x_i2c_init(void)
+{
+ u8 par;
+
+ /* setup Port FECI2C Pin Assignment Register for I2C */
+ /* set PAR_SCL to SCL and PAR_SDA to SDA */
+ par = readb(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writeb(par, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C);
+}
+#endif
+#if defined(CONFIG_M5275)
+static void __init m527x_i2c_init(void)
+{
+ u16 par;
+
+ /* setup Port FECI2C Pin Assignment Register for I2C */
+ /* set PAR_SCL to SCL and PAR_SDA to SDA */
+ par = readw(MCF_IPSBAR + MCF_GPIO_PAR_FECI2C);
+ par |= 0x0f;
+ writew(par, MCF_IPSBAR + MCF_GPIO_PAR_FECI2C);
+}
+#endif
+#endif
+
static struct platform_device *m527x_devices[] __initdata = {
&m527x_uart,
&m527x_fec[0],
#ifdef CONFIG_FEC2
&m527x_fec[1],
#endif
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m527x_i2c,
+#endif
};
/***************************************************************************/
@@ -187,6 +243,10 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m527x_cpu_reset;
m527x_uarts_init();
m527x_fec_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m527x_i2c_init();
+#endif
+
}
/***************************************************************************/
diff --git a/arch/m68knommu/platform/528x/config.c b/arch/m68knommu/platform/528x/config.c
index 6e608d1836f1..f7516d8ea08d 100644
--- a/arch/m68knommu/platform/528x/config.c
+++ b/arch/m68knommu/platform/528x/config.c
@@ -21,6 +21,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -76,10 +77,57 @@ static struct platform_device m528x_fec = {
.resource = m528x_fec_resources,
};
+#if defined(CONFIG_GPIO_PCF857X) || defined(CONFIG_GPIO_PCF857X_MODULE)
+static struct pcf857x_platform_data pcf857x_data[] = {
+ {
+ .gpio_base = MCFGPIO_PIN_MAX,
+ },
+};
+#endif
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m528x_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_I2C,
+ .end = MCFINT_VECBASE + MCFINT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m528x_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m528x_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m528x_i2c_resources),
+ .resource = m528x_i2c_resources,
+ .dev.platform_data = &m528x_i2c_platform_data,
+};
+
+static void __init m528x_i2c_init(void)
+{
+ u16 paspar;
+
+ /* setup Port AS Pin Assignment Register for I2C */
+ /* set PASPA0 to SCL and PASPA1 to SDA */
+ paspar = __raw_readw(MCF_IPSBAR + MCF5282_GPIO_PASPAR);
+ paspar |= 0xF;
+ __raw_writew(paspar, MCF_IPSBAR + MCF5282_GPIO_PASPAR);
+}
+#endif
static struct platform_device *m528x_devices[] __initdata = {
&m528x_uart,
&m528x_fec,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m528x_i2c,
+#endif
};
/***************************************************************************/
@@ -174,6 +222,9 @@ static int __init init_BSP(void)
mach_reset = m528x_cpu_reset;
m528x_uarts_init();
m528x_fec_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m528x_i2c_init();
+#endif
platform_add_devices(m528x_devices, ARRAY_SIZE(m528x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5307/config.c b/arch/m68knommu/platform/5307/config.c
index 00900ac06a9c..57f51ab3ae29 100644
--- a/arch/m68knommu/platform/5307/config.c
+++ b/arch/m68knommu/platform/5307/config.c
@@ -18,6 +18,7 @@
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
#include <asm/mcfwdebug.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -47,8 +48,45 @@ static struct platform_device m5307_uart = {
.dev.platform_data = m5307_uart_platform,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m5307_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C,
+ .end = MCF_IRQ_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m5307_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m5307_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5307_i2c_resources),
+ .resource = m5307_i2c_resources,
+ .dev.platform_data = &m5307_i2c_platform_data,
+};
+
+static void __init m5307_i2c_init(void)
+{
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C, MCFINTC_I2C);
+}
+#endif
+
static struct platform_device *m5307_devices[] __initdata = {
&m5307_uart,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m5307_i2c,
+#endif
};
/***************************************************************************/
@@ -117,7 +155,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m5307_cpu_reset;
m5307_timers_init();
m5307_uarts_init();
-
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m5307_i2c_init();
+#endif
/* Only support the external interrupts on their primary level */
mcf_mapirq2imr(25, MCFINTC_EINT1);
mcf_mapirq2imr(27, MCFINTC_EINT3);
diff --git a/arch/m68knommu/platform/532x/config.c b/arch/m68knommu/platform/532x/config.c
index d632948e64e5..7a0f14ec3460 100644
--- a/arch/m68knommu/platform/532x/config.c
+++ b/arch/m68knommu/platform/532x/config.c
@@ -27,6 +27,7 @@
#include <asm/mcfuart.h>
#include <asm/mcfdma.h>
#include <asm/mcfwdebug.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -82,9 +83,50 @@ static struct platform_device m532x_fec = {
.resource = m532x_fec_resources,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m532x_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCFINT_VECBASE + MCFINT_I2C,
+ .end = MCFINT_VECBASE + MCFINT_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m532x_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m532x_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m532x_i2c_resources),
+ .resource = m532x_i2c_resources,
+ .dev.platform_data = &m532x_i2c_platform_data,
+};
+
+static void __init m532x_i2c_init(void)
+{
+ u8 par;
+
+ /* setup Port AS Pin Assignment Register for I2C */
+ /* set PASPA0 to SCL and PASPA1 to SDA */
+ par = __raw_readb(MCF_GPIO_PAR_FECI2C);
+ par |= 0x0f;
+ __raw_writeb(par, MCF_GPIO_PAR_FECI2C);
+}
+#endif
+
static struct platform_device *m532x_devices[] __initdata = {
&m532x_uart,
&m532x_fec,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m532x_i2c,
+#endif
};
/***************************************************************************/
@@ -158,6 +200,9 @@ static int __init init_BSP(void)
{
m532x_uarts_init();
m532x_fec_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m532x_i2c_init();
+#endif
platform_add_devices(m532x_devices, ARRAY_SIZE(m532x_devices));
return 0;
}
diff --git a/arch/m68knommu/platform/5407/config.c b/arch/m68knommu/platform/5407/config.c
index 70ea789a400c..b8ceb3762c3f 100644
--- a/arch/m68knommu/platform/5407/config.c
+++ b/arch/m68knommu/platform/5407/config.c
@@ -17,6 +17,7 @@
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
+#include <asm/mcfi2c.h>
/***************************************************************************/
@@ -38,8 +39,45 @@ static struct platform_device m5407_uart = {
.dev.platform_data = m5407_uart_platform,
};
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+static struct resource m5407_i2c_resources[] = {
+ {
+ .start = MCFI2C_IOBASE,
+ .end = MCFI2C_IOBASE + MCFI2C_IOSIZE - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = MCF_IRQ_I2C,
+ .end = MCF_IRQ_I2C,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mcfi2c_platform_data m5407_i2c_platform_data = {
+ .bitrate = 100000,
+};
+
+static struct platform_device m5407_i2c = {
+ .name = "i2c-mcf",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(m5407_i2c_resources),
+ .resource = m5407_i2c_resources,
+ .dev.platform_data = &m5407_i2c_platform_data,
+};
+
+static void __init m5407_i2c_init(void)
+{
+ writeb(MCFSIM_ICR_AUTOVEC | MCFSIM_ICR_LEVEL5 | MCFSIM_ICR_PRI0,
+ MCF_MBAR + MCFSIM_I2CICR);
+ mcf_mapirq2imr(MCF_IRQ_I2C, MCFINTC_I2C);
+}
+#endif
+
static struct platform_device *m5407_devices[] __initdata = {
&m5407_uart,
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ &m5407_i2c,
+#endif
};
/***************************************************************************/
@@ -101,6 +139,9 @@ void __init config_BSP(char *commandp, int size)
mach_reset = m5407_cpu_reset;
m5407_timers_init();
m5407_uarts_init();
+#if defined(CONFIG_I2C_MCF) || defined(CONFIG_I2C_MCF_MODULE)
+ m5407_i2c_init();
+#endif
/* Only support the external interrupts on their primary level */
mcf_mapirq2imr(25, MCFINTC_EINT1);
diff --git a/arch/m68knommu/platform/68328/ints.c b/arch/m68knommu/platform/68328/ints.c
index b91ee85d4b5d..8a6a10212fb5 100644
--- a/arch/m68knommu/platform/68328/ints.c
+++ b/arch/m68knommu/platform/68328/ints.c
@@ -178,11 +178,7 @@ void __init init_IRQ(void)
/* turn off all interrupts */
IMR = ~0;
- for (i = 0; (i < NR_IRQS); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &intc_irq_chip;
- }
+ for (i = 0; (i < NR_IRQS); i++)
+ set_irq_chip_and_handler(i, &intc_irq_chip, handle_level_irq);
}
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index 1143f77caca4..9b94c52daa55 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -132,11 +132,7 @@ void init_IRQ(void)
/* turn off all CPM interrupts */
pquicc->intr_cimr = 0x00000000;
- for (i = 0; (i < NR_IRQS); i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &intc_irq_chip;
- }
+ for (i = 0; (i < NR_IRQS); i++)
+ set_irq_chip_and_handler(i, &intc_irq_chip, handle_level_irq);
}
diff --git a/arch/m68knommu/platform/coldfire/intc-2.c b/arch/m68knommu/platform/coldfire/intc-2.c
index 5598c8b8661f..2863285927de 100644
--- a/arch/m68knommu/platform/coldfire/intc-2.c
+++ b/arch/m68knommu/platform/coldfire/intc-2.c
@@ -39,6 +39,15 @@ static void intc_irq_mask(unsigned int irq)
val = __raw_readl(imraddr);
__raw_writel(val | imrbit, imraddr);
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE ||
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier &= ~(1 << (irq - MCFGPIO_IRQ_VECBASE));
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
}
}
@@ -64,13 +73,72 @@ static void intc_irq_unmask(unsigned int irq)
val = __raw_readl(imraddr);
__raw_writel(val & ~imrbit, imraddr);
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE ||
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
+ }
+}
+
+static void intc_irq_ack(unsigned int irq)
+{
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE ||
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+ u8 epfr = __raw_readb(MCFEPORT_EPFR);
+ epfr |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epfr, MCFEPORT_EPFR);
}
}
+static int intc_irq_set_type(unsigned int irq, unsigned int flow_type)
+{
+ unsigned shift;
+ u16 eppar;
+
+ /* only on eport */
+ if (irq < MCFGPIO_IRQ_VECBASE ||
+ irq >= (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX))
+ return -EINVAL;
+
+ /* we only support TRIGGER_LOW or either (or both) RISING and FALLING */
+ if ((flow_type & IRQF_TRIGGER_HIGH) ||
+ ((flow_type & IRQF_TRIGGER_LOW) &&
+ (flow_type & (IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING))))
+ return -EINVAL;
+
+ shift = (irq - MCFGPIO_IRQ_VECBASE) * 2;
+
+ /* default to TRIGGER_LOW */
+ eppar = 0;
+ if (flow_type & IRQF_TRIGGER_RISING)
+ eppar |= (0x01 << shift);
+ if (flow_type & IRQF_TRIGGER_FALLING)
+ eppar |= (0x02 << shift);
+
+ if (eppar)
+ set_irq_handler(irq, handle_edge_irq);
+ else
+ set_irq_handler(irq, handle_level_irq);
+
+ eppar |= (__raw_readw(MCFEPORT_EPPAR) & ~(0x3 << shift));
+ __raw_writew(eppar, MCFEPORT_EPPAR);
+
+ return 0;
+}
+
static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .ack = intc_irq_ack,
+ .set_type = intc_irq_set_type,
};
void __init init_IRQ(void)
@@ -83,11 +151,7 @@ void __init init_IRQ(void)
__raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC0 + MCFINTC_IMRL);
__raw_writel(0x1, MCF_IPSBAR + MCFICM_INTC1 + MCFINTC_IMRL);
- for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
- }
+ for (irq = 0; (irq < NR_IRQS); irq++)
+ set_irq_chip_and_handler(irq, &intc_irq_chip, handle_level_irq);
}
diff --git a/arch/m68knommu/platform/coldfire/intc-simr.c b/arch/m68knommu/platform/coldfire/intc-simr.c
index 1b01e79c2f63..cc5473f6c2c7 100644
--- a/arch/m68knommu/platform/coldfire/intc-simr.c
+++ b/arch/m68knommu/platform/coldfire/intc-simr.c
@@ -26,6 +26,15 @@ static void intc_irq_mask(unsigned int irq)
else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_SIMR)
__raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_SIMR);
}
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE ||
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier &= ~(1 << (irq - MCFGPIO_IRQ_VECBASE));
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
}
static void intc_irq_unmask(unsigned int irq)
@@ -36,10 +45,63 @@ static void intc_irq_unmask(unsigned int irq)
else if ((irq < MCFINT_VECBASE + 128) && MCFINTC1_CIMR)
__raw_writeb(irq - MCFINT_VECBASE - 64, MCFINTC1_CIMR);
}
+
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE ||
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+
+ u8 epier = __raw_readb(MCFEPORT_EPIER);
+ epier |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epier, MCFEPORT_EPIER);
+ }
+}
+
+static void intc_irq_ack(unsigned int irq)
+{
+ /* only on eport */
+ if (irq >= MCFGPIO_IRQ_VECBASE ||
+ irq < (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX)) {
+ u8 epfr = __raw_readb(MCFEPORT_EPFR);
+ epfr |= 1 << (irq - MCFGPIO_IRQ_VECBASE);
+ __raw_writeb(epfr, MCFEPORT_EPFR);
+ }
}
static int intc_irq_set_type(unsigned int irq, unsigned int type)
{
+ unsigned shift;
+ u16 eppar;
+
+ /* only on eport */
+ if (irq < MCFGPIO_IRQ_VECBASE ||
+ irq >= (MCFGPIO_IRQ_VECBASE + MCFGPIO_IRQ_MAX))
+ return -EINVAL;
+
+ /* we only support TRIGGER_LOW or either (or both) RISING and FALLING */
+ if ((type & IRQF_TRIGGER_HIGH) ||
+ ((type & IRQF_TRIGGER_LOW) &&
+ (type & (IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING))))
+ return -EINVAL;
+
+ shift = (irq - MCFGPIO_IRQ_VECBASE) * 2;
+
+ /* default to TRIGGER_LOW */
+ eppar = 0;
+ if (type & IRQF_TRIGGER_RISING)
+ eppar |= (0x01 << shift);
+ if (type & IRQF_TRIGGER_FALLING)
+ eppar |= (0x02 << shift);
+
+ if (eppar)
+ set_irq_handler(irq, handle_edge_irq);
+ else
+ set_irq_handler(irq, handle_level_irq);
+
+ eppar |= (__raw_readw(MCFEPORT_EPPAR) & ~(0x3 << shift));
+ __raw_writew(eppar, MCFEPORT_EPPAR);
+
+
if (irq >= MCFINT_VECBASE) {
if (irq < MCFINT_VECBASE + 64)
__raw_writeb(5, MCFINTC0_ICR0 + irq - MCFINT_VECBASE);
@@ -53,6 +115,7 @@ static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .ack = intc_irq_ack,
.set_type = intc_irq_set_type,
};
@@ -68,10 +131,7 @@ void __init init_IRQ(void)
__raw_writeb(0xff, MCFINTC1_SIMR);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
+ set_irq_chip_and_handler(irq, &intc_irq_chip, handle_level_irq);
intc_irq_set_type(irq, 0);
}
}
diff --git a/arch/m68knommu/platform/coldfire/intc.c b/arch/m68knommu/platform/coldfire/intc.c
index a4560c86db71..ad392a515cd2 100644
--- a/arch/m68knommu/platform/coldfire/intc.c
+++ b/arch/m68knommu/platform/coldfire/intc.c
@@ -115,16 +115,69 @@ static void intc_irq_mask(unsigned int irq)
{
if (mcf_irq2imr[irq])
mcf_setimr(mcf_irq2imr[irq]);
+
+#if defined MCFINTC2_GPIOIRQ0
+ if (irq >= MCFINTC2_GPIOIRQ0 && irq <= MCFINTC2_GPIOIRQ7) {
+ u32 gpiointenable = __raw_readl(MCFSIM2_GPIOINTENABLE);
+
+ gpiointenable &= ~(0x101 << (irq - MCFINTC2_GPIOIRQ0));
+ __raw_writel(gpiointenable, MCFSIM2_GPIOINTENABLE);
+ }
+#endif
}
static void intc_irq_unmask(unsigned int irq)
{
if (mcf_irq2imr[irq])
mcf_clrimr(mcf_irq2imr[irq]);
+
+#if defined MCFINTC2_GPIOIRQ0
+ if (irq >= MCFINTC2_GPIOIRQ0 && irq <= MCFINTC2_GPIOIRQ7) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ u32 gpiointenable = __raw_readl(MCFSIM2_GPIOINTENABLE);
+
+ if (desc->status & IRQF_TRIGGER_RISING)
+ gpiointenable |= 0x0001 << (irq - MCFINTC2_GPIOIRQ0);
+ if (desc->status & IRQF_TRIGGER_FALLING)
+ gpiointenable |= 0x0100 << (irq - MCFINTC2_GPIOIRQ0);
+ __raw_writel(gpiointenable, MCFSIM2_GPIOINTENABLE);
+ }
+#endif
+}
+
+static void intc_irq_ack(unsigned int irq)
+{
+#if defined MCFINTC2_GPIOIRQ0
+ if (irq >= MCFINTC2_GPIOIRQ0 && irq <= MCFINTC2_GPIOIRQ7) {
+ u32 gpiointclear = __raw_readl(MCFSIM2_GPIOINTCLEAR);
+
+ gpiointclear |= 0x0101 << (irq - MCFINTC2_GPIOIRQ0);
+ __raw_writel(gpiointclear, MCFSIM2_GPIOINTCLEAR);
+ }
+#endif
}
static int intc_irq_set_type(unsigned int irq, unsigned int type)
{
+#if defined MCFINTC2_GPIOIRQ0
+ u32 gpiointenable;
+
+ if (type & ~(IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
+ return -EINVAL;
+
+ if ((irq < MCFINTC2_GPIOIRQ0) || (irq > MCFINTC2_GPIOIRQ7))
+ return -EINVAL;
+
+ /* enable rising or falling or both */
+ gpiointenable = __raw_readl(MCFSIM2_GPIOINTENABLE);
+ gpiointenable &= ~(0x101 << (irq - MCFINTC2_GPIOIRQ0));
+ if (type & IRQF_TRIGGER_RISING)
+ gpiointenable |= 0x0001 << (irq - MCFINTC2_GPIOIRQ0);
+ if (type & IRQF_TRIGGER_FALLING)
+ gpiointenable |= 0x0100 << (irq - MCFINTC2_GPIOIRQ0);
+ __raw_writel(gpiointenable, MCFSIM2_GPIOINTENABLE);
+#endif
+
return 0;
}
@@ -132,6 +185,7 @@ static struct irq_chip intc_irq_chip = {
.name = "CF-INTC",
.mask = intc_irq_mask,
.unmask = intc_irq_unmask,
+ .ack = intc_irq_ack,
.set_type = intc_irq_set_type,
};
@@ -143,10 +197,7 @@ void __init init_IRQ(void)
mcf_maskimr(0xffffffff);
for (irq = 0; (irq < NR_IRQS); irq++) {
- irq_desc[irq].status = IRQ_DISABLED;
- irq_desc[irq].action = NULL;
- irq_desc[irq].depth = 1;
- irq_desc[irq].chip = &intc_irq_chip;
+ set_irq_chip_and_handler(irq, &intc_irq_chip, handle_level_irq);
intc_irq_set_type(irq, 0);
}
}
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h
index 61abbd232640..ec89f2ad0fe1 100644
--- a/arch/microblaze/include/asm/entry.h
+++ b/arch/microblaze/include/asm/entry.h
@@ -21,7 +21,7 @@
* places
*/
-#define PER_CPU(var) per_cpu__##var
+#define PER_CPU(var) var
# ifndef __ASSEMBLY__
DECLARE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
diff --git a/arch/microblaze/include/asm/ptrace.h b/arch/microblaze/include/asm/ptrace.h
index a917dc517736..d74dbfb92c04 100644
--- a/arch/microblaze/include/asm/ptrace.h
+++ b/arch/microblaze/include/asm/ptrace.h
@@ -54,6 +54,7 @@ struct pt_regs {
int pt_mode;
};
+#ifdef __KERNEL__
#define kernel_mode(regs) ((regs)->pt_mode)
#define user_mode(regs) (!kernel_mode(regs))
@@ -62,6 +63,19 @@ struct pt_regs {
void show_regs(struct pt_regs *);
+#else /* __KERNEL__ */
+
+/* pt_regs offsets used by gdbserver etc in ptrace syscalls */
+#define PT_GPR(n) ((n) * sizeof(microblaze_reg_t))
+#define PT_PC (32 * sizeof(microblaze_reg_t))
+#define PT_MSR (33 * sizeof(microblaze_reg_t))
+#define PT_EAR (34 * sizeof(microblaze_reg_t))
+#define PT_ESR (35 * sizeof(microblaze_reg_t))
+#define PT_FSR (36 * sizeof(microblaze_reg_t))
+#define PT_KERNEL_MODE (37 * sizeof(microblaze_reg_t))
+
+#endif /* __KERNEL */
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_MICROBLAZE_PTRACE_H */
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index cb05a07e55e9..2b67e92a773c 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -377,13 +377,14 @@
#define __NR_shutdown 359 /* new */
#define __NR_sendmsg 360 /* new */
#define __NR_recvmsg 361 /* new */
-#define __NR_accept04 362 /* new */
+#define __NR_accept4 362 /* new */
#define __NR_preadv 363 /* new */
#define __NR_pwritev 364 /* new */
#define __NR_rt_tgsigqueueinfo 365 /* new */
#define __NR_perf_event_open 366 /* new */
+#define __NR_recvmmsg 367 /* new */
-#define __NR_syscalls 367
+#define __NR_syscalls 368
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index 30916193fcc7..da4faa702505 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -234,19 +234,6 @@ start_here:
bralid r15, mmu_init
nop
- /* Go back to running unmapped so we can load up new values
- * and change to using our exception vectors.
- * On the MicroBlaze, all we invalidate the used TLB entries to clear
- * the old 16M byte TLB mappings.
- */
- ori r15,r0,TOPHYS(kernel_load_context)
- ori r4,r0,MSR_KERNEL
- mts rmsr,r4
- nop
- bri 4
- rted r15,0
- nop
-
/* Load up the kernel context */
kernel_load_context:
# Keep entry 0 and 1 valid. Entry 3 mapped to LMB can go away.
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 4088be7d4e29..03376dc814c9 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -366,7 +366,7 @@ ENTRY(sys_call_table)
.long sys_shutdown
.long sys_sendmsg /* 360 */
.long sys_recvmsg
- .long sys_ni_syscall
+ .long sys_accept4
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_rt_tgsigqueueinfo /* 365 */
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index 00b498e97c83..df3b1a7eb15d 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -1,5 +1,5 @@
-# au1000-style gpio
-config ALCHEMY_GPIO_AU1000
+# au1000-style gpio and interrupt controllers
+config ALCHEMY_GPIOINT_AU1000
bool
# select this in your board config if you don't want to use the gpio
@@ -20,12 +20,14 @@ config MIPS_MTX1
select HW_HAS_PCI
select SOC_AU1500
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_BOSPORUS
bool "Alchemy Bosporus board"
select SOC_AU1500
select DMA_NONCOHERENT
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_DB1000
bool "Alchemy DB1000 board"
@@ -33,12 +35,14 @@ config MIPS_DB1000
select DMA_NONCOHERENT
select HW_HAS_PCI
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_DB1100
bool "Alchemy DB1100 board"
select SOC_AU1100
select DMA_NONCOHERENT
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_DB1200
bool "Alchemy DB1200 board"
@@ -46,6 +50,7 @@ config MIPS_DB1200
select DMA_COHERENT
select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_DB1500
bool "Alchemy DB1500 board"
@@ -55,6 +60,7 @@ config MIPS_DB1500
select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_DB1550
bool "Alchemy DB1550 board"
@@ -63,12 +69,14 @@ config MIPS_DB1550
select DMA_NONCOHERENT
select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_MIRAGE
bool "Alchemy Mirage board"
select DMA_NONCOHERENT
select SOC_AU1500
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_PB1000
bool "Alchemy PB1000 board"
@@ -77,6 +85,7 @@ config MIPS_PB1000
select HW_HAS_PCI
select SWAP_IO_SPACE
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_PB1100
bool "Alchemy PB1100 board"
@@ -85,6 +94,7 @@ config MIPS_PB1100
select HW_HAS_PCI
select SWAP_IO_SPACE
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_PB1200
bool "Alchemy PB1200 board"
@@ -92,6 +102,7 @@ config MIPS_PB1200
select DMA_NONCOHERENT
select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_PB1500
bool "Alchemy PB1500 board"
@@ -99,6 +110,7 @@ config MIPS_PB1500
select DMA_NONCOHERENT
select HW_HAS_PCI
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_PB1550
bool "Alchemy PB1550 board"
@@ -107,39 +119,41 @@ config MIPS_PB1550
select HW_HAS_PCI
select MIPS_DISABLE_OBSOLETE_IDE
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
config MIPS_XXS1500
bool "MyCable XXS1500 board"
select DMA_NONCOHERENT
select SOC_AU1500
select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
endchoice
config SOC_AU1000
bool
select SOC_AU1X00
- select ALCHEMY_GPIO_AU1000
+ select ALCHEMY_GPIOINT_AU1000
config SOC_AU1100
bool
select SOC_AU1X00
- select ALCHEMY_GPIO_AU1000
+ select ALCHEMY_GPIOINT_AU1000
config SOC_AU1500
bool
select SOC_AU1X00
- select ALCHEMY_GPIO_AU1000
+ select ALCHEMY_GPIOINT_AU1000
config SOC_AU1550
bool
select SOC_AU1X00
- select ALCHEMY_GPIO_AU1000
+ select ALCHEMY_GPIOINT_AU1000
config SOC_AU1200
bool
select SOC_AU1X00
- select ALCHEMY_GPIO_AU1000
+ select ALCHEMY_GPIOINT_AU1000
config SOC_AU1X00
bool
diff --git a/arch/mips/alchemy/common/Makefile b/arch/mips/alchemy/common/Makefile
index b67fb512529d..06c0e65a54b5 100644
--- a/arch/mips/alchemy/common/Makefile
+++ b/arch/mips/alchemy/common/Makefile
@@ -5,14 +5,15 @@
# Makefile for the Alchemy Au1xx0 CPUs, generic files.
#
-obj-y += prom.o irq.o puts.o time.o reset.o \
- clocks.o platform.o power.o setup.o \
+obj-y += prom.o time.o clocks.o platform.o power.o setup.o \
sleeper.o dma.o dbdma.o
+obj-$(CONFIG_ALCHEMY_GPIOINT_AU1000) += irq.o
+
# optional gpiolib support
ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),)
ifeq ($(CONFIG_GPIOLIB),y)
- obj-$(CONFIG_ALCHEMY_GPIO_AU1000) += gpiolib-au1000.o
+ obj-$(CONFIG_ALCHEMY_GPIOINT_AU1000) += gpiolib-au1000.o
endif
endif
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 19c1c82849ff..4851308a95d0 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -30,6 +30,7 @@
*
*/
+#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -58,7 +59,6 @@ static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
static int dbdma_initialized;
-static void au1xxx_dbdma_init(void);
static dbdev_tab_t dbdev_tab[] = {
#ifdef CONFIG_SOC_AU1550
@@ -250,8 +250,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
* which can't be done successfully during board set up.
*/
if (!dbdma_initialized)
- au1xxx_dbdma_init();
- dbdma_initialized = 1;
+ return 0;
stp = find_dbdev_id(srcid);
if (stp == NULL)
@@ -569,7 +568,7 @@ EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
* This updates the source pointer and byte count. Normally used
* for memory to fifo transfers.
*/
-u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
@@ -595,7 +594,7 @@ u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
return 0;
/* Load up buffer address and byte count. */
- dp->dscr_source0 = virt_to_phys(buf);
+ dp->dscr_source0 = buf & ~0UL;
dp->dscr_cmd1 = nbytes;
/* Check flags */
if (flags & DDMA_FLAGS_IE)
@@ -622,14 +621,13 @@ u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
/* Return something non-zero. */
return nbytes;
}
-EXPORT_SYMBOL(_au1xxx_dbdma_put_source);
+EXPORT_SYMBOL(au1xxx_dbdma_put_source);
/* Put a destination buffer into the DMA ring.
* This updates the destination pointer and byte count. Normally used
* to place an empty buffer into the ring for fifo to memory transfers.
*/
-u32
-_au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
{
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
@@ -659,7 +657,7 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
if (flags & DDMA_FLAGS_NOIE)
dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
- dp->dscr_dest0 = virt_to_phys(buf);
+ dp->dscr_dest0 = buf & ~0UL;
dp->dscr_cmd1 = nbytes;
#if 0
printk(KERN_DEBUG "cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
@@ -685,7 +683,7 @@ _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
/* Return something non-zero. */
return nbytes;
}
-EXPORT_SYMBOL(_au1xxx_dbdma_put_dest);
+EXPORT_SYMBOL(au1xxx_dbdma_put_dest);
/*
* Get a destination buffer into the DMA ring.
@@ -868,28 +866,6 @@ static irqreturn_t dbdma_interrupt(int irq, void *dev_id)
return IRQ_RETVAL(1);
}
-static void au1xxx_dbdma_init(void)
-{
- int irq_nr;
-
- dbdma_gptr->ddma_config = 0;
- dbdma_gptr->ddma_throttle = 0;
- dbdma_gptr->ddma_inten = 0xffff;
- au_sync();
-
-#if defined(CONFIG_SOC_AU1550)
- irq_nr = AU1550_DDMA_INT;
-#elif defined(CONFIG_SOC_AU1200)
- irq_nr = AU1200_DDMA_INT;
-#else
- #error Unknown Au1x00 SOC
-#endif
-
- if (request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
- "Au1xxx dbdma", (void *)dbdma_gptr))
- printk(KERN_ERR "Can't get 1550 dbdma irq");
-}
-
void au1xxx_dbdma_dump(u32 chanid)
{
chan_tab_t *ctp;
@@ -1038,4 +1014,38 @@ void au1xxx_dbdma_resume(void)
}
}
#endif /* CONFIG_PM */
+
+static int __init au1xxx_dbdma_init(void)
+{
+ int irq_nr, ret;
+
+ dbdma_gptr->ddma_config = 0;
+ dbdma_gptr->ddma_throttle = 0;
+ dbdma_gptr->ddma_inten = 0xffff;
+ au_sync();
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ irq_nr = AU1550_DDMA_INT;
+ break;
+ case ALCHEMY_CPU_AU1200:
+ irq_nr = AU1200_DDMA_INT;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ ret = request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED,
+ "Au1xxx dbdma", (void *)dbdma_gptr);
+ if (ret)
+ printk(KERN_ERR "Cannot grab DBDMA interrupt!\n");
+ else {
+ dbdma_initialized = 1;
+ printk(KERN_INFO "Alchemy DBDMA initialized\n");
+ }
+
+ return ret;
+}
+subsys_initcall(au1xxx_dbdma_init);
+
#endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
diff --git a/arch/mips/alchemy/common/dma.c b/arch/mips/alchemy/common/dma.c
index d6fbda232e6a..d5278877891d 100644
--- a/arch/mips/alchemy/common/dma.c
+++ b/arch/mips/alchemy/common/dma.c
@@ -29,6 +29,8 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
+
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -188,17 +190,14 @@ int request_au1000_dma(int dev_id, const char *dev_str,
dev = &dma_dev_table[dev_id];
if (irqhandler) {
- chan->irq = AU1000_DMA_INT_BASE + i;
chan->irq_dev = irq_dev_id;
ret = request_irq(chan->irq, irqhandler, irqflags, dev_str,
chan->irq_dev);
if (ret) {
- chan->irq = 0;
chan->irq_dev = NULL;
return ret;
}
} else {
- chan->irq = 0;
chan->irq_dev = NULL;
}
@@ -226,13 +225,40 @@ void free_au1000_dma(unsigned int dmanr)
}
disable_dma(dmanr);
- if (chan->irq)
+ if (chan->irq_dev)
free_irq(chan->irq, chan->irq_dev);
- chan->irq = 0;
chan->irq_dev = NULL;
chan->dev_id = -1;
}
EXPORT_SYMBOL(free_au1000_dma);
+static int __init au1000_dma_init(void)
+{
+ int base, i;
+
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ base = AU1000_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1500:
+ base = AU1500_DMA_INT_BASE;
+ break;
+ case ALCHEMY_CPU_AU1100:
+ base = AU1100_DMA_INT_BASE;
+ break;
+ default:
+ goto out;
+ }
+
+ for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++)
+ au1000_dma_table[i].irq = base + i;
+
+ printk(KERN_INFO "Alchemy DMA initialized\n");
+
+out:
+ return 0;
+}
+arch_initcall(au1000_dma_init);
+
#endif /* AU1000 AU1500 AU1100 */
diff --git a/arch/mips/alchemy/common/gpiolib-au1000.c b/arch/mips/alchemy/common/gpiolib-au1000.c
index 1bfa91f939f4..c8e1a94d4a95 100644
--- a/arch/mips/alchemy/common/gpiolib-au1000.c
+++ b/arch/mips/alchemy/common/gpiolib-au1000.c
@@ -36,7 +36,6 @@
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio.h>
-#if !defined(CONFIG_SOC_AU1000)
static int gpio2_get(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
@@ -63,7 +62,7 @@ static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
{
return alchemy_gpio2_to_irq(offset + ALCHEMY_GPIO2_BASE);
}
-#endif /* !defined(CONFIG_SOC_AU1000) */
+
static int gpio1_get(struct gpio_chip *chip, unsigned offset)
{
@@ -104,7 +103,6 @@ struct gpio_chip alchemy_gpio_chip[] = {
.base = ALCHEMY_GPIO1_BASE,
.ngpio = ALCHEMY_GPIO1_NUM,
},
-#if !defined(CONFIG_SOC_AU1000)
[1] = {
.label = "alchemy-gpio2",
.direction_input = gpio2_direction_input,
@@ -115,15 +113,13 @@ struct gpio_chip alchemy_gpio_chip[] = {
.base = ALCHEMY_GPIO2_BASE,
.ngpio = ALCHEMY_GPIO2_NUM,
},
-#endif
};
static int __init alchemy_gpiolib_init(void)
{
gpiochip_add(&alchemy_gpio_chip[0]);
-#if !defined(CONFIG_SOC_AU1000)
- gpiochip_add(&alchemy_gpio_chip[1]);
-#endif
+ if (alchemy_get_cputype() != ALCHEMY_CPU_AU1000)
+ gpiochip_add(&alchemy_gpio_chip[1]);
return 0;
}
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index d670928afcfd..b2821ace4d00 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -39,168 +39,180 @@
static int au1x_ic_settype(unsigned int irq, unsigned int flow_type);
+/* NOTE on interrupt priorities: The original writers of this code said:
+ *
+ * Because of the tight timing of SETUP token to reply transactions,
+ * the USB devices-side packet complete interrupt (USB_DEV_REQ_INT)
+ * needs the highest priority.
+ */
+
/* per-processor fixed function irqs */
-struct au1xxx_irqmap au1xxx_ic0_map[] __initdata = {
-
-#if defined(CONFIG_SOC_AU1000)
- { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
- { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
-
-#elif defined(CONFIG_SOC_AU1500)
-
- { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1000_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1000_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
- { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
-
-#elif defined(CONFIG_SOC_AU1100)
-
- { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
- { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+struct au1xxx_irqmap {
+ int im_irq;
+ int im_type;
+ int im_request; /* set 1 to get higher priority */
+};
+
+struct au1xxx_irqmap au1000_irqmap[] __initdata = {
+ { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 1 },
{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
-
-#elif defined(CONFIG_SOC_AU1550)
-
- { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
- { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { -1, },
+};
+
+struct au1xxx_irqmap au1500_irqmap[] __initdata = {
+ { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 1 },
+ { AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1500_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1500_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { -1, },
+};
+
+struct au1xxx_irqmap au1100_irqmap[] __initdata = {
+ { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 1 },
+ { AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1100_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1100_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { -1, },
+};
+
+struct au1xxx_irqmap au1550_irqmap[] __initdata = {
+ { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 1 },
{ AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
- { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
-
-#elif defined(CONFIG_SOC_AU1200)
-
- { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
- { AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
- { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
- { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
-
-#else
-#error "Error: Unknown Alchemy SOC"
-#endif
+ { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
+ { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { -1, },
+};
+
+struct au1xxx_irqmap au1200_irqmap[] __initdata = {
+ { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
+ { AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
+ { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
+ { -1, },
};
@@ -306,7 +318,7 @@ static void au1x_ic1_unmask(unsigned int irq_nr)
* nowhere in the current kernel sources is it disabled. --mlau
*/
#if defined(CONFIG_MIPS_PB1000)
- if (irq_nr == AU1000_GPIO_15)
+ if (irq_nr == AU1000_GPIO15_INT)
au_writel(0x4000, PB1000_MDR); /* enable int */
#endif
au_sync();
@@ -378,11 +390,13 @@ static void au1x_ic1_maskack(unsigned int irq_nr)
static int au1x_ic1_setwake(unsigned int irq, unsigned int on)
{
- unsigned int bit = irq - AU1000_INTC1_INT_BASE;
+ int bit = irq - AU1000_INTC1_INT_BASE;
unsigned long wakemsk, flags;
- /* only GPIO 0-7 can act as wakeup source: */
- if ((irq < AU1000_GPIO_0) || (irq > AU1000_GPIO_7))
+ /* only GPIO 0-7 can act as wakeup source. Fortunately these
+ * are wired up identically on all supported variants.
+ */
+ if ((bit < 0) || (bit > 7))
return -EINVAL;
local_irq_save(flags);
@@ -504,11 +518,11 @@ static int au1x_ic_settype(unsigned int irq, unsigned int flow_type)
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause();
- unsigned long s, off, bit;
+ unsigned long s, off;
if (pending & CAUSEF_IP7) {
- do_IRQ(MIPS_CPU_IRQ_BASE + 7);
- return;
+ off = MIPS_CPU_IRQ_BASE + 7;
+ goto handle;
} else if (pending & CAUSEF_IP2) {
s = IC0_REQ0INT;
off = AU1000_INTC0_INT_BASE;
@@ -524,58 +538,20 @@ asmlinkage void plat_irq_dispatch(void)
} else
goto spurious;
- bit = 0;
s = au_readl(s);
if (unlikely(!s)) {
spurious:
spurious_interrupt();
return;
}
-#ifdef AU1000_USB_DEV_REQ_INT
- /*
- * Because of the tight timing of SETUP token to reply
- * transactions, the USB devices-side packet complete
- * interrupt needs the highest priority.
- */
- bit = 1 << (AU1000_USB_DEV_REQ_INT - AU1000_INTC0_INT_BASE);
- if ((pending & CAUSEF_IP2) && (s & bit)) {
- do_IRQ(AU1000_USB_DEV_REQ_INT);
- return;
- }
-#endif
- do_IRQ(__ffs(s) + off);
+ off += __ffs(s);
+handle:
+ do_IRQ(off);
}
-/* setup edge/level and assign request 0/1 */
-void __init au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count)
+static void __init au1000_init_irq(struct au1xxx_irqmap *map)
{
unsigned int bit, irq_nr;
-
- while (count--) {
- irq_nr = map[count].im_irq;
-
- if (((irq_nr < AU1000_INTC0_INT_BASE) ||
- (irq_nr >= AU1000_INTC0_INT_BASE + 32)) &&
- ((irq_nr < AU1000_INTC1_INT_BASE) ||
- (irq_nr >= AU1000_INTC1_INT_BASE + 32)))
- continue;
-
- if (irq_nr >= AU1000_INTC1_INT_BASE) {
- bit = irq_nr - AU1000_INTC1_INT_BASE;
- if (map[count].im_request)
- au_writel(1 << bit, IC1_ASSIGNCLR);
- } else {
- bit = irq_nr - AU1000_INTC0_INT_BASE;
- if (map[count].im_request)
- au_writel(1 << bit, IC0_ASSIGNCLR);
- }
-
- au1x_ic_settype(irq_nr, map[count].im_type);
- }
-}
-
-void __init arch_init_irq(void)
-{
int i;
/*
@@ -585,7 +561,7 @@ void __init arch_init_irq(void)
au_writel(0xffffffff, IC0_CFG1CLR);
au_writel(0xffffffff, IC0_CFG2CLR);
au_writel(0xffffffff, IC0_MASKCLR);
- au_writel(0xffffffff, IC0_ASSIGNSET);
+ au_writel(0xffffffff, IC0_ASSIGNCLR);
au_writel(0xffffffff, IC0_WAKECLR);
au_writel(0xffffffff, IC0_SRCSET);
au_writel(0xffffffff, IC0_FALLINGCLR);
@@ -596,7 +572,7 @@ void __init arch_init_irq(void)
au_writel(0xffffffff, IC1_CFG1CLR);
au_writel(0xffffffff, IC1_CFG2CLR);
au_writel(0xffffffff, IC1_MASKCLR);
- au_writel(0xffffffff, IC1_ASSIGNSET);
+ au_writel(0xffffffff, IC1_ASSIGNCLR);
au_writel(0xffffffff, IC1_WAKECLR);
au_writel(0xffffffff, IC1_SRCSET);
au_writel(0xffffffff, IC1_FALLINGCLR);
@@ -619,11 +595,43 @@ void __init arch_init_irq(void)
/*
* Initialize IC0, which is fixed per processor.
*/
- au1xxx_setup_irqmap(au1xxx_ic0_map, ARRAY_SIZE(au1xxx_ic0_map));
+ while (map->im_irq != -1) {
+ irq_nr = map->im_irq;
- /* Boards can register additional (GPIO-based) IRQs.
- */
- board_init_irq();
+ if (irq_nr >= AU1000_INTC1_INT_BASE) {
+ bit = irq_nr - AU1000_INTC1_INT_BASE;
+ if (map->im_request)
+ au_writel(1 << bit, IC1_ASSIGNSET);
+ } else {
+ bit = irq_nr - AU1000_INTC0_INT_BASE;
+ if (map->im_request)
+ au_writel(1 << bit, IC0_ASSIGNSET);
+ }
+
+ au1x_ic_settype(irq_nr, map->im_type);
+ ++map;
+ }
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3);
}
+
+void __init arch_init_irq(void)
+{
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ au1000_init_irq(au1000_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1500:
+ au1000_init_irq(au1500_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1100:
+ au1000_init_irq(au1100_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1550:
+ au1000_init_irq(au1550_irqmap);
+ break;
+ case ALCHEMY_CPU_AU1200:
+ au1000_init_irq(au1200_irqmap);
+ break;
+ }
+}
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 117f99f70649..3fbe30c1fd9a 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -19,39 +19,40 @@
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1100_mmc.h>
-
-#define PORT(_base, _irq) \
- { \
- .iobase = _base, \
- .membase = (void __iomem *)_base,\
- .mapbase = CPHYSADDR(_base), \
- .irq = _irq, \
- .regshift = 2, \
- .iotype = UPIO_AU, \
- .flags = UPF_SKIP_TEST \
+#include <asm/mach-au1x00/au1xxx_eth.h>
+
+#define PORT(_base, _irq) \
+ { \
+ .mapbase = _base, \
+ .irq = _irq, \
+ .regshift = 2, \
+ .iotype = UPIO_AU, \
+ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
+ UPF_FIXED_TYPE, \
+ .type = PORT_16550A, \
}
static struct plat_serial8250_port au1x00_uart_data[] = {
#if defined(CONFIG_SERIAL_8250_AU1X00)
#if defined(CONFIG_SOC_AU1000)
- PORT(UART0_ADDR, AU1000_UART0_INT),
- PORT(UART1_ADDR, AU1000_UART1_INT),
- PORT(UART2_ADDR, AU1000_UART2_INT),
- PORT(UART3_ADDR, AU1000_UART3_INT),
+ PORT(UART0_PHYS_ADDR, AU1000_UART0_INT),
+ PORT(UART1_PHYS_ADDR, AU1000_UART1_INT),
+ PORT(UART2_PHYS_ADDR, AU1000_UART2_INT),
+ PORT(UART3_PHYS_ADDR, AU1000_UART3_INT),
#elif defined(CONFIG_SOC_AU1500)
- PORT(UART0_ADDR, AU1500_UART0_INT),
- PORT(UART3_ADDR, AU1500_UART3_INT),
+ PORT(UART0_PHYS_ADDR, AU1500_UART0_INT),
+ PORT(UART3_PHYS_ADDR, AU1500_UART3_INT),
#elif defined(CONFIG_SOC_AU1100)
- PORT(UART0_ADDR, AU1100_UART0_INT),
- PORT(UART1_ADDR, AU1100_UART1_INT),
- PORT(UART3_ADDR, AU1100_UART3_INT),
+ PORT(UART0_PHYS_ADDR, AU1100_UART0_INT),
+ PORT(UART1_PHYS_ADDR, AU1100_UART1_INT),
+ PORT(UART3_PHYS_ADDR, AU1100_UART3_INT),
#elif defined(CONFIG_SOC_AU1550)
- PORT(UART0_ADDR, AU1550_UART0_INT),
- PORT(UART1_ADDR, AU1550_UART1_INT),
- PORT(UART3_ADDR, AU1550_UART3_INT),
+ PORT(UART0_PHYS_ADDR, AU1550_UART0_INT),
+ PORT(UART1_PHYS_ADDR, AU1550_UART1_INT),
+ PORT(UART3_PHYS_ADDR, AU1550_UART3_INT),
#elif defined(CONFIG_SOC_AU1200)
- PORT(UART0_ADDR, AU1200_UART0_INT),
- PORT(UART1_ADDR, AU1200_UART1_INT),
+ PORT(UART0_PHYS_ADDR, AU1200_UART0_INT),
+ PORT(UART1_PHYS_ADDR, AU1200_UART1_INT),
#endif
#endif /* CONFIG_SERIAL_8250_AU1X00 */
{ },
@@ -73,8 +74,8 @@ static struct resource au1xxx_usb_ohci_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AU1000_USB_HOST_INT,
- .end = AU1000_USB_HOST_INT,
+ .start = FOR_PLATFORM_C_USB_HOST_INT,
+ .end = FOR_PLATFORM_C_USB_HOST_INT,
.flags = IORESOURCE_IRQ,
},
};
@@ -132,8 +133,8 @@ static struct resource au1xxx_usb_ehci_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
- .start = AU1000_USB_HOST_INT,
- .end = AU1000_USB_HOST_INT,
+ .start = AU1200_USB_INT,
+ .end = AU1200_USB_INT,
.flags = IORESOURCE_IRQ,
},
};
@@ -308,11 +309,6 @@ static struct platform_device au1200_mmc1_device = {
#endif /* #ifndef CONFIG_MIPS_DB1200 */
#endif /* #ifdef CONFIG_SOC_AU1200 */
-static struct platform_device au1x00_pcmcia_device = {
- .name = "au1x00-pcmcia",
- .id = 0,
-};
-
/* All Alchemy demoboards with I2C have this #define in their headers */
#ifdef SMBUS_PSC_BASE
static struct resource pbdb_smbus_resources[] = {
@@ -331,10 +327,91 @@ static struct platform_device pbdb_smbus_device = {
};
#endif
+/* Macro to help defining the Ethernet MAC resources */
+#define MAC_RES(_base, _enable, _irq) \
+ { \
+ .start = CPHYSADDR(_base), \
+ .end = CPHYSADDR(_base + 0xffff), \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = CPHYSADDR(_enable), \
+ .end = CPHYSADDR(_enable + 0x3), \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = _irq, \
+ .end = _irq, \
+ .flags = IORESOURCE_IRQ \
+ }
+
+static struct resource au1xxx_eth0_resources[] = {
+#if defined(CONFIG_SOC_AU1000)
+ MAC_RES(AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT),
+#elif defined(CONFIG_SOC_AU1100)
+ MAC_RES(AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT),
+#elif defined(CONFIG_SOC_AU1550)
+ MAC_RES(AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT),
+#elif defined(CONFIG_SOC_AU1500)
+ MAC_RES(AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT),
+#endif
+};
+
+static struct resource au1xxx_eth1_resources[] = {
+#if defined(CONFIG_SOC_AU1000)
+ MAC_RES(AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT),
+#elif defined(CONFIG_SOC_AU1550)
+ MAC_RES(AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT),
+#elif defined(CONFIG_SOC_AU1500)
+ MAC_RES(AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT),
+#endif
+};
+
+static struct au1000_eth_platform_data au1xxx_eth0_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth0_device = {
+ .name = "au1000-eth",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(au1xxx_eth0_resources),
+ .resource = au1xxx_eth0_resources,
+ .dev.platform_data = &au1xxx_eth0_platform_data,
+};
+
+#ifndef CONFIG_SOC_AU1100
+static struct au1000_eth_platform_data au1xxx_eth1_platform_data = {
+ .phy1_search_mac0 = 1,
+};
+
+static struct platform_device au1xxx_eth1_device = {
+ .name = "au1000-eth",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(au1xxx_eth1_resources),
+ .resource = au1xxx_eth1_resources,
+ .dev.platform_data = &au1xxx_eth1_platform_data,
+};
+#endif
+
+void __init au1xxx_override_eth_cfg(unsigned int port,
+ struct au1000_eth_platform_data *eth_data)
+{
+ if (!eth_data || port > 1)
+ return;
+
+ if (port == 0)
+ memcpy(&au1xxx_eth0_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+#ifndef CONFIG_SOC_AU1100
+ else
+ memcpy(&au1xxx_eth1_platform_data, eth_data,
+ sizeof(struct au1000_eth_platform_data));
+#endif
+}
+
static struct platform_device *au1xxx_platform_devices[] __initdata = {
&au1xx0_uart_device,
&au1xxx_usb_ohci_device,
- &au1x00_pcmcia_device,
#ifdef CONFIG_FB_AU1100
&au1100_lcd_device,
#endif
@@ -351,6 +428,7 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
#ifdef SMBUS_PSC_BASE
&pbdb_smbus_device,
#endif
+ &au1xxx_eth0_device,
};
static int __init au1xxx_platform_init(void)
@@ -362,6 +440,12 @@ static int __init au1xxx_platform_init(void)
for (i = 0; au1x00_uart_data[i].flags; i++)
au1x00_uart_data[i].uartclk = uartclk;
+#ifndef CONFIG_SOC_AU1100
+ /* Register second MAC if enabled in pinfunc */
+ if (!(au_readl(SYS_PINFUNC) & (u32)SYS_PF_NI2))
+ platform_device_register(&au1xxx_eth1_device);
+#endif
+
return platform_add_devices(au1xxx_platform_devices,
ARRAY_SIZE(au1xxx_platform_devices));
}
diff --git a/arch/mips/alchemy/common/puts.c b/arch/mips/alchemy/common/puts.c
deleted file mode 100644
index 55bbe24d45b6..000000000000
--- a/arch/mips/alchemy/common/puts.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Low level UART routines to directly access Alchemy UART.
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <asm/mach-au1x00/au1000.h>
-
-#define SERIAL_BASE UART_BASE
-#define SER_CMD 0x7
-#define SER_DATA 0x1
-#define TX_BUSY 0x20
-
-#define TIMEOUT 0xffffff
-#define SLOW_DOWN
-
-static volatile unsigned long * const com1 = (unsigned long *)SERIAL_BASE;
-
-#ifdef SLOW_DOWN
-static inline void slow_down(void)
-{
- int k;
-
- for (k = 0; k < 10000; k++);
-}
-#else
-#define slow_down()
-#endif
-
-void
-prom_putchar(const unsigned char c)
-{
- unsigned char ch;
- int i = 0;
-
- do {
- ch = com1[SER_CMD];
- slow_down();
- i++;
- if (i > TIMEOUT)
- break;
- } while (0 == (ch & TX_BUSY));
-
- com1[SER_DATA] = c;
-}
diff --git a/arch/mips/alchemy/common/reset.c b/arch/mips/alchemy/common/reset.c
deleted file mode 100644
index 4791011e8f92..000000000000
--- a/arch/mips/alchemy/common/reset.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Au1xx0 reset routines.
- *
- * Copyright 2001, 2006, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/gpio.h>
-
-#include <asm/cacheflush.h>
-#include <asm/mach-au1x00/au1000.h>
-
-void au1000_restart(char *command)
-{
- /* Set all integrated peripherals to disabled states */
- extern void board_reset(void);
- u32 prid = read_c0_prid();
-
- printk(KERN_NOTICE "\n** Resetting Integrated Peripherals\n");
-
- switch (prid & 0xFF000000) {
- case 0x00000000: /* Au1000 */
- au_writel(0x02, 0xb0000010); /* ac97_enable */
- au_writel(0x08, 0xb017fffc); /* usbh_enable - early errata */
- asm("sync");
- au_writel(0x00, 0xb017fffc); /* usbh_enable */
- au_writel(0x00, 0xb0200058); /* usbd_enable */
- au_writel(0x00, 0xb0300040); /* ir_enable */
- au_writel(0x00, 0xb4004104); /* mac dma */
- au_writel(0x00, 0xb4004114); /* mac dma */
- au_writel(0x00, 0xb4004124); /* mac dma */
- au_writel(0x00, 0xb4004134); /* mac dma */
- au_writel(0x00, 0xb0520000); /* macen0 */
- au_writel(0x00, 0xb0520004); /* macen1 */
- au_writel(0x00, 0xb1000008); /* i2s_enable */
- au_writel(0x00, 0xb1100100); /* uart0_enable */
- au_writel(0x00, 0xb1200100); /* uart1_enable */
- au_writel(0x00, 0xb1300100); /* uart2_enable */
- au_writel(0x00, 0xb1400100); /* uart3_enable */
- au_writel(0x02, 0xb1600100); /* ssi0_enable */
- au_writel(0x02, 0xb1680100); /* ssi1_enable */
- au_writel(0x00, 0xb1900020); /* sys_freqctrl0 */
- au_writel(0x00, 0xb1900024); /* sys_freqctrl1 */
- au_writel(0x00, 0xb1900028); /* sys_clksrc */
- au_writel(0x10, 0xb1900060); /* sys_cpupll */
- au_writel(0x00, 0xb1900064); /* sys_auxpll */
- au_writel(0x00, 0xb1900100); /* sys_pininputen */
- break;
- case 0x01000000: /* Au1500 */
- au_writel(0x02, 0xb0000010); /* ac97_enable */
- au_writel(0x08, 0xb017fffc); /* usbh_enable - early errata */
- asm("sync");
- au_writel(0x00, 0xb017fffc); /* usbh_enable */
- au_writel(0x00, 0xb0200058); /* usbd_enable */
- au_writel(0x00, 0xb4004104); /* mac dma */
- au_writel(0x00, 0xb4004114); /* mac dma */
- au_writel(0x00, 0xb4004124); /* mac dma */
- au_writel(0x00, 0xb4004134); /* mac dma */
- au_writel(0x00, 0xb1520000); /* macen0 */
- au_writel(0x00, 0xb1520004); /* macen1 */
- au_writel(0x00, 0xb1100100); /* uart0_enable */
- au_writel(0x00, 0xb1400100); /* uart3_enable */
- au_writel(0x00, 0xb1900020); /* sys_freqctrl0 */
- au_writel(0x00, 0xb1900024); /* sys_freqctrl1 */
- au_writel(0x00, 0xb1900028); /* sys_clksrc */
- au_writel(0x10, 0xb1900060); /* sys_cpupll */
- au_writel(0x00, 0xb1900064); /* sys_auxpll */
- au_writel(0x00, 0xb1900100); /* sys_pininputen */
- break;
- case 0x02000000: /* Au1100 */
- au_writel(0x02, 0xb0000010); /* ac97_enable */
- au_writel(0x08, 0xb017fffc); /* usbh_enable - early errata */
- asm("sync");
- au_writel(0x00, 0xb017fffc); /* usbh_enable */
- au_writel(0x00, 0xb0200058); /* usbd_enable */
- au_writel(0x00, 0xb0300040); /* ir_enable */
- au_writel(0x00, 0xb4004104); /* mac dma */
- au_writel(0x00, 0xb4004114); /* mac dma */
- au_writel(0x00, 0xb4004124); /* mac dma */
- au_writel(0x00, 0xb4004134); /* mac dma */
- au_writel(0x00, 0xb0520000); /* macen0 */
- au_writel(0x00, 0xb1000008); /* i2s_enable */
- au_writel(0x00, 0xb1100100); /* uart0_enable */
- au_writel(0x00, 0xb1200100); /* uart1_enable */
- au_writel(0x00, 0xb1400100); /* uart3_enable */
- au_writel(0x02, 0xb1600100); /* ssi0_enable */
- au_writel(0x02, 0xb1680100); /* ssi1_enable */
- au_writel(0x00, 0xb1900020); /* sys_freqctrl0 */
- au_writel(0x00, 0xb1900024); /* sys_freqctrl1 */
- au_writel(0x00, 0xb1900028); /* sys_clksrc */
- au_writel(0x10, 0xb1900060); /* sys_cpupll */
- au_writel(0x00, 0xb1900064); /* sys_auxpll */
- au_writel(0x00, 0xb1900100); /* sys_pininputen */
- break;
- case 0x03000000: /* Au1550 */
- au_writel(0x00, 0xb1a00004); /* psc 0 */
- au_writel(0x00, 0xb1b00004); /* psc 1 */
- au_writel(0x00, 0xb0a00004); /* psc 2 */
- au_writel(0x00, 0xb0b00004); /* psc 3 */
- au_writel(0x00, 0xb017fffc); /* usbh_enable */
- au_writel(0x00, 0xb0200058); /* usbd_enable */
- au_writel(0x00, 0xb4004104); /* mac dma */
- au_writel(0x00, 0xb4004114); /* mac dma */
- au_writel(0x00, 0xb4004124); /* mac dma */
- au_writel(0x00, 0xb4004134); /* mac dma */
- au_writel(0x00, 0xb1520000); /* macen0 */
- au_writel(0x00, 0xb1520004); /* macen1 */
- au_writel(0x00, 0xb1100100); /* uart0_enable */
- au_writel(0x00, 0xb1200100); /* uart1_enable */
- au_writel(0x00, 0xb1400100); /* uart3_enable */
- au_writel(0x00, 0xb1900020); /* sys_freqctrl0 */
- au_writel(0x00, 0xb1900024); /* sys_freqctrl1 */
- au_writel(0x00, 0xb1900028); /* sys_clksrc */
- au_writel(0x10, 0xb1900060); /* sys_cpupll */
- au_writel(0x00, 0xb1900064); /* sys_auxpll */
- au_writel(0x00, 0xb1900100); /* sys_pininputen */
- break;
- }
-
- set_c0_status(ST0_BEV | ST0_ERL);
- change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
- flush_cache_all();
- write_c0_wired(0);
-
- /* Give board a chance to do a hardware reset */
- board_reset();
-
- /* Jump to the beggining in case board_reset() is empty */
- __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
-}
-
-void au1000_halt(void)
-{
-#if defined(CONFIG_MIPS_PB1550) || defined(CONFIG_MIPS_DB1550)
- /* Power off system */
- printk(KERN_NOTICE "\n** Powering off...\n");
- au_writew(au_readw(0xAF00001C) | (3 << 14), 0xAF00001C);
- au_sync();
- while (1); /* should not get here */
-#else
- printk(KERN_NOTICE "\n** You can safely turn off the power\n");
-#ifdef CONFIG_MIPS_MIRAGE
- gpio_direction_output(210, 1);
-#endif
-#ifdef CONFIG_MIPS_DB1200
- au_writew(au_readw(0xB980001C) | (1 << 14), 0xB980001C);
-#endif
-#ifdef CONFIG_PM
- au_sleep();
-
- /* Should not get here */
- printk(KERN_ERR "Unable to put CPU in sleep mode\n");
- while (1);
-#else
- while (1)
- __asm__(".set\tmips3\n\t"
- "wait\n\t"
- ".set\tmips0");
-#endif
-#endif /* defined(CONFIG_MIPS_PB1550) || defined(CONFIG_MIPS_DB1550) */
-}
-
-void au1000_power_off(void)
-{
- au1000_halt();
-}
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 6184baa56786..193ba166affd 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -29,18 +29,13 @@
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/module.h>
-#include <linux/pm.h>
#include <asm/mipsregs.h>
-#include <asm/reboot.h>
#include <asm/time.h>
#include <au1000.h>
extern void __init board_setup(void);
-extern void au1000_restart(char *);
-extern void au1000_halt(void);
-extern void au1000_power_off(void);
extern void set_cpuspec(void);
void __init plat_mem_setup(void)
@@ -57,10 +52,6 @@ void __init plat_mem_setup(void)
/* this is faster than wasting cycles trying to approximate it */
preset_lpj = (est_freq >> 1) / HZ;
- _machine_restart = au1000_restart;
- _machine_halt = au1000_halt;
- pm_power_off = au1000_power_off;
-
board_setup(); /* board specific setup */
if (au1xxx_cpu_needs_config_od())
@@ -107,7 +98,8 @@ phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
* The pseudo address we use is 0xF400 0000. Any address over
* 0xF400 0000 is a PCMCIA pseudo address.
*/
- if ((phys_addr >= 0xF4000000) && (phys_addr < 0xFFFFFFFF))
+ if ((phys_addr >= PCMCIA_ATTR_PSEUDO_PHYS) &&
+ (phys_addr < PCMCIA_PSEUDO_END))
return (phys_t)(phys_addr << 4);
/* default nop */
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 379a664809b0..2aecb2fdf982 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
+ * Copyright (C) 2008-2009 Manuel Lauss <manuel.lauss@gmail.com>
*
* Previous incarnations were:
* Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
@@ -85,7 +85,6 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
.name = "rtcmatch2",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
- .irq = AU1000_RTC_MATCH2_INT,
.set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
.cpumask = cpu_all_mask,
@@ -98,11 +97,13 @@ static struct irqaction au1x_rtcmatch2_irqaction = {
.dev_id = &au1x_rtcmatch2_clockdev,
};
-void __init plat_time_init(void)
+static int __init alchemy_time_init(unsigned int m2int)
{
struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
unsigned long t;
+ au1x_rtcmatch2_clockdev.irq = m2int;
+
/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
* has been detected. If so install the rtcmatch2 clocksource,
* otherwise don't bother. Note that both bits being set is by
@@ -148,13 +149,18 @@ void __init plat_time_init(void)
cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(8, cd); /* ~0.25ms */
clockevents_register_device(cd);
- setup_irq(AU1000_RTC_MATCH2_INT, &au1x_rtcmatch2_irqaction);
+ setup_irq(m2int, &au1x_rtcmatch2_irqaction);
printk(KERN_INFO "Alchemy clocksource installed\n");
- return;
+ return 0;
cntr_err:
+ return -1;
+}
+
+static void __init alchemy_setup_c0timer(void)
+{
/*
* MIPS kernel assigns 'au1k_wait' to 'cpu_wait' before this
* function is called. Because the Alchemy counters are unusable
@@ -166,3 +172,22 @@ cntr_err:
r4k_clockevent_init();
init_r4k_clocksource();
}
+
+static int alchemy_m2inttab[] __initdata = {
+ AU1000_RTC_MATCH2_INT,
+ AU1500_RTC_MATCH2_INT,
+ AU1100_RTC_MATCH2_INT,
+ AU1550_RTC_MATCH2_INT,
+ AU1200_RTC_MATCH2_INT,
+};
+
+void __init plat_time_init(void)
+{
+ int t;
+
+ t = alchemy_get_cputype();
+ if (t == ALCHEMY_CPU_UNKNOWN)
+ alchemy_setup_c0timer();
+ else if (alchemy_time_init(alchemy_m2inttab[t]))
+ alchemy_setup_c0timer();
+}
diff --git a/arch/mips/alchemy/devboards/Makefile b/arch/mips/alchemy/devboards/Makefile
index 730f9f2b30e8..ecbd37f9ee87 100644
--- a/arch/mips/alchemy/devboards/Makefile
+++ b/arch/mips/alchemy/devboards/Makefile
@@ -2,7 +2,7 @@
# Alchemy Develboards
#
-obj-y += prom.o
+obj-y += prom.o bcsr.o platform.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_MIPS_PB1000) += pb1000/
obj-$(CONFIG_MIPS_PB1100) += pb1100/
@@ -11,8 +11,10 @@ obj-$(CONFIG_MIPS_PB1500) += pb1500/
obj-$(CONFIG_MIPS_PB1550) += pb1550/
obj-$(CONFIG_MIPS_DB1000) += db1x00/
obj-$(CONFIG_MIPS_DB1100) += db1x00/
-obj-$(CONFIG_MIPS_DB1200) += pb1200/
+obj-$(CONFIG_MIPS_DB1200) += db1200/
obj-$(CONFIG_MIPS_DB1500) += db1x00/
obj-$(CONFIG_MIPS_DB1550) += db1x00/
obj-$(CONFIG_MIPS_BOSPORUS) += db1x00/
obj-$(CONFIG_MIPS_MIRAGE) += db1x00/
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
new file mode 100644
index 000000000000..3bc4fd2155d7
--- /dev/null
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -0,0 +1,148 @@
+/*
+ * bcsr.h -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
+ *
+ * All Alchemy development boards (except, of course, the weird PB1000)
+ * have a few registers in a CPLD with standardised layout; they mostly
+ * only differ in base address.
+ * All registers are 16bits wide with 32bit spacing.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+static struct bcsr_reg {
+ void __iomem *raddr;
+ spinlock_t lock;
+} bcsr_regs[BCSR_CNT];
+
+static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
+static int bcsr_csc_base; /* linux-irq of first cascaded irq */
+
+void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
+{
+ int i;
+
+ bcsr1_phys = KSEG1ADDR(CPHYSADDR(bcsr1_phys));
+ bcsr2_phys = KSEG1ADDR(CPHYSADDR(bcsr2_phys));
+
+ bcsr_virt = (void __iomem *)bcsr1_phys;
+
+ for (i = 0; i < BCSR_CNT; i++) {
+ if (i >= BCSR_HEXLEDS)
+ bcsr_regs[i].raddr = (void __iomem *)bcsr2_phys +
+ (0x04 * (i - BCSR_HEXLEDS));
+ else
+ bcsr_regs[i].raddr = (void __iomem *)bcsr1_phys +
+ (0x04 * i);
+
+ spin_lock_init(&bcsr_regs[i].lock);
+ }
+}
+
+unsigned short bcsr_read(enum bcsr_id reg)
+{
+ unsigned short r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
+ r = __raw_readw(bcsr_regs[reg].raddr);
+ spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
+ return r;
+}
+EXPORT_SYMBOL_GPL(bcsr_read);
+
+void bcsr_write(enum bcsr_id reg, unsigned short val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
+ __raw_writew(val, bcsr_regs[reg].raddr);
+ wmb();
+ spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
+}
+EXPORT_SYMBOL_GPL(bcsr_write);
+
+void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set)
+{
+ unsigned short r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bcsr_regs[reg].lock, flags);
+ r = __raw_readw(bcsr_regs[reg].raddr);
+ r &= ~clr;
+ r |= set;
+ __raw_writew(r, bcsr_regs[reg].raddr);
+ wmb();
+ spin_unlock_irqrestore(&bcsr_regs[reg].lock, flags);
+}
+EXPORT_SYMBOL_GPL(bcsr_mod);
+
+/*
+ * DB1200/PB1200 CPLD IRQ muxer
+ */
+static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
+{
+ unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
+
+ for ( ; bisr; bisr &= bisr - 1)
+ generic_handle_irq(bcsr_csc_base + __ffs(bisr));
+}
+
+/* NOTE: both the enable and mask bits must be cleared, otherwise the
+ * CPLD generates tons of spurious interrupts (at least on my DB1200).
+ * -- mlau
+ */
+static void bcsr_irq_mask(unsigned int irq_nr)
+{
+ unsigned short v = 1 << (irq_nr - bcsr_csc_base);
+ __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
+ __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
+ wmb();
+}
+
+static void bcsr_irq_maskack(unsigned int irq_nr)
+{
+ unsigned short v = 1 << (irq_nr - bcsr_csc_base);
+ __raw_writew(v, bcsr_virt + BCSR_REG_INTCLR);
+ __raw_writew(v, bcsr_virt + BCSR_REG_MASKCLR);
+ __raw_writew(v, bcsr_virt + BCSR_REG_INTSTAT); /* ack */
+ wmb();
+}
+
+static void bcsr_irq_unmask(unsigned int irq_nr)
+{
+ unsigned short v = 1 << (irq_nr - bcsr_csc_base);
+ __raw_writew(v, bcsr_virt + BCSR_REG_INTSET);
+ __raw_writew(v, bcsr_virt + BCSR_REG_MASKSET);
+ wmb();
+}
+
+static struct irq_chip bcsr_irq_type = {
+ .name = "CPLD",
+ .mask = bcsr_irq_mask,
+ .mask_ack = bcsr_irq_maskack,
+ .unmask = bcsr_irq_unmask,
+};
+
+void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq)
+{
+ unsigned int irq;
+
+ /* mask & disable & ack all */
+ __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTCLR);
+ __raw_writew(0xffff, bcsr_virt + BCSR_REG_MASKCLR);
+ __raw_writew(0xffff, bcsr_virt + BCSR_REG_INTSTAT);
+ wmb();
+
+ bcsr_csc_base = csc_start;
+
+ for (irq = csc_start; irq <= csc_end; irq++)
+ set_irq_chip_and_handler_name(irq, &bcsr_irq_type,
+ handle_level_irq, "level");
+
+ set_irq_chained_handler(hook_irq, bcsr_csc_handler);
+}
diff --git a/arch/mips/alchemy/devboards/db1200/Makefile b/arch/mips/alchemy/devboards/db1200/Makefile
new file mode 100644
index 000000000000..17840a5e2738
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1200/Makefile
@@ -0,0 +1 @@
+obj-y += setup.o platform.o
diff --git a/arch/mips/alchemy/devboards/db1200/platform.c b/arch/mips/alchemy/devboards/db1200/platform.c
new file mode 100644
index 000000000000..d6b3e64376c0
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1200/platform.c
@@ -0,0 +1,561 @@
+/*
+ * DBAu1200 board platform device registration
+ *
+ * Copyright (C) 2008-2009 Manuel Lauss
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/leds.h>
+#include <linux/mmc/host.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <linux/serial_8250.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+#include <linux/smc91x.h>
+
+#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-au1x00/au1550_spi.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include <asm/mach-db1x00/db1200.h>
+
+#include "../platform.h"
+
+static struct mtd_partition db1200_spiflash_parts[] = {
+ {
+ .name = "DB1200 SPI flash",
+ .offset = 0,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+static struct flash_platform_data db1200_spiflash_data = {
+ .name = "s25fl001",
+ .parts = db1200_spiflash_parts,
+ .nr_parts = ARRAY_SIZE(db1200_spiflash_parts),
+ .type = "m25p10",
+};
+
+static struct spi_board_info db1200_spi_devs[] __initdata = {
+ {
+ /* TI TMP121AIDBVR temp sensor */
+ .modalias = "tmp121",
+ .max_speed_hz = 2000000,
+ .bus_num = 0,
+ .chip_select = 0,
+ .mode = 0,
+ },
+ {
+ /* Spansion S25FL001D0FMA SPI flash */
+ .modalias = "m25p80",
+ .max_speed_hz = 50000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ .mode = 0,
+ .platform_data = &db1200_spiflash_data,
+ },
+};
+
+static struct i2c_board_info db1200_i2c_devs[] __initdata = {
+ {
+ /* AT24C04-10 I2C eeprom */
+ I2C_BOARD_INFO("24c04", 0x52),
+ },
+ {
+ /* Philips NE1619 temp/voltage sensor (adm1025 drv) */
+ I2C_BOARD_INFO("ne1619", 0x2d),
+ },
+ {
+ /* I2S audio codec WM8731 */
+ I2C_BOARD_INFO("wm8731", 0x1b),
+ },
+};
+
+/**********************************************************************/
+
+static void au1200_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *this = mtd->priv;
+ unsigned long ioaddr = (unsigned long)this->IO_ADDR_W;
+
+ ioaddr &= 0xffffff00;
+
+ if (ctrl & NAND_CLE) {
+ ioaddr += MEM_STNAND_CMD;
+ } else if (ctrl & NAND_ALE) {
+ ioaddr += MEM_STNAND_ADDR;
+ } else {
+ /* assume we want to r/w real data by default */
+ ioaddr += MEM_STNAND_DATA;
+ }
+ this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)ioaddr;
+ if (cmd != NAND_CMD_NONE) {
+ __raw_writeb(cmd, this->IO_ADDR_W);
+ wmb();
+ }
+}
+
+static int au1200_nand_device_ready(struct mtd_info *mtd)
+{
+ return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
+}
+
+static const char *db1200_part_probes[] = { "cmdlinepart", NULL };
+
+static struct mtd_partition db1200_nand_parts[] = {
+ {
+ .name = "NAND FS 0",
+ .offset = 0,
+ .size = 8 * 1024 * 1024,
+ },
+ {
+ .name = "NAND FS 1",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL
+ },
+};
+
+struct platform_nand_data db1200_nand_platdata = {
+ .chip = {
+ .nr_chips = 1,
+ .chip_offset = 0,
+ .nr_partitions = ARRAY_SIZE(db1200_nand_parts),
+ .partitions = db1200_nand_parts,
+ .chip_delay = 20,
+ .part_probe_types = db1200_part_probes,
+ },
+ .ctrl = {
+ .dev_ready = au1200_nand_device_ready,
+ .cmd_ctrl = au1200_nand_cmd_ctrl,
+ },
+};
+
+static struct resource db1200_nand_res[] = {
+ [0] = {
+ .start = DB1200_NAND_PHYS_ADDR,
+ .end = DB1200_NAND_PHYS_ADDR + 0xff,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device db1200_nand_dev = {
+ .name = "gen_nand",
+ .num_resources = ARRAY_SIZE(db1200_nand_res),
+ .resource = db1200_nand_res,
+ .id = -1,
+ .dev = {
+ .platform_data = &db1200_nand_platdata,
+ }
+};
+
+/**********************************************************************/
+
+static struct smc91x_platdata db1200_eth_data = {
+ .flags = SMC91X_NOWAIT | SMC91X_USE_16BIT,
+ .leda = RPC_LED_100_10,
+ .ledb = RPC_LED_TX_RX,
+};
+
+static struct resource db1200_eth_res[] = {
+ [0] = {
+ .start = DB1200_ETH_PHYS_ADDR,
+ .end = DB1200_ETH_PHYS_ADDR + 0xf,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DB1200_ETH_INT,
+ .end = DB1200_ETH_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device db1200_eth_dev = {
+ .dev = {
+ .platform_data = &db1200_eth_data,
+ },
+ .name = "smc91x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(db1200_eth_res),
+ .resource = db1200_eth_res,
+};
+
+/**********************************************************************/
+
+static struct resource db1200_ide_res[] = {
+ [0] = {
+ .start = DB1200_IDE_PHYS_ADDR,
+ .end = DB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = DB1200_IDE_INT,
+ .end = DB1200_IDE_INT,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static u64 ide_dmamask = DMA_32BIT_MASK;
+
+static struct platform_device db1200_ide_dev = {
+ .name = "au1200-ide",
+ .id = 0,
+ .dev = {
+ .dma_mask = &ide_dmamask,
+ .coherent_dma_mask = DMA_32BIT_MASK,
+ },
+ .num_resources = ARRAY_SIZE(db1200_ide_res),
+ .resource = db1200_ide_res,
+};
+
+/**********************************************************************/
+
+static struct platform_device db1200_rtc_dev = {
+ .name = "rtc-au1xxx",
+ .id = -1,
+};
+
+/**********************************************************************/
+
+/* SD carddetects: they're supposed to be edge-triggered, but ack
+ * doesn't seem to work (CPLD Rev 2). Instead, the screaming one
+ * is disabled and its counterpart enabled. The 500ms timeout is
+ * because the carddetect isn't debounced in hardware.
+ */
+static irqreturn_t db1200_mmc_cd(int irq, void *ptr)
+{
+ void(*mmc_cd)(struct mmc_host *, unsigned long);
+
+ if (irq == DB1200_SD0_INSERT_INT) {
+ disable_irq_nosync(DB1200_SD0_INSERT_INT);
+ enable_irq(DB1200_SD0_EJECT_INT);
+ } else {
+ disable_irq_nosync(DB1200_SD0_EJECT_INT);
+ enable_irq(DB1200_SD0_INSERT_INT);
+ }
+
+ /* link against CONFIG_MMC=m */
+ mmc_cd = symbol_get(mmc_detect_change);
+ if (mmc_cd) {
+ mmc_cd(ptr, msecs_to_jiffies(500));
+ symbol_put(mmc_detect_change);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int db1200_mmc_cd_setup(void *mmc_host, int en)
+{
+ int ret;
+
+ if (en) {
+ ret = request_irq(DB1200_SD0_INSERT_INT, db1200_mmc_cd,
+ IRQF_DISABLED, "sd_insert", mmc_host);
+ if (ret)
+ goto out;
+
+ ret = request_irq(DB1200_SD0_EJECT_INT, db1200_mmc_cd,
+ IRQF_DISABLED, "sd_eject", mmc_host);
+ if (ret) {
+ free_irq(DB1200_SD0_INSERT_INT, mmc_host);
+ goto out;
+ }
+
+ if (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT)
+ enable_irq(DB1200_SD0_EJECT_INT);
+ else
+ enable_irq(DB1200_SD0_INSERT_INT);
+
+ } else {
+ free_irq(DB1200_SD0_INSERT_INT, mmc_host);
+ free_irq(DB1200_SD0_EJECT_INT, mmc_host);
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+static void db1200_mmc_set_power(void *mmc_host, int state)
+{
+ if (state) {
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD0PWR);
+ msleep(400); /* stabilization time */
+ } else
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD0PWR, 0);
+}
+
+static int db1200_mmc_card_readonly(void *mmc_host)
+{
+ return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP) ? 1 : 0;
+}
+
+static int db1200_mmc_card_inserted(void *mmc_host)
+{
+ return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT) ? 1 : 0;
+}
+
+static void db1200_mmcled_set(struct led_classdev *led,
+ enum led_brightness brightness)
+{
+ if (brightness != LED_OFF)
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
+ else
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
+}
+
+static struct led_classdev db1200_mmc_led = {
+ .brightness_set = db1200_mmcled_set,
+};
+
+/* needed by arch/mips/alchemy/common/platform.c */
+struct au1xmmc_platform_data au1xmmc_platdata[] = {
+ [0] = {
+ .cd_setup = db1200_mmc_cd_setup,
+ .set_power = db1200_mmc_set_power,
+ .card_inserted = db1200_mmc_card_inserted,
+ .card_readonly = db1200_mmc_card_readonly,
+ .led = &db1200_mmc_led,
+ },
+};
+
+/**********************************************************************/
+
+static struct resource au1200_psc0_res[] = {
+ [0] = {
+ .start = PSC0_PHYS_ADDR,
+ .end = PSC0_PHYS_ADDR + 0x000fffff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1200_PSC0_INT,
+ .end = AU1200_PSC0_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = DSCR_CMD0_PSC0_TX,
+ .end = DSCR_CMD0_PSC0_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = DSCR_CMD0_PSC0_RX,
+ .end = DSCR_CMD0_PSC0_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1200_i2c_dev = {
+ .name = "au1xpsc_smbus",
+ .id = 0, /* bus number */
+ .num_resources = ARRAY_SIZE(au1200_psc0_res),
+ .resource = au1200_psc0_res,
+};
+
+static void db1200_spi_cs_en(struct au1550_spi_info *spi, int cs, int pol)
+{
+ if (cs)
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_SPISEL);
+ else
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_SPISEL, 0);
+}
+
+static struct au1550_spi_info db1200_spi_platdata = {
+ .mainclk_hz = 50000000, /* PSC0 clock */
+ .num_chipselect = 2,
+ .activate_cs = db1200_spi_cs_en,
+};
+
+static u64 spi_dmamask = DMA_32BIT_MASK;
+
+static struct platform_device db1200_spi_dev = {
+ .dev = {
+ .dma_mask = &spi_dmamask,
+ .coherent_dma_mask = DMA_32BIT_MASK,
+ .platform_data = &db1200_spi_platdata,
+ },
+ .name = "au1550-spi",
+ .id = 0, /* bus number */
+ .num_resources = ARRAY_SIZE(au1200_psc0_res),
+ .resource = au1200_psc0_res,
+};
+
+static struct resource au1200_psc1_res[] = {
+ [0] = {
+ .start = PSC1_PHYS_ADDR,
+ .end = PSC1_PHYS_ADDR + 0x000fffff,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = AU1200_PSC1_INT,
+ .end = AU1200_PSC1_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = DSCR_CMD0_PSC1_TX,
+ .end = DSCR_CMD0_PSC1_TX,
+ .flags = IORESOURCE_DMA,
+ },
+ [3] = {
+ .start = DSCR_CMD0_PSC1_RX,
+ .end = DSCR_CMD0_PSC1_RX,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+static struct platform_device db1200_audio_dev = {
+ /* name assigned later based on switch setting */
+ .id = 1, /* PSC ID */
+ .num_resources = ARRAY_SIZE(au1200_psc1_res),
+ .resource = au1200_psc1_res,
+};
+
+static struct platform_device *db1200_devs[] __initdata = {
+ NULL, /* PSC0, selected by S6.8 */
+ &db1200_ide_dev,
+ &db1200_eth_dev,
+ &db1200_rtc_dev,
+ &db1200_nand_dev,
+ &db1200_audio_dev,
+};
+
+static int __init db1200_dev_init(void)
+{
+ unsigned long pfc;
+ unsigned short sw;
+ int swapped;
+
+ i2c_register_board_info(0, db1200_i2c_devs,
+ ARRAY_SIZE(db1200_i2c_devs));
+ spi_register_board_info(db1200_spi_devs,
+ ARRAY_SIZE(db1200_i2c_devs));
+
+ /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
+ * S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
+ */
+
+ /* NOTE: GPIO215 controls OTG VBUS supply. In SPI mode however
+ * this pin is claimed by PSC0 (unused though, but pinmux doesn't
+ * allow to free it without crippling the SPI interface).
+ * As a result, in SPI mode, OTG simply won't work (PSC0 uses
+ * it as an input pin which is pulled high on the boards).
+ */
+ pfc = __raw_readl((void __iomem *)SYS_PINFUNC) & ~SYS_PINFUNC_P0A;
+
+ /* switch off OTG VBUS supply */
+ gpio_request(215, "otg-vbus");
+ gpio_direction_output(215, 1);
+
+ printk(KERN_INFO "DB1200 device configuration:\n");
+
+ sw = bcsr_read(BCSR_SWITCHES);
+ if (sw & BCSR_SWITCHES_DIP_8) {
+ db1200_devs[0] = &db1200_i2c_dev;
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC0MUX, 0);
+
+ pfc |= (2 << 17); /* GPIO2 block owns GPIO215 */
+
+ printk(KERN_INFO " S6.8 OFF: PSC0 mode I2C\n");
+ printk(KERN_INFO " OTG port VBUS supply available!\n");
+ } else {
+ db1200_devs[0] = &db1200_spi_dev;
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC0MUX);
+
+ pfc |= (1 << 17); /* PSC0 owns GPIO215 */
+
+ printk(KERN_INFO " S6.8 ON : PSC0 mode SPI\n");
+ printk(KERN_INFO " OTG port VBUS supply disabled\n");
+ }
+ __raw_writel(pfc, (void __iomem *)SYS_PINFUNC);
+ wmb();
+
+ /* Audio: DIP7 selects I2S(0)/AC97(1), but need I2C for I2S!
+ * so: DIP7=1 || DIP8=0 => AC97, DIP7=0 && DIP8=1 => I2S
+ */
+ sw &= BCSR_SWITCHES_DIP_8 | BCSR_SWITCHES_DIP_7;
+ if (sw == BCSR_SWITCHES_DIP_8) {
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_PSC1MUX);
+ db1200_audio_dev.name = "au1xpsc_i2s";
+ printk(KERN_INFO " S6.7 ON : PSC1 mode I2S\n");
+ } else {
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC1MUX, 0);
+ db1200_audio_dev.name = "au1xpsc_ac97";
+ printk(KERN_INFO " S6.7 OFF: PSC1 mode AC97\n");
+ }
+
+ /* Audio PSC clock is supplied externally. (FIXME: platdata!!) */
+ __raw_writel(PSC_SEL_CLK_SERCLK,
+ (void __iomem *)KSEG1ADDR(PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
+ wmb();
+
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00001000 - 1,
+ DB1200_PC0_INT,
+ DB1200_PC0_INSERT_INT,
+ /*DB1200_PC0_STSCHG_INT*/0,
+ DB1200_PC0_EJECT_INT,
+ 0);
+
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS + 0x00400000,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00440000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00400000,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00440000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00400000,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00401000 - 1,
+ DB1200_PC1_INT,
+ DB1200_PC1_INSERT_INT,
+ /*DB1200_PC1_STSCHG_INT*/0,
+ DB1200_PC1_EJECT_INT,
+ 1);
+
+ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
+ db1x_register_norflash(64 << 20, 2, swapped);
+
+ return platform_add_devices(db1200_devs, ARRAY_SIZE(db1200_devs));
+}
+device_initcall(db1200_dev_init);
+
+/* au1200fb calls these: STERBT EINEN TRAGISCHEN TOD!!! */
+int board_au1200fb_panel(void)
+{
+ return (bcsr_read(BCSR_SWITCHES) >> 8) & 0x0f;
+}
+
+int board_au1200fb_panel_init(void)
+{
+ /* Apply power */
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
+ BCSR_BOARD_LCDBL);
+ return 0;
+}
+
+int board_au1200fb_panel_shutdown(void)
+{
+ /* Remove power */
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
+ BCSR_BOARD_LCDBL, 0);
+ return 0;
+}
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
new file mode 100644
index 000000000000..379536e3abd1
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -0,0 +1,118 @@
+/*
+ * Alchemy/AMD/RMI DB1200 board setup.
+ *
+ * Licensed under the terms outlined in the file COPYING in the root of
+ * this source archive.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include <asm/mach-db1x00/db1200.h>
+
+const char *get_system_type(void)
+{
+ return "Alchemy Db1200";
+}
+
+void __init board_setup(void)
+{
+ unsigned long freq0, clksrc, div, pfc;
+ unsigned short whoami;
+
+ bcsr_init(DB1200_BCSR_PHYS_ADDR,
+ DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);
+
+ whoami = bcsr_read(BCSR_WHOAMI);
+ printk(KERN_INFO "Alchemy/AMD/RMI DB1200 Board, CPLD Rev %d"
+ " Board-ID %d Daughtercard ID %d\n",
+ (whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
+
+ /* SMBus/SPI on PSC0, Audio on PSC1 */
+ pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
+ pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
+ pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3);
+ pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
+ __raw_writel(pfc, (void __iomem *)SYS_PINFUNC);
+ wmb();
+
+ /* Clock configurations: PSC0: ~50MHz via Clkgen0, derived from
+ * CPU clock; all other clock generators off/unused.
+ */
+ div = (get_au1x00_speed() + 25000000) / 50000000;
+ if (div & 1)
+ div++;
+ div = ((div >> 1) - 1) & 0xff;
+
+ freq0 = div << SYS_FC_FRDIV0_BIT;
+ __raw_writel(freq0, (void __iomem *)SYS_FREQCTRL0);
+ wmb();
+ freq0 |= SYS_FC_FE0; /* enable F0 */
+ __raw_writel(freq0, (void __iomem *)SYS_FREQCTRL0);
+ wmb();
+
+ /* psc0_intclk comes 1:1 from F0 */
+ clksrc = SYS_CS_MUX_FQ0 << SYS_CS_ME0_BIT;
+ __raw_writel(clksrc, (void __iomem *)SYS_CLKSRC);
+ wmb();
+}
+
+/* use the hexleds to count the number of times the cpu has entered
+ * wait, the dots to indicate whether the CPU is currently idle or
+ * active (dots off = sleeping, dots on = working) for cases where
+ * the number doesn't change for a long(er) period of time.
+ */
+static void db1200_wait(void)
+{
+ __asm__(" .set push \n"
+ " .set mips3 \n"
+ " .set noreorder \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " cache 0x14, 64(%0) \n"
+ /* dots off: we're about to call wait */
+ " lui $26, 0xb980 \n"
+ " ori $27, $0, 3 \n"
+ " sb $27, 0x18($26) \n"
+ " sync \n"
+ " nop \n"
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ /* dots on: there's work to do, increment cntr */
+ " lui $26, 0xb980 \n"
+ " sb $0, 0x18($26) \n"
+ " lui $26, 0xb9c0 \n"
+ " lb $27, 0($26) \n"
+ " addiu $27, $27, 1 \n"
+ " sb $27, 0($26) \n"
+ " sync \n"
+ " .set pop \n"
+ : : "r" (db1200_wait));
+}
+
+static int __init db1200_arch_init(void)
+{
+ /* GPIO7 is low-level triggered CPLD cascade */
+ set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
+ bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
+
+ /* do not autoenable these: CPLD has broken edge int handling,
+ * and the CD handler setup requires manual enabling to work
+ * around that.
+ */
+ irq_to_desc(DB1200_SD0_INSERT_INT)->status |= IRQ_NOAUTOEN;
+ irq_to_desc(DB1200_SD0_EJECT_INT)->status |= IRQ_NOAUTOEN;
+
+ if (cpu_wait)
+ cpu_wait = db1200_wait;
+
+ return 0;
+}
+arch_initcall(db1200_arch_init);
diff --git a/arch/mips/alchemy/devboards/db1x00/Makefile b/arch/mips/alchemy/devboards/db1x00/Makefile
index 432241ab8677..613c0c0c8be9 100644
--- a/arch/mips/alchemy/devboards/db1x00/Makefile
+++ b/arch/mips/alchemy/devboards/db1x00/Makefile
@@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor DBAu1xx0 boards.
#
-obj-y := board_setup.o irqmap.o
+obj-y := board_setup.o platform.o
diff --git a/arch/mips/alchemy/devboards/db1x00/board_setup.c b/arch/mips/alchemy/devboards/db1x00/board_setup.c
index de30d8ea7176..d21f9c0ad207 100644
--- a/arch/mips/alchemy/devboards/db1x00/board_setup.c
+++ b/arch/mips/alchemy/devboards/db1x00/board_setup.c
@@ -29,35 +29,136 @@
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_eth.h>
#include <asm/mach-db1x00/db1x00.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include <asm/reboot.h>
#include <prom.h>
+#ifdef CONFIG_MIPS_DB1500
+char irq_tab_alchemy[][5] __initdata = {
+ [12] = { -1, AU1500_PCI_INTA, 0xff, 0xff, 0xff }, /* IDSEL 12 - HPT371 */
+ [13] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, AU1500_PCI_INTC, AU1500_PCI_INTD }, /* IDSEL 13 - PCI slot */
+};
-static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
+static void bosporus_power_off(void)
+{
+ printk(KERN_INFO "It's now safe to turn off power\n");
+ while (1)
+ asm volatile (".set mips3 ; wait ; .set mips0");
+}
const char *get_system_type(void)
{
-#ifdef CONFIG_MIPS_BOSPORUS
return "Alchemy Bosporus Gateway Reference";
-#else
- return "Alchemy Db1x00";
+}
#endif
+
+/*
+ * Micrel/Kendin 5 port switch attached to MAC0,
+ * MAC0 is associated with PHY address 5 (== WAN port)
+ * MAC1 is not associated with any PHY, since it's connected directly
+ * to the switch.
+ * no interrupts are used
+ */
+static struct au1000_eth_platform_data eth0_pdata = {
+ .phy_static_config = 1,
+ .phy_addr = 5,
+};
+
+#ifdef CONFIG_MIPS_BOSPORUS
+char irq_tab_alchemy[][5] __initdata = {
+ [11] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 11 - miniPCI */
+ [12] = { -1, AU1500_PCI_INTA, 0xff, 0xff, 0xff }, /* IDSEL 12 - SN1741 */
+ [13] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, AU1500_PCI_INTC, AU1500_PCI_INTD }, /* IDSEL 13 - PCI slot */
+};
+
+
+#endif
+
+#ifdef CONFIG_MIPS_MIRAGE
+char irq_tab_alchemy[][5] __initdata = {
+ [11] = { -1, AU1500_PCI_INTD, 0xff, 0xff, 0xff }, /* IDSEL 11 - SMI VGX */
+ [12] = { -1, 0xff, 0xff, AU1500_PCI_INTC, 0xff }, /* IDSEL 12 - PNX1300 */
+ [13] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 13 - miniPCI */
+};
+
+static void mirage_power_off(void)
+{
+ alchemy_gpio_direction_output(210, 1);
}
-void board_reset(void)
+const char *get_system_type(void)
{
- /* Hit BCSR.SW_RESET[RESET] */
- bcsr->swreset = 0x0000;
+ return "Alchemy Mirage";
}
+#endif
+
+#ifdef CONFIG_MIPS_DB1550
+char irq_tab_alchemy[][5] __initdata = {
+ [11] = { -1, AU1550_PCI_INTC, 0xff, 0xff, 0xff }, /* IDSEL 11 - on-board HPT371 */
+ [12] = { -1, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD, AU1550_PCI_INTA }, /* IDSEL 12 - PCI slot 2 (left) */
+ [13] = { -1, AU1550_PCI_INTA, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD }, /* IDSEL 13 - PCI slot 1 (right) */
+};
+#endif
+
+#if defined(CONFIG_MIPS_BOSPORUS) || defined(CONFIG_MIPS_MIRAGE)
+static void mips_softreset(void)
+{
+ asm volatile ("jr\t%0" : : "r"(0xbfc00000));
+}
+
+#else
+
+const char *get_system_type(void)
+{
+ return "Alchemy Db1x00";
+}
+#endif
void __init board_setup(void)
{
- u32 pin_func = 0;
+ unsigned long bcsr1, bcsr2;
+ u32 pin_func;
char *argptr;
+ bcsr1 = DB1000_BCSR_PHYS_ADDR;
+ bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS;
+
+ pin_func = 0;
+
+#ifdef CONFIG_MIPS_DB1000
+ printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n");
+#endif
+#ifdef CONFIG_MIPS_DB1500
+ printk(KERN_INFO "AMD Alchemy Au1500/Db1500 Board\n");
+#endif
+#ifdef CONFIG_MIPS_DB1100
+ printk(KERN_INFO "AMD Alchemy Au1100/Db1100 Board\n");
+#endif
+#ifdef CONFIG_MIPS_BOSPORUS
+ au1xxx_override_eth_cfg(0, &eth0_pdata);
+
+ printk(KERN_INFO "AMD Alchemy Bosporus Board\n");
+#endif
+#ifdef CONFIG_MIPS_MIRAGE
+ printk(KERN_INFO "AMD Alchemy Mirage Board\n");
+#endif
+#ifdef CONFIG_MIPS_DB1550
+ printk(KERN_INFO "AMD Alchemy Au1550/Db1550 Board\n");
+
+ bcsr1 = DB1550_BCSR_PHYS_ADDR;
+ bcsr2 = DB1550_BCSR_PHYS_ADDR + DB1550_BCSR_HEXLED_OFS;
+#endif
+
+ /* initialize board register space */
+ bcsr_init(bcsr1, bcsr2);
+
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
@@ -89,11 +190,10 @@ void __init board_setup(void)
pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF;
au_writel(pin_func, SYS_PINFUNC);
/* Power off until the driver is in use */
- bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
- bcsr->resets |= BCSR_RESETS_IRDA_MODE_OFF;
- au_sync();
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
+ BCSR_RESETS_IRDA_MODE_OFF);
#endif
- bcsr->pcmcia = 0x0000; /* turn off PCMCIA power */
+ bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */
/* Enable GPIO[31:0] inputs */
alchemy_gpio1_input_enable();
@@ -120,26 +220,53 @@ void __init board_setup(void)
* be part of the audio driver.
*/
alchemy_gpio_direction_output(209, 1);
-#endif
- au_sync();
-
-#ifdef CONFIG_MIPS_DB1000
- printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1500
- printk(KERN_INFO "AMD Alchemy Au1500/Db1500 Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1100
- printk(KERN_INFO "AMD Alchemy Au1100/Db1100 Board\n");
+ pm_power_off = mirage_power_off;
+ _machine_halt = mirage_power_off;
+ _machine_restart = (void(*)(char *))mips_softreset;
#endif
+
#ifdef CONFIG_MIPS_BOSPORUS
- printk(KERN_INFO "AMD Alchemy Bosporus Board\n");
+ pm_power_off = bosporus_power_off;
+ _machine_halt = bosporus_power_off;
+ _machine_restart = (void(*)(char *))mips_softreset;
#endif
-#ifdef CONFIG_MIPS_MIRAGE
- printk(KERN_INFO "AMD Alchemy Mirage Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1550
- printk(KERN_INFO "AMD Alchemy Au1550/Db1550 Board\n");
+ au_sync();
+}
+
+static int __init db1x00_init_irq(void)
+{
+#if defined(CONFIG_MIPS_MIRAGE)
+ set_irq_type(AU1500_GPIO7_INT, IRQF_TRIGGER_RISING); /* TS pendown */
+#elif defined(CONFIG_MIPS_DB1550)
+ set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ set_irq_type(AU1550_GPIO3_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ set_irq_type(AU1550_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ set_irq_type(AU1550_GPIO21_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ set_irq_type(AU1550_GPIO22_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
+#elif defined(CONFIG_MIPS_DB1500)
+ set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
+#elif defined(CONFIG_MIPS_DB1100)
+ set_irq_type(AU1100_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ set_irq_type(AU1100_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ set_irq_type(AU1100_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ set_irq_type(AU1100_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ set_irq_type(AU1100_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ set_irq_type(AU1100_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
+#elif defined(CONFIG_MIPS_DB1000)
+ set_irq_type(AU1000_GPIO0_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ set_irq_type(AU1000_GPIO3_INT, IRQF_TRIGGER_LOW); /* CD1# */
+ set_irq_type(AU1000_GPIO2_INT, IRQF_TRIGGER_LOW); /* CARD0# */
+ set_irq_type(AU1000_GPIO5_INT, IRQF_TRIGGER_LOW); /* CARD1# */
+ set_irq_type(AU1000_GPIO1_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ set_irq_type(AU1000_GPIO4_INT, IRQF_TRIGGER_LOW); /* STSCHG1# */
#endif
+ return 0;
}
+arch_initcall(db1x00_init_irq);
diff --git a/arch/mips/alchemy/devboards/db1x00/irqmap.c b/arch/mips/alchemy/devboards/db1x00/irqmap.c
deleted file mode 100644
index 0b09025087c6..000000000000
--- a/arch/mips/alchemy/devboards/db1x00/irqmap.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-#ifdef CONFIG_MIPS_DB1500
-char irq_tab_alchemy[][5] __initdata = {
- [12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT371 */
- [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
-};
-#endif
-
-#ifdef CONFIG_MIPS_BOSPORUS
-char irq_tab_alchemy[][5] __initdata = {
- [11] = { -1, INTA, INTB, INTX, INTX }, /* IDSEL 11 - miniPCI */
- [12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - SN1741 */
- [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
-};
-#endif
-
-#ifdef CONFIG_MIPS_MIRAGE
-char irq_tab_alchemy[][5] __initdata = {
- [11] = { -1, INTD, INTX, INTX, INTX }, /* IDSEL 11 - SMI VGX */
- [12] = { -1, INTX, INTX, INTC, INTX }, /* IDSEL 12 - PNX1300 */
- [13] = { -1, INTA, INTB, INTX, INTX }, /* IDSEL 13 - miniPCI */
-};
-#endif
-
-#ifdef CONFIG_MIPS_DB1550
-char irq_tab_alchemy[][5] __initdata = {
- [11] = { -1, INTC, INTX, INTX, INTX }, /* IDSEL 11 - on-board HPT371 */
- [12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
- [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
-};
-#endif
-
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
-
-#ifndef CONFIG_MIPS_MIRAGE
-#ifdef CONFIG_MIPS_DB1550
- { AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
- { AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
-#else
- { AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 Fully_Interted# */
- { AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 STSCHG# */
- { AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
-
- { AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 Fully_Interted# */
- { AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 STSCHG# */
- { AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
-#endif
-#else
- { AU1000_GPIO_7, IRQF_TRIGGER_RISING, 0 }, /* touchscreen pen down */
-#endif
-
-};
-
-void __init board_init_irq(void)
-{
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
-}
diff --git a/arch/mips/alchemy/devboards/db1x00/platform.c b/arch/mips/alchemy/devboards/db1x00/platform.c
new file mode 100644
index 000000000000..62e2a96fe119
--- /dev/null
+++ b/arch/mips/alchemy/devboards/db1x00/platform.c
@@ -0,0 +1,118 @@
+/*
+ * DBAu1xxx board platform device registration
+ *
+ * Copyright (C) 2009 Manuel Lauss
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-au1x00/au1xxx.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include "../platform.h"
+
+/* DB1xxx PCMCIA interrupt sources:
+ * CD0/1 GPIO0/3
+ * STSCHG0/1 GPIO1/4
+ * CARD0/1 GPIO2/5
+ * Db1550: 0/1, 21/22, 3/5
+ */
+
+#define DB1XXX_HAS_PCMCIA
+#define F_SWAPPED (bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT)
+
+#if defined(CONFIG_MIPS_DB1000)
+#define DB1XXX_PCMCIA_CD0 AU1000_GPIO0_INT
+#define DB1XXX_PCMCIA_STSCHG0 AU1000_GPIO1_INT
+#define DB1XXX_PCMCIA_CARD0 AU1000_GPIO2_INT
+#define DB1XXX_PCMCIA_CD1 AU1000_GPIO3_INT
+#define DB1XXX_PCMCIA_STSCHG1 AU1000_GPIO4_INT
+#define DB1XXX_PCMCIA_CARD1 AU1000_GPIO5_INT
+#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#elif defined(CONFIG_MIPS_DB1100)
+#define DB1XXX_PCMCIA_CD0 AU1100_GPIO0_INT
+#define DB1XXX_PCMCIA_STSCHG0 AU1100_GPIO1_INT
+#define DB1XXX_PCMCIA_CARD0 AU1100_GPIO2_INT
+#define DB1XXX_PCMCIA_CD1 AU1100_GPIO3_INT
+#define DB1XXX_PCMCIA_STSCHG1 AU1100_GPIO4_INT
+#define DB1XXX_PCMCIA_CARD1 AU1100_GPIO5_INT
+#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#elif defined(CONFIG_MIPS_DB1500)
+#define DB1XXX_PCMCIA_CD0 AU1500_GPIO0_INT
+#define DB1XXX_PCMCIA_STSCHG0 AU1500_GPIO1_INT
+#define DB1XXX_PCMCIA_CARD0 AU1500_GPIO2_INT
+#define DB1XXX_PCMCIA_CD1 AU1500_GPIO3_INT
+#define DB1XXX_PCMCIA_STSCHG1 AU1500_GPIO4_INT
+#define DB1XXX_PCMCIA_CARD1 AU1500_GPIO5_INT
+#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#elif defined(CONFIG_MIPS_DB1550)
+#define DB1XXX_PCMCIA_CD0 AU1550_GPIO0_INT
+#define DB1XXX_PCMCIA_STSCHG0 AU1550_GPIO21_INT
+#define DB1XXX_PCMCIA_CARD0 AU1550_GPIO3_INT
+#define DB1XXX_PCMCIA_CD1 AU1550_GPIO1_INT
+#define DB1XXX_PCMCIA_STSCHG1 AU1550_GPIO22_INT
+#define DB1XXX_PCMCIA_CARD1 AU1550_GPIO5_INT
+#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#else
+/* other board: no PCMCIA */
+#undef DB1XXX_HAS_PCMCIA
+#undef F_SWAPPED
+#define F_SWAPPED 0
+#if defined(CONFIG_MIPS_BOSPORUS)
+#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */
+#define BOARD_FLASH_WIDTH 2 /* 16-bits */
+#elif defined(CONFIG_MIPS_MIRAGE)
+#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
+#define BOARD_FLASH_WIDTH 4 /* 32-bits */
+#endif
+#endif
+
+static int __init db1xxx_dev_init(void)
+{
+#ifdef DB1XXX_HAS_PCMCIA
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00001000 - 1,
+ DB1XXX_PCMCIA_CARD0,
+ DB1XXX_PCMCIA_CD0,
+ /*DB1XXX_PCMCIA_STSCHG0*/0,
+ 0,
+ 0);
+
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS + 0x00400000,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00440000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00400000,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00440000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00400000,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00401000 - 1,
+ DB1XXX_PCMCIA_CARD1,
+ DB1XXX_PCMCIA_CD1,
+ /*DB1XXX_PCMCIA_STSCHG1*/0,
+ 0,
+ 1);
+#endif
+ db1x_register_norflash(BOARD_FLASH_SIZE, BOARD_FLASH_WIDTH, F_SWAPPED);
+ return 0;
+}
+device_initcall(db1xxx_dev_init);
diff --git a/arch/mips/alchemy/devboards/pb1000/board_setup.c b/arch/mips/alchemy/devboards/pb1000/board_setup.c
index cd273545e810..28b8bd278a16 100644
--- a/arch/mips/alchemy/devboards/pb1000/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1000/board_setup.c
@@ -31,11 +31,7 @@
#include <asm/mach-pb1x00/pb1000.h>
#include <prom.h>
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1000_GPIO_15, IRQF_TRIGGER_LOW, 0 },
-};
-
+#include "../platform.h"
const char *get_system_type(void)
{
@@ -46,19 +42,18 @@ void board_reset(void)
{
}
-void __init board_init_irq(void)
-{
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
-}
-
void __init board_setup(void)
{
u32 pin_func, static_cfg0;
u32 sys_freqctrl, sys_clksrc;
u32 prid = read_c0_prid();
+ char *argptr;
+
+ sys_freqctrl = 0;
+ sys_clksrc = 0;
+ argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
- char *argptr = prom_getcmdline();
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
@@ -193,3 +188,16 @@ void __init board_setup(void)
break;
}
}
+
+static int __init pb1000_init_irq(void)
+{
+ set_irq_type(AU1000_GPIO15_INT, IRQF_TRIGGER_LOW);
+ return 0;
+}
+arch_initcall(pb1000_init_irq);
+
+static int __init pb1000_device_init(void)
+{
+ return db1x_register_norflash(8 * 1024 * 1024, 4, 0);
+}
+device_initcall(pb1000_device_init);
diff --git a/arch/mips/alchemy/devboards/pb1100/Makefile b/arch/mips/alchemy/devboards/pb1100/Makefile
index c586dd7e91dc..60cf5b914730 100644
--- a/arch/mips/alchemy/devboards/pb1100/Makefile
+++ b/arch/mips/alchemy/devboards/pb1100/Makefile
@@ -5,4 +5,5 @@
# Makefile for the Alchemy Semiconductor Pb1100 board.
#
-obj-y := board_setup.o
+obj-y := board_setup.o platform.o
+
diff --git a/arch/mips/alchemy/devboards/pb1100/board_setup.c b/arch/mips/alchemy/devboards/pb1100/board_setup.c
index 61263081ef58..e0bd855f899b 100644
--- a/arch/mips/alchemy/devboards/pb1100/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1100/board_setup.c
@@ -29,19 +29,11 @@
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-pb1x00/pb1100.h>
+#include <asm/mach-db1x00/bcsr.h>
#include <prom.h>
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1000_GPIO_9, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card Fully_Inserted# */
- { AU1000_GPIO_10, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card STSCHG# */
- { AU1000_GPIO_11, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card IRQ# */
- { AU1000_GPIO_13, IRQF_TRIGGER_LOW, 0 }, /* DC_IRQ# */
-};
-
-
const char *get_system_type(void)
{
return "Alchemy Pb1100";
@@ -49,13 +41,7 @@ const char *get_system_type(void)
void board_reset(void)
{
- /* Hit BCSR.RST_VDDI[SOFT_RESET] */
- au_writel(0x00000000, PB1100_RST_VDDI);
-}
-
-void __init board_init_irq(void)
-{
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+ bcsr_write(BCSR_SYSTEM, 0);
}
void __init board_setup(void)
@@ -63,6 +49,9 @@ void __init board_setup(void)
volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
char *argptr;
+ bcsr_init(DB1000_BCSR_PHYS_ADDR,
+ DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
+
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
@@ -155,3 +144,14 @@ void __init board_setup(void)
au_sync();
}
}
+
+static int __init pb1100_init_irq(void)
+{
+ set_irq_type(AU1100_GPIO9_INT, IRQF_TRIGGER_LOW); /* PCCD# */
+ set_irq_type(AU1100_GPIO10_INT, IRQF_TRIGGER_LOW); /* PCSTSCHG# */
+ set_irq_type(AU1100_GPIO11_INT, IRQF_TRIGGER_LOW); /* PCCard# */
+ set_irq_type(AU1100_GPIO13_INT, IRQF_TRIGGER_LOW); /* DC_IRQ# */
+
+ return 0;
+}
+arch_initcall(pb1100_init_irq);
diff --git a/arch/mips/alchemy/devboards/pb1100/platform.c b/arch/mips/alchemy/devboards/pb1100/platform.c
new file mode 100644
index 000000000000..bfc5ab6a121c
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1100/platform.c
@@ -0,0 +1,50 @@
+/*
+ * Pb1100 board platform device registration
+ *
+ * Copyright (C) 2009 Manuel Lauss
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#include "../platform.h"
+
+static int __init pb1100_dev_init(void)
+{
+ int swapped;
+
+ /* PCMCIA. single socket, identical to Pb1500 */
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00001000 - 1,
+ AU1100_GPIO11_INT, /* card */
+ AU1100_GPIO9_INT, /* insert */
+ /*AU1100_GPIO10_INT*/0, /* stschg */
+ 0, /* eject */
+ 0); /* id */
+
+ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT;
+ db1x_register_norflash(64 * 1024 * 1024, 4, swapped);
+
+ return 0;
+}
+device_initcall(pb1100_dev_init);
diff --git a/arch/mips/alchemy/devboards/pb1200/Makefile b/arch/mips/alchemy/devboards/pb1200/Makefile
index c8c3a99fb68a..2ea9b02ef09f 100644
--- a/arch/mips/alchemy/devboards/pb1200/Makefile
+++ b/arch/mips/alchemy/devboards/pb1200/Makefile
@@ -2,6 +2,6 @@
# Makefile for the Alchemy Semiconductor Pb1200/DBAu1200 boards.
#
-obj-y := board_setup.o irqmap.o platform.o
+obj-y := board_setup.o platform.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/alchemy/devboards/pb1200/board_setup.c b/arch/mips/alchemy/devboards/pb1200/board_setup.c
index 94e6b7e7753d..2cf59e728247 100644
--- a/arch/mips/alchemy/devboards/pb1200/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1200/board_setup.c
@@ -25,11 +25,23 @@
*/
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/sched.h>
-#include <prom.h>
-#include <au1xxx.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+#ifdef CONFIG_MIPS_PB1200
+#include <asm/mach-pb1x00/pb1200.h>
+#endif
+
+#ifdef CONFIG_MIPS_DB1200
+#include <asm/mach-db1x00/db1200.h>
+#define PB1200_INT_BEGIN DB1200_INT_BEGIN
+#define PB1200_INT_END DB1200_INT_END
+#endif
+
+#include <prom.h>
const char *get_system_type(void)
{
@@ -38,14 +50,18 @@ const char *get_system_type(void)
void board_reset(void)
{
- bcsr->resets = 0;
- bcsr->system = 0;
+ bcsr_write(BCSR_RESETS, 0);
+ bcsr_write(BCSR_SYSTEM, 0);
}
void __init board_setup(void)
{
char *argptr;
+ printk(KERN_INFO "AMD Alchemy Pb1200 Board\n");
+ bcsr_init(PB1200_BCSR_PHYS_ADDR,
+ PB1200_BCSR_PHYS_ADDR + PB1200_BCSR_HEXLED_OFS);
+
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
@@ -82,7 +98,7 @@ void __init board_setup(void)
u32 pin_func;
/* Select SMBus in CPLD */
- bcsr->resets &= ~BCSR_RESETS_PCS0MUX;
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC0MUX, 0);
pin_func = au_readl(SYS_PINFUNC);
au_sync();
@@ -116,38 +132,54 @@ void __init board_setup(void)
/*
* The Pb1200 development board uses external MUX for PSC0 to
- * support SMB/SPI. bcsr->resets bit 12: 0=SMB 1=SPI
+ * support SMB/SPI. bcsr_resets bit 12: 0=SMB 1=SPI
*/
#ifdef CONFIG_I2C_AU1550
- bcsr->resets &= ~BCSR_RESETS_PCS0MUX;
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_PSC0MUX, 0);
#endif
au_sync();
+}
-#ifdef CONFIG_MIPS_PB1200
- printk(KERN_INFO "AMD Alchemy Pb1200 Board\n");
-#endif
-#ifdef CONFIG_MIPS_DB1200
- printk(KERN_INFO "AMD Alchemy Db1200 Board\n");
-#endif
+static int __init pb1200_init_irq(void)
+{
+ /* We have a problem with CPLD rev 3. */
+ if (BCSR_WHOAMI_CPLD(bcsr_read(BCSR_WHOAMI)) <= 3) {
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "Pb1200 must be at CPLD rev 4. Please have Pb1200\n");
+ printk(KERN_ERR "updated to latest revision. This software will\n");
+ printk(KERN_ERR "not work on anything less than CPLD rev 4.\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ printk(KERN_ERR "WARNING!!!\n");
+ panic("Game over. Your score is 0.");
+ }
+
+ set_irq_type(AU1200_GPIO7_INT, IRQF_TRIGGER_LOW);
+ bcsr_init_irq(PB1200_INT_BEGIN, PB1200_INT_END, AU1200_GPIO7_INT);
+
+ return 0;
}
+arch_initcall(pb1200_init_irq);
+
int board_au1200fb_panel(void)
{
- BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
- int p;
-
- p = bcsr->switches;
- p >>= 8;
- p &= 0x0F;
- return p;
+ return (bcsr_read(BCSR_SWITCHES) >> 8) & 0x0f;
}
int board_au1200fb_panel_init(void)
{
/* Apply power */
- BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-
- bcsr->board |= BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD | BCSR_BOARD_LCDBL;
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
+ BCSR_BOARD_LCDBL);
/* printk(KERN_DEBUG "board_au1200fb_panel_init()\n"); */
return 0;
}
@@ -155,10 +187,8 @@ int board_au1200fb_panel_init(void)
int board_au1200fb_panel_shutdown(void)
{
/* Remove power */
- BCSR *bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-
- bcsr->board &= ~(BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
- BCSR_BOARD_LCDBL);
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD |
+ BCSR_BOARD_LCDBL, 0);
/* printk(KERN_DEBUG "board_au1200fb_panel_shutdown()\n"); */
return 0;
}
diff --git a/arch/mips/alchemy/devboards/pb1200/irqmap.c b/arch/mips/alchemy/devboards/pb1200/irqmap.c
deleted file mode 100644
index fe47498da280..000000000000
--- a/arch/mips/alchemy/devboards/pb1200/irqmap.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xxx irq map table
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-#ifdef CONFIG_MIPS_PB1200
-#include <asm/mach-pb1x00/pb1200.h>
-#endif
-
-#ifdef CONFIG_MIPS_DB1200
-#include <asm/mach-db1x00/db1200.h>
-#define PB1200_INT_BEGIN DB1200_INT_BEGIN
-#define PB1200_INT_END DB1200_INT_END
-#endif
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- /* This is external interrupt cascade */
- { AU1000_GPIO_7, IRQF_TRIGGER_LOW, 0 },
-};
-
-
-/*
- * Support for External interrupts on the Pb1200 Development platform.
- */
-
-static void pb1200_cascade_handler(unsigned int irq, struct irq_desc *d)
-{
- unsigned short bisr = bcsr->int_status;
-
- for ( ; bisr; bisr &= bisr - 1)
- generic_handle_irq(PB1200_INT_BEGIN + __ffs(bisr));
-}
-
-/* NOTE: both the enable and mask bits must be cleared, otherwise the
- * CPLD generates tons of spurious interrupts (at least on the DB1200).
- */
-static void pb1200_mask_irq(unsigned int irq_nr)
-{
- bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
- bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
- au_sync();
-}
-
-static void pb1200_maskack_irq(unsigned int irq_nr)
-{
- bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
- bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
- bcsr->int_status = 1 << (irq_nr - PB1200_INT_BEGIN); /* ack */
- au_sync();
-}
-
-static void pb1200_unmask_irq(unsigned int irq_nr)
-{
- bcsr->intset = 1 << (irq_nr - PB1200_INT_BEGIN);
- bcsr->intset_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
- au_sync();
-}
-
-static struct irq_chip pb1200_cpld_irq_type = {
-#ifdef CONFIG_MIPS_PB1200
- .name = "Pb1200 Ext",
-#endif
-#ifdef CONFIG_MIPS_DB1200
- .name = "Db1200 Ext",
-#endif
- .mask = pb1200_mask_irq,
- .mask_ack = pb1200_maskack_irq,
- .unmask = pb1200_unmask_irq,
-};
-
-void __init board_init_irq(void)
-{
- unsigned int irq;
-
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
-
-#ifdef CONFIG_MIPS_PB1200
- /* We have a problem with CPLD rev 3. */
- if (((bcsr->whoami & BCSR_WHOAMI_CPLD) >> 4) <= 3) {
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "Pb1200 must be at CPLD rev 4. Please have Pb1200\n");
- printk(KERN_ERR "updated to latest revision. This software will\n");
- printk(KERN_ERR "not work on anything less than CPLD rev 4.\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- printk(KERN_ERR "WARNING!!!\n");
- panic("Game over. Your score is 0.");
- }
-#endif
- /* mask & disable & ack all */
- bcsr->intclr_mask = 0xffff;
- bcsr->intclr = 0xffff;
- bcsr->int_status = 0xffff;
- au_sync();
-
- for (irq = PB1200_INT_BEGIN; irq <= PB1200_INT_END; irq++)
- set_irq_chip_and_handler_name(irq, &pb1200_cpld_irq_type,
- handle_level_irq, "level");
-
- set_irq_chained_handler(AU1000_GPIO_7, pb1200_cascade_handler);
-}
diff --git a/arch/mips/alchemy/devboards/pb1200/platform.c b/arch/mips/alchemy/devboards/pb1200/platform.c
index b93dff4a6789..14e889fffcc5 100644
--- a/arch/mips/alchemy/devboards/pb1200/platform.c
+++ b/arch/mips/alchemy/devboards/pb1200/platform.c
@@ -26,27 +26,30 @@
#include <asm/mach-au1x00/au1xxx.h>
#include <asm/mach-au1x00/au1100_mmc.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#include "../platform.h"
static int mmc_activity;
static void pb1200mmc0_set_power(void *mmc_host, int state)
{
if (state)
- bcsr->board |= BCSR_BOARD_SD0PWR;
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD0PWR);
else
- bcsr->board &= ~BCSR_BOARD_SD0PWR;
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD0PWR, 0);
- au_sync_delay(1);
+ msleep(1);
}
static int pb1200mmc0_card_readonly(void *mmc_host)
{
- return (bcsr->status & BCSR_STATUS_SD0WP) ? 1 : 0;
+ return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD0WP) ? 1 : 0;
}
static int pb1200mmc0_card_inserted(void *mmc_host)
{
- return (bcsr->sig_status & BCSR_INT_SD0INSERT) ? 1 : 0;
+ return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD0INSERT) ? 1 : 0;
}
static void pb1200_mmcled_set(struct led_classdev *led,
@@ -54,10 +57,10 @@ static void pb1200_mmcled_set(struct led_classdev *led,
{
if (brightness != LED_OFF) {
if (++mmc_activity == 1)
- bcsr->disk_leds &= ~(1 << 8);
+ bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
} else {
if (--mmc_activity == 0)
- bcsr->disk_leds |= (1 << 8);
+ bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
}
}
@@ -65,27 +68,25 @@ static struct led_classdev pb1200mmc_led = {
.brightness_set = pb1200_mmcled_set,
};
-#ifndef CONFIG_MIPS_DB1200
static void pb1200mmc1_set_power(void *mmc_host, int state)
{
if (state)
- bcsr->board |= BCSR_BOARD_SD1PWR;
+ bcsr_mod(BCSR_BOARD, 0, BCSR_BOARD_SD1PWR);
else
- bcsr->board &= ~BCSR_BOARD_SD1PWR;
+ bcsr_mod(BCSR_BOARD, BCSR_BOARD_SD1PWR, 0);
- au_sync_delay(1);
+ msleep(1);
}
static int pb1200mmc1_card_readonly(void *mmc_host)
{
- return (bcsr->status & BCSR_STATUS_SD1WP) ? 1 : 0;
+ return (bcsr_read(BCSR_STATUS) & BCSR_STATUS_SD1WP) ? 1 : 0;
}
static int pb1200mmc1_card_inserted(void *mmc_host)
{
- return (bcsr->sig_status & BCSR_INT_SD1INSERT) ? 1 : 0;
+ return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT) ? 1 : 0;
}
-#endif
const struct au1xmmc_platform_data au1xmmc_platdata[2] = {
[0] = {
@@ -95,7 +96,6 @@ const struct au1xmmc_platform_data au1xmmc_platdata[2] = {
.cd_setup = NULL, /* use poll-timer in driver */
.led = &pb1200mmc_led,
},
-#ifndef CONFIG_MIPS_DB1200
[1] = {
.set_power = pb1200mmc1_set_power,
.card_inserted = pb1200mmc1_card_inserted,
@@ -103,7 +103,6 @@ const struct au1xmmc_platform_data au1xmmc_platdata[2] = {
.cd_setup = NULL, /* use poll-timer in driver */
.led = &pb1200mmc_led,
},
-#endif
};
static struct resource ide_resources[] = {
@@ -169,8 +168,36 @@ static struct platform_device *board_platform_devices[] __initdata = {
static int __init board_register_devices(void)
{
+ int swapped;
+
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00001000 - 1,
+ PB1200_PC0_INT,
+ PB1200_PC0_INSERT_INT,
+ /*PB1200_PC0_STSCHG_INT*/0,
+ PB1200_PC0_EJECT_INT,
+ 0);
+
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS + 0x00800000,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00840000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00800000,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00840000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00800000,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00801000 - 1,
+ PB1200_PC1_INT,
+ PB1200_PC1_INSERT_INT,
+ /*PB1200_PC1_STSCHG_INT*/0,
+ PB1200_PC1_EJECT_INT,
+ 1);
+
+ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
+ db1x_register_norflash(128 * 1024 * 1024, 2, swapped);
+
return platform_add_devices(board_platform_devices,
ARRAY_SIZE(board_platform_devices));
}
-
-arch_initcall(board_register_devices);
+device_initcall(board_register_devices);
diff --git a/arch/mips/alchemy/devboards/pb1500/Makefile b/arch/mips/alchemy/devboards/pb1500/Makefile
index 173b419a7479..c29545d2c8ba 100644
--- a/arch/mips/alchemy/devboards/pb1500/Makefile
+++ b/arch/mips/alchemy/devboards/pb1500/Makefile
@@ -5,4 +5,5 @@
# Makefile for the Alchemy Semiconductor Pb1500 board.
#
-obj-y := board_setup.o
+obj-y := board_setup.o platform.o
+
diff --git a/arch/mips/alchemy/devboards/pb1500/board_setup.c b/arch/mips/alchemy/devboards/pb1500/board_setup.c
index d7a56569e7ed..3f0c92cb35bd 100644
--- a/arch/mips/alchemy/devboards/pb1500/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1500/board_setup.c
@@ -29,22 +29,14 @@
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-pb1x00/pb1500.h>
+#include <asm/mach-db1x00/bcsr.h>
#include <prom.h>
char irq_tab_alchemy[][5] __initdata = {
- [12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT370 */
- [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
-};
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
- { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
+ [12] = { -1, AU1500_PCI_INTA, 0xff, 0xff, 0xff }, /* IDSEL 12 - HPT370 */
+ [13] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, AU1500_PCI_INTC, AU1500_PCI_INTD }, /* IDSEL 13 - PCI slot */
};
@@ -55,13 +47,7 @@ const char *get_system_type(void)
void board_reset(void)
{
- /* Hit BCSR.RST_VDDI[SOFT_RESET] */
- au_writel(0x00000000, PB1500_RST_VDDI);
-}
-
-void __init board_init_irq(void)
-{
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+ bcsr_write(BCSR_SYSTEM, 0);
}
void __init board_setup(void)
@@ -70,6 +56,9 @@ void __init board_setup(void)
u32 sys_freqctrl, sys_clksrc;
char *argptr;
+ bcsr_init(DB1000_BCSR_PHYS_ADDR,
+ DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS);
+
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
@@ -163,3 +152,18 @@ void __init board_setup(void)
au_sync();
}
}
+
+static int __init pb1500_init_irq(void)
+{
+ set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */
+ set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */
+ set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */
+ set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
+ set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
+
+ return 0;
+}
+arch_initcall(pb1500_init_irq);
diff --git a/arch/mips/alchemy/devboards/pb1500/platform.c b/arch/mips/alchemy/devboards/pb1500/platform.c
new file mode 100644
index 000000000000..529acb789254
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1500/platform.c
@@ -0,0 +1,49 @@
+/*
+ * Pb1500 board platform device registration
+ *
+ * Copyright (C) 2009 Manuel Lauss
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#include "../platform.h"
+
+static int __init pb1500_dev_init(void)
+{
+ int swapped;
+
+ /* PCMCIA. single socket, identical to Pb1500 */
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00001000 - 1,
+ AU1500_GPIO11_INT, /* card */
+ AU1500_GPIO9_INT, /* insert */
+ /*AU1500_GPIO10_INT*/0, /* stschg */
+ 0, /* eject */
+ 0); /* id */
+
+ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT;
+ db1x_register_norflash(64 * 1024 * 1024, 4, swapped);
+
+ return 0;
+}
+device_initcall(pb1500_dev_init);
diff --git a/arch/mips/alchemy/devboards/pb1550/Makefile b/arch/mips/alchemy/devboards/pb1550/Makefile
index cff95bcdb2ca..86b410b5d19a 100644
--- a/arch/mips/alchemy/devboards/pb1550/Makefile
+++ b/arch/mips/alchemy/devboards/pb1550/Makefile
@@ -5,4 +5,5 @@
# Makefile for the Alchemy Semiconductor Pb1550 board.
#
-obj-y := board_setup.o
+obj-y := board_setup.o platform.o
+
diff --git a/arch/mips/alchemy/devboards/pb1550/board_setup.c b/arch/mips/alchemy/devboards/pb1550/board_setup.c
index b6e9e7d247a3..0d060c3dd6f8 100644
--- a/arch/mips/alchemy/devboards/pb1550/board_setup.c
+++ b/arch/mips/alchemy/devboards/pb1550/board_setup.c
@@ -32,18 +32,15 @@
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1550.h>
+#include <asm/mach-db1x00/bcsr.h>
+#include <asm/mach-au1x00/gpio.h>
#include <prom.h>
char irq_tab_alchemy[][5] __initdata = {
- [12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
- [13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
-};
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
- { AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
+ [12] = { -1, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD, AU1550_PCI_INTA }, /* IDSEL 12 - PCI slot 2 (left) */
+ [13] = { -1, AU1550_PCI_INTA, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD }, /* IDSEL 13 - PCI slot 1 (right) */
};
const char *get_system_type(void)
@@ -53,22 +50,19 @@ const char *get_system_type(void)
void board_reset(void)
{
- /* Hit BCSR.SYSTEM[RESET] */
- au_writew(au_readw(0xAF00001C) & ~BCSR_SYSTEM_RESET, 0xAF00001C);
-}
-
-void __init board_init_irq(void)
-{
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
+ bcsr_write(BCSR_SYSTEM, 0);
}
void __init board_setup(void)
{
u32 pin_func;
-
-#ifdef CONFIG_SERIAL_8250_CONSOLE
char *argptr;
+
+ bcsr_init(PB1550_BCSR_PHYS_ADDR,
+ PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
+
argptr = prom_getcmdline();
+#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
@@ -76,6 +70,8 @@ void __init board_setup(void)
}
#endif
+ alchemy_gpio2_enable();
+
/*
* Enable PSC1 SYNC for AC'97. Normaly done in audio driver,
* but it is board specific code, so put it here.
@@ -85,8 +81,21 @@ void __init board_setup(void)
pin_func |= SYS_PF_MUST_BE_SET | SYS_PF_PSC1_S1;
au_writel(pin_func, SYS_PINFUNC);
- au_writel(0, (u32)bcsr | 0x10); /* turn off PCMCIA power */
- au_sync();
+ bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */
printk(KERN_INFO "AMD Alchemy Pb1550 Board\n");
}
+
+static int __init pb1550_init_irq(void)
+{
+ set_irq_type(AU1550_GPIO0_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1550_GPIO1_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1550_GPIO201_205_INT, IRQF_TRIGGER_HIGH);
+
+ /* enable both PCMCIA card irqs in the shared line */
+ alchemy_gpio2_enable_int(201);
+ alchemy_gpio2_enable_int(202);
+
+ return 0;
+}
+arch_initcall(pb1550_init_irq);
diff --git a/arch/mips/alchemy/devboards/pb1550/platform.c b/arch/mips/alchemy/devboards/pb1550/platform.c
new file mode 100644
index 000000000000..461339166a4e
--- /dev/null
+++ b/arch/mips/alchemy/devboards/pb1550/platform.c
@@ -0,0 +1,69 @@
+/*
+ * Pb1550 board platform device registration
+ *
+ * Copyright (C) 2009 Manuel Lauss
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-pb1x00/pb1550.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#include "../platform.h"
+
+static int __init pb1550_dev_init(void)
+{
+ int swapped;
+
+ /* Pb1550, like all others, also has statuschange irqs; however they're
+ * wired up on one of the Au1550's shared GPIO201_205 line, which also
+ * services the PCMCIA card interrupts. So we ignore statuschange and
+ * use the GPIO201_205 exclusively for card interrupts, since a) pcmcia
+ * drivers are used to shared irqs and b) statuschange isn't really use-
+ * ful anyway.
+ */
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00040000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00001000 - 1,
+ AU1550_GPIO201_205_INT,
+ AU1550_GPIO0_INT,
+ 0,
+ 0,
+ 0);
+
+ db1x_register_pcmcia_socket(PCMCIA_ATTR_PSEUDO_PHYS + 0x00800000,
+ PCMCIA_ATTR_PSEUDO_PHYS + 0x00840000 - 1,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00800000,
+ PCMCIA_MEM_PSEUDO_PHYS + 0x00840000 - 1,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00800000,
+ PCMCIA_IO_PSEUDO_PHYS + 0x00801000 - 1,
+ AU1550_GPIO201_205_INT,
+ AU1550_GPIO1_INT,
+ 0,
+ 0,
+ 1);
+
+ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_PB1550_SWAPBOOT;
+ db1x_register_norflash(128 * 1024 * 1024, 4, swapped);
+
+ return 0;
+}
+device_initcall(pb1550_dev_init);
diff --git a/arch/mips/alchemy/devboards/platform.c b/arch/mips/alchemy/devboards/platform.c
new file mode 100644
index 000000000000..febf4e042343
--- /dev/null
+++ b/arch/mips/alchemy/devboards/platform.c
@@ -0,0 +1,222 @@
+/*
+ * devoard misc stuff.
+ */
+
+#include <linux/init.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/physmap.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+
+#include <asm/reboot.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+static void db1x_power_off(void)
+{
+ bcsr_write(BCSR_RESETS, 0);
+ bcsr_write(BCSR_SYSTEM, BCSR_SYSTEM_PWROFF | BCSR_SYSTEM_RESET);
+}
+
+static void db1x_reset(char *c)
+{
+ bcsr_write(BCSR_RESETS, 0);
+ bcsr_write(BCSR_SYSTEM, 0);
+}
+
+static int __init db1x_poweroff_setup(void)
+{
+ if (!pm_power_off)
+ pm_power_off = db1x_power_off;
+ if (!_machine_halt)
+ _machine_halt = db1x_power_off;
+ if (!_machine_restart)
+ _machine_restart = db1x_reset;
+
+ return 0;
+}
+late_initcall(db1x_poweroff_setup);
+
+/* register a pcmcia socket */
+int __init db1x_register_pcmcia_socket(unsigned long pseudo_attr_start,
+ unsigned long pseudo_attr_end,
+ unsigned long pseudo_mem_start,
+ unsigned long pseudo_mem_end,
+ unsigned long pseudo_io_start,
+ unsigned long pseudo_io_end,
+ int card_irq,
+ int cd_irq,
+ int stschg_irq,
+ int eject_irq,
+ int id)
+{
+ int cnt, i, ret;
+ struct resource *sr;
+ struct platform_device *pd;
+
+ cnt = 5;
+ if (eject_irq)
+ cnt++;
+ if (stschg_irq)
+ cnt++;
+
+ sr = kzalloc(sizeof(struct resource) * cnt, GFP_KERNEL);
+ if (!sr)
+ return -ENOMEM;
+
+ pd = platform_device_alloc("db1xxx_pcmcia", id);
+ if (!pd) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sr[0].name = "pseudo-attr";
+ sr[0].flags = IORESOURCE_MEM;
+ sr[0].start = pseudo_attr_start;
+ sr[0].end = pseudo_attr_end;
+
+ sr[1].name = "pseudo-mem";
+ sr[1].flags = IORESOURCE_MEM;
+ sr[1].start = pseudo_mem_start;
+ sr[1].end = pseudo_mem_end;
+
+ sr[2].name = "pseudo-io";
+ sr[2].flags = IORESOURCE_MEM;
+ sr[2].start = pseudo_io_start;
+ sr[2].end = pseudo_io_end;
+
+ sr[3].name = "insert";
+ sr[3].flags = IORESOURCE_IRQ;
+ sr[3].start = sr[3].end = cd_irq;
+
+ sr[4].name = "card";
+ sr[4].flags = IORESOURCE_IRQ;
+ sr[4].start = sr[4].end = card_irq;
+
+ i = 5;
+ if (stschg_irq) {
+ sr[i].name = "insert";
+ sr[i].flags = IORESOURCE_IRQ;
+ sr[i].start = sr[i].end = cd_irq;
+ i++;
+ }
+ if (eject_irq) {
+ sr[i].name = "eject";
+ sr[i].flags = IORESOURCE_IRQ;
+ sr[i].start = sr[i].end = eject_irq;
+ }
+
+ pd->resource = sr;
+ pd->num_resources = cnt;
+
+ ret = platform_device_add(pd);
+ if (!ret)
+ return 0;
+
+ platform_device_put(pd);
+out:
+ kfree(sr);
+ return ret;
+}
+
+#define YAMON_SIZE 0x00100000
+#define YAMON_ENV_SIZE 0x00040000
+
+int __init db1x_register_norflash(unsigned long size, int width,
+ int swapped)
+{
+ struct physmap_flash_data *pfd;
+ struct platform_device *pd;
+ struct mtd_partition *parts;
+ struct resource *res;
+ int ret, i;
+
+ if (size < (8 * 1024 * 1024))
+ return -EINVAL;
+
+ ret = -ENOMEM;
+ parts = kzalloc(sizeof(struct mtd_partition) * 5, GFP_KERNEL);
+ if (!parts)
+ goto out;
+
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ if (!res)
+ goto out1;
+
+ pfd = kzalloc(sizeof(struct physmap_flash_data), GFP_KERNEL);
+ if (!pfd)
+ goto out2;
+
+ pd = platform_device_alloc("physmap-flash", 0);
+ if (!pd)
+ goto out3;
+
+ /* NOR flash ends at 0x20000000, regardless of size */
+ res->start = 0x20000000 - size;
+ res->end = 0x20000000 - 1;
+ res->flags = IORESOURCE_MEM;
+
+ /* partition setup. Most Develboards have a switch which allows
+ * to swap the physical locations of the 2 NOR flash banks.
+ */
+ i = 0;
+ if (!swapped) {
+ /* first NOR chip */
+ parts[i].offset = 0;
+ parts[i].name = "User FS";
+ parts[i].size = size / 2;
+ i++;
+ }
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "User FS 2";
+ parts[i].size = (size / 2) - (0x20000000 - 0x1fc00000);
+ i++;
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "YAMON";
+ parts[i].size = YAMON_SIZE;
+ parts[i].mask_flags = MTD_WRITEABLE;
+ i++;
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "raw kernel";
+ parts[i].size = 0x00400000 - YAMON_SIZE - YAMON_ENV_SIZE;
+ i++;
+
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "YAMON Env";
+ parts[i].size = YAMON_ENV_SIZE;
+ parts[i].mask_flags = MTD_WRITEABLE;
+ i++;
+
+ if (swapped) {
+ parts[i].offset = MTDPART_OFS_APPEND;
+ parts[i].name = "User FS";
+ parts[i].size = size / 2;
+ i++;
+ }
+
+ pfd->width = width;
+ pfd->parts = parts;
+ pfd->nr_parts = 5;
+
+ pd->dev.platform_data = pfd;
+ pd->resource = res;
+ pd->num_resources = 1;
+
+ ret = platform_device_add(pd);
+ if (!ret)
+ return ret;
+
+ platform_device_put(pd);
+out3:
+ kfree(pfd);
+out2:
+ kfree(res);
+out1:
+ kfree(parts);
+out:
+ return ret;
+}
diff --git a/arch/mips/alchemy/devboards/platform.h b/arch/mips/alchemy/devboards/platform.h
new file mode 100644
index 000000000000..828c54e31157
--- /dev/null
+++ b/arch/mips/alchemy/devboards/platform.h
@@ -0,0 +1,21 @@
+#ifndef _DEVBOARD_PLATFORM_H_
+#define _DEVBOARD_PLATFORM_H_
+
+#include <linux/init.h>
+
+int __init db1x_register_pcmcia_socket(unsigned long pseudo_attr_start,
+ unsigned long pseudo_attr_len,
+ unsigned long pseudo_mem_start,
+ unsigned long pseudo_mem_end,
+ unsigned long pseudo_io_start,
+ unsigned long pseudo_io_end,
+ int card_irq,
+ int cd_irq,
+ int stschg_irq,
+ int eject_irq,
+ int id);
+
+int __init db1x_register_norflash(unsigned long size, int width,
+ int swapped);
+
+#endif
diff --git a/arch/mips/alchemy/devboards/prom.c b/arch/mips/alchemy/devboards/prom.c
index 0042bd6b1d7d..b30df5c97ad3 100644
--- a/arch/mips/alchemy/devboards/prom.c
+++ b/arch/mips/alchemy/devboards/prom.c
@@ -60,3 +60,8 @@ void __init prom_init(void)
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
+
+void prom_putchar(unsigned char c)
+{
+ alchemy_uart_putchar(UART0_PHYS_ADDR, c);
+}
diff --git a/arch/mips/alchemy/mtx-1/Makefile b/arch/mips/alchemy/mtx-1/Makefile
index 7c67b3d33bec..4a53815b3c6c 100644
--- a/arch/mips/alchemy/mtx-1/Makefile
+++ b/arch/mips/alchemy/mtx-1/Makefile
@@ -6,7 +6,7 @@
# Makefile for 4G Systems MTX-1 board.
#
-lib-y := init.o board_setup.o irqmap.o
+lib-y := init.o board_setup.o
obj-y := platform.o
EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 45b61c9b82b9..e2838c6185d3 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -30,20 +30,41 @@
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
+char irq_tab_alchemy[][5] __initdata = {
+ [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 00 - AdapterA-Slot0 (top) */
+ [1] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */
+ [2] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 02 - AdapterB-Slot0 (top) */
+ [3] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */
+ [4] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 04 - AdapterC-Slot0 (top) */
+ [5] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */
+ [6] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 06 - AdapterD-Slot0 (top) */
+ [7] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */
+};
+
extern int (*board_pci_idsel)(unsigned int devsel, int assert);
int mtx1_pci_idsel(unsigned int devsel, int assert);
-void board_reset(void)
+static void mtx1_reset(char *c)
{
/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
au_writel(0x00000000, 0xAE00001C);
}
+static void mtx1_power_off(void)
+{
+ printk(KERN_ALERT "It's now safe to remove power\n");
+ while (1)
+ asm volatile (".set mips3 ; wait ; .set mips1");
+}
+
void __init board_setup(void)
{
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -86,6 +107,10 @@ void __init board_setup(void)
alchemy_gpio_direction_output(211, 1); /* green on */
alchemy_gpio_direction_output(212, 0); /* red off */
+ pm_power_off = mtx1_power_off;
+ _machine_halt = mtx1_power_off;
+ _machine_restart = mtx1_reset;
+
printk(KERN_INFO "4G Systems MTX-1 Board\n");
}
@@ -109,3 +134,15 @@ mtx1_pci_idsel(unsigned int devsel, int assert)
au_sync_udelay(1);
return 1;
}
+
+static int __init mtx1_init_irq(void)
+{
+ set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
+ set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
+
+ return 0;
+}
+arch_initcall(mtx1_init_irq);
diff --git a/arch/mips/alchemy/mtx-1/init.c b/arch/mips/alchemy/mtx-1/init.c
index 5e871c8d9e96..f8d25575fa05 100644
--- a/arch/mips/alchemy/mtx-1/init.c
+++ b/arch/mips/alchemy/mtx-1/init.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <asm/bootinfo.h>
+#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
@@ -58,3 +59,8 @@ void __init prom_init(void)
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
+
+void prom_putchar(unsigned char c)
+{
+ alchemy_uart_putchar(UART0_PHYS_ADDR, c);
+}
diff --git a/arch/mips/alchemy/mtx-1/irqmap.c b/arch/mips/alchemy/mtx-1/irqmap.c
deleted file mode 100644
index f1ab12ab3433..000000000000
--- a/arch/mips/alchemy/mtx-1/irqmap.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/mach-au1x00/au1000.h>
-
-char irq_tab_alchemy[][5] __initdata = {
- [0] = { -1, INTA, INTA, INTX, INTX }, /* IDSEL 00 - AdapterA-Slot0 (top) */
- [1] = { -1, INTB, INTA, INTX, INTX }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */
- [2] = { -1, INTC, INTD, INTX, INTX }, /* IDSEL 02 - AdapterB-Slot0 (top) */
- [3] = { -1, INTD, INTC, INTX, INTX }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */
- [4] = { -1, INTA, INTB, INTX, INTX }, /* IDSEL 04 - AdapterC-Slot0 (top) */
- [5] = { -1, INTB, INTA, INTX, INTX }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */
- [6] = { -1, INTC, INTD, INTX, INTX }, /* IDSEL 06 - AdapterD-Slot0 (top) */
- [7] = { -1, INTD, INTC, INTX, INTX }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */
-};
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
- { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
-};
-
-
-void __init board_init_irq(void)
-{
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
-}
diff --git a/arch/mips/alchemy/xxs1500/Makefile b/arch/mips/alchemy/xxs1500/Makefile
index db3c526f64d8..4dc81d794cb8 100644
--- a/arch/mips/alchemy/xxs1500/Makefile
+++ b/arch/mips/alchemy/xxs1500/Makefile
@@ -5,4 +5,6 @@
# Makefile for MyCable XXS1500 board.
#
-lib-y := init.o board_setup.o irqmap.o
+lib-y := init.o board_setup.o platform.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index 4de2d48caed8..7956afa78c4b 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -25,18 +25,28 @@
#include <linux/gpio.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/delay.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
-void board_reset(void)
+static void xxs1500_reset(char *c)
{
/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
au_writel(0x00000000, 0xAE00001C);
}
+static void xxs1500_power_off(void)
+{
+ printk(KERN_ALERT "It's now safe to remove power\n");
+ while (1)
+ asm volatile (".set mips3 ; wait ; .set mips1");
+}
+
void __init board_setup(void)
{
u32 pin_func;
@@ -51,6 +61,10 @@ void __init board_setup(void)
}
#endif
+ pm_power_off = xxs1500_power_off;
+ _machine_halt = xxs1500_power_off;
+ _machine_restart = xxs1500_reset;
+
alchemy_gpio1_input_enable();
alchemy_gpio2_enable();
@@ -68,22 +82,6 @@ void __init board_setup(void)
/* Enable DTR = USB power up */
au_writel(0x01, UART3_ADDR + UART_MCR); /* UART_MCR_DTR is 0x01??? */
-#ifdef CONFIG_PCMCIA_XXS1500
- /* GPIO 0, 1, and 4 are inputs */
- alchemy_gpio_direction_input(0);
- alchemy_gpio_direction_input(1);
- alchemy_gpio_direction_input(4);
-
- /* GPIO2 208/9/10/11 are inputs */
- alchemy_gpio_direction_input(208);
- alchemy_gpio_direction_input(209);
- alchemy_gpio_direction_input(210);
- alchemy_gpio_direction_input(211);
-
- /* Turn off power */
- alchemy_gpio_direction_output(214, 0);
-#endif
-
#ifdef CONFIG_PCI
#if defined(__MIPSEB__)
au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG);
@@ -92,3 +90,23 @@ void __init board_setup(void)
#endif
#endif
}
+
+static int __init xxs1500_init_irq(void)
+{
+ set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH);
+ set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO207_INT, IRQF_TRIGGER_LOW);
+
+ set_irq_type(AU1500_GPIO0_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO1_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO2_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO3_INT, IRQF_TRIGGER_LOW);
+ set_irq_type(AU1500_GPIO4_INT, IRQF_TRIGGER_LOW); /* CF irq */
+ set_irq_type(AU1500_GPIO5_INT, IRQF_TRIGGER_LOW);
+
+ return 0;
+}
+arch_initcall(xxs1500_init_irq);
diff --git a/arch/mips/alchemy/xxs1500/init.c b/arch/mips/alchemy/xxs1500/init.c
index 456fa142c093..15125c2fda7d 100644
--- a/arch/mips/alchemy/xxs1500/init.c
+++ b/arch/mips/alchemy/xxs1500/init.c
@@ -30,6 +30,7 @@
#include <linux/kernel.h>
#include <asm/bootinfo.h>
+#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
@@ -56,3 +57,8 @@ void __init prom_init(void)
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
+
+void prom_putchar(unsigned char c)
+{
+ alchemy_uart_putchar(UART0_PHYS_ADDR, c);
+}
diff --git a/arch/mips/alchemy/xxs1500/irqmap.c b/arch/mips/alchemy/xxs1500/irqmap.c
deleted file mode 100644
index 0f0f3012e5fd..000000000000
--- a/arch/mips/alchemy/xxs1500/irqmap.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- * Au1xxx irq map table
- *
- * Copyright 2003 Embedded Edge, LLC
- * dan@embeddededge.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <asm/mach-au1x00/au1000.h>
-
-struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
- { AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
- { AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
- { AU1500_GPIO_207, IRQF_TRIGGER_LOW, 0 },
-
- { AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
- { AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
- { AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 },
- { AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 },
- { AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* CF interrupt */
- { AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 },
-};
-
-void __init board_init_irq(void)
-{
- au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
-}
diff --git a/arch/mips/alchemy/xxs1500/platform.c b/arch/mips/alchemy/xxs1500/platform.c
new file mode 100644
index 000000000000..c14dcaa95311
--- /dev/null
+++ b/arch/mips/alchemy/xxs1500/platform.c
@@ -0,0 +1,63 @@
+/*
+ * XXS1500 board platform device registration
+ *
+ * Copyright (C) 2009 Manuel Lauss
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-au1x00/au1000.h>
+
+static struct resource xxs1500_pcmcia_res[] = {
+ {
+ .name = "pseudo-io",
+ .flags = IORESOURCE_MEM,
+ .start = PCMCIA_IO_PSEUDO_PHYS,
+ .end = PCMCIA_IO_PSEUDO_PHYS + 0x00040000 - 1,
+ },
+ {
+ .name = "pseudo-attr",
+ .flags = IORESOURCE_MEM,
+ .start = PCMCIA_ATTR_PSEUDO_PHYS,
+ .end = PCMCIA_ATTR_PSEUDO_PHYS + 0x00040000 - 1,
+ },
+ {
+ .name = "pseudo-mem",
+ .flags = IORESOURCE_MEM,
+ .start = PCMCIA_IO_PSEUDO_PHYS,
+ .end = PCMCIA_IO_PSEUDO_PHYS + 0x00040000 - 1,
+ },
+};
+
+static struct platform_device xxs1500_pcmcia_dev = {
+ .name = "xxs1500_pcmcia",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(xxs1500_pcmcia_res),
+ .resource = xxs1500_pcmcia_res,
+};
+
+static struct platform_device *xxs1500_devs[] __initdata = {
+ &xxs1500_pcmcia_dev,
+};
+
+static int __init xxs1500_dev_init(void)
+{
+ return platform_add_devices(xxs1500_devs,
+ ARRAY_SIZE(xxs1500_devs));
+}
+device_initcall(xxs1500_dev_init);
diff --git a/arch/mips/configs/db1200_defconfig b/arch/mips/configs/db1200_defconfig
index dabf03032e06..51abc6e10f7c 100644
--- a/arch/mips/configs/db1200_defconfig
+++ b/arch/mips/configs/db1200_defconfig
@@ -1,78 +1,100 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.20
-# Tue Feb 20 21:47:25 2007
+# Linux kernel version: 2.6.32-rc5
+# Mon Nov 2 21:09:28 2009
#
CONFIG_MIPS=y
#
# Machine selection
#
-CONFIG_ZONE_DMA=y
CONFIG_MACH_ALCHEMY=y
-# CONFIG_MIPS_MTX1 is not set
-# CONFIG_MIPS_BOSPORUS is not set
-# CONFIG_MIPS_PB1000 is not set
-# CONFIG_MIPS_PB1100 is not set
-# CONFIG_MIPS_PB1500 is not set
-# CONFIG_MIPS_PB1550 is not set
-# CONFIG_MIPS_PB1200 is not set
-# CONFIG_MIPS_DB1000 is not set
-# CONFIG_MIPS_DB1100 is not set
-# CONFIG_MIPS_DB1500 is not set
-# CONFIG_MIPS_DB1550 is not set
-CONFIG_MIPS_DB1200=y
-# CONFIG_MIPS_MIRAGE is not set
+# CONFIG_AR7 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
# CONFIG_MIPS_MALTA is not set
-# CONFIG_WR_PPMC is not set
# CONFIG_MIPS_SIM is not set
-# CONFIG_MOMENCO_JAGUAR_ATX is not set
-# CONFIG_MIPS_XXS1500 is not set
+# CONFIG_NEC_MARKEINS is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
-# CONFIG_MACH_VR41XX is not set
+# CONFIG_PMC_MSP is not set
# CONFIG_PMC_YOSEMITE is not set
-# CONFIG_MARKEINS is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
# CONFIG_SGI_IP32 is not set
-# CONFIG_SIBYTE_BIGSUR is not set
-# CONFIG_SIBYTE_SWARM is not set
-# CONFIG_SIBYTE_SENTOSA is not set
-# CONFIG_SIBYTE_RHONE is not set
-# CONFIG_SIBYTE_CARMEL is not set
-# CONFIG_SIBYTE_LITTLESUR is not set
# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CARMEL is not set
# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SWARM is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SNI_RM is not set
-# CONFIG_TOSHIBA_JMR3927 is not set
-# CONFIG_TOSHIBA_RBTX4927 is not set
-# CONFIG_TOSHIBA_RBTX4938 is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+CONFIG_ALCHEMY_GPIO_AU1000=y
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+# CONFIG_MIPS_MTX1 is not set
+# CONFIG_MIPS_BOSPORUS is not set
+# CONFIG_MIPS_DB1000 is not set
+# CONFIG_MIPS_DB1100 is not set
+CONFIG_MIPS_DB1200=y
+# CONFIG_MIPS_DB1500 is not set
+# CONFIG_MIPS_DB1550 is not set
+# CONFIG_MIPS_MIRAGE is not set
+# CONFIG_MIPS_PB1000 is not set
+# CONFIG_MIPS_PB1100 is not set
+# CONFIG_MIPS_PB1200 is not set
+# CONFIG_MIPS_PB1500 is not set
+# CONFIG_MIPS_PB1550 is not set
+# CONFIG_MIPS_XXS1500 is not set
+CONFIG_SOC_AU1200=y
+CONFIG_SOC_AU1X00=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_ARCH_SUPPORTS_OPROFILE=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_CLOCKEVENTS=y
CONFIG_GENERIC_TIME=y
-CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
-# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
+CONFIG_CEVT_R4K_LIB=y
+CONFIG_CSRC_R4K_LIB=y
CONFIG_DMA_COHERENT=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
CONFIG_MIPS_DISABLE_OBSOLETE_IDE=y
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_GPIO=y
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
-CONFIG_SOC_AU1200=y
-CONFIG_SOC_AU1X00=y
+CONFIG_IRQ_CPU=y
CONFIG_MIPS_L1_CACHE_SHIFT=5
#
# CPU selection
#
+# CONFIG_CPU_LOONGSON2E is not set
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
@@ -85,6 +107,7 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
+# CONFIG_CPU_R5500 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R8000 is not set
@@ -92,11 +115,13 @@ CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
+# CONFIG_CPU_CAVIUM_OCTEON is not set
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPSR1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
#
# Kernel type
@@ -106,180 +131,204 @@ CONFIG_32BIT=y
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_32KB is not set
# CONFIG_PAGE_SIZE_64KB is not set
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
-# CONFIG_MIPS_VPE_LOADER is not set
CONFIG_64BIT_PHYS_ADDR=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_IRQ_PROBE=y
CONFIG_CPU_SUPPORTS_HIGHMEM=y
CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
-# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
CONFIG_SPLIT_PTLOCK_CPUS=4
-# CONFIG_RESOURCES_64BIT is not set
-CONFIG_ZONE_DMA_FLAG=1
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=0
+CONFIG_VIRT_TO_BUS=y
+CONFIG_HAVE_MLOCK=y
+CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
# CONFIG_HZ_48 is not set
-# CONFIG_HZ_100 is not set
+CONFIG_HZ_100=y
# CONFIG_HZ_128 is not set
# CONFIG_HZ_250 is not set
# CONFIG_HZ_256 is not set
-CONFIG_HZ_1000=y
+# CONFIG_HZ_1000 is not set
# CONFIG_HZ_1024 is not set
CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
-CONFIG_HZ=1000
+CONFIG_HZ=100
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_KEXEC is not set
+# CONFIG_SECCOMP is not set
CONFIG_LOCKDEP_SUPPORT=y
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
-# Code maturity level options
+# General setup
#
CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
-
-#
-# General setup
-#
-CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION="-db1200"
CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
-# CONFIG_IPC_NS is not set
CONFIG_SYSVIPC_SYSCTL=y
-# CONFIG_POSIX_MQUEUE is not set
+CONFIG_POSIX_MQUEUE=y
+CONFIG_POSIX_MQUEUE_SYSCTL=y
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_UTS_NS is not set
# CONFIG_AUDIT is not set
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_SYSFS_DEPRECATED=y
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_EXACT=y
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=18
+# CONFIG_GROUP_SCHED is not set
+# CONFIG_CGROUPS is not set
+# CONFIG_SYSFS_DEPRECATED_V2 is not set
# CONFIG_RELAY is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
-CONFIG_SYSCTL_SYSCALL=y
-CONFIG_KALLSYMS=y
-# CONFIG_KALLSYMS_EXTRA_PASS is not set
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
CONFIG_HOTPLUG=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
+# CONFIG_PCSPKR_PLATFORM is not set
CONFIG_BASE_FULL=y
CONFIG_FUTEX=y
CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
CONFIG_SHMEM=y
-CONFIG_SLAB=y
-CONFIG_VM_EVENT_COUNTERS=y
-CONFIG_RT_MUTEXES=y
-# CONFIG_TINY_SHMEM is not set
-CONFIG_BASE_SMALL=0
+CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
#
-# Loadable module support
+# GCOV-based kernel profiling
#
+# CONFIG_SLOW_WORK is not set
+# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
+# CONFIG_MODULE_FORCE_LOAD is not set
CONFIG_MODULE_UNLOAD=y
# CONFIG_MODULE_FORCE_UNLOAD is not set
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_KMOD=y
-
-#
-# Block layer
-#
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
CONFIG_BLOCK=y
-# CONFIG_LBD is not set
-# CONFIG_BLK_DEV_IO_TRACE is not set
-# CONFIG_LSF is not set
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
-# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_FREEZER is not set
#
# Bus options (PCI, PCMCIA, EISA, ISA, TC)
#
+# CONFIG_ARCH_SUPPORTS_MSI is not set
CONFIG_MMU=y
-
-#
-# PCCARD (PCMCIA/CardBus) support
-#
-CONFIG_PCCARD=m
+CONFIG_PCCARD=y
# CONFIG_PCMCIA_DEBUG is not set
-CONFIG_PCMCIA=m
+CONFIG_PCMCIA=y
CONFIG_PCMCIA_LOAD_CIS=y
-CONFIG_PCMCIA_IOCTL=y
+# CONFIG_PCMCIA_IOCTL is not set
#
# PC-card bridges
#
-CONFIG_PCMCIA_AU1X00=m
-
-#
-# PCI Hotplug Support
-#
+# CONFIG_PCMCIA_AU1X00 is not set
+CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
#
# Executable file formats
#
CONFIG_BINFMT_ELF=y
-# CONFIG_BINFMT_MISC is not set
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+# CONFIG_HAVE_AOUT is not set
+CONFIG_BINFMT_MISC=y
CONFIG_TRAD_SIGNALS=y
#
# Power management options
#
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PM is not set
-
-#
-# Networking
-#
CONFIG_NET=y
#
# Networking options
#
-# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
-# CONFIG_PACKET_MMAP is not set
+CONFIG_PACKET_MMAP=y
CONFIG_UNIX=y
-CONFIG_XFRM=y
-CONFIG_XFRM_USER=m
-# CONFIG_XFRM_SUB_POLICY is not set
-CONFIG_XFRM_MIGRATE=y
-CONFIG_NET_KEY=y
-CONFIG_NET_KEY_MIGRATE=y
+# CONFIG_NET_KEY is not set
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_FIB_HASH=y
-# CONFIG_IP_PNP is not set
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+# CONFIG_IP_PNP_BOOTP is not set
+# CONFIG_IP_PNP_RARP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
# CONFIG_IP_MROUTE is not set
@@ -290,107 +339,25 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=y
-CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
CONFIG_DEFAULT_TCP_CONG="cubic"
-CONFIG_TCP_MD5SIG=y
-
-#
-# IP: Virtual Server Configuration
-#
-# CONFIG_IP_VS is not set
+# CONFIG_TCP_MD5SIG is not set
# CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
-CONFIG_NETWORK_SECMARK=y
-CONFIG_NETFILTER=y
-# CONFIG_NETFILTER_DEBUG is not set
-
-#
-# Core Netfilter Configuration
-#
-# CONFIG_NETFILTER_NETLINK is not set
-CONFIG_NF_CONNTRACK_ENABLED=m
-CONFIG_NF_CONNTRACK_SUPPORT=y
-# CONFIG_IP_NF_CONNTRACK_SUPPORT is not set
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CT_ACCT=y
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_GRE=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CONNTRACK_AMANDA=m
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_H323=m
-CONFIG_NF_CONNTRACK_IRC=m
-# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
-CONFIG_NF_CONNTRACK_PPTP=m
-CONFIG_NF_CONNTRACK_SANE=m
-CONFIG_NF_CONNTRACK_SIP=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NETFILTER_XTABLES=m
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
-CONFIG_NETFILTER_XT_TARGET_MARK=m
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
-CONFIG_NETFILTER_XT_TARGET_NFLOG=m
-CONFIG_NETFILTER_XT_TARGET_SECMARK=m
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_COMMENT=m
-CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
-CONFIG_NETFILTER_XT_MATCH_DCCP=m
-CONFIG_NETFILTER_XT_MATCH_DSCP=m
-CONFIG_NETFILTER_XT_MATCH_ESP=m
-CONFIG_NETFILTER_XT_MATCH_HELPER=m
-CONFIG_NETFILTER_XT_MATCH_LENGTH=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MARK=m
-CONFIG_NETFILTER_XT_MATCH_POLICY=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
-CONFIG_NETFILTER_XT_MATCH_QUOTA=m
-CONFIG_NETFILTER_XT_MATCH_REALM=m
-CONFIG_NETFILTER_XT_MATCH_SCTP=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
-CONFIG_NETFILTER_XT_MATCH_STRING=m
-CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
-
-#
-# IP: Netfilter Configuration
-#
-CONFIG_NF_CONNTRACK_IPV4=m
-CONFIG_NF_CONNTRACK_PROC_COMPAT=y
-# CONFIG_IP_NF_QUEUE is not set
-# CONFIG_IP_NF_IPTABLES is not set
-# CONFIG_IP_NF_ARPTABLES is not set
-
-#
-# DCCP Configuration (EXPERIMENTAL)
-#
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
-
-#
-# SCTP Configuration (EXPERIMENTAL)
-#
# CONFIG_IP_SCTP is not set
-
-#
-# TIPC Configuration (EXPERIMENTAL)
-#
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
+# CONFIG_NET_DSA is not set
# CONFIG_VLAN_8021Q is not set
# CONFIG_DECNET is not set
# CONFIG_LLC2 is not set
@@ -400,21 +367,26 @@ CONFIG_NF_CONNTRACK_PROC_COMPAT=y
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
+# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
-CONFIG_NET_CLS_ROUTE=y
+# CONFIG_DCB is not set
#
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
-# CONFIG_IEEE80211 is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_WIRELESS is not set
+# CONFIG_MAC80211_RC_DEFAULT_PID is not set
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
+# CONFIG_WIMAX is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
#
# Device Drivers
@@ -423,25 +395,23 @@ CONFIG_NET_CLS_ROUTE=y
#
# Generic Driver Options
#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_DEVTMPFS is not set
CONFIG_STANDALONE=y
CONFIG_PREVENT_FIRMWARE_BUILD=y
CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
-
-#
-# Connector - unified userspace <-> kernelspace linker
-#
# CONFIG_CONNECTOR is not set
-
-#
-# Memory Technology Devices (MTD)
-#
CONFIG_MTD=y
# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_TESTS is not set
# CONFIG_MTD_CONCAT is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_REDBOOT_PARTS is not set
-# CONFIG_MTD_CMDLINE_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AR7_PARTS is not set
#
# User Modules And Translation Layers
@@ -454,6 +424,7 @@ CONFIG_MTD_BLOCK=y
# CONFIG_INFTL is not set
# CONFIG_RFD_FTL is not set
# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
#
# RAM/ROM/Flash chip drivers
@@ -462,6 +433,9 @@ CONFIG_MTD_CFI=y
# CONFIG_MTD_JEDECPROBE is not set
CONFIG_MTD_GEN_PROBE=y
# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+# CONFIG_MTD_CFI_NOSWAP is not set
+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set
+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
CONFIG_MTD_MAP_BANK_WIDTH_2=y
CONFIG_MTD_MAP_BANK_WIDTH_4=y
@@ -479,19 +453,21 @@ CONFIG_MTD_CFI_UTIL=y
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
-# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
-# CONFIG_MTD_PHYSMAP is not set
-CONFIG_MTD_ALCHEMY=y
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
# CONFIG_MTD_PLATRAM is not set
#
# Self-contained MTD device drivers
#
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_M25P80 is not set
+# CONFIG_MTD_SST25L is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
@@ -503,224 +479,129 @@ CONFIG_MTD_ALCHEMY=y
# CONFIG_MTD_DOC2000 is not set
# CONFIG_MTD_DOC2001 is not set
# CONFIG_MTD_DOC2001PLUS is not set
-
-#
-# NAND Flash Device Drivers
-#
CONFIG_MTD_NAND=y
# CONFIG_MTD_NAND_VERIFY_WRITE is not set
# CONFIG_MTD_NAND_ECC_SMC is not set
+# CONFIG_MTD_NAND_MUSEUM_IDS is not set
CONFIG_MTD_NAND_IDS=y
# CONFIG_MTD_NAND_AU1550 is not set
# CONFIG_MTD_NAND_DISKONCHIP is not set
# CONFIG_MTD_NAND_NANDSIM is not set
-
-#
-# OneNAND Flash Device Drivers
-#
+CONFIG_MTD_NAND_PLATFORM=y
+# CONFIG_MTD_ALAUDA is not set
# CONFIG_MTD_ONENAND is not set
#
-# Parallel port support
-#
-# CONFIG_PARPORT is not set
-
-#
-# Plug and Play support
+# LPDDR flash memory drivers
#
-# CONFIG_PNPACPI is not set
+# CONFIG_MTD_LPDDR is not set
#
-# Block devices
+# UBI - Unsorted block images
#
+# CONFIG_MTD_UBI is not set
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
# CONFIG_BLK_DEV_NBD is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=16
-CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
-# CONFIG_BLK_DEV_INITRD is not set
+CONFIG_BLK_DEV_UB=y
+# CONFIG_BLK_DEV_RAM is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
-
-#
-# Misc devices
-#
-
-#
-# ATA/ATAPI/MFM/RLL support
-#
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
CONFIG_IDE=y
-CONFIG_IDE_MAX_HWIFS=4
-CONFIG_BLK_DEV_IDE=y
#
-# Please see Documentation/ide.txt for help/info on IDE drives
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
#
+CONFIG_IDE_XFER_MODE=y
+CONFIG_IDE_ATAPI=y
# CONFIG_BLK_DEV_IDE_SATA is not set
-CONFIG_BLK_DEV_IDEDISK=y
-CONFIG_IDEDISK_MULTI_MODE=y
-CONFIG_BLK_DEV_IDECS=m
-# CONFIG_BLK_DEV_IDECD is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+CONFIG_BLK_DEV_IDECS=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
# CONFIG_BLK_DEV_IDETAPE is not set
-# CONFIG_BLK_DEV_IDEFLOPPY is not set
-# CONFIG_BLK_DEV_IDESCSI is not set
-# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_IDE_TASK_IOCTL=y
+# CONFIG_IDE_PROC_FS is not set
#
# IDE chipset support/bugfixes
#
-CONFIG_IDE_GENERIC=y
+# CONFIG_IDE_GENERIC is not set
+# CONFIG_BLK_DEV_PLATFORM is not set
CONFIG_BLK_DEV_IDE_AU1XXX=y
CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA=y
# CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA is not set
-CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ=128
-# CONFIG_IDE_ARM is not set
# CONFIG_BLK_DEV_IDEDMA is not set
-# CONFIG_IDEDMA_AUTO is not set
-# CONFIG_BLK_DEV_HD is not set
#
# SCSI device support
#
# CONFIG_RAID_ATTRS is not set
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
+# CONFIG_SCSI is not set
+# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
-CONFIG_SCSI_PROC_FS=y
-
-#
-# SCSI support type (disk, tape, CD-ROM)
-#
-CONFIG_BLK_DEV_SD=y
-# CONFIG_CHR_DEV_ST is not set
-# CONFIG_CHR_DEV_OSST is not set
-CONFIG_BLK_DEV_SR=y
-# CONFIG_BLK_DEV_SR_VENDOR is not set
-CONFIG_CHR_DEV_SG=y
-# CONFIG_CHR_DEV_SCH is not set
-
-#
-# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
-#
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_CONSTANTS is not set
-# CONFIG_SCSI_LOGGING is not set
-CONFIG_SCSI_SCAN_ASYNC=y
-
-#
-# SCSI Transports
-#
-# CONFIG_SCSI_SPI_ATTRS is not set
-# CONFIG_SCSI_FC_ATTRS is not set
-# CONFIG_SCSI_ISCSI_ATTRS is not set
-# CONFIG_SCSI_SAS_ATTRS is not set
-# CONFIG_SCSI_SAS_LIBSAS is not set
-
-#
-# SCSI low-level drivers
-#
-# CONFIG_ISCSI_TCP is not set
-# CONFIG_SCSI_DEBUG is not set
-
-#
-# PCMCIA SCSI adapter support
-#
-# CONFIG_PCMCIA_AHA152X is not set
-# CONFIG_PCMCIA_FDOMAIN is not set
-# CONFIG_PCMCIA_NINJA_SCSI is not set
-# CONFIG_PCMCIA_QLOGIC is not set
-# CONFIG_PCMCIA_SYM53C500 is not set
-
-#
-# Serial ATA (prod) and Parallel ATA (experimental) drivers
-#
# CONFIG_ATA is not set
-
-#
-# Multi-device support (RAID and LVM)
-#
# CONFIG_MD is not set
-
-#
-# Fusion MPT device support
-#
-# CONFIG_FUSION is not set
-
-#
-# IEEE 1394 (FireWire) support
-#
-
-#
-# I2O device support
-#
-
-#
-# Network device support
-#
CONFIG_NETDEVICES=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
-
-#
-# PHY device support
-#
+# CONFIG_VETH is not set
# CONFIG_PHYLIB is not set
-
-#
-# Ethernet (10 or 100Mbit)
-#
CONFIG_NET_ETHERNET=y
-CONFIG_MII=m
+CONFIG_MII=y
+# CONFIG_AX88796 is not set
# CONFIG_MIPS_AU1X00_ENET is not set
-# CONFIG_SMC91X is not set
+CONFIG_SMC91X=y
# CONFIG_DM9000 is not set
-
-#
-# Ethernet (1000 Mbit)
-#
-
-#
-# Ethernet (10000 Mbit)
-#
-
-#
-# Token Ring devices
-#
-
-#
-# Wireless LAN (non-hamradio)
-#
-# CONFIG_NET_RADIO is not set
-
-#
-# PCMCIA network device support
-#
+# CONFIG_ENC28J60 is not set
+# CONFIG_ETHOC is not set
+# CONFIG_DNET is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
+# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
+# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
+# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851 is not set
+# CONFIG_KS8851_MLL is not set
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
# CONFIG_NET_PCMCIA is not set
-
-#
-# Wan interfaces
-#
# CONFIG_WAN is not set
# CONFIG_PPP is not set
# CONFIG_SLIP is not set
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
-
-#
-# ISDN subsystem
-#
# CONFIG_ISDN is not set
-
-#
-# Telephony Support
-#
# CONFIG_PHONE is not set
#
@@ -728,16 +609,16 @@ CONFIG_MII=m
#
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_POLLDEV is not set
#
# Userland interfaces
#
CONFIG_INPUT_MOUSEDEV=y
-CONFIG_INPUT_MOUSEDEV_PSAUX=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
# CONFIG_INPUT_JOYDEV is not set
-# CONFIG_INPUT_TSDEV is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_EVBUG is not set
@@ -747,28 +628,26 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
# CONFIG_INPUT_TOUCHSCREEN is not set
# CONFIG_INPUT_MISC is not set
#
# Hardware I/O ports
#
-CONFIG_SERIO=y
-# CONFIG_SERIO_I8042 is not set
-CONFIG_SERIO_SERPORT=y
-# CONFIG_SERIO_LIBPS2 is not set
-CONFIG_SERIO_RAW=y
+# CONFIG_SERIO is not set
# CONFIG_GAMEPORT is not set
#
# Character devices
#
CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y
CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_DEVKMEM=y
# CONFIG_SERIAL_NONSTANDARD is not set
-# CONFIG_AU1X00_GPIO is not set
#
# Serial drivers
@@ -776,33 +655,22 @@ CONFIG_VT_HW_CONSOLE_BINDING=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_SERIAL_8250_CS is not set
-CONFIG_SERIAL_8250_NR_UARTS=4
-CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_NR_UARTS=2
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
# CONFIG_SERIAL_8250_EXTENDED is not set
CONFIG_SERIAL_8250_AU1X00=y
#
# Non-8250 serial port support
#
+# CONFIG_SERIAL_MAX3100 is not set
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_UNIX98_PTYS=y
-CONFIG_LEGACY_PTYS=y
-CONFIG_LEGACY_PTY_COUNT=256
-
-#
-# IPMI
-#
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
# CONFIG_IPMI_HANDLER is not set
-
-#
-# Watchdog Cards
-#
-# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
-# CONFIG_RTC is not set
-# CONFIG_GEN_RTC is not set
-# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
#
@@ -811,225 +679,606 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_SYNCLINK_CS is not set
# CONFIG_CARDMAN_4000 is not set
# CONFIG_CARDMAN_4040 is not set
+# CONFIG_IPWIRELESS is not set
# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_CHARDEV=y
+# CONFIG_I2C_HELPER_AUTO is not set
#
-# TPM devices
+# I2C Algorithms
#
-# CONFIG_TCG_TPM is not set
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
#
-# I2C support
+# I2C Hardware Bus support
#
-# CONFIG_I2C is not set
#
-# SPI support
+# I2C system bus drivers (mostly embedded / system-on-chip)
#
-# CONFIG_SPI is not set
-# CONFIG_SPI_MASTER is not set
+CONFIG_I2C_AU1550=y
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_SIMTEC is not set
#
-# Dallas's 1-wire bus
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_STUB is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+CONFIG_SPI=y
+CONFIG_SPI_MASTER=y
+
+#
+# SPI Master Controller Drivers
+#
+CONFIG_SPI_AU1550=y
+CONFIG_SPI_BITBANG=y
+# CONFIG_SPI_GPIO is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_TLE62X0 is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+
+#
+# Memory mapped GPIO expanders:
#
-# CONFIG_W1 is not set
#
-# Hardware Monitoring support
+# I2C GPIO expanders:
#
-# CONFIG_HWMON is not set
-# CONFIG_HWMON_VID is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCF857X is not set
#
-# Multimedia devices
+# PCI GPIO expanders:
#
-# CONFIG_VIDEO_DEV is not set
#
-# Digital Video Broadcasting Devices
+# SPI GPIO expanders:
#
-# CONFIG_DVB is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MCP23S08 is not set
+# CONFIG_GPIO_MC33880 is not set
+
+#
+# AC97 GPIO expanders:
+#
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=y
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_ADM1021 is not set
+CONFIG_SENSORS_ADM1025=y
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+CONFIG_SENSORS_LM70=y
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_UCB1400_CORE is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_MC13783 is not set
+# CONFIG_AB3100_CORE is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_REGULATOR is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
#
-# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
# CONFIG_FB_SVGALIB is not set
# CONFIG_FB_MACMODES is not set
# CONFIG_FB_BACKLIGHT is not set
# CONFIG_FB_MODE_HELPERS is not set
# CONFIG_FB_TILEBLITTING is not set
+
+#
+# Frame buffer hardware drivers
+#
# CONFIG_FB_S1D13XXX is not set
CONFIG_FB_AU1200=y
# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
#
# Console display driver support
#
-CONFIG_VGA_CONSOLE=y
-# CONFIG_VGACON_SOFT_SCROLLBACK is not set
+# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
-# CONFIG_FRAMEBUFFER_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+CONFIG_FONTS=y
+# CONFIG_FONT_8x8 is not set
+CONFIG_FONT_8x16=y
+# CONFIG_FONT_6x11 is not set
+# CONFIG_FONT_7x14 is not set
+# CONFIG_FONT_PEARL_8x8 is not set
+# CONFIG_FONT_ACORN_8x8 is not set
+# CONFIG_FONT_MINI_4x6 is not set
+# CONFIG_FONT_SUN8x16 is not set
+# CONFIG_FONT_SUN12x22 is not set
+# CONFIG_FONT_10x18 is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+CONFIG_SND_VMASTER=y
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_AC97_CODEC=y
+# CONFIG_SND_DRIVERS is not set
+# CONFIG_SND_SPI is not set
+# CONFIG_SND_MIPS is not set
+# CONFIG_SND_USB is not set
+# CONFIG_SND_PCMCIA is not set
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_AC97_BUS=y
+CONFIG_SND_SOC_AU1XPSC=y
+CONFIG_SND_SOC_AU1XPSC_I2S=y
+CONFIG_SND_SOC_AU1XPSC_AC97=y
+CONFIG_SND_SOC_DB1200=y
+CONFIG_SND_SOC_I2C_AND_SPI=y
+# CONFIG_SND_SOC_ALL_CODECS is not set
+CONFIG_SND_SOC_AC97_CODEC=y
+CONFIG_SND_SOC_WM8731=y
+# CONFIG_SOUND_PRIME is not set
+CONFIG_AC97_BUS=y
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HIDRAW=y
+
+#
+# USB Input Devices
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+CONFIG_USB_HIDDEV=y
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_CHICONY is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LOGITECH is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_NTRIG is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_SAMSUNG is not set
+# CONFIG_HID_SONY is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_THRUSTMASTER is not set
+# CONFIG_HID_ZEROPLUS is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
#
-# Logo configuration
+# Miscellaneous USB options
#
-CONFIG_LOGO=y
-CONFIG_LOGO_LINUX_MONO=y
-CONFIG_LOGO_LINUX_VGA16=y
-CONFIG_LOGO_LINUX_CLUT224=y
-# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+# CONFIG_USB_DEVICEFS is not set
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_DYNAMIC_MINORS=y
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB is not set
+# CONFIG_USB_WUSB_CBAF is not set
#
-# Sound
+# USB Host Controller Drivers
#
-# CONFIG_SOUND is not set
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN_DESC is not set
+# CONFIG_USB_OHCI_BIG_ENDIAN_MMIO is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HWA_HCD is not set
#
-# HID Devices
+# USB Device Class drivers
#
-CONFIG_HID=y
-# CONFIG_HID_DEBUG is not set
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
#
-# USB support
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
#
-CONFIG_USB_ARCH_HAS_HCD=y
-CONFIG_USB_ARCH_HAS_OHCI=y
-CONFIG_USB_ARCH_HAS_EHCI=y
-# CONFIG_USB is not set
#
-# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+# also be needed; see USB_STORAGE Help for more info
+#
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
#
+# CONFIG_USB_MDC800 is not set
#
-# USB Gadget Support
+# USB port drivers
#
-CONFIG_USB_GADGET=m
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-# CONFIG_USB_GADGET_NET2280 is not set
-# CONFIG_USB_GADGET_PXA2XX is not set
-# CONFIG_USB_GADGET_GOKU is not set
-# CONFIG_USB_GADGET_LH7A40X is not set
-# CONFIG_USB_GADGET_OMAP is not set
-# CONFIG_USB_GADGET_AT91 is not set
-# CONFIG_USB_GADGET_DUMMY_HCD is not set
-# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_SERIAL is not set
#
-# MMC/SD Card support
+# USB Miscellaneous drivers
#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_VST is not set
+# CONFIG_USB_GADGET is not set
+
+#
+# OTG and related infrastructure
+#
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_NOP_USB_XCEIV is not set
CONFIG_MMC=y
# CONFIG_MMC_DEBUG is not set
-CONFIG_MMC_BLOCK=y
-CONFIG_MMC_AU1X=y
+# CONFIG_MMC_UNSAFE_RESUME is not set
#
-# LED devices
+# MMC/SD/SDIO Card Drivers
#
-# CONFIG_NEW_LEDS is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
#
-# LED drivers
+# MMC/SD/SDIO Host Controller Drivers
#
+# CONFIG_MMC_SDHCI is not set
+CONFIG_MMC_AU1X=y
+# CONFIG_MMC_AT91 is not set
+# CONFIG_MMC_ATMELMCI is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
#
-# LED Triggers
+# LED drivers
#
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2802 is not set
#
-# InfiniBand support
+# LED Triggers
#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_IDE_DISK is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
#
-# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
+# iptables trigger is under Netfilter config (LED target)
#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
#
-# Real Time Clock
+# RTC interfaces
#
-# CONFIG_RTC_CLASS is not set
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
#
-# DMA Engine support
+# I2C RTC drivers
#
-# CONFIG_DMA_ENGINE is not set
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+# CONFIG_RTC_DRV_FM3130 is not set
+# CONFIG_RTC_DRV_RX8581 is not set
+# CONFIG_RTC_DRV_RX8025 is not set
#
-# DMA Clients
+# SPI RTC drivers
#
+# CONFIG_RTC_DRV_M41T94 is not set
+# CONFIG_RTC_DRV_DS1305 is not set
+# CONFIG_RTC_DRV_DS1390 is not set
+# CONFIG_RTC_DRV_MAX6902 is not set
+# CONFIG_RTC_DRV_R9701 is not set
+# CONFIG_RTC_DRV_RS5C348 is not set
+# CONFIG_RTC_DRV_DS3234 is not set
+# CONFIG_RTC_DRV_PCF2123 is not set
#
-# DMA Devices
+# Platform RTC drivers
#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_V3020 is not set
#
-# Auxiliary Display support
+# on-CPU RTC drivers
#
+CONFIG_RTC_DRV_AU1XXX=y
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_UIO is not set
#
-# Virtualization
+# TI VLYNQ
#
+# CONFIG_STAGING is not set
#
# File systems
#
CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-# CONFIG_EXT2_FS_SECURITY is not set
+# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_XATTR=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-# CONFIG_EXT4DEV_FS is not set
-CONFIG_JBD=y
-# CONFIG_JBD_DEBUG is not set
-CONFIG_FS_MBCACHE=y
+# CONFIG_EXT3_FS is not set
+# CONFIG_EXT4_FS is not set
# CONFIG_REISERFS_FS is not set
-CONFIG_JFS_FS=y
-# CONFIG_JFS_POSIX_ACL is not set
-# CONFIG_JFS_SECURITY is not set
-# CONFIG_JFS_DEBUG is not set
-# CONFIG_JFS_STATISTICS is not set
-CONFIG_FS_POSIX_ACL=y
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
# CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-# CONFIG_ROMFS_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
# CONFIG_QUOTA is not set
-CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
-CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
#
# CD-ROM/DVD Filesystems
#
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_UDF_NLS=y
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
#
# DOS/FAT/NT Filesystems
#
-CONFIG_FAT_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_FAT_DEFAULT_CODEPAGE=437
-CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
# CONFIG_NTFS_FS is not set
#
@@ -1038,19 +1287,15 @@ CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_PROC_SYSCTL=y
+# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_SYSFS=y
CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_TMPFS_POSIX_ACL is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
-CONFIG_CONFIGFS_FS=m
-
-#
-# Miscellaneous filesystems
-#
+# CONFIG_CONFIGFS_FS is not set
+CONFIG_MISC_FILESYSTEMS=y
# CONFIG_ADFS_FS is not set
# CONFIG_AFFS_FS is not set
-# CONFIG_ECRYPT_FS is not set
# CONFIG_HFS_FS is not set
# CONFIG_HFSPLUS_FS is not set
# CONFIG_BEFS_FS is not set
@@ -1059,27 +1304,36 @@ CONFIG_CONFIGFS_FS=m
CONFIG_JFFS2_FS=y
CONFIG_JFFS2_FS_DEBUG=0
CONFIG_JFFS2_FS_WRITEBUFFER=y
-# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+CONFIG_JFFS2_SUMMARY=y
# CONFIG_JFFS2_FS_XATTR is not set
-# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
CONFIG_JFFS2_ZLIB=y
+CONFIG_JFFS2_LZO=y
CONFIG_JFFS2_RTIME=y
-# CONFIG_JFFS2_RUBIN is not set
-CONFIG_CRAMFS=m
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_CRAMFS is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
+CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
+CONFIG_ROOT_NFS=y
# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
@@ -1087,161 +1341,126 @@ CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=y
# CONFIG_RPCSEC_GSS_KRB5 is not set
# CONFIG_RPCSEC_GSS_SPKM3 is not set
-CONFIG_SMB_FS=y
-# CONFIG_SMB_NLS_DEFAULT is not set
+# CONFIG_SMB_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
-# CONFIG_9P_FS is not set
#
# Partition Types
#
-# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
CONFIG_NLS=y
CONFIG_NLS_DEFAULT="iso8859-1"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-
-#
-# Distributed Lock Manager
-#
-CONFIG_DLM=m
-CONFIG_DLM_TCP=y
-# CONFIG_DLM_SCTP is not set
-# CONFIG_DLM_DEBUG is not set
-
-#
-# Profiling support
-#
-# CONFIG_PROFILING is not set
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+CONFIG_NLS_CODEPAGE_1250=y
+# CONFIG_NLS_CODEPAGE_1251 is not set
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+CONFIG_NLS_ISO8859_15=y
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
#
# Kernel hacking
#
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
# CONFIG_PRINTK_TIME is not set
-CONFIG_ENABLE_MUST_CHECK=y
-# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CROSSCOMPILE=y
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="mem=48M"
+CONFIG_CMDLINE="console=ttyS0,115200"
# CONFIG_CMDLINE_OVERRIDE is not set
#
# Security options
#
-CONFIG_KEYS=y
-CONFIG_KEYS_DEBUG_PROC_KEYS=y
+# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
-
-#
-# Cryptographic options
-#
-CONFIG_CRYPTO=y
-CONFIG_CRYPTO_ALGAPI=y
-CONFIG_CRYPTO_BLKCIPHER=m
-CONFIG_CRYPTO_HASH=m
-CONFIG_CRYPTO_MANAGER=m
-CONFIG_CRYPTO_HMAC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_SHA1=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_DES=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_AES=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_DEFLATE=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-CONFIG_CRYPTO_CAMELLIA=m
-# CONFIG_CRYPTO_TEST is not set
-
-#
-# Hardware crypto devices
-#
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_FILE_CAPABILITIES=y
+# CONFIG_CRYPTO is not set
+# CONFIG_BINARY_PRINTF is not set
#
# Library routines
#
CONFIG_BITREVERSE=y
-CONFIG_CRC_CCITT=y
+CONFIG_GENERIC_FIND_LAST_BIT=y
+# CONFIG_CRC_CCITT is not set
# CONFIG_CRC16 is not set
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC_ITU_T is not set
CONFIG_CRC32=y
-CONFIG_LIBCRC32C=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=y
-CONFIG_TEXTSEARCH=y
-CONFIG_TEXTSEARCH_KMP=m
-CONFIG_TEXTSEARCH_BM=m
-CONFIG_TEXTSEARCH_FSM=m
-CONFIG_PLIST=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_NLATTR=y
diff --git a/arch/mips/include/asm/kgdb.h b/arch/mips/include/asm/kgdb.h
index 48223b09396c..19002d605ac4 100644
--- a/arch/mips/include/asm/kgdb.h
+++ b/arch/mips/include/asm/kgdb.h
@@ -38,6 +38,8 @@ extern int kgdb_early_setup;
extern void *saved_vectors[32];
extern void handle_exception(struct pt_regs *regs);
extern void breakinst(void);
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
#endif /* __KERNEL__ */
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 361f4f16c30c..bdcdef02d147 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -193,29 +193,4 @@ static __inline__ long local_sub_return(long i, local_t * l)
#define __local_add(i, l) ((l)->a.counter+=(i))
#define __local_sub(i, l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_MIPS_LOCAL_H */
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 854e95f1b07c..088c8e0f43bb 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -130,6 +130,56 @@ static inline int au1xxx_cpu_needs_config_od(void)
return 0;
}
+#define ALCHEMY_CPU_UNKNOWN -1
+#define ALCHEMY_CPU_AU1000 0
+#define ALCHEMY_CPU_AU1500 1
+#define ALCHEMY_CPU_AU1100 2
+#define ALCHEMY_CPU_AU1550 3
+#define ALCHEMY_CPU_AU1200 4
+
+static inline int alchemy_get_cputype(void)
+{
+ switch (read_c0_prid() & 0xffff0000) {
+ case 0x00030000:
+ return ALCHEMY_CPU_AU1000;
+ break;
+ case 0x01030000:
+ return ALCHEMY_CPU_AU1500;
+ break;
+ case 0x02030000:
+ return ALCHEMY_CPU_AU1100;
+ break;
+ case 0x03030000:
+ return ALCHEMY_CPU_AU1550;
+ break;
+ case 0x04030000:
+ case 0x05030000:
+ return ALCHEMY_CPU_AU1200;
+ break;
+ }
+
+ return ALCHEMY_CPU_UNKNOWN;
+}
+
+static inline void alchemy_uart_putchar(u32 uart_phys, u8 c)
+{
+ void __iomem *base = (void __iomem *)KSEG1ADDR(uart_phys);
+ int timeout, i;
+
+ /* check LSR TX_EMPTY bit */
+ timeout = 0xffffff;
+ do {
+ if (__raw_readl(base + 0x1c) & 0x20)
+ break;
+ /* slow down */
+ for (i = 10000; i; i--)
+ asm volatile ("nop");
+ } while (--timeout);
+
+ __raw_writel(c, base + 0x04); /* tx */
+ wmb();
+}
+
/* arch/mips/au1000/common/clocks.c */
extern void set_au1x00_speed(unsigned int new_freq);
extern unsigned int get_au1x00_speed(void);
@@ -143,20 +193,332 @@ void au_sleep(void);
void save_au1xxx_intctl(void);
void restore_au1xxx_intctl(void);
-/*
- * Every board describes its IRQ mapping with this table.
- */
-struct au1xxx_irqmap {
- int im_irq;
- int im_type;
- int im_request;
+
+/* SOC Interrupt numbers */
+
+#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
+#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31)
+#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_LAST + 1)
+#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31)
+#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
+
+enum soc_au1000_ints {
+ AU1000_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1000_UART0_INT = AU1000_FIRST_INT,
+ AU1000_UART1_INT,
+ AU1000_UART2_INT,
+ AU1000_UART3_INT,
+ AU1000_SSI0_INT,
+ AU1000_SSI1_INT,
+ AU1000_DMA_INT_BASE,
+
+ AU1000_TOY_INT = AU1000_FIRST_INT + 14,
+ AU1000_TOY_MATCH0_INT,
+ AU1000_TOY_MATCH1_INT,
+ AU1000_TOY_MATCH2_INT,
+ AU1000_RTC_INT,
+ AU1000_RTC_MATCH0_INT,
+ AU1000_RTC_MATCH1_INT,
+ AU1000_RTC_MATCH2_INT,
+ AU1000_IRDA_TX_INT,
+ AU1000_IRDA_RX_INT,
+ AU1000_USB_DEV_REQ_INT,
+ AU1000_USB_DEV_SUS_INT,
+ AU1000_USB_HOST_INT,
+ AU1000_ACSYNC_INT,
+ AU1000_MAC0_DMA_INT,
+ AU1000_MAC1_DMA_INT,
+ AU1000_I2S_UO_INT,
+ AU1000_AC97C_INT,
+ AU1000_GPIO0_INT,
+ AU1000_GPIO1_INT,
+ AU1000_GPIO2_INT,
+ AU1000_GPIO3_INT,
+ AU1000_GPIO4_INT,
+ AU1000_GPIO5_INT,
+ AU1000_GPIO6_INT,
+ AU1000_GPIO7_INT,
+ AU1000_GPIO8_INT,
+ AU1000_GPIO9_INT,
+ AU1000_GPIO10_INT,
+ AU1000_GPIO11_INT,
+ AU1000_GPIO12_INT,
+ AU1000_GPIO13_INT,
+ AU1000_GPIO14_INT,
+ AU1000_GPIO15_INT,
+ AU1000_GPIO16_INT,
+ AU1000_GPIO17_INT,
+ AU1000_GPIO18_INT,
+ AU1000_GPIO19_INT,
+ AU1000_GPIO20_INT,
+ AU1000_GPIO21_INT,
+ AU1000_GPIO22_INT,
+ AU1000_GPIO23_INT,
+ AU1000_GPIO24_INT,
+ AU1000_GPIO25_INT,
+ AU1000_GPIO26_INT,
+ AU1000_GPIO27_INT,
+ AU1000_GPIO28_INT,
+ AU1000_GPIO29_INT,
+ AU1000_GPIO30_INT,
+ AU1000_GPIO31_INT,
};
-/* core calls this function to let boards initialize other IRQ sources */
-void board_init_irq(void);
+enum soc_au1100_ints {
+ AU1100_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1100_UART0_INT = AU1100_FIRST_INT,
+ AU1100_UART1_INT,
+ AU1100_SD_INT,
+ AU1100_UART3_INT,
+ AU1100_SSI0_INT,
+ AU1100_SSI1_INT,
+ AU1100_DMA_INT_BASE,
+
+ AU1100_TOY_INT = AU1100_FIRST_INT + 14,
+ AU1100_TOY_MATCH0_INT,
+ AU1100_TOY_MATCH1_INT,
+ AU1100_TOY_MATCH2_INT,
+ AU1100_RTC_INT,
+ AU1100_RTC_MATCH0_INT,
+ AU1100_RTC_MATCH1_INT,
+ AU1100_RTC_MATCH2_INT,
+ AU1100_IRDA_TX_INT,
+ AU1100_IRDA_RX_INT,
+ AU1100_USB_DEV_REQ_INT,
+ AU1100_USB_DEV_SUS_INT,
+ AU1100_USB_HOST_INT,
+ AU1100_ACSYNC_INT,
+ AU1100_MAC0_DMA_INT,
+ AU1100_GPIO208_215_INT,
+ AU1100_LCD_INT,
+ AU1100_AC97C_INT,
+ AU1100_GPIO0_INT,
+ AU1100_GPIO1_INT,
+ AU1100_GPIO2_INT,
+ AU1100_GPIO3_INT,
+ AU1100_GPIO4_INT,
+ AU1100_GPIO5_INT,
+ AU1100_GPIO6_INT,
+ AU1100_GPIO7_INT,
+ AU1100_GPIO8_INT,
+ AU1100_GPIO9_INT,
+ AU1100_GPIO10_INT,
+ AU1100_GPIO11_INT,
+ AU1100_GPIO12_INT,
+ AU1100_GPIO13_INT,
+ AU1100_GPIO14_INT,
+ AU1100_GPIO15_INT,
+ AU1100_GPIO16_INT,
+ AU1100_GPIO17_INT,
+ AU1100_GPIO18_INT,
+ AU1100_GPIO19_INT,
+ AU1100_GPIO20_INT,
+ AU1100_GPIO21_INT,
+ AU1100_GPIO22_INT,
+ AU1100_GPIO23_INT,
+ AU1100_GPIO24_INT,
+ AU1100_GPIO25_INT,
+ AU1100_GPIO26_INT,
+ AU1100_GPIO27_INT,
+ AU1100_GPIO28_INT,
+ AU1100_GPIO29_INT,
+ AU1100_GPIO30_INT,
+ AU1100_GPIO31_INT,
+};
-/* boards call this to register additional (GPIO) interrupts */
-void au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count);
+enum soc_au1500_ints {
+ AU1500_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1500_UART0_INT = AU1500_FIRST_INT,
+ AU1500_PCI_INTA,
+ AU1500_PCI_INTB,
+ AU1500_UART3_INT,
+ AU1500_PCI_INTC,
+ AU1500_PCI_INTD,
+ AU1500_DMA_INT_BASE,
+
+ AU1500_TOY_INT = AU1500_FIRST_INT + 14,
+ AU1500_TOY_MATCH0_INT,
+ AU1500_TOY_MATCH1_INT,
+ AU1500_TOY_MATCH2_INT,
+ AU1500_RTC_INT,
+ AU1500_RTC_MATCH0_INT,
+ AU1500_RTC_MATCH1_INT,
+ AU1500_RTC_MATCH2_INT,
+ AU1500_PCI_ERR_INT,
+ AU1500_RESERVED_INT,
+ AU1500_USB_DEV_REQ_INT,
+ AU1500_USB_DEV_SUS_INT,
+ AU1500_USB_HOST_INT,
+ AU1500_ACSYNC_INT,
+ AU1500_MAC0_DMA_INT,
+ AU1500_MAC1_DMA_INT,
+ AU1500_AC97C_INT = AU1500_FIRST_INT + 31,
+ AU1500_GPIO0_INT,
+ AU1500_GPIO1_INT,
+ AU1500_GPIO2_INT,
+ AU1500_GPIO3_INT,
+ AU1500_GPIO4_INT,
+ AU1500_GPIO5_INT,
+ AU1500_GPIO6_INT,
+ AU1500_GPIO7_INT,
+ AU1500_GPIO8_INT,
+ AU1500_GPIO9_INT,
+ AU1500_GPIO10_INT,
+ AU1500_GPIO11_INT,
+ AU1500_GPIO12_INT,
+ AU1500_GPIO13_INT,
+ AU1500_GPIO14_INT,
+ AU1500_GPIO15_INT,
+ AU1500_GPIO200_INT,
+ AU1500_GPIO201_INT,
+ AU1500_GPIO202_INT,
+ AU1500_GPIO203_INT,
+ AU1500_GPIO20_INT,
+ AU1500_GPIO204_INT,
+ AU1500_GPIO205_INT,
+ AU1500_GPIO23_INT,
+ AU1500_GPIO24_INT,
+ AU1500_GPIO25_INT,
+ AU1500_GPIO26_INT,
+ AU1500_GPIO27_INT,
+ AU1500_GPIO28_INT,
+ AU1500_GPIO206_INT,
+ AU1500_GPIO207_INT,
+ AU1500_GPIO208_215_INT,
+};
+
+enum soc_au1550_ints {
+ AU1550_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1550_UART0_INT = AU1550_FIRST_INT,
+ AU1550_PCI_INTA,
+ AU1550_PCI_INTB,
+ AU1550_DDMA_INT,
+ AU1550_CRYPTO_INT,
+ AU1550_PCI_INTC,
+ AU1550_PCI_INTD,
+ AU1550_PCI_RST_INT,
+ AU1550_UART1_INT,
+ AU1550_UART3_INT,
+ AU1550_PSC0_INT,
+ AU1550_PSC1_INT,
+ AU1550_PSC2_INT,
+ AU1550_PSC3_INT,
+ AU1550_TOY_INT,
+ AU1550_TOY_MATCH0_INT,
+ AU1550_TOY_MATCH1_INT,
+ AU1550_TOY_MATCH2_INT,
+ AU1550_RTC_INT,
+ AU1550_RTC_MATCH0_INT,
+ AU1550_RTC_MATCH1_INT,
+ AU1550_RTC_MATCH2_INT,
+
+ AU1550_NAND_INT = AU1550_FIRST_INT + 23,
+ AU1550_USB_DEV_REQ_INT,
+ AU1550_USB_DEV_SUS_INT,
+ AU1550_USB_HOST_INT,
+ AU1550_MAC0_DMA_INT,
+ AU1550_MAC1_DMA_INT,
+ AU1550_GPIO0_INT = AU1550_FIRST_INT + 32,
+ AU1550_GPIO1_INT,
+ AU1550_GPIO2_INT,
+ AU1550_GPIO3_INT,
+ AU1550_GPIO4_INT,
+ AU1550_GPIO5_INT,
+ AU1550_GPIO6_INT,
+ AU1550_GPIO7_INT,
+ AU1550_GPIO8_INT,
+ AU1550_GPIO9_INT,
+ AU1550_GPIO10_INT,
+ AU1550_GPIO11_INT,
+ AU1550_GPIO12_INT,
+ AU1550_GPIO13_INT,
+ AU1550_GPIO14_INT,
+ AU1550_GPIO15_INT,
+ AU1550_GPIO200_INT,
+ AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
+ AU1550_GPIO16_INT,
+ AU1550_GPIO17_INT,
+ AU1550_GPIO20_INT,
+ AU1550_GPIO21_INT,
+ AU1550_GPIO22_INT,
+ AU1550_GPIO23_INT,
+ AU1550_GPIO24_INT,
+ AU1550_GPIO25_INT,
+ AU1550_GPIO26_INT,
+ AU1550_GPIO27_INT,
+ AU1550_GPIO28_INT,
+ AU1550_GPIO206_INT,
+ AU1550_GPIO207_INT,
+ AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
+};
+
+enum soc_au1200_ints {
+ AU1200_FIRST_INT = AU1000_INTC0_INT_BASE,
+ AU1200_UART0_INT = AU1200_FIRST_INT,
+ AU1200_SWT_INT,
+ AU1200_SD_INT,
+ AU1200_DDMA_INT,
+ AU1200_MAE_BE_INT,
+ AU1200_GPIO200_INT,
+ AU1200_GPIO201_INT,
+ AU1200_GPIO202_INT,
+ AU1200_UART1_INT,
+ AU1200_MAE_FE_INT,
+ AU1200_PSC0_INT,
+ AU1200_PSC1_INT,
+ AU1200_AES_INT,
+ AU1200_CAMERA_INT,
+ AU1200_TOY_INT,
+ AU1200_TOY_MATCH0_INT,
+ AU1200_TOY_MATCH1_INT,
+ AU1200_TOY_MATCH2_INT,
+ AU1200_RTC_INT,
+ AU1200_RTC_MATCH0_INT,
+ AU1200_RTC_MATCH1_INT,
+ AU1200_RTC_MATCH2_INT,
+ AU1200_GPIO203_INT,
+ AU1200_NAND_INT,
+ AU1200_GPIO204_INT,
+ AU1200_GPIO205_INT,
+ AU1200_GPIO206_INT,
+ AU1200_GPIO207_INT,
+ AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
+ AU1200_USB_INT,
+ AU1200_LCD_INT,
+ AU1200_MAE_BOTH_INT,
+ AU1200_GPIO0_INT,
+ AU1200_GPIO1_INT,
+ AU1200_GPIO2_INT,
+ AU1200_GPIO3_INT,
+ AU1200_GPIO4_INT,
+ AU1200_GPIO5_INT,
+ AU1200_GPIO6_INT,
+ AU1200_GPIO7_INT,
+ AU1200_GPIO8_INT,
+ AU1200_GPIO9_INT,
+ AU1200_GPIO10_INT,
+ AU1200_GPIO11_INT,
+ AU1200_GPIO12_INT,
+ AU1200_GPIO13_INT,
+ AU1200_GPIO14_INT,
+ AU1200_GPIO15_INT,
+ AU1200_GPIO16_INT,
+ AU1200_GPIO17_INT,
+ AU1200_GPIO18_INT,
+ AU1200_GPIO19_INT,
+ AU1200_GPIO20_INT,
+ AU1200_GPIO21_INT,
+ AU1200_GPIO22_INT,
+ AU1200_GPIO23_INT,
+ AU1200_GPIO24_INT,
+ AU1200_GPIO25_INT,
+ AU1200_GPIO26_INT,
+ AU1200_GPIO27_INT,
+ AU1200_GPIO28_INT,
+ AU1200_GPIO29_INT,
+ AU1200_GPIO30_INT,
+ AU1200_GPIO31_INT,
+};
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
@@ -549,78 +911,16 @@ void au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count);
#define IC1_TESTBIT 0xB1800080
-/* Interrupt Numbers */
+
/* Au1000 */
#ifdef CONFIG_SOC_AU1000
-enum soc_au1000_ints {
- AU1000_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
- AU1000_UART0_INT = AU1000_FIRST_INT,
- AU1000_UART1_INT, /* au1000 */
- AU1000_UART2_INT, /* au1000 */
- AU1000_UART3_INT,
- AU1000_SSI0_INT, /* au1000 */
- AU1000_SSI1_INT, /* au1000 */
- AU1000_DMA_INT_BASE,
-
- AU1000_TOY_INT = AU1000_FIRST_INT + 14,
- AU1000_TOY_MATCH0_INT,
- AU1000_TOY_MATCH1_INT,
- AU1000_TOY_MATCH2_INT,
- AU1000_RTC_INT,
- AU1000_RTC_MATCH0_INT,
- AU1000_RTC_MATCH1_INT,
- AU1000_RTC_MATCH2_INT,
- AU1000_IRDA_TX_INT, /* au1000 */
- AU1000_IRDA_RX_INT, /* au1000 */
- AU1000_USB_DEV_REQ_INT,
- AU1000_USB_DEV_SUS_INT,
- AU1000_USB_HOST_INT,
- AU1000_ACSYNC_INT,
- AU1000_MAC0_DMA_INT,
- AU1000_MAC1_DMA_INT,
- AU1000_I2S_UO_INT, /* au1000 */
- AU1000_AC97C_INT,
- AU1000_GPIO_0,
- AU1000_GPIO_1,
- AU1000_GPIO_2,
- AU1000_GPIO_3,
- AU1000_GPIO_4,
- AU1000_GPIO_5,
- AU1000_GPIO_6,
- AU1000_GPIO_7,
- AU1000_GPIO_8,
- AU1000_GPIO_9,
- AU1000_GPIO_10,
- AU1000_GPIO_11,
- AU1000_GPIO_12,
- AU1000_GPIO_13,
- AU1000_GPIO_14,
- AU1000_GPIO_15,
- AU1000_GPIO_16,
- AU1000_GPIO_17,
- AU1000_GPIO_18,
- AU1000_GPIO_19,
- AU1000_GPIO_20,
- AU1000_GPIO_21,
- AU1000_GPIO_22,
- AU1000_GPIO_23,
- AU1000_GPIO_24,
- AU1000_GPIO_25,
- AU1000_GPIO_26,
- AU1000_GPIO_27,
- AU1000_GPIO_28,
- AU1000_GPIO_29,
- AU1000_GPIO_30,
- AU1000_GPIO_31,
-};
#define UART0_ADDR 0xB1100000
-#define UART1_ADDR 0xB1200000
-#define UART2_ADDR 0xB1300000
#define UART3_ADDR 0xB1400000
#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */
#define USB_HOST_CONFIG 0xB017FFFC
+#define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT
#define AU1000_ETH0_BASE 0xB0500000
#define AU1000_ETH1_BASE 0xB0510000
@@ -631,78 +931,13 @@ enum soc_au1000_ints {
/* Au1500 */
#ifdef CONFIG_SOC_AU1500
-enum soc_au1500_ints {
- AU1500_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
- AU1500_UART0_INT = AU1500_FIRST_INT,
- AU1000_PCI_INTA, /* au1500 */
- AU1000_PCI_INTB, /* au1500 */
- AU1500_UART3_INT,
- AU1000_PCI_INTC, /* au1500 */
- AU1000_PCI_INTD, /* au1500 */
- AU1000_DMA_INT_BASE,
-
- AU1000_TOY_INT = AU1500_FIRST_INT + 14,
- AU1000_TOY_MATCH0_INT,
- AU1000_TOY_MATCH1_INT,
- AU1000_TOY_MATCH2_INT,
- AU1000_RTC_INT,
- AU1000_RTC_MATCH0_INT,
- AU1000_RTC_MATCH1_INT,
- AU1000_RTC_MATCH2_INT,
- AU1500_PCI_ERR_INT,
- AU1500_RESERVED_INT,
- AU1000_USB_DEV_REQ_INT,
- AU1000_USB_DEV_SUS_INT,
- AU1000_USB_HOST_INT,
- AU1000_ACSYNC_INT,
- AU1500_MAC0_DMA_INT,
- AU1500_MAC1_DMA_INT,
- AU1000_AC97C_INT = AU1500_FIRST_INT + 31,
- AU1000_GPIO_0,
- AU1000_GPIO_1,
- AU1000_GPIO_2,
- AU1000_GPIO_3,
- AU1000_GPIO_4,
- AU1000_GPIO_5,
- AU1000_GPIO_6,
- AU1000_GPIO_7,
- AU1000_GPIO_8,
- AU1000_GPIO_9,
- AU1000_GPIO_10,
- AU1000_GPIO_11,
- AU1000_GPIO_12,
- AU1000_GPIO_13,
- AU1000_GPIO_14,
- AU1000_GPIO_15,
- AU1500_GPIO_200,
- AU1500_GPIO_201,
- AU1500_GPIO_202,
- AU1500_GPIO_203,
- AU1500_GPIO_20,
- AU1500_GPIO_204,
- AU1500_GPIO_205,
- AU1500_GPIO_23,
- AU1500_GPIO_24,
- AU1500_GPIO_25,
- AU1500_GPIO_26,
- AU1500_GPIO_27,
- AU1500_GPIO_28,
- AU1500_GPIO_206,
- AU1500_GPIO_207,
- AU1500_GPIO_208_215,
-};
-
-/* shortcuts */
-#define INTA AU1000_PCI_INTA
-#define INTB AU1000_PCI_INTB
-#define INTC AU1000_PCI_INTC
-#define INTD AU1000_PCI_INTD
#define UART0_ADDR 0xB1100000
#define UART3_ADDR 0xB1400000
#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */
#define USB_HOST_CONFIG 0xB017fffc
+#define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT
#define AU1500_ETH0_BASE 0xB1500000
#define AU1500_ETH1_BASE 0xB1510000
@@ -713,74 +948,13 @@ enum soc_au1500_ints {
/* Au1100 */
#ifdef CONFIG_SOC_AU1100
-enum soc_au1100_ints {
- AU1100_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
- AU1100_UART0_INT = AU1100_FIRST_INT,
- AU1100_UART1_INT,
- AU1100_SD_INT,
- AU1100_UART3_INT,
- AU1000_SSI0_INT,
- AU1000_SSI1_INT,
- AU1000_DMA_INT_BASE,
-
- AU1000_TOY_INT = AU1100_FIRST_INT + 14,
- AU1000_TOY_MATCH0_INT,
- AU1000_TOY_MATCH1_INT,
- AU1000_TOY_MATCH2_INT,
- AU1000_RTC_INT,
- AU1000_RTC_MATCH0_INT,
- AU1000_RTC_MATCH1_INT,
- AU1000_RTC_MATCH2_INT,
- AU1000_IRDA_TX_INT,
- AU1000_IRDA_RX_INT,
- AU1000_USB_DEV_REQ_INT,
- AU1000_USB_DEV_SUS_INT,
- AU1000_USB_HOST_INT,
- AU1000_ACSYNC_INT,
- AU1100_MAC0_DMA_INT,
- AU1100_GPIO_208_215,
- AU1100_LCD_INT,
- AU1000_AC97C_INT,
- AU1000_GPIO_0,
- AU1000_GPIO_1,
- AU1000_GPIO_2,
- AU1000_GPIO_3,
- AU1000_GPIO_4,
- AU1000_GPIO_5,
- AU1000_GPIO_6,
- AU1000_GPIO_7,
- AU1000_GPIO_8,
- AU1000_GPIO_9,
- AU1000_GPIO_10,
- AU1000_GPIO_11,
- AU1000_GPIO_12,
- AU1000_GPIO_13,
- AU1000_GPIO_14,
- AU1000_GPIO_15,
- AU1000_GPIO_16,
- AU1000_GPIO_17,
- AU1000_GPIO_18,
- AU1000_GPIO_19,
- AU1000_GPIO_20,
- AU1000_GPIO_21,
- AU1000_GPIO_22,
- AU1000_GPIO_23,
- AU1000_GPIO_24,
- AU1000_GPIO_25,
- AU1000_GPIO_26,
- AU1000_GPIO_27,
- AU1000_GPIO_28,
- AU1000_GPIO_29,
- AU1000_GPIO_30,
- AU1000_GPIO_31,
-};
#define UART0_ADDR 0xB1100000
-#define UART1_ADDR 0xB1200000
#define UART3_ADDR 0xB1400000
#define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */
#define USB_HOST_CONFIG 0xB017FFFC
+#define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT
#define AU1100_ETH0_BASE 0xB0500000
#define AU1100_MAC0_ENABLE 0xB0520000
@@ -788,87 +962,12 @@ enum soc_au1100_ints {
#endif /* CONFIG_SOC_AU1100 */
#ifdef CONFIG_SOC_AU1550
-enum soc_au1550_ints {
- AU1550_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
- AU1550_UART0_INT = AU1550_FIRST_INT,
- AU1550_PCI_INTA,
- AU1550_PCI_INTB,
- AU1550_DDMA_INT,
- AU1550_CRYPTO_INT,
- AU1550_PCI_INTC,
- AU1550_PCI_INTD,
- AU1550_PCI_RST_INT,
- AU1550_UART1_INT,
- AU1550_UART3_INT,
- AU1550_PSC0_INT,
- AU1550_PSC1_INT,
- AU1550_PSC2_INT,
- AU1550_PSC3_INT,
- AU1000_TOY_INT,
- AU1000_TOY_MATCH0_INT,
- AU1000_TOY_MATCH1_INT,
- AU1000_TOY_MATCH2_INT,
- AU1000_RTC_INT,
- AU1000_RTC_MATCH0_INT,
- AU1000_RTC_MATCH1_INT,
- AU1000_RTC_MATCH2_INT,
-
- AU1550_NAND_INT = AU1550_FIRST_INT + 23,
- AU1550_USB_DEV_REQ_INT,
- AU1000_USB_DEV_REQ_INT = AU1550_USB_DEV_REQ_INT,
- AU1550_USB_DEV_SUS_INT,
- AU1000_USB_DEV_SUS_INT = AU1550_USB_DEV_SUS_INT,
- AU1550_USB_HOST_INT,
- AU1000_USB_HOST_INT = AU1550_USB_HOST_INT,
- AU1550_MAC0_DMA_INT,
- AU1550_MAC1_DMA_INT,
- AU1000_GPIO_0 = AU1550_FIRST_INT + 32,
- AU1000_GPIO_1,
- AU1000_GPIO_2,
- AU1000_GPIO_3,
- AU1000_GPIO_4,
- AU1000_GPIO_5,
- AU1000_GPIO_6,
- AU1000_GPIO_7,
- AU1000_GPIO_8,
- AU1000_GPIO_9,
- AU1000_GPIO_10,
- AU1000_GPIO_11,
- AU1000_GPIO_12,
- AU1000_GPIO_13,
- AU1000_GPIO_14,
- AU1000_GPIO_15,
- AU1550_GPIO_200,
- AU1500_GPIO_201_205, /* Logical or of GPIO201:205 */
- AU1500_GPIO_16,
- AU1500_GPIO_17,
- AU1500_GPIO_20,
- AU1500_GPIO_21,
- AU1500_GPIO_22,
- AU1500_GPIO_23,
- AU1500_GPIO_24,
- AU1500_GPIO_25,
- AU1500_GPIO_26,
- AU1500_GPIO_27,
- AU1500_GPIO_28,
- AU1500_GPIO_206,
- AU1500_GPIO_207,
- AU1500_GPIO_208_218, /* Logical or of GPIO208:218 */
-};
-
-/* shortcuts */
-#define INTA AU1550_PCI_INTA
-#define INTB AU1550_PCI_INTB
-#define INTC AU1550_PCI_INTC
-#define INTD AU1550_PCI_INTD
-
#define UART0_ADDR 0xB1100000
-#define UART1_ADDR 0xB1200000
-#define UART3_ADDR 0xB1400000
#define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */
#define USB_OHCI_LEN 0x00060000
#define USB_HOST_CONFIG 0xB4027ffc
+#define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT
#define AU1550_ETH0_BASE 0xB0500000
#define AU1550_ETH1_BASE 0xB0510000
@@ -877,78 +976,10 @@ enum soc_au1550_ints {
#define NUM_ETH_INTERFACES 2
#endif /* CONFIG_SOC_AU1550 */
+
#ifdef CONFIG_SOC_AU1200
-enum soc_au1200_ints {
- AU1200_FIRST_INT = MIPS_CPU_IRQ_BASE + 8,
- AU1200_UART0_INT = AU1200_FIRST_INT,
- AU1200_SWT_INT,
- AU1200_SD_INT,
- AU1200_DDMA_INT,
- AU1200_MAE_BE_INT,
- AU1200_GPIO_200,
- AU1200_GPIO_201,
- AU1200_GPIO_202,
- AU1200_UART1_INT,
- AU1200_MAE_FE_INT,
- AU1200_PSC0_INT,
- AU1200_PSC1_INT,
- AU1200_AES_INT,
- AU1200_CAMERA_INT,
- AU1000_TOY_INT,
- AU1000_TOY_MATCH0_INT,
- AU1000_TOY_MATCH1_INT,
- AU1000_TOY_MATCH2_INT,
- AU1000_RTC_INT,
- AU1000_RTC_MATCH0_INT,
- AU1000_RTC_MATCH1_INT,
- AU1000_RTC_MATCH2_INT,
- AU1200_GPIO_203,
- AU1200_NAND_INT,
- AU1200_GPIO_204,
- AU1200_GPIO_205,
- AU1200_GPIO_206,
- AU1200_GPIO_207,
- AU1200_GPIO_208_215, /* Logical OR of 208:215 */
- AU1200_USB_INT,
- AU1000_USB_HOST_INT = AU1200_USB_INT,
- AU1200_LCD_INT,
- AU1200_MAE_BOTH_INT,
- AU1000_GPIO_0,
- AU1000_GPIO_1,
- AU1000_GPIO_2,
- AU1000_GPIO_3,
- AU1000_GPIO_4,
- AU1000_GPIO_5,
- AU1000_GPIO_6,
- AU1000_GPIO_7,
- AU1000_GPIO_8,
- AU1000_GPIO_9,
- AU1000_GPIO_10,
- AU1000_GPIO_11,
- AU1000_GPIO_12,
- AU1000_GPIO_13,
- AU1000_GPIO_14,
- AU1000_GPIO_15,
- AU1000_GPIO_16,
- AU1000_GPIO_17,
- AU1000_GPIO_18,
- AU1000_GPIO_19,
- AU1000_GPIO_20,
- AU1000_GPIO_21,
- AU1000_GPIO_22,
- AU1000_GPIO_23,
- AU1000_GPIO_24,
- AU1000_GPIO_25,
- AU1000_GPIO_26,
- AU1000_GPIO_27,
- AU1000_GPIO_28,
- AU1000_GPIO_29,
- AU1000_GPIO_30,
- AU1000_GPIO_31,
-};
#define UART0_ADDR 0xB1100000
-#define UART1_ADDR 0xB1200000
#define USB_UOC_BASE 0x14020020
#define USB_UOC_LEN 0x20
@@ -974,15 +1005,9 @@ enum soc_au1200_ints {
#define USBMSRMCFG_RDCOMB 30
#define USBMSRMCFG_PFEN 31
-#endif /* CONFIG_SOC_AU1200 */
-
-#define AU1000_INTC0_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
-#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31)
-#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_BASE + 32)
-#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31)
+#define FOR_PLATFORM_C_USB_HOST_INT AU1200_USB_INT
-#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
-#define INTX 0xFF /* not valid */
+#endif /* CONFIG_SOC_AU1200 */
/* Programmable Counters 0 and 1 */
#define SYS_BASE 0xB1900000
@@ -1231,14 +1256,6 @@ enum soc_au1200_ints {
#define MAC_RX_BUFF3_STATUS 0x30
#define MAC_RX_BUFF3_ADDR 0x34
-/* UARTS 0-3 */
-#define UART_BASE UART0_ADDR
-#ifdef CONFIG_SOC_AU1200
-#define UART_DEBUG_BASE UART1_ADDR
-#else
-#define UART_DEBUG_BASE UART3_ADDR
-#endif
-
#define UART_RX 0 /* Receive buffer */
#define UART_TX 4 /* Transmit buffer */
#define UART_IER 8 /* Interrupt Enable Register */
@@ -1251,84 +1268,6 @@ enum soc_au1200_ints {
#define UART_CLK 0x28 /* Baud Rate Clock Divider */
#define UART_MOD_CNTRL 0x100 /* Module Control */
-#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */
-#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */
-#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */
-#define UART_FCR_DMA_SELECT 0x08 /* For DMA applications */
-#define UART_FCR_TRIGGER_MASK 0xF0 /* Mask for the FIFO trigger range */
-#define UART_FCR_R_TRIGGER_1 0x00 /* Mask for receive trigger set at 1 */
-#define UART_FCR_R_TRIGGER_4 0x40 /* Mask for receive trigger set at 4 */
-#define UART_FCR_R_TRIGGER_8 0x80 /* Mask for receive trigger set at 8 */
-#define UART_FCR_R_TRIGGER_14 0xA0 /* Mask for receive trigger set at 14 */
-#define UART_FCR_T_TRIGGER_0 0x00 /* Mask for transmit trigger set at 0 */
-#define UART_FCR_T_TRIGGER_4 0x10 /* Mask for transmit trigger set at 4 */
-#define UART_FCR_T_TRIGGER_8 0x20 /* Mask for transmit trigger set at 8 */
-#define UART_FCR_T_TRIGGER_12 0x30 /* Mask for transmit trigger set at 12 */
-
-/*
- * These are the definitions for the Line Control Register
- */
-#define UART_LCR_SBC 0x40 /* Set break control */
-#define UART_LCR_SPAR 0x20 /* Stick parity (?) */
-#define UART_LCR_EPAR 0x10 /* Even parity select */
-#define UART_LCR_PARITY 0x08 /* Parity Enable */
-#define UART_LCR_STOP 0x04 /* Stop bits: 0=1 stop bit, 1= 2 stop bits */
-#define UART_LCR_WLEN5 0x00 /* Wordlength: 5 bits */
-#define UART_LCR_WLEN6 0x01 /* Wordlength: 6 bits */
-#define UART_LCR_WLEN7 0x02 /* Wordlength: 7 bits */
-#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */
-
-/*
- * These are the definitions for the Line Status Register
- */
-#define UART_LSR_TEMT 0x40 /* Transmitter empty */
-#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
-#define UART_LSR_BI 0x10 /* Break interrupt indicator */
-#define UART_LSR_FE 0x08 /* Frame error indicator */
-#define UART_LSR_PE 0x04 /* Parity error indicator */
-#define UART_LSR_OE 0x02 /* Overrun error indicator */
-#define UART_LSR_DR 0x01 /* Receiver data ready */
-
-/*
- * These are the definitions for the Interrupt Identification Register
- */
-#define UART_IIR_NO_INT 0x01 /* No interrupts pending */
-#define UART_IIR_ID 0x06 /* Mask for the interrupt ID */
-#define UART_IIR_MSI 0x00 /* Modem status interrupt */
-#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */
-#define UART_IIR_RDI 0x04 /* Receiver data interrupt */
-#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */
-
-/*
- * These are the definitions for the Interrupt Enable Register
- */
-#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */
-#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */
-#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */
-#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */
-
-/*
- * These are the definitions for the Modem Control Register
- */
-#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */
-#define UART_MCR_OUT2 0x08 /* Out2 complement */
-#define UART_MCR_OUT1 0x04 /* Out1 complement */
-#define UART_MCR_RTS 0x02 /* RTS complement */
-#define UART_MCR_DTR 0x01 /* DTR complement */
-
-/*
- * These are the definitions for the Modem Status Register
- */
-#define UART_MSR_DCD 0x80 /* Data Carrier Detect */
-#define UART_MSR_RI 0x40 /* Ring Indicator */
-#define UART_MSR_DSR 0x20 /* Data Set Ready */
-#define UART_MSR_CTS 0x10 /* Clear to Send */
-#define UART_MSR_DDCD 0x08 /* Delta DCD */
-#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */
-#define UART_MSR_DDSR 0x02 /* Delta DSR */
-#define UART_MSR_DCTS 0x01 /* Delta CTS */
-#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! */
-
/* SSIO */
#define SSI0_STATUS 0xB1600000
# define SSI_STATUS_BF (1 << 4)
@@ -1739,53 +1678,18 @@ enum soc_au1200_ints {
#endif
-#ifndef _LANGUAGE_ASSEMBLY
-typedef volatile struct {
- /* 0x0000 */ u32 toytrim;
- /* 0x0004 */ u32 toywrite;
- /* 0x0008 */ u32 toymatch0;
- /* 0x000C */ u32 toymatch1;
- /* 0x0010 */ u32 toymatch2;
- /* 0x0014 */ u32 cntrctrl;
- /* 0x0018 */ u32 scratch0;
- /* 0x001C */ u32 scratch1;
- /* 0x0020 */ u32 freqctrl0;
- /* 0x0024 */ u32 freqctrl1;
- /* 0x0028 */ u32 clksrc;
- /* 0x002C */ u32 pinfunc;
- /* 0x0030 */ u32 reserved0;
- /* 0x0034 */ u32 wakemsk;
- /* 0x0038 */ u32 endian;
- /* 0x003C */ u32 powerctrl;
- /* 0x0040 */ u32 toyread;
- /* 0x0044 */ u32 rtctrim;
- /* 0x0048 */ u32 rtcwrite;
- /* 0x004C */ u32 rtcmatch0;
- /* 0x0050 */ u32 rtcmatch1;
- /* 0x0054 */ u32 rtcmatch2;
- /* 0x0058 */ u32 rtcread;
- /* 0x005C */ u32 wakesrc;
- /* 0x0060 */ u32 cpupll;
- /* 0x0064 */ u32 auxpll;
- /* 0x0068 */ u32 reserved1;
- /* 0x006C */ u32 reserved2;
- /* 0x0070 */ u32 reserved3;
- /* 0x0074 */ u32 reserved4;
- /* 0x0078 */ u32 slppwr;
- /* 0x007C */ u32 sleep;
- /* 0x0080 */ u32 reserved5[32];
- /* 0x0100 */ u32 trioutrd;
-#define trioutclr trioutrd
- /* 0x0104 */ u32 reserved6;
- /* 0x0108 */ u32 outputrd;
-#define outputset outputrd
- /* 0x010C */ u32 outputclr;
- /* 0x0110 */ u32 pinstaterd;
-#define pininputen pinstaterd
-} AU1X00_SYS;
-
-static AU1X00_SYS * const sys = (AU1X00_SYS *)SYS_BASE;
-
-#endif
+/*
+ * All Au1xx0 SOCs have a PCMCIA controller.
+ * We setup our 32-bit pseudo addresses to be equal to the
+ * 36-bit addr >> 4, to make it easier to check the address
+ * and fix it.
+ * The PCMCIA socket 0 physical attribute address is 0xF 4000 0000.
+ * The pseudo address we use is 0xF400 0000. Any address over
+ * 0xF400 0000 is a PCMCIA pseudo address.
+ */
+#define PCMCIA_IO_PSEUDO_PHYS (PCMCIA_IO_PHYS_ADDR >> 4)
+#define PCMCIA_ATTR_PSEUDO_PHYS (PCMCIA_ATTR_PHYS_ADDR >> 4)
+#define PCMCIA_MEM_PSEUDO_PHYS (PCMCIA_MEM_PHYS_ADDR >> 4)
+#define PCMCIA_PSEUDO_END (0xffffffff)
#endif
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 06f68f43800a..c098b45a0360 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -338,8 +338,8 @@ u32 au1xxx_dbdma_set_devwidth(u32 chanid, int bits);
u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries);
/* Put buffers on source/destination descriptors. */
-u32 _au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags);
-u32 _au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags);
+u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags);
+u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags);
/* Get a buffer from the destination descriptor. */
u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes);
@@ -362,25 +362,6 @@ void au1xxx_dbdma_suspend(void);
void au1xxx_dbdma_resume(void);
#endif
-
-/*
- * Some compatibilty macros -- needed to make changes to API
- * without breaking existing drivers.
- */
-#define au1xxx_dbdma_put_source(chanid, buf, nbytes) \
- _au1xxx_dbdma_put_source(chanid, buf, nbytes, DDMA_FLAGS_IE)
-#define au1xxx_dbdma_put_source_flags(chanid, buf, nbytes, flags) \
- _au1xxx_dbdma_put_source(chanid, buf, nbytes, flags)
-#define put_source_flags(chanid, buf, nbytes, flags) \
- au1xxx_dbdma_put_source_flags(chanid, buf, nbytes, flags)
-
-#define au1xxx_dbdma_put_dest(chanid, buf, nbytes) \
- _au1xxx_dbdma_put_dest(chanid, buf, nbytes, DDMA_FLAGS_IE)
-#define au1xxx_dbdma_put_dest_flags(chanid, buf, nbytes, flags) \
- _au1xxx_dbdma_put_dest(chanid, buf, nbytes, flags)
-#define put_dest_flags(chanid, buf, nbytes, flags) \
- au1xxx_dbdma_put_dest_flags(chanid, buf, nbytes, flags)
-
/*
* Flags for the put_source/put_dest functions.
*/
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_eth.h b/arch/mips/include/asm/mach-au1x00/au1xxx_eth.h
new file mode 100644
index 000000000000..f30529e8c5c0
--- /dev/null
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_eth.h
@@ -0,0 +1,18 @@
+#ifndef __AU1X00_ETH_DATA_H
+#define __AU1X00_ETH_DATA_H
+
+/* Platform specific PHY configuration passed to the MAC driver */
+struct au1000_eth_platform_data {
+ int phy_static_config;
+ int phy_search_highest_addr;
+ int phy1_search_mac0;
+ int phy_addr;
+ int phy_busid;
+ int phy_irq;
+};
+
+void __init au1xxx_override_eth_cfg(unsigned port,
+ struct au1000_eth_platform_data *eth_data);
+
+#endif /* __AU1X00_ETH_DATA_H */
+
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
index 91595fa89034..62d2f136d941 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -35,15 +35,13 @@ static inline int au1000_gpio2_to_irq(int gpio)
return -ENXIO;
}
-#ifdef CONFIG_SOC_AU1000
static inline int au1000_irq_to_gpio(int irq)
{
- if ((irq >= AU1000_GPIO_0) && (irq <= AU1000_GPIO_31))
- return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0;
+ if ((irq >= AU1000_GPIO0_INT) && (irq <= AU1000_GPIO31_INT))
+ return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO0_INT) + 0;
return -ENXIO;
}
-#endif
static inline int au1500_gpio1_to_irq(int gpio)
{
@@ -71,27 +69,25 @@ static inline int au1500_gpio2_to_irq(int gpio)
return -ENXIO;
}
-#ifdef CONFIG_SOC_AU1500
static inline int au1500_irq_to_gpio(int irq)
{
switch (irq) {
- case AU1000_GPIO_0 ... AU1000_GPIO_15:
- case AU1500_GPIO_20:
- case AU1500_GPIO_23 ... AU1500_GPIO_28:
- return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0;
- case AU1500_GPIO_200 ... AU1500_GPIO_203:
- return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_200) + 0;
- case AU1500_GPIO_204 ... AU1500_GPIO_205:
- return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_204) + 4;
- case AU1500_GPIO_206 ... AU1500_GPIO_207:
- return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_206) + 6;
- case AU1500_GPIO_208_215:
+ case AU1500_GPIO0_INT ... AU1500_GPIO15_INT:
+ case AU1500_GPIO20_INT:
+ case AU1500_GPIO23_INT ... AU1500_GPIO28_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1500_GPIO0_INT) + 0;
+ case AU1500_GPIO200_INT ... AU1500_GPIO203_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO200_INT) + 0;
+ case AU1500_GPIO204_INT ... AU1500_GPIO205_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO204_INT) + 4;
+ case AU1500_GPIO206_INT ... AU1500_GPIO207_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO206_INT) + 6;
+ case AU1500_GPIO208_215_INT:
return ALCHEMY_GPIO2_BASE + 8;
}
return -ENXIO;
}
-#endif
static inline int au1100_gpio1_to_irq(int gpio)
{
@@ -108,19 +104,17 @@ static inline int au1100_gpio2_to_irq(int gpio)
return -ENXIO;
}
-#ifdef CONFIG_SOC_AU1100
static inline int au1100_irq_to_gpio(int irq)
{
switch (irq) {
- case AU1000_GPIO_0 ... AU1000_GPIO_31:
- return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0;
- case AU1100_GPIO_208_215:
+ case AU1100_GPIO0_INT ... AU1100_GPIO31_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1100_GPIO0_INT) + 0;
+ case AU1100_GPIO208_215_INT:
return ALCHEMY_GPIO2_BASE + 8;
}
return -ENXIO;
}
-#endif
static inline int au1550_gpio1_to_irq(int gpio)
{
@@ -149,24 +143,22 @@ static inline int au1550_gpio2_to_irq(int gpio)
return -ENXIO;
}
-#ifdef CONFIG_SOC_AU1550
static inline int au1550_irq_to_gpio(int irq)
{
switch (irq) {
- case AU1000_GPIO_0 ... AU1000_GPIO_15:
- return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0;
- case AU1550_GPIO_200:
- case AU1500_GPIO_201_205:
- return ALCHEMY_GPIO2_BASE + (irq - AU1550_GPIO_200) + 0;
- case AU1500_GPIO_16 ... AU1500_GPIO_28:
- return ALCHEMY_GPIO1_BASE + (irq - AU1500_GPIO_16) + 16;
- case AU1500_GPIO_206 ... AU1500_GPIO_208_218:
- return ALCHEMY_GPIO2_BASE + (irq - AU1500_GPIO_206) + 6;
+ case AU1550_GPIO0_INT ... AU1550_GPIO15_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1550_GPIO0_INT) + 0;
+ case AU1550_GPIO200_INT:
+ case AU1550_GPIO201_205_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1550_GPIO200_INT) + 0;
+ case AU1550_GPIO16_INT ... AU1550_GPIO28_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1550_GPIO16_INT) + 16;
+ case AU1550_GPIO206_INT ... AU1550_GPIO208_215_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1550_GPIO206_INT) + 6;
}
return -ENXIO;
}
-#endif
static inline int au1200_gpio1_to_irq(int gpio)
{
@@ -187,23 +179,21 @@ static inline int au1200_gpio2_to_irq(int gpio)
return -ENXIO;
}
-#ifdef CONFIG_SOC_AU1200
static inline int au1200_irq_to_gpio(int irq)
{
switch (irq) {
- case AU1000_GPIO_0 ... AU1000_GPIO_31:
- return ALCHEMY_GPIO1_BASE + (irq - AU1000_GPIO_0) + 0;
- case AU1200_GPIO_200 ... AU1200_GPIO_202:
- return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO_200) + 0;
- case AU1200_GPIO_203:
+ case AU1200_GPIO0_INT ... AU1200_GPIO31_INT:
+ return ALCHEMY_GPIO1_BASE + (irq - AU1200_GPIO0_INT) + 0;
+ case AU1200_GPIO200_INT ... AU1200_GPIO202_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO200_INT) + 0;
+ case AU1200_GPIO203_INT:
return ALCHEMY_GPIO2_BASE + 3;
- case AU1200_GPIO_204 ... AU1200_GPIO_208_215:
- return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO_204) + 4;
+ case AU1200_GPIO204_INT ... AU1200_GPIO208_215_INT:
+ return ALCHEMY_GPIO2_BASE + (irq - AU1200_GPIO204_INT) + 4;
}
return -ENXIO;
}
-#endif
/*
* GPIO1 block macros for common linux gpio functions.
@@ -246,19 +236,19 @@ static inline int alchemy_gpio1_is_valid(int gpio)
static inline int alchemy_gpio1_to_irq(int gpio)
{
-#if defined(CONFIG_SOC_AU1000)
- return au1000_gpio1_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1100)
- return au1100_gpio1_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1500)
- return au1500_gpio1_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1550)
- return au1550_gpio1_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1200)
- return au1200_gpio1_to_irq(gpio);
-#else
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ return au1000_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1100:
+ return au1100_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1500:
+ return au1500_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1550:
+ return au1550_gpio1_to_irq(gpio);
+ case ALCHEMY_CPU_AU1200:
+ return au1200_gpio1_to_irq(gpio);
+ }
return -ENXIO;
-#endif
}
/*
@@ -316,19 +306,19 @@ static inline int alchemy_gpio2_is_valid(int gpio)
static inline int alchemy_gpio2_to_irq(int gpio)
{
-#if defined(CONFIG_SOC_AU1000)
- return au1000_gpio2_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1100)
- return au1100_gpio2_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1500)
- return au1500_gpio2_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1550)
- return au1550_gpio2_to_irq(gpio);
-#elif defined(CONFIG_SOC_AU1200)
- return au1200_gpio2_to_irq(gpio);
-#else
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ return au1000_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1100:
+ return au1100_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1500:
+ return au1500_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1550:
+ return au1550_gpio2_to_irq(gpio);
+ case ALCHEMY_CPU_AU1200:
+ return au1200_gpio2_to_irq(gpio);
+ }
return -ENXIO;
-#endif
}
/**********************************************************************/
@@ -384,10 +374,13 @@ static inline void alchemy_gpio2_enable_int(int gpio2)
gpio2 -= ALCHEMY_GPIO2_BASE;
-#if defined(CONFIG_SOC_AU1100) || defined(CONFIG_SOC_AU1500)
/* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */
- gpio2 -= 8;
-#endif
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1100:
+ case ALCHEMY_CPU_AU1500:
+ gpio2 -= 8;
+ }
+
local_irq_save(flags);
__alchemy_gpio2_mod_int(gpio2, 1);
local_irq_restore(flags);
@@ -405,10 +398,13 @@ static inline void alchemy_gpio2_disable_int(int gpio2)
gpio2 -= ALCHEMY_GPIO2_BASE;
-#if defined(CONFIG_SOC_AU1100) || defined(CONFIG_SOC_AU1500)
/* Au1100/Au1500 have GPIO208-215 enable bits at 0..7 */
- gpio2 -= 8;
-#endif
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1100:
+ case ALCHEMY_CPU_AU1500:
+ gpio2 -= 8;
+ }
+
local_irq_save(flags);
__alchemy_gpio2_mod_int(gpio2, 0);
local_irq_restore(flags);
@@ -494,19 +490,19 @@ static inline int alchemy_gpio_to_irq(int gpio)
static inline int alchemy_irq_to_gpio(int irq)
{
-#if defined(CONFIG_SOC_AU1000)
- return au1000_irq_to_gpio(irq);
-#elif defined(CONFIG_SOC_AU1100)
- return au1100_irq_to_gpio(irq);
-#elif defined(CONFIG_SOC_AU1500)
- return au1500_irq_to_gpio(irq);
-#elif defined(CONFIG_SOC_AU1550)
- return au1550_irq_to_gpio(irq);
-#elif defined(CONFIG_SOC_AU1200)
- return au1200_irq_to_gpio(irq);
-#else
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1000:
+ return au1000_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1100:
+ return au1100_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1500:
+ return au1500_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1550:
+ return au1550_irq_to_gpio(irq);
+ case ALCHEMY_CPU_AU1200:
+ return au1200_irq_to_gpio(irq);
+ }
return -ENXIO;
-#endif
}
/**********************************************************************/
diff --git a/arch/mips/include/asm/mach-au1x00/gpio.h b/arch/mips/include/asm/mach-au1x00/gpio.h
index f9b7d41c659a..c3f60cdc3203 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio.h
@@ -1,7 +1,7 @@
#ifndef _ALCHEMY_GPIO_H_
#define _ALCHEMY_GPIO_H_
-#if defined(CONFIG_ALCHEMY_GPIO_AU1000)
+#if defined(CONFIG_ALCHEMY_GPIOINT_AU1000)
#include <asm/mach-au1x00/gpio-au1000.h>
diff --git a/arch/mips/include/asm/mach-db1x00/bcsr.h b/arch/mips/include/asm/mach-db1x00/bcsr.h
new file mode 100644
index 000000000000..618d2de02ed3
--- /dev/null
+++ b/arch/mips/include/asm/mach-db1x00/bcsr.h
@@ -0,0 +1,238 @@
+/*
+ * bcsr.h -- Db1xxx/Pb1xxx Devboard CPLD registers ("BCSR") abstraction.
+ *
+ * All Alchemy development boards (except, of course, the weird PB1000)
+ * have a few registers in a CPLD with standardised layout; they mostly
+ * only differ in base address and bit meanings in the RESETS and BOARD
+ * registers.
+ *
+ * All data taken from the official AMD board documentation sheets.
+ */
+
+#ifndef _DB1XXX_BCSR_H_
+#define _DB1XXX_BCSR_H_
+
+
+/* BCSR base addresses on various boards. BCSR base 2 refers to the
+ * physical address of the first HEXLEDS register, which is usually
+ * a variable offset from the WHOAMI register.
+ */
+
+/* DB1000, DB1100, DB1500, PB1100, PB1500 */
+#define DB1000_BCSR_PHYS_ADDR 0x0E000000
+#define DB1000_BCSR_HEXLED_OFS 0x01000000
+
+#define DB1550_BCSR_PHYS_ADDR 0x0F000000
+#define DB1550_BCSR_HEXLED_OFS 0x00400000
+
+#define PB1550_BCSR_PHYS_ADDR 0x0F000000
+#define PB1550_BCSR_HEXLED_OFS 0x00800000
+
+#define DB1200_BCSR_PHYS_ADDR 0x19800000
+#define DB1200_BCSR_HEXLED_OFS 0x00400000
+
+#define PB1200_BCSR_PHYS_ADDR 0x0D800000
+#define PB1200_BCSR_HEXLED_OFS 0x00400000
+
+
+enum bcsr_id {
+ /* BCSR base 1 */
+ BCSR_WHOAMI = 0,
+ BCSR_STATUS,
+ BCSR_SWITCHES,
+ BCSR_RESETS,
+ BCSR_PCMCIA,
+ BCSR_BOARD,
+ BCSR_LEDS,
+ BCSR_SYSTEM,
+ /* Au1200/1300 based boards */
+ BCSR_INTCLR,
+ BCSR_INTSET,
+ BCSR_MASKCLR,
+ BCSR_MASKSET,
+ BCSR_SIGSTAT,
+ BCSR_INTSTAT,
+
+ /* BCSR base 2 */
+ BCSR_HEXLEDS,
+ BCSR_RSVD1,
+ BCSR_HEXCLEAR,
+
+ BCSR_CNT,
+};
+
+/* register offsets, valid for all Db1xxx/Pb1xxx boards */
+#define BCSR_REG_WHOAMI 0x00
+#define BCSR_REG_STATUS 0x04
+#define BCSR_REG_SWITCHES 0x08
+#define BCSR_REG_RESETS 0x0c
+#define BCSR_REG_PCMCIA 0x10
+#define BCSR_REG_BOARD 0x14
+#define BCSR_REG_LEDS 0x18
+#define BCSR_REG_SYSTEM 0x1c
+/* Au1200/Au1300 based boards: CPLD IRQ muxer */
+#define BCSR_REG_INTCLR 0x20
+#define BCSR_REG_INTSET 0x24
+#define BCSR_REG_MASKCLR 0x28
+#define BCSR_REG_MASKSET 0x2c
+#define BCSR_REG_SIGSTAT 0x30
+#define BCSR_REG_INTSTAT 0x34
+
+/* hexled control, offset from BCSR base 2 */
+#define BCSR_REG_HEXLEDS 0x00
+#define BCSR_REG_HEXCLEAR 0x08
+
+/*
+ * Register Bits and Pieces.
+ */
+#define BCSR_WHOAMI_DCID(x) ((x) & 0xf)
+#define BCSR_WHOAMI_CPLD(x) (((x) >> 4) & 0xf)
+#define BCSR_WHOAMI_BOARD(x) (((x) >> 8) & 0xf)
+
+/* register "WHOAMI" bits 11:8 identify the board */
+enum bcsr_whoami_boards {
+ BCSR_WHOAMI_PB1500 = 1,
+ BCSR_WHOAMI_PB1500R2,
+ BCSR_WHOAMI_PB1100,
+ BCSR_WHOAMI_DB1000,
+ BCSR_WHOAMI_DB1100,
+ BCSR_WHOAMI_DB1500,
+ BCSR_WHOAMI_DB1550,
+ BCSR_WHOAMI_PB1550_DDR,
+ BCSR_WHOAMI_PB1550 = BCSR_WHOAMI_PB1550_DDR,
+ BCSR_WHOAMI_PB1550_SDR,
+ BCSR_WHOAMI_PB1200_DDR1,
+ BCSR_WHOAMI_PB1200 = BCSR_WHOAMI_PB1200_DDR1,
+ BCSR_WHOAMI_PB1200_DDR2,
+ BCSR_WHOAMI_DB1200,
+};
+
+/* STATUS reg. Unless otherwise noted, they're valid on all boards.
+ * PB1200 = DB1200.
+ */
+#define BCSR_STATUS_PC0VS 0x0003
+#define BCSR_STATUS_PC1VS 0x000C
+#define BCSR_STATUS_PC0FI 0x0010
+#define BCSR_STATUS_PC1FI 0x0020
+#define BCSR_STATUS_PB1550_SWAPBOOT 0x0040
+#define BCSR_STATUS_SRAMWIDTH 0x0080
+#define BCSR_STATUS_FLASHBUSY 0x0100
+#define BCSR_STATUS_ROMBUSY 0x0400
+#define BCSR_STATUS_SD0WP 0x0400 /* DB1200 */
+#define BCSR_STATUS_SD1WP 0x0800
+#define BCSR_STATUS_USBOTGID 0x0800 /* PB/DB1550 */
+#define BCSR_STATUS_DB1000_SWAPBOOT 0x2000
+#define BCSR_STATUS_DB1200_SWAPBOOT 0x0040 /* DB1200 */
+#define BCSR_STATUS_IDECBLID 0x0200 /* DB1200 */
+#define BCSR_STATUS_DB1200_U0RXD 0x1000 /* DB1200 */
+#define BCSR_STATUS_DB1200_U1RXD 0x2000 /* DB1200 */
+#define BCSR_STATUS_FLASHDEN 0xC000
+#define BCSR_STATUS_DB1550_U0RXD 0x1000 /* DB1550 */
+#define BCSR_STATUS_DB1550_U3RXD 0x2000 /* DB1550 */
+#define BCSR_STATUS_PB1550_U0RXD 0x1000 /* PB1550 */
+#define BCSR_STATUS_PB1550_U1RXD 0x2000 /* PB1550 */
+#define BCSR_STATUS_PB1550_U3RXD 0x8000 /* PB1550 */
+
+
+/* DB/PB1000,1100,1500,1550 */
+#define BCSR_RESETS_PHY0 0x0001
+#define BCSR_RESETS_PHY1 0x0002
+#define BCSR_RESETS_DC 0x0004
+#define BCSR_RESETS_FIR_SEL 0x2000
+#define BCSR_RESETS_IRDA_MODE_MASK 0xC000
+#define BCSR_RESETS_IRDA_MODE_FULL 0x0000
+#define BCSR_RESETS_PB1550_WSCFSM 0x2000
+#define BCSR_RESETS_IRDA_MODE_OFF 0x4000
+#define BCSR_RESETS_IRDA_MODE_2_3 0x8000
+#define BCSR_RESETS_IRDA_MODE_1_3 0xC000
+#define BCSR_RESETS_DMAREQ 0x8000 /* PB1550 */
+
+#define BCSR_BOARD_PCIM66EN 0x0001
+#define BCSR_BOARD_SD0PWR 0x0040
+#define BCSR_BOARD_SD1PWR 0x0080
+#define BCSR_BOARD_PCIM33 0x0100
+#define BCSR_BOARD_PCIEXTARB 0x0200
+#define BCSR_BOARD_GPIO200RST 0x0400
+#define BCSR_BOARD_PCICLKOUT 0x0800
+#define BCSR_BOARD_PCICFG 0x1000
+#define BCSR_BOARD_SPISEL 0x4000 /* PB/DB1550 */
+#define BCSR_BOARD_SD0WP 0x4000 /* DB1100 */
+#define BCSR_BOARD_SD1WP 0x8000 /* DB1100 */
+
+
+/* DB/PB1200 */
+#define BCSR_RESETS_ETH 0x0001
+#define BCSR_RESETS_CAMERA 0x0002
+#define BCSR_RESETS_DC 0x0004
+#define BCSR_RESETS_IDE 0x0008
+#define BCSR_RESETS_TV 0x0010 /* DB1200 */
+/* Not resets but in the same register */
+#define BCSR_RESETS_PWMR1MUX 0x0800 /* DB1200 */
+#define BCSR_RESETS_PB1200_WSCFSM 0x0800 /* PB1200 */
+#define BCSR_RESETS_PSC0MUX 0x1000
+#define BCSR_RESETS_PSC1MUX 0x2000
+#define BCSR_RESETS_SPISEL 0x4000
+#define BCSR_RESETS_SD1MUX 0x8000 /* PB1200 */
+
+#define BCSR_BOARD_LCDVEE 0x0001
+#define BCSR_BOARD_LCDVDD 0x0002
+#define BCSR_BOARD_LCDBL 0x0004
+#define BCSR_BOARD_CAMSNAP 0x0010
+#define BCSR_BOARD_CAMPWR 0x0020
+#define BCSR_BOARD_SD0PWR 0x0040
+
+
+#define BCSR_SWITCHES_DIP 0x00FF
+#define BCSR_SWITCHES_DIP_1 0x0080
+#define BCSR_SWITCHES_DIP_2 0x0040
+#define BCSR_SWITCHES_DIP_3 0x0020
+#define BCSR_SWITCHES_DIP_4 0x0010
+#define BCSR_SWITCHES_DIP_5 0x0008
+#define BCSR_SWITCHES_DIP_6 0x0004
+#define BCSR_SWITCHES_DIP_7 0x0002
+#define BCSR_SWITCHES_DIP_8 0x0001
+#define BCSR_SWITCHES_ROTARY 0x0F00
+
+
+#define BCSR_PCMCIA_PC0VPP 0x0003
+#define BCSR_PCMCIA_PC0VCC 0x000C
+#define BCSR_PCMCIA_PC0DRVEN 0x0010
+#define BCSR_PCMCIA_PC0RST 0x0080
+#define BCSR_PCMCIA_PC1VPP 0x0300
+#define BCSR_PCMCIA_PC1VCC 0x0C00
+#define BCSR_PCMCIA_PC1DRVEN 0x1000
+#define BCSR_PCMCIA_PC1RST 0x8000
+
+
+#define BCSR_LEDS_DECIMALS 0x0003
+#define BCSR_LEDS_LED0 0x0100
+#define BCSR_LEDS_LED1 0x0200
+#define BCSR_LEDS_LED2 0x0400
+#define BCSR_LEDS_LED3 0x0800
+
+
+#define BCSR_SYSTEM_RESET 0x8000 /* clear to reset */
+#define BCSR_SYSTEM_PWROFF 0x4000 /* set to power off */
+#define BCSR_SYSTEM_VDDI 0x001F /* PB1xxx boards */
+
+
+
+
+/* initialize BCSR for a board. Provide the PHYSICAL addresses of both
+ * BCSR spaces.
+ */
+void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys);
+
+/* read a board register */
+unsigned short bcsr_read(enum bcsr_id reg);
+
+/* write to a board register */
+void bcsr_write(enum bcsr_id reg, unsigned short val);
+
+/* modify a register. clear bits set in 'clr', set bits set in 'set' */
+void bcsr_mod(enum bcsr_id reg, unsigned short clr, unsigned short set);
+
+/* install CPLD IRQ demuxer (DB1200/PB1200) */
+void __init bcsr_init_irq(int csc_start, int csc_end, int hook_irq);
+
+#endif
diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h
index 27f26102b1bb..3404248f5094 100644
--- a/arch/mips/include/asm/mach-db1x00/db1200.h
+++ b/arch/mips/include/asm/mach-db1x00/db1200.h
@@ -25,133 +25,9 @@
#define __ASM_DB1200_H
#include <linux/types.h>
+#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
-#define DBDMA_AC97_TX_CHAN DSCR_CMD0_PSC1_TX
-#define DBDMA_AC97_RX_CHAN DSCR_CMD0_PSC1_RX
-#define DBDMA_I2S_TX_CHAN DSCR_CMD0_PSC1_TX
-#define DBDMA_I2S_RX_CHAN DSCR_CMD0_PSC1_RX
-
-/*
- * SPI and SMB are muxed on the DBAu1200 board.
- * Refer to board documentation.
- */
-#define SPI_PSC_BASE PSC0_BASE_ADDR
-#define SMBUS_PSC_BASE PSC0_BASE_ADDR
-/*
- * AC'97 and I2S are muxed on the DBAu1200 board.
- * Refer to board documentation.
- */
-#define AC97_PSC_BASE PSC1_BASE_ADDR
-#define I2S_PSC_BASE PSC1_BASE_ADDR
-
-#define BCSR_KSEG1_ADDR 0xB9800000
-
-typedef volatile struct
-{
- /*00*/ u16 whoami;
- u16 reserved0;
- /*04*/ u16 status;
- u16 reserved1;
- /*08*/ u16 switches;
- u16 reserved2;
- /*0C*/ u16 resets;
- u16 reserved3;
-
- /*10*/ u16 pcmcia;
- u16 reserved4;
- /*14*/ u16 board;
- u16 reserved5;
- /*18*/ u16 disk_leds;
- u16 reserved6;
- /*1C*/ u16 system;
- u16 reserved7;
-
- /*20*/ u16 intclr;
- u16 reserved8;
- /*24*/ u16 intset;
- u16 reserved9;
- /*28*/ u16 intclr_mask;
- u16 reserved10;
- /*2C*/ u16 intset_mask;
- u16 reserved11;
-
- /*30*/ u16 sig_status;
- u16 reserved12;
- /*34*/ u16 int_status;
- u16 reserved13;
- /*38*/ u16 reserved14;
- u16 reserved15;
- /*3C*/ u16 reserved16;
- u16 reserved17;
-
-} BCSR;
-
-static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-
-/*
- * Register bit definitions for the BCSRs
- */
-#define BCSR_WHOAMI_DCID 0x000F
-#define BCSR_WHOAMI_CPLD 0x00F0
-#define BCSR_WHOAMI_BOARD 0x0F00
-
-#define BCSR_STATUS_PCMCIA0VS 0x0003
-#define BCSR_STATUS_PCMCIA1VS 0x000C
-#define BCSR_STATUS_SWAPBOOT 0x0040
-#define BCSR_STATUS_FLASHBUSY 0x0100
-#define BCSR_STATUS_IDECBLID 0x0200
-#define BCSR_STATUS_SD0WP 0x0400
-#define BCSR_STATUS_U0RXD 0x1000
-#define BCSR_STATUS_U1RXD 0x2000
-
-#define BCSR_SWITCHES_OCTAL 0x00FF
-#define BCSR_SWITCHES_DIP_1 0x0080
-#define BCSR_SWITCHES_DIP_2 0x0040
-#define BCSR_SWITCHES_DIP_3 0x0020
-#define BCSR_SWITCHES_DIP_4 0x0010
-#define BCSR_SWITCHES_DIP_5 0x0008
-#define BCSR_SWITCHES_DIP_6 0x0004
-#define BCSR_SWITCHES_DIP_7 0x0002
-#define BCSR_SWITCHES_DIP_8 0x0001
-#define BCSR_SWITCHES_ROTARY 0x0F00
-
-#define BCSR_RESETS_ETH 0x0001
-#define BCSR_RESETS_CAMERA 0x0002
-#define BCSR_RESETS_DC 0x0004
-#define BCSR_RESETS_IDE 0x0008
-#define BCSR_RESETS_TV 0x0010
-/* Not resets but in the same register */
-#define BCSR_RESETS_PWMR1MUX 0x0800
-#define BCSR_RESETS_PCS0MUX 0x1000
-#define BCSR_RESETS_PCS1MUX 0x2000
-#define BCSR_RESETS_SPISEL 0x4000
-
-#define BCSR_PCMCIA_PC0VPP 0x0003
-#define BCSR_PCMCIA_PC0VCC 0x000C
-#define BCSR_PCMCIA_PC0DRVEN 0x0010
-#define BCSR_PCMCIA_PC0RST 0x0080
-#define BCSR_PCMCIA_PC1VPP 0x0300
-#define BCSR_PCMCIA_PC1VCC 0x0C00
-#define BCSR_PCMCIA_PC1DRVEN 0x1000
-#define BCSR_PCMCIA_PC1RST 0x8000
-
-#define BCSR_BOARD_LCDVEE 0x0001
-#define BCSR_BOARD_LCDVDD 0x0002
-#define BCSR_BOARD_LCDBL 0x0004
-#define BCSR_BOARD_CAMSNAP 0x0010
-#define BCSR_BOARD_CAMPWR 0x0020
-#define BCSR_BOARD_SD0PWR 0x0040
-
-#define BCSR_LEDS_DECIMALS 0x0003
-#define BCSR_LEDS_LED0 0x0100
-#define BCSR_LEDS_LED1 0x0200
-#define BCSR_LEDS_LED2 0x0400
-#define BCSR_LEDS_LED3 0x0800
-
-#define BCSR_SYSTEM_POWEROFF 0x4000
-#define BCSR_SYSTEM_RESET 0x8000
-
/* Bit positions for the different interrupt sources */
#define BCSR_INT_IDE 0x0001
#define BCSR_INT_ETH 0x0002
@@ -168,17 +44,15 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
#define BCSR_INT_SD0INSERT 0x1000
#define BCSR_INT_SD0EJECT 0x2000
-#define SMC91C111_PHYS_ADDR 0x19000300
-#define SMC91C111_INT DB1200_ETH_INT
-
#define IDE_PHYS_ADDR 0x18800000
#define IDE_REG_SHIFT 5
-#define IDE_PHYS_LEN (16 << IDE_REG_SHIFT)
-#define IDE_INT DB1200_IDE_INT
#define IDE_DDMA_REQ DSCR_CMD0_DMA_REQ1
#define IDE_RQSIZE 128
-#define NAND_PHYS_ADDR 0x20000000
+#define DB1200_IDE_PHYS_ADDR IDE_PHYS_ADDR
+#define DB1200_IDE_PHYS_LEN (16 << IDE_REG_SHIFT)
+#define DB1200_ETH_PHYS_ADDR 0x19000300
+#define DB1200_NAND_PHYS_ADDR 0x20000000
/*
* External Interrupts for DBAu1200 as of 8/6/2004.
@@ -188,7 +62,7 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
* Example: IDE bis pos is = 64 - 64
* ETH bit pos is = 65 - 64
*/
-enum external_pb1200_ints {
+enum external_db1200_ints {
DB1200_INT_BEGIN = AU1000_MAX_INTR + 1,
DB1200_IDE_INT = DB1200_INT_BEGIN,
@@ -209,22 +83,4 @@ enum external_pb1200_ints {
DB1200_INT_END = DB1200_INT_BEGIN + 15,
};
-
-/*
- * DBAu1200 specific PCMCIA defines for drivers/pcmcia/au1000_db1x00.c
- */
-#define PCMCIA_MAX_SOCK 1
-#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
-
-/* VPP/VCC */
-#define SET_VCC_VPP(VCC, VPP, SLOT) \
- ((((VCC) << 2) | ((VPP) << 0)) << ((SLOT) * 8))
-
-#define BOARD_PC0_INT DB1200_PC0_INT
-#define BOARD_PC1_INT DB1200_PC1_INT
-#define BOARD_CARD_INSERTED(SOCKET) bcsr->sig_status & (1 << (8 + (2 * SOCKET)))
-
-/* NAND chip select */
-#define NAND_CS 1
-
#endif /* __ASM_DB1200_H */
diff --git a/arch/mips/include/asm/mach-db1x00/db1x00.h b/arch/mips/include/asm/mach-db1x00/db1x00.h
index 1a515b8c870f..a919dac525a1 100644
--- a/arch/mips/include/asm/mach-db1x00/db1x00.h
+++ b/arch/mips/include/asm/mach-db1x00/db1x00.h
@@ -41,111 +41,11 @@
#define SMBUS_PSC_BASE PSC2_BASE_ADDR
#define I2S_PSC_BASE PSC3_BASE_ADDR
-#define BCSR_KSEG1_ADDR 0xAF000000
#define NAND_PHYS_ADDR 0x20000000
-#else
-#define BCSR_KSEG1_ADDR 0xAE000000
#endif
/*
- * Overlay data structure of the DBAu1x00 board registers.
- * Registers are located at physical 0E0000xx, KSEG1 0xAE0000xx.
- */
-typedef volatile struct
-{
- /*00*/ unsigned short whoami;
- unsigned short reserved0;
- /*04*/ unsigned short status;
- unsigned short reserved1;
- /*08*/ unsigned short switches;
- unsigned short reserved2;
- /*0C*/ unsigned short resets;
- unsigned short reserved3;
- /*10*/ unsigned short pcmcia;
- unsigned short reserved4;
- /*14*/ unsigned short specific;
- unsigned short reserved5;
- /*18*/ unsigned short leds;
- unsigned short reserved6;
- /*1C*/ unsigned short swreset;
- unsigned short reserved7;
-
-} BCSR;
-
-
-/*
- * Register/mask bit definitions for the BCSRs
- */
-#define BCSR_WHOAMI_DCID 0x000F
-#define BCSR_WHOAMI_CPLD 0x00F0
-#define BCSR_WHOAMI_BOARD 0x0F00
-
-#define BCSR_STATUS_PC0VS 0x0003
-#define BCSR_STATUS_PC1VS 0x000C
-#define BCSR_STATUS_PC0FI 0x0010
-#define BCSR_STATUS_PC1FI 0x0020
-#define BCSR_STATUS_FLASHBUSY 0x0100
-#define BCSR_STATUS_ROMBUSY 0x0400
-#define BCSR_STATUS_SWAPBOOT 0x2000
-#define BCSR_STATUS_FLASHDEN 0xC000
-
-#define BCSR_SWITCHES_DIP 0x00FF
-#define BCSR_SWITCHES_DIP_1 0x0080
-#define BCSR_SWITCHES_DIP_2 0x0040
-#define BCSR_SWITCHES_DIP_3 0x0020
-#define BCSR_SWITCHES_DIP_4 0x0010
-#define BCSR_SWITCHES_DIP_5 0x0008
-#define BCSR_SWITCHES_DIP_6 0x0004
-#define BCSR_SWITCHES_DIP_7 0x0002
-#define BCSR_SWITCHES_DIP_8 0x0001
-#define BCSR_SWITCHES_ROTARY 0x0F00
-
-#define BCSR_RESETS_PHY0 0x0001
-#define BCSR_RESETS_PHY1 0x0002
-#define BCSR_RESETS_DC 0x0004
-#define BCSR_RESETS_FIR_SEL 0x2000
-#define BCSR_RESETS_IRDA_MODE_MASK 0xC000
-#define BCSR_RESETS_IRDA_MODE_FULL 0x0000
-#define BCSR_RESETS_IRDA_MODE_OFF 0x4000
-#define BCSR_RESETS_IRDA_MODE_2_3 0x8000
-#define BCSR_RESETS_IRDA_MODE_1_3 0xC000
-
-#define BCSR_PCMCIA_PC0VPP 0x0003
-#define BCSR_PCMCIA_PC0VCC 0x000C
-#define BCSR_PCMCIA_PC0DRVEN 0x0010
-#define BCSR_PCMCIA_PC0RST 0x0080
-#define BCSR_PCMCIA_PC1VPP 0x0300
-#define BCSR_PCMCIA_PC1VCC 0x0C00
-#define BCSR_PCMCIA_PC1DRVEN 0x1000
-#define BCSR_PCMCIA_PC1RST 0x8000
-
-#define BCSR_BOARD_PCIM66EN 0x0001
-#define BCSR_BOARD_SD0_PWR 0x0040
-#define BCSR_BOARD_SD1_PWR 0x0080
-#define BCSR_BOARD_PCIM33 0x0100
-#define BCSR_BOARD_GPIO200RST 0x0400
-#define BCSR_BOARD_PCICFG 0x1000
-#define BCSR_BOARD_SD0_WP 0x4000
-#define BCSR_BOARD_SD1_WP 0x8000
-
-#define BCSR_LEDS_DECIMALS 0x0003
-#define BCSR_LEDS_LED0 0x0100
-#define BCSR_LEDS_LED1 0x0200
-#define BCSR_LEDS_LED2 0x0400
-#define BCSR_LEDS_LED3 0x0800
-
-#define BCSR_SWRESET_RESET 0x0080
-
-/* PCMCIA DBAu1x00 specific defines */
-#define PCMCIA_MAX_SOCK 1
-#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
-
-/* VPP/VCC */
-#define SET_VCC_VPP(VCC, VPP, SLOT)\
- ((((VCC) << 2) | ((VPP) << 0)) << ((SLOT) * 8))
-
-/*
* NAND defines
*
* Timing values as described in databook, * ns value stripped of the
diff --git a/arch/mips/include/asm/mach-pb1x00/pb1100.h b/arch/mips/include/asm/mach-pb1x00/pb1100.h
deleted file mode 100644
index b1a60f1cbd02..000000000000
--- a/arch/mips/include/asm/mach-pb1x00/pb1100.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Alchemy Semi Pb1100 Referrence Board
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-#ifndef __ASM_PB1100_H
-#define __ASM_PB1100_H
-
-#define PB1100_IDENT 0xAE000000
-#define BOARD_STATUS_REG 0xAE000004
-# define PB1100_ROM_SEL (1 << 15)
-# define PB1100_ROM_SIZ (1 << 14)
-# define PB1100_SWAP_BOOT (1 << 13)
-# define PB1100_FLASH_WP (1 << 12)
-# define PB1100_ROM_H_STS (1 << 11)
-# define PB1100_ROM_L_STS (1 << 10)
-# define PB1100_FLASH_H_STS (1 << 9)
-# define PB1100_FLASH_L_STS (1 << 8)
-# define PB1100_SRAM_SIZ (1 << 7)
-# define PB1100_TSC_BUSY (1 << 6)
-# define PB1100_PCMCIA_VS_MASK (3 << 4)
-# define PB1100_RS232_CD (1 << 3)
-# define PB1100_RS232_CTS (1 << 2)
-# define PB1100_RS232_DSR (1 << 1)
-# define PB1100_RS232_RI (1 << 0)
-
-#define PB1100_IRDA_RS232 0xAE00000C
-# define PB1100_IRDA_FULL (0 << 14) /* full power */
-# define PB1100_IRDA_SHUTDOWN (1 << 14)
-# define PB1100_IRDA_TT (2 << 14) /* 2/3 power */
-# define PB1100_IRDA_OT (3 << 14) /* 1/3 power */
-# define PB1100_IRDA_FIR (1 << 13)
-
-#define PCMCIA_BOARD_REG 0xAE000010
-# define PB1100_SD_WP1_RO (1 << 15) /* read only */
-# define PB1100_SD_WP0_RO (1 << 14) /* read only */
-# define PB1100_SD_PWR1 (1 << 11) /* applies power to SD1 */
-# define PB1100_SD_PWR0 (1 << 10) /* applies power to SD0 */
-# define PB1100_SEL_SD_CONN1 (1 << 9)
-# define PB1100_SEL_SD_CONN0 (1 << 8)
-# define PC_DEASSERT_RST (1 << 7)
-# define PC_DRV_EN (1 << 4)
-
-#define PB1100_G_CONTROL 0xAE000014 /* graphics control */
-
-#define PB1100_RST_VDDI 0xAE00001C
-# define PB1100_SOFT_RESET (1 << 15) /* clear to reset the board */
-# define PB1100_VDDI_MASK 0x1F
-
-#define PB1100_LEDS 0xAE000018
-
-/*
- * 11:8 is 4 discreet LEDs. Clearing a bit illuminates the LED.
- * 7:0 is the LED Display's decimal points.
- */
-#define PB1100_HEX_LED 0xAE000018
-
-/* PCMCIA Pb1100 specific defines */
-#define PCMCIA_MAX_SOCK 0
-#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
-
-/* VPP/VCC */
-#define SET_VCC_VPP(VCC, VPP) (((VCC) << 2) | ((VPP) << 0))
-
-#endif /* __ASM_PB1100_H */
diff --git a/arch/mips/include/asm/mach-pb1x00/pb1200.h b/arch/mips/include/asm/mach-pb1x00/pb1200.h
index c8618df88cb5..962eb55dc880 100644
--- a/arch/mips/include/asm/mach-pb1x00/pb1200.h
+++ b/arch/mips/include/asm/mach-pb1x00/pb1200.h
@@ -25,6 +25,7 @@
#define __ASM_PB1200_H
#include <linux/types.h>
+#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#define DBDMA_AC97_TX_CHAN DSCR_CMD0_PSC1_TX
@@ -43,113 +44,8 @@
* Refer to board documentation.
*/
#define AC97_PSC_BASE PSC1_BASE_ADDR
-#define I2S_PSC_BASE PSC1_BASE_ADDR
+#define I2S_PSC_BASE PSC1_BASE_ADDR
-#define BCSR_KSEG1_ADDR 0xAD800000
-
-typedef volatile struct
-{
- /*00*/ u16 whoami;
- u16 reserved0;
- /*04*/ u16 status;
- u16 reserved1;
- /*08*/ u16 switches;
- u16 reserved2;
- /*0C*/ u16 resets;
- u16 reserved3;
-
- /*10*/ u16 pcmcia;
- u16 reserved4;
- /*14*/ u16 board;
- u16 reserved5;
- /*18*/ u16 disk_leds;
- u16 reserved6;
- /*1C*/ u16 system;
- u16 reserved7;
-
- /*20*/ u16 intclr;
- u16 reserved8;
- /*24*/ u16 intset;
- u16 reserved9;
- /*28*/ u16 intclr_mask;
- u16 reserved10;
- /*2C*/ u16 intset_mask;
- u16 reserved11;
-
- /*30*/ u16 sig_status;
- u16 reserved12;
- /*34*/ u16 int_status;
- u16 reserved13;
- /*38*/ u16 reserved14;
- u16 reserved15;
- /*3C*/ u16 reserved16;
- u16 reserved17;
-
-} BCSR;
-
-static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-
-/*
- * Register bit definitions for the BCSRs
- */
-#define BCSR_WHOAMI_DCID 0x000F
-#define BCSR_WHOAMI_CPLD 0x00F0
-#define BCSR_WHOAMI_BOARD 0x0F00
-
-#define BCSR_STATUS_PCMCIA0VS 0x0003
-#define BCSR_STATUS_PCMCIA1VS 0x000C
-#define BCSR_STATUS_SWAPBOOT 0x0040
-#define BCSR_STATUS_FLASHBUSY 0x0100
-#define BCSR_STATUS_IDECBLID 0x0200
-#define BCSR_STATUS_SD0WP 0x0400
-#define BCSR_STATUS_SD1WP 0x0800
-#define BCSR_STATUS_U0RXD 0x1000
-#define BCSR_STATUS_U1RXD 0x2000
-
-#define BCSR_SWITCHES_OCTAL 0x00FF
-#define BCSR_SWITCHES_DIP_1 0x0080
-#define BCSR_SWITCHES_DIP_2 0x0040
-#define BCSR_SWITCHES_DIP_3 0x0020
-#define BCSR_SWITCHES_DIP_4 0x0010
-#define BCSR_SWITCHES_DIP_5 0x0008
-#define BCSR_SWITCHES_DIP_6 0x0004
-#define BCSR_SWITCHES_DIP_7 0x0002
-#define BCSR_SWITCHES_DIP_8 0x0001
-#define BCSR_SWITCHES_ROTARY 0x0F00
-
-#define BCSR_RESETS_ETH 0x0001
-#define BCSR_RESETS_CAMERA 0x0002
-#define BCSR_RESETS_DC 0x0004
-#define BCSR_RESETS_IDE 0x0008
-/* not resets but in the same register */
-#define BCSR_RESETS_WSCFSM 0x0800
-#define BCSR_RESETS_PCS0MUX 0x1000
-#define BCSR_RESETS_PCS1MUX 0x2000
-#define BCSR_RESETS_SPISEL 0x4000
-#define BCSR_RESETS_SD1MUX 0x8000
-
-#define BCSR_PCMCIA_PC0VPP 0x0003
-#define BCSR_PCMCIA_PC0VCC 0x000C
-#define BCSR_PCMCIA_PC0DRVEN 0x0010
-#define BCSR_PCMCIA_PC0RST 0x0080
-#define BCSR_PCMCIA_PC1VPP 0x0300
-#define BCSR_PCMCIA_PC1VCC 0x0C00
-#define BCSR_PCMCIA_PC1DRVEN 0x1000
-#define BCSR_PCMCIA_PC1RST 0x8000
-
-#define BCSR_BOARD_LCDVEE 0x0001
-#define BCSR_BOARD_LCDVDD 0x0002
-#define BCSR_BOARD_LCDBL 0x0004
-#define BCSR_BOARD_CAMSNAP 0x0010
-#define BCSR_BOARD_CAMPWR 0x0020
-#define BCSR_BOARD_SD0PWR 0x0040
-#define BCSR_BOARD_SD1PWR 0x0080
-
-#define BCSR_LEDS_DECIMALS 0x00FF
-#define BCSR_LEDS_LED0 0x0100
-#define BCSR_LEDS_LED1 0x0200
-#define BCSR_LEDS_LED2 0x0400
-#define BCSR_LEDS_LED3 0x0800
#define BCSR_SYSTEM_VDDI 0x001F
#define BCSR_SYSTEM_POWEROFF 0x4000
@@ -239,20 +135,6 @@ enum external_pb1200_ints {
PB1200_INT_END = PB1200_INT_BEGIN + 15
};
-/*
- * Pb1200 specific PCMCIA defines for drivers/pcmcia/au1000_db1x00.c
- */
-#define PCMCIA_MAX_SOCK 1
-#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
-
-/* VPP/VCC */
-#define SET_VCC_VPP(VCC, VPP, SLOT) \
- ((((VCC) << 2) | ((VPP) << 0)) << ((SLOT) * 8))
-
-#define BOARD_PC0_INT PB1200_PC0_INT
-#define BOARD_PC1_INT PB1200_PC1_INT
-#define BOARD_CARD_INSERTED(SOCKET) bcsr->sig_status & (1 << (8 + (2 * SOCKET)))
-
/* NAND chip select */
#define NAND_CS 1
diff --git a/arch/mips/include/asm/mach-pb1x00/pb1500.h b/arch/mips/include/asm/mach-pb1x00/pb1500.h
deleted file mode 100644
index da51a2eb7b82..000000000000
--- a/arch/mips/include/asm/mach-pb1x00/pb1500.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Alchemy Semi Pb1500 Referrence Board
- *
- * Copyright 2001, 2008 MontaVista Software Inc.
- * Author: MontaVista Software, Inc. <source@mvista.com>
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-#ifndef __ASM_PB1500_H
-#define __ASM_PB1500_H
-
-#define IDENT_BOARD_REG 0xAE000000
-#define BOARD_STATUS_REG 0xAE000004
-#define PCI_BOARD_REG 0xAE000010
-#define PCMCIA_BOARD_REG 0xAE000010
-# define PC_DEASSERT_RST 0x80
-# define PC_DRV_EN 0x10
-#define PB1500_G_CONTROL 0xAE000014
-#define PB1500_RST_VDDI 0xAE00001C
-#define PB1500_LEDS 0xAE000018
-
-#define PB1500_HEX_LED 0xAF000004
-#define PB1500_HEX_LED_BLANK 0xAF000008
-
-/* PCMCIA Pb1500 specific defines */
-#define PCMCIA_MAX_SOCK 0
-#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
-
-/* VPP/VCC */
-#define SET_VCC_VPP(VCC, VPP) (((VCC) << 2) | ((VPP) << 0))
-
-#endif /* __ASM_PB1500_H */
diff --git a/arch/mips/include/asm/mach-pb1x00/pb1550.h b/arch/mips/include/asm/mach-pb1x00/pb1550.h
index 6704a11497db..58796410bd6e 100644
--- a/arch/mips/include/asm/mach-pb1x00/pb1550.h
+++ b/arch/mips/include/asm/mach-pb1x00/pb1550.h
@@ -40,102 +40,6 @@
#define SMBUS_PSC_BASE PSC2_BASE_ADDR
#define I2S_PSC_BASE PSC3_BASE_ADDR
-#define BCSR_PHYS_ADDR 0xAF000000
-
-typedef volatile struct
-{
- /*00*/ u16 whoami;
- u16 reserved0;
- /*04*/ u16 status;
- u16 reserved1;
- /*08*/ u16 switches;
- u16 reserved2;
- /*0C*/ u16 resets;
- u16 reserved3;
- /*10*/ u16 pcmcia;
- u16 reserved4;
- /*14*/ u16 pci;
- u16 reserved5;
- /*18*/ u16 leds;
- u16 reserved6;
- /*1C*/ u16 system;
- u16 reserved7;
-
-} BCSR;
-
-static BCSR * const bcsr = (BCSR *)BCSR_PHYS_ADDR;
-
-/*
- * Register bit definitions for the BCSRs
- */
-#define BCSR_WHOAMI_DCID 0x000F
-#define BCSR_WHOAMI_CPLD 0x00F0
-#define BCSR_WHOAMI_BOARD 0x0F00
-
-#define BCSR_STATUS_PCMCIA0VS 0x0003
-#define BCSR_STATUS_PCMCIA1VS 0x000C
-#define BCSR_STATUS_PCMCIA0FI 0x0010
-#define BCSR_STATUS_PCMCIA1FI 0x0020
-#define BCSR_STATUS_SWAPBOOT 0x0040
-#define BCSR_STATUS_SRAMWIDTH 0x0080
-#define BCSR_STATUS_FLASHBUSY 0x0100
-#define BCSR_STATUS_ROMBUSY 0x0200
-#define BCSR_STATUS_USBOTGID 0x0800
-#define BCSR_STATUS_U0RXD 0x1000
-#define BCSR_STATUS_U1RXD 0x2000
-#define BCSR_STATUS_U3RXD 0x8000
-
-#define BCSR_SWITCHES_OCTAL 0x00FF
-#define BCSR_SWITCHES_DIP_1 0x0080
-#define BCSR_SWITCHES_DIP_2 0x0040
-#define BCSR_SWITCHES_DIP_3 0x0020
-#define BCSR_SWITCHES_DIP_4 0x0010
-#define BCSR_SWITCHES_DIP_5 0x0008
-#define BCSR_SWITCHES_DIP_6 0x0004
-#define BCSR_SWITCHES_DIP_7 0x0002
-#define BCSR_SWITCHES_DIP_8 0x0001
-#define BCSR_SWITCHES_ROTARY 0x0F00
-
-#define BCSR_RESETS_PHY0 0x0001
-#define BCSR_RESETS_PHY1 0x0002
-#define BCSR_RESETS_DC 0x0004
-#define BCSR_RESETS_WSC 0x2000
-#define BCSR_RESETS_SPISEL 0x4000
-#define BCSR_RESETS_DMAREQ 0x8000
-
-#define BCSR_PCMCIA_PC0VPP 0x0003
-#define BCSR_PCMCIA_PC0VCC 0x000C
-#define BCSR_PCMCIA_PC0DRVEN 0x0010
-#define BCSR_PCMCIA_PC0RST 0x0080
-#define BCSR_PCMCIA_PC1VPP 0x0300
-#define BCSR_PCMCIA_PC1VCC 0x0C00
-#define BCSR_PCMCIA_PC1DRVEN 0x1000
-#define BCSR_PCMCIA_PC1RST 0x8000
-
-#define BCSR_PCI_M66EN 0x0001
-#define BCSR_PCI_M33 0x0100
-#define BCSR_PCI_EXTERNARB 0x0200
-#define BCSR_PCI_GPIO200RST 0x0400
-#define BCSR_PCI_CLKOUT 0x0800
-#define BCSR_PCI_CFGHOST 0x1000
-
-#define BCSR_LEDS_DECIMALS 0x00FF
-#define BCSR_LEDS_LED0 0x0100
-#define BCSR_LEDS_LED1 0x0200
-#define BCSR_LEDS_LED2 0x0400
-#define BCSR_LEDS_LED3 0x0800
-
-#define BCSR_SYSTEM_VDDI 0x001F
-#define BCSR_SYSTEM_POWEROFF 0x4000
-#define BCSR_SYSTEM_RESET 0x8000
-
-#define PCMCIA_MAX_SOCK 1
-#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
-
-/* VPP/VCC */
-#define SET_VCC_VPP(VCC, VPP, SLOT) \
- ((((VCC) << 2) | ((VPP) << 0)) << ((SLOT) * 8))
-
#if defined(CONFIG_MTD_PB1550_BOOT) && defined(CONFIG_MTD_PB1550_USER)
#define PB1550_BOTH_BANKS
#elif defined(CONFIG_MTD_PB1550_BOOT) && !defined(CONFIG_MTD_PB1550_USER)
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index f266295cce51..ac32572430f4 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -107,18 +107,6 @@ typedef struct { unsigned long pte; } pte_t;
typedef struct page *pgtable_t;
/*
- * For 3-level pagetables we defines these ourselves, for 2-level the
- * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
- */
-#ifdef CONFIG_64BIT
-
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x) ((x).pmd)
-#define __pmd(x) ((pmd_t) { (x) } )
-
-#endif
-
-/*
* Right now we don't support 4-level pagetables, so all pud-related
* definitions come from <asm-generic/pgtable-nopud.h>.
*/
diff --git a/arch/mips/include/asm/param.h b/arch/mips/include/asm/param.h
index 1d9bb8c5ab24..da3920fce9ad 100644
--- a/arch/mips/include/asm/param.h
+++ b/arch/mips/include/asm/param.h
@@ -9,23 +9,8 @@
#ifndef _ASM_PARAM_H
#define _ASM_PARAM_H
-#ifdef __KERNEL__
-
-# define HZ CONFIG_HZ /* Internal kernel timer frequency */
-# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
-# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
-#endif
-
-#ifndef HZ
-#define HZ 100
-#endif
-
#define EXEC_PAGESIZE 65536
-#ifndef NOGROUP
-#define NOGROUP (-1)
-#endif
-
-#define MAXHOSTNAMELEN 64 /* max length of hostname */
+#include <asm-generic/param.h>
#endif /* _ASM_PARAM_H */
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index 3738f4b48cbd..881d18b4e298 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -31,7 +31,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
*/
extern void pmd_init(unsigned long page, unsigned long pagetable);
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
@@ -104,7 +104,7 @@ do { \
tlb_remove_page((tlb), pte); \
} while (0)
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 9cd508993956..073a393bb97a 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -16,7 +16,11 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
+#ifdef CONFIG_PAGE_SIZE_64KB
+#include <asm-generic/pgtable-nopmd.h>
+#else
#include <asm-generic/pgtable-nopud.h>
+#endif
/*
* Each address space has 2 4K pages as its page directory, giving 1024
@@ -37,13 +41,20 @@
* fault address - VMALLOC_START.
*/
+
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#ifdef __PAGETABLE_PMD_FOLDED
+#define PGDIR_SHIFT (PAGE_SHIFT + PAGE_SHIFT + PTE_ORDER - 3)
+#else
+
/* PMD_SHIFT determines the size of the area a second-level page table can map */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT + PTE_ORDER - 3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
+
#define PGDIR_SHIFT (PMD_SHIFT + (PAGE_SHIFT + PMD_ORDER - 3))
+#endif
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
@@ -92,12 +103,14 @@
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
-#define PMD_ORDER 0
+#define PMD_ORDER aieeee_attempt_to_allocate_pmd
#define PTE_ORDER 0
#endif
#define PTRS_PER_PGD ((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#ifndef __PAGETABLE_PMD_FOLDED
#define PTRS_PER_PMD ((PAGE_SIZE << PMD_ORDER) / sizeof(pmd_t))
+#endif
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#if PGDIR_SIZE >= TASK_SIZE
@@ -120,15 +133,30 @@
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
+#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#endif
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
extern pte_t invalid_pte_table[PTRS_PER_PTE];
extern pte_t empty_bad_page_table[PTRS_PER_PTE];
+
+
+#ifndef __PAGETABLE_PMD_FOLDED
+/*
+ * For 3-level pagetables we defines these ourselves, for 2-level the
+ * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
+ */
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+
extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
extern pmd_t empty_bad_pmd_table[PTRS_PER_PMD];
+#endif
/*
* Empty pgd/pmd entries point to the invalid_pte_table.
@@ -149,6 +177,7 @@ static inline void pmd_clear(pmd_t *pmdp)
{
pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
+#ifndef __PAGETABLE_PMD_FOLDED
/*
* Empty pud entries point to the invalid_pmd_table.
@@ -172,6 +201,7 @@ static inline void pud_clear(pud_t *pudp)
{
pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
+#endif
#define pte_page(x) pfn_to_page(pte_pfn(x))
@@ -196,6 +226,7 @@ static inline void pud_clear(pud_t *pudp)
/* to find an entry in a page-table-directory */
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
+#ifndef __PAGETABLE_PMD_FOLDED
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return pud_val(pud);
@@ -208,6 +239,7 @@ static inline pmd_t *pmd_offset(pud_t * pud, unsigned long address)
{
return (pmd_t *) pud_page_vaddr(*pud) + pmd_index(address);
}
+#endif
/* Find an entry in the third-level page table.. */
#define __pte_offset(address) \
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 1854336e56a2..02335fda9e77 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -177,7 +177,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
*/
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while(0)
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
/*
* (puds are folded into pgds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 2c1e1d02338b..ca6c83218caa 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -188,11 +188,15 @@ void output_mm_defines(void)
DEFINE(_PTE_T_SIZE, sizeof(pte_t));
BLANK();
DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
BLANK();
DEFINE(_PGD_ORDER, PGD_ORDER);
+#ifndef __PAGETABLE_PMD_FOLDED
DEFINE(_PMD_ORDER, PMD_ORDER);
+#endif
DEFINE(_PTE_ORDER, PTE_ORDER);
BLANK();
DEFINE(_PMD_SHIFT, PMD_SHIFT);
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 50c9bb880667..9b78ff6e9b84 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -180,6 +180,11 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
*(ptr++) = regs->cp0_epc;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->cp0_epc = pc;
+}
+
/*
* Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
* then try to fall into the debugger
@@ -198,7 +203,7 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
- if (kgdb_handle_exception(trap, compute_signal(trap), 0, regs))
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
return NOTIFY_DONE;
if (atomic_read(&kgdb_setting_breakpoint))
@@ -212,6 +217,26 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
return NOTIFY_STOP;
}
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
static struct notifier_block kgdb_notifier = {
.notifier_call = kgdb_mips_notify,
};
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index f042563c924f..bde79ef602e6 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -9,7 +9,6 @@
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/file.h>
-#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/resource.h>
#include <linux/highmem.h>
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 364f066cb497..dcaed1bbbfe5 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -28,7 +28,6 @@
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 308e43460864..d42f6a35e303 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -26,6 +26,8 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
+#include <linux/kdb.h>
+#include "../../../kernel/debug/kdb/kdb_private.h"
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -184,6 +186,11 @@ void show_stack(struct task_struct *task, unsigned long *sp)
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
+#ifdef CONFIG_KGDB_KDB
+ } else if (atomic_read(&kgdb_active) != -1 &&
+ kdb_current_regs) {
+ memcpy(&regs, kdb_current_regs, sizeof(regs));
+#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
@@ -358,6 +365,8 @@ void __noreturn die(const char * str, const struct pt_regs * regs)
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
+ notify_die(DIE_OOPS, str, (struct pt_regs *)regs, SIGSEGV, 0, 0);
+
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
@@ -698,6 +707,11 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
siginfo_t info;
char b[40];
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
if (notify_die(DIE_TRAP, str, regs, code, 0, 0) == NOTIFY_STOP)
return;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 60477529362e..2bd2151c586a 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -38,7 +38,6 @@
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/seq_file.h>
-#include <linux/smp_lock.h>
#include <linux/syscalls.h>
#include <linux/moduleloader.h>
#include <linux/interrupt.h>
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9e8d00389eef..3c5b7de10af5 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -424,7 +424,7 @@ void __init mem_init(void)
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+ totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
@@ -477,7 +477,7 @@ unsigned long pgd_current[NR_CPUS];
* will officially be retired.
*/
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __page_aligned(_PGD_ORDER);
-#ifdef CONFIG_64BIT
+#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned(PMD_ORDER);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 1121019fa456..78eaa4f0b0ec 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -15,23 +15,31 @@
void pgd_init(unsigned long page)
{
unsigned long *p, *end;
+ unsigned long entry;
+
+#ifdef __PAGETABLE_PMD_FOLDED
+ entry = (unsigned long)invalid_pte_table;
+#else
+ entry = (unsigned long)invalid_pmd_table;
+#endif
p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
while (p < end) {
- p[0] = (unsigned long) invalid_pmd_table;
- p[1] = (unsigned long) invalid_pmd_table;
- p[2] = (unsigned long) invalid_pmd_table;
- p[3] = (unsigned long) invalid_pmd_table;
- p[4] = (unsigned long) invalid_pmd_table;
- p[5] = (unsigned long) invalid_pmd_table;
- p[6] = (unsigned long) invalid_pmd_table;
- p[7] = (unsigned long) invalid_pmd_table;
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p[5] = entry;
+ p[6] = entry;
+ p[7] = entry;
p += 8;
}
}
+#ifndef __PAGETABLE_PMD_FOLDED
void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
@@ -40,17 +48,18 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
end = p + PTRS_PER_PMD;
while (p < end) {
- p[0] = (unsigned long)pagetable;
- p[1] = (unsigned long)pagetable;
- p[2] = (unsigned long)pagetable;
- p[3] = (unsigned long)pagetable;
- p[4] = (unsigned long)pagetable;
- p[5] = (unsigned long)pagetable;
- p[6] = (unsigned long)pagetable;
- p[7] = (unsigned long)pagetable;
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p[5] = pagetable;
+ p[6] = pagetable;
+ p[7] = pagetable;
p += 8;
}
}
+#endif
void __init pagetable_init(void)
{
@@ -59,8 +68,9 @@ void __init pagetable_init(void)
/* Initialize the entire pgd. */
pgd_init((unsigned long)swapper_pg_dir);
+#ifndef __PAGETABLE_PMD_FOLDED
pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);
-
+#endif
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 3d0baa4a842d..a04b3bcab7d4 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -555,11 +555,13 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
+#ifndef __PAGETABLE_PMD_FOLDED
uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
uasm_i_dsrl(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
+#endif
}
/*
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index f61c164d1e67..bc1297109cc5 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -505,5 +505,5 @@ void __init mem_init(void)
(num_physpages - tmp) << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+ totalhigh_pages << (PAGE_SHIFT-10));
}
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index 15ea778b5e66..ed2453eab5cb 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -28,7 +28,6 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/errno.h>
diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index dd0c8ff52a68..ac5c6bdb2f05 100644
--- a/arch/mn10300/Makefile
+++ b/arch/mn10300/Makefile
@@ -19,7 +19,7 @@ CCDIR := $(strip $(patsubst %/specs,%,$(CCSPECS)))
KBUILD_CPPFLAGS += -nostdinc -I$(CCDIR)/include
LDFLAGS :=
-OBJCOPYFLAGS := -O binary -R .note -R .comment -S
+OBJCOPYFLAGS := -O binary -R .note -R .comment -R .GCC-command-line -R .note.gnu.build-id -S
#LDFLAGS_vmlinux := -Map linkmap.txt
CHECKFLAGS +=
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index 3acce23708b0..441920d8ff58 100644
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.30-rc2
-# Sat Apr 18 11:13:22 2009
+# Linux kernel version: 2.6.33-rc1
+# Tue Dec 22 19:26:25 2009
#
CONFIG_MN10300=y
CONFIG_AM33=y
@@ -22,6 +22,7 @@ CONFIG_GENERIC_HARDIRQS=y
# CONFIG_HOTPLUG_CPU is not set
CONFIG_HZ=1000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_CONSTRUCTORS=y
#
# General setup
@@ -43,11 +44,10 @@ CONFIG_BSD_PROCESS_ACCT=y
#
# RCU Subsystem
#
-CONFIG_CLASSIC_RCU=y
# CONFIG_TREE_RCU is not set
-# CONFIG_PREEMPT_RCU is not set
+# CONFIG_TREE_PREEMPT_RCU is not set
+CONFIG_TINY_RCU=y
# CONFIG_TREE_RCU_TRACE is not set
-# CONFIG_PREEMPT_RCU_TRACE is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_GROUP_SCHED is not set
@@ -62,7 +62,6 @@ CONFIG_ANON_INODES=y
CONFIG_EMBEDDED=y
CONFIG_SYSCTL_SYSCALL=y
# CONFIG_KALLSYMS is not set
-CONFIG_STRIP_ASM_SYMS=y
# CONFIG_HOTPLUG is not set
CONFIG_PRINTK=y
CONFIG_BUG=y
@@ -75,14 +74,22 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y
CONFIG_SHMEM=y
CONFIG_AIO=y
+
+#
+# Kernel Performance Events And Counters
+#
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_COMPAT_BRK=y
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
CONFIG_PROFILING=y
-# CONFIG_MARKERS is not set
CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+
+#
+# GCOV-based kernel profiling
+#
# CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y
@@ -90,6 +97,35 @@ CONFIG_RT_MUTEXES=y
CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
# CONFIG_BLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -145,9 +181,8 @@ CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_ZONE_DMA_FLAG=0
CONFIG_NR_QUICK=1
CONFIG_VIRT_TO_BUS=y
-CONFIG_UNEVICTABLE_LRU=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
+# CONFIG_KSM is not set
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
#
# Power management options
@@ -202,6 +237,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_NETFILTER is not set
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
@@ -216,6 +252,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set
+# CONFIG_IEEE802154 is not set
# CONFIG_NET_SCHED is not set
# CONFIG_DCB is not set
@@ -341,7 +378,6 @@ CONFIG_MISC_DEVICES=y
# CONFIG_SCSI_DMA is not set
# CONFIG_SCSI_NETLINK is not set
CONFIG_NETDEVICES=y
-CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_DUMMY is not set
# CONFIG_BONDING is not set
# CONFIG_MACVLAN is not set
@@ -362,14 +398,11 @@ CONFIG_SMC91X=y
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_B44 is not set
+# CONFIG_KS8842 is not set
+# CONFIG_KS8851_MLL is not set
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
-
-#
-# Wireless LAN
-#
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_WLAN is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -430,11 +463,15 @@ CONFIG_RTC=y
# CONFIG_TCG_TPM is not set
# CONFIG_I2C is not set
# CONFIG_SPI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
# CONFIG_THERMAL is not set
-# CONFIG_THERMAL_HWMON is not set
# CONFIG_WATCHDOG is not set
CONFIG_SSB_POSSIBLE=y
@@ -451,22 +488,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_HTC_PASIC3 is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_REGULATOR is not set
-
-#
-# Multimedia devices
-#
-
-#
-# Multimedia core support
-#
-# CONFIG_VIDEO_DEV is not set
-# CONFIG_DVB_CORE is not set
-# CONFIG_VIDEO_MEDIA is not set
-
-#
-# Multimedia drivers
-#
-# CONFIG_DAB is not set
+# CONFIG_MEDIA_SUPPORT is not set
#
# Graphics support
@@ -490,11 +512,17 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_DMADEVICES is not set
# CONFIG_AUXDISPLAY is not set
# CONFIG_UIO is not set
+
+#
+# TI VLYNQ
+#
# CONFIG_STAGING is not set
#
# File systems
#
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y
@@ -539,6 +567,7 @@ CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
# CONFIG_NFS_V4 is not set
CONFIG_ROOT_NFS=y
+# CONFIG_NFSD is not set
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_NFS_COMMON=y
@@ -561,13 +590,13 @@ CONFIG_ENABLE_WARN_DEPRECATED=y
CONFIG_ENABLE_MUST_CHECK=y
CONFIG_FRAME_WARN=1024
CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_SAMPLES is not set
@@ -577,7 +606,11 @@ CONFIG_MAGIC_SYSRQ=y
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
# CONFIG_BINARY_PRINTF is not set
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 0b610f482abb..f49ac49e09ad 100644
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -165,7 +165,7 @@ static inline __attribute__((const))
unsigned long __ffs(unsigned long x)
{
int bit;
- asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x));
+ asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(x & -x) : "cc");
return bit;
}
@@ -177,7 +177,7 @@ static inline __attribute__((const))
int __ilog2_u32(u32 n)
{
int bit;
- asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n));
+ asm("bsch %2,%0" : "=r"(bit) : "0"(0), "r"(n) : "cc");
return bit;
}
diff --git a/arch/mn10300/include/asm/div64.h b/arch/mn10300/include/asm/div64.h
index 3a8329b3e869..34dcb8e68309 100644
--- a/arch/mn10300/include/asm/div64.h
+++ b/arch/mn10300/include/asm/div64.h
@@ -72,6 +72,7 @@ unsigned __muldiv64u(unsigned val, unsigned mult, unsigned div)
* MDR = MDR:val%div */
: "=r"(result)
: "0"(val), "ir"(mult), "r"(div)
+ : "cc"
);
return result;
@@ -92,6 +93,7 @@ signed __muldiv64s(signed val, signed mult, signed div)
* MDR = MDR:val%div */
: "=r"(result)
: "0"(val), "ir"(mult), "r"(div)
+ : "cc"
);
return result;
diff --git a/arch/mn10300/include/asm/system.h b/arch/mn10300/include/asm/system.h
index 8214fb7e7fe4..3636c054dcd5 100644
--- a/arch/mn10300/include/asm/system.h
+++ b/arch/mn10300/include/asm/system.h
@@ -143,6 +143,7 @@ do { \
" mov %0,epsw \n" \
: "=&d"(tmp) \
: "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw) \
+ : "cc" \
); \
} while (0)
diff --git a/arch/mn10300/include/asm/tlbflush.h b/arch/mn10300/include/asm/tlbflush.h
index e0239865abcb..1a7e29281c5d 100644
--- a/arch/mn10300/include/asm/tlbflush.h
+++ b/arch/mn10300/include/asm/tlbflush.h
@@ -22,7 +22,7 @@ do { \
" mov %0,%1 \n" \
: "=d"(w) \
: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV) \
- : "memory" \
+ : "cc", "memory" \
); \
} while (0)
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 167e10ff06d9..197a7af3dd8a 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -316,7 +316,7 @@ do { \
" .previous\n" \
: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
: "0"(__from), "1"(__to), "2"(size) \
- : "memory"); \
+ : "cc", "memory"); \
} \
} while (0)
@@ -352,7 +352,7 @@ do { \
" .previous\n" \
: "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\
: "0"(__from), "1"(__to), "2"(size) \
- : "memory"); \
+ : "cc", "memory"); \
} \
} while (0)
diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h
index 2a983931c11f..c05acb95c2a9 100644
--- a/arch/mn10300/include/asm/unistd.h
+++ b/arch/mn10300/include/asm/unistd.h
@@ -348,10 +348,11 @@
#define __NR_pwritev 335
#define __NR_rt_tgsigqueueinfo 336
#define __NR_perf_event_open 337
+#define __NR_recvmmsg 338
#ifdef __KERNEL__
-#define NR_syscalls 338
+#define NR_syscalls 339
/*
* specify the deprecated syscalls we want to support on this arch
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index c9ee6c009d79..88e3e1c3cc21 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -724,6 +724,7 @@ ENTRY(sys_call_table)
.long sys_pwritev /* 335 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
+ .long sys_recvmmsg
nr_syscalls=(.-sys_call_table)/4
diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c
index 229b710fc5d5..ef34d5a0f8bd 100644
--- a/arch/mn10300/kernel/mn10300-serial.c
+++ b/arch/mn10300/kernel/mn10300-serial.c
@@ -380,7 +380,8 @@ static int mask_test_and_clear(volatile u8 *ptr, u8 mask)
u32 epsw;
asm volatile(" bclr %1,(%2) \n"
" mov epsw,%0 \n"
- : "=d"(epsw) : "d"(mask), "a"(ptr));
+ : "=d"(epsw) : "d"(mask), "a"(ptr)
+ : "cc", "memory");
return !(epsw & EPSW_FLAG_Z);
}
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index a21f43bc68e2..717db14c2cc3 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -264,7 +264,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka,
/* this is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
- if (!on_sig_stack(sp))
+ if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
diff --git a/arch/mn10300/lib/checksum.c b/arch/mn10300/lib/checksum.c
index 274f29ec33c1..b6580f5d89ee 100644
--- a/arch/mn10300/lib/checksum.c
+++ b/arch/mn10300/lib/checksum.c
@@ -22,6 +22,7 @@ static inline unsigned short from32to16(__wsum sum)
" addc 0xffff,%0 \n"
: "=r" (sum)
: "r" (sum << 16), "0" (sum & 0xffff0000)
+ : "cc"
);
return sum >> 16;
}
diff --git a/arch/mn10300/lib/delay.c b/arch/mn10300/lib/delay.c
index cce66bc0822d..fdf6f710f94e 100644
--- a/arch/mn10300/lib/delay.c
+++ b/arch/mn10300/lib/delay.c
@@ -28,7 +28,8 @@ void __delay(unsigned long loops)
"2: add -1,%0 \n"
" bne 2b \n"
: "=&d" (d0)
- : "0" (loops));
+ : "0" (loops)
+ : "cc");
}
EXPORT_SYMBOL(__delay);
diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c
index a75b203059c1..7826e6c364e7 100644
--- a/arch/mn10300/lib/usercopy.c
+++ b/arch/mn10300/lib/usercopy.c
@@ -62,7 +62,7 @@ do { \
" .previous" \
:"=&r"(res), "=r"(count), "=&r"(w) \
:"i"(-EFAULT), "1"(count), "a"(src), "a"(dst) \
- :"memory"); \
+ : "memory", "cc"); \
} while (0)
long
@@ -109,7 +109,7 @@ do { \
".previous\n" \
: "+r"(size), "=&r"(w) \
: "a"(addr), "d"(0) \
- : "memory"); \
+ : "memory", "cc"); \
} while (0)
unsigned long
@@ -161,6 +161,6 @@ long strnlen_user(const char *s, long n)
".previous\n"
:"=d"(res), "=&r"(w)
:"0"(0), "a"(s), "r"(n)
- :"memory");
+ : "memory", "cc");
return res;
}
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
index f3649d8f50e3..ee82d624b3c6 100644
--- a/arch/mn10300/mm/dma-alloc.c
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -16,12 +16,24 @@
#include <linux/pci.h>
#include <asm/io.h>
+static unsigned long pci_sram_allocated = 0xbc000000;
+
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, int gfp)
{
unsigned long addr;
void *ret;
+ printk("dma_alloc_coherent(%s,%zu,,%x)\n", dev_name(dev), size, gfp);
+
+ if (0xbe000000 - pci_sram_allocated >= size) {
+ size = (size + 255) & ~255;
+ addr = pci_sram_allocated;
+ pci_sram_allocated += size;
+ ret = (void *) addr;
+ goto done;
+ }
+
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
@@ -41,7 +53,9 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
/* write back and evict all cache lines covering this region */
mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);
+done:
*dma_handle = virt_to_bus((void *) addr);
+ printk("dma_alloc_coherent() = %p [%x]\n", ret, *dma_handle);
return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
@@ -51,6 +65,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
{
unsigned long addr = (unsigned long) vaddr & ~0x20000000;
+ if (addr >= 0x9c000000)
+ return;
+
free_pages(addr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index ec1420562dc7..dd27a9a35152 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -118,8 +118,7 @@ void __init mem_init(void)
reservedpages << (PAGE_SHIFT - 10),
datasize >> 10,
initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT - 10))
- );
+ totalhigh_pages << (PAGE_SHIFT - 10));
}
/*
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index 30016251f658..6dffbf97ac26 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -633,13 +633,13 @@ static int misalignment_addr(unsigned long *registers, unsigned long sp,
goto displace_or_inc;
case SD24:
tmp = disp << 8;
- asm("asr 8,%0" : "=r"(tmp) : "0"(tmp));
+ asm("asr 8,%0" : "=r"(tmp) : "0"(tmp) : "cc");
disp = (long) tmp;
goto displace_or_inc;
case SIMM4_2:
tmp = opcode >> 4 & 0x0f;
tmp <<= 28;
- asm("asr 28,%0" : "=r"(tmp) : "0"(tmp));
+ asm("asr 28,%0" : "=r"(tmp) : "0"(tmp) : "cc");
disp = (long) tmp;
goto displace_or_inc;
case IMM8:
diff --git a/arch/mn10300/unit-asb2305/include/unit/serial.h b/arch/mn10300/unit-asb2305/include/unit/serial.h
index 3bfc90938787..8086cc092cec 100644
--- a/arch/mn10300/unit-asb2305/include/unit/serial.h
+++ b/arch/mn10300/unit-asb2305/include/unit/serial.h
@@ -11,7 +11,7 @@
#ifndef _ASM_UNIT_SERIAL_H
#define _ASM_UNIT_SERIAL_H
-#include <asm/cpu/cpu-regs.h>
+#include <asm/cpu-regs.h>
#include <proc/irq.h>
#include <linux/serial_reg.h>
diff --git a/arch/mn10300/unit-asb2305/include/unit/timex.h b/arch/mn10300/unit-asb2305/include/unit/timex.h
index a71c49aa85eb..d1c72d59fa9f 100644
--- a/arch/mn10300/unit-asb2305/include/unit/timex.h
+++ b/arch/mn10300/unit-asb2305/include/unit/timex.h
@@ -15,7 +15,7 @@
#include <linux/irq.h>
#endif /* __ASSEMBLY__ */
-#include <asm/cpu/timer-regs.h>
+#include <asm/timer-regs.h>
#include <unit/clock.h>
/*
diff --git a/arch/mn10300/unit-asb2305/leds.c b/arch/mn10300/unit-asb2305/leds.c
index d345ff9042d5..6f8de9954026 100644
--- a/arch/mn10300/unit-asb2305/leds.c
+++ b/arch/mn10300/unit-asb2305/leds.c
@@ -13,8 +13,8 @@
#include <linux/init.h>
#include <asm/io.h>
#include <asm/processor.h>
-#include <asm/cpu/intctl-regs.h>
-#include <asm/cpu/rtc-regs.h>
+#include <asm/intctl-regs.h>
+#include <asm/rtc-regs.h>
#include <unit/leds.h>
static const u8 asb2305_led_hex_tbl[16] = {
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index d100ca788468..78cd134ddf7d 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -218,45 +218,6 @@ void __init pcibios_resource_survey(void)
pcibios_allocate_resources(1);
}
-int pcibios_enable_resources(struct pci_dev *dev, int mask)
-{
- u16 cmd, old_cmd;
- int idx;
- struct resource *r;
-
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
- old_cmd = cmd;
-
- for (idx = 0; idx < 6; idx++) {
- /* Only set up the requested stuff */
- if (!(mask & (1 << idx)))
- continue;
-
- r = &dev->resource[idx];
-
- if (!r->start && r->end) {
- printk(KERN_ERR
- "PCI: Device %s not available because of"
- " resource collisions\n",
- pci_name(dev));
- return -EINVAL;
- }
-
- if (r->flags & IORESOURCE_IO)
- cmd |= PCI_COMMAND_IO;
- if (r->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
-
- if (dev->resource[PCI_ROM_RESOURCE].start)
- cmd |= PCI_COMMAND_MEMORY;
-
- if (cmd != old_cmd)
- pci_write_config_word(dev, PCI_COMMAND, cmd);
-
- return 0;
-}
-
/*
* If we set up a device for bus mastering, we need to check the latency
* timer as certain crappy BIOSes forget to set it properly.
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h
index 9763d1ce343a..c3fa294b6e28 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.h
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.h
@@ -34,7 +34,6 @@ extern unsigned int pci_probe;
extern unsigned int pcibios_max_latency;
extern void pcibios_resource_survey(void);
-extern int pcibios_enable_resources(struct pci_dev *dev, int mask);
/* pci.c */
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 07dbbcda3b2e..2cb7e75ba1c0 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -27,6 +27,29 @@ struct pci_bus *pci_root_bus;
struct pci_ops *pci_root_ops;
/*
+ * The accessible PCI window does not cover the entire CPU address space, but
+ * there are devices we want to access outside of that window, so we need to
+ * insert specific PCI bus resources instead of using the platform-level bus
+ * resources directly for the PCI root bus.
+ *
+ * These are configured and inserted by pcibios_init() and are attached to the
+ * root bus by pcibios_fixup_bus().
+ */
+static struct resource pci_ioport_resource = {
+ .name = "PCI IO",
+ .start = 0xbe000000,
+ .end = 0xbe03ffff,
+ .flags = IORESOURCE_IO,
+};
+
+static struct resource pci_iomem_resource = {
+ .name = "PCI mem",
+ .start = 0xb8000000,
+ .end = 0xbbffffff,
+ .flags = IORESOURCE_MEM,
+};
+
+/*
* Functions for accessing PCI configuration space
*/
@@ -279,7 +302,7 @@ static int __init pci_sanity_check(struct pci_ops *o)
(x == PCI_VENDOR_ID_INTEL || x == PCI_VENDOR_ID_COMPAQ)))
return 1;
- printk(KERN_ERROR "PCI: Sanity check failed\n");
+ printk(KERN_ERR "PCI: Sanity check failed\n");
return 0;
}
@@ -297,6 +320,7 @@ static int __init pci_check_direct(void)
printk(KERN_INFO "PCI: Using configuration ampci\n");
request_mem_region(0xBE040000, 256, "AMPCI bridge");
request_mem_region(0xBFFFFFF4, 12, "PCI ampci");
+ request_mem_region(0xBC000000, 32 * 1024 * 1024, "PCI SRAM");
return 0;
}
@@ -358,6 +382,11 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
+ if (bus->number == 0) {
+ bus->resource[0] = &pci_ioport_resource;
+ bus->resource[1] = &pci_iomem_resource;
+ }
+
if (bus->self) {
pci_read_bridge_bases(bus);
pcibios_fixup_device_resources(bus->self);
@@ -380,6 +409,11 @@ static int __init pcibios_init(void)
iomem_resource.start = 0xA0000000;
iomem_resource.end = 0xDFFFFFFF;
+ if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0)
+ panic("Unable to insert PCI IOMEM resource\n");
+ if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0)
+ panic("Unable to insert PCI IOPORT resource\n");
+
if (!pci_probe)
return 0;
@@ -391,32 +425,11 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware [mempage %08x]\n",
MEM_PAGING_REG);
- {
-#if 0
- static struct pci_bus am33_root_bus = {
- .children = LIST_HEAD_INIT(am33_root_bus.children),
- .devices = LIST_HEAD_INIT(am33_root_bus.devices),
- .number = 0,
- .secondary = 0,
- .resource = { &ioport_resource, &iomem_resource },
- };
-
- am33_root_bus.ops = pci_root_ops;
- list_add_tail(&am33_root_bus.node, &pci_root_buses);
-
- am33_root_bus.subordinate = pci_do_scan_bus(0);
-
- pci_root_bus = &am33_root_bus;
-#else
- pci_root_bus = pci_scan_bus(0, &pci_direct_ampci, NULL);
-#endif
- }
+ pci_root_bus = pci_scan_bus(0, &pci_direct_ampci, NULL);
pcibios_irq_init();
pcibios_fixup_irqs();
-#if 0
pcibios_resource_survey();
-#endif
return 0;
}
@@ -440,7 +453,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
{
int err;
- err = pcibios_enable_resources(dev, mask);
+ err = pci_enable_resources(dev, mask);
if (err == 0)
pcibios_enable_irq(dev);
return err;
@@ -455,6 +468,7 @@ static void __init unit_disable_pcnet(struct pci_bus *bus, struct pci_ops *o)
bus->number = 0;
+ o->read (bus, PCI_DEVFN(2, 0), PCI_VENDOR_ID, 4, &x);
o->read (bus, PCI_DEVFN(2, 0), PCI_COMMAND, 2, &x);
x |= PCI_COMMAND_MASTER |
PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
diff --git a/arch/mn10300/unit-asb2305/unit-init.c b/arch/mn10300/unit-asb2305/unit-init.c
index 1c452cc3f6e9..a76c8e0ab90f 100644
--- a/arch/mn10300/unit-asb2305/unit-init.c
+++ b/arch/mn10300/unit-asb2305/unit-init.c
@@ -15,9 +15,8 @@
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/processor.h>
-#include <asm/cpu/intctl-regs.h>
-#include <asm/cpu/rtc-regs.h>
-#include <asm/cpu/serial-regs.h>
+#include <asm/intctl-regs.h>
+#include <asm/serial-regs.h>
#include <unit/serial.h>
/*
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index cda158318c62..1ce7d2851d90 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -811,8 +811,10 @@
#define __NR_pwritev (__NR_Linux + 316)
#define __NR_rt_tgsigqueueinfo (__NR_Linux + 317)
#define __NR_perf_event_open (__NR_Linux + 318)
+#define __NR_recvmmsg (__NR_Linux + 319)
+#define __NR_accept4 (__NR_Linux + 320)
-#define __NR_Linux_syscalls (__NR_perf_event_open + 1)
+#define __NR_Linux_syscalls (__NR_accept4 + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 01c4fcf8f481..de5f6dab48b7 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -417,6 +417,8 @@
ENTRY_COMP(pwritev)
ENTRY_COMP(rt_tgsigqueueinfo)
ENTRY_SAME(perf_event_open)
+ ENTRY_COMP(recvmmsg)
+ ENTRY_SAME(accept4) /* 320 */
/* Nothing yet */
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
index d172d4245cdc..f8c45cc2947d 100644
--- a/arch/parisc/lib/fixup.S
+++ b/arch/parisc/lib/fixup.S
@@ -36,8 +36,8 @@
#endif
/* t2 = &__per_cpu_offset[smp_processor_id()]; */
LDREGX \t2(\t1),\t2
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t1
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t1
/* t1 = &__get_cpu_var(exception_data) */
add,l \t1,\t2,\t1
/* t1 = t1->fault_ip */
@@ -46,8 +46,8 @@
#else
.macro get_fault_ip t1 t2
/* t1 = &__get_cpu_var(exception_data) */
- addil LT%per_cpu__exception_data,%r27
- LDREG RT%per_cpu__exception_data(%r1),\t2
+ addil LT%exception_data,%r27
+ LDREG RT%exception_data(%r1),\t2
/* t1 = t2->fault_ip */
LDREG EXCDATA_IP(\t2), \t1
.endm
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index 390512ae7f86..f4594ed09a20 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -43,6 +43,9 @@ gzip=.gz
# cross-compilation prefix
CROSS=
+# mkimage wrapper script
+MKIMAGE=$srctree/scripts/mkuboot.sh
+
# directory for object and other files used by this script
object=arch/powerpc/boot
objbin=$object
@@ -267,7 +270,7 @@ membase=`${CROSS}objdump -p "$kernel" | grep -m 1 LOAD | awk '{print $7}'`
case "$platform" in
uboot)
rm -f "$ofile"
- mkimage -A ppc -O linux -T kernel -C gzip -a $membase -e $membase \
+ ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a $membase -e $membase \
$uboot_version -d "$vmz" "$ofile"
if [ -z "$cacheit" ]; then
rm -f "$vmz"
@@ -327,7 +330,7 @@ coff)
;;
cuboot*)
gzip -f -9 "$ofile"
- mkimage -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \
+ ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \
$uboot_version -d "$ofile".gz "$ofile"
;;
treeboot*)
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 0396ce7bffc6..ff9bdb28197d 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc4
-# Thu Oct 15 10:33:22 2009
+# Linux kernel version: 2.6.33-rc2
+# Wed Dec 30 14:45:07 2009
#
# CONFIG_PPC64 is not set
@@ -36,6 +36,7 @@ CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
@@ -58,6 +59,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DTC=y
CONFIG_DEFAULT_UIMAGE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -85,6 +87,7 @@ CONFIG_SYSVIPC_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -166,14 +169,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -189,6 +219,7 @@ CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_MEDIA5200 is not set
# CONFIG_PPC_MPC5200_BUGFIX is not set
# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_MPC5200_LPBFIFO is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_CELL is not set
# CONFIG_PPC_CELL_NATIVE is not set
@@ -243,6 +274,7 @@ CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
+CONFIG_SPARSE_IRQ=y
CONFIG_MAX_ACTIVE_REGIONS=32
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
@@ -259,8 +291,6 @@ CONFIG_MIGRATION=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_PPC_4K_PAGES=y
@@ -273,6 +303,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_EXTRA_TARGETS=""
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
+# CONFIG_HIBERNATION is not set
# CONFIG_PM_RUNTIME is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
@@ -378,7 +409,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -489,6 +526,10 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
@@ -587,8 +628,8 @@ CONFIG_FEC_MPC52xx_MDIO=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -643,6 +684,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=57600
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -688,7 +730,6 @@ CONFIG_I2C_MPC=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_DS1682 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -735,11 +776,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
+# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -797,7 +840,6 @@ CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_HWA_HCD is not set
-# CONFIG_USB_MUSB_HDRC is not set
#
# USB Device Class drivers
@@ -1137,6 +1179,7 @@ CONFIG_SCHED_DEBUG=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
@@ -1180,7 +1223,11 @@ CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index f5c07fd72239..7b3f4d0ed404 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc4
-# Thu Oct 15 10:33:24 2009
+# Linux kernel version: 2.6.33-rc2
+# Wed Dec 30 14:45:09 2009
#
# CONFIG_PPC64 is not set
@@ -36,6 +36,7 @@ CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
@@ -58,6 +59,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DTC=y
CONFIG_DEFAULT_UIMAGE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
@@ -86,6 +88,7 @@ CONFIG_SYSVIPC_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -173,14 +176,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
CONFIG_FREEZER=y
#
@@ -196,6 +226,7 @@ CONFIG_PPC_LITE5200=y
# CONFIG_PPC_MEDIA5200 is not set
# CONFIG_PPC_MPC5200_BUGFIX is not set
# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_MPC5200_LPBFIFO is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_CELL is not set
# CONFIG_PPC_CELL_NATIVE is not set
@@ -252,6 +283,7 @@ CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
+CONFIG_SPARSE_IRQ=y
CONFIG_MAX_ACTIVE_REGIONS=32
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
@@ -268,8 +300,6 @@ CONFIG_MIGRATION=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_PPC_4K_PAGES=y
@@ -285,6 +315,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
# CONFIG_PM_RUNTIME is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
@@ -398,7 +429,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -433,6 +470,10 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
CONFIG_BLK_DEV_RAM=y
@@ -443,6 +484,7 @@ CONFIG_BLK_DEV_RAM_SIZE=32768
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
# CONFIG_PHANTOM is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
@@ -450,6 +492,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
#
@@ -502,7 +545,9 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_BE2ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
@@ -541,6 +586,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_BFA_FC is not set
# CONFIG_SCSI_DH is not set
@@ -596,15 +642,16 @@ CONFIG_PATA_MPC52xx=y
# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_SIL680 is not set
# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
# CONFIG_PATA_PLATFORM is not set
@@ -726,8 +773,10 @@ CONFIG_CHELSIO_T3_DEPENDS=y
# CONFIG_BE2NET is not set
# CONFIG_TR is not set
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -741,6 +790,7 @@ CONFIG_WLAN=y
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -778,6 +828,7 @@ CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -836,11 +887,6 @@ CONFIG_I2C_MPC=y
# CONFIG_I2C_TAOS_EVM is not set
#
-# Graphics adapter I2C/DDC channel drivers
-#
-# CONFIG_I2C_VOODOO3 is not set
-
-#
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
@@ -849,7 +895,6 @@ CONFIG_I2C_MPC=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_DS1682 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -884,11 +929,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
+# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1129,6 +1176,7 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
@@ -1172,7 +1220,11 @@ CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 4f77a1bdc8f9..eaae2d469aa0 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc4
-# Thu Oct 15 10:33:22 2009
+# Linux kernel version: 2.6.33-rc2
+# Wed Dec 30 14:45:08 2009
#
# CONFIG_PPC64 is not set
@@ -36,6 +36,7 @@ CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
@@ -58,6 +59,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DTC=y
CONFIG_DEFAULT_UIMAGE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -85,6 +87,7 @@ CONFIG_SYSVIPC_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -166,14 +169,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -189,6 +219,7 @@ CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_MEDIA5200 is not set
# CONFIG_PPC_MPC5200_BUGFIX is not set
# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_MPC5200_LPBFIFO is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_CELL is not set
# CONFIG_PPC_CELL_NATIVE is not set
@@ -244,6 +275,7 @@ CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
+CONFIG_SPARSE_IRQ=y
CONFIG_MAX_ACTIVE_REGIONS=32
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
@@ -260,8 +292,6 @@ CONFIG_MIGRATION=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_PPC_4K_PAGES=y
@@ -274,6 +304,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_EXTRA_TARGETS=""
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
+# CONFIG_HIBERNATION is not set
# CONFIG_PM_RUNTIME is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
@@ -379,7 +410,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -490,6 +527,10 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
@@ -499,9 +540,11 @@ CONFIG_BLK_DEV_RAM_SIZE=32768
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
# CONFIG_ICS932S401 is not set
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
#
@@ -610,8 +653,7 @@ CONFIG_FEC_MPC52xx_MDIO=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -657,6 +699,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -700,7 +743,6 @@ CONFIG_I2C_MPC=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_DS1682 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -745,6 +787,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM73 is not set
# CONFIG_SENSORS_LM75 is not set
# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
@@ -805,11 +848,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
+# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -881,6 +926,7 @@ CONFIG_RTC_DRV_DS1307=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
@@ -902,7 +948,9 @@ CONFIG_RTC_DRV_DS1307=y
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -1172,6 +1220,7 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
@@ -1215,7 +1264,11 @@ CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index f9168c1a2fa5..1742c0200b75 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc4
-# Thu Oct 15 10:33:25 2009
+# Linux kernel version: 2.6.33-rc2
+# Wed Dec 30 14:45:10 2009
#
# CONFIG_PPC64 is not set
@@ -36,6 +36,7 @@ CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
@@ -58,6 +59,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DTC=y
CONFIG_DEFAULT_UIMAGE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -87,6 +89,7 @@ CONFIG_POSIX_MQUEUE_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -172,14 +175,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
-# CONFIG_DEFAULT_AS is not set
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_SPIN_UNLOCK is not set
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_READ_UNLOCK is not set
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQ is not set
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+# CONFIG_INLINE_WRITE_UNLOCK is not set
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -195,6 +225,7 @@ CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_MEDIA5200 is not set
# CONFIG_PPC_MPC5200_BUGFIX is not set
# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_MPC5200_LPBFIFO is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_CELL is not set
# CONFIG_PPC_CELL_NATIVE is not set
@@ -251,6 +282,7 @@ CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
+CONFIG_SPARSE_IRQ=y
CONFIG_MAX_ACTIVE_REGIONS=32
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
@@ -267,8 +299,6 @@ CONFIG_MIGRATION=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_PPC_4K_PAGES=y
@@ -385,7 +415,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -585,15 +621,16 @@ CONFIG_PATA_MPC52xx=m
# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_SIL680 is not set
# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
# CONFIG_PATA_PLATFORM is not set
@@ -673,8 +710,11 @@ CONFIG_FEC_MPC52xx_MDIO=y
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -697,6 +737,7 @@ CONFIG_WLAN=y
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -734,6 +775,7 @@ CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
# CONFIG_LEGACY_PTYS is not set
@@ -791,11 +833,6 @@ CONFIG_I2C_MPC=y
# CONFIG_I2C_TINY_USB is not set
#
-# Graphics adapter I2C/DDC channel drivers
-#
-# CONFIG_I2C_VOODOO3 is not set
-
-#
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
@@ -804,7 +841,6 @@ CONFIG_I2C_MPC=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_DS1682 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -839,11 +875,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
+# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -908,7 +946,6 @@ CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_WHCI_HCD is not set
# CONFIG_USB_HWA_HCD is not set
-# CONFIG_USB_MUSB_HDRC is not set
#
# USB Device Class drivers
@@ -1011,6 +1048,7 @@ CONFIG_RTC_INTF_DEV=y
CONFIG_RTC_DRV_PCF8563=m
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
@@ -1032,7 +1070,9 @@ CONFIG_RTC_DRV_PCF8563=m
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -1243,10 +1283,11 @@ CONFIG_FRAME_WARN=1024
# CONFIG_DEBUG_FS is not set
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
-# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_BUGVERBOSE=y
# CONFIG_DEBUG_MEMORY_INIT is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
@@ -1269,7 +1310,11 @@ CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
# CONFIG_CRYPTO is not set
CONFIG_PPC_CLOCK=y
CONFIG_PPC_LIB_RHEAP=y
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 75c835c2ae66..3972438db719 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc4
-# Thu Oct 15 10:33:23 2009
+# Linux kernel version: 2.6.33-rc2
+# Wed Dec 30 14:45:09 2009
#
# CONFIG_PPC64 is not set
@@ -36,6 +36,7 @@ CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
@@ -58,6 +59,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DTC=y
CONFIG_DEFAULT_UIMAGE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -85,6 +87,7 @@ CONFIG_SYSVIPC_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -171,14 +174,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
# CONFIG_FREEZER is not set
#
@@ -194,6 +224,7 @@ CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_MEDIA5200 is not set
CONFIG_PPC_MPC5200_BUGFIX=y
# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_MPC5200_LPBFIFO is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_CELL is not set
# CONFIG_PPC_CELL_NATIVE is not set
@@ -249,6 +280,7 @@ CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
+CONFIG_SPARSE_IRQ=y
CONFIG_MAX_ACTIVE_REGIONS=32
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
@@ -265,8 +297,6 @@ CONFIG_MIGRATION=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_PPC_4K_PAGES=y
@@ -279,6 +309,7 @@ CONFIG_PROC_DEVICETREE=y
CONFIG_EXTRA_TARGETS=""
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
+# CONFIG_HIBERNATION is not set
# CONFIG_PM_RUNTIME is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
@@ -384,7 +415,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -496,6 +533,10 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_UB is not set
CONFIG_BLK_DEV_RAM=y
@@ -607,8 +648,8 @@ CONFIG_FEC_MPC52xx_MDIO=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -663,6 +704,7 @@ CONFIG_SERIAL_CORE_CONSOLE=y
CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -708,7 +750,6 @@ CONFIG_I2C_MPC=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_DS1682 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -753,6 +794,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_GL520SM is not set
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM73 is not set
# CONFIG_SENSORS_LM75 is not set
# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
@@ -818,11 +860,13 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
+# CONFIG_MFD_88PM8607 is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -880,7 +924,6 @@ CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
# CONFIG_USB_SL811_HCD is not set
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_HWA_HCD is not set
-# CONFIG_USB_MUSB_HDRC is not set
#
# USB Device Class drivers
@@ -984,6 +1027,7 @@ CONFIG_RTC_DRV_DS1307=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
@@ -1005,7 +1049,9 @@ CONFIG_RTC_DRV_DS1307=y
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -1275,6 +1321,7 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
@@ -1318,7 +1365,11 @@ CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 523d5fe18c0e..61cf73d0000f 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.32-rc4
-# Thu Oct 15 10:33:21 2009
+# Linux kernel version: 2.6.33-rc2
+# Wed Dec 30 15:08:52 2009
#
# CONFIG_PPC64 is not set
@@ -36,6 +36,7 @@ CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
# CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is not set
CONFIG_IRQ_PER_CPU=y
+CONFIG_NR_IRQS=512
CONFIG_STACKTRACE_SUPPORT=y
CONFIG_HAVE_LATENCYTOP_SUPPORT=y
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
@@ -59,6 +60,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_GENERIC_BUG=y
CONFIG_DTC=y
CONFIG_DEFAULT_UIMAGE=y
+CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y
# CONFIG_PPC_DCR_NATIVE is not set
# CONFIG_PPC_DCR_MMIO is not set
@@ -87,6 +89,7 @@ CONFIG_SYSVIPC_SYSCTL=y
#
CONFIG_TREE_RCU=y
# CONFIG_TREE_PREEMPT_RCU is not set
+# CONFIG_TINY_RCU is not set
# CONFIG_RCU_TRACE is not set
CONFIG_RCU_FANOUT=32
# CONFIG_RCU_FANOUT_EXACT is not set
@@ -170,14 +173,41 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
-# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_INLINE_SPIN_TRYLOCK is not set
+# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK is not set
+# CONFIG_INLINE_SPIN_LOCK_BH is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQ is not set
+# CONFIG_INLINE_SPIN_LOCK_IRQSAVE is not set
+CONFIG_INLINE_SPIN_UNLOCK=y
+# CONFIG_INLINE_SPIN_UNLOCK_BH is not set
+CONFIG_INLINE_SPIN_UNLOCK_IRQ=y
+# CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_READ_TRYLOCK is not set
+# CONFIG_INLINE_READ_LOCK is not set
+# CONFIG_INLINE_READ_LOCK_BH is not set
+# CONFIG_INLINE_READ_LOCK_IRQ is not set
+# CONFIG_INLINE_READ_LOCK_IRQSAVE is not set
+CONFIG_INLINE_READ_UNLOCK=y
+# CONFIG_INLINE_READ_UNLOCK_BH is not set
+CONFIG_INLINE_READ_UNLOCK_IRQ=y
+# CONFIG_INLINE_READ_UNLOCK_IRQRESTORE is not set
+# CONFIG_INLINE_WRITE_TRYLOCK is not set
+# CONFIG_INLINE_WRITE_LOCK is not set
+# CONFIG_INLINE_WRITE_LOCK_BH is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQ is not set
+# CONFIG_INLINE_WRITE_LOCK_IRQSAVE is not set
+CONFIG_INLINE_WRITE_UNLOCK=y
+# CONFIG_INLINE_WRITE_UNLOCK_BH is not set
+CONFIG_INLINE_WRITE_UNLOCK_IRQ=y
+# CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE is not set
+# CONFIG_MUTEX_SPIN_ON_OWNER is not set
CONFIG_FREEZER=y
#
@@ -193,6 +223,7 @@ CONFIG_PPC_LITE5200=y
CONFIG_PPC_MEDIA5200=y
CONFIG_PPC_MPC5200_BUGFIX=y
CONFIG_PPC_MPC5200_GPIO=y
+CONFIG_PPC_MPC5200_LPBFIFO=m
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_CELL is not set
# CONFIG_PPC_CELL_NATIVE is not set
@@ -211,6 +242,7 @@ CONFIG_PPC_OF_BOOT_TRAMPOLINE=y
# CONFIG_PPC_I8259 is not set
CONFIG_PPC_RTAS=y
# CONFIG_RTAS_ERROR_LOGGING is not set
+# CONFIG_PPC_RTAS_DAEMON is not set
CONFIG_RTAS_PROC=y
# CONFIG_MMIO_NVRAM is not set
# CONFIG_PPC_MPC106 is not set
@@ -223,6 +255,7 @@ CONFIG_RTAS_PROC=y
CONFIG_PPC_BESTCOMM=y
CONFIG_PPC_BESTCOMM_ATA=y
CONFIG_PPC_BESTCOMM_FEC=y
+CONFIG_PPC_BESTCOMM_GEN_BD=m
CONFIG_SIMPLE_GPIO=y
#
@@ -253,6 +286,7 @@ CONFIG_ARCH_HAS_WALK_MEMORY=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
# CONFIG_KEXEC is not set
# CONFIG_CRASH_DUMP is not set
+CONFIG_SPARSE_IRQ=y
CONFIG_MAX_ACTIVE_REGIONS=32
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_POPULATES_NODE_MAP=y
@@ -269,8 +303,6 @@ CONFIG_MIGRATION=y
CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
-CONFIG_HAVE_MLOCK=y
-CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_PPC_4K_PAGES=y
@@ -286,6 +318,7 @@ CONFIG_PM=y
CONFIG_PM_SLEEP=y
CONFIG_SUSPEND=y
CONFIG_SUSPEND_FREEZER=y
+# CONFIG_HIBERNATION is not set
# CONFIG_PM_RUNTIME is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
@@ -399,7 +432,13 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
-# CONFIG_WIRELESS is not set
+CONFIG_WIRELESS=y
+# CONFIG_CFG80211 is not set
+# CONFIG_LIB80211 is not set
+
+#
+# CFG80211 needs to be enabled for MAC80211
+#
# CONFIG_WIMAX is not set
# CONFIG_RFKILL is not set
# CONFIG_NET_9P is not set
@@ -530,6 +569,10 @@ CONFIG_BLK_DEV=y
# CONFIG_BLK_DEV_COW_COMMON is not set
CONFIG_BLK_DEV_LOOP=y
# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+
+#
+# DRBD disabled because PROC_FS, INET or CONNECTOR not selected
+#
# CONFIG_BLK_DEV_NBD is not set
# CONFIG_BLK_DEV_SX8 is not set
# CONFIG_BLK_DEV_UB is not set
@@ -541,6 +584,7 @@ CONFIG_BLK_DEV_RAM_SIZE=32768
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_BLK_DEV_HD is not set
CONFIG_MISC_DEVICES=y
+# CONFIG_AD525X_DPOT is not set
# CONFIG_PHANTOM is not set
# CONFIG_SGI_IOC4 is not set
# CONFIG_TIFM_CORE is not set
@@ -548,6 +592,8 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_ISL29003 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_TI_DAC7512 is not set
# CONFIG_C2PORT is not set
#
@@ -600,7 +646,9 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_BNX2_ISCSI is not set
# CONFIG_BE2ISCSI is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_HPSA is not set
# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_AACRAID is not set
# CONFIG_SCSI_AIC7XXX is not set
@@ -639,6 +687,7 @@ CONFIG_SCSI_LOWLEVEL=y
# CONFIG_SCSI_NSP32 is not set
# CONFIG_SCSI_DEBUG is not set
# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
# CONFIG_SCSI_SRP is not set
# CONFIG_SCSI_BFA_FC is not set
# CONFIG_SCSI_DH is not set
@@ -694,15 +743,16 @@ CONFIG_PATA_MPC52xx=y
# CONFIG_PATA_NS87415 is not set
# CONFIG_PATA_OPTI is not set
# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_PDC_OLD is not set
# CONFIG_PATA_RADISYS is not set
# CONFIG_PATA_RDC is not set
# CONFIG_PATA_RZ1000 is not set
# CONFIG_PATA_SC1200 is not set
# CONFIG_PATA_SERVERWORKS is not set
-# CONFIG_PATA_PDC2027X is not set
# CONFIG_PATA_SIL680 is not set
# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
# CONFIG_PATA_VIA is not set
# CONFIG_PATA_WINBOND is not set
CONFIG_PATA_PLATFORM=y
@@ -785,8 +835,11 @@ CONFIG_FEC_MPC52xx_MDIO=y
# CONFIG_NETDEV_10000 is not set
# CONFIG_TR is not set
CONFIG_WLAN=y
-# CONFIG_WLAN_PRE80211 is not set
-# CONFIG_WLAN_80211 is not set
+# CONFIG_AIRO is not set
+# CONFIG_ATMEL is not set
+# CONFIG_PRISM54 is not set
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_HOSTAP is not set
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -809,6 +862,7 @@ CONFIG_WLAN=y
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_VMXNET3 is not set
# CONFIG_ISDN is not set
# CONFIG_PHONE is not set
@@ -818,6 +872,7 @@ CONFIG_WLAN=y
CONFIG_INPUT=y
# CONFIG_INPUT_FF_MEMLESS is not set
# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
#
# Userland interfaces
@@ -874,6 +929,7 @@ CONFIG_SERIAL_MPC52xx=y
CONFIG_SERIAL_MPC52xx_CONSOLE=y
CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
CONFIG_LEGACY_PTYS=y
@@ -934,11 +990,6 @@ CONFIG_I2C_MPC=y
# CONFIG_I2C_TINY_USB is not set
#
-# Graphics adapter I2C/DDC channel drivers
-#
-# CONFIG_I2C_VOODOO3 is not set
-
-#
# Other I2C/SMBus bus drivers
#
# CONFIG_I2C_PCA_PLATFORM is not set
@@ -947,7 +998,6 @@ CONFIG_I2C_MPC=y
#
# Miscellaneous I2C Chip support
#
-# CONFIG_DS1682 is not set
# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
@@ -962,7 +1012,10 @@ CONFIG_SPI_MASTER=y
#
# CONFIG_SPI_BITBANG is not set
# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_MPC52xx is not set
CONFIG_SPI_MPC52xx_PSC=m
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_DESIGNWARE is not set
#
# SPI Protocol Masters
@@ -995,6 +1048,7 @@ CONFIG_GPIOLIB=y
#
# PCI GPIO expanders:
#
+# CONFIG_GPIO_CS5535 is not set
# CONFIG_GPIO_BT8XX is not set
# CONFIG_GPIO_LANGWELL is not set
@@ -1042,6 +1096,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_IT87 is not set
# CONFIG_SENSORS_LM63 is not set
# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
# CONFIG_SENSORS_LM75 is not set
# CONFIG_SENSORS_LM77 is not set
# CONFIG_SENSORS_LM78 is not set
@@ -1083,6 +1138,7 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_W83627HF is not set
# CONFIG_SENSORS_W83627EHF is not set
# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
# CONFIG_THERMAL is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
@@ -1122,6 +1178,7 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
+# CONFIG_PMIC_ADP5520 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
@@ -1129,6 +1186,8 @@ CONFIG_SSB_POSSIBLE=y
# CONFIG_MFD_MC13783 is not set
# CONFIG_AB3100_CORE is not set
# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_88PM8607 is not set
+# CONFIG_AB4500_CORE is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1322,7 +1381,6 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
# CONFIG_USB_R8A66597_HCD is not set
# CONFIG_USB_WHCI_HCD is not set
# CONFIG_USB_HWA_HCD is not set
-# CONFIG_USB_MUSB_HDRC is not set
#
# USB Device Class drivers
@@ -1440,6 +1498,7 @@ CONFIG_RTC_DRV_DS1307=y
# CONFIG_RTC_DRV_PCF8563 is not set
# CONFIG_RTC_DRV_PCF8583 is not set
# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_BQ32K is not set
# CONFIG_RTC_DRV_S35390A is not set
# CONFIG_RTC_DRV_FM3130 is not set
# CONFIG_RTC_DRV_RX8581 is not set
@@ -1469,7 +1528,9 @@ CONFIG_RTC_DRV_DS1307=y
# CONFIG_RTC_DRV_M48T86 is not set
# CONFIG_RTC_DRV_M48T35 is not set
# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
# CONFIG_RTC_DRV_V3020 is not set
#
@@ -1731,6 +1792,7 @@ CONFIG_DEBUG_INFO=y
# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
# CONFIG_FAULT_INJECTION is not set
# CONFIG_LATENCYTOP is not set
+# CONFIG_SYSCTL_SYSCALL_CHECK is not set
# CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
@@ -1774,7 +1836,11 @@ CONFIG_PRINT_STACK_DEPTH=64
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
# CONFIG_SECURITYFS is not set
-# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_DEFAULT_SECURITY_SELINUX is not set
+# CONFIG_DEFAULT_SECURITY_SMACK is not set
+# CONFIG_DEFAULT_SECURITY_TOMOYO is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
CONFIG_CRYPTO=y
#
diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
index 916369575c97..bca8fdcd2542 100644
--- a/arch/powerpc/include/asm/kmap_types.h
+++ b/arch/powerpc/include/asm/kmap_types.h
@@ -26,6 +26,7 @@ enum km_type {
KM_SOFTIRQ1,
KM_PPC_SYNC_PAGE,
KM_PPC_SYNC_ICACHE,
+ KM_KDB,
KM_TYPE_NR
};
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index af2abe74f544..aadf2dd6f84e 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -97,4 +97,10 @@
#define RESUME_HOST RESUME_FLAG_HOST
#define RESUME_HOST_NV (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+#define KVM_GUEST_MODE_NONE 0
+#define KVM_GUEST_MODE_GUEST 1
+#define KVM_GUEST_MODE_SKIP 2
+
+#define KVM_INST_FETCH_FAILED -1
+
#endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 74b7369770d0..c7db69f1e779 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s_64_asm.h>
struct kvmppc_slb {
u64 esid;
@@ -33,7 +34,8 @@ struct kvmppc_slb {
bool Ks;
bool Kp;
bool nx;
- bool large;
+ bool large; /* PTEs are 16MB */
+ bool tb; /* 1TB segment */
bool class;
};
@@ -69,6 +71,7 @@ struct kvmppc_sid_map {
struct kvmppc_vcpu_book3s {
struct kvm_vcpu vcpu;
+ struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
struct kvmppc_sid_map sid_map[SID_MAP_NUM];
struct kvmppc_slb slb[64];
struct {
@@ -89,6 +92,7 @@ struct kvmppc_vcpu_book3s {
u64 vsid_next;
u64 vsid_max;
int context_id;
+ ulong prog_flags; /* flags to inject when giving a 700 trap */
};
#define CONTEXT_HOST 0
@@ -119,6 +123,7 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
extern u32 kvmppc_trampoline_lowmem;
extern u32 kvmppc_trampoline_enter;
+extern void kvmppc_rmcall(ulong srr0, ulong srr1);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
index 2e06ee8184ef..183461b48407 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
@@ -20,6 +20,8 @@
#ifndef __ASM_KVM_BOOK3S_ASM_H__
#define __ASM_KVM_BOOK3S_ASM_H__
+#ifdef __ASSEMBLY__
+
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_asm.h>
@@ -55,4 +57,20 @@ kvmppc_resume_\intno:
#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
+#else /*__ASSEMBLY__ */
+
+struct kvmppc_book3s_shadow_vcpu {
+ ulong gpr[14];
+ u32 cr;
+ u32 xer;
+ ulong host_r1;
+ ulong host_r2;
+ ulong handler;
+ ulong scratch0;
+ ulong scratch1;
+ ulong vmhandler;
+};
+
+#endif /*__ASSEMBLY__ */
+
#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1201f62d0d73..f7215e622dfd 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -167,6 +167,7 @@ struct kvm_vcpu_arch {
ulong trampoline_lowmem;
ulong trampoline_enter;
ulong highmem_handler;
+ ulong rmcall;
ulong host_paca_phys;
struct kvmppc_mmu mmu;
#endif
@@ -175,10 +176,13 @@ struct kvm_vcpu_arch {
ulong gpr[32];
ulong pc;
- u32 cr;
ulong ctr;
ulong lr;
+
+#ifdef CONFIG_BOOKE
ulong xer;
+ u32 cr;
+#endif
ulong msr;
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 269ee46ab028..09816da9e950 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -80,8 +80,9 @@ extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
-extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
+extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq);
@@ -95,4 +96,78 @@ extern void kvmppc_booke_exit(void);
extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+#ifdef CONFIG_PPC_BOOK3S
+
+/* We assume we're always acting on the current vcpu */
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ if ( num < 14 )
+ get_paca()->shadow_vcpu.gpr[num] = val;
+ else
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ if ( num < 14 )
+ return get_paca()->shadow_vcpu.gpr[num];
+ else
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ get_paca()->shadow_vcpu.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return get_paca()->shadow_vcpu.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ get_paca()->shadow_vcpu.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return get_paca()->shadow_vcpu.xer;
+}
+
+#else
+
+static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
+{
+ vcpu->arch.gpr[num] = val;
+}
+
+static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
+{
+ return vcpu->arch.gpr[num];
+}
+
+static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.cr = val;
+}
+
+static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.cr;
+}
+
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+{
+ vcpu->arch.xer = val;
+}
+
+static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.xer;
+}
+
+#endif
+
#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index 84b457a3c1bc..227753d288f6 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -172,29 +172,4 @@ static __inline__ long local_dec_if_positive(local_t *l)
#define __local_add(i,l) ((l)->a.counter+=(i))
#define __local_sub(i,l) ((l)->a.counter-=(i))
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
- ({ local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; })
-#define cpu_local_wrap(l) \
- ({ preempt_disable(); \
- l; \
- preempt_enable(); }) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var(l)))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var(l), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var(l)))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var(l)))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var(l)))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var(l)))
-
-#define __cpu_local_inc(l) cpu_local_inc(l)
-#define __cpu_local_dec(l) cpu_local_dec(l)
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ARCH_POWERPC_LOCAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 5e9b4ef71415..d8a693109c82 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -19,6 +19,9 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/exception-64e.h>
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#include <asm/kvm_book3s_64_asm.h>
+#endif
register struct paca_struct *local_paca asm("r13");
@@ -135,6 +138,8 @@ struct paca_struct {
u64 esid;
u64 vsid;
} kvm_slb[64]; /* guest SLB */
+ /* We use this to store guest state in */
+ struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
u8 kvm_slb_max; /* highest used guest slb entry */
u8 kvm_in_guest; /* are we inside the guest? */
#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index bc8dd53f718a..5572e86223f4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -426,6 +426,10 @@
#define SRR1_WAKEMT 0x00280000 /* mtctrl */
#define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
#define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */
+#define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */
+#define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */
+#define SRR1_PROGTRAP 0x00020000 /* Trap */
+#define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index a6c2b63227b3..ee9935442f0e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -194,6 +194,30 @@ int main(void)
DEFINE(PACA_KVM_IN_GUEST, offsetof(struct paca_struct, kvm_in_guest));
DEFINE(PACA_KVM_SLB, offsetof(struct paca_struct, kvm_slb));
DEFINE(PACA_KVM_SLB_MAX, offsetof(struct paca_struct, kvm_slb_max));
+ DEFINE(PACA_KVM_CR, offsetof(struct paca_struct, shadow_vcpu.cr));
+ DEFINE(PACA_KVM_XER, offsetof(struct paca_struct, shadow_vcpu.xer));
+ DEFINE(PACA_KVM_R0, offsetof(struct paca_struct, shadow_vcpu.gpr[0]));
+ DEFINE(PACA_KVM_R1, offsetof(struct paca_struct, shadow_vcpu.gpr[1]));
+ DEFINE(PACA_KVM_R2, offsetof(struct paca_struct, shadow_vcpu.gpr[2]));
+ DEFINE(PACA_KVM_R3, offsetof(struct paca_struct, shadow_vcpu.gpr[3]));
+ DEFINE(PACA_KVM_R4, offsetof(struct paca_struct, shadow_vcpu.gpr[4]));
+ DEFINE(PACA_KVM_R5, offsetof(struct paca_struct, shadow_vcpu.gpr[5]));
+ DEFINE(PACA_KVM_R6, offsetof(struct paca_struct, shadow_vcpu.gpr[6]));
+ DEFINE(PACA_KVM_R7, offsetof(struct paca_struct, shadow_vcpu.gpr[7]));
+ DEFINE(PACA_KVM_R8, offsetof(struct paca_struct, shadow_vcpu.gpr[8]));
+ DEFINE(PACA_KVM_R9, offsetof(struct paca_struct, shadow_vcpu.gpr[9]));
+ DEFINE(PACA_KVM_R10, offsetof(struct paca_struct, shadow_vcpu.gpr[10]));
+ DEFINE(PACA_KVM_R11, offsetof(struct paca_struct, shadow_vcpu.gpr[11]));
+ DEFINE(PACA_KVM_R12, offsetof(struct paca_struct, shadow_vcpu.gpr[12]));
+ DEFINE(PACA_KVM_R13, offsetof(struct paca_struct, shadow_vcpu.gpr[13]));
+ DEFINE(PACA_KVM_HOST_R1, offsetof(struct paca_struct, shadow_vcpu.host_r1));
+ DEFINE(PACA_KVM_HOST_R2, offsetof(struct paca_struct, shadow_vcpu.host_r2));
+ DEFINE(PACA_KVM_VMHANDLER, offsetof(struct paca_struct,
+ shadow_vcpu.vmhandler));
+ DEFINE(PACA_KVM_SCRATCH0, offsetof(struct paca_struct,
+ shadow_vcpu.scratch0));
+ DEFINE(PACA_KVM_SCRATCH1, offsetof(struct paca_struct,
+ shadow_vcpu.scratch1));
#endif
#endif /* CONFIG_PPC64 */
@@ -389,8 +413,6 @@ int main(void)
DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
- DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
- DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
@@ -414,8 +436,12 @@ int main(void)
DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
+ DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
-#endif
+#else
+ DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+ DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+#endif /* CONFIG_PPC64 */
#endif
#ifdef CONFIG_44x
DEFINE(PGD_T_LOG2, PGD_T_LOG2);
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index b6bd1eaa1c24..c06fe552bc92 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -20,6 +20,7 @@
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/ptrace.h>
+#include <linux/kdebug.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/machdep.h>
@@ -115,7 +116,9 @@ void kgdb_roundup_cpus(unsigned long flags)
/* KGDB functions to use existing PowerPC64 hooks. */
static int kgdb_debugger(struct pt_regs *regs)
{
- return kgdb_handle_exception(0, computeSignal(TRAP(regs)), 0, regs);
+ if (kgdb_handle_exception(1, computeSignal(TRAP(regs)), DIE_OOPS, regs))
+ return 0;
+ return 1;
}
static int kgdb_handle_breakpoint(struct pt_regs *regs)
@@ -123,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
if (user_mode(regs))
return 0;
- if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
+ if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return 0;
if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
@@ -309,6 +312,11 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
(unsigned long)(((void *)gdb_regs) + NUMREGBYTES));
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->nip = pc;
+}
+
/*
* This function does PowerPC specific procesing for interfacing to gdb.
*/
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 9ddfaef1a184..035ada5443ee 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -469,7 +469,7 @@ static int __init serial_dev_init(void)
return -ENODEV;
/*
- * Before we register the platfrom serial devices, we need
+ * Before we register the platform serial devices, we need
* to fixup their interrupts and their IO ports.
*/
DBG("Fixing serial ports interrupts and IO ports ...\n");
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index d069ff8a7e03..379104ae291a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -809,12 +809,19 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}
if (reason & REASON_TRAP) {
+
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (debugger_bpt(regs))
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
/* trap exception */
if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
== NOTIFY_STOP)
return;
+#ifndef CONFIG_KGDB_LOW_LEVEL_TRAP
if (debugger_bpt(regs))
return;
+#endif /* ! CONFIG_KGDB_LOW_LEVEL_TRAP */
if (!(regs->msr & MSR_PR) && /* not user-mode */
report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 61af58fcecee..65ea083a5b27 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -65,13 +65,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
*/
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
- vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
break;
case DCRN_CPR0_CONFIG_DATA:
local_irq_disable();
mtdcr(DCRN_CPR0_CONFIG_ADDR,
vcpu->arch.cpr0_cfgaddr);
- vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+ kvmppc_set_gpr(vcpu, rt,
+ mfdcr(DCRN_CPR0_CONFIG_DATA));
local_irq_enable();
break;
default:
@@ -93,11 +94,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* emulate some access in kernel */
switch (dcrn) {
case DCRN_CPR0_CONFIG_ADDR:
- vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+ vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
break;
default:
run->dcr.dcrn = dcrn;
- run->dcr.data = vcpu->arch.gpr[rs];
+ run->dcr.data = kvmppc_get_gpr(vcpu, rs);
run->dcr.is_write = 1;
vcpu->arch.dcr_needed = 1;
kvmppc_account_exit(vcpu, DCR_EXITS);
@@ -146,13 +147,13 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
switch (sprn) {
case SPRN_PID:
- kvmppc_set_pid(vcpu, vcpu->arch.gpr[rs]); break;
+ kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
case SPRN_MMUCR:
- vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_CCR0:
- vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_CCR1:
- vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
default:
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
}
@@ -167,13 +168,13 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_PID:
- vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break;
case SPRN_MMUCR:
- vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break;
case SPRN_CCR0:
- vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break;
case SPRN_CCR1:
- vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
}
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index ff3cb63b8117..2570fcc7665d 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -439,7 +439,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
struct kvmppc_44x_tlbe *tlbe;
unsigned int gtlb_index;
- gtlb_index = vcpu->arch.gpr[ra];
+ gtlb_index = kvmppc_get_gpr(vcpu, ra);
if (gtlb_index > KVM44x_GUEST_TLB_SIZE) {
printk("%s: index %d\n", __func__, gtlb_index);
kvmppc_dump_vcpu(vcpu);
@@ -455,15 +455,15 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
switch (ws) {
case PPC44x_TLB_PAGEID:
tlbe->tid = get_mmucr_stid(vcpu);
- tlbe->word0 = vcpu->arch.gpr[rs];
+ tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
break;
case PPC44x_TLB_XLAT:
- tlbe->word1 = vcpu->arch.gpr[rs];
+ tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
break;
case PPC44x_TLB_ATTRIB:
- tlbe->word2 = vcpu->arch.gpr[rs];
+ tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
break;
default:
@@ -500,18 +500,20 @@ int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
unsigned int as = get_mmucr_sts(vcpu);
unsigned int pid = get_mmucr_stid(vcpu);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
if (rc) {
+ u32 cr = kvmppc_get_cr(vcpu);
+
if (gtlb_index < 0)
- vcpu->arch.cr &= ~0x20000000;
+ kvmppc_set_cr(vcpu, cr & ~0x20000000);
else
- vcpu->arch.cr |= 0x20000000;
+ kvmppc_set_cr(vcpu, cr | 0x20000000);
}
- vcpu->arch.gpr[rt] = gtlb_index;
+ kvmppc_set_gpr(vcpu, rt, gtlb_index);
kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
return EMULATE_DONE;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 07703f72330e..be28968c7941 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -20,6 +20,7 @@ config KVM
bool
select PREEMPT_NOTIFIERS
select ANON_INODES
+ select KVM_MMIO
config KVM_BOOK3S_64_HANDLER
bool
@@ -53,7 +54,7 @@ config KVM_440
config KVM_EXIT_TIMING
bool "Detailed exit timing"
- depends on KVM
+ depends on KVM_440 || KVM_E500
---help---
Calculate elapsed time for every exit/enter cycle. A per-vcpu
report is available in debugfs kvm/vm#_vcpu#_timing.
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 3e294bd9b8c6..02861fda73da 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -34,12 +34,6 @@
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
-/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
- * When set, we retrigger a DEC interrupt after that if DEC <= 0.
- * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */
-
-/* #define AGGRESSIVE_DEC */
-
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "exits", VCPU_STAT(sum_exits) },
{ "mmio", VCPU_STAT(mmio_exits) },
@@ -72,16 +66,20 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
+ memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
+ memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
+ sizeof(get_paca()->shadow_vcpu));
to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
}
-#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
+#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
u64 jd = mftb() - vcpu->arch.dec_jiffies;
@@ -125,11 +123,10 @@ void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
vcpu->arch.mmu.reset_msr(vcpu);
}
-void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
unsigned int prio;
- vcpu->stat.queue_intr++;
switch (vec) {
case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
@@ -149,15 +146,31 @@ void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
default: prio = BOOK3S_IRQPRIO_MAX; break;
}
- set_bit(prio, &vcpu->arch.pending_exceptions);
+ return prio;
+}
+
+static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
+ unsigned int vec)
+{
+ clear_bit(kvmppc_book3s_vec2irqprio(vec),
+ &vcpu->arch.pending_exceptions);
+}
+
+void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
+{
+ vcpu->stat.queue_intr++;
+
+ set_bit(kvmppc_book3s_vec2irqprio(vec),
+ &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
+ to_book3s(vcpu)->prog_flags = flags;
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}
@@ -171,6 +184,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions);
}
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+ kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
+}
+
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -181,6 +199,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
int deliver = 1;
int vec = 0;
+ ulong flags = 0ULL;
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
@@ -214,6 +233,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
break;
case BOOK3S_IRQPRIO_PROGRAM:
vec = BOOK3S_INTERRUPT_PROGRAM;
+ flags = to_book3s(vcpu)->prog_flags;
break;
case BOOK3S_IRQPRIO_VSX:
vec = BOOK3S_INTERRUPT_VSX;
@@ -244,7 +264,7 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
#endif
if (deliver)
- kvmppc_inject_interrupt(vcpu, vec, 0ULL);
+ kvmppc_inject_interrupt(vcpu, vec, flags);
return deliver;
}
@@ -254,21 +274,15 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned int priority;
- /* XXX be more clever here - no need to mftb() on every entry */
- /* Issue DEC again if it's still active */
-#ifdef AGGRESSIVE_DEC
- if (vcpu->arch.msr & MSR_EE)
- if (kvmppc_get_dec(vcpu) & 0x80000000)
- kvmppc_core_queue_dec(vcpu);
-#endif
-
#ifdef EXIT_DEBUG
if (vcpu->arch.pending_exceptions)
printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
priority = __ffs(*pending);
while (priority <= (sizeof(unsigned int) * 8)) {
- if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
+ if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
+ (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
+ /* DEC interrupts get cleared by mtdec */
clear_bit(priority, &vcpu->arch.pending_exceptions);
break;
}
@@ -532,8 +546,6 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_emulate_mmio(run, vcpu);
if ( r == RESUME_HOST_NV )
r = RESUME_HOST;
- if ( r == RESUME_GUEST_NV )
- r = RESUME_GUEST;
}
return r;
@@ -621,6 +633,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_PROGRAM:
{
enum emulation_result er;
+ ulong flags;
+
+ flags = (vcpu->arch.shadow_msr & 0x1f0000ull);
if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
@@ -628,7 +643,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
#endif
if ((vcpu->arch.last_inst & 0xff0007ff) !=
(INS_DCBZ & 0xfffffff7)) {
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
break;
}
@@ -638,12 +653,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
er = kvmppc_emulate_instruction(run, vcpu);
switch (er) {
case EMULATE_DONE:
- r = RESUME_GUEST;
+ r = RESUME_GUEST_NV;
break;
case EMULATE_FAIL:
printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
__func__, vcpu->arch.pc, vcpu->arch.last_inst);
- kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
+ kvmppc_core_queue_program(vcpu, flags);
r = RESUME_GUEST;
break;
default:
@@ -653,7 +668,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOK3S_INTERRUPT_SYSCALL:
#ifdef EXIT_DEBUG
- printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
+ printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
#endif
vcpu->stat.syscall_exits++;
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
@@ -712,10 +727,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
regs->pc = vcpu->arch.pc;
- regs->cr = vcpu->arch.cr;
+ regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
regs->lr = vcpu->arch.lr;
- regs->xer = vcpu->arch.xer;
+ regs->xer = kvmppc_get_xer(vcpu);
regs->msr = vcpu->arch.msr;
regs->srr0 = vcpu->arch.srr0;
regs->srr1 = vcpu->arch.srr1;
@@ -729,7 +744,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg7 = vcpu->arch.sprg6;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
- regs->gpr[i] = vcpu->arch.gpr[i];
+ regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
return 0;
}
@@ -739,10 +754,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
vcpu->arch.pc = regs->pc;
- vcpu->arch.cr = regs->cr;
+ kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
vcpu->arch.lr = regs->lr;
- vcpu->arch.xer = regs->xer;
+ kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
vcpu->arch.srr0 = regs->srr0;
vcpu->arch.srr1 = regs->srr1;
@@ -754,8 +769,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.sprg6 = regs->sprg5;
vcpu->arch.sprg7 = regs->sprg6;
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
- vcpu->arch.gpr[i] = regs->gpr[i];
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
return 0;
}
@@ -850,7 +865,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
int is_dirty = 0;
int r, n;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = kvm_get_dirty_log(kvm, log, &is_dirty);
if (r)
@@ -858,7 +873,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
@@ -872,7 +887,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
r = 0;
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -910,6 +925,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
+ vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
vcpu->arch.shadow_msr = MSR_USER64;
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
index 1027eac6d474..2b0ee7e040c9 100644
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ b/arch/powerpc/kvm/book3s_64_emulate.c
@@ -65,11 +65,11 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
- vcpu->arch.gpr[get_rt(inst)] = vcpu->arch.msr;
+ kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
break;
case OP_31_XOP_MTMSRD:
{
- ulong rs = vcpu->arch.gpr[get_rs(inst)];
+ ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
if (inst & 0x10000) {
vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
@@ -78,30 +78,30 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
}
case OP_31_XOP_MTMSR:
- kvmppc_set_msr(vcpu, vcpu->arch.gpr[get_rs(inst)]);
+ kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
break;
case OP_31_XOP_MFSRIN:
{
int srnum;
- srnum = (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf;
+ srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
if (vcpu->arch.mmu.mfsrin) {
u32 sr;
sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
- vcpu->arch.gpr[get_rt(inst)] = sr;
+ kvmppc_set_gpr(vcpu, get_rt(inst), sr);
}
break;
}
case OP_31_XOP_MTSRIN:
vcpu->arch.mmu.mtsrin(vcpu,
- (vcpu->arch.gpr[get_rb(inst)] >> 28) & 0xf,
- vcpu->arch.gpr[get_rs(inst)]);
+ (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
+ kvmppc_get_gpr(vcpu, get_rs(inst)));
break;
case OP_31_XOP_TLBIE:
case OP_31_XOP_TLBIEL:
{
bool large = (inst & 0x00200000) ? true : false;
- ulong addr = vcpu->arch.gpr[get_rb(inst)];
+ ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
vcpu->arch.mmu.tlbie(vcpu, addr, large);
break;
}
@@ -111,14 +111,16 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!vcpu->arch.mmu.slbmte)
return EMULATE_FAIL;
- vcpu->arch.mmu.slbmte(vcpu, vcpu->arch.gpr[get_rs(inst)],
- vcpu->arch.gpr[get_rb(inst)]);
+ vcpu->arch.mmu.slbmte(vcpu,
+ kvmppc_get_gpr(vcpu, get_rs(inst)),
+ kvmppc_get_gpr(vcpu, get_rb(inst)));
break;
case OP_31_XOP_SLBIE:
if (!vcpu->arch.mmu.slbie)
return EMULATE_FAIL;
- vcpu->arch.mmu.slbie(vcpu, vcpu->arch.gpr[get_rb(inst)]);
+ vcpu->arch.mmu.slbie(vcpu,
+ kvmppc_get_gpr(vcpu, get_rb(inst)));
break;
case OP_31_XOP_SLBIA:
if (!vcpu->arch.mmu.slbia)
@@ -132,9 +134,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
} else {
ulong t, rb;
- rb = vcpu->arch.gpr[get_rb(inst)];
+ rb = kvmppc_get_gpr(vcpu, get_rb(inst));
t = vcpu->arch.mmu.slbmfee(vcpu, rb);
- vcpu->arch.gpr[get_rt(inst)] = t;
+ kvmppc_set_gpr(vcpu, get_rt(inst), t);
}
break;
case OP_31_XOP_SLBMFEV:
@@ -143,20 +145,20 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
} else {
ulong t, rb;
- rb = vcpu->arch.gpr[get_rb(inst)];
+ rb = kvmppc_get_gpr(vcpu, get_rb(inst));
t = vcpu->arch.mmu.slbmfev(vcpu, rb);
- vcpu->arch.gpr[get_rt(inst)] = t;
+ kvmppc_set_gpr(vcpu, get_rt(inst), t);
}
break;
case OP_31_XOP_DCBZ:
{
- ulong rb = vcpu->arch.gpr[get_rb(inst)];
+ ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
ulong ra = 0;
ulong addr;
u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
if (get_ra(inst))
- ra = vcpu->arch.gpr[get_ra(inst)];
+ ra = kvmppc_get_gpr(vcpu, get_ra(inst));
addr = (ra + rb) & ~31ULL;
if (!(vcpu->arch.msr & MSR_SF))
@@ -233,43 +235,44 @@ static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
int emulated = EMULATE_DONE;
+ ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_SDR1:
- to_book3s(vcpu)->sdr1 = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->sdr1 = spr_val;
break;
case SPRN_DSISR:
- to_book3s(vcpu)->dsisr = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->dsisr = spr_val;
break;
case SPRN_DAR:
- vcpu->arch.dear = vcpu->arch.gpr[rs];
+ vcpu->arch.dear = spr_val;
break;
case SPRN_HIOR:
- to_book3s(vcpu)->hior = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hior = spr_val;
break;
case SPRN_IBAT0U ... SPRN_IBAT3L:
case SPRN_IBAT4U ... SPRN_IBAT7L:
case SPRN_DBAT0U ... SPRN_DBAT3L:
case SPRN_DBAT4U ... SPRN_DBAT7L:
- kvmppc_write_bat(vcpu, sprn, (u32)vcpu->arch.gpr[rs]);
+ kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
/* BAT writes happen so rarely that we're ok to flush
* everything here */
kvmppc_mmu_pte_flush(vcpu, 0, 0);
break;
case SPRN_HID0:
- to_book3s(vcpu)->hid[0] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[0] = spr_val;
break;
case SPRN_HID1:
- to_book3s(vcpu)->hid[1] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[1] = spr_val;
break;
case SPRN_HID2:
- to_book3s(vcpu)->hid[2] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[2] = spr_val;
break;
case SPRN_HID4:
- to_book3s(vcpu)->hid[4] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[4] = spr_val;
break;
case SPRN_HID5:
- to_book3s(vcpu)->hid[5] = vcpu->arch.gpr[rs];
+ to_book3s(vcpu)->hid[5] = spr_val;
/* guest HID5 set can change is_dcbz32 */
if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
(mfmsr() & MSR_HV))
@@ -299,38 +302,38 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_SDR1:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->sdr1;
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
break;
case SPRN_DSISR:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->dsisr;
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
break;
case SPRN_DAR:
- vcpu->arch.gpr[rt] = vcpu->arch.dear;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
break;
case SPRN_HIOR:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hior;
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
break;
case SPRN_HID0:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[0];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
break;
case SPRN_HID1:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[1];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
break;
case SPRN_HID2:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[2];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
break;
case SPRN_HID4:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[4];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
break;
case SPRN_HID5:
- vcpu->arch.gpr[rt] = to_book3s(vcpu)->hid[5];
+ kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
break;
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
case SPRN_CTRLF:
case SPRN_CTRLT:
- vcpu->arch.gpr[rt] = 0;
+ kvmppc_set_gpr(vcpu, rt, 0);
break;
default:
printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
index 5b2db38ed86c..99b07125c529 100644
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ b/arch/powerpc/kvm/book3s_64_exports.c
@@ -22,3 +22,4 @@
EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index 7b55d8094c8b..2ff0b2137e6f 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -28,11 +28,6 @@
#define ULONG_SIZE 8
#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
-.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
- ld \tmp_reg, (PACA_EXMC+\offset)(r13)
- std \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
-.endm
-
.macro DISABLE_INTERRUPTS
mfmsr r0
rldicl r0,r0,48,1
@@ -40,6 +35,26 @@
mtmsrd r0,1
.endm
+#define VCPU_LOAD_NVGPRS(vcpu) \
+ ld r14, VCPU_GPR(r14)(vcpu); \
+ ld r15, VCPU_GPR(r15)(vcpu); \
+ ld r16, VCPU_GPR(r16)(vcpu); \
+ ld r17, VCPU_GPR(r17)(vcpu); \
+ ld r18, VCPU_GPR(r18)(vcpu); \
+ ld r19, VCPU_GPR(r19)(vcpu); \
+ ld r20, VCPU_GPR(r20)(vcpu); \
+ ld r21, VCPU_GPR(r21)(vcpu); \
+ ld r22, VCPU_GPR(r22)(vcpu); \
+ ld r23, VCPU_GPR(r23)(vcpu); \
+ ld r24, VCPU_GPR(r24)(vcpu); \
+ ld r25, VCPU_GPR(r25)(vcpu); \
+ ld r26, VCPU_GPR(r26)(vcpu); \
+ ld r27, VCPU_GPR(r27)(vcpu); \
+ ld r28, VCPU_GPR(r28)(vcpu); \
+ ld r29, VCPU_GPR(r29)(vcpu); \
+ ld r30, VCPU_GPR(r30)(vcpu); \
+ ld r31, VCPU_GPR(r31)(vcpu); \
+
/*****************************************************************************
* *
* Guest entry / exit code that is in kernel module memory (highmem) *
@@ -67,61 +82,32 @@ kvm_start_entry:
SAVE_NVGPRS(r1)
/* Save LR */
- mflr r14
- std r14, _LINK(r1)
-
-/* XXX optimize non-volatile loading away */
-kvm_start_lightweight:
+ std r0, _LINK(r1)
- DISABLE_INTERRUPTS
+ /* Load non-volatile guest state from the vcpu */
+ VCPU_LOAD_NVGPRS(r4)
/* Save R1/R2 in the PACA */
- std r1, PACAR1(r13)
- std r2, (PACA_EXMC+EX_SRR0)(r13)
+ std r1, PACA_KVM_HOST_R1(r13)
+ std r2, PACA_KVM_HOST_R2(r13)
+
+ /* XXX swap in/out on load? */
ld r3, VCPU_HIGHMEM_HANDLER(r4)
- std r3, PACASAVEDMSR(r13)
+ std r3, PACA_KVM_VMHANDLER(r13)
- /* Load non-volatile guest state from the vcpu */
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+kvm_start_lightweight:
ld r9, VCPU_PC(r4) /* r9 = vcpu->arch.pc */
ld r10, VCPU_SHADOW_MSR(r4) /* r10 = vcpu->arch.shadow_msr */
- ld r3, VCPU_TRAMPOLINE_ENTER(r4)
- mtsrr0 r3
-
- LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
- mtsrr1 r3
-
- /* Load guest state in the respective registers */
- lwz r3, VCPU_CR(r4) /* r3 = vcpu->arch.cr */
- stw r3, (PACA_EXMC + EX_CCR)(r13)
-
- ld r3, VCPU_CTR(r4) /* r3 = vcpu->arch.ctr */
- mtctr r3 /* CTR = r3 */
+ /* Load some guest state in the respective registers */
+ ld r5, VCPU_CTR(r4) /* r5 = vcpu->arch.ctr */
+ /* will be swapped in by rmcall */
ld r3, VCPU_LR(r4) /* r3 = vcpu->arch.lr */
mtlr r3 /* LR = r3 */
- ld r3, VCPU_XER(r4) /* r3 = vcpu->arch.xer */
- std r3, (PACA_EXMC + EX_R3)(r13)
+ DISABLE_INTERRUPTS
/* Some guests may need to have dcbz set to 32 byte length.
*
@@ -141,36 +127,15 @@ kvm_start_lightweight:
mtspr SPRN_HID5,r3
no_dcbz32_on:
- /* Load guest GPRs */
-
- ld r3, VCPU_GPR(r9)(r4)
- std r3, (PACA_EXMC + EX_R9)(r13)
- ld r3, VCPU_GPR(r10)(r4)
- std r3, (PACA_EXMC + EX_R10)(r13)
- ld r3, VCPU_GPR(r11)(r4)
- std r3, (PACA_EXMC + EX_R11)(r13)
- ld r3, VCPU_GPR(r12)(r4)
- std r3, (PACA_EXMC + EX_R12)(r13)
- ld r3, VCPU_GPR(r13)(r4)
- std r3, (PACA_EXMC + EX_R13)(r13)
-
- ld r0, VCPU_GPR(r0)(r4)
- ld r1, VCPU_GPR(r1)(r4)
- ld r2, VCPU_GPR(r2)(r4)
- ld r3, VCPU_GPR(r3)(r4)
- ld r5, VCPU_GPR(r5)(r4)
- ld r6, VCPU_GPR(r6)(r4)
- ld r7, VCPU_GPR(r7)(r4)
- ld r8, VCPU_GPR(r8)(r4)
- ld r4, VCPU_GPR(r4)(r4)
-
- /* This sets the Magic value for the trampoline */
-
- li r11, 1
- stb r11, PACA_KVM_IN_GUEST(r13)
+
+ ld r6, VCPU_RMCALL(r4)
+ mtctr r6
+
+ ld r3, VCPU_TRAMPOLINE_ENTER(r4)
+ LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
/* Jump to SLB patching handlder and into our guest */
- RFI
+ bctr
/*
* This is the handler in module memory. It gets jumped at from the
@@ -184,125 +149,70 @@ kvmppc_handler_highmem:
/*
* Register usage at this point:
*
- * R00 = guest R13
- * R01 = host R1
- * R02 = host R2
- * R10 = guest PC
- * R11 = guest MSR
- * R12 = exit handler id
- * R13 = PACA
- * PACA.exmc.R9 = guest R1
- * PACA.exmc.R10 = guest R10
- * PACA.exmc.R11 = guest R11
- * PACA.exmc.R12 = guest R12
- * PACA.exmc.R13 = guest R2
- * PACA.exmc.DAR = guest DAR
- * PACA.exmc.DSISR = guest DSISR
- * PACA.exmc.LR = guest instruction
- * PACA.exmc.CCR = guest CR
- * PACA.exmc.SRR0 = guest R0
+ * R0 = guest last inst
+ * R1 = host R1
+ * R2 = host R2
+ * R3 = guest PC
+ * R4 = guest MSR
+ * R5 = guest DAR
+ * R6 = guest DSISR
+ * R13 = PACA
+ * PACA.KVM.* = guest *
*
*/
- std r3, (PACA_EXMC+EX_R3)(r13)
+ /* R7 = vcpu */
+ ld r7, GPR4(r1)
- /* save the exit id in R3 */
- mr r3, r12
+ /* Now save the guest state */
- /* R12 = vcpu */
- ld r12, GPR4(r1)
+ stw r0, VCPU_LAST_INST(r7)
- /* Now save the guest state */
+ std r3, VCPU_PC(r7)
+ std r4, VCPU_SHADOW_MSR(r7)
+ std r5, VCPU_FAULT_DEAR(r7)
+ std r6, VCPU_FAULT_DSISR(r7)
- std r0, VCPU_GPR(r13)(r12)
- std r4, VCPU_GPR(r4)(r12)
- std r5, VCPU_GPR(r5)(r12)
- std r6, VCPU_GPR(r6)(r12)
- std r7, VCPU_GPR(r7)(r12)
- std r8, VCPU_GPR(r8)(r12)
- std r9, VCPU_GPR(r9)(r12)
-
- /* get registers from PACA */
- mfpaca r5, r0, EX_SRR0, r12
- mfpaca r5, r3, EX_R3, r12
- mfpaca r5, r1, EX_R9, r12
- mfpaca r5, r10, EX_R10, r12
- mfpaca r5, r11, EX_R11, r12
- mfpaca r5, r12, EX_R12, r12
- mfpaca r5, r2, EX_R13, r12
-
- lwz r5, (PACA_EXMC+EX_LR)(r13)
- stw r5, VCPU_LAST_INST(r12)
-
- lwz r5, (PACA_EXMC+EX_CCR)(r13)
- stw r5, VCPU_CR(r12)
-
- ld r5, VCPU_HFLAGS(r12)
+ ld r5, VCPU_HFLAGS(r7)
rldicl. r5, r5, 0, 63 /* CR = ((r5 & 1) == 0) */
beq no_dcbz32_off
+ li r4, 0
mfspr r5,SPRN_HID5
- rldimi r5,r5,6,56
+ rldimi r5,r4,6,56
mtspr SPRN_HID5,r5
no_dcbz32_off:
- /* XXX maybe skip on lightweight? */
- std r14, VCPU_GPR(r14)(r12)
- std r15, VCPU_GPR(r15)(r12)
- std r16, VCPU_GPR(r16)(r12)
- std r17, VCPU_GPR(r17)(r12)
- std r18, VCPU_GPR(r18)(r12)
- std r19, VCPU_GPR(r19)(r12)
- std r20, VCPU_GPR(r20)(r12)
- std r21, VCPU_GPR(r21)(r12)
- std r22, VCPU_GPR(r22)(r12)
- std r23, VCPU_GPR(r23)(r12)
- std r24, VCPU_GPR(r24)(r12)
- std r25, VCPU_GPR(r25)(r12)
- std r26, VCPU_GPR(r26)(r12)
- std r27, VCPU_GPR(r27)(r12)
- std r28, VCPU_GPR(r28)(r12)
- std r29, VCPU_GPR(r29)(r12)
- std r30, VCPU_GPR(r30)(r12)
- std r31, VCPU_GPR(r31)(r12)
-
- /* Restore non-volatile host registers (r14 - r31) */
- REST_NVGPRS(r1)
-
- /* Save guest PC (R10) */
- std r10, VCPU_PC(r12)
-
- /* Save guest msr (R11) */
- std r11, VCPU_SHADOW_MSR(r12)
-
- /* Save guest CTR (in R12) */
+ std r14, VCPU_GPR(r14)(r7)
+ std r15, VCPU_GPR(r15)(r7)
+ std r16, VCPU_GPR(r16)(r7)
+ std r17, VCPU_GPR(r17)(r7)
+ std r18, VCPU_GPR(r18)(r7)
+ std r19, VCPU_GPR(r19)(r7)
+ std r20, VCPU_GPR(r20)(r7)
+ std r21, VCPU_GPR(r21)(r7)
+ std r22, VCPU_GPR(r22)(r7)
+ std r23, VCPU_GPR(r23)(r7)
+ std r24, VCPU_GPR(r24)(r7)
+ std r25, VCPU_GPR(r25)(r7)
+ std r26, VCPU_GPR(r26)(r7)
+ std r27, VCPU_GPR(r27)(r7)
+ std r28, VCPU_GPR(r28)(r7)
+ std r29, VCPU_GPR(r29)(r7)
+ std r30, VCPU_GPR(r30)(r7)
+ std r31, VCPU_GPR(r31)(r7)
+
+ /* Save guest CTR */
mfctr r5
- std r5, VCPU_CTR(r12)
+ std r5, VCPU_CTR(r7)
/* Save guest LR */
mflr r5
- std r5, VCPU_LR(r12)
-
- /* Save guest XER */
- mfxer r5
- std r5, VCPU_XER(r12)
-
- /* Save guest DAR */
- ld r5, (PACA_EXMC+EX_DAR)(r13)
- std r5, VCPU_FAULT_DEAR(r12)
-
- /* Save guest DSISR */
- lwz r5, (PACA_EXMC+EX_DSISR)(r13)
- std r5, VCPU_FAULT_DSISR(r12)
+ std r5, VCPU_LR(r7)
/* Restore host msr -> SRR1 */
- ld r7, VCPU_HOST_MSR(r12)
- mtsrr1 r7
-
- /* Restore host IP -> SRR0 */
- ld r6, VCPU_HOST_RETIP(r12)
- mtsrr0 r6
+ ld r6, VCPU_HOST_MSR(r7)
/*
* For some interrupts, we need to call the real Linux
@@ -314,13 +224,14 @@ no_dcbz32_off:
* r3 = address of interrupt handler (exit reason)
*/
- cmpwi r3, BOOK3S_INTERRUPT_EXTERNAL
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
beq call_linux_handler
- cmpwi r3, BOOK3S_INTERRUPT_DECREMENTER
+ cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
beq call_linux_handler
- /* Back to Interruptable Mode! (goto kvm_return_point) */
- RFI
+ /* Back to EE=1 */
+ mtmsr r6
+ b kvm_return_point
call_linux_handler:
@@ -333,16 +244,22 @@ call_linux_handler:
* interrupt handler!
*
* R3 still contains the exit code,
- * R6 VCPU_HOST_RETIP and
- * R7 VCPU_HOST_MSR
+ * R5 VCPU_HOST_RETIP and
+ * R6 VCPU_HOST_MSR
*/
- mtlr r3
+ /* Restore host IP -> SRR0 */
+ ld r5, VCPU_HOST_RETIP(r7)
+
+ /* XXX Better move to a safe function?
+ * What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
- ld r5, VCPU_TRAMPOLINE_LOWMEM(r12)
- mtsrr0 r5
- LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
- mtsrr1 r5
+ mtlr r12
+
+ ld r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+ mtsrr0 r4
+ LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+ mtsrr1 r3
RFI
@@ -351,42 +268,51 @@ kvm_return_point:
/* Jump back to lightweight entry if we're supposed to */
/* go back into the guest */
- mr r5, r3
+
+ /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
+ mr r5, r12
+
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
bl KVMPPC_HANDLE_EXIT
-#if 0 /* XXX get lightweight exits back */
+ /* If RESUME_GUEST, get back in the loop */
cmpwi r3, RESUME_GUEST
- bne kvm_exit_heavyweight
+ beq kvm_loop_lightweight
- /* put VCPU and KVM_RUN back into place and roll again! */
- REST_2GPRS(3, r1)
- b kvm_start_lightweight
+ cmpwi r3, RESUME_GUEST_NV
+ beq kvm_loop_heavyweight
-kvm_exit_heavyweight:
- /* Restore non-volatile host registers */
- ld r14, _LINK(r1)
- mtlr r14
- REST_NVGPRS(r1)
+kvm_exit_loop:
- addi r1, r1, SWITCH_FRAME_SIZE
-#else
ld r4, _LINK(r1)
mtlr r4
- cmpwi r3, RESUME_GUEST
- bne kvm_exit_heavyweight
+ /* Restore non-volatile host registers (r14 - r31) */
+ REST_NVGPRS(r1)
+
+ addi r1, r1, SWITCH_FRAME_SIZE
+ blr
+
+kvm_loop_heavyweight:
+
+ ld r4, _LINK(r1)
+ std r4, (16 + SWITCH_FRAME_SIZE)(r1)
+ /* Load vcpu and cpu_run */
REST_2GPRS(3, r1)
- addi r1, r1, SWITCH_FRAME_SIZE
+ /* Load non-volatile guest state from the vcpu */
+ VCPU_LOAD_NVGPRS(r4)
- b kvm_start_entry
+ /* Jump back into the beginning of this function */
+ b kvm_start_lightweight
-kvm_exit_heavyweight:
+kvm_loop_lightweight:
- addi r1, r1, SWITCH_FRAME_SIZE
-#endif
+ /* We'll need the vcpu pointer */
+ REST_GPR(4, r1)
+
+ /* Jump back into the beginning of this function */
+ b kvm_start_lightweight
- blr
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index e4beeb371a73..512dcff77554 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -54,7 +54,7 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
if (!vcpu_book3s->slb[i].valid)
continue;
- if (vcpu_book3s->slb[i].large)
+ if (vcpu_book3s->slb[i].tb)
cmp_esid = esid_1t;
if (vcpu_book3s->slb[i].esid == cmp_esid)
@@ -65,9 +65,10 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
eaddr, esid, esid_1t);
for (i = 0; i < vcpu_book3s->slb_nr; i++) {
if (vcpu_book3s->slb[i].vsid)
- dprintk(" %d: %c%c %llx %llx\n", i,
+ dprintk(" %d: %c%c%c %llx %llx\n", i,
vcpu_book3s->slb[i].valid ? 'v' : ' ',
vcpu_book3s->slb[i].large ? 'l' : ' ',
+ vcpu_book3s->slb[i].tb ? 't' : ' ',
vcpu_book3s->slb[i].esid,
vcpu_book3s->slb[i].vsid);
}
@@ -84,7 +85,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
if (!slb)
return 0;
- if (slb->large)
+ if (slb->tb)
return (((u64)eaddr >> 12) & 0xfffffff) |
(((u64)slb->vsid) << 28);
@@ -309,7 +310,8 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
slbe = &vcpu_book3s->slb[slb_nr];
slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
- slbe->esid = slbe->large ? esid_1t : esid;
+ slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
+ slbe->esid = slbe->tb ? esid_1t : esid;
slbe->vsid = rs >> 12;
slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
index fb7dd2e9ac88..e7091c9459a8 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -45,36 +45,25 @@ kvmppc_trampoline_\intno:
* To distinguish, we check a magic byte in the PACA
*/
mfspr r13, SPRN_SPRG_PACA /* r13 = PACA */
- std r12, (PACA_EXMC + EX_R12)(r13)
+ std r12, PACA_KVM_SCRATCH0(r13)
mfcr r12
- stw r12, (PACA_EXMC + EX_CCR)(r13)
+ stw r12, PACA_KVM_SCRATCH1(r13)
lbz r12, PACA_KVM_IN_GUEST(r13)
- cmpwi r12, 0
+ cmpwi r12, KVM_GUEST_MODE_NONE
bne ..kvmppc_handler_hasmagic_\intno
/* No KVM guest? Then jump back to the Linux handler! */
- lwz r12, (PACA_EXMC + EX_CCR)(r13)
+ lwz r12, PACA_KVM_SCRATCH1(r13)
mtcr r12
- ld r12, (PACA_EXMC + EX_R12)(r13)
+ ld r12, PACA_KVM_SCRATCH0(r13)
mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */
b kvmppc_resume_\intno /* Get back original handler */
/* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:
- /* Unset guest state */
- li r12, 0
- stb r12, PACA_KVM_IN_GUEST(r13)
- std r1, (PACA_EXMC+EX_R9)(r13)
- std r10, (PACA_EXMC+EX_R10)(r13)
- std r11, (PACA_EXMC+EX_R11)(r13)
- std r2, (PACA_EXMC+EX_R13)(r13)
-
- mfsrr0 r10
- mfsrr1 r11
-
- /* Restore R1/R2 so we can handle faults */
- ld r1, PACAR1(r13)
- ld r2, (PACA_EXMC+EX_SRR0)(r13)
+ /* Should we just skip the faulting instruction? */
+ cmpwi r12, KVM_GUEST_MODE_SKIP
+ beq kvmppc_handler_skip_ins
/* Let's store which interrupt we're handling */
li r12, \intno
@@ -102,23 +91,73 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALTIVEC
INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_VSX
/*
+ * Bring us back to the faulting code, but skip the
+ * faulting instruction.
+ *
+ * This is a generic exit path from the interrupt
+ * trampolines above.
+ *
+ * Input Registers:
+ *
+ * R12 = free
+ * R13 = PACA
+ * PACA.KVM.SCRATCH0 = guest R12
+ * PACA.KVM.SCRATCH1 = guest CR
+ * SPRG_SCRATCH0 = guest R13
+ *
+ */
+kvmppc_handler_skip_ins:
+
+ /* Patch the IP to the next instruction */
+ mfsrr0 r12
+ addi r12, r12, 4
+ mtsrr0 r12
+
+ /* Clean up all state */
+ lwz r12, PACA_KVM_SCRATCH1(r13)
+ mtcr r12
+ ld r12, PACA_KVM_SCRATCH0(r13)
+ mfspr r13, SPRN_SPRG_SCRATCH0
+
+ /* And get back into the code */
+ RFI
+
+/*
* This trampoline brings us back to a real mode handler
*
* Input Registers:
*
- * R6 = SRR0
- * R7 = SRR1
+ * R5 = SRR0
+ * R6 = SRR1
* LR = real-mode IP
*
*/
.global kvmppc_handler_lowmem_trampoline
kvmppc_handler_lowmem_trampoline:
- mtsrr0 r6
- mtsrr1 r7
+ mtsrr0 r5
+ mtsrr1 r6
blr
kvmppc_handler_lowmem_trampoline_end:
+/*
+ * Call a function in real mode
+ *
+ * Input Registers:
+ *
+ * R3 = function
+ * R4 = MSR
+ * R5 = CTR
+ *
+ */
+_GLOBAL(kvmppc_rmcall)
+ mtmsr r4 /* Disable relocation, so mtsrr
+ doesn't get interrupted */
+ mtctr r5
+ mtsrr0 r3
+ mtsrr1 r4
+ RFI
+
.global kvmppc_trampoline_lowmem
kvmppc_trampoline_lowmem:
.long kvmppc_handler_lowmem_trampoline - _stext
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index ecd237a03fd0..35b762722187 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -31,7 +31,7 @@
#define REBOLT_SLB_ENTRY(num) \
ld r10, SHADOW_SLB_ESID(num)(r11); \
cmpdi r10, 0; \
- beq slb_exit_skip_1; \
+ beq slb_exit_skip_ ## num; \
oris r10, r10, SLB_ESID_V@h; \
ld r9, SHADOW_SLB_VSID(num)(r11); \
slbmte r9, r10; \
@@ -51,23 +51,21 @@ kvmppc_handler_trampoline_enter:
*
* MSR = ~IR|DR
* R13 = PACA
+ * R1 = host R1
+ * R2 = host R2
* R9 = guest IP
* R10 = guest MSR
- * R11 = free
- * R12 = free
- * PACA[PACA_EXMC + EX_R9] = guest R9
- * PACA[PACA_EXMC + EX_R10] = guest R10
- * PACA[PACA_EXMC + EX_R11] = guest R11
- * PACA[PACA_EXMC + EX_R12] = guest R12
- * PACA[PACA_EXMC + EX_R13] = guest R13
- * PACA[PACA_EXMC + EX_CCR] = guest CR
- * PACA[PACA_EXMC + EX_R3] = guest XER
+ * all other GPRS = free
+ * PACA[KVM_CR] = guest CR
+ * PACA[KVM_XER] = guest XER
*/
mtsrr0 r9
mtsrr1 r10
- mtspr SPRN_SPRG_SCRATCH0, r0
+ /* Activate guest mode, so faults get handled by KVM */
+ li r11, KVM_GUEST_MODE_GUEST
+ stb r11, PACA_KVM_IN_GUEST(r13)
/* Remove LPAR shadow entries */
@@ -131,20 +129,27 @@ slb_do_enter:
/* Enter guest */
- mfspr r0, SPRN_SPRG_SCRATCH0
-
- ld r9, (PACA_EXMC+EX_R9)(r13)
- ld r10, (PACA_EXMC+EX_R10)(r13)
- ld r12, (PACA_EXMC+EX_R12)(r13)
-
- lwz r11, (PACA_EXMC+EX_CCR)(r13)
+ ld r0, (PACA_KVM_R0)(r13)
+ ld r1, (PACA_KVM_R1)(r13)
+ ld r2, (PACA_KVM_R2)(r13)
+ ld r3, (PACA_KVM_R3)(r13)
+ ld r4, (PACA_KVM_R4)(r13)
+ ld r5, (PACA_KVM_R5)(r13)
+ ld r6, (PACA_KVM_R6)(r13)
+ ld r7, (PACA_KVM_R7)(r13)
+ ld r8, (PACA_KVM_R8)(r13)
+ ld r9, (PACA_KVM_R9)(r13)
+ ld r10, (PACA_KVM_R10)(r13)
+ ld r12, (PACA_KVM_R12)(r13)
+
+ lwz r11, (PACA_KVM_CR)(r13)
mtcr r11
- ld r11, (PACA_EXMC+EX_R3)(r13)
+ ld r11, (PACA_KVM_XER)(r13)
mtxer r11
- ld r11, (PACA_EXMC+EX_R11)(r13)
- ld r13, (PACA_EXMC+EX_R13)(r13)
+ ld r11, (PACA_KVM_R11)(r13)
+ ld r13, (PACA_KVM_R13)(r13)
RFI
kvmppc_handler_trampoline_enter_end:
@@ -162,28 +167,54 @@ kvmppc_handler_trampoline_exit:
/* Register usage at this point:
*
- * SPRG_SCRATCH0 = guest R13
- * R01 = host R1
- * R02 = host R2
- * R10 = guest PC
- * R11 = guest MSR
- * R12 = exit handler id
- * R13 = PACA
- * PACA.exmc.CCR = guest CR
- * PACA.exmc.R9 = guest R1
- * PACA.exmc.R10 = guest R10
- * PACA.exmc.R11 = guest R11
- * PACA.exmc.R12 = guest R12
- * PACA.exmc.R13 = guest R2
+ * SPRG_SCRATCH0 = guest R13
+ * R12 = exit handler id
+ * R13 = PACA
+ * PACA.KVM.SCRATCH0 = guest R12
+ * PACA.KVM.SCRATCH1 = guest CR
*
*/
/* Save registers */
- std r0, (PACA_EXMC+EX_SRR0)(r13)
- std r9, (PACA_EXMC+EX_R3)(r13)
- std r10, (PACA_EXMC+EX_LR)(r13)
- std r11, (PACA_EXMC+EX_DAR)(r13)
+ std r0, PACA_KVM_R0(r13)
+ std r1, PACA_KVM_R1(r13)
+ std r2, PACA_KVM_R2(r13)
+ std r3, PACA_KVM_R3(r13)
+ std r4, PACA_KVM_R4(r13)
+ std r5, PACA_KVM_R5(r13)
+ std r6, PACA_KVM_R6(r13)
+ std r7, PACA_KVM_R7(r13)
+ std r8, PACA_KVM_R8(r13)
+ std r9, PACA_KVM_R9(r13)
+ std r10, PACA_KVM_R10(r13)
+ std r11, PACA_KVM_R11(r13)
+
+ /* Restore R1/R2 so we can handle faults */
+ ld r1, PACA_KVM_HOST_R1(r13)
+ ld r2, PACA_KVM_HOST_R2(r13)
+
+ /* Save guest PC and MSR in GPRs */
+ mfsrr0 r3
+ mfsrr1 r4
+
+ /* Get scratch'ed off registers */
+ mfspr r9, SPRN_SPRG_SCRATCH0
+ std r9, PACA_KVM_R13(r13)
+
+ ld r8, PACA_KVM_SCRATCH0(r13)
+ std r8, PACA_KVM_R12(r13)
+
+ lwz r7, PACA_KVM_SCRATCH1(r13)
+ stw r7, PACA_KVM_CR(r13)
+
+ /* Save more register state */
+
+ mfxer r6
+ stw r6, PACA_KVM_XER(r13)
+
+ mfdar r5
+ mfdsisr r6
/*
* In order for us to easily get the last instruction,
@@ -202,17 +233,28 @@ kvmppc_handler_trampoline_exit:
ld_last_inst:
/* Save off the guest instruction we're at */
+
+ /* Set guest mode to 'jump over instruction' so if lwz faults
+ * we'll just continue at the next IP. */
+ li r9, KVM_GUEST_MODE_SKIP
+ stb r9, PACA_KVM_IN_GUEST(r13)
+
/* 1) enable paging for data */
mfmsr r9
ori r11, r9, MSR_DR /* Enable paging for data */
mtmsr r11
/* 2) fetch the instruction */
- lwz r0, 0(r10)
+ li r0, KVM_INST_FETCH_FAILED /* In case lwz faults */
+ lwz r0, 0(r3)
/* 3) disable paging again */
mtmsr r9
no_ld_last_inst:
+ /* Unset guest mode */
+ li r9, KVM_GUEST_MODE_NONE
+ stb r9, PACA_KVM_IN_GUEST(r13)
+
/* Restore bolted entries from the shadow and fix it along the way */
/* We don't store anything in entry 0, so we don't need to take care of it */
@@ -233,29 +275,27 @@ no_ld_last_inst:
slb_do_exit:
- /* Restore registers */
-
- ld r11, (PACA_EXMC+EX_DAR)(r13)
- ld r10, (PACA_EXMC+EX_LR)(r13)
- ld r9, (PACA_EXMC+EX_R3)(r13)
-
- /* Save last inst */
- stw r0, (PACA_EXMC+EX_LR)(r13)
-
- /* Save DAR and DSISR before going to paged mode */
- mfdar r0
- std r0, (PACA_EXMC+EX_DAR)(r13)
- mfdsisr r0
- stw r0, (PACA_EXMC+EX_DSISR)(r13)
+ /* Register usage at this point:
+ *
+ * R0 = guest last inst
+ * R1 = host R1
+ * R2 = host R2
+ * R3 = guest PC
+ * R4 = guest MSR
+ * R5 = guest DAR
+ * R6 = guest DSISR
+ * R12 = exit handler id
+ * R13 = PACA
+ * PACA.KVM.* = guest *
+ *
+ */
/* RFI into the highmem handler */
- mfmsr r0
- ori r0, r0, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
- mtsrr1 r0
- ld r0, PACASAVEDMSR(r13) /* Highmem handler address */
- mtsrr0 r0
-
- mfspr r0, SPRN_SPRG_SCRATCH0
+ mfmsr r7
+ ori r7, r7, MSR_IR|MSR_DR|MSR_RI /* Enable paging */
+ mtsrr1 r7
+ ld r8, PACA_KVM_VMHANDLER(r13) /* Highmem handler address */
+ mtsrr0 r8
RFI
kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 06f5a9ecc42c..e283e44e9f16 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -69,10 +69,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
for (i = 0; i < 32; i += 4) {
printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
- vcpu->arch.gpr[i],
- vcpu->arch.gpr[i+1],
- vcpu->arch.gpr[i+2],
- vcpu->arch.gpr[i+3]);
+ kvmppc_get_gpr(vcpu, i),
+ kvmppc_get_gpr(vcpu, i+1),
+ kvmppc_get_gpr(vcpu, i+2),
+ kvmppc_get_gpr(vcpu, i+3));
}
}
@@ -82,8 +82,9 @@ static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
set_bit(priority, &vcpu->arch.pending_exceptions);
}
-void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
+void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
+ /* BookE does flags in ESR, so ignore those we get here */
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}
@@ -97,6 +98,11 @@ int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
+void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
+{
+ clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
+}
+
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
@@ -426,7 +432,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = 0;
vcpu->arch.msr = 0;
- vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+ kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
vcpu->arch.shadow_pid = 1;
@@ -444,10 +450,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
regs->pc = vcpu->arch.pc;
- regs->cr = vcpu->arch.cr;
+ regs->cr = kvmppc_get_cr(vcpu);
regs->ctr = vcpu->arch.ctr;
regs->lr = vcpu->arch.lr;
- regs->xer = vcpu->arch.xer;
+ regs->xer = kvmppc_get_xer(vcpu);
regs->msr = vcpu->arch.msr;
regs->srr0 = vcpu->arch.srr0;
regs->srr1 = vcpu->arch.srr1;
@@ -461,7 +467,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->sprg7 = vcpu->arch.sprg6;
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
- regs->gpr[i] = vcpu->arch.gpr[i];
+ regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
return 0;
}
@@ -471,10 +477,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
int i;
vcpu->arch.pc = regs->pc;
- vcpu->arch.cr = regs->cr;
+ kvmppc_set_cr(vcpu, regs->cr);
vcpu->arch.ctr = regs->ctr;
vcpu->arch.lr = regs->lr;
- vcpu->arch.xer = regs->xer;
+ kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
vcpu->arch.srr0 = regs->srr0;
vcpu->arch.srr1 = regs->srr1;
@@ -486,8 +492,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
vcpu->arch.sprg6 = regs->sprg5;
vcpu->arch.sprg7 = regs->sprg6;
- for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
- vcpu->arch.gpr[i] = regs->gpr[i];
+ for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+ kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
return 0;
}
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index aebc65e93f4b..cbc790ee1928 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -62,20 +62,20 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
case OP_31_XOP_MFMSR:
rt = get_rt(inst);
- vcpu->arch.gpr[rt] = vcpu->arch.msr;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr);
kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
break;
case OP_31_XOP_MTMSR:
rs = get_rs(inst);
kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
- kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+ kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
break;
case OP_31_XOP_WRTEE:
rs = get_rs(inst);
vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
- | (vcpu->arch.gpr[rs] & MSR_EE);
+ | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
break;
@@ -101,22 +101,23 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
int emulated = EMULATE_DONE;
+ ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_DEAR:
- vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dear = spr_val; break;
case SPRN_ESR:
- vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.esr = spr_val; break;
case SPRN_DBCR0:
- vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dbcr0 = spr_val; break;
case SPRN_DBCR1:
- vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dbcr1 = spr_val; break;
case SPRN_DBSR:
- vcpu->arch.dbsr &= ~vcpu->arch.gpr[rs]; break;
+ vcpu->arch.dbsr &= ~spr_val; break;
case SPRN_TSR:
- vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+ vcpu->arch.tsr &= ~spr_val; break;
case SPRN_TCR:
- vcpu->arch.tcr = vcpu->arch.gpr[rs];
+ vcpu->arch.tcr = spr_val;
kvmppc_emulate_dec(vcpu);
break;
@@ -124,64 +125,64 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
* loaded into the real SPRGs when resuming the
* guest. */
case SPRN_SPRG4:
- vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg4 = spr_val; break;
case SPRN_SPRG5:
- vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg5 = spr_val; break;
case SPRN_SPRG6:
- vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg6 = spr_val; break;
case SPRN_SPRG7:
- vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg7 = spr_val; break;
case SPRN_IVPR:
- vcpu->arch.ivpr = vcpu->arch.gpr[rs];
+ vcpu->arch.ivpr = spr_val;
break;
case SPRN_IVOR0:
- vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
break;
case SPRN_IVOR1:
- vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
break;
case SPRN_IVOR2:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
break;
case SPRN_IVOR3:
- vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
break;
case SPRN_IVOR4:
- vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
break;
case SPRN_IVOR5:
- vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
break;
case SPRN_IVOR6:
- vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
break;
case SPRN_IVOR7:
- vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR8:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
break;
case SPRN_IVOR9:
- vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
break;
case SPRN_IVOR10:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
break;
case SPRN_IVOR11:
- vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
break;
case SPRN_IVOR12:
- vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
break;
case SPRN_IVOR13:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
break;
case SPRN_IVOR14:
- vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
break;
case SPRN_IVOR15:
- vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
break;
default:
@@ -197,65 +198,65 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_IVPR:
- vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
case SPRN_DEAR:
- vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break;
case SPRN_ESR:
- vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
case SPRN_DBCR0:
- vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
case SPRN_DBCR1:
- vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
case SPRN_DBSR:
- vcpu->arch.gpr[rt] = vcpu->arch.dbsr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
case SPRN_IVOR0:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
break;
case SPRN_IVOR1:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
break;
case SPRN_IVOR2:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
break;
case SPRN_IVOR3:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
break;
case SPRN_IVOR4:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
break;
case SPRN_IVOR5:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
break;
case SPRN_IVOR6:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
break;
case SPRN_IVOR7:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
break;
case SPRN_IVOR8:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
break;
case SPRN_IVOR9:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
break;
case SPRN_IVOR10:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
break;
case SPRN_IVOR11:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
break;
case SPRN_IVOR12:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
break;
case SPRN_IVOR13:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
break;
case SPRN_IVOR14:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
break;
case SPRN_IVOR15:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
break;
default:
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index be95b8d8e3b7..7644f7a9bac3 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -74,54 +74,55 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
+ ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_PID:
vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
- vcpu->arch.pid = vcpu->arch.gpr[rs];
+ vcpu->arch.pid = spr_val;
break;
case SPRN_PID1:
- vcpu_e500->pid[1] = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->pid[1] = spr_val; break;
case SPRN_PID2:
- vcpu_e500->pid[2] = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
- vcpu_e500->mas0 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas0 = spr_val; break;
case SPRN_MAS1:
- vcpu_e500->mas1 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas1 = spr_val; break;
case SPRN_MAS2:
- vcpu_e500->mas2 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas2 = spr_val; break;
case SPRN_MAS3:
- vcpu_e500->mas3 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas3 = spr_val; break;
case SPRN_MAS4:
- vcpu_e500->mas4 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas4 = spr_val; break;
case SPRN_MAS6:
- vcpu_e500->mas6 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas6 = spr_val; break;
case SPRN_MAS7:
- vcpu_e500->mas7 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->mas7 = spr_val; break;
case SPRN_L1CSR1:
- vcpu_e500->l1csr1 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->l1csr1 = spr_val; break;
case SPRN_HID0:
- vcpu_e500->hid0 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->hid0 = spr_val; break;
case SPRN_HID1:
- vcpu_e500->hid1 = vcpu->arch.gpr[rs]; break;
+ vcpu_e500->hid1 = spr_val; break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
- vcpu->arch.gpr[rs]);
+ spr_val);
break;
/* extra exceptions */
case SPRN_IVOR32:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
break;
case SPRN_IVOR33:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
break;
case SPRN_IVOR34:
- vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
break;
case SPRN_IVOR35:
- vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = vcpu->arch.gpr[rs];
+ vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
default:
@@ -138,63 +139,71 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_PID:
- vcpu->arch.gpr[rt] = vcpu_e500->pid[0]; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
case SPRN_PID1:
- vcpu->arch.gpr[rt] = vcpu_e500->pid[1]; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
case SPRN_PID2:
- vcpu->arch.gpr[rt] = vcpu_e500->pid[2]; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
case SPRN_MAS0:
- vcpu->arch.gpr[rt] = vcpu_e500->mas0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
case SPRN_MAS1:
- vcpu->arch.gpr[rt] = vcpu_e500->mas1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
case SPRN_MAS2:
- vcpu->arch.gpr[rt] = vcpu_e500->mas2; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
case SPRN_MAS3:
- vcpu->arch.gpr[rt] = vcpu_e500->mas3; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
case SPRN_MAS4:
- vcpu->arch.gpr[rt] = vcpu_e500->mas4; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
case SPRN_MAS6:
- vcpu->arch.gpr[rt] = vcpu_e500->mas6; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
case SPRN_MAS7:
- vcpu->arch.gpr[rt] = vcpu_e500->mas7; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
case SPRN_TLB0CFG:
- vcpu->arch.gpr[rt] = mfspr(SPRN_TLB0CFG);
- vcpu->arch.gpr[rt] &= ~0xfffUL;
- vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[0];
+ {
+ ulong tmp = SPRN_TLB0CFG;
+
+ tmp &= ~0xfffUL;
+ tmp |= vcpu_e500->guest_tlb_size[0];
+ kvmppc_set_gpr(vcpu, rt, tmp);
break;
+ }
case SPRN_TLB1CFG:
- vcpu->arch.gpr[rt] = mfspr(SPRN_TLB1CFG);
- vcpu->arch.gpr[rt] &= ~0xfffUL;
- vcpu->arch.gpr[rt] |= vcpu_e500->guest_tlb_size[1];
+ {
+ ulong tmp = SPRN_TLB1CFG;
+
+ tmp &= ~0xfffUL;
+ tmp |= vcpu_e500->guest_tlb_size[1];
+ kvmppc_set_gpr(vcpu, rt, tmp);
break;
+ }
case SPRN_L1CSR1:
- vcpu->arch.gpr[rt] = vcpu_e500->l1csr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
case SPRN_HID0:
- vcpu->arch.gpr[rt] = vcpu_e500->hid0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
case SPRN_HID1:
- vcpu->arch.gpr[rt] = vcpu_e500->hid1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
case SPRN_MMUCSR0:
- vcpu->arch.gpr[rt] = 0; break;
+ kvmppc_set_gpr(vcpu, rt, 0); break;
case SPRN_MMUCFG:
- vcpu->arch.gpr[rt] = mfspr(SPRN_MMUCFG); break;
+ kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
/* extra exceptions */
case SPRN_IVOR32:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
break;
case SPRN_IVOR33:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
break;
case SPRN_IVOR34:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
break;
case SPRN_IVOR35:
- vcpu->arch.gpr[rt] = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index fb1e1dc11ba5..6a7fc012b93f 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -417,7 +417,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
int esel, tlbsel;
gva_t ea;
- ea = ((ra) ? vcpu->arch.gpr[ra] : 0) + vcpu->arch.gpr[rb];
+ ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
ia = (ea >> 2) & 0x1;
@@ -470,7 +470,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
struct tlbe *gtlbe = NULL;
gva_t ea;
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
for (tlbsel = 0; tlbsel < 2; tlbsel++) {
esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 4a9ac6640fad..b905623735bd 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -83,6 +83,9 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC64
+ /* mtdec lowers the interrupt line when positive. */
+ kvmppc_core_dequeue_dec(vcpu);
+
/* POWER4+ triggers a dec interrupt if the value is < 0 */
if (vcpu->arch.dec & 0x80000000) {
hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
@@ -140,6 +143,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
+ /* Try again next time */
+ if (inst == KVM_INST_FETCH_FAILED)
+ return EMULATE_DONE;
+
switch (get_op(inst)) {
case OP_TRAP:
#ifdef CONFIG_PPC64
@@ -147,7 +154,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
#else
vcpu->arch.esr |= ESR_PTR;
#endif
- kvmppc_core_queue_program(vcpu);
+ kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
advance = 0;
break;
@@ -167,14 +174,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case OP_31_XOP_STWX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_31_XOP_STBX:
rs = get_rs(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
@@ -183,14 +190,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rb = get_rb(inst);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
- vcpu->arch.gpr[rs] = ea;
+ kvmppc_set_gpr(vcpu, rs, ea);
break;
case OP_31_XOP_LHZX:
@@ -203,12 +210,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rb = get_rb(inst);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- vcpu->arch.gpr[ra] = ea;
+ kvmppc_set_gpr(vcpu, ra, ea);
break;
case OP_31_XOP_MFSPR:
@@ -217,47 +224,49 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
case SPRN_SRR1:
- vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
case SPRN_PVR:
- vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
case SPRN_PIR:
- vcpu->arch.gpr[rt] = vcpu->vcpu_id; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
case SPRN_MSSSR0:
- vcpu->arch.gpr[rt] = 0; break;
+ kvmppc_set_gpr(vcpu, rt, 0); break;
/* Note: mftb and TBRL/TBWL are user-accessible, so
* the guest can always access the real TB anyways.
* In fact, we probably will never see these traps. */
case SPRN_TBWL:
- vcpu->arch.gpr[rt] = get_tb() >> 32; break;
+ kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
case SPRN_TBWU:
- vcpu->arch.gpr[rt] = get_tb(); break;
+ kvmppc_set_gpr(vcpu, rt, get_tb()); break;
case SPRN_SPRG0:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
case SPRN_SPRG1:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
case SPRN_SPRG2:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
case SPRN_SPRG3:
- vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
case SPRN_DEC:
{
u64 jd = get_tb() - vcpu->arch.dec_jiffies;
- vcpu->arch.gpr[rt] = vcpu->arch.dec - jd;
- pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, vcpu->arch.gpr[rt]);
+ kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
+ pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
+ vcpu->arch.dec, jd,
+ kvmppc_get_gpr(vcpu, rt));
break;
}
default:
emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
if (emulated == EMULATE_FAIL) {
printk("mfspr: unknown spr %x\n", sprn);
- vcpu->arch.gpr[rt] = 0;
+ kvmppc_set_gpr(vcpu, rt, 0);
}
break;
}
@@ -269,7 +278,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
@@ -278,14 +287,14 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rb = get_rb(inst);
- ea = vcpu->arch.gpr[rb];
+ ea = kvmppc_get_gpr(vcpu, rb);
if (ra)
- ea += vcpu->arch.gpr[ra];
+ ea += kvmppc_get_gpr(vcpu, ra);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
- vcpu->arch.gpr[ra] = ea;
+ kvmppc_set_gpr(vcpu, ra, ea);
break;
case OP_31_XOP_MTSPR:
@@ -293,9 +302,9 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rs = get_rs(inst);
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SRR1:
- vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;
/* XXX We need to context-switch the timebase for
* watchdog and FIT. */
@@ -305,18 +314,18 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
case SPRN_MSSSR0: break;
case SPRN_DEC:
- vcpu->arch.dec = vcpu->arch.gpr[rs];
+ vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
kvmppc_emulate_dec(vcpu);
break;
case SPRN_SPRG0:
- vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SPRG1:
- vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SPRG2:
- vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
case SPRN_SPRG3:
- vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+ vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;
default:
emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
@@ -348,7 +357,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
4, 0);
break;
@@ -363,7 +372,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
rb = get_rb(inst);
emulated = kvmppc_handle_store(run, vcpu,
- vcpu->arch.gpr[rs],
+ kvmppc_get_gpr(vcpu, rs),
2, 0);
break;
@@ -382,7 +391,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_LBZ:
@@ -394,35 +403,39 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_STW:
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
4, 1);
break;
case OP_STWU:
ra = get_ra(inst);
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
4, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_STB:
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
break;
case OP_STBU:
ra = get_ra(inst);
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
1, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_LHZ:
@@ -434,21 +447,23 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
ra = get_ra(inst);
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
case OP_STH:
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
break;
case OP_STHU:
ra = get_ra(inst);
rs = get_rs(inst);
- emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+ emulated = kvmppc_handle_store(run, vcpu,
+ kvmppc_get_gpr(vcpu, rs),
2, 1);
- vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
break;
default:
@@ -461,6 +476,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
+ kvmppc_core_queue_program(vcpu, 0);
}
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index f06cf93b178e..2c291161df89 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -165,14 +165,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
{
return 0;
}
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ return;
+}
+
+
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
@@ -260,34 +270,35 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
- *gpr = run->dcr.data;
+ kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
- ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+ ulong gpr;
- if (run->mmio.len > sizeof(*gpr)) {
+ if (run->mmio.len > sizeof(gpr)) {
printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
return;
}
if (vcpu->arch.mmio_is_bigendian) {
switch (run->mmio.len) {
- case 4: *gpr = *(u32 *)run->mmio.data; break;
- case 2: *gpr = *(u16 *)run->mmio.data; break;
- case 1: *gpr = *(u8 *)run->mmio.data; break;
+ case 4: gpr = *(u32 *)run->mmio.data; break;
+ case 2: gpr = *(u16 *)run->mmio.data; break;
+ case 1: gpr = *(u8 *)run->mmio.data; break;
}
} else {
/* Convert BE data from userland back to LE. */
switch (run->mmio.len) {
- case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
- case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
- case 1: *gpr = *(u8 *)run->mmio.data; break;
+ case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
+ case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 1: gpr = *(u8 *)run->mmio.data; break;
}
}
+
+ kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
}
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
diff --git a/arch/s390/Kconfig.debug b/arch/s390/Kconfig.debug
index 2283933a9a93..45e0c6199f36 100644
--- a/arch/s390/Kconfig.debug
+++ b/arch/s390/Kconfig.debug
@@ -6,4 +6,17 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
+config DEBUG_STRICT_USER_COPY_CHECKS
+ bool "Strict user copy size checks"
+ ---help---
+ Enabling this option turns a certain set of sanity checks for user
+ copy operations into compile time warnings.
+
+ The copy_from_user() etc checks are there to help test if there
+ are sufficient security checks on the length argument of
+ the copy operation, by having gcc prove that the argument is
+ within bounds.
+
+ If unsure, or if you run an older (pre 4.4) gcc, say N.
+
endmenu
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 6be4503201ac..58f46734465f 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -78,14 +78,14 @@ static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
int ret;
- sctx->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- sctx->fallback.blk->base.crt_flags |= (tfm->crt_flags &
+ sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
if (ret) {
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= (sctx->fallback.blk->base.crt_flags &
+ tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
CRYPTO_TFM_RES_MASK);
}
return ret;
diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h
index efb74fd5156e..b1066b9fb5f8 100644
--- a/arch/s390/include/asm/bug.h
+++ b/arch/s390/include/asm/bug.h
@@ -52,6 +52,10 @@
unreachable(); \
} while (0)
+#define __WARN() do { \
+ __EMIT_BUG(BUGFLAG_WARNING); \
+} while (0)
+
#define WARN_ON(x) ({ \
int __ret_warn_on = !!(x); \
if (__builtin_constant_p(__ret_warn_on)) { \
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index ec403d4304f8..f72d611f7e13 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -15,11 +15,19 @@
#ifndef __SIGP__
#define __SIGP__
-#include <asm/ptrace.h>
-#include <asm/atomic.h>
+#include <asm/system.h>
/* get real cpu address from logical cpu number */
-extern volatile int __cpu_logical_map[];
+extern int __cpu_logical_map[];
+
+static inline int cpu_logical_map(int cpu)
+{
+#ifdef CONFIG_SMP
+ return __cpu_logical_map[cpu];
+#else
+ return stap();
+#endif
+}
typedef enum
{
@@ -79,7 +87,7 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode)
- : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
+ : "d" (reg1), "d" (cpu_logical_map(cpu_addr)),
"a" (order_code) : "cc" , "memory");
return ccode;
}
@@ -98,7 +106,7 @@ signal_processor_p(__u32 parameter, __u16 cpu_addr, sigp_order_code order_code)
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode)
- : "d" (reg1), "d" (__cpu_logical_map[cpu_addr]),
+ : "d" (reg1), "d" (cpu_logical_map(cpu_addr)),
"a" (order_code) : "cc" , "memory");
return ccode;
}
@@ -118,7 +126,7 @@ signal_processor_ps(__u32 *statusptr, __u32 parameter, __u16 cpu_addr,
" ipm %0\n"
" srl %0,28\n"
: "=d" (ccode), "+d" (reg1)
- : "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
+ : "d" (cpu_logical_map(cpu_addr)), "a" (order_code)
: "cc" , "memory");
*statusptr = reg1;
return ccode;
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 07eb61b2fb3a..66069e736842 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -93,13 +93,12 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SYSCALL_AUDIT 9 /* syscall auditing active */
#define TIF_SECCOMP 10 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
-#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
-#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
+#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
-#define TIF_31BIT 18 /* 32bit process */
-#define TIF_MEMDIE 19
-#define TIF_RESTORE_SIGMASK 20 /* restore signal mask in do_signal() */
-#define TIF_FREEZE 21 /* thread is freezing for suspend */
+#define TIF_31BIT 17 /* 32bit process */
+#define TIF_MEMDIE 18
+#define TIF_RESTORE_SIGMASK 19 /* restore signal mask in do_signal() */
+#define TIF_FREEZE 20 /* thread is freezing for suspend */
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
@@ -112,7 +111,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1<<TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
-#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#define _TIF_31BIT (1<<TIF_31BIT)
#define _TIF_FREEZE (1<<TIF_FREEZE)
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index cbf0a8745bf4..d6b1ed0ec52b 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -265,6 +265,12 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
return uaccess.copy_from_user(n, from, to);
}
+extern void copy_from_user_overflow(void)
+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
+__compiletime_warning("copy_from_user() buffer size is not provably correct")
+#endif
+;
+
/**
* copy_from_user: - Copy a block of data from user space.
* @to: Destination address, in kernel space.
@@ -284,7 +290,13 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
static inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned int sz = __compiletime_object_size(to);
+
might_fault();
+ if (unlikely(sz != -1 && sz < n)) {
+ copy_from_user_overflow();
+ return n;
+ }
if (access_ok(VERIFY_READ, from, n))
n = __copy_from_user(to, from, n);
else
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 192a7203a14f..6e9f049fa823 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -269,8 +269,7 @@
#define __NR_pwritev 329
#define __NR_rt_tgsigqueueinfo 330
#define __NR_perf_event_open 331
-#define __NR_recvmmsg 332
-#define NR_syscalls 333
+#define NR_syscalls 332
/*
* There are some system calls that are not present on 64 bit, some
@@ -377,6 +376,9 @@
#define __IGNORE_migrate_pages
#define __IGNORE_move_pages
+/* Ignore system calls that are also reachable via sys_socket */
+#define __IGNORE_recvmmsg
+
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 22c9e557bb22..11c3aba664ea 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -616,44 +616,35 @@ asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename,
*/
struct mmap_arg_struct_emu31 {
- u32 addr;
- u32 len;
- u32 prot;
- u32 flags;
- u32 fd;
- u32 offset;
+ compat_ulong_t addr;
+ compat_ulong_t len;
+ compat_ulong_t prot;
+ compat_ulong_t flags;
+ compat_ulong_t fd;
+ compat_ulong_t offset;
};
-asmlinkage unsigned long
-old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
+asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
{
struct mmap_arg_struct_emu31 a;
- int error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
- goto out;
-
- error = -EINVAL;
+ return -EFAULT;
if (a.offset & ~PAGE_MASK)
- goto out;
-
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
- a.offset >> PAGE_SHIFT);
-out:
- return error;
+ return -EINVAL;
+ a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+ a.offset >> PAGE_SHIFT);
}
-asmlinkage long
-sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
+asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
{
struct mmap_arg_struct_emu31 a;
- int error = -EFAULT;
if (copy_from_user(&a, arg, sizeof(a)))
- goto out;
- error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
-out:
- return error;
+ return -EFAULT;
+ a.addr = (unsigned long) compat_ptr(a.addr);
+ return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index faeaccc7d7d9..30de2d0e52bb 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1853,12 +1853,3 @@ sys32_execve_wrapper:
llgtr %r3,%r3 # compat_uptr_t *
llgtr %r4,%r4 # compat_uptr_t *
jg sys32_execve # branch to system call
-
- .globl compat_sys_recvmmsg_wrapper
-compat_sys_recvmmsg_wrapper:
- lgfr %r2,%r2 # int
- llgtr %r3,%r3 # struct compat_mmsghdr *
- llgfr %r4,%r4 # unsigned int
- llgfr %r5,%r5 # unsigned int
- llgtr %r6,%r6 # struct compat_timespec *
- jg compat_sys_recvmmsg
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 5417eb57271a..00b6d1d292f2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -153,8 +153,6 @@ void exit_thread(void)
void flush_thread(void)
{
- clear_used_math();
- clear_tsk_thread_flag(current, TIF_USEDFPU);
}
void release_thread(struct task_struct *dead_task)
@@ -217,6 +215,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
p->thread.mm_segment = get_fs();
/* Don't copy debug registers */
memset(&p->thread.per_info, 0, sizeof(p->thread.per_info));
+ clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
/* Initialize per thread user and system timer values */
ti = task_thread_info(p);
ti->user_timer = 0;
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 13815d39f7dd..872b00a5ca9f 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -98,6 +98,9 @@ FixPerRegisters(struct task_struct *task)
per_info->control_regs.bits.storage_alt_space_ctl = 1;
else
per_info->control_regs.bits.storage_alt_space_ctl = 0;
+
+ if (task == current)
+ __ctl_load(per_info->control_regs.words, 9, 11);
}
void user_enable_single_step(struct task_struct *task)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 0663287fa1b3..8d8957b38ab3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -87,7 +87,6 @@ unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
-volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
int __initdata memory_end_set;
unsigned long __initdata memory_end;
@@ -124,12 +123,6 @@ void __cpuinit cpu_init(void)
*/
get_cpu_id(&S390_lowcore.cpu_id);
- /*
- * Force FPU initialization:
- */
- clear_thread_flag(TIF_USEDFPU);
- clear_used_math();
-
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
@@ -855,7 +848,6 @@ setup_arch(char **cmdline_p)
setup_lowcore();
cpu_init();
- __cpu_logical_map[0] = stap();
s390_init_cpu_topology();
/*
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 6b4fef877f9d..1675c48b9145 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -500,18 +500,10 @@ void do_signal(struct pt_regs *regs)
clear_thread_flag(TIF_RESTORE_SIGMASK);
/*
- * If we would have taken a single-step trap
- * for a normal instruction, act like we took
- * one for the handler setup.
- */
- if (current->thread.per_info.single_step)
- set_thread_flag(TIF_SINGLE_STEP);
-
- /*
* Let tracing know that we've done the handler setup.
*/
tracehook_signal_handler(signr, &info, &ka, regs,
- test_thread_flag(TIF_SINGLE_STEP));
+ current->thread.per_info.single_step);
}
return;
}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c684f04f3d7f..39502de662fd 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -52,6 +52,9 @@
#include <asm/cpu.h>
#include "entry.h"
+/* logical cpu to cpu address */
+int __cpu_logical_map[NR_CPUS];
+
static struct task_struct *current_set[NR_CPUS];
static u8 smp_cpu_type;
@@ -717,6 +720,12 @@ void __init smp_cpus_done(unsigned int max_cpus)
{
}
+void __init smp_setup_processor_id(void)
+{
+ S390_lowcore.cpu_nr = 0;
+ __cpu_logical_map[0] = stap();
+}
+
/*
* the frequency of the profiling timer can be changed
* by writing a multiplier value into /proc/profile.
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 4f292c936872..30eca070d426 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -340,4 +340,3 @@ SYSCALL(sys_preadv,sys_preadv,compat_sys_preadv_wrapper)
SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev_wrapper)
SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo_wrapper) /* 330 */
SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper)
-SYSCALL(sys_recvmmsg,sys_recvmmsg,compat_sys_recvmmsg_wrapper)
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 3c72c9cf22b6..14ef6f05e432 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -114,7 +114,7 @@ static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
rcpu = CPU_BITS - 1 - cpu + tl_cpu->origin;
for_each_present_cpu(lcpu) {
- if (__cpu_logical_map[lcpu] == rcpu) {
+ if (cpu_logical_map(lcpu) == rcpu) {
cpu_set(lcpu, core->mask);
smp_cpu_polarization[lcpu] = tl_cpu->pp;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f8bcaefd7d34..8bedd31011dd 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -689,14 +689,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
}
/* Section: memory related */
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
- struct kvm_memory_slot old,
- int user_alloc)
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc)
{
- int i;
- struct kvm_vcpu *vcpu;
-
/* A few sanity checks. We can have exactly one memory slot which has
to start at guest virtual zero and which has to be located at a
page boundary in userland and which has to end at a page boundary.
@@ -719,14 +717,23 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!user_alloc)
return -EINVAL;
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
/* request update of sie control block for all available vcpus */
kvm_for_each_vcpu(i, vcpu, kvm) {
if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
continue;
kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
}
-
- return 0;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 06cce8285ba0..60f09ab3672c 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -67,10 +67,14 @@ static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu)
static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
{
+ int idx;
struct kvm_memory_slot *mem;
+ struct kvm_memslots *memslots;
- down_read(&vcpu->kvm->slots_lock);
- mem = &vcpu->kvm->memslots[0];
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ memslots = rcu_dereference(vcpu->kvm->memslots);
+
+ mem = &memslots->memslots[0];
vcpu->arch.sie_block->gmsor = mem->userspace_addr;
vcpu->arch.sie_block->gmslm =
@@ -78,7 +82,7 @@ static inline void kvm_s390_vcpu_set_mem(struct kvm_vcpu *vcpu)
(mem->npages << PAGE_SHIFT) +
VIRTIODESCSPACE - 1ul;
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
/* implemented in priv.c */
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 97975ec7a274..cd54a1c352af 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for s390-specific library files..
#
-lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
+lib-y += delay.o string.o uaccess_std.o uaccess_pt.o usercopy.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index 10754a375668..cff327f109a8 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -34,7 +34,7 @@ static inline void _raw_yield_cpu(int cpu)
{
if (MACHINE_HAS_DIAG9C)
asm volatile("diag %0,0,0x9c"
- : : "d" (__cpu_logical_map[cpu]));
+ : : "d" (cpu_logical_map(cpu)));
else
_raw_yield();
}
diff --git a/arch/s390/lib/usercopy.c b/arch/s390/lib/usercopy.c
new file mode 100644
index 000000000000..14b363fec8a2
--- /dev/null
+++ b/arch/s390/lib/usercopy.c
@@ -0,0 +1,8 @@
+#include <linux/module.h>
+#include <linux/bug.h>
+
+void copy_from_user_overflow(void)
+{
+ WARN(1, "Buffer overflow detected!\n");
+}
+EXPORT_SYMBOL(copy_from_user_overflow);
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index f4558ccf02b9..869efbaed3ea 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -40,7 +40,7 @@
static inline unsigned long mmap_base(void)
{
- unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
+ unsigned long gap = rlimit(RLIMIT_STACK);
if (gap < MIN_GAP)
gap = MIN_GAP;
@@ -61,7 +61,7 @@ static inline int mmap_is_legacy(void)
#endif
return sysctl_legacy_va_layout ||
(current->personality & ADDR_COMPAT_LAYOUT) ||
- current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
+ rlimit(RLIMIT_STACK) == RLIM_INFINITY;
}
#ifndef CONFIG_64BIT
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index 8c15b2c85d5a..dfaf458d6702 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -106,7 +106,7 @@ void __init mem_init(void)
ram << (PAGE_SHIFT-10), codesize >> 10,
reservedpages << (PAGE_SHIFT-10), datasize >> 10,
initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
+ totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 2121fbb2ff4c..ae6c73689036 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -726,8 +726,9 @@ config GUSA_RB
disabling interrupts around the atomic sequence.
config SPARSE_IRQ
- bool "Support sparse irq numbering"
- depends on EXPERIMENTAL
+ def_bool y
+ depends on SUPERH32 && !SH_DREAMCAST && !SH_HIGHLANDER && \
+ !SH_RTS7751R2D && !HD64461 && !SH_7724_SOLUTION_ENGINE
help
This enables support for sparse irqs. This is useful in general
as most CPUs have a fairly sparse array of IRQ vectors, which
diff --git a/arch/sh/boards/mach-se/7343/irq.c b/arch/sh/boards/mach-se/7343/irq.c
index 051c29d4eae0..c60fd13608d0 100644
--- a/arch/sh/boards/mach-se/7343/irq.c
+++ b/arch/sh/boards/mach-se/7343/irq.c
@@ -16,15 +16,17 @@
#include <linux/io.h>
#include <mach-se/mach/se7343.h>
+unsigned int se7343_fpga_irq[SE7343_FPGA_IRQ_NR] = { 0, };
+
static void disable_se7343_irq(unsigned int irq)
{
- unsigned int bit = irq - SE7343_FPGA_IRQ_BASE;
+ unsigned int bit = (unsigned int)get_irq_chip_data(irq);
ctrl_outw(ctrl_inw(PA_CPLD_IMSK) | 1 << bit, PA_CPLD_IMSK);
}
static void enable_se7343_irq(unsigned int irq)
{
- unsigned int bit = irq - SE7343_FPGA_IRQ_BASE;
+ unsigned int bit = (unsigned int)get_irq_chip_data(irq);
ctrl_outw(ctrl_inw(PA_CPLD_IMSK) & ~(1 << bit), PA_CPLD_IMSK);
}
@@ -38,18 +40,15 @@ static struct irq_chip se7343_irq_chip __read_mostly = {
static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
{
unsigned short intv = ctrl_inw(PA_CPLD_ST);
- struct irq_desc *ext_desc;
- unsigned int ext_irq = SE7343_FPGA_IRQ_BASE;
+ unsigned int ext_irq = 0;
intv &= (1 << SE7343_FPGA_IRQ_NR) - 1;
- while (intv) {
- if (intv & 1) {
- ext_desc = irq_desc + ext_irq;
- handle_level_irq(ext_irq, ext_desc);
- }
- intv >>= 1;
- ext_irq++;
+ for (; intv; intv >>= 1, ext_irq++) {
+ if (!(intv & 1))
+ continue;
+
+ generic_handle_irq(se7343_fpga_irq[ext_irq]);
}
}
@@ -58,16 +57,24 @@ static void se7343_irq_demux(unsigned int irq, struct irq_desc *desc)
*/
void __init init_7343se_IRQ(void)
{
- int i;
+ int i, irq;
ctrl_outw(0, PA_CPLD_IMSK); /* disable all irqs */
ctrl_outw(0x2000, 0xb03fffec); /* mrshpc irq enable */
- for (i = 0; i < SE7343_FPGA_IRQ_NR; i++)
- set_irq_chip_and_handler_name(SE7343_FPGA_IRQ_BASE + i,
+ for (i = 0; i < SE7343_FPGA_IRQ_NR; i++) {
+ irq = create_irq();
+ if (irq < 0)
+ return;
+ se7343_fpga_irq[i] = irq;
+
+ set_irq_chip_and_handler_name(se7343_fpga_irq[i],
&se7343_irq_chip,
handle_level_irq, "level");
+ set_irq_chip_data(se7343_fpga_irq[i], (void *)i);
+ }
+
set_irq_chained_handler(IRQ0_IRQ, se7343_irq_demux);
set_irq_type(IRQ0_IRQ, IRQ_TYPE_LEVEL_LOW);
set_irq_chained_handler(IRQ1_IRQ, se7343_irq_demux);
diff --git a/arch/sh/boards/mach-se/7343/setup.c b/arch/sh/boards/mach-se/7343/setup.c
index 4de56f35f419..292cc47d853f 100644
--- a/arch/sh/boards/mach-se/7343/setup.c
+++ b/arch/sh/boards/mach-se/7343/setup.c
@@ -82,7 +82,6 @@ static struct plat_serial8250_port serial_platform_data[] = {
.mapbase = 0x16000000,
.regshift = 1,
.flags = ST16C2550C_FLAGS,
- .irq = UARTA_IRQ,
.uartclk = 7372800,
},
[1] = {
@@ -90,7 +89,6 @@ static struct plat_serial8250_port serial_platform_data[] = {
.mapbase = 0x17000000,
.regshift = 1,
.flags = ST16C2550C_FLAGS,
- .irq = UARTB_IRQ,
.uartclk = 7372800,
},
{ },
@@ -121,7 +119,7 @@ static struct resource usb_resources[] = {
.flags = IORESOURCE_MEM,
},
[2] = {
- .start = USB_IRQ,
+ /* Filled in later */
.flags = IORESOURCE_IRQ,
},
};
@@ -138,8 +136,8 @@ static struct isp116x_platform_data usb_platform_data = {
static struct platform_device usb_device = {
.name = "isp116x-hcd",
.id = -1,
- .num_resources = ARRAY_SIZE(usb_resources),
- .resource = usb_resources,
+ .num_resources = ARRAY_SIZE(usb_resources),
+ .resource = usb_resources,
.dev = {
.platform_data = &usb_platform_data,
},
@@ -155,6 +153,13 @@ static struct platform_device *sh7343se_platform_devices[] __initdata = {
static int __init sh7343se_devices_setup(void)
{
+ /* Wire-up dynamic vectors */
+ serial_platform_data[0].irq = se7343_fpga_irq[SE7343_FPGA_IRQ_UARTA];
+ serial_platform_data[1].irq = se7343_fpga_irq[SE7343_FPGA_IRQ_UARTB];
+
+ usb_resources[2].start = usb_resources[2].end =
+ se7343_fpga_irq[SE7343_FPGA_IRQ_USB];
+
return platform_add_devices(sh7343se_platform_devices,
ARRAY_SIZE(sh7343se_platform_devices));
}
@@ -179,6 +184,5 @@ static void __init sh7343se_setup(char **cmdline_p)
static struct sh_machine_vector mv_7343se __initmv = {
.mv_name = "SolutionEngine 7343",
.mv_setup = sh7343se_setup,
- .mv_nr_irqs = SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_NR,
.mv_init_irq = init_7343se_IRQ,
};
diff --git a/arch/sh/include/asm/alignment.h b/arch/sh/include/asm/alignment.h
new file mode 100644
index 000000000000..b12efecf5294
--- /dev/null
+++ b/arch/sh/include/asm/alignment.h
@@ -0,0 +1,21 @@
+#ifndef __ASM_SH_ALIGNMENT_H
+#define __ASM_SH_ALIGNMENT_H
+
+#include <linux/types.h>
+
+extern void inc_unaligned_byte_access(void);
+extern void inc_unaligned_word_access(void);
+extern void inc_unaligned_dword_access(void);
+extern void inc_unaligned_multi_access(void);
+extern void inc_unaligned_user_access(void);
+extern void inc_unaligned_kernel_access(void);
+
+#define UM_WARN (1 << 0)
+#define UM_FIXUP (1 << 1)
+#define UM_SIGNAL (1 << 2)
+
+extern unsigned int unaligned_user_action(void);
+
+extern void unaligned_fixups_notify(struct task_struct *, insn_size_t, struct pt_regs *);
+
+#endif /* __ASM_SH_ALIGNMENT_H */
diff --git a/arch/sh/include/asm/atomic-grb.h b/arch/sh/include/asm/atomic-grb.h
index 4c5b7dbfcedb..a273c88578fc 100644
--- a/arch/sh/include/asm/atomic-grb.h
+++ b/arch/sh/include/asm/atomic-grb.h
@@ -120,50 +120,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
: "memory" , "r0", "r1");
}
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
-
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t"
- " nop \n\t"
- " mov r15, r1 \n\t"
- " mov #-8, r15 \n\t"
- " mov.l @%1, %0 \n\t"
- " cmp/eq %2, %0 \n\t"
- " bf 1f \n\t"
- " mov.l %3, @%1 \n\t"
- "1: mov r1, r15 \n\t"
- : "=&r" (ret)
- : "r" (v), "r" (old), "r" (new)
- : "memory" , "r0", "r1" , "t");
-
- return ret;
-}
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
- int ret;
- unsigned long tmp;
-
- __asm__ __volatile__ (
- " .align 2 \n\t"
- " mova 1f, r0 \n\t"
- " nop \n\t"
- " mov r15, r1 \n\t"
- " mov #-12, r15 \n\t"
- " mov.l @%2, %1 \n\t"
- " mov %1, %0 \n\t"
- " cmp/eq %4, %0 \n\t"
- " bt/s 1f \n\t"
- " add %3, %1 \n\t"
- " mov.l %1, @%2 \n\t"
- "1: mov r1, r15 \n\t"
- : "=&r" (ret), "=&r" (tmp)
- : "r" (v), "r" (a), "r" (u)
- : "memory" , "r0", "r1" , "t");
-
- return ret != u;
-}
#endif /* __ASM_SH_ATOMIC_GRB_H */
diff --git a/arch/sh/include/asm/atomic-llsc.h b/arch/sh/include/asm/atomic-llsc.h
index b040e1e08610..4b00b78e3f4f 100644
--- a/arch/sh/include/asm/atomic-llsc.h
+++ b/arch/sh/include/asm/atomic-llsc.h
@@ -104,31 +104,4 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
: "t");
}
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
-
-/**
- * atomic_add_unless - add unless the number is a given value
- * @v: pointer of type atomic_t
- * @a: the amount to add to v...
- * @u: ...unless v is equal to u.
- *
- * Atomically adds @a to @v, so long as it was not @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
- */
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- for (;;) {
- if (unlikely(c == (u)))
- break;
- old = atomic_cmpxchg((v), c, c + (a));
- if (likely(old == c))
- break;
- c = old;
- }
-
- return c != (u);
-}
-
#endif /* __ASM_SH_ATOMIC_LLSC_H */
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index b16388d71954..275a448ae8c2 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -25,58 +25,43 @@
#endif
#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
-/*
- * atomic_inc_and_test - increment and test
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+
+/**
+ * atomic_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
*
- * Atomically increments @v by 1
- * and returns true if the result is zero, or false for all
- * other cases.
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
-
-#define atomic_inc(v) atomic_add(1,(v))
-#define atomic_dec(v) atomic_sub(1,(v))
-
-#if !defined(CONFIG_GUSA_RB) && !defined(CONFIG_CPU_SH4A)
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
- int ret;
- unsigned long flags;
-
- local_irq_save(flags);
- ret = v->counter;
- if (likely(ret == old))
- v->counter = new;
- local_irq_restore(flags);
-
- return ret;
-}
-
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
- int ret;
- unsigned long flags;
-
- local_irq_save(flags);
- ret = v->counter;
- if (ret != u)
- v->counter += a;
- local_irq_restore(flags);
-
- return ret != u;
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+
+ return c != (u);
}
-#endif /* !CONFIG_GUSA_RB && !CONFIG_CPU_SH4A */
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index 63ca37bd9a95..f8982f4e0405 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -4,8 +4,16 @@
#include <linux/quicklist.h>
#include <asm/page.h>
-#define QUICK_PGD 0 /* We preserve special mappings over free */
-#define QUICK_PT 1 /* Other page table pages that are zero on free */
+#define QUICK_PT 0 /* Other page table pages that are zero on free */
+
+extern pgd_t *pgd_alloc(struct mm_struct *);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+
+#ifdef CONFIG_PGTABLE_LEVELS_3
+extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
+extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
+extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
+#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
pte_t *pte)
@@ -20,28 +28,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
}
#define pmd_pgtable(pmd) pmd_page(pmd)
-static inline void pgd_ctor(void *x)
-{
- pgd_t *pgd = x;
-
- memcpy(pgd + USER_PTRS_PER_PGD,
- swapper_pg_dir + USER_PTRS_PER_PGD,
- (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
-}
-
/*
* Allocate and free page tables.
*/
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
- return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
-}
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
- quicklist_free(QUICK_PGD, NULL, pgd);
-}
-
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
@@ -81,7 +70,6 @@ do { \
static inline void check_pgt_cache(void)
{
- quicklist_trim(QUICK_PGD, NULL, 25, 16);
quicklist_trim(QUICK_PT, NULL, 25, 16);
}
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba3046e4f06f..78598ec33d0a 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -12,7 +12,11 @@
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H
-#include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_PGTABLE_LEVELS_3
+#include <asm/pgtable_pmd.h>
+#else
+#include <asm/pgtable_nopmd.h>
+#endif
#include <asm/page.h>
#ifndef __ASSEMBLY__
@@ -51,28 +55,12 @@ static inline unsigned long long neff_sign_extend(unsigned long val)
#define NPHYS_SIGN (1LL << (NPHYS - 1))
#define NPHYS_MASK (-1LL << NPHYS)
-/*
- * traditional two-level paging structure
- */
-/* PTE bits */
-#if defined(CONFIG_X2TLB) || defined(CONFIG_SUPERH64)
-# define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */
-#else
-# define PTE_MAGNITUDE 2 /* 32-bit PTEs */
-#endif
-#define PTE_SHIFT PAGE_SHIFT
-#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
-
-/* PGD bits */
-#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
-#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
#define PHYS_ADDR_MASK29 0x1fffffff
@@ -153,9 +141,9 @@ typedef pte_t *pte_addr_t;
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
/*
- * No page table caches to initialise
+ * Initialise the page table caches
*/
-#define pgtable_cache_init() do { } while (0)
+extern void pgtable_cache_init(void);
struct vm_area_struct;
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index 17cdbecc3adc..dd381588c695 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -43,11 +43,6 @@ static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
-{
- pmd_val(*pmdp) = (unsigned long) ptep;
-}
-
/*
* PGD defines. Top level.
*/
@@ -203,12 +198,6 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
/*
- * Handling allocation failures during page table setup.
- */
-extern void __handle_bad_pmd_kernel(pmd_t * pmd);
-#define __handle_bad_pmd(x) __handle_bad_pmd_kernel(x)
-
-/*
* PTE level access routines.
*
* Note1:
diff --git a/arch/sh/include/asm/pgtable_nopmd.h b/arch/sh/include/asm/pgtable_nopmd.h
new file mode 100644
index 000000000000..f0b525b3cb4a
--- /dev/null
+++ b/arch/sh/include/asm/pgtable_nopmd.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_SH_PGTABLE_NOPMD_H
+#define __ASM_SH_PGTABLE_NOPMD_H
+
+#include <asm-generic/pgtable-nopmd.h>
+
+/*
+ * traditional two-level paging structure
+ */
+
+/* PTE bits */
+#define PTE_MAGNITUDE 2 /* 32-bit PTEs */
+
+#define PTE_SHIFT PAGE_SHIFT
+#define PTE_BITS (PTE_SHIFT - PTE_MAGNITUDE)
+
+/* PGD bits */
+#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
+
+#define PTRS_PER_PGD (PAGE_SIZE / (1 << PTE_MAGNITUDE))
+#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
+
+#endif /* __ASM_SH_PGTABLE_NOPMD_H */
diff --git a/arch/sh/include/asm/pgtable_pmd.h b/arch/sh/include/asm/pgtable_pmd.h
new file mode 100644
index 000000000000..42a180e534a8
--- /dev/null
+++ b/arch/sh/include/asm/pgtable_pmd.h
@@ -0,0 +1,55 @@
+#ifndef __ASM_SH_PGTABLE_PMD_H
+#define __ASM_SH_PGTABLE_PMD_H
+
+#include <asm-generic/pgtable-nopud.h>
+
+/*
+ * Some cores need a 3-level page table layout, for example when using
+ * 64-bit PTEs and 4K pages.
+ */
+
+#define PTE_MAGNITUDE 3 /* 64-bit PTEs on extended mode SH-X2 TLB */
+
+/* PGD bits */
+#define PGDIR_SHIFT 30
+
+#define PTRS_PER_PGD 4
+#define USER_PTRS_PER_PGD 2
+
+/* PMD bits */
+#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PTE_MAGNITUDE))
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+#define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
+
+#define pmd_ERROR(e) \
+ printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
+
+typedef struct { unsigned long long pmd; } pmd_t;
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) } )
+
+static inline unsigned long pud_page_vaddr(pud_t pud)
+{
+ return pud_val(pud);
+}
+
+#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
+{
+ return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
+}
+
+#define pud_none(x) (!pud_val(x))
+#define pud_present(x) (pud_val(x))
+#define pud_clear(xp) do { set_pud(xp, __pud(0)); } while (0)
+#define pud_bad(x) (pud_val(x) & ~PAGE_MASK)
+
+/*
+ * (puds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while(0)
+
+#endif /* __ASM_SH_PGTABLE_PMD_H */
diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
index ce3743599b27..4758325bb24a 100644
--- a/arch/sh/include/asm/setup.h
+++ b/arch/sh/include/asm/setup.h
@@ -18,7 +18,6 @@
/* ... */
#define COMMAND_LINE ((char *) (PARAM+0x100))
-int setup_early_printk(char *);
void sh_mv_setup(void);
#endif /* __KERNEL__ */
diff --git a/arch/sh/include/asm/sh_bios.h b/arch/sh/include/asm/sh_bios.h
index d9c96d7cf6c7..95714c28422b 100644
--- a/arch/sh/include/asm/sh_bios.h
+++ b/arch/sh/include/asm/sh_bios.h
@@ -1,18 +1,27 @@
#ifndef __ASM_SH_BIOS_H
#define __ASM_SH_BIOS_H
+#ifdef CONFIG_SH_STANDARD_BIOS
+
/*
* Copyright (C) 2000 Greg Banks, Mitch Davis
* C API to interface to the standard LinuxSH BIOS
* usually from within the early stages of kernel boot.
*/
-
-
extern void sh_bios_console_write(const char *buf, unsigned int len);
-extern void sh_bios_char_out(char ch);
extern void sh_bios_gdb_detach(void);
extern void sh_bios_get_node_addr(unsigned char *node_addr);
extern void sh_bios_shutdown(unsigned int how);
+extern void sh_bios_vbr_init(void);
+extern void sh_bios_vbr_reload(void);
+
+#else
+
+static inline void sh_bios_vbr_init(void) { }
+static inline void sh_bios_vbr_reload(void) { }
+
+#endif /* CONFIG_SH_STANDARD_BIOS */
+
#endif /* __ASM_SH_BIOS_H */
diff --git a/arch/sh/include/mach-se/mach/se7343.h b/arch/sh/include/mach-se/mach/se7343.h
index 749914b400fb..8d8170d6cc43 100644
--- a/arch/sh/include/mach-se/mach/se7343.h
+++ b/arch/sh/include/mach-se/mach/se7343.h
@@ -94,26 +94,26 @@
#define PORT_DRVCR 0xA4050180
-#define PORT_PADR 0xA4050120
-#define PORT_PBDR 0xA4050122
-#define PORT_PCDR 0xA4050124
-#define PORT_PDDR 0xA4050126
-#define PORT_PEDR 0xA4050128
-#define PORT_PFDR 0xA405012A
-#define PORT_PGDR 0xA405012C
-#define PORT_PHDR 0xA405012E
-#define PORT_PJDR 0xA4050130
-#define PORT_PKDR 0xA4050132
-#define PORT_PLDR 0xA4050134
-#define PORT_PMDR 0xA4050136
-#define PORT_PNDR 0xA4050138
-#define PORT_PQDR 0xA405013A
-#define PORT_PRDR 0xA405013C
-#define PORT_PTDR 0xA4050160
-#define PORT_PUDR 0xA4050162
-#define PORT_PVDR 0xA4050164
-#define PORT_PWDR 0xA4050166
-#define PORT_PYDR 0xA4050168
+#define PORT_PADR 0xA4050120
+#define PORT_PBDR 0xA4050122
+#define PORT_PCDR 0xA4050124
+#define PORT_PDDR 0xA4050126
+#define PORT_PEDR 0xA4050128
+#define PORT_PFDR 0xA405012A
+#define PORT_PGDR 0xA405012C
+#define PORT_PHDR 0xA405012E
+#define PORT_PJDR 0xA4050130
+#define PORT_PKDR 0xA4050132
+#define PORT_PLDR 0xA4050134
+#define PORT_PMDR 0xA4050136
+#define PORT_PNDR 0xA4050138
+#define PORT_PQDR 0xA405013A
+#define PORT_PRDR 0xA405013C
+#define PORT_PTDR 0xA4050160
+#define PORT_PUDR 0xA4050162
+#define PORT_PVDR 0xA4050164
+#define PORT_PWDR 0xA4050166
+#define PORT_PYDR 0xA4050168
#define FPGA_IN 0xb1400000
#define FPGA_OUT 0xb1400002
@@ -133,18 +133,10 @@
#define SE7343_FPGA_IRQ_UARTB 11
#define SE7343_FPGA_IRQ_NR 12
-#define SE7343_FPGA_IRQ_BASE 120
-
-#define MRSHPC_IRQ3 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC3)
-#define MRSHPC_IRQ2 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC2)
-#define MRSHPC_IRQ1 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC1)
-#define MRSHPC_IRQ0 (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_MRSHPC0)
-#define SMC_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_SMC)
-#define USB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_USB)
-#define UARTA_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTA)
-#define UARTB_IRQ (SE7343_FPGA_IRQ_BASE + SE7343_FPGA_IRQ_UARTB)
/* arch/sh/boards/se/7343/irq.c */
+extern unsigned int se7343_fpga_irq[];
+
void init_7343se_IRQ(void);
#endif /* __ASM_SH_HITACHI_SE7343_H */
diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile
index 0d587da1ef12..5bec10c8bd74 100644
--- a/arch/sh/kernel/Makefile
+++ b/arch/sh/kernel/Makefile
@@ -22,7 +22,7 @@ obj-y := debugtraps.o dma-nommu.o dumpstack.o \
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o early_printk.o
+obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o
obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
diff --git a/arch/sh/kernel/early_printk.c b/arch/sh/kernel/early_printk.c
deleted file mode 100644
index f8bb50c6e050..000000000000
--- a/arch/sh/kernel/early_printk.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * arch/sh/kernel/early_printk.c
- *
- * Copyright (C) 1999, 2000 Niibe Yutaka
- * Copyright (C) 2002 M. R. Brown
- * Copyright (C) 2004 - 2007 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/console.h>
-#include <linux/tty.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-
-#include <asm/sh_bios.h>
-
-/*
- * Print a string through the BIOS
- */
-static void sh_console_write(struct console *co, const char *s,
- unsigned count)
-{
- sh_bios_console_write(s, count);
-}
-
-/*
- * Setup initial baud/bits/parity. We do two things here:
- * - construct a cflag setting for the first rs_open()
- * - initialize the serial port
- * Return non-zero if we didn't find a serial port.
- */
-static int __init sh_console_setup(struct console *co, char *options)
-{
- int cflag = CREAD | HUPCL | CLOCAL;
-
- /*
- * Now construct a cflag setting.
- * TODO: this is a totally bogus cflag, as we have
- * no idea what serial settings the BIOS is using, or
- * even if its using the serial port at all.
- */
- cflag |= B115200 | CS8 | /*no parity*/0;
-
- co->cflag = cflag;
-
- return 0;
-}
-
-static struct console bios_console = {
- .name = "bios",
- .write = sh_console_write,
- .setup = sh_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
-};
-
-static struct console *early_console;
-
-static int __init setup_early_printk(char *buf)
-{
- int keep_early = 0;
-
- if (!buf)
- return 0;
-
- if (strstr(buf, "keep"))
- keep_early = 1;
-
- if (!strncmp(buf, "bios", 4))
- early_console = &bios_console;
-
- if (likely(early_console)) {
- if (keep_early)
- early_console->flags &= ~CON_BOOT;
- else
- early_console->flags |= CON_BOOT;
- register_console(early_console);
- }
-
- return 0;
-}
-early_param("earlyprintk", setup_early_printk);
diff --git a/arch/sh/kernel/head_64.S b/arch/sh/kernel/head_64.S
index 3ea765844c74..defd851abefa 100644
--- a/arch/sh/kernel/head_64.S
+++ b/arch/sh/kernel/head_64.S
@@ -220,7 +220,6 @@ clear_DTLB:
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
-#ifdef CONFIG_EARLY_PRINTK
/*
* Setup a DTLB translation for SCIF phys.
*/
@@ -231,7 +230,6 @@ clear_DTLB:
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
-#endif
/*
* Set cache behaviours.
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 3e532d0d4a5c..1ec03c963e96 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -237,6 +237,18 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
return -1;
}
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ if (exception == 60)
+ return instruction_pointer(regs) - 2;
+ return instruction_pointer(regs);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+}
+
/*
* The primary entry points for the kgdb debug trap table entries.
*/
@@ -247,7 +259,7 @@ BUILD_TRAP_HANDLER(singlestep)
local_irq_save(flags);
regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
- kgdb_handle_exception(vec >> 2, SIGTRAP, 0, regs);
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 76f280223ebd..f52d8ed69e12 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -21,6 +21,7 @@
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
+#include <asm/sh_bios.h>
typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
unsigned long reboot_code_buffer,
@@ -28,7 +29,6 @@ typedef void (*relocate_new_kernel_t)(unsigned long indirection_page,
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
-extern void *gdb_vbr_vector;
extern void *vbr_base;
void machine_shutdown(void)
@@ -117,11 +117,7 @@ void machine_kexec(struct kimage *image)
kexec_info(image);
flush_cache_all();
-#if defined(CONFIG_SH_STANDARD_BIOS)
- asm volatile("ldc %0, vbr" :
- : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
- : "memory");
-#endif
+ sh_bios_vbr_reload();
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
diff --git a/arch/sh/kernel/sh_bios.c b/arch/sh/kernel/sh_bios.c
index c852f7805728..47475cca068a 100644
--- a/arch/sh/kernel/sh_bios.c
+++ b/arch/sh/kernel/sh_bios.c
@@ -1,19 +1,30 @@
/*
- * linux/arch/sh/kernel/sh_bios.c
* C interface for trapping into the standard LinuxSH BIOS.
*
* Copyright (C) 2000 Greg Banks, Mitch Davis
+ * Copyright (C) 1999, 2000 Niibe Yutaka
+ * Copyright (C) 2002 M. R. Brown
+ * Copyright (C) 2004 - 2010 Paul Mundt
*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*/
#include <linux/module.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
#include <asm/sh_bios.h>
#define BIOS_CALL_CONSOLE_WRITE 0
#define BIOS_CALL_ETH_NODE_ADDR 10
#define BIOS_CALL_SHUTDOWN 11
-#define BIOS_CALL_CHAR_OUT 0x1f /* TODO: hack */
#define BIOS_CALL_GDB_DETACH 0xff
+void *gdb_vbr_vector = NULL;
+
static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
long arg3)
{
@@ -23,6 +34,9 @@ static inline long sh_bios_call(long func, long arg0, long arg1, long arg2,
register long r6 __asm__("r6") = arg2;
register long r7 __asm__("r7") = arg3;
+ if (!gdb_vbr_vector)
+ return -ENOSYS;
+
__asm__ __volatile__("trapa #0x3f":"=z"(r0)
:"0"(r0), "r"(r4), "r"(r5), "r"(r6), "r"(r7)
:"memory");
@@ -34,11 +48,6 @@ void sh_bios_console_write(const char *buf, unsigned int len)
sh_bios_call(BIOS_CALL_CONSOLE_WRITE, (long)buf, (long)len, 0, 0);
}
-void sh_bios_char_out(char ch)
-{
- sh_bios_call(BIOS_CALL_CHAR_OUT, ch, 0, 0, 0);
-}
-
void sh_bios_gdb_detach(void)
{
sh_bios_call(BIOS_CALL_GDB_DETACH, 0, 0, 0, 0);
@@ -55,3 +64,109 @@ void sh_bios_shutdown(unsigned int how)
{
sh_bios_call(BIOS_CALL_SHUTDOWN, how, 0, 0, 0);
}
+
+/*
+ * Read the old value of the VBR register to initialise the vector
+ * through which debug and BIOS traps are delegated by the Linux trap
+ * handler.
+ */
+void sh_bios_vbr_init(void)
+{
+ unsigned long vbr;
+
+ if (unlikely(gdb_vbr_vector))
+ return;
+
+ __asm__ __volatile__ ("stc vbr, %0" : "=r" (vbr));
+
+ if (vbr) {
+ gdb_vbr_vector = (void *)(vbr + 0x100);
+ printk(KERN_NOTICE "Setting GDB trap vector to %p\n",
+ gdb_vbr_vector);
+ } else
+ printk(KERN_NOTICE "SH-BIOS not detected\n");
+}
+
+/**
+ * sh_bios_vbr_reload - Re-load the system VBR from the BIOS vector.
+ *
+ * This can be used by save/restore code to reinitialize the system VBR
+ * from the fixed BIOS VBR. A no-op if no BIOS VBR is known.
+ */
+void sh_bios_vbr_reload(void)
+{
+ if (gdb_vbr_vector)
+ __asm__ __volatile__ (
+ "ldc %0, vbr"
+ :
+ : "r" (((unsigned long) gdb_vbr_vector) - 0x100)
+ : "memory"
+ );
+}
+
+/*
+ * Print a string through the BIOS
+ */
+static void sh_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ sh_bios_console_write(s, count);
+}
+
+/*
+ * Setup initial baud/bits/parity. We do two things here:
+ * - construct a cflag setting for the first rs_open()
+ * - initialize the serial port
+ * Return non-zero if we didn't find a serial port.
+ */
+static int __init sh_console_setup(struct console *co, char *options)
+{
+ int cflag = CREAD | HUPCL | CLOCAL;
+
+ /*
+ * Now construct a cflag setting.
+ * TODO: this is a totally bogus cflag, as we have
+ * no idea what serial settings the BIOS is using, or
+ * even if its using the serial port at all.
+ */
+ cflag |= B115200 | CS8 | /*no parity*/0;
+
+ co->cflag = cflag;
+
+ return 0;
+}
+
+static struct console bios_console = {
+ .name = "bios",
+ .write = sh_console_write,
+ .setup = sh_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+};
+
+static struct console *early_console;
+
+static int __init setup_early_printk(char *buf)
+{
+ int keep_early = 0;
+
+ if (!buf)
+ return 0;
+
+ if (strstr(buf, "keep"))
+ keep_early = 1;
+
+ if (!strncmp(buf, "bios", 4))
+ early_console = &bios_console;
+
+ if (likely(early_console)) {
+ if (keep_early)
+ early_console->flags &= ~CON_BOOT;
+ else
+ early_console->flags |= CON_BOOT;
+ register_console(early_console);
+ }
+
+ return 0;
+}
+early_param("earlyprintk", setup_early_printk);
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index 86639beac3a2..204def6ecb6a 100644
--- a/arch/sh/kernel/traps_32.c
+++ b/arch/sh/kernel/traps_32.c
@@ -24,13 +24,13 @@
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
#include <linux/sysfs.h>
+#include <linux/uaccess.h>
#include <asm/system.h>
-#include <asm/uaccess.h>
+#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
+#include <asm/sh_bios.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
@@ -47,73 +47,6 @@
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
-static unsigned long se_user;
-static unsigned long se_sys;
-static unsigned long se_half;
-static unsigned long se_word;
-static unsigned long se_dword;
-static unsigned long se_multi;
-/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
- valid! */
-static int se_usermode = 3;
-/* 0: no warning 1: print a warning message, disabled by default */
-static int se_kernmode_warn;
-
-#ifdef CONFIG_PROC_FS
-static const char *se_usermode_action[] = {
- "ignored",
- "warn",
- "fixup",
- "fixup+warn",
- "signal",
- "signal+warn"
-};
-
-static int alignment_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "User:\t\t%lu\n", se_user);
- seq_printf(m, "System:\t\t%lu\n", se_sys);
- seq_printf(m, "Half:\t\t%lu\n", se_half);
- seq_printf(m, "Word:\t\t%lu\n", se_word);
- seq_printf(m, "DWord:\t\t%lu\n", se_dword);
- seq_printf(m, "Multi:\t\t%lu\n", se_multi);
- seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
- se_usermode_action[se_usermode]);
- seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
- se_kernmode_warn ? "+warn" : "");
- return 0;
-}
-
-static int alignment_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, alignment_proc_show, NULL);
-}
-
-static ssize_t alignment_proc_write(struct file *file,
- const char __user *buffer, size_t count, loff_t *pos)
-{
- int *data = PDE(file->f_path.dentry->d_inode)->data;
- char mode;
-
- if (count > 0) {
- if (get_user(mode, buffer))
- return -EFAULT;
- if (mode >= '0' && mode <= '5')
- *data = mode - '0';
- }
- return count;
-}
-
-static const struct file_operations alignment_proc_fops = {
- .owner = THIS_MODULE,
- .open = alignment_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = alignment_proc_write,
-};
-#endif
-
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
@@ -265,10 +198,10 @@ static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
count = 1<<(instruction&3);
switch (count) {
- case 1: se_half += 1; break;
- case 2: se_word += 1; break;
- case 4: se_dword += 1; break;
- case 8: se_multi += 1; break; /* ??? */
+ case 1: inc_unaligned_byte_access(); break;
+ case 2: inc_unaligned_word_access(); break;
+ case 4: inc_unaligned_dword_access(); break;
+ case 8: inc_unaligned_multi_access(); break;
}
ret = -EFAULT;
@@ -452,18 +385,8 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
rm = regs->regs[index];
/* shout about fixups */
- if (!expected) {
- if (user_mode(regs) && (se_usermode & 1) && printk_ratelimit())
- pr_notice("Fixing up unaligned userspace access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
- else if (se_kernmode_warn && printk_ratelimit())
- pr_notice("Fixing up unaligned kernel access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, task_pid_nr(current),
- (void *)regs->pc, instruction);
- }
+ if (!expected)
+ unaligned_fixups_notify(current, instruction, regs);
ret = -EFAULT;
switch (instruction&0xF000) {
@@ -616,10 +539,10 @@ asmlinkage void do_address_error(struct pt_regs *regs,
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
+ unsigned int user_action;
local_irq_enable();
-
- se_user += 1;
+ inc_unaligned_user_access();
set_fs(USER_DS);
if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
@@ -630,16 +553,12 @@ asmlinkage void do_address_error(struct pt_regs *regs,
set_fs(oldfs);
/* shout about userspace fixups */
- if (se_usermode & 1)
- printk(KERN_NOTICE "Unaligned userspace access "
- "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, current->pid, (void *)regs->pc,
- instruction);
+ unaligned_fixups_notify(current, instruction, regs);
- if (se_usermode & 2)
+ user_action = unaligned_user_action();
+ if (user_action & UM_FIXUP)
goto fixup;
-
- if (se_usermode & 4)
+ if (user_action & UM_SIGNAL)
goto uspace_segv;
else {
/* ignore */
@@ -659,7 +578,7 @@ fixup:
&user_mem_access, 0);
set_fs(oldfs);
- if (tmp==0)
+ if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
@@ -672,7 +591,7 @@ uspace_segv:
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
- se_sys += 1;
+ inc_unaligned_kernel_access();
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
@@ -687,11 +606,7 @@ uspace_segv:
die("insn faulting in do_address_error", regs, 0);
}
- if (se_kernmode_warn)
- printk(KERN_NOTICE "Unaligned kernel access "
- "on behalf of \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
- current->comm, current->pid, (void *)regs->pc,
- instruction);
+ unaligned_fixups_notify(current, instruction, regs);
handle_unaligned_access(instruction, regs,
&user_mem_access, 0);
@@ -876,35 +791,10 @@ asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
die_if_kernel("exception", regs, ex);
}
-#if defined(CONFIG_SH_STANDARD_BIOS)
-void *gdb_vbr_vector;
-
-static inline void __init gdb_vbr_init(void)
-{
- register unsigned long vbr;
-
- /*
- * Read the old value of the VBR register to initialise
- * the vector through which debug and BIOS traps are
- * delegated by the Linux trap handler.
- */
- asm volatile("stc vbr, %0" : "=r" (vbr));
-
- gdb_vbr_vector = (void *)(vbr + 0x100);
- printk("Setting GDB trap vector to 0x%08lx\n",
- (unsigned long)gdb_vbr_vector);
-}
-#endif
-
void __cpuinit per_cpu_trap_init(void)
{
extern void *vbr_base;
-#ifdef CONFIG_SH_STANDARD_BIOS
- if (raw_smp_processor_id() == 0)
- gdb_vbr_init();
-#endif
-
/* NOTE: The VBR value should be at P1
(or P2, virtural "fixed" address space).
It's definitely should not in physical address. */
@@ -959,6 +849,9 @@ void __init trap_init(void)
set_exception_table_vec(TRAP_UBC, break_point_trap);
#endif
+ /* Save off the BIOS VBR, if there is one */
+ sh_bios_vbr_init();
+
/* Setup VBR for boot cpu */
per_cpu_trap_init();
}
@@ -985,34 +878,3 @@ void dump_stack(void)
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
-
-#ifdef CONFIG_PROC_FS
-/*
- * This needs to be done after sysctl_init, otherwise sys/ will be
- * overwritten. Actually, this shouldn't be in sys/ at all since
- * it isn't a sysctl, and it doesn't contain sysctl information.
- * We now locate it in /proc/cpu/alignment instead.
- */
-static int __init alignment_init(void)
-{
- struct proc_dir_entry *dir, *res;
-
- dir = proc_mkdir("cpu", NULL);
- if (!dir)
- return -ENOMEM;
-
- res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
- &alignment_proc_fops, &se_usermode);
- if (!res)
- return -ENOMEM;
-
- res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
- &alignment_proc_fops, &se_kernmode_warn);
- if (!res)
- return -ENOMEM;
-
- return 0;
-}
-
-fs_initcall(alignment_init);
-#endif
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 986a71b88ca3..358c860aeb9b 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -189,13 +189,31 @@ config ARCH_MEMORY_PROBE
depends on MEMORY_HOTPLUG
choice
+ prompt "Page table layout"
+ default PGTABLE_LEVELS_3 if X2TLB
+ default PGTABLE_LEVELS_2
+
+config PGTABLE_LEVELS_2
+ bool "2 Levels"
+ help
+ This is the default page table layout for all SuperH CPUs.
+
+config PGTABLE_LEVELS_3
+ bool "3 Levels"
+ depends on X2TLB
+ help
+ This enables a 3 level page table structure.
+
+endchoice
+
+choice
prompt "Kernel page size"
default PAGE_SIZE_8KB if X2TLB
default PAGE_SIZE_4KB
config PAGE_SIZE_4KB
bool "4kB"
- depends on !MMU || !X2TLB
+ depends on !MMU || !X2TLB || PGTABLE_LEVELS_3
help
This is the default page size used by all SuperH CPUs.
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 8a70535fa7ce..9fa11d655044 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Linux SuperH-specific parts of the memory manager.
#
-obj-y := cache.o init.o consistent.o mmap.o
+obj-y := alignment.o cache.o init.o consistent.o mmap.o
cacheops-$(CONFIG_CPU_SH2) := cache-sh2.o
cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
@@ -15,7 +15,7 @@ obj-y += $(cacheops-y)
mmu-y := nommu.o extable_32.o
mmu-$(CONFIG_MMU) := extable_$(BITS).o fault_$(BITS).o \
- ioremap_$(BITS).o kmap.o tlbflush_$(BITS).o
+ ioremap_$(BITS).o kmap.o pgtable.o tlbflush_$(BITS).o
obj-y += $(mmu-y)
obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
new file mode 100644
index 000000000000..e615151eac3b
--- /dev/null
+++ b/arch/sh/mm/alignment.c
@@ -0,0 +1,159 @@
+/*
+ * Alignment access counters and corresponding user-space interfaces.
+ *
+ * Copyright (C) 2009 ST Microelectronics
+ * Copyright (C) 2009 - 2010 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <asm/alignment.h>
+
+static unsigned long se_user;
+static unsigned long se_sys;
+static unsigned long se_half;
+static unsigned long se_word;
+static unsigned long se_dword;
+static unsigned long se_multi;
+/* bitfield: 1: warn 2: fixup 4: signal -> combinations 2|4 && 1|2|4 are not
+ valid! */
+static int se_usermode = UM_WARN | UM_FIXUP;
+/* 0: no warning 1: print a warning message, disabled by default */
+static int se_kernmode_warn;
+
+void inc_unaligned_byte_access(void)
+{
+ se_half++;
+}
+
+void inc_unaligned_word_access(void)
+{
+ se_word++;
+}
+
+void inc_unaligned_dword_access(void)
+{
+ se_dword++;
+}
+
+void inc_unaligned_multi_access(void)
+{
+ se_multi++;
+}
+
+void inc_unaligned_user_access(void)
+{
+ se_user++;
+}
+
+void inc_unaligned_kernel_access(void)
+{
+ se_sys++;
+}
+
+unsigned int unaligned_user_action(void)
+{
+ return se_usermode;
+}
+
+void unaligned_fixups_notify(struct task_struct *tsk, insn_size_t insn,
+ struct pt_regs *regs)
+{
+ if (user_mode(regs) && (se_usermode & UM_WARN) && printk_ratelimit())
+ pr_notice("Fixing up unaligned userspace access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ tsk->comm, task_pid_nr(tsk),
+ (void *)regs->pc, insn);
+ else if (se_kernmode_warn && printk_ratelimit())
+ pr_notice("Fixing up unaligned kernel access "
+ "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+ tsk->comm, task_pid_nr(tsk),
+ (void *)regs->pc, insn);
+}
+
+static const char *se_usermode_action[] = {
+ "ignored",
+ "warn",
+ "fixup",
+ "fixup+warn",
+ "signal",
+ "signal+warn"
+};
+
+static int alignment_proc_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "User:\t\t%lu\n", se_user);
+ seq_printf(m, "System:\t\t%lu\n", se_sys);
+ seq_printf(m, "Half:\t\t%lu\n", se_half);
+ seq_printf(m, "Word:\t\t%lu\n", se_word);
+ seq_printf(m, "DWord:\t\t%lu\n", se_dword);
+ seq_printf(m, "Multi:\t\t%lu\n", se_multi);
+ seq_printf(m, "User faults:\t%i (%s)\n", se_usermode,
+ se_usermode_action[se_usermode]);
+ seq_printf(m, "Kernel faults:\t%i (fixup%s)\n", se_kernmode_warn,
+ se_kernmode_warn ? "+warn" : "");
+ return 0;
+}
+
+static int alignment_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, alignment_proc_show, NULL);
+}
+
+static ssize_t alignment_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *pos)
+{
+ int *data = PDE(file->f_path.dentry->d_inode)->data;
+ char mode;
+
+ if (count > 0) {
+ if (get_user(mode, buffer))
+ return -EFAULT;
+ if (mode >= '0' && mode <= '5')
+ *data = mode - '0';
+ }
+ return count;
+}
+
+static const struct file_operations alignment_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = alignment_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = alignment_proc_write,
+};
+
+/*
+ * This needs to be done after sysctl_init, otherwise sys/ will be
+ * overwritten. Actually, this shouldn't be in sys/ at all since
+ * it isn't a sysctl, and it doesn't contain sysctl information.
+ * We now locate it in /proc/cpu/alignment instead.
+ */
+static int __init alignment_init(void)
+{
+ struct proc_dir_entry *dir, *res;
+
+ dir = proc_mkdir("cpu", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
+ &alignment_proc_fops, &se_usermode);
+ if (!res)
+ return -ENOMEM;
+
+ res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
+ &alignment_proc_fops, &se_kernmode_warn);
+ if (!res)
+ return -ENOMEM;
+
+ return 0;
+}
+fs_initcall(alignment_init);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 560ddb6bc8a7..a2301daeefa3 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -109,6 +109,7 @@ static inline void flush_cache_one(unsigned long start, unsigned long phys)
static void sh4_flush_dcache_page(void *arg)
{
struct page *page = arg;
+ unsigned long addr = (unsigned long)page_address(page);
#ifndef CONFIG_SMP
struct address_space *mapping = page_mapping(page);
@@ -116,16 +117,8 @@ static void sh4_flush_dcache_page(void *arg)
set_bit(PG_dcache_dirty, &page->flags);
else
#endif
- {
- unsigned long phys = page_to_phys(page);
- unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
- int i, n;
-
- /* Loop all the D-cache */
- n = boot_cpu_data.dcache.n_aliases;
- for (i = 0; i < n; i++, addr += PAGE_SIZE)
- flush_cache_one(addr, phys);
- }
+ flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
+ (addr & shm_align_mask), page_to_phys(page));
wmb();
}
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index 47530104e0ad..28e22839c665 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -53,6 +53,9 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
if (!pud_present(*pud_k))
return NULL;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
+
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 432acd07e76a..761910d142f8 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -120,7 +120,13 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pud = (pud_t *)pgd;
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+#ifdef __PAGETABLE_PMD_FOLDED
pmd = (pmd_t *)pud;
+#else
+ pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
+ pud_populate(&init_mm, pud, pmd);
+ pmd += k;
+#endif
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index ac16c05917ef..7694f50c9034 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -94,3 +94,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
}
+
+void pgtable_cache_init(void)
+{
+}
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
new file mode 100644
index 000000000000..e1bc5483cc07
--- /dev/null
+++ b/arch/sh/mm/pgtable.c
@@ -0,0 +1,57 @@
+#include <linux/mm.h>
+
+#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+
+static struct kmem_cache *pgd_cachep;
+
+#ifdef CONFIG_PGTABLE_LEVELS_3
+static struct kmem_cache *pmd_cachep;
+#endif
+
+void pgd_ctor(void *x)
+{
+ pgd_t *pgd = x;
+
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ swapper_pg_dir + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+void pgtable_cache_init(void)
+{
+ pgd_cachep = kmem_cache_create("pgd_cache",
+ PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
+ PAGE_SIZE, SLAB_PANIC, pgd_ctor);
+#ifdef CONFIG_PGTABLE_LEVELS_3
+ pmd_cachep = kmem_cache_create("pmd_cache",
+ PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
+ PAGE_SIZE, SLAB_PANIC, NULL);
+#endif
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ kmem_cache_free(pgd_cachep, pgd);
+}
+
+#ifdef CONFIG_PGTABLE_LEVELS_3
+void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ set_pud(pud, __pud((unsigned long)pmd));
+}
+
+pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
+}
+
+void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+ kmem_cache_free(pmd_cachep, pmd);
+}
+#endif /* CONFIG_PGTABLE_LEVELS_3 */
diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
index 04df4edc0073..539243b236fa 100644
--- a/arch/sparc/kernel/kgdb_32.c
+++ b/arch/sparc/kernel/kgdb_32.c
@@ -158,6 +158,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+ regs->npc = regs->pc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x7d */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index f5a0fd490b59..9bf387bfe955 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -180,6 +180,12 @@ void kgdb_arch_exit(void)
{
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->tpc = ip;
+ regs->tnpc = regs->tpc + 4;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index d242a7340541..b287b62c7ea3 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -21,7 +21,6 @@
#include <asm/perf_event.h>
#include <asm/ptrace.h>
-#include <asm/local.h>
#include <asm/pcr.h>
/* We don't have a real NMI on sparc64, but we can fake one
@@ -113,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
touched = 1;
}
if (!touched && __get_cpu_var(last_irq_sum) == sum) {
- __this_cpu_inc(per_cpu_var(alert_counter));
- if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz)
+ __this_cpu_inc(alert_counter);
+ if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- __this_cpu_write(per_cpu_var(alert_counter), 0);
+ __this_cpu_write(alert_counter, 0);
}
if (__get_cpu_var(wd_enabled)) {
write_pic(picl_value(nmi_hz));
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index fd3cee4d117c..1ddec403f512 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -149,11 +149,11 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
- sethi %hi(per_cpu____cpu_data), %l0
- lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
+ sethi %hi(__cpu_data), %l0
+ lduw [%l0 + %lo(__cpu_data)], %l1
#else
- sethi %hi(per_cpu____cpu_data), %l0
- or %l0, %lo(per_cpu____cpu_data), %l0
+ sethi %hi(__cpu_data), %l0
+ or %l0, %lo(__cpu_data), %l0
lduw [%l0 + %g5], %l1
#endif
cmp %l1, 0
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 55298e891571..512c3114314d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -49,6 +49,7 @@ config X86
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_LZMA
+ select HAVE_KERNEL_LZO
select HAVE_HW_BREAKPOINT
select PERF_EVENTS
select ANON_INODES
@@ -1246,6 +1247,11 @@ config ARCH_MEMORY_PROBE
def_bool X86_64
depends on MEMORY_HOTPLUG
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0 if X86_32
+ default 0xdead000000000000 if X86_64
+
source "mm/Kconfig"
config HIGHPTE
@@ -2032,6 +2038,8 @@ source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
+source "drivers/vbus/Kconfig"
+
endmenu
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index f25bbd37765a..fbb47daf2459 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
# create a compressed vmlinux image from the original vmlinux
#
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo head_$(BITS).o misc.o piggy.o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -49,10 +49,13 @@ $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
$(call if_changed,bzip2)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzma)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
+ $(call if_changed,lzo)
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_KERNEL_LZO) := lzo
quiet_cmd_mkpiggy = MKPIGGY $@
cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 842b2a36174a..3b22fe8ab91b 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -162,6 +162,10 @@ static int lines, cols;
#include "../../../../lib/decompress_unlzma.c"
#endif
+#ifdef CONFIG_KERNEL_LZO
+#include "../../../../lib/decompress_unlzo.c"
+#endif
+
static void scroll(void)
{
int i;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 53147ad85b96..a861829fb396 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -842,4 +842,8 @@ ia32_sys_call_table:
.quad compat_sys_rt_tgsigqueueinfo /* 335 */
.quad sys_perf_event_open
.quad compat_sys_recvmmsg
+ .quad sys_fanotify_init
+ .quad sys32_fanotify_mark
+ .quad compat_sys_getprlimit /* 340 */
+ .quad compat_sys_setprlimit
ia32_syscall_end:
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 422572c77923..47f22c568048 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -615,3 +615,12 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
+
+asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags,
+ u32 mask_lo, u32 mask_hi,
+ int fd, const char __user *pathname)
+{
+ return sys_fanotify_mark(fanotify_fd, flags,
+ ((u64)mask_hi << 32) | mask_lo,
+ fd, pathname);
+}
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h
index e6c6c808489f..006da3687cdc 100644
--- a/arch/x86/include/asm/kgdb.h
+++ b/arch/x86/include/asm/kgdb.h
@@ -76,4 +76,7 @@ static inline void arch_kgdb_breakpoint(void)
#define BREAK_INSTR_SIZE 1
#define CACHE_FLUSH_IS_SAFE 1
+extern int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig);
+
#endif /* _ASM_X86_KGDB_H */
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 7c18e1230f54..9b697c2735d9 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -74,7 +74,7 @@ struct x86_emulate_ops {
struct kvm_vcpu *vcpu);
/*
- * write_emulated: Read bytes from emulated/special memory area.
+ * write_emulated: Write bytes to emulated/special memory area.
* @addr: [IN ] Linear address to which to write.
* @val: [IN ] Value to write to memory (low-order bytes used as
* required).
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4f865e8b8540..a4de557ad733 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -25,7 +25,7 @@
#include <asm/mtrr.h>
#include <asm/msr-index.h>
-#define KVM_MAX_VCPUS 16
+#define KVM_MAX_VCPUS 64
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
@@ -38,19 +38,6 @@
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
0xFFFFFF0000000000ULL)
-#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
-#define KVM_GUEST_CR0_MASK \
- (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
- (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
-#define KVM_VM_CR0_ALWAYS_ON \
- (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
-#define KVM_GUEST_CR4_MASK \
- (X86_CR4_VME | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE | X86_CR4_VMXE)
-#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
-#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
-
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
@@ -285,6 +272,7 @@ struct kvm_vcpu_arch {
unsigned long cr2;
unsigned long cr3;
unsigned long cr4;
+ unsigned long cr4_guest_owned_bits;
unsigned long cr8;
u32 hflags;
u64 pdptrs[4]; /* pae */
@@ -380,11 +368,19 @@ struct kvm_mem_alias {
gfn_t base_gfn;
unsigned long npages;
gfn_t target_gfn;
+#define KVM_ALIAS_INVALID 1UL
+ unsigned long flags;
};
-struct kvm_arch{
- int naliases;
+#define KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+
+struct kvm_mem_aliases {
struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
+ int naliases;
+};
+
+struct kvm_arch {
+ struct kvm_mem_aliases *aliases;
unsigned int n_free_mmu_pages;
unsigned int n_requested_mmu_pages;
@@ -471,6 +467,7 @@ struct kvm_x86_ops {
int (*hardware_setup)(void); /* __init */
void (*hardware_unsetup)(void); /* __exit */
bool (*cpu_has_accelerated_tpr)(void);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);
/* Create, but do not attach this VCPU */
struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned id);
@@ -531,7 +528,8 @@ struct kvm_x86_ops {
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*get_tdp_level)(void);
u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
- bool (*gb_page_enable)(void);
+ int (*get_lpage_level)(void);
+ bool (*rdtscp_supported)(void);
const struct trace_print_flags *exit_reasons_str;
};
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 47b9b6f19057..2e9972468a5d 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -195,41 +195,4 @@ static inline long local_sub_return(long i, local_t *l)
#define __local_add(i, l) local_add((i), (l))
#define __local_sub(i, l) local_sub((i), (l))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable, not an address.
- *
- * X86_64: This could be done better if we moved the per cpu data directly
- * after GS.
- */
-
-/* Need to disable preemption for the cpu local counters otherwise we could
- still access a variable of a previous CPU in a non atomic way. */
-#define cpu_local_wrap_v(l) \
-({ \
- local_t res__; \
- preempt_disable(); \
- res__ = (l); \
- preempt_enable(); \
- res__; \
-})
-#define cpu_local_wrap(l) \
-({ \
- preempt_disable(); \
- (l); \
- preempt_enable(); \
-}) \
-
-#define cpu_local_read(l) cpu_local_wrap_v(local_read(&__get_cpu_var((l))))
-#define cpu_local_set(l, i) cpu_local_wrap(local_set(&__get_cpu_var((l)), (i)))
-#define cpu_local_inc(l) cpu_local_wrap(local_inc(&__get_cpu_var((l))))
-#define cpu_local_dec(l) cpu_local_wrap(local_dec(&__get_cpu_var((l))))
-#define cpu_local_add(i, l) cpu_local_wrap(local_add((i), &__get_cpu_var((l))))
-#define cpu_local_sub(i, l) cpu_local_wrap(local_sub((i), &__get_cpu_var((l))))
-
-#define __cpu_local_inc(l) cpu_local_inc((l))
-#define __cpu_local_dec(l) cpu_local_dec((l))
-#define __cpu_local_add(i, l) cpu_local_add((i), (l))
-#define __cpu_local_sub(i, l) cpu_local_sub((i), (l))
-
#endif /* _ASM_X86_LOCAL_H */
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 139d4c1a33a7..93da9c3f3341 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -19,7 +19,6 @@ extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
extern int check_nmi_watchdog(void);
extern int nmi_watchdog_enabled;
extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
-extern int avail_to_resrv_perfctr_nmi(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b4bf9a942ed0..cb39be7416d4 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -52,6 +52,8 @@ extern int pcibios_last_bus;
extern struct pci_bus *pci_root_bus;
extern struct pci_ops pci_root_ops;
+void pcibios_scan_specific_bus(int busn);
+
/* pci-irq.c */
struct irq_info {
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 0c44196b78ac..66a272dfd8b8 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -25,19 +25,18 @@
*/
#ifdef CONFIG_SMP
#define PER_CPU(var, reg) \
- __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg; \
- lea per_cpu__##var(reg), reg
-#define PER_CPU_VAR(var) %__percpu_seg:per_cpu__##var
+ __percpu_mov_op %__percpu_seg:this_cpu_off, reg; \
+ lea var(reg), reg
+#define PER_CPU_VAR(var) %__percpu_seg:var
#else /* ! SMP */
-#define PER_CPU(var, reg) \
- __percpu_mov_op $per_cpu__##var, reg
-#define PER_CPU_VAR(var) per_cpu__##var
+#define PER_CPU(var, reg) __percpu_mov_op $var, reg
+#define PER_CPU_VAR(var) var
#endif /* SMP */
#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
#else
-#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#define INIT_PER_CPU_VAR(var) var
#endif
#else /* ...!ASSEMBLY */
@@ -60,12 +59,12 @@
* There also must be an entry in vmlinux_64.lds.S
*/
#define DECLARE_INIT_PER_CPU(var) \
- extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+ extern typeof(var) init_per_cpu_var(var)
#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var) init_per_cpu__##var
#else
-#define init_per_cpu_var(var) per_cpu_var(var)
+#define init_per_cpu_var(var) var
#endif
/* For arch-specific code, we can use direct single-insn ops (they
@@ -104,6 +103,64 @@ do { \
} \
} while (0)
+/*
+ * Generate a percpu add to memory instruction and optimize code
+ * if a one is added or subtracted.
+ */
+#define percpu_add_op(var, val) \
+do { \
+ typedef typeof(var) pao_T__; \
+ const int pao_ID__ = (__builtin_constant_p(val) && \
+ ((val) == 1 || (val) == -1)) ? (val) : 0; \
+ if (0) { \
+ pao_T__ pao_tmp__; \
+ pao_tmp__ = (val); \
+ } \
+ switch (sizeof(var)) { \
+ case 1: \
+ if (pao_ID__ == 1) \
+ asm("incb "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decb "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addb %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "qi" ((pao_T__)(val))); \
+ break; \
+ case 2: \
+ if (pao_ID__ == 1) \
+ asm("incw "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decw "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addw %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 4: \
+ if (pao_ID__ == 1) \
+ asm("incl "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decl "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addl %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "ri" ((pao_T__)(val))); \
+ break; \
+ case 8: \
+ if (pao_ID__ == 1) \
+ asm("incq "__percpu_arg(0) : "+m" (var)); \
+ else if (pao_ID__ == -1) \
+ asm("decq "__percpu_arg(0) : "+m" (var)); \
+ else \
+ asm("addq %1, "__percpu_arg(0) \
+ : "+m" (var) \
+ : "re" ((pao_T__)(val))); \
+ break; \
+ default: __bad_percpu_size(); \
+ } \
+} while (0)
+
#define percpu_from_op(op, var, constraint) \
({ \
typeof(var) pfo_ret__; \
@@ -142,16 +199,14 @@ do { \
* per-thread variables implemented as per-cpu variables and thus
* stable for the duration of the respective task.
*/
-#define percpu_read(var) percpu_from_op("mov", per_cpu__##var, \
- "m" (per_cpu__##var))
-#define percpu_read_stable(var) percpu_from_op("mov", per_cpu__##var, \
- "p" (&per_cpu__##var))
-#define percpu_write(var, val) percpu_to_op("mov", per_cpu__##var, val)
-#define percpu_add(var, val) percpu_to_op("add", per_cpu__##var, val)
-#define percpu_sub(var, val) percpu_to_op("sub", per_cpu__##var, val)
-#define percpu_and(var, val) percpu_to_op("and", per_cpu__##var, val)
-#define percpu_or(var, val) percpu_to_op("or", per_cpu__##var, val)
-#define percpu_xor(var, val) percpu_to_op("xor", per_cpu__##var, val)
+#define percpu_read(var) percpu_from_op("mov", var, "m" (var))
+#define percpu_read_stable(var) percpu_from_op("mov", var, "p" (&(var)))
+#define percpu_write(var, val) percpu_to_op("mov", var, val)
+#define percpu_add(var, val) percpu_add_op(var, val)
+#define percpu_sub(var, val) percpu_add_op(var, -(val))
+#define percpu_and(var, val) percpu_to_op("and", var, val)
+#define percpu_or(var, val) percpu_to_op("or", var, val)
+#define percpu_xor(var, val) percpu_to_op("xor", var, val)
#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
@@ -160,9 +215,9 @@ do { \
#define __this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
#define __this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define __this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define __this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define __this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -179,9 +234,9 @@ do { \
#define this_cpu_write_1(pcp, val) percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val) percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define this_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define this_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -192,9 +247,9 @@ do { \
#define this_cpu_xor_2(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_xor_4(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_add_1(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_2(pcp, val) percpu_to_op("add", (pcp), val)
-#define irqsafe_cpu_add_4(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_1(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_2(pcp, val) percpu_add_op((pcp), val)
+#define irqsafe_cpu_add_4(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_and_1(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_2(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_and_4(pcp, val) percpu_to_op("and", (pcp), val)
@@ -212,19 +267,19 @@ do { \
#ifdef CONFIG_X86_64
#define __this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define __this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define __this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define __this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define __this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
#define this_cpu_read_8(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define this_cpu_write_8(pcp, val) percpu_to_op("mov", (pcp), val)
-#define this_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define this_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define this_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
-#define irqsafe_cpu_add_8(pcp, val) percpu_to_op("add", (pcp), val)
+#define irqsafe_cpu_add_8(pcp, val) percpu_add_op((pcp), val)
#define irqsafe_cpu_and_8(pcp, val) percpu_to_op("and", (pcp), val)
#define irqsafe_cpu_or_8(pcp, val) percpu_to_op("or", (pcp), val)
#define irqsafe_cpu_xor_8(pcp, val) percpu_to_op("xor", (pcp), val)
@@ -236,7 +291,7 @@ do { \
({ \
int old__; \
asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
- : "=r" (old__), "+m" (per_cpu__##var) \
+ : "=r" (old__), "+m" (var) \
: "dIr" (bit)); \
old__; \
})
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index d5f69045c100..8aeba19e8455 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -87,4 +87,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *);
/* ia32/ipc32.c */
asmlinkage long sys32_ipc(u32, int, int, int, compat_uptr_t, u32);
+
+asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int,
+ const char __user *);
#endif /* _ASM_X86_SYS_IA32_H */
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index ecb544e65382..e529f26c3292 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -32,7 +32,7 @@ extern void show_regs_common(void);
"movl %P[task_canary](%[next]), %%ebx\n\t" \
"movl %%ebx, "__percpu_arg([stack_canary])"\n\t"
#define __switch_canary_oparam \
- , [stack_canary] "=m" (per_cpu_var(stack_canary.canary))
+ , [stack_canary] "=m" (stack_canary.canary)
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
@@ -114,7 +114,7 @@ do { \
"movq %P[task_canary](%%rsi),%%r8\n\t" \
"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
#define __switch_canary_oparam \
- , [gs_canary] "=m" (per_cpu_var(irq_stack_union.stack_canary))
+ , [gs_canary] "=m" (irq_stack_union.stack_canary)
#define __switch_canary_iparam \
, [task_canary] "i" (offsetof(struct task_struct, stack_canary))
#else /* CC_STACKPROTECTOR */
@@ -133,7 +133,7 @@ do { \
__switch_canary \
"movq %P[thread_info](%%rsi),%%r8\n\t" \
"movq %%rax,%%rdi\n\t" \
- "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
+ "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
"jnz ret_from_fork\n\t" \
RESTORE_CONTEXT \
: "=a" (last) \
@@ -143,7 +143,7 @@ do { \
[ti_flags] "i" (offsetof(struct thread_info, flags)), \
[_tif_fork] "i" (_TIF_FORK), \
[thread_info] "i" (offsetof(struct task_struct, stack)), \
- [current_task] "m" (per_cpu_var(current_task)) \
+ [current_task] "m" (current_task) \
__switch_canary_iparam \
: "memory", "cc" __EXTRA_CLOBBER)
#endif
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 0c9825e97f36..088d09fb1615 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -205,14 +205,13 @@ static inline unsigned long __must_check copy_from_user(void *to,
unsigned long n)
{
int sz = __compiletime_object_size(to);
- int ret = -EFAULT;
if (likely(sz == -1 || sz >= n))
- ret = _copy_from_user(to, from, n);
+ n = _copy_from_user(to, from, n);
else
copy_from_user_overflow();
- return ret;
+ return n;
}
long __must_check strncpy_from_user(char *dst, const char __user *src,
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 46324c6a4f6e..535e421498f6 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -30,16 +30,15 @@ static inline unsigned long __must_check copy_from_user(void *to,
unsigned long n)
{
int sz = __compiletime_object_size(to);
- int ret = -EFAULT;
might_fault();
if (likely(sz == -1 || sz >= n))
- ret = _copy_from_user(to, from, n);
+ n = _copy_from_user(to, from, n);
#ifdef CONFIG_DEBUG_VM
else
WARN(1, "Buffer overflow detected!\n");
#endif
- return ret;
+ return n;
}
static __always_inline __must_check
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 3baf379fa840..64c7765ea4a1 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -343,10 +343,14 @@
#define __NR_rt_tgsigqueueinfo 335
#define __NR_perf_event_open 336
#define __NR_recvmmsg 337
+#define __NR_fanotify_init 338
+#define __NR_fanotify_mark 339
+#define __NR_getprlimit 340
+#define __NR_setprlimit 341
#ifdef __KERNEL__
-#define NR_syscalls 338
+#define NR_syscalls 342
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 4843f7ba754a..78e2e0bf1394 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -663,6 +663,14 @@ __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
#define __NR_recvmmsg 299
__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
+#define __NR_fanotify_init 300
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 301
+__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_getprlimit 302
+__SYSCALL(__NR_getprlimit, sys_getprlimit)
+#define __NR_setprlimit 303
+__SYSCALL(__NR_setprlimit, sys_setprlimit)
#ifndef __NO_STUBS
#define __ARCH_WANT_OLD_READDIR
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index 2751f3075d8b..3fbc1f348a7d 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -18,8 +18,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Russ Anderson
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
*/
#include <linux/rtc.h>
@@ -89,7 +89,7 @@ extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64);
extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64);
extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
-extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
+extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *, long *);
extern s64 uv_bios_freq_base(u64, u64 *);
extern int uv_bios_mq_watchlist_alloc(unsigned long, unsigned int,
unsigned long *);
@@ -104,6 +104,7 @@ extern int uv_type;
extern long sn_partition_id;
extern long sn_coherency_id;
extern long sn_region_size;
+extern long system_serial_number;
#define partition_coherence_id() (sn_coherency_id)
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 2b4945419a84..43f1e9b45917 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -53,6 +53,7 @@
*/
#define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
#define SECONDARY_EXEC_ENABLE_EPT 0x00000002
+#define SECONDARY_EXEC_RDTSCP 0x00000008
#define SECONDARY_EXEC_ENABLE_VPID 0x00000020
#define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
#define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
@@ -251,6 +252,7 @@ enum vmcs_field {
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_MWAIT_INSTRUCTION 36
+#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
@@ -362,6 +364,7 @@ enum vmcs_field {
#define VMX_EPTP_UC_BIT (1ull << 8)
#define VMX_EPTP_WB_BIT (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
+#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
#define VMX_EPT_EXTENT_INDIVIDUAL_BIT (1ull << 24)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index aa57c079c98f..e80f291472a4 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -62,7 +62,7 @@ unsigned int boot_cpu_physical_apicid = -1U;
/*
* The highest APIC ID seen during enumeration.
*
- * On AMD, this determines the messaging protocol we can use: if all APIC IDs
+ * This determines the messaging protocol we can use: if all APIC IDs
* are in the 0 ... 7 range, then we can use logical addressing which
* has some performance advantages (better broadcasting).
*
@@ -1898,14 +1898,24 @@ void __cpuinit generic_processor_info(int apicid, int version)
max_physical_apicid = apicid;
#ifdef CONFIG_X86_32
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- if (num_processors > 8)
- def_to_bigsmp = 1;
- break;
- case X86_VENDOR_AMD:
- if (max_physical_apicid >= 8)
+ /*
+ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+ * but we need to work other dependencies like SMP_SUSPEND etc
+ * before this can be done without some confusion.
+ * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
+ * - Ashok Raj <ashok.raj@intel.com>
+ */
+ if (max_physical_apicid >= 8) {
+ switch (boot_cpu_data.x86_vendor) {
+ case X86_VENDOR_INTEL:
+ if (!APIC_XAPIC(version)) {
+ def_to_bigsmp = 0;
+ break;
+ }
+ /* If P4 and above fall through */
+ case X86_VENDOR_AMD:
def_to_bigsmp = 1;
+ }
}
#endif
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index eacbd2b31d27..a8f4b9a12fdb 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -223,7 +223,7 @@ struct apic apic_flat = {
};
/*
- * Physflat mode is used when there are more than 8 CPUs on a AMD system.
+ * Physflat mode is used when there are more than 8 CPUs on a system.
* We cannot use logical delivery in this case because the mask
* overflows, so use physical mode.
*/
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index de00c4619a55..53243ca7816d 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2434,6 +2434,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
cfg = irq_cfg(irq);
raw_spin_lock(&desc->lock);
+ /*
+ * Check if the irq migration is in progress. If so, we
+ * haven't received the cleanup request yet for this irq.
+ */
+ if (cfg->move_in_progress)
+ goto unlock;
+
if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
goto unlock;
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 0159a69396cb..4ada42c3dabb 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -438,8 +438,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
* Ayiee, looks like this CPU is stuck ...
* wait a few IRQs (5 seconds) before doing the oops ...
*/
- __this_cpu_inc(per_cpu_var(alert_counter));
- if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
+ __this_cpu_inc(alert_counter);
+ if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
/*
* die_nmi will return ONLY if NOTIFY_STOP happens..
*/
@@ -447,7 +447,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
regs, panic_on_timeout);
} else {
__get_cpu_var(last_irq_sum) = sum;
- __this_cpu_write(per_cpu_var(alert_counter), 0);
+ __this_cpu_write(alert_counter, 0);
}
/* see if the nmi watchdog went off */
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index c4cbd3080c1c..65edc180fc82 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -64,23 +64,16 @@ void __init default_setup_apic_routing(void)
apic = &apic_x2apic_phys;
else
apic = &apic_x2apic_cluster;
+ printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
}
#endif
if (apic == &apic_flat) {
- switch (boot_cpu_data.x86_vendor) {
- case X86_VENDOR_INTEL:
- if (num_processors > 8)
- apic = &apic_physflat;
- break;
- case X86_VENDOR_AMD:
- if (max_physical_apicid >= 8)
- apic = &apic_physflat;
- }
+ if (max_physical_apicid >= 8)
+ apic = &apic_physflat;
+ printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
}
- printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
-
if (is_vsmp_box()) {
/* need to update phys_pkg_id */
apic->phys_pkg_id = apicid_phys_pkg_id;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 5f92494dab61..70c36c5aeb92 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -5,7 +5,7 @@
*
* SGI UV APIC functions (note: not an Intel compatible APIC)
*
- * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2009 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/cpumask.h>
#include <linux/hardirq.h>
@@ -624,8 +624,8 @@ void __init uv_system_init(void)
}
uv_bios_init();
- uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
- &sn_coherency_id, &sn_region_size);
+ uv_bios_get_sn_info(0, &uv_type, &sn_partition_id, &sn_coherency_id,
+ &sn_region_size, &system_serial_number);
uv_rtc_init();
for_each_present_cpu(cpu) {
diff --git a/arch/x86/kernel/bios_uv.c b/arch/x86/kernel/bios_uv.c
index b0206a211b09..c918ebab52ab 100644
--- a/arch/x86/kernel/bios_uv.c
+++ b/arch/x86/kernel/bios_uv.c
@@ -15,8 +15,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
- * Copyright (c) Russ Anderson
+ * Copyright (c) 2008-2009 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
*/
#include <linux/efi.h>
@@ -30,6 +30,7 @@ static struct uv_systab uv_systab;
s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
{
struct uv_systab *tab = &uv_systab;
+ s64 ret;
if (!tab->function)
/*
@@ -37,9 +38,11 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
*/
return BIOS_STATUS_UNIMPLEMENTED;
- return efi_call6((void *)__va(tab->function),
- (u64)which, a1, a2, a3, a4, a5);
+ ret = efi_call6((void *)__va(tab->function), (u64)which,
+ a1, a2, a3, a4, a5);
+ return ret;
}
+EXPORT_SYMBOL_GPL(uv_bios_call);
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
u64 a4, u64 a5)
@@ -73,11 +76,14 @@ long sn_coherency_id;
EXPORT_SYMBOL_GPL(sn_coherency_id);
long sn_region_size;
EXPORT_SYMBOL_GPL(sn_region_size);
+long system_serial_number;
+EXPORT_SYMBOL_GPL(system_serial_number);
int uv_type;
+EXPORT_SYMBOL_GPL(uv_type);
s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
- long *region)
+ long *region, long *ssn)
{
s64 ret;
u64 v0, v1;
@@ -97,8 +103,11 @@ s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
*coher = part.coherence_id;
if (region)
*region = part.region_size;
+ if (ssn)
+ *ssn = v1;
return ret;
}
+EXPORT_SYMBOL_GPL(uv_bios_get_sn_info);
int
uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
@@ -185,4 +194,3 @@ void uv_bios_init(void)
void uv_bios_init(void) { }
#endif
-
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig
index f138c6c389b9..870e6cc6ad28 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig
@@ -10,6 +10,20 @@ if CPU_FREQ
comment "CPUFreq processor drivers"
+config X86_PCC_CPUFREQ
+ tristate "Processor Clocking Control interface driver"
+ depends on ACPI && ACPI_PROCESSOR
+ help
+ This driver adds support for the PCC interface.
+
+ For details, take a look at:
+ <file:Documentation/cpu-freq/pcc-cpufreq.txt>.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pcc-cpufreq.
+
+ If in doubt, say N.
+
config X86_ACPI_CPUFREQ
tristate "ACPI Processor P-States driver"
select CPU_FREQ_TABLE
diff --git a/arch/x86/kernel/cpu/cpufreq/Makefile b/arch/x86/kernel/cpu/cpufreq/Makefile
index 509296df294d..1840c0a5170b 100644
--- a/arch/x86/kernel/cpu/cpufreq/Makefile
+++ b/arch/x86/kernel/cpu/cpufreq/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
+obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
obj-$(CONFIG_X86_POWERNOW_K7) += powernow-k7.o
obj-$(CONFIG_X86_LONGHAUL) += longhaul.o
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
new file mode 100644
index 000000000000..ff36d2979a90
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -0,0 +1,620 @@
+/*
+ * pcc-cpufreq.c - Processor Clocking Control firmware cpufreq interface
+ *
+ * Copyright (C) 2009 Red Hat, Matthew Garrett <mjg@redhat.com>
+ * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+ * Nagananda Chumbalkar <nagananda.chumbalkar@hp.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or NON
+ * INFRINGEMENT. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/compiler.h>
+
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <acpi/processor.h>
+
+#define PCC_VERSION "1.00.00"
+#define POLL_LOOPS 300
+
+#define CMD_COMPLETE 0x1
+#define CMD_GET_FREQ 0x0
+#define CMD_SET_FREQ 0x1
+
+#define BUF_SZ 4
+
+#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, \
+ "pcc-cpufreq", msg)
+
+struct pcc_register_resource {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 bit_width;
+ u8 bit_offset;
+ u8 access_size;
+ u64 address;
+} __attribute__ ((packed));
+
+struct pcc_memory_resource {
+ u8 descriptor;
+ u16 length;
+ u8 space_id;
+ u8 resource_usage;
+ u8 type_specific;
+ u64 granularity;
+ u64 minimum;
+ u64 maximum;
+ u64 translation_offset;
+ u64 address_length;
+} __attribute__ ((packed));
+
+static struct cpufreq_driver pcc_cpufreq_driver;
+
+struct pcc_header {
+ u32 signature;
+ u16 length;
+ u8 major;
+ u8 minor;
+ u32 features;
+ u16 command;
+ u16 status;
+ u32 latency;
+ u32 minimum_time;
+ u32 maximum_time;
+ u32 nominal;
+ u32 throttled_frequency;
+ u32 minimum_frequency;
+};
+
+static void __iomem *pcch_virt_addr;
+static struct pcc_header __iomem *pcch_hdr;
+
+static DEFINE_SPINLOCK(pcc_lock);
+
+static struct acpi_generic_address doorbell;
+
+static u64 doorbell_preserve;
+static u64 doorbell_write;
+
+static u8 OSC_UUID[16] = {0x63, 0x9B, 0x2C, 0x9F, 0x70, 0x91, 0x49, 0x1f,
+ 0xBB, 0x4F, 0xA5, 0x98, 0x2F, 0xA1, 0xB5, 0x46};
+
+struct pcc_cpu {
+ u32 input_offset;
+ u32 output_offset;
+};
+
+static struct pcc_cpu *pcc_cpu_info;
+
+static int pcc_cpufreq_verify(struct cpufreq_policy *policy)
+{
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
+ policy->cpuinfo.max_freq);
+ return 0;
+}
+
+static inline void pcc_cmd(void)
+{
+ u64 doorbell_value;
+ int i;
+
+ acpi_read(&doorbell_value, &doorbell);
+ acpi_write((doorbell_value & doorbell_preserve) | doorbell_write,
+ &doorbell);
+
+ for (i = 0; i < POLL_LOOPS; i++) {
+ if (ioread16(&pcch_hdr->status) & CMD_COMPLETE)
+ break;
+ }
+}
+
+static inline void pcc_clear_mapping(void)
+{
+ if (pcch_virt_addr)
+ iounmap(pcch_virt_addr);
+ pcch_virt_addr = NULL;
+}
+
+static unsigned int pcc_get_freq(unsigned int cpu)
+{
+ struct pcc_cpu *pcc_cpu_data;
+ unsigned int curr_freq;
+ unsigned int freq_limit;
+ u16 status;
+ u32 input_buffer;
+ u32 output_buffer;
+
+ spin_lock(&pcc_lock);
+
+ dprintk("get: get_freq for CPU %d\n", cpu);
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ input_buffer = 0x1;
+ iowrite32(input_buffer,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+ iowrite16(CMD_GET_FREQ, &pcch_hdr->command);
+
+ pcc_cmd();
+
+ output_buffer =
+ ioread32(pcch_virt_addr + pcc_cpu_data->output_offset);
+
+ /* Clear the input buffer - we are done with the current command */
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+
+ status = ioread16(&pcch_hdr->status);
+ if (status != CMD_COMPLETE) {
+ dprintk("get: FAILED: for CPU %d, status is %d\n",
+ cpu, status);
+ goto cmd_incomplete;
+ }
+ iowrite16(0, &pcch_hdr->status);
+ curr_freq = (((ioread32(&pcch_hdr->nominal) * (output_buffer & 0xff))
+ / 100) * 1000);
+
+ dprintk("get: SUCCESS: (virtual) output_offset for cpu %d is "
+ "0x%x, contains a value of: 0x%x. Speed is: %d MHz\n",
+ cpu, (pcch_virt_addr + pcc_cpu_data->output_offset),
+ output_buffer, curr_freq);
+
+ freq_limit = (output_buffer >> 8) & 0xff;
+ if (freq_limit != 0xff) {
+ dprintk("get: frequency for cpu %d is being temporarily"
+ " capped at %d\n", cpu, curr_freq);
+ }
+
+ spin_unlock(&pcc_lock);
+ return curr_freq;
+
+cmd_incomplete:
+ iowrite16(0, &pcch_hdr->status);
+ spin_unlock(&pcc_lock);
+ return -EINVAL;
+}
+
+static int pcc_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ struct pcc_cpu *pcc_cpu_data;
+ struct cpufreq_freqs freqs;
+ u16 status;
+ u32 input_buffer;
+ int cpu;
+
+ spin_lock(&pcc_lock);
+ cpu = policy->cpu;
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ dprintk("target: CPU %d should go to target freq: %d "
+ "(virtual) input_offset is 0x%x\n",
+ cpu, target_freq,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+
+ freqs.new = target_freq;
+ freqs.cpu = cpu;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ input_buffer = 0x1 | (((target_freq * 100)
+ / (ioread32(&pcch_hdr->nominal) * 1000)) << 8);
+ iowrite32(input_buffer,
+ (pcch_virt_addr + pcc_cpu_data->input_offset));
+ iowrite16(CMD_SET_FREQ, &pcch_hdr->command);
+
+ pcc_cmd();
+
+ /* Clear the input buffer - we are done with the current command */
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+
+ status = ioread16(&pcch_hdr->status);
+ if (status != CMD_COMPLETE) {
+ dprintk("target: FAILED for cpu %d, with status: 0x%x\n",
+ cpu, status);
+ goto cmd_incomplete;
+ }
+ iowrite16(0, &pcch_hdr->status);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ dprintk("target: was SUCCESSFUL for cpu %d\n", cpu);
+ spin_unlock(&pcc_lock);
+
+ return 0;
+
+cmd_incomplete:
+ iowrite16(0, &pcch_hdr->status);
+ spin_unlock(&pcc_lock);
+ return -EINVAL;
+}
+
+static int pcc_get_offset(int cpu)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object *pccp, *offset;
+ struct pcc_cpu *pcc_cpu_data;
+ struct acpi_processor *pr;
+ int ret = 0;
+
+ pr = per_cpu(processors, cpu);
+ pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu);
+
+ status = acpi_evaluate_object(pr->handle, "PCCP", NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ pccp = buffer.pointer;
+ if (!pccp || pccp->type != ACPI_TYPE_PACKAGE) {
+ ret = -ENODEV;
+ goto out_free;
+ };
+
+ offset = &(pccp->package.elements[0]);
+ if (!offset || offset->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcc_cpu_data->input_offset = offset->integer.value;
+
+ offset = &(pccp->package.elements[1]);
+ if (!offset || offset->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcc_cpu_data->output_offset = offset->integer.value;
+
+ memset_io((pcch_virt_addr + pcc_cpu_data->input_offset), 0, BUF_SZ);
+ memset_io((pcch_virt_addr + pcc_cpu_data->output_offset), 0, BUF_SZ);
+
+ dprintk("pcc_get_offset: for CPU %d: pcc_cpu_data "
+ "input_offset: 0x%x, pcc_cpu_data output_offset: 0x%x\n",
+ cpu, pcc_cpu_data->input_offset, pcc_cpu_data->output_offset);
+out_free:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
+{
+ acpi_status status;
+ struct acpi_object_list input;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ union acpi_object in_params[4];
+ union acpi_object *out_obj;
+ u32 capabilities[2];
+ u32 errors;
+ u32 supported;
+ int ret = 0;
+
+ input.count = 4;
+ input.pointer = in_params;
+ input.count = 4;
+ input.pointer = in_params;
+ in_params[0].type = ACPI_TYPE_BUFFER;
+ in_params[0].buffer.length = 16;
+ in_params[0].buffer.pointer = OSC_UUID;
+ in_params[1].type = ACPI_TYPE_INTEGER;
+ in_params[1].integer.value = 1;
+ in_params[2].type = ACPI_TYPE_INTEGER;
+ in_params[2].integer.value = 2;
+ in_params[3].type = ACPI_TYPE_BUFFER;
+ in_params[3].buffer.length = 8;
+ in_params[3].buffer.pointer = (u8 *)&capabilities;
+
+ capabilities[0] = OSC_QUERY_ENABLE;
+ capabilities[1] = 0x1;
+
+ status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!output.length)
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ supported = *((u32 *)(out_obj->buffer.pointer + 4));
+ if (!(supported & 0x1)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ kfree(output.pointer);
+ capabilities[0] = 0x0;
+ capabilities[1] = 0x1;
+
+ status = acpi_evaluate_object(*handle, "_OSC", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ if (!output.length)
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ errors = *((u32 *)out_obj->buffer.pointer) & ~(1 << 0);
+ if (errors) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ supported = *((u32 *)(out_obj->buffer.pointer + 4));
+ if (!(supported & 0x1)) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+out_free:
+ kfree(output.pointer);
+ return ret;
+}
+
+static int __init pcc_cpufreq_probe(void)
+{
+ acpi_status status;
+ struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
+ struct pcc_memory_resource *mem_resource;
+ struct pcc_register_resource *reg_resource;
+ union acpi_object *out_obj, *member;
+ acpi_handle handle, osc_handle;
+ int ret = 0;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ status = acpi_get_handle(handle, "_OSC", &osc_handle);
+ if (ACPI_SUCCESS(status)) {
+ ret = pcc_cpufreq_do_osc(&osc_handle);
+ if (ret)
+ dprintk("probe: _OSC evaluation did not succeed\n");
+ /* Firmware's use of _OSC is optional */
+ ret = 0;
+ }
+
+ status = acpi_evaluate_object(handle, "PCCH", NULL, &output);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ out_obj = output.pointer;
+ if (out_obj->type != ACPI_TYPE_PACKAGE) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ member = &out_obj->package.elements[0];
+ if (member->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ mem_resource = (struct pcc_memory_resource *)member->buffer.pointer;
+
+ dprintk("probe: mem_resource descriptor: 0x%x,"
+ " length: %d, space_id: %d, resource_usage: %d,"
+ " type_specific: %d, granularity: 0x%llx,"
+ " minimum: 0x%llx, maximum: 0x%llx,"
+ " translation_offset: 0x%llx, address_length: 0x%llx\n",
+ mem_resource->descriptor, mem_resource->length,
+ mem_resource->space_id, mem_resource->resource_usage,
+ mem_resource->type_specific, mem_resource->granularity,
+ mem_resource->minimum, mem_resource->maximum,
+ mem_resource->translation_offset,
+ mem_resource->address_length);
+
+ if (mem_resource->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ ret = -ENODEV;
+ goto out_free;
+ }
+
+ pcch_virt_addr = ioremap_nocache(mem_resource->minimum,
+ mem_resource->address_length);
+ if (pcch_virt_addr == NULL) {
+ dprintk("probe: could not map shared mem region\n");
+ goto out_free;
+ }
+ pcch_hdr = pcch_virt_addr;
+
+ dprintk("probe: PCCH header (virtual) addr: 0x%p\n", pcch_hdr);
+ dprintk("probe: PCCH header is at physical address: 0x%llx,"
+ " signature: 0x%x, length: %d bytes, major: %d, minor: %d,"
+ " supported features: 0x%x, command field: 0x%x,"
+ " status field: 0x%x, nominal latency: %d us\n",
+ mem_resource->minimum, ioread32(&pcch_hdr->signature),
+ ioread16(&pcch_hdr->length), ioread8(&pcch_hdr->major),
+ ioread8(&pcch_hdr->minor), ioread32(&pcch_hdr->features),
+ ioread16(&pcch_hdr->command), ioread16(&pcch_hdr->status),
+ ioread32(&pcch_hdr->latency));
+
+ dprintk("probe: min time between commands: %d us,"
+ " max time between commands: %d us,"
+ " nominal CPU frequency: %d MHz,"
+ " minimum CPU frequency: %d MHz,"
+ " minimum CPU frequency without throttling: %d MHz\n",
+ ioread32(&pcch_hdr->minimum_time),
+ ioread32(&pcch_hdr->maximum_time),
+ ioread32(&pcch_hdr->nominal),
+ ioread32(&pcch_hdr->throttled_frequency),
+ ioread32(&pcch_hdr->minimum_frequency));
+
+ member = &out_obj->package.elements[1];
+ if (member->type != ACPI_TYPE_BUFFER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ reg_resource = (struct pcc_register_resource *)member->buffer.pointer;
+
+ doorbell.space_id = reg_resource->space_id;
+ doorbell.bit_width = reg_resource->bit_width;
+ doorbell.bit_offset = reg_resource->bit_offset;
+ doorbell.access_width = 64;
+ doorbell.address = reg_resource->address;
+
+ dprintk("probe: doorbell: space_id is %d, bit_width is %d, "
+ "bit_offset is %d, access_width is %d, address is 0x%llx\n",
+ doorbell.space_id, doorbell.bit_width, doorbell.bit_offset,
+ doorbell.access_width, reg_resource->address);
+
+ member = &out_obj->package.elements[2];
+ if (member->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ doorbell_preserve = member->integer.value;
+
+ member = &out_obj->package.elements[3];
+ if (member->type != ACPI_TYPE_INTEGER) {
+ ret = -ENODEV;
+ goto pcch_free;
+ }
+
+ doorbell_write = member->integer.value;
+
+ dprintk("probe: doorbell_preserve: 0x%llx,"
+ " doorbell_write: 0x%llx\n",
+ doorbell_preserve, doorbell_write);
+
+ pcc_cpu_info = alloc_percpu(struct pcc_cpu);
+ if (!pcc_cpu_info) {
+ ret = -ENOMEM;
+ goto pcch_free;
+ }
+
+ printk(KERN_DEBUG "pcc-cpufreq: (v%s) driver loaded with frequency"
+ " limits: %d MHz, %d MHz\n", PCC_VERSION,
+ ioread32(&pcch_hdr->minimum_frequency),
+ ioread32(&pcch_hdr->nominal));
+ kfree(output.pointer);
+ return ret;
+pcch_free:
+ pcc_clear_mapping();
+out_free:
+ kfree(output.pointer);
+ return ret;
+}
+
+static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ unsigned int result = 0;
+
+ if (!pcch_virt_addr) {
+ result = -1;
+ goto pcch_null;
+ }
+
+ result = pcc_get_offset(cpu);
+ if (result) {
+ dprintk("init: PCCP evaluation failed\n");
+ goto free;
+ }
+
+ policy->max = policy->cpuinfo.max_freq =
+ ioread32(&pcch_hdr->nominal) * 1000;
+ policy->min = policy->cpuinfo.min_freq =
+ ioread32(&pcch_hdr->minimum_frequency) * 1000;
+ policy->cur = pcc_get_freq(cpu);
+
+ dprintk("init: policy->max is %d, policy->min is %d\n",
+ policy->max, policy->min);
+
+ return 0;
+free:
+ pcc_clear_mapping();
+ free_percpu(pcc_cpu_info);
+pcch_null:
+ return result;
+}
+
+static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static struct cpufreq_driver pcc_cpufreq_driver = {
+ .flags = CPUFREQ_CONST_LOOPS,
+ .get = pcc_get_freq,
+ .verify = pcc_cpufreq_verify,
+ .target = pcc_cpufreq_target,
+ .init = pcc_cpufreq_cpu_init,
+ .exit = pcc_cpufreq_cpu_exit,
+ .name = "pcc-cpufreq",
+ .owner = THIS_MODULE,
+};
+
+static int __init pcc_cpufreq_init(void)
+{
+ int ret;
+
+ if (acpi_disabled)
+ return 0;
+
+ ret = pcc_cpufreq_probe();
+ if (ret) {
+ dprintk("pcc_cpufreq_init: PCCH evaluation failed\n");
+ return ret;
+ }
+
+ ret = cpufreq_register_driver(&pcc_cpufreq_driver);
+
+ return ret;
+}
+
+static void __exit pcc_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&pcc_cpufreq_driver);
+
+ pcc_clear_mapping();
+
+ free_percpu(pcc_cpu_info);
+}
+
+MODULE_AUTHOR("Matthew Garrett, Naga Chumbalkar");
+MODULE_VERSION(PCC_VERSION);
+MODULE_DESCRIPTION("Processor Clocking Control interface driver");
+MODULE_LICENSE("GPL");
+
+late_initcall(pcc_cpufreq_init);
+module_exit(pcc_cpufreq_exit);
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index f125e5c551c0..6e44519960c8 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1356,6 +1356,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
+ per_cpu(powernow_data, pol->cpu) = NULL;
return 0;
}
@@ -1375,7 +1376,7 @@ static unsigned int powernowk8_get(unsigned int cpu)
int err;
if (!data)
- return -EINVAL;
+ return 0;
smp_call_function_single(cpu, query_values_on_cpu, &err, true);
if (err)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a8aacd4b513c..2e4c9f73821b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -35,6 +35,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/debugfs.h>
+#include <linux/edac_mce.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
@@ -161,6 +162,15 @@ void mce_log(struct mce *mce)
entry = rcu_dereference(mcelog.next);
for (;;) {
/*
+ * If edac_mce is enabled, it will check the error type
+ * and will process it, if it is a known error.
+ * Otherwise, the error will be sent through mcelog
+ * interface
+ */
+ if (edac_mce_parse(mce))
+ return;
+
+ /*
* When the buffer fills up discard new entries.
* Assume that the earlier errors are the more
* interesting ones:
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 898df9719afb..74f4e85a5727 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -115,17 +115,6 @@ int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
return !test_bit(counter, perfctr_nmi_owner);
}
-
-/* checks the an msr for availability */
-int avail_to_resrv_perfctr_nmi(unsigned int msr)
-{
- unsigned int counter;
-
- counter = nmi_perfctr_msr_to_bit(msr);
- BUG_ON(counter > NMI_MAX_COUNTER_BITS);
-
- return !test_bit(counter, perfctr_nmi_owner);
-}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
int reserve_perfctr_nmi(unsigned int msr)
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 05ed7ab2ca48..a1a7876cadcb 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -733,13 +733,13 @@ struct early_res {
};
static struct early_res early_res[MAX_EARLY_RES] __initdata = {
{ 0, PAGE_SIZE, "BIOS data page", 1 }, /* BIOS data page */
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_TRAMPOLINE)
/*
* But first pinch a few for the stack/trampoline stuff
* FIXME: Don't need the extra page at 4K, but need to fix
* trampoline before removing it. (see the GDT stuff)
*/
- { PAGE_SIZE, PAGE_SIZE, "EX TRAMPOLINE", 1 },
+ { PAGE_SIZE, PAGE_SIZE + PAGE_SIZE, "EX TRAMPOLINE", 1 },
#endif
{}
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 7fd318bac59c..37c3d4b17d85 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -442,8 +442,8 @@ is386: movl $2,%ecx # set MP
*/
cmpb $0,ready
jne 1f
- movl $per_cpu__gdt_page,%eax
- movl $per_cpu__stack_canary,%ecx
+ movl $gdt_page,%eax
+ movl $stack_canary,%ecx
movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
shrl $16, %ecx
movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
@@ -706,7 +706,7 @@ idt_descr:
.word 0 # 32 bit align gdt_desc.address
ENTRY(early_gdt_descr)
.word GDT_ENTRIES*8-1
- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
+ .long gdt_page /* Overwritten for secondary CPUs */
/*
* The boot_gdt must mirror the equivalent in setup.S and is
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 05d5fec64a94..cbf19e07f7d5 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -466,7 +466,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
int i, cpu, rc = NOTIFY_STOP;
struct perf_event *bp;
- unsigned long dr7, dr6;
+ unsigned long dr6;
unsigned long *dr6_p;
/* The DR6 value is pointed by args->err */
@@ -477,7 +477,6 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
if ((dr6 & DR_TRAP_BITS) == 0)
return NOTIFY_DONE;
- get_debugreg(dr7, 7);
/* Disable breakpoints during exception handling */
set_debugreg(0UL, 7);
/*
@@ -525,7 +524,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
if (dr6 & (~DR_TRAP_BITS))
rc = NOTIFY_DONE;
- set_debugreg(dr7, 7);
+ set_debugreg(__get_cpu_var(cpu_dr7), 7);
put_cpu();
return rc;
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index dd74fe7273b1..f4bf7a51987b 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -42,24 +42,13 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
+#include <linux/hw_breakpoint.h>
#include <asm/debugreg.h>
#include <asm/apicdef.h>
#include <asm/system.h>
-
#include <asm/apic.h>
-/*
- * Put the error code here just in case the user cares:
- */
-static int gdb_x86errcode;
-
-/*
- * Likewise, the vector number here (since GDB only gets the signal
- * number through the usual means, and that's not very specific):
- */
-static int gdb_x86vector = -1;
-
/**
* pt_regs_to_gdb_regs - Convert ptrace regs to GDB regs
* @gdb_regs: A pointer to hold the registers in the order GDB wants.
@@ -204,40 +193,27 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
static struct hw_breakpoint {
unsigned enabled;
- unsigned type;
- unsigned len;
unsigned long addr;
+ struct perf_event **pev;
} breakinfo[4];
static void kgdb_correct_hw_break(void)
{
- unsigned long dr7;
- int correctit = 0;
- int breakbit;
int breakno;
- get_debugreg(dr7, 7);
for (breakno = 0; breakno < 4; breakno++) {
- breakbit = 2 << (breakno << 1);
- if (!(dr7 & breakbit) && breakinfo[breakno].enabled) {
- correctit = 1;
- dr7 |= breakbit;
- dr7 &= ~(0xf0000 << (breakno << 2));
- dr7 |= ((breakinfo[breakno].len << 2) |
- breakinfo[breakno].type) <<
- ((breakno << 2) + 16);
- set_debugreg(breakinfo[breakno].addr, breakno);
-
- } else {
- if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
- correctit = 1;
- dr7 &= ~breakbit;
- dr7 &= ~(0xf0000 << (breakno << 2));
- }
- }
+ struct perf_event *bp;
+ int val;
+ int cpu = raw_smp_processor_id();
+ if (!breakinfo[breakno].enabled)
+ continue;
+ bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ if (bp->attr.disabled != 1)
+ continue;
+ val = arch_install_hw_breakpoint(bp);
+ if (!val)
+ bp->attr.disabled = 0;
}
- if (correctit)
- set_debugreg(dr7, 7);
}
static int
@@ -259,46 +235,74 @@ kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
static void kgdb_remove_all_hw_break(void)
{
int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
- for (i = 0; i < 4; i++)
- memset(&breakinfo[i], 0, sizeof(struct hw_breakpoint));
+ for (i = 0; i < 4; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled == 1)
+ continue;
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ }
}
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
- unsigned type;
int i;
+ struct perf_event *bp;
+ struct arch_hw_breakpoint *info;
for (i = 0; i < 4; i++)
if (!breakinfo[i].enabled)
break;
if (i == 4)
return -1;
-
+ bp = *per_cpu_ptr(breakinfo[i].pev, raw_smp_processor_id());
+ info = counter_arch_bp(bp);
switch (bptype) {
case BP_HARDWARE_BREAKPOINT:
- type = 0;
- len = 1;
+ len = 1;
+ info->type = X86_BREAKPOINT_EXECUTE;
break;
case BP_WRITE_WATCHPOINT:
- type = 1;
+ info->type = X86_BREAKPOINT_WRITE;
break;
case BP_ACCESS_WATCHPOINT:
- type = 3;
+ info->type = X86_BREAKPOINT_RW;
break;
default:
return -1;
}
- if (len == 1 || len == 2 || len == 4)
- breakinfo[i].len = len - 1;
- else
+ switch (len) {
+ case 1:
+ info->len = X86_BREAKPOINT_LEN_1;
+ break;
+ case 2:
+ info->len = X86_BREAKPOINT_LEN_2;
+ break;
+ case 4:
+ info->len = X86_BREAKPOINT_LEN_4;
+ break;
+#ifdef CONFIG_X86_64
+ case 8:
+ info->len = X86_BREAKPOINT_LEN_8;
+ break;
+#endif
+ default:
return -1;
+ }
- breakinfo[i].enabled = 1;
breakinfo[i].addr = addr;
- breakinfo[i].type = type;
+ info->address = addr;
+ bp->attr.bp_addr = info->address;
+ bp->attr.bp_len = info->len;
+ bp->attr.bp_type = info->type;
+ breakinfo[i].enabled = 1;
return 0;
}
@@ -313,25 +317,21 @@ kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
*/
void kgdb_disable_hw_debug(struct pt_regs *regs)
{
+ int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
+
/* Disable hardware debugging while we are in kgdb: */
set_debugreg(0UL, 7);
-}
-
-/**
- * kgdb_post_primary_code - Save error vector/code numbers.
- * @regs: Original pt_regs.
- * @e_vector: Original error vector.
- * @err_code: Original error code.
- *
- * This is needed on architectures which support SMP and KGDB.
- * This function is called after all the slave cpus have been put
- * to a know spin state and the primary CPU has control over KGDB.
- */
-void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
- /* primary processor is completely in the debugger */
- gdb_x86vector = e_vector;
- gdb_x86errcode = err_code;
+ for (i = 0; i < 4; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled == 1)
+ continue;
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ }
}
#ifdef CONFIG_SMP
@@ -378,7 +378,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
struct pt_regs *linux_regs)
{
unsigned long addr;
- unsigned long dr6;
char *ptr;
int newPC;
@@ -404,20 +403,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
raw_smp_processor_id());
}
- get_debugreg(dr6, 6);
- if (!(dr6 & 0x4000)) {
- int breakno;
-
- for (breakno = 0; breakno < 4; breakno++) {
- if (dr6 & (1 << breakno) &&
- breakinfo[breakno].type == 0) {
- /* Set restore flag: */
- linux_regs->flags |= X86_EFLAGS_RF;
- break;
- }
- }
- }
- set_debugreg(0UL, 6);
kgdb_correct_hw_break();
return 0;
@@ -448,6 +433,7 @@ single_step_cont(struct pt_regs *regs, struct die_args *args)
}
static int was_in_debug_nmi[NR_CPUS];
+static int recieved_hw_brk[NR_CPUS];
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
@@ -485,23 +471,26 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
break;
case DIE_DEBUG:
- if (atomic_read(&kgdb_cpu_doing_single_step) ==
- raw_smp_processor_id()) {
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
if (user_mode(regs))
return single_step_cont(regs, args);
break;
- } else if (test_thread_flag(TIF_SINGLESTEP))
+ } else if (test_thread_flag(TIF_SINGLESTEP)) {
/* This means a user thread is single stepping
* a system call which should be ignored
*/
return NOTIFY_DONE;
+ } else if (recieved_hw_brk[raw_smp_processor_id()] == 1) {
+ recieved_hw_brk[raw_smp_processor_id()] = 0;
+ return NOTIFY_STOP;
+ }
/* fall through */
default:
if (user_mode(regs))
return NOTIFY_DONE;
}
- if (kgdb_handle_exception(args->trapnr, args->signr, args->err, regs))
+ if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
return NOTIFY_DONE;
/* Must touch watchdog before return to normal operation */
@@ -509,6 +498,26 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
return NOTIFY_STOP;
}
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return __kgdb_notify(&args, cmd);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
@@ -531,6 +540,23 @@ static struct notifier_block kgdb_notifier = {
.priority = -INT_MAX,
};
+static void kgdb_hw_bp(struct perf_event *bp, int nmi,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct die_args args;
+
+ args.trapnr = 0;
+ args.signr = 5;
+ args.err = 0;
+ args.regs = regs;
+ args.str = "debug";
+ if (__kgdb_notify(&args, DIE_DEBUG) == NOTIFY_STOP)
+ recieved_hw_brk[raw_smp_processor_id()] = 1;
+ else
+ recieved_hw_brk[raw_smp_processor_id()] = 0;
+}
+
/**
* kgdb_arch_init - Perform any architecture specific initalization.
*
@@ -539,7 +565,42 @@ static struct notifier_block kgdb_notifier = {
*/
int kgdb_arch_init(void)
{
- return register_die_notifier(&kgdb_notifier);
+ int i, cpu;
+ int ret;
+ struct perf_event_attr attr;
+ struct perf_event **pevent;
+
+ ret = register_die_notifier(&kgdb_notifier);
+ if (ret != 0)
+ return ret;
+ /*
+ * Pre-allocate the hw breakpoint structions in the non-atomic
+ * portion of kgdb because this operation requires mutexs to
+ * complete.
+ */
+ attr.bp_addr = (unsigned long)kgdb_arch_init;
+ attr.type = PERF_TYPE_BREAKPOINT;
+ attr.bp_len = HW_BREAKPOINT_LEN_1;
+ attr.bp_type = HW_BREAKPOINT_X;
+ attr.disabled = 1;
+ for (i = 0; i < 4; i++) {
+ breakinfo[i].pev = register_wide_hw_breakpoint(&attr,
+ kgdb_hw_bp);
+ if (IS_ERR(breakinfo[i].pev)) {
+ printk(KERN_ERR "kgdb: Could not allocate hw breakpoints\n");
+ breakinfo[i].pev = NULL;
+ kgdb_arch_exit();
+ return -1;
+ }
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (pevent[0]->destroy != NULL) {
+ pevent[0]->destroy = NULL;
+ release_bp_slot(*pevent);
+ }
+ }
+ }
+ return ret;
}
/**
@@ -550,6 +611,13 @@ int kgdb_arch_init(void)
*/
void kgdb_arch_exit(void)
{
+ int i;
+ for (i = 0; i < 4; i++) {
+ if (breakinfo[i].pev) {
+ unregister_wide_hw_breakpoint(breakinfo[i].pev);
+ breakinfo[i].pev = NULL;
+ }
+ }
unregister_die_notifier(&kgdb_notifier);
}
@@ -582,6 +650,11 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
return instruction_pointer(regs);
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->ip = ip;
+}
+
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: */
.gdb_bpt_instr = { 0xcc },
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 017d937639fe..118428085ea2 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -604,7 +604,7 @@ ptrace_modify_breakpoint(struct perf_event *bp, int len, int type,
struct perf_event_attr attr;
/*
- * We shoud have at least an inactive breakpoint at this
+ * We should have at least an inactive breakpoint at this
* slot. It means the user is writing dr7 without having
* written the address register first
*/
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 1545bc0c9845..1ebf3bd670df 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(pm_power_off);
static const struct desc_ptr no_idt = {};
static int reboot_mode;
-enum reboot_type reboot_type = BOOT_KBD;
+enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 15228b5d3eb7..817ddc50961f 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -337,3 +337,7 @@ ENTRY(sys_call_table)
.long sys_rt_tgsigqueueinfo /* 335 */
.long sys_perf_event_open
.long sys_recvmmsg
+ .long sys_fanotify_init
+ .long sys_fanotify_mark
+ .long sys_getprlimit /* 340 */
+ .long sys_setprlimit
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 33399176512a..6ae2122467dc 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -15,6 +15,7 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
+#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
@@ -460,6 +461,11 @@ void restart_nmi(void)
/* May run on IST stack. */
dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
#ifdef CONFIG_KPROBES
if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
== NOTIFY_STOP)
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb194df..25bbb9bfc312 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -171,7 +171,7 @@ static int vmi_timer_next_event(unsigned long delta,
{
/* Unfortunately, set_next_event interface only passes relative
* expiry, but we want absolute expiry. It'd be better if were
- * were passed an aboslute expiry, since a bunch of time may
+ * were passed an absolute expiry, since a bunch of time may
* have been stolen between the time the delta is computed and
* when we set the alarm below. */
cycle_t now = vmi_timer_ops.get_cycle_counter(vmi_counter(VMI_ONESHOT));
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index f92a0da608cb..44879df55696 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -341,7 +341,7 @@ SECTIONS
* Per-cpu symbols which need to be offset from __per_cpu_load
* for the boot processor.
*/
-#define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
+#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
INIT_PER_CPU(gdt_page);
INIT_PER_CPU(irq_stack_union);
@@ -352,7 +352,7 @@ INIT_PER_CPU(irq_stack_union);
"kernel image bigger than KERNEL_IMAGE_SIZE");
#ifdef CONFIG_SMP
-. = ASSERT((per_cpu__irq_stack_union == 0),
+. = ASSERT((irq_stack_union == 0),
"irq_stack_union is not at start of per-cpu area");
#endif
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9055e5872ff0..1c0c6ab9c60f 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -301,7 +301,8 @@ static int __init vsyscall_init(void)
register_sysctl_table(kernel_root_table2);
#endif
on_each_cpu(cpu_vsyscall_init, NULL, 1);
- hotcpu_notifier(cpu_vsyscall_notifier, 0);
+ /* notifier priority > KVM */
+ hotcpu_notifier(cpu_vsyscall_notifier, 30);
return 0;
}
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 4cd498332466..06871111bf54 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -29,6 +29,7 @@ config KVM
select HAVE_KVM_EVENTFD
select KVM_APIC_ARCHITECTURE
select USER_RETURN_NOTIFIER
+ select KVM_MMIO
---help---
Support hosting fully virtualized guest machines using hardware
virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 296aba49472a..caad18954ed3 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -602,7 +602,7 @@ static const struct kvm_io_device_ops speaker_dev_ops = {
.write = speaker_ioport_write,
};
-/* Caller must have writers lock on slots_lock */
+/* Caller must hold slots_lock */
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
{
struct kvm_pit *pit;
@@ -642,13 +642,13 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
kvm_iodevice_init(&pit->dev, &pit_dev_ops);
- ret = __kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &pit->dev);
if (ret < 0)
goto fail;
if (flags & KVM_PIT_SPEAKER_DUMMY) {
kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
- ret = __kvm_io_bus_register_dev(&kvm->pio_bus,
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS,
&pit->speaker_dev);
if (ret < 0)
goto fail_unregister;
@@ -657,7 +657,7 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
return pit;
fail_unregister:
- __kvm_io_bus_unregister_dev(&kvm->pio_bus, &pit->dev);
+ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
fail:
if (pit->irq_source_id >= 0)
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index d057c0cbd245..d5753a75d58c 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -533,7 +533,9 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
* Initialize PIO device
*/
kvm_iodevice_init(&s->dev, &picdev_ops);
- ret = kvm_io_bus_register_dev(kvm, &kvm->pio_bus, &s->dev);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, &s->dev);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0) {
kfree(s);
return NULL;
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 7bcc5b6a4403..35acc36e1782 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -38,4 +38,16 @@ static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
return vcpu->arch.pdptrs[index];
}
+static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
+{
+ if (mask & vcpu->arch.cr4_guest_owned_bits)
+ kvm_x86_ops->decache_cr4_guest_bits(vcpu);
+ return vcpu->arch.cr4 & mask;
+}
+
+static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
+{
+ return kvm_read_cr4_bits(vcpu, ~0UL);
+}
+
#endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3063a0c4858b..ba8c045da782 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -373,6 +373,12 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
if (unlikely(!apic_enabled(apic)))
break;
+ if (trig_mode) {
+ apic_debug("level trig mode for vector %d", vector);
+ apic_set_vector(vector, apic->regs + APIC_TMR);
+ } else
+ apic_clear_vector(vector, apic->regs + APIC_TMR);
+
result = !apic_test_and_set_irr(vector, apic);
trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
trig_mode, vector, !result);
@@ -383,11 +389,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
break;
}
- if (trig_mode) {
- apic_debug("level trig mode for vector %d", vector);
- apic_set_vector(vector, apic->regs + APIC_TMR);
- } else
- apic_clear_vector(vector, apic->regs + APIC_TMR);
kvm_vcpu_kick(vcpu);
break;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4c3e5b2314cb..4f5508c35100 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -29,6 +29,7 @@
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
+#include <linux/srcu.h>
#include <asm/page.h>
#include <asm/cmpxchg.h>
@@ -142,10 +143,6 @@ module_param(oos_shadow, bool, 0644);
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)
-#define PT_PDPE_LEVEL 3
-#define PT_DIRECTORY_LEVEL 2
-#define PT_PAGE_TABLE_LEVEL 1
-
#define RMAP_EXT 4
#define ACC_EXEC_MASK 1
@@ -477,7 +474,7 @@ static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
- return page_size;
+ return PT_PAGE_TABLE_LEVEL;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
@@ -503,8 +500,7 @@ out:
static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
struct kvm_memory_slot *slot;
- int host_level;
- int level = PT_PAGE_TABLE_LEVEL;
+ int host_level, level, max_level;
slot = gfn_to_memslot(vcpu->kvm, large_gfn);
if (slot && slot->dirty_bitmap)
@@ -515,11 +511,12 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
if (host_level == PT_PAGE_TABLE_LEVEL)
return host_level;
- for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
+ max_level = kvm_x86_ops->get_lpage_level() < host_level ?
+ kvm_x86_ops->get_lpage_level() : host_level;
+ for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
break;
- }
return level - 1;
}
@@ -664,6 +661,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
prev_desc = desc;
desc = desc->more;
}
+ pr_err("rmap_remove: %p %llx many->many\n", spte, *spte);
BUG();
}
}
@@ -808,20 +806,15 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
{
int i, j;
int retval = 0;
+ struct kvm_memslots *slots;
- /*
- * If mmap_sem isn't taken, we can look the memslots with only
- * the mmu_lock by skipping over the slots with userspace_addr == 0.
- */
- for (i = 0; i < kvm->nmemslots; i++) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ slots = rcu_dereference(kvm->memslots);
+
+ for (i = 0; i < slots->nmemslots; i++) {
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
unsigned long start = memslot->userspace_addr;
unsigned long end;
- /* mmu_lock protects userspace_addr */
- if (!start)
- continue;
-
end = start + (memslot->npages << PAGE_SHIFT);
if (hva >= start && hva < end) {
gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
@@ -1617,7 +1610,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
- int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
+ int slot = memslot_id(kvm, gfn);
struct kvm_mmu_page *sp = page_header(__pa(pte));
__set_bit(slot, sp->slot_bitmap);
@@ -2938,10 +2931,9 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
- int npages;
+ int npages, idx;
- if (!down_read_trylock(&kvm->slots_lock))
- continue;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
npages = kvm->arch.n_alloc_mmu_pages -
kvm->arch.n_free_mmu_pages;
@@ -2954,7 +2946,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
nr_to_scan--;
spin_unlock(&kvm->mmu_lock);
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
}
if (kvm_freed)
list_move_tail(&kvm_freed->vm_list, &vm_list);
@@ -3021,9 +3013,11 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
int i;
unsigned int nr_mmu_pages;
unsigned int nr_pages = 0;
+ struct kvm_memslots *slots;
- for (i = 0; i < kvm->nmemslots; i++)
- nr_pages += kvm->memslots[i].npages;
+ slots = rcu_dereference(kvm->memslots);
+ for (i = 0; i < slots->nmemslots; i++)
+ nr_pages += slots->memslots[i].npages;
nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
nr_mmu_pages = max(nr_mmu_pages,
@@ -3293,10 +3287,12 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
static int count_rmaps(struct kvm_vcpu *vcpu)
{
int nmaps = 0;
- int i, j, k;
+ int i, j, k, idx;
+ idx = srcu_read_lock(&kvm->srcu);
+ slots = rcu_dereference(kvm->memslots);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
+ struct kvm_memory_slot *m = &slots->memslots[i];
struct kvm_rmap_desc *d;
for (j = 0; j < m->npages; ++j) {
@@ -3319,6 +3315,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
}
}
}
+ srcu_read_unlock(&kvm->srcu, idx);
return nmaps;
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 61a1b3884b49..ff583423968d 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -2,6 +2,7 @@
#define __KVM_X86_MMU_H
#include <linux/kvm_host.h>
+#include "kvm_cache_regs.h"
#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
@@ -37,6 +38,10 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
+#define PT_PDPE_LEVEL 3
+#define PT_DIRECTORY_LEVEL 2
+#define PT_PAGE_TABLE_LEVEL 1
+
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
@@ -64,12 +69,12 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
static inline int is_pae(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr4 & X86_CR4_PAE;
+ return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}
static inline int is_pse(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr4 & X86_CR4_PSE;
+ return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}
static inline int is_paging(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1d9b33843c80..cf64fc026e3e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -765,14 +765,16 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (unlikely(cpu != vcpu->cpu)) {
u64 delta;
- /*
- * Make sure that the guest sees a monotonically
- * increasing TSC.
- */
- delta = vcpu->arch.host_tsc - native_read_tsc();
- svm->vmcb->control.tsc_offset += delta;
- if (is_nested(svm))
- svm->nested.hsave->control.tsc_offset += delta;
+ if (check_tsc_unstable()) {
+ /*
+ * Make sure that the guest sees a monotonically
+ * increasing TSC.
+ */
+ delta = vcpu->arch.host_tsc - native_read_tsc();
+ svm->vmcb->control.tsc_offset += delta;
+ if (is_nested(svm))
+ svm->nested.hsave->control.tsc_offset += delta;
+ }
vcpu->cpu = cpu;
kvm_migrate_timers(vcpu);
svm->asid_generation = 0;
@@ -2852,6 +2854,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
return 0;
}
+static void svm_cpuid_update(struct kvm_vcpu *vcpu)
+{
+}
+
static const struct trace_print_flags svm_exit_reasons_str[] = {
{ SVM_EXIT_READ_CR0, "read_cr0" },
{ SVM_EXIT_READ_CR3, "read_cr3" },
@@ -2905,9 +2911,14 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
{ -1, NULL }
};
-static bool svm_gb_page_enable(void)
+static int svm_get_lpage_level(void)
{
- return true;
+ return PT_PDPE_LEVEL;
+}
+
+static bool svm_rdtscp_supported(void)
+{
+ return false;
}
static struct kvm_x86_ops svm_x86_ops = {
@@ -2975,7 +2986,11 @@ static struct kvm_x86_ops svm_x86_ops = {
.get_mt_mask = svm_get_mt_mask,
.exit_reasons_str = svm_exit_reasons_str,
- .gb_page_enable = svm_gb_page_enable,
+ .get_lpage_level = svm_get_lpage_level,
+
+ .cpuid_update = svm_cpuid_update,
+
+ .rdtscp_supported = svm_rdtscp_supported,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d4918d6fc924..9b197b25b66d 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -61,6 +61,21 @@ module_param_named(unrestricted_guest,
static int __read_mostly emulate_invalid_guest_state = 0;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK \
+ (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST \
+ (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
+#define KVM_VM_CR0_ALWAYS_ON \
+ (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_CR4_GUEST_OWNED_BITS \
+ (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
+ | X86_CR4_OSXMMEXCPT)
+
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+
/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -136,6 +151,8 @@ struct vcpu_vmx {
ktime_t entry_time;
s64 vnmi_blocked_time;
u32 exit_reason;
+
+ bool rdtscp_enabled;
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -210,7 +227,7 @@ static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
- MSR_EFER, MSR_K6_STAR,
+ MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
};
#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
@@ -301,6 +318,11 @@ static inline bool cpu_has_vmx_ept_2m_page(void)
return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
}
+static inline bool cpu_has_vmx_ept_1g_page(void)
+{
+ return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+}
+
static inline int cpu_has_vmx_invept_individual_addr(void)
{
return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
@@ -347,6 +369,12 @@ static inline int cpu_has_vmx_vpid(void)
SECONDARY_EXEC_ENABLE_VPID;
}
+static inline int cpu_has_vmx_rdtscp(void)
+{
+ return vmcs_config.cpu_based_2nd_exec_ctrl &
+ SECONDARY_EXEC_RDTSCP;
+}
+
static inline int cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
@@ -878,6 +906,11 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}
+static bool vmx_rdtscp_supported(void)
+{
+ return cpu_has_vmx_rdtscp();
+}
+
/*
* Swap MSR entry in host/guest MSR entry array.
*/
@@ -913,6 +946,9 @@ static void setup_msrs(struct vcpu_vmx *vmx)
index = __find_msr_index(vmx, MSR_CSTAR);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
+ index = __find_msr_index(vmx, MSR_TSC_AUX);
+ if (index >= 0 && vmx->rdtscp_enabled)
+ move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_K6_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
@@ -1002,6 +1038,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
case MSR_IA32_SYSENTER_ESP:
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
+ case MSR_TSC_AUX:
+ if (!to_vmx(vcpu)->rdtscp_enabled)
+ return 1;
+ /* Otherwise falls through */
default:
vmx_load_host_state(to_vmx(vcpu));
msr = find_msr_entry(to_vmx(vcpu), msr_index);
@@ -1065,7 +1105,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
vcpu->arch.pat = data;
break;
}
- /* Otherwise falls through to kvm_set_msr_common */
+ ret = kvm_set_msr_common(vcpu, msr_index, data);
+ break;
+ case MSR_TSC_AUX:
+ if (!vmx->rdtscp_enabled)
+ return 1;
+ /* Check reserved bit, higher 32 bits should be zero */
+ if ((data >> 32) != 0)
+ return 1;
+ /* Otherwise falls through */
default:
msr = find_msr_entry(vmx, msr_index);
if (msr) {
@@ -1224,6 +1272,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING |
+ CPU_BASED_MWAIT_EXITING |
+ CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING;
opt = CPU_BASED_TPR_SHADOW |
CPU_BASED_USE_MSR_BITMAPS |
@@ -1243,7 +1293,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING |
+ SECONDARY_EXEC_RDTSCP;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
@@ -1457,8 +1508,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
static gva_t rmode_tss_base(struct kvm *kvm)
{
if (!kvm->arch.tss_addr) {
- gfn_t base_gfn = kvm->memslots[0].base_gfn +
- kvm->memslots[0].npages - 3;
+ struct kvm_memslots *slots;
+ gfn_t base_gfn;
+
+ slots = rcu_dereference(kvm->memslots);
+ base_gfn = kvm->memslots->memslots[0].base_gfn +
+ kvm->memslots->memslots[0].npages - 3;
return base_gfn << PAGE_SHIFT;
}
return kvm->arch.tss_addr;
@@ -1600,8 +1655,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
- vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+ ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+ vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+ vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
}
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1646,7 +1703,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, vcpu->arch.cr4);
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
} else if (!is_paging(vcpu)) {
/* From nonpaging to paging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1654,23 +1711,13 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
~(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
- vmx_set_cr4(vcpu, vcpu->arch.cr4);
+ vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
}
if (!(cr0 & X86_CR0_WP))
*hw_cr0 &= ~X86_CR0_WP;
}
-static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
- struct kvm_vcpu *vcpu)
-{
- if (!is_paging(vcpu)) {
- *hw_cr4 &= ~X86_CR4_PAE;
- *hw_cr4 |= X86_CR4_PSE;
- } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
- *hw_cr4 &= ~X86_CR4_PAE;
-}
-
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1748,8 +1795,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
vcpu->arch.cr4 = cr4;
- if (enable_ept)
- ept_update_paging_mode_cr4(&hw_cr4, vcpu);
+ if (enable_ept) {
+ if (!is_paging(vcpu)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ hw_cr4 |= X86_CR4_PSE;
+ } else if (!(cr4 & X86_CR4_PAE)) {
+ hw_cr4 &= ~X86_CR4_PAE;
+ }
+ }
vmcs_writel(CR4_READ_SHADOW, cr4);
vmcs_writel(GUEST_CR4, hw_cr4);
@@ -2175,7 +2228,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -2188,7 +2241,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -2197,7 +2250,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (kvm->arch.ept_identity_pagetable)
goto out;
kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
@@ -2212,7 +2265,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -2384,14 +2437,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
for (i = 0; i < NR_VMX_MSR; ++i) {
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
- u64 data;
int j = vmx->nmsrs;
if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;
- data = data_low | ((u64)data_high << 32);
vmx->guest_msrs[j].index = i;
vmx->guest_msrs[j].data = 0;
vmx->guest_msrs[j].mask = -1ull;
@@ -2404,7 +2455,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
- vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+ vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+ if (enable_ept)
+ vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+ vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
rdtscll(tsc_this);
@@ -2429,10 +2483,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 msr;
- int ret;
+ int ret, idx;
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!init_rmode(vmx->vcpu.kvm)) {
ret = -ENOMEM;
goto out;
@@ -2540,7 +2594,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->emulation_required = 0;
out:
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
return ret;
}
@@ -3035,7 +3089,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
vcpu->arch.eff_db[dr] = val;
break;
case 4 ... 5:
- if (vcpu->arch.cr4 & X86_CR4_DE)
+ if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
kvm_queue_exception(vcpu, UD_VECTOR);
break;
case 6:
@@ -3416,6 +3470,12 @@ static int handle_pause(struct kvm_vcpu *vcpu)
return 1;
}
+static int handle_invalid_op(struct kvm_vcpu *vcpu)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -3453,6 +3513,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
[EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
+ [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
};
static const int kvm_vmx_max_exit_handlers =
@@ -3936,32 +3998,83 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
return ret;
}
+#define _ER(x) { EXIT_REASON_##x, #x }
+
static const struct trace_print_flags vmx_exit_reasons_str[] = {
- { EXIT_REASON_EXCEPTION_NMI, "exception" },
- { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
- { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
- { EXIT_REASON_NMI_WINDOW, "nmi_window" },
- { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
- { EXIT_REASON_CR_ACCESS, "cr_access" },
- { EXIT_REASON_DR_ACCESS, "dr_access" },
- { EXIT_REASON_CPUID, "cpuid" },
- { EXIT_REASON_MSR_READ, "rdmsr" },
- { EXIT_REASON_MSR_WRITE, "wrmsr" },
- { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
- { EXIT_REASON_HLT, "halt" },
- { EXIT_REASON_INVLPG, "invlpg" },
- { EXIT_REASON_VMCALL, "hypercall" },
- { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
- { EXIT_REASON_APIC_ACCESS, "apic_access" },
- { EXIT_REASON_WBINVD, "wbinvd" },
- { EXIT_REASON_TASK_SWITCH, "task_switch" },
- { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
+ _ER(EXCEPTION_NMI),
+ _ER(EXTERNAL_INTERRUPT),
+ _ER(TRIPLE_FAULT),
+ _ER(PENDING_INTERRUPT),
+ _ER(NMI_WINDOW),
+ _ER(TASK_SWITCH),
+ _ER(CPUID),
+ _ER(HLT),
+ _ER(INVLPG),
+ _ER(RDPMC),
+ _ER(RDTSC),
+ _ER(VMCALL),
+ _ER(VMCLEAR),
+ _ER(VMLAUNCH),
+ _ER(VMPTRLD),
+ _ER(VMPTRST),
+ _ER(VMREAD),
+ _ER(VMRESUME),
+ _ER(VMWRITE),
+ _ER(VMOFF),
+ _ER(VMON),
+ _ER(CR_ACCESS),
+ _ER(DR_ACCESS),
+ _ER(IO_INSTRUCTION),
+ _ER(MSR_READ),
+ _ER(MSR_WRITE),
+ _ER(MWAIT_INSTRUCTION),
+ _ER(MONITOR_INSTRUCTION),
+ _ER(PAUSE_INSTRUCTION),
+ _ER(MCE_DURING_VMENTRY),
+ _ER(TPR_BELOW_THRESHOLD),
+ _ER(APIC_ACCESS),
+ _ER(EPT_VIOLATION),
+ _ER(EPT_MISCONFIG),
+ _ER(WBINVD),
{ -1, NULL }
};
-static bool vmx_gb_page_enable(void)
+#undef _ER
+
+static int vmx_get_lpage_level(void)
+{
+ if (enable_ept && !cpu_has_vmx_ept_1g_page())
+ return PT_DIRECTORY_LEVEL;
+ else
+ /* For shadow and EPT supported 1GB page */
+ return PT_PDPE_LEVEL;
+}
+
+static inline u32 bit(int bitno)
{
- return false;
+ return 1 << (bitno & 31);
+}
+
+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ u32 exec_control;
+
+ vmx->rdtscp_enabled = false;
+ if (vmx_rdtscp_supported()) {
+ exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+ if (exec_control & SECONDARY_EXEC_RDTSCP) {
+ best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+ if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
+ vmx->rdtscp_enabled = true;
+ else {
+ exec_control &= ~SECONDARY_EXEC_RDTSCP;
+ vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+ exec_control);
+ }
+ }
+ }
}
static struct kvm_x86_ops vmx_x86_ops = {
@@ -4027,7 +4140,11 @@ static struct kvm_x86_ops vmx_x86_ops = {
.get_mt_mask = vmx_get_mt_mask,
.exit_reasons_str = vmx_exit_reasons_str,
- .gb_page_enable = vmx_gb_page_enable,
+ .get_lpage_level = vmx_get_lpage_level,
+
+ .cpuid_update = vmx_cpuid_update,
+
+ .rdtscp_supported = vmx_rdtscp_supported,
};
static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6651dbf58675..915a826b589b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -38,6 +38,7 @@
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
+#include <linux/srcu.h>
#include <trace/events/kvm.h>
#undef TRACE_INCLUDE_FILE
#define CREATE_TRACE_POINTS
@@ -93,16 +94,16 @@ module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);
struct kvm_shared_msrs_global {
int nr;
- struct kvm_shared_msr {
- u32 msr;
- u64 value;
- } msrs[KVM_NR_SHARED_MSRS];
+ u32 msrs[KVM_NR_SHARED_MSRS];
};
struct kvm_shared_msrs {
struct user_return_notifier urn;
bool registered;
- u64 current_value[KVM_NR_SHARED_MSRS];
+ struct kvm_shared_msr_values {
+ u64 host;
+ u64 curr;
+ } values[KVM_NR_SHARED_MSRS];
};
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
@@ -147,53 +148,64 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
- struct kvm_shared_msr *global;
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
+ struct kvm_shared_msr_values *values;
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
- global = &shared_msrs_global.msrs[slot];
- if (global->value != locals->current_value[slot]) {
- wrmsrl(global->msr, global->value);
- locals->current_value[slot] = global->value;
+ values = &locals->values[slot];
+ if (values->host != values->curr) {
+ wrmsrl(shared_msrs_global.msrs[slot], values->host);
+ values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}
-void kvm_define_shared_msr(unsigned slot, u32 msr)
+static void shared_msr_update(unsigned slot, u32 msr)
{
- int cpu;
+ struct kvm_shared_msrs *smsr;
u64 value;
+ smsr = &__get_cpu_var(shared_msrs);
+ /* only read, and nobody should modify it at this time,
+ * so don't need lock */
+ if (slot >= shared_msrs_global.nr) {
+ printk(KERN_ERR "kvm: invalid MSR slot!");
+ return;
+ }
+ rdmsrl_safe(msr, &value);
+ smsr->values[slot].host = value;
+ smsr->values[slot].curr = value;
+}
+
+void kvm_define_shared_msr(unsigned slot, u32 msr)
+{
if (slot >= shared_msrs_global.nr)
shared_msrs_global.nr = slot + 1;
- shared_msrs_global.msrs[slot].msr = msr;
- rdmsrl_safe(msr, &value);
- shared_msrs_global.msrs[slot].value = value;
- for_each_online_cpu(cpu)
- per_cpu(shared_msrs, cpu).current_value[slot] = value;
+ shared_msrs_global.msrs[slot] = msr;
+ /* we need ensured the shared_msr_global have been updated */
+ smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
unsigned i;
- struct kvm_shared_msrs *locals = &__get_cpu_var(shared_msrs);
for (i = 0; i < shared_msrs_global.nr; ++i)
- locals->current_value[i] = shared_msrs_global.msrs[i].value;
+ shared_msr_update(i, shared_msrs_global.msrs[i]);
}
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
- if (((value ^ smsr->current_value[slot]) & mask) == 0)
+ if (((value ^ smsr->values[slot].curr) & mask) == 0)
return;
- smsr->current_value[slot] = value;
- wrmsrl(shared_msrs_global.msrs[slot].msr, value);
+ smsr->values[slot].curr = value;
+ wrmsrl(shared_msrs_global.msrs[slot], value);
if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn);
@@ -257,12 +269,68 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
+#define EXCPT_BENIGN 0
+#define EXCPT_CONTRIBUTORY 1
+#define EXCPT_PF 2
+
+static int exception_class(int vector)
+{
+ switch (vector) {
+ case PF_VECTOR:
+ return EXCPT_PF;
+ case DE_VECTOR:
+ case TS_VECTOR:
+ case NP_VECTOR:
+ case SS_VECTOR:
+ case GP_VECTOR:
+ return EXCPT_CONTRIBUTORY;
+ default:
+ break;
+ }
+ return EXCPT_BENIGN;
+}
+
+static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
+ unsigned nr, bool has_error, u32 error_code)
+{
+ u32 prev_nr;
+ int class1, class2;
+
+ if (!vcpu->arch.exception.pending) {
+ queue:
+ vcpu->arch.exception.pending = true;
+ vcpu->arch.exception.has_error_code = has_error;
+ vcpu->arch.exception.nr = nr;
+ vcpu->arch.exception.error_code = error_code;
+ return;
+ }
+
+ /* to check exception */
+ prev_nr = vcpu->arch.exception.nr;
+ if (prev_nr == DF_VECTOR) {
+ /* triple fault -> shutdown */
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ return;
+ }
+ class1 = exception_class(prev_nr);
+ class2 = exception_class(nr);
+ if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
+ || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
+ /* generate double fault per SDM Table 5-5 */
+ vcpu->arch.exception.pending = true;
+ vcpu->arch.exception.has_error_code = true;
+ vcpu->arch.exception.nr = DF_VECTOR;
+ vcpu->arch.exception.error_code = 0;
+ } else
+ /* replace previous exception with a new one in a hope
+ that instruction re-execution will regenerate lost
+ exception */
+ goto queue;
+}
+
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- WARN_ON(vcpu->arch.exception.pending);
- vcpu->arch.exception.pending = true;
- vcpu->arch.exception.has_error_code = false;
- vcpu->arch.exception.nr = nr;
+ kvm_multiple_exception(vcpu, nr, false, 0);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
@@ -270,25 +338,6 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
u32 error_code)
{
++vcpu->stat.pf_guest;
-
- if (vcpu->arch.exception.pending) {
- switch(vcpu->arch.exception.nr) {
- case DF_VECTOR:
- /* triple fault -> shutdown */
- set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
- return;
- case PF_VECTOR:
- vcpu->arch.exception.nr = DF_VECTOR;
- vcpu->arch.exception.error_code = 0;
- return;
- default:
- /* replace previous exception with a new one in a hope
- that instruction re-execution will regenerate lost
- exception */
- vcpu->arch.exception.pending = false;
- break;
- }
- }
vcpu->arch.cr2 = addr;
kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}
@@ -301,11 +350,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- WARN_ON(vcpu->arch.exception.pending);
- vcpu->arch.exception.pending = true;
- vcpu->arch.exception.has_error_code = true;
- vcpu->arch.exception.nr = nr;
- vcpu->arch.exception.error_code = error_code;
+ kvm_multiple_exception(vcpu, nr, true, error_code);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
@@ -449,7 +494,7 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long old_cr4 = vcpu->arch.cr4;
+ unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
if (cr4 & CR4_RESERVED_BITS) {
@@ -1262,15 +1307,15 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
- int i;
+ int i, idx;
vcpu_load(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
vcpu_put(vcpu);
@@ -1531,6 +1576,7 @@ static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
cpuid_fix_nx_cap(vcpu);
r = 0;
kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
out_free:
vfree(cpuid_entries);
@@ -1553,6 +1599,7 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
goto out;
vcpu->arch.cpuid_nent = cpuid->nent;
kvm_apic_set_version(vcpu);
+ kvm_x86_ops->cpuid_update(vcpu);
return 0;
out:
@@ -1595,12 +1642,15 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
u32 index, int *nent, int maxnent)
{
unsigned f_nx = is_efer_nx() ? F(NX) : 0;
- unsigned f_gbpages = kvm_x86_ops->gb_page_enable() ? F(GBPAGES) : 0;
#ifdef CONFIG_X86_64
+ unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
+ ? F(GBPAGES) : 0;
unsigned f_lm = F(LM);
#else
+ unsigned f_gbpages = 0;
unsigned f_lm = 0;
#endif
+ unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
/* cpuid 1.edx */
const u32 kvm_supported_word0_x86_features =
@@ -1620,7 +1670,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
F(PAT) | F(PSE36) | 0 /* Reserved */ |
f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
- F(FXSR) | F(FXSR_OPT) | f_gbpages | 0 /* RDTSCP */ |
+ F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
/* cpuid 1.ecx */
const u32 kvm_supported_word4_x86_features =
@@ -1867,7 +1917,7 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
return 0;
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
- !(vcpu->arch.cr4 & X86_CR4_MCE)) {
+ !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
printk(KERN_DEBUG "kvm: set_mce: "
"injects mce exception while "
"previous one is in progress!\n");
@@ -2161,14 +2211,14 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
spin_lock(&kvm->mmu_lock);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
spin_unlock(&kvm->mmu_lock);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
@@ -2177,13 +2227,35 @@ static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
return kvm->arch.n_alloc_mmu_pages;
}
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+ struct kvm_mem_alias *alias;
+ struct kvm_mem_aliases *aliases;
+
+ aliases = rcu_dereference(kvm->arch.aliases);
+
+ for (i = 0; i < aliases->naliases; ++i) {
+ alias = &aliases->aliases[i];
+ if (alias->flags & KVM_ALIAS_INVALID)
+ continue;
+ if (gfn >= alias->base_gfn
+ && gfn < alias->base_gfn + alias->npages)
+ return alias->target_gfn + gfn - alias->base_gfn;
+ }
+ return gfn;
+}
+
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
struct kvm_mem_alias *alias;
+ struct kvm_mem_aliases *aliases;
+
+ aliases = rcu_dereference(kvm->arch.aliases);
- for (i = 0; i < kvm->arch.naliases; ++i) {
- alias = &kvm->arch.aliases[i];
+ for (i = 0; i < aliases->naliases; ++i) {
+ alias = &aliases->aliases[i];
if (gfn >= alias->base_gfn
&& gfn < alias->base_gfn + alias->npages)
return alias->target_gfn + gfn - alias->base_gfn;
@@ -2201,6 +2273,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
{
int r, n;
struct kvm_mem_alias *p;
+ struct kvm_mem_aliases *aliases, *old_aliases;
r = -EINVAL;
/* General sanity checks */
@@ -2217,26 +2290,48 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
< alias->target_phys_addr)
goto out;
- down_write(&kvm->slots_lock);
- spin_lock(&kvm->mmu_lock);
+ r = -ENOMEM;
+ aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!aliases)
+ goto out;
+
+ mutex_lock(&kvm->slots_lock);
+
+ /* invalidate any gfn reference in case of deletion/shrinking */
+ memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+ aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
+ old_aliases = kvm->arch.aliases;
+ rcu_assign_pointer(kvm->arch.aliases, aliases);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kvm_mmu_zap_all(kvm);
+ kfree(old_aliases);
+
+ r = -ENOMEM;
+ aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!aliases)
+ goto out_unlock;
- p = &kvm->arch.aliases[alias->slot];
+ memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+
+ p = &aliases->aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
p->npages = alias->memory_size >> PAGE_SHIFT;
p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+ p->flags &= ~(KVM_ALIAS_INVALID);
for (n = KVM_ALIAS_SLOTS; n > 0; --n)
- if (kvm->arch.aliases[n - 1].npages)
+ if (aliases->aliases[n - 1].npages)
break;
- kvm->arch.naliases = n;
-
- spin_unlock(&kvm->mmu_lock);
- kvm_mmu_zap_all(kvm);
+ aliases->naliases = n;
- up_write(&kvm->slots_lock);
-
- return 0;
+ old_aliases = kvm->arch.aliases;
+ rcu_assign_pointer(kvm->arch.aliases, aliases);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(old_aliases);
+ r = 0;
+out_unlock:
+ mutex_unlock(&kvm->slots_lock);
out:
return r;
}
@@ -2365,29 +2460,62 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log)
{
- int r;
- int n;
+ int r, n, i;
struct kvm_memory_slot *memslot;
- int is_dirty = 0;
+ unsigned long is_dirty = 0;
+ unsigned long *dirty_bitmap = NULL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
- r = kvm_get_dirty_log(kvm, log, &is_dirty);
- if (r)
+ r = -EINVAL;
+ if (log->slot >= KVM_MEMORY_SLOTS)
+ goto out;
+
+ memslot = &kvm->memslots->memslots[log->slot];
+ r = -ENOENT;
+ if (!memslot->dirty_bitmap)
+ goto out;
+
+ n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+
+ r = -ENOMEM;
+ dirty_bitmap = vmalloc(n);
+ if (!dirty_bitmap)
goto out;
+ memset(dirty_bitmap, 0, n);
+
+ for (i = 0; !is_dirty && i < n/sizeof(long); i++)
+ is_dirty = memslot->dirty_bitmap[i];
/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
+ struct kvm_memslots *slots, *old_slots;
+
spin_lock(&kvm->mmu_lock);
kvm_mmu_slot_remove_write_access(kvm, log->slot);
spin_unlock(&kvm->mmu_lock);
- memslot = &kvm->memslots[log->slot];
- n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
- memset(memslot->dirty_bitmap, 0, n);
+
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
+
+ old_slots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+ dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
+ kfree(old_slots);
}
+
r = 0;
+ if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+ r = -EFAULT;
+out_free:
+ vfree(dirty_bitmap);
out:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
@@ -2500,7 +2628,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
@@ -2509,7 +2637,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
break;
case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
@@ -2726,7 +2854,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
!kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
return 0;
- return kvm_io_bus_write(&vcpu->kvm->mmio_bus, addr, len, v);
+ return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
@@ -2735,7 +2863,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
return 0;
- return kvm_io_bus_read(&vcpu->kvm->mmio_bus, addr, len, v);
+ return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}
static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
@@ -3220,11 +3348,12 @@ static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
int r;
if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
+ r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
- r = kvm_io_bus_write(&vcpu->kvm->pio_bus, vcpu->arch.pio.port,
- vcpu->arch.pio.size, pd);
+ r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
+ vcpu->arch.pio.port, vcpu->arch.pio.size,
+ pd);
return r;
}
@@ -3235,7 +3364,7 @@ static int pio_string_write(struct kvm_vcpu *vcpu)
int i, r = 0;
for (i = 0; i < io->cur_count; i++) {
- if (kvm_io_bus_write(&vcpu->kvm->pio_bus,
+ if (kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
io->port, io->size, pd)) {
r = -EOPNOTSUPP;
break;
@@ -3584,7 +3713,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
unsigned long value;
- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
switch (cr) {
case 0:
value = vcpu->arch.cr0;
@@ -3596,7 +3724,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
value = vcpu->arch.cr3;
break;
case 4:
- value = vcpu->arch.cr4;
+ value = kvm_read_cr4(vcpu);
break;
case 8:
value = kvm_get_cr8(vcpu);
@@ -3624,7 +3752,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
kvm_set_cr3(vcpu, val);
break;
case 4:
- kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+ kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
break;
case 8:
kvm_set_cr8(vcpu, val & 0xfUL);
@@ -3691,6 +3819,7 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
}
return best;
}
+EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
@@ -3774,14 +3903,15 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
+ int idx;
if (!apic || !apic->vapic_addr)
return;
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -3910,7 +4040,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_lapic_sync_to_vapic(vcpu);
}
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
kvm_guest_enter();
@@ -3952,7 +4082,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
preempt_enable();
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* Profile KVM exit RIPs:
@@ -3974,6 +4104,7 @@ out:
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
+ struct kvm *kvm = vcpu->kvm;
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
pr_debug("vcpu %d received sipi with vector # %x\n",
@@ -3985,7 +4116,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
vapic_enter(vcpu);
r = 1;
@@ -3993,9 +4124,9 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
r = vcpu_enter_guest(vcpu);
else {
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
{
switch(vcpu->arch.mp_state) {
@@ -4030,13 +4161,13 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
++vcpu->stat.signal_exits;
}
if (need_resched()) {
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_resched(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
post_kvm_run_save(vcpu);
vapic_exit(vcpu);
@@ -4075,10 +4206,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
vcpu->mmio_read_completed = 1;
vcpu->mmio_needed = 0;
- down_read(&vcpu->kvm->slots_lock);
+ vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = emulate_instruction(vcpu, vcpu->arch.mmio_fault_cr2, 0,
EMULTYPE_NO_DECODE);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r == EMULATE_DO_MMIO) {
/*
* Read-modify-write. Back to userspace.
@@ -4205,11 +4336,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
sregs->gdt.limit = dt.limit;
sregs->gdt.base = dt.base;
- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
sregs->cr0 = vcpu->arch.cr0;
sregs->cr2 = vcpu->arch.cr2;
sregs->cr3 = vcpu->arch.cr3;
- sregs->cr4 = vcpu->arch.cr4;
+ sregs->cr4 = kvm_read_cr4(vcpu);
sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.shadow_efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
@@ -4378,6 +4508,15 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
(kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
}
+static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
+ u16 selector)
+{
+ /* NULL selector is not valid for CS and SS */
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
+ if (!selector)
+ kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
+}
+
int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
int type_bits, int seg)
{
@@ -4387,6 +4526,8 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
return kvm_load_realmode_segment(vcpu, selector, seg);
if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
return 1;
+
+ kvm_check_segment_descriptor(vcpu, seg, selector);
kvm_seg.type |= type_bits;
if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
@@ -4694,13 +4835,11 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_set_apic_base(vcpu, sregs->apic_base);
- kvm_x86_ops->decache_cr4_guest_bits(vcpu);
-
mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
- mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
+ mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.cr3);
@@ -4833,11 +4972,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
{
unsigned long vaddr = tr->linear_address;
gpa_t gpa;
+ int idx;
vcpu_load(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
@@ -5088,10 +5228,12 @@ fail:
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
+ int idx;
+
kvm_free_lapic(vcpu);
- down_read(&vcpu->kvm->slots_lock);
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_mmu_destroy(vcpu);
- up_read(&vcpu->kvm->slots_lock);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
free_page((unsigned long)vcpu->arch.pio_data);
}
@@ -5102,6 +5244,12 @@ struct kvm *kvm_arch_create_vm(void)
if (!kvm)
return ERR_PTR(-ENOMEM);
+ kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+ if (!kvm->arch.aliases) {
+ kfree(kvm);
+ return ERR_PTR(-ENOMEM);
+ }
+
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
@@ -5158,16 +5306,17 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
put_page(kvm->arch.apic_access_page);
if (kvm->arch.ept_identity_pagetable)
put_page(kvm->arch.ept_identity_pagetable);
+ kfree(kvm->arch.aliases);
kfree(kvm);
}
-int kvm_arch_set_memory_region(struct kvm *kvm,
- struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int npages = mem->memory_size >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+ int npages = memslot->npages;
/*To keep backward compatibility with older userspace,
*x86 needs to hanlde !user_alloc case.
@@ -5187,26 +5336,35 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
- /* set userspace_addr atomically for kvm_hva_to_rmapp */
- spin_lock(&kvm->mmu_lock);
memslot->userspace_addr = userspace_addr;
- spin_unlock(&kvm->mmu_lock);
- } else {
- if (!old.user_alloc && old.rmap) {
- int ret;
-
- down_write(&current->mm->mmap_sem);
- ret = do_munmap(current->mm, old.userspace_addr,
- old.npages * PAGE_SIZE);
- up_write(&current->mm->mmap_sem);
- if (ret < 0)
- printk(KERN_WARNING
- "kvm_vm_ioctl_set_memory_region: "
- "failed to munmap memory\n");
- }
}
}
+
+ return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_userspace_memory_region *mem,
+ struct kvm_memory_slot old,
+ int user_alloc)
+{
+
+ int npages = mem->memory_size >> PAGE_SHIFT;
+
+ if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
+ int ret;
+
+ down_write(&current->mm->mmap_sem);
+ ret = do_munmap(current->mm, old.userspace_addr,
+ old.npages * PAGE_SIZE);
+ up_write(&current->mm->mmap_sem);
+ if (ret < 0)
+ printk(KERN_WARNING
+ "kvm_vm_ioctl_set_memory_region: "
+ "failed to munmap memory\n");
+ }
+
spin_lock(&kvm->mmu_lock);
if (!kvm->arch.n_requested_mmu_pages) {
unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
@@ -5215,8 +5373,6 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
kvm_mmu_slot_remove_write_access(kvm, mem->slot);
spin_unlock(&kvm->mmu_lock);
-
- return 0;
}
void kvm_arch_flush_shadow(struct kvm *kvm)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c973f8e2a6cf..9a0c258a86be 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -892,8 +892,7 @@ void __init mem_init(void)
reservedpages << (PAGE_SHIFT-10),
datasize >> 10,
initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
- );
+ totalhigh_pages << (PAGE_SHIFT-10));
printk(KERN_INFO "virtual kernel memory layout:\n"
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
index b7a55dc55d13..f81a2fa8fe25 100644
--- a/arch/x86/pci/intel_bus.c
+++ b/arch/x86/pci/intel_bus.c
@@ -49,6 +49,10 @@ static void __devinit pci_root_bus_res(struct pci_dev *dev)
u64 mmioh_base, mmioh_end;
int bus_base, bus_end;
+ /* some sys doesn't get mmconf enabled */
+ if (dev->cfg_size < 0x120)
+ return;
+
if (pci_root_num >= PCI_ROOT_NR) {
printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
return;
diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c
index 4061bb0f267d..a8194c892385 100644
--- a/arch/x86/pci/legacy.c
+++ b/arch/x86/pci/legacy.c
@@ -11,28 +11,14 @@
*/
static void __devinit pcibios_fixup_peer_bridges(void)
{
- int n, devfn;
- long node;
+ int n;
if (pcibios_last_bus <= 0 || pcibios_last_bus > 0xff)
return;
DBG("PCI: Peer bridge fixup\n");
- for (n=0; n <= pcibios_last_bus; n++) {
- u32 l;
- if (pci_find_bus(0, n))
- continue;
- node = get_mp_bus_to_node(n);
- for (devfn = 0; devfn < 256; devfn += 8) {
- if (!raw_pci_read(0, n, devfn, PCI_VENDOR_ID, 2, &l) &&
- l != 0x0000 && l != 0xffff) {
- DBG("Found device at %02x:%02x [%04x]\n", n, devfn, l);
- printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
- pci_scan_bus_on_node(n, &pci_root_ops, node);
- break;
- }
- }
- }
+ for (n=0; n <= pcibios_last_bus; n++)
+ pcibios_scan_specific_bus(n);
}
static int __init pci_legacy_init(void)
@@ -53,6 +39,28 @@ static int __init pci_legacy_init(void)
return 0;
}
+void pcibios_scan_specific_bus(int busn)
+{
+ int devfn;
+ long node;
+ u32 l;
+
+ if (pci_find_bus(0, busn))
+ return;
+
+ node = get_mp_bus_to_node(busn);
+ for (devfn = 0; devfn < 256; devfn += 8) {
+ if (!raw_pci_read(0, busn, devfn, PCI_VENDOR_ID, 2, &l) &&
+ l != 0x0000 && l != 0xffff) {
+ DBG("Found device at %02x:%02x [%04x]\n", busn, devfn, l);
+ printk(KERN_INFO "PCI: Discovered peer bus %02x\n", busn);
+ pci_scan_bus_on_node(busn, &pci_root_ops, node);
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(pcibios_scan_specific_bus);
+
int __init pci_subsys_init(void)
{
#ifdef CONFIG_X86_NUMAQ
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 563d20504988..deafb65ef44e 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -361,7 +361,7 @@ static void xen_cpu_die(unsigned int cpu)
alternatives_smp_switch(0);
}
-static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
+static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
play_dead_common();
HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
index 88e15deb8b82..22a2093b5862 100644
--- a/arch/x86/xen/xen-asm_32.S
+++ b/arch/x86/xen/xen-asm_32.S
@@ -90,9 +90,9 @@ ENTRY(xen_iret)
GET_THREAD_INFO(%eax)
movl TI_cpu(%eax), %eax
movl __per_cpu_offset(,%eax,4), %eax
- mov per_cpu__xen_vcpu(%eax), %eax
+ mov xen_vcpu(%eax), %eax
#else
- movl per_cpu__xen_vcpu, %eax
+ movl xen_vcpu, %eax
#endif
/* check IF state we're restoring */
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index cbdabb0dd6d7..98e6bf61b0ac 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -39,8 +39,6 @@ int put_io_context(struct io_context *ioc)
if (atomic_long_dec_and_test(&ioc->refcount)) {
rcu_read_lock();
- if (ioc->aic && ioc->aic->dtor)
- ioc->aic->dtor(ioc->aic);
cfq_dtor(ioc);
rcu_read_unlock();
@@ -76,8 +74,6 @@ void exit_io_context(struct task_struct *task)
task_unlock(task);
if (atomic_dec_and_test(&ioc->nr_tasks)) {
- if (ioc->aic && ioc->aic->exit)
- ioc->aic->exit(ioc->aic);
cfq_exit(ioc);
}
@@ -97,7 +93,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
ret->ioprio = 0;
ret->last_waited = jiffies; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */
- ret->aic = NULL;
INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
INIT_HLIST_HEAD(&ret->cic_list);
ret->ioc_data = NULL;
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d52d4adc440b..78549c723783 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -507,7 +507,7 @@ static unsigned int lcm(unsigned int a, unsigned int b)
* blk_stack_limits - adjust queue_limits for stacked devices
* @t: the stacking driver limits (top device)
* @b: the underlying queue limits (bottom, component device)
- * @offset: offset to beginning of data within component device
+ * @start: first data sector within component device
*
* Description:
* This function is used by stacking drivers like MD and DM to ensure
@@ -525,10 +525,9 @@ static unsigned int lcm(unsigned int a, unsigned int b)
* the alignment_offset is undefined.
*/
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
- sector_t offset)
+ sector_t start)
{
- sector_t alignment;
- unsigned int top, bottom;
+ unsigned int top, bottom, alignment, ret = 0;
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -546,7 +545,9 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_segment_size = min_not_zero(t->max_segment_size,
b->max_segment_size);
- alignment = queue_limit_alignment_offset(b, offset);
+ t->misaligned |= b->misaligned;
+
+ alignment = queue_limit_alignment_offset(b, start);
/* Bottom device has different alignment. Check that it is
* compatible with the current top alignment.
@@ -558,8 +559,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
bottom = max(b->physical_block_size, b->io_min) + alignment;
/* Verify that top and bottom intervals line up */
- if (max(top, bottom) & (min(top, bottom) - 1))
+ if (max(top, bottom) & (min(top, bottom) - 1)) {
t->misaligned = 1;
+ ret = -1;
+ }
}
t->logical_block_size = max(t->logical_block_size,
@@ -578,18 +581,21 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
if (t->physical_block_size & (t->logical_block_size - 1)) {
t->physical_block_size = t->logical_block_size;
t->misaligned = 1;
+ ret = -1;
}
/* Minimum I/O a multiple of the physical block size? */
if (t->io_min & (t->physical_block_size - 1)) {
t->io_min = t->physical_block_size;
t->misaligned = 1;
+ ret = -1;
}
/* Optimal I/O a multiple of the physical block size? */
if (t->io_opt & (t->physical_block_size - 1)) {
t->io_opt = 0;
t->misaligned = 1;
+ ret = -1;
}
/* Find lowest common alignment_offset */
@@ -597,16 +603,14 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
& (max(t->physical_block_size, t->io_min) - 1);
/* Verify that new alignment_offset is on a logical block boundary */
- if (t->alignment_offset & (t->logical_block_size - 1))
+ if (t->alignment_offset & (t->logical_block_size - 1)) {
t->misaligned = 1;
+ ret = -1;
+ }
/* Discard alignment and granularity */
if (b->discard_granularity) {
- unsigned int granularity = b->discard_granularity;
- offset &= granularity - 1;
-
- alignment = (granularity + b->discard_alignment - offset)
- & (granularity - 1);
+ alignment = queue_limit_discard_alignment(b, start);
if (t->discard_granularity != 0 &&
t->discard_alignment != alignment) {
@@ -626,20 +630,41 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
(t->discard_granularity - 1);
}
- return t->misaligned ? -1 : 0;
+ return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
/**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t: the stacking driver limits (top device)
+ * @bdev: the component block_device (bottom)
+ * @start: first data sector within component device
+ *
+ * Description:
+ * Merges queue limits for a top device and a block_device. Returns
+ * 0 if alignment didn't change. Returns -1 if adding the bottom
+ * device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+ sector_t start)
+{
+ struct request_queue *bq = bdev_get_queue(bdev);
+
+ start += get_start_sect(bdev);
+
+ return blk_stack_limits(t, &bq->limits, start);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
+/**
* disk_stack_limits - adjust queue limits for stacked drivers
* @disk: MD/DM gendisk (top)
* @bdev: the underlying block device (bottom)
* @offset: offset to beginning of data within component device
*
* Description:
- * Merges the limits for two queues. Returns 0 if alignment
- * didn't change. Returns -1 if adding the bottom device caused
- * misalignment.
+ * Merges the limits for a top level gendisk and a bottom level
+ * block_device.
*/
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset)
@@ -647,9 +672,7 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
struct request_queue *t = disk->queue;
struct request_queue *b = bdev_get_queue(bdev);
- offset += get_start_sect(bdev) << 9;
-
- if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
+ if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
disk_name(disk, 0, top);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 918c7fd9aeb1..ee130f14d1fc 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3077,6 +3077,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
return true;
/*
+ * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+ */
+ if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+ return false;
+
+ /*
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
diff --git a/block/genhd.c b/block/genhd.c
index b11a4ad7d571..d13ba76a169c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -867,7 +867,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
- return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+ return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
}
static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 81c185a6971f..6a2e295ee227 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -114,6 +114,16 @@ config CRYPTO_NULL
help
These are 'Null' algorithms, used by IPsec, which do nothing.
+config CRYPTO_PCRYPT
+ tristate "Parallel crypto engine (EXPERIMENTAL)"
+ depends on SMP && EXPERIMENTAL
+ select PADATA
+ select CRYPTO_MANAGER
+ select CRYPTO_AEAD
+ help
+ This converts an arbitrary crypto algorithm into a parallel
+ algorithm that executes in kernel threads.
+
config CRYPTO_WORKQUEUE
tristate
diff --git a/crypto/Makefile b/crypto/Makefile
index 9e8f61908cb5..d7e6441df7fe 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_CRYPTO_XTS) += xts.o
obj-$(CONFIG_CRYPTO_CTR) += ctr.o
obj-$(CONFIG_CRYPTO_GCM) += gcm.o
obj-$(CONFIG_CRYPTO_CCM) += ccm.o
+obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
new file mode 100644
index 000000000000..b9527d05e17d
--- /dev/null
+++ b/crypto/pcrypt.c
@@ -0,0 +1,445 @@
+/*
+ * pcrypt - Parallel crypto wrapper.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <crypto/pcrypt.h>
+
+static struct padata_instance *pcrypt_enc_padata;
+static struct padata_instance *pcrypt_dec_padata;
+static struct workqueue_struct *encwq;
+static struct workqueue_struct *decwq;
+
+struct pcrypt_instance_ctx {
+ struct crypto_spawn spawn;
+ unsigned int tfm_count;
+};
+
+struct pcrypt_aead_ctx {
+ struct crypto_aead *child;
+ unsigned int cb_cpu;
+};
+
+static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
+ struct padata_instance *pinst)
+{
+ unsigned int cpu_index, cpu, i;
+
+ cpu = *cb_cpu;
+
+ if (cpumask_test_cpu(cpu, cpu_active_mask))
+ goto out;
+
+ cpu_index = cpu % cpumask_weight(cpu_active_mask);
+
+ cpu = cpumask_first(cpu_active_mask);
+ for (i = 0; i < cpu_index; i++)
+ cpu = cpumask_next(cpu, cpu_active_mask);
+
+ *cb_cpu = cpu;
+
+out:
+ return padata_do_parallel(pinst, padata, cpu);
+}
+
+static int pcrypt_aead_setkey(struct crypto_aead *parent,
+ const u8 *key, unsigned int keylen)
+{
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
+
+ return crypto_aead_setkey(ctx->child, key, keylen);
+}
+
+static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
+ unsigned int authsize)
+{
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
+
+ return crypto_aead_setauthsize(ctx->child, authsize);
+}
+
+static void pcrypt_aead_serial(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_request *req = pcrypt_request_ctx(preq);
+
+ aead_request_complete(req->base.data, padata->info);
+}
+
+static void pcrypt_aead_giv_serial(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
+
+ aead_request_complete(req->areq.base.data, padata->info);
+}
+
+static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
+{
+ struct aead_request *req = areq->data;
+ struct pcrypt_request *preq = aead_request_ctx(req);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+
+ padata->info = err;
+ req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ padata_do_serial(padata);
+}
+
+static void pcrypt_aead_enc(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_request *req = pcrypt_request_ctx(preq);
+
+ padata->info = crypto_aead_encrypt(req);
+
+ if (padata->info)
+ return;
+
+ padata_do_serial(padata);
+}
+
+static int pcrypt_aead_encrypt(struct aead_request *req)
+{
+ int err;
+ struct pcrypt_request *preq = aead_request_ctx(req);
+ struct aead_request *creq = pcrypt_request_ctx(preq);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ u32 flags = aead_request_flags(req);
+
+ memset(padata, 0, sizeof(struct padata_priv));
+
+ padata->parallel = pcrypt_aead_enc;
+ padata->serial = pcrypt_aead_serial;
+
+ aead_request_set_tfm(creq, ctx->child);
+ aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+ pcrypt_aead_done, req);
+ aead_request_set_crypt(creq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_assoc(creq, req->assoc, req->assoclen);
+
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
+ if (err)
+ return err;
+ else
+ err = crypto_aead_encrypt(creq);
+
+ return err;
+}
+
+static void pcrypt_aead_dec(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_request *req = pcrypt_request_ctx(preq);
+
+ padata->info = crypto_aead_decrypt(req);
+
+ if (padata->info)
+ return;
+
+ padata_do_serial(padata);
+}
+
+static int pcrypt_aead_decrypt(struct aead_request *req)
+{
+ int err;
+ struct pcrypt_request *preq = aead_request_ctx(req);
+ struct aead_request *creq = pcrypt_request_ctx(preq);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ u32 flags = aead_request_flags(req);
+
+ memset(padata, 0, sizeof(struct padata_priv));
+
+ padata->parallel = pcrypt_aead_dec;
+ padata->serial = pcrypt_aead_serial;
+
+ aead_request_set_tfm(creq, ctx->child);
+ aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+ pcrypt_aead_done, req);
+ aead_request_set_crypt(creq, req->src, req->dst,
+ req->cryptlen, req->iv);
+ aead_request_set_assoc(creq, req->assoc, req->assoclen);
+
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
+ if (err)
+ return err;
+ else
+ err = crypto_aead_decrypt(creq);
+
+ return err;
+}
+
+static void pcrypt_aead_givenc(struct padata_priv *padata)
+{
+ struct pcrypt_request *preq = pcrypt_padata_request(padata);
+ struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);
+
+ padata->info = crypto_aead_givencrypt(req);
+
+ if (padata->info)
+ return;
+
+ padata_do_serial(padata);
+}
+
+static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
+{
+ int err;
+ struct aead_request *areq = &req->areq;
+ struct pcrypt_request *preq = aead_request_ctx(areq);
+ struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
+ struct padata_priv *padata = pcrypt_request_padata(preq);
+ struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
+ struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
+ u32 flags = aead_request_flags(areq);
+
+ memset(padata, 0, sizeof(struct padata_priv));
+
+ padata->parallel = pcrypt_aead_givenc;
+ padata->serial = pcrypt_aead_giv_serial;
+
+ aead_givcrypt_set_tfm(creq, ctx->child);
+ aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
+ pcrypt_aead_done, areq);
+ aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
+ areq->cryptlen, areq->iv);
+ aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
+ aead_givcrypt_set_giv(creq, req->giv, req->seq);
+
+ err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
+ if (err)
+ return err;
+ else
+ err = crypto_aead_givencrypt(creq);
+
+ return err;
+}
+
+static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
+{
+ int cpu, cpu_index;
+ struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
+ struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
+ struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_aead *cipher;
+
+ ictx->tfm_count++;
+
+ cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);
+
+ ctx->cb_cpu = cpumask_first(cpu_active_mask);
+ for (cpu = 0; cpu < cpu_index; cpu++)
+ ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);
+
+ cipher = crypto_spawn_aead(crypto_instance_ctx(inst));
+
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
+ + sizeof(struct aead_givcrypt_request)
+ + crypto_aead_reqsize(cipher);
+
+ return 0;
+}
+
+static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ crypto_free_aead(ctx->child);
+}
+
+static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
+{
+ struct crypto_instance *inst;
+ struct pcrypt_instance_ctx *ctx;
+ int err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
+ if (!inst) {
+ inst = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ err = -ENAMETOOLONG;
+ if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+ "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
+ goto out_free_inst;
+
+ memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
+
+ ctx = crypto_instance_ctx(inst);
+ err = crypto_init_spawn(&ctx->spawn, alg, inst,
+ CRYPTO_ALG_TYPE_MASK);
+ if (err)
+ goto out_free_inst;
+
+ inst->alg.cra_priority = alg->cra_priority + 100;
+ inst->alg.cra_blocksize = alg->cra_blocksize;
+ inst->alg.cra_alignmask = alg->cra_alignmask;
+
+out:
+ return inst;
+
+out_free_inst:
+ kfree(inst);
+ inst = ERR_PTR(err);
+ goto out;
+}
+
+static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
+{
+ struct crypto_instance *inst;
+ struct crypto_alg *alg;
+ struct crypto_attr_type *algt;
+
+ algt = crypto_get_attr_type(tb);
+
+ alg = crypto_get_attr_alg(tb, algt->type,
+ (algt->mask & CRYPTO_ALG_TYPE_MASK));
+ if (IS_ERR(alg))
+ return ERR_CAST(alg);
+
+ inst = pcrypt_alloc_instance(alg);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
+ inst->alg.cra_type = &crypto_aead_type;
+
+ inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
+ inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
+ inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;
+
+ inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
+
+ inst->alg.cra_init = pcrypt_aead_init_tfm;
+ inst->alg.cra_exit = pcrypt_aead_exit_tfm;
+
+ inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
+ inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
+ inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
+ inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
+ inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return inst;
+}
+
+static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
+{
+ struct crypto_attr_type *algt;
+
+ algt = crypto_get_attr_type(tb);
+ if (IS_ERR(algt))
+ return ERR_CAST(algt);
+
+ switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ return pcrypt_alloc_aead(tb);
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void pcrypt_free(struct crypto_instance *inst)
+{
+ struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
+
+ crypto_drop_spawn(&ctx->spawn);
+ kfree(inst);
+}
+
+static struct crypto_template pcrypt_tmpl = {
+ .name = "pcrypt",
+ .alloc = pcrypt_alloc,
+ .free = pcrypt_free,
+ .module = THIS_MODULE,
+};
+
+static int __init pcrypt_init(void)
+{
+ encwq = create_workqueue("pencrypt");
+ if (!encwq)
+ goto err;
+
+ decwq = create_workqueue("pdecrypt");
+ if (!decwq)
+ goto err_destroy_encwq;
+
+
+ pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
+ if (!pcrypt_enc_padata)
+ goto err_destroy_decwq;
+
+ pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
+ if (!pcrypt_dec_padata)
+ goto err_free_padata;
+
+ padata_start(pcrypt_enc_padata);
+ padata_start(pcrypt_dec_padata);
+
+ return crypto_register_template(&pcrypt_tmpl);
+
+err_free_padata:
+ padata_free(pcrypt_enc_padata);
+
+err_destroy_decwq:
+ destroy_workqueue(decwq);
+
+err_destroy_encwq:
+ destroy_workqueue(encwq);
+
+err:
+ return -ENOMEM;
+}
+
+static void __exit pcrypt_exit(void)
+{
+ padata_stop(pcrypt_enc_padata);
+ padata_stop(pcrypt_dec_padata);
+
+ destroy_workqueue(encwq);
+ destroy_workqueue(decwq);
+
+ padata_free(pcrypt_enc_padata);
+ padata_free(pcrypt_dec_padata);
+
+ crypto_unregister_template(&pcrypt_tmpl);
+}
+
+module_init(pcrypt_init);
+module_exit(pcrypt_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
+MODULE_DESCRIPTION("Parallel crypto wrapper");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 7620bfce92f2..c494d7610be1 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1477,9 +1477,54 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
return err;
}
+static int alg_test_null(const struct alg_test_desc *desc,
+ const char *driver, u32 type, u32 mask)
+{
+ return 0;
+}
+
/* Please keep this list sorted by algorithm name. */
static const struct alg_test_desc alg_test_descs[] = {
{
+ .alg = "__driver-cbc-aes-aesni",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "__driver-ecb-aes-aesni",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "__ghash-pclmulqdqni",
+ .test = alg_test_null,
+ .suite = {
+ .hash = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }, {
.alg = "ansi_cprng",
.test = alg_test_cprng,
.fips_allowed = 1,
@@ -1623,6 +1668,30 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "cryptd(__driver-ecb-aes-aesni)",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
+ .alg = "cryptd(__ghash-pclmulqdqni)",
+ .test = alg_test_null,
+ .suite = {
+ .hash = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }, {
.alg = "ctr(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -1669,6 +1738,21 @@ static const struct alg_test_desc alg_test_descs[] = {
}
}
}, {
+ .alg = "ecb(__aes-aesni)",
+ .test = alg_test_null,
+ .suite = {
+ .cipher = {
+ .enc = {
+ .vecs = NULL,
+ .count = 0
+ },
+ .dec = {
+ .vecs = NULL,
+ .count = 0
+ }
+ }
+ }
+ }, {
.alg = "ecb(aes)",
.test = alg_test_skcipher,
.fips_allowed = 1,
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a1..f0d8b97909d3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -111,3 +111,4 @@ obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
obj-y += platform/
obj-y += ieee802154/
+obj-y += vbus/
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..191cf2bf408c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,6 @@ config ACPI_SBS
To compile this driver as a module, choose M here:
the modules will be called sbs and sbshc.
+source "drivers/acpi/apei/Kconfig"
+
endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 66cc3f36a954..c00d683bdfe2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,6 +19,7 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
+acpi-y += atomicio.o
acpi-y += hest.o
# sleep related files
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 97991ac6f5fc..7e52295f1ecc 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -208,7 +208,7 @@ static int power_saving_thread(void *data)
* the mechanism only works when all CPUs have RT task running,
* as if one CPU hasn't RT task, RT task from other CPUs will
* borrow CPU time from this CPU and cause RT task use > 95%
- * CPU time. To make 'avoid staration' work, takes a nap here.
+ * CPU time. To make 'avoid starvation' work, takes a nap here.
*/
if (do_sleep)
schedule_timeout_killable(HZ * idle_pct / 100);
@@ -222,14 +222,18 @@ static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
+ int rc = -ENOMEM;
+
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
"power_saving/%d", ps_tsk_num);
- if (ps_tsks[ps_tsk_num]) {
+ rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
+ if (!rc)
ps_tsk_num++;
- return 0;
- }
- return -EINVAL;
+ else
+ ps_tsks[ps_tsk_num] = NULL;
+
+ return rc;
}
static void destroy_power_saving_task(void)
@@ -237,6 +241,7 @@ static void destroy_power_saving_task(void)
if (ps_tsk_num > 0) {
ps_tsk_num--;
kthread_stop(ps_tsks[ps_tsk_num]);
+ ps_tsks[ps_tsk_num] = NULL;
}
}
@@ -253,7 +258,7 @@ static void set_power_saving_task_num(unsigned int num)
}
}
-static int acpi_pad_idle_cpus(unsigned int num_cpus)
+static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
get_online_cpus();
@@ -261,7 +266,6 @@ static int acpi_pad_idle_cpus(unsigned int num_cpus)
set_power_saving_task_num(num_cpus);
put_online_cpus();
- return 0;
}
static uint32_t acpi_pad_idle_cpus_num(void)
@@ -369,19 +373,21 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
- acpi_status status;
union acpi_object *package;
int rev, num, ret = -EINVAL;
- status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
+ return -EINVAL;
+
+ if (!buffer.length || !buffer.pointer)
return -EINVAL;
+
package = buffer.pointer;
if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
goto out;
rev = package->package.elements[0].integer.value;
num = package->package.elements[1].integer.value;
- if (rev != 1)
+ if (rev != 1 || num < 0)
goto out;
*num_cpus = num;
ret = 0;
@@ -410,7 +416,7 @@ static void acpi_pad_ost(acpi_handle handle, int stat,
static void acpi_pad_handle_notify(acpi_handle handle)
{
- int num_cpus, ret;
+ int num_cpus;
uint32_t idle_cpus;
mutex_lock(&isolated_cpus_lock);
@@ -418,12 +424,9 @@ static void acpi_pad_handle_notify(acpi_handle handle)
mutex_unlock(&isolated_cpus_lock);
return;
}
- ret = acpi_pad_idle_cpus(num_cpus);
+ acpi_pad_idle_cpus(num_cpus);
idle_cpus = acpi_pad_idle_cpus_num();
- if (!ret)
- acpi_pad_ost(handle, 0, idle_cpus);
- else
- acpi_pad_ost(handle, 1, 0);
+ acpi_pad_ost(handle, 0, idle_cpus);
mutex_unlock(&isolated_cpus_lock);
}
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..22791e4ab1cb
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,16 @@
+config ACPI_APEI
+ tristate "ACPI Platform Error Interface (APEI)"
+ depends on X86
+ help
+ APEI allows to report errors (for example from the chipset)
+ to the operating system. This improves NMI handling
+ especially. In addition it supports error serialization and
+ error injection.
+
+config ACPI_APEI_EINJ
+ tristate "APEI Error INJection (EINJ)"
+ depends on ACPI_APEI && DEBUG_FS
+ help
+ EINJ provides a hardware error injection mechanism, it is
+ mainly used for debugging and testing the other parts of
+ APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..fea86a9c3c2b
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,4 @@
+obj-$(CONFIG_ACPI_APEI) += apei.o
+obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+
+apei-y := apei-base.o hest.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..c35fec71e5e4
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,589 @@
+/*
+ * apei-base.c - ACPI Platform Error Interface (APEI) supporting
+ * infrastructure
+ *
+ * APEI allows to report errors (for example from the chipset) to the
+ * the operating system. This improves NMI handling especially. In
+ * addition it supports error serialization and error injection.
+ *
+ * For more information about APEI, please refer to ACPI Specification
+ * version 4.0, chapter 17.
+ *
+ * This file has Common functions used by more than one APEI tables,
+ * including framework of interpreter for ERST and EINJ; resource
+ * management for APEI registers.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <acpi/atomicio.h>
+
+#include "apei-internal.h"
+
+#define APEI_PFX "APEI: "
+
+struct dentry *apei_debug_dir;
+EXPORT_SYMBOL_GPL(apei_debug_dir);
+
+int hest_disable;
+EXPORT_SYMBOL(hest_disable);
+
+/*
+ * APEI ERST (Error Record Serialization Table) and EINJ (Error
+ * INJection) interpreter framework.
+ */
+
+#define APEI_EXEC_PRESERVE_REGISTER 0x1
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries)
+{
+ ctx->ins_table = ins_table;
+ ctx->instructions = instructions;
+ ctx->action_table = action_table;
+ ctx->entries = entries;
+}
+EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
+{
+ int rc;
+
+ rc = acpi_atomic_read(val, &entry->register_region);
+ if (rc)
+ return rc;
+ *val >>= entry->register_region.bit_offset;
+ *val &= entry->mask;
+
+ return 0;
+}
+
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+ u64 val = 0;
+
+ rc = __apei_exec_read_register(entry, &val);
+ if (rc)
+ return rc;
+ ctx->value = val;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register);
+
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ rc = apei_exec_read_register(ctx, entry);
+ if (rc)
+ return rc;
+ ctx->value = (ctx->value == entry->value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
+
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
+{
+ int rc;
+
+ val &= entry->mask;
+ val <<= entry->register_region.bit_offset;
+ if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
+ u64 valr = 0;
+ rc = acpi_atomic_read(&valr, &entry->register_region);
+ if (rc)
+ return rc;
+ valr &= ~(entry->mask << entry->register_region.bit_offset);
+ val |= valr;
+ }
+ rc = acpi_atomic_write(val, &entry->register_region);
+
+ return rc;
+}
+
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return __apei_exec_write_register(entry, ctx->value);
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register);
+
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ int rc;
+
+ ctx->value = entry->value;
+ rc = apei_exec_write_register(ctx, entry);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
+
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_noop);
+
+/*
+ * Interpret the specified action. Go through whole action table,
+ * execute all instructions belong to the action.
+ */
+int apei_exec_run(struct apei_exec_context *ctx, u8 action)
+{
+ int rc;
+ u32 i, ip;
+ struct acpi_whea_header *entry;
+ apei_exec_ins_func_t run;
+
+ ctx->ip = 0;
+
+ /*
+ * "ip" is the instruction pointer of current instruction,
+ * "ctx->ip" specifies the next instruction to executed,
+ * instruction "run" function may change the "ctx->ip" to
+ * implement "goto" semantics.
+ */
+rewind:
+ ip = 0;
+ for (i = 0; i < ctx->entries; i++) {
+ entry = &ctx->action_table[i];
+ if (entry->action != action)
+ continue;
+ if (ip == ctx->ip) {
+ if (entry->instruction >= ctx->instructions ||
+ !ctx->ins_table[entry->instruction].run) {
+ pr_info(APEI_PFX FW_WARN
+ "Invalid action table, unknown instruction type: %d\n",
+ entry->instruction);
+ return -EINVAL;
+ }
+ run = ctx->ins_table[entry->instruction].run;
+ rc = run(ctx, entry);
+ if (rc < 0)
+ return rc;
+ else if (rc != APEI_EXEC_SET_IP)
+ ctx->ip++;
+ }
+ ip++;
+ if (ctx->ip < ip)
+ goto rewind;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_exec_run);
+
+typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data);
+
+static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
+ apei_exec_entry_func_t func,
+ void *data,
+ int *end)
+{
+ u8 ins;
+ int i, rc;
+ struct acpi_whea_header *entry;
+ struct apei_exec_ins_type *ins_table = ctx->ins_table;
+
+ for (i = 0; i < ctx->entries; i++) {
+ entry = ctx->action_table + i;
+ ins = entry->instruction;
+ if (end)
+ *end = i;
+ if (ins >= ctx->instructions || !ins_table[ins].run) {
+ pr_info(APEI_PFX FW_WARN
+ "Invalid action table, unknown instruction type: %d\n",
+ ins);
+ return -EINVAL;
+ }
+ rc = func(ctx, entry, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int pre_map_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ return acpi_pre_map_gar(&entry->register_region);
+
+ return 0;
+}
+
+/*
+ * Pre-map all GARs in action table to make it possible to access them
+ * in NMI handler.
+ */
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
+{
+ int rc, end;
+
+ rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
+ NULL, &end);
+ if (rc) {
+ struct apei_exec_context ctx_unmap;
+ memcpy(&ctx_unmap, ctx, sizeof(*ctx));
+ ctx_unmap.entries = end;
+ apei_exec_post_unmap_gars(&ctx_unmap);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
+
+static int post_unmap_gar_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ u8 ins = entry->instruction;
+
+ if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
+ acpi_post_unmap_gar(&entry->register_region);
+
+ return 0;
+}
+
+/* Post-unmap all GAR in action table. */
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
+{
+ return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
+ NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
+
+/*
+ * Resource management for GARs in APEI
+ */
+struct apei_res {
+ struct list_head list;
+ unsigned long start;
+ unsigned long end;
+};
+
+static int apei_res_add(struct list_head *res_list,
+ unsigned long start, unsigned long size)
+{
+ struct apei_res *res, *resn, *res_ins = NULL;
+ unsigned long end = start + size;
+
+ if (end <= start)
+ return 0;
+repeat:
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ if (res->start > end || res->end < start)
+ continue;
+ else if (end <= res->end && start >= res->start) {
+ kfree(res_ins);
+ return 0;
+ }
+ list_del(&res->list);
+ res->start = start = min(res->start, start);
+ res->end = end = max(res->end, end);
+ kfree(res_ins);
+ res_ins = res;
+ goto repeat;
+ }
+
+ if (res_ins)
+ list_add(&res_ins->list, res_list);
+ else {
+ res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res_ins)
+ return -ENOMEM;
+ res_ins->start = start;
+ res_ins->end = end;
+ list_add(&res_ins->list, res_list);
+ }
+
+ return 0;
+}
+
+static int apei_res_sub(struct list_head *res_list1,
+ struct list_head *res_list2)
+{
+ struct apei_res *res1, *resn1, *res2, *res;
+ res1 = list_entry(res_list1->next, struct apei_res, list);
+ resn1 = list_entry(res1->list.next, struct apei_res, list);
+ while (&res1->list != res_list1) {
+ list_for_each_entry(res2, res_list2, list) {
+ if (res1->start >= res2->end ||
+ res1->end <= res2->start)
+ continue;
+ else if (res1->end <= res2->end &&
+ res1->start >= res2->start) {
+ list_del(&res1->list);
+ kfree(res1);
+ break;
+ } else if (res1->end > res2->end &&
+ res1->start < res2->start) {
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->start = res2->end;
+ res->end = res1->end;
+ res1->end = res2->start;
+ list_add(&res->list, &res1->list);
+ resn1 = res;
+ } else {
+ if (res1->start < res2->start)
+ res1->end = res2->start;
+ else
+ res1->start = res2->end;
+ }
+ }
+ res1 = resn1;
+ resn1 = list_entry(resn1->list.next, struct apei_res, list);
+ }
+
+ return 0;
+}
+
+static void apei_res_clean(struct list_head *res_list)
+{
+ struct apei_res *res, *resn;
+
+ list_for_each_entry_safe(res, resn, res_list, list) {
+ list_del(&res->list);
+ kfree(res);
+ }
+}
+
+void apei_resources_fini(struct apei_resources *resources)
+{
+ apei_res_clean(&resources->iomem);
+ apei_res_clean(&resources->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_fini);
+
+/*
+ * EINJ has two groups of GARs (EINJ table entry and trigger table
+ * entry), so common resources are subtracted from the trigger table
+ * resources before the second requesting.
+ */
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2)
+{
+ int rc;
+
+ rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
+ if (rc)
+ return rc;
+ return apei_res_sub(&resources1->ioport, &resources2->ioport);
+}
+EXPORT_SYMBOL_GPL(apei_resources_sub);
+
+/*
+ * IO memory/port rersource management mechanism is used to check
+ * whether memory/port area used by GARs conflicts with normal memory
+ * or IO memory/port of devices.
+ */
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc)
+{
+ struct apei_res *res, *res_bak;
+ struct resource *r;
+
+ list_for_each_entry(res, &resources->iomem, list) {
+ r = request_mem_region(res->start, res->end - res->start,
+ desc);
+ if (!r) {
+ pr_info(APEI_PFX
+ "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_iomem;
+ }
+ }
+
+ list_for_each_entry(res, &resources->ioport, list) {
+ r = request_region(res->start, res->end - res->start, desc);
+ if (!r) {
+ pr_info(APEI_PFX
+ "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ (unsigned long long)res->start,
+ (unsigned long long)res->end);
+ res_bak = res;
+ goto err_unmap_ioport;
+ }
+ }
+
+ return 0;
+err_unmap_ioport:
+ list_for_each_entry(res, &resources->ioport, list) {
+ if (res == res_bak)
+ break;
+ release_mem_region(res->start, res->end - res->start);
+ }
+ res_bak = NULL;
+err_unmap_iomem:
+ list_for_each_entry(res, &resources->iomem, list) {
+ if (res == res_bak)
+ break;
+ release_region(res->start, res->end - res->start);
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(apei_resources_request);
+
+void apei_resources_release(struct apei_resources *resources)
+{
+ struct apei_res *res;
+
+ list_for_each_entry(res, &resources->iomem, list)
+ release_mem_region(res->start, res->end - res->start);
+ list_for_each_entry(res, &resources->ioport, list)
+ release_region(res->start, res->end - res->start);
+}
+EXPORT_SYMBOL_GPL(apei_resources_release);
+
+static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ pr_info(APEI_PFX FW_BUG
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ pr_info(APEI_PFX FW_BUG
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ pr_info(APEI_PFX FW_BUG
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int collect_res_callback(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry,
+ void *data)
+{
+ struct apei_resources *resources = data;
+ struct acpi_generic_address *reg = &entry->register_region;
+ u8 ins = entry->instruction;
+ u64 paddr;
+ int rc;
+
+ if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
+ return 0;
+
+ rc = apei_check_gar(reg, &paddr);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return apei_res_add(&resources->iomem, paddr,
+ reg->bit_width / 8);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return apei_res_add(&resources->ioport, paddr,
+ reg->bit_width / 8);
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Same register may be used by multiple instructions in GARs, so
+ * resources are collected before requesting.
+ */
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources)
+{
+ return apei_exec_for_each_entry(ctx, collect_res_callback,
+ resources, NULL);
+}
+EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
+
+static int __init apei_init(void)
+{
+ int rc;
+
+ apei_debug_dir = debugfs_create_dir("apei", NULL);
+ if (!apei_debug_dir)
+ return -ENOMEM;
+ if (!hest_disable) {
+ rc = hest_init();
+ if (rc) {
+ hest_disable = 1;
+ if (rc != -ENODEV)
+ pr_err(
+ "ACPI: APEI: Failed to initialize Hardware "
+ "Error Source Table (HEST) subsystem\n");
+ }
+ }
+
+ return 0;
+}
+
+static void __exit apei_exit(void)
+{
+ debugfs_remove_recursive(apei_debug_dir);
+}
+
+module_init(apei_init);
+module_exit(apei_exit);
+
+module_param(hest_disable, int, 0444);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("ACPI Platform Error Interface support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..1ab65984a780
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,97 @@
+/*
+ * apei-internal.h - ACPI Platform Error Interface internal
+ * definations.
+ */
+
+#ifndef APEI_INTERNAL_H
+#define APEI_INTERNAL_H
+
+int hest_init(void);
+
+struct apei_exec_context;
+
+typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+
+#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
+
+struct apei_exec_ins_type {
+ u32 flags;
+ apei_exec_ins_func_t run;
+};
+
+struct apei_exec_context {
+ u32 ip;
+ u64 value;
+ u64 var1;
+ u64 var2;
+ u64 src_base;
+ u64 dst_base;
+ struct apei_exec_ins_type *ins_table;
+ u32 instructions;
+ struct acpi_whea_header *action_table;
+ u32 entries;
+};
+
+void apei_exec_ctx_init(struct apei_exec_context *ctx,
+ struct apei_exec_ins_type *ins_table,
+ u32 instructions,
+ struct acpi_whea_header *action_table,
+ u32 entries);
+
+static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
+ u64 input)
+{
+ ctx->value = input;
+}
+
+static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
+{
+ return ctx->value;
+}
+
+int apei_exec_run(struct apei_exec_context *ctx, u8 action);
+
+/* Common instruction implementation */
+
+/* IP has been set in instruction function */
+#define APEI_EXEC_SET_IP 1
+
+int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
+int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
+int apei_exec_read_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_read_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_write_register_value(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_noop(struct apei_exec_context *ctx,
+ struct acpi_whea_header *entry);
+int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
+int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
+
+struct apei_resources {
+ struct list_head iomem;
+ struct list_head ioport;
+};
+
+static inline void apei_resources_init(struct apei_resources *resources)
+{
+ INIT_LIST_HEAD(&resources->iomem);
+ INIT_LIST_HEAD(&resources->ioport);
+}
+
+void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_sub(struct apei_resources *resources1,
+ struct apei_resources *resources2);
+int apei_resources_request(struct apei_resources *resources,
+ const char *desc);
+void apei_resources_release(struct apei_resources *resources);
+int apei_exec_collect_resources(struct apei_exec_context *ctx,
+ struct apei_resources *resources);
+
+struct dentry;
+extern struct dentry *apei_debug_dir;
+#endif
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..5521609c4b54
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,471 @@
+/*
+ * APEI Error INJection support
+ *
+ * EINJ provides a hardware error injection mechanism, this is useful
+ * for debugging and testing of other APEI and RAS features.
+ *
+ * For more information about EINJ, please refer to ACPI Specification
+ * version 4.0, section 17.5.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <acpi/acpi.h>
+
+#include "apei-internal.h"
+
+#define EINJ_PFX "EINJ: "
+
+#define EINJ_OP_BUSY 0x1
+#define EINJ_STATUS_SUCCESS 0x0
+#define EINJ_STATUS_FAIL 0x1
+#define EINJ_STATUS_INVAL 0x2
+
+#define EINJ_TAB_ENTRY(tab) \
+ ((struct acpi_whea_header *)((char *)(tab) + \
+ sizeof(struct acpi_table_einj)))
+
+static struct acpi_table_einj *einj_tab;
+
+static struct apei_resources einj_resources;
+
+static struct apei_exec_ins_type einj_ins_type[] = {
+ [ACPI_EINJ_READ_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register,
+ },
+ [ACPI_EINJ_READ_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_read_register_value,
+ },
+ [ACPI_EINJ_WRITE_REGISTER] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register,
+ },
+ [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
+ .flags = APEI_EXEC_INS_ACCESS_REGISTER,
+ .run = apei_exec_write_register_value,
+ },
+ [ACPI_EINJ_NOOP] = {
+ .flags = 0,
+ .run = apei_exec_noop,
+ },
+};
+
+/*
+ * Prevent EINJ interpreter to run simultaneously, because the
+ * corresponding firmware implementation may not work properly when
+ * invoked simultaneously.
+ */
+static DEFINE_MUTEX(einj_mutex);
+
+static void einj_exec_ctx_init(struct apei_exec_context *ctx)
+{
+ apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
+ EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
+}
+
+static int __einj_get_available_error_type(u32 *type)
+{
+ struct apei_exec_context ctx;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ *type = apei_exec_ctx_get_output(&ctx);
+
+ return 0;
+}
+
+/* Get error injection capabilities of the platform */
+static int einj_get_available_error_type(u32 *type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_get_available_error_type(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+/* do sanity check to trigger table */
+static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
+{
+ if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
+ return -EINVAL;
+ if (trigger_tab->table_size > PAGE_SIZE ||
+ trigger_tab->table_size <= trigger_tab->header_size)
+ return -EINVAL;
+ if (trigger_tab->entry_count !=
+ (trigger_tab->table_size - trigger_tab->header_size) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Execute instructions in trigger error action table */
+static int __einj_error_trigger(u64 trigger_paddr)
+{
+ struct acpi_einj_trigger *trigger_tab = NULL;
+ struct apei_exec_context trigger_ctx;
+ struct apei_resources trigger_resources;
+ struct acpi_whea_header *trigger_entry;
+ struct resource *r;
+ u32 table_size;
+ int rc = -EIO;
+
+ r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_info(EINJ_PFX
+ "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ (unsigned long long)trigger_paddr,
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ goto out;
+ }
+ trigger_tab = ioremap(trigger_paddr, sizeof(*trigger_tab));
+ if (!trigger_tab) {
+ pr_info(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_header;
+ }
+ rc = einj_check_trigger_header(trigger_tab);
+ if (rc) {
+ pr_info(EINJ_PFX FW_BUG
+ "The trigger error action table is invalid\n");
+ goto out_rel_header;
+ }
+ rc = -EIO;
+ table_size = trigger_tab->table_size;
+ r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab),
+ "APEI EINJ Trigger Table");
+ if (!r) {
+ pr_info(EINJ_PFX
+"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
+ (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size);
+ goto out_rel_header;
+ }
+ iounmap(trigger_tab);
+ trigger_tab = ioremap(trigger_paddr, table_size);
+ if (!trigger_tab) {
+ pr_info(EINJ_PFX "Failed to map trigger table!\n");
+ goto out_rel_entry;
+ }
+ trigger_entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ apei_resources_init(&trigger_resources);
+ apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
+ ARRAY_SIZE(einj_ins_type),
+ trigger_entry, trigger_tab->entry_count);
+ rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources, &einj_resources);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
+ if (rc)
+ goto out_fini;
+ rc = apei_exec_pre_map_gars(&trigger_ctx);
+ if (rc)
+ goto out_release;
+
+ rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
+
+ apei_exec_post_unmap_gars(&trigger_ctx);
+out_release:
+ apei_resources_release(&trigger_resources);
+out_fini:
+ apei_resources_fini(&trigger_resources);
+out_rel_entry:
+ release_mem_region(trigger_paddr + sizeof(*trigger_tab),
+ table_size - sizeof(*trigger_tab));
+out_rel_header:
+ release_mem_region(trigger_paddr, sizeof(*trigger_tab));
+out:
+ if (trigger_tab)
+ iounmap(trigger_tab);
+
+ return rc;
+}
+
+static int __einj_error_inject(u32 type)
+{
+ struct apei_exec_context ctx;
+ unsigned long start;
+ u64 val, trigger_paddr;
+ int rc;
+
+ einj_exec_ctx_init(&ctx);
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
+ if (rc)
+ return rc;
+ apei_exec_ctx_set_input(&ctx, type);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
+ if (rc)
+ return rc;
+ /* Firmware should respond within 5 jiffies */
+ start = jiffies;
+ do {
+ if (time_after_eq(jiffies, start + 5)) {
+ pr_info(EINJ_PFX FW_WARN
+ "Firmware does not respond in time\n");
+ return -EIO;
+ }
+ rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ } while (val & EINJ_OP_BUSY);
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
+ if (rc)
+ return rc;
+ val = apei_exec_ctx_get_output(&ctx);
+ if (val != EINJ_STATUS_SUCCESS)
+ return -EBUSY;
+
+ rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
+ if (rc)
+ return rc;
+ trigger_paddr = apei_exec_ctx_get_output(&ctx);
+ rc = __einj_error_trigger(trigger_paddr);
+ if (rc)
+ return rc;
+ rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
+
+ return rc;
+}
+
+/* Inject the specified hardware error */
+static int einj_error_inject(u32 type)
+{
+ int rc;
+
+ mutex_lock(&einj_mutex);
+ rc = __einj_error_inject(type);
+ mutex_unlock(&einj_mutex);
+
+ return rc;
+}
+
+static u32 error_type;
+static struct dentry *einj_debug_dir;
+
+static int available_error_type_show(struct seq_file *m, void *v)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (available_error_type & 0x0001)
+ seq_printf(m, "0x00000001\tProcessor Correctable\n");
+ if (available_error_type & 0x0002)
+ seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0004)
+ seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
+ if (available_error_type & 0x0008)
+ seq_printf(m, "0x00000008\tMemory Correctable\n");
+ if (available_error_type & 0x0010)
+ seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0020)
+ seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
+ if (available_error_type & 0x0040)
+ seq_printf(m, "0x00000040\tPCI Express Correctable\n");
+ if (available_error_type & 0x0080)
+ seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0100)
+ seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
+ if (available_error_type & 0x0200)
+ seq_printf(m, "0x00000200\tPlatform Correctable\n");
+ if (available_error_type & 0x0400)
+ seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
+ if (available_error_type & 0x0800)
+ seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
+
+ return 0;
+}
+
+static int available_error_type_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, available_error_type_show, NULL);
+}
+
+static const struct file_operations available_error_type_fops = {
+ .open = available_error_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int error_type_get(void *data, u64 *val)
+{
+ *val = error_type;
+
+ return 0;
+}
+
+static int error_type_set(void *data, u64 val)
+{
+ int rc;
+ u32 available_error_type = 0;
+
+ /* Only one error type can be specified */
+ if (val & (val - 1))
+ return -EINVAL;
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ error_type = val;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
+ error_type_set, "0x%llx\n");
+
+static int error_inject_set(void *data, u64 val)
+{
+ if (!error_type)
+ return -EINVAL;
+
+ return einj_error_inject(error_type);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
+ error_inject_set, "%llu\n");
+
+static int einj_check_table(struct acpi_table_einj *einj_tab)
+{
+ if (einj_tab->header_length != sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->header.length < sizeof(struct acpi_table_einj))
+ return -EINVAL;
+ if (einj_tab->entries !=
+ (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
+ sizeof(struct acpi_einj_entry))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __init einj_init(void)
+{
+ int rc;
+ acpi_status status;
+ struct dentry *fentry;
+ struct apei_exec_context ctx;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table(ACPI_SIG_EINJ, 0,
+ (struct acpi_table_header **)&einj_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(EINJ_PFX "Table is not found!\n");
+ return -ENODEV;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_info(EINJ_PFX "Failed to get table, %s\n", msg);
+ return -EINVAL;
+ }
+
+ rc = einj_check_table(einj_tab);
+ if (rc) {
+ pr_info(EINJ_PFX FW_BUG "EINJ table is invalid\n");
+ return -EINVAL;
+ }
+
+ rc = -ENOMEM;
+ einj_debug_dir = debugfs_create_dir("einj", apei_debug_dir);
+ if (!einj_debug_dir)
+ goto err_cleanup;
+ fentry = debugfs_create_file("available_error_type", S_IRUSR,
+ einj_debug_dir, NULL,
+ &available_error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
+ einj_debug_dir, NULL, &error_type_fops);
+ if (!fentry)
+ goto err_cleanup;
+ fentry = debugfs_create_file("error_inject", S_IWUSR,
+ einj_debug_dir, NULL, &error_inject_fops);
+ if (!fentry)
+ goto err_cleanup;
+
+ apei_resources_init(&einj_resources);
+ einj_exec_ctx_init(&ctx);
+ rc = apei_exec_collect_resources(&ctx, &einj_resources);
+ if (rc)
+ goto err_fini;
+ rc = apei_resources_request(&einj_resources, "APEI EINJ");
+ if (rc)
+ goto err_fini;
+ rc = apei_exec_pre_map_gars(&ctx);
+ if (rc)
+ goto err_release;
+
+ pr_info(EINJ_PFX "Error INJection is initialized.\n");
+
+ return 0;
+
+err_release:
+ apei_resources_release(&einj_resources);
+err_fini:
+ apei_resources_fini(&einj_resources);
+err_cleanup:
+ debugfs_remove_recursive(einj_debug_dir);
+
+ return rc;
+}
+
+static void __exit einj_exit(void)
+{
+ struct apei_exec_context ctx;
+
+ einj_exec_ctx_init(&ctx);
+ apei_exec_post_unmap_gars(&ctx);
+ apei_resources_release(&einj_resources);
+ apei_resources_fini(&einj_resources);
+ debugfs_remove_recursive(einj_debug_dir);
+}
+
+module_init(einj_init);
+module_exit(einj_exit);
+
+MODULE_AUTHOR("Huang Ying");
+MODULE_DESCRIPTION("APEI Error INJection support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 000000000000..f872e5429677
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,147 @@
+/*
+ * APEI Hardware Error Souce Table support
+ *
+ * HEST describes error sources in detail; communicates operational
+ * parameters (i.e. severity levels, masking bits, and threshold
+ * values) to OS as necessary. It also allows the platform to report
+ * error sources for which OS would typically not implement support
+ * (for example, chipset-specific error registers).
+ *
+ * For more information about HEST, please refer to ACPI Specification
+ * version 4.0, section 17.3.2.
+ *
+ * Copyright 2009 Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation;
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/kdebug.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <acpi/apei.h>
+
+#include "apei-internal.h"
+
+#define HEST_PFX "HEST: "
+
+/* HEST table parsing */
+
+static struct acpi_table_hest *hest_tab;
+
+static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
+{
+ return 0;
+}
+
+static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
+ [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
+ [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
+ [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
+ [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
+ [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
+ [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
+ [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
+};
+
+static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
+{
+ u16 hest_type = hest_hdr->type;
+ int len;
+
+ if (hest_type >= ACPI_HEST_TYPE_RESERVED)
+ return 0;
+
+ len = hest_esrc_len_tab[hest_type];
+
+ if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
+ struct acpi_hest_ia_corrected *cmc;
+ cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+ len = sizeof(*cmc) + cmc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
+ struct acpi_hest_ia_machine_check *mc;
+ mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
+ len = sizeof(*mc) + mc->num_hardware_banks *
+ sizeof(struct acpi_hest_ia_error_bank);
+ }
+ BUG_ON(len == -1);
+
+ return len;
+};
+
+int apei_hest_parse(apei_hest_func_t func, void *data)
+{
+ struct acpi_hest_header *hest_hdr;
+ int i, rc, len;
+
+ if (hest_disable)
+ return -EINVAL;
+
+ hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
+ for (i = 0; i < hest_tab->error_source_count; i++) {
+ len = hest_esrc_len(hest_hdr);
+ if (!len) {
+ pr_info(HEST_PFX FW_WARN "Unknown or unused hardware "
+ "error source type: %d\n", hest_hdr->type);
+ return -EINVAL;
+ }
+ if ((void *)hest_hdr + len >
+ (void *)hest_tab + hest_tab->header.length) {
+ pr_info(HEST_PFX FW_BUG "Table contents overflow!\n");
+ return -EINVAL;
+ }
+
+ rc = func(hest_hdr, data);
+ if (rc)
+ return rc;
+
+ hest_hdr = (void *)hest_hdr + len;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_hest_parse);
+
+int __init hest_init(void)
+{
+ acpi_status status;
+ int rc;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ status = acpi_get_table(ACPI_SIG_HEST, 0,
+ (struct acpi_table_header **)&hest_tab);
+ if (status == AE_NOT_FOUND) {
+ pr_info(HEST_PFX "Table is not found!\n");
+ return -ENODEV;
+ } else if (ACPI_FAILURE(status)) {
+ const char *msg = acpi_format_exception(status);
+ pr_info(HEST_PFX "Failed to get table, %s\n", msg);
+ return -EINVAL;
+ }
+
+ rc = apei_hest_parse(hest_void_parse, NULL);
+ if (rc)
+ return rc;
+
+ pr_info(HEST_PFX "HEST table parsing is initialized.\n");
+
+ return 0;
+}
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..4ed13853eff2
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
+/*
+ * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
+ * accessing in atomic context.
+ *
+ * This is used for NMI handler to access IO memory area, because
+ * ioremap/iounmap can not be used in NMI handler. The IO memory area
+ * is pre-mapped in process context and accessed in NMI handler.
+ *
+ * Copyright (C) 2009, Intel Corp.
+ * Author: Huang Ying <ying.huang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/kref.h>
+#include <linux/rculist.h>
+#include <linux/interrupt.h>
+#include <acpi/atomicio.h>
+
+#define ACPI_PFX "ACPI: "
+
+static LIST_HEAD(acpi_iomaps);
+/*
+ * Used for mutual exclusion between writers of acpi_iomaps list, for
+ * synchronization between readers and writer, RCU is used.
+ */
+static DEFINE_SPINLOCK(acpi_iomaps_lock);
+
+struct acpi_iomap {
+ struct list_head list;
+ void __iomem *vaddr;
+ unsigned long size;
+ phys_addr_t paddr;
+ struct kref ref;
+};
+
+/* acpi_iomaps_lock or RCU read lock must be held before calling */
+static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ list_for_each_entry_rcu(map, &acpi_iomaps, list) {
+ if (map->paddr + map->size >= paddr + size &&
+ map->paddr <= paddr)
+ return map;
+ }
+ return NULL;
+}
+
+/*
+ * Atomic "ioremap" used by NMI handler, if the specified IO memory
+ * area is not pre-mapped, NULL will be returned.
+ *
+ * acpi_iomaps_lock or RCU read lock must be held before calling
+ */
+static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map)
+ return map->vaddr + (paddr - map->paddr);
+ else
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
+ unsigned long size)
+{
+ struct acpi_iomap *map;
+
+ map = __acpi_find_iomap(paddr, size);
+ if (map) {
+ kref_get(&map->ref);
+ return map->vaddr + (paddr - map->paddr);
+ } else
+ return NULL;
+}
+
+/*
+ * Used to pre-map the specified IO memory area. First try to find
+ * whether the area is already pre-mapped, if it is, increase the
+ * reference count (in __acpi_try_ioremap) and return; otherwise, do
+ * the real ioremap, and add the mapping into acpi_iomaps list.
+ */
+static void __iomem *acpi_pre_map(phys_addr_t paddr,
+ unsigned long size)
+{
+ void __iomem *vaddr;
+ struct acpi_iomap *map;
+ unsigned long pg_sz, flags;
+ phys_addr_t pg_off;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ if (vaddr)
+ return vaddr;
+
+ pg_off = paddr & PAGE_MASK;
+ pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
+ vaddr = ioremap(pg_off, pg_sz);
+ if (!vaddr)
+ return NULL;
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ goto err_unmap;
+ INIT_LIST_HEAD(&map->list);
+ map->paddr = pg_off;
+ map->size = pg_sz;
+ map->vaddr = vaddr;
+ kref_init(&map->ref);
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ vaddr = __acpi_try_ioremap(paddr, size);
+ if (vaddr) {
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+ iounmap(map->vaddr);
+ kfree(map);
+ return vaddr;
+ }
+ list_add_tail_rcu(&map->list, &acpi_iomaps);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ return vaddr + (paddr - pg_off);
+err_unmap:
+ iounmap(vaddr);
+ return NULL;
+}
+
+/* acpi_iomaps_lock must be held before calling */
+static void __acpi_kref_del_iomap(struct kref *ref)
+{
+ struct acpi_iomap *map;
+
+ map = container_of(ref, struct acpi_iomap, ref);
+ list_del_rcu(&map->list);
+}
+
+/*
+ * Used to post-unmap the specified IO memory area. The iounmap is
+ * done only if the reference count goes zero.
+ */
+static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
+{
+ struct acpi_iomap *map;
+ unsigned long flags;
+ int del;
+
+ spin_lock_irqsave(&acpi_iomaps_lock, flags);
+ map = __acpi_find_iomap(paddr, size);
+ BUG_ON(!map);
+ del = kref_put(&map->ref, __acpi_kref_del_iomap);
+ spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
+
+ if (!del)
+ return;
+
+ synchronize_rcu();
+ iounmap(map->vaddr);
+ kfree(map);
+}
+
+/* In NMI handler, should set silent = 1 */
+static int acpi_check_gar(struct acpi_generic_address *reg,
+ u64 *paddr, int silent)
+{
+ u32 width, space_id;
+
+ width = reg->bit_width;
+ space_id = reg->space_id;
+ /* Handle possible alignment issues */
+ memcpy(paddr, &reg->address, sizeof(*paddr));
+ if (!*paddr) {
+ if (!silent)
+ pr_info(ACPI_PFX FW_BUG
+ "Invalid physical address in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
+ if (!silent)
+ pr_info(ACPI_PFX FW_BUG
+ "Invalid bit width in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
+ if (!silent)
+ pr_info(ACPI_PFX FW_BUG
+ "Invalid address space type in GAR [0x%llx/%u/%u]\n",
+ *paddr, width, space_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Pre-map, working on GAR */
+int acpi_pre_map_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ void __iomem *vaddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
+ if (!vaddr)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
+
+/* Post-unmap, working on GAR */
+int acpi_post_unmap_gar(struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return 0;
+
+ rc = acpi_check_gar(reg, &paddr, 0);
+ if (rc)
+ return rc;
+
+ acpi_post_unmap(paddr, reg->bit_width / 8);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
+
+/*
+ * Can be used in atomic (including NMI) or process context. RCU read
+ * lock can only be released after the IO memory area accessing.
+ */
+static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ *val = readb(addr);
+ break;
+ case 16:
+ *val = readw(addr);
+ break;
+ case 32:
+ *val = readl(addr);
+ break;
+ case 64:
+ *val = readq(addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
+{
+ void __iomem *addr;
+
+ rcu_read_lock();
+ addr = __acpi_ioremap_fast(paddr, width);
+ switch (width) {
+ case 8:
+ writeb(val, addr);
+ break;
+ case 16:
+ writew(val, addr);
+ break;
+ case 32:
+ writel(val, addr);
+ break;
+ case 64:
+ writeq(val, addr);
+ break;
+ default:
+ return -EINVAL;
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* GAR accessing in atomic (including NMI) or process context */
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ *val = 0;
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_read_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_read);
+
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
+{
+ u64 paddr;
+ int rc;
+
+ rc = acpi_check_gar(reg, &paddr, 1);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ return acpi_atomic_write_mem(paddr, val, reg->bit_width);
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ return acpi_os_write_port(paddr, val, reg->bit_width);
+ default:
+ return -EINVAL;
+ }
+}
+EXPORT_SYMBOL_GPL(acpi_atomic_write);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index cf761b904e4a..ae9226de93a6 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -190,16 +190,16 @@ int acpi_bus_get_power(acpi_handle handle, int *state)
* Get the device's power state either directly (via _PSC) or
* indirectly (via power resources).
*/
- if (device->power.flags.explicit_get) {
+ if (device->power.flags.power_resources) {
+ result = acpi_power_get_inferred_state(device);
+ if (result)
+ return result;
+ } else if (device->power.flags.explicit_get) {
status = acpi_evaluate_integer(device->handle, "_PSC",
NULL, &psc);
if (ACPI_FAILURE(status))
return -ENODEV;
device->power.state = (int)psc;
- } else if (device->power.flags.power_resources) {
- result = acpi_power_get_inferred_state(device);
- if (result)
- return result;
}
*state = device->power.state;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index fd1801bdee66..d6471bb6852f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -201,14 +201,13 @@ unlock:
spin_unlock_irqrestore(&ec->curr_lock, flags);
}
-static void acpi_ec_gpe_query(void *ec_cxt);
+static int acpi_ec_sync_query(struct acpi_ec *ec);
-static int ec_check_sci(struct acpi_ec *ec, u8 state)
+static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
{
if (state & ACPI_EC_FLAG_SCI) {
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
- return acpi_os_execute(OSL_EC_BURST_HANDLER,
- acpi_ec_gpe_query, ec);
+ return acpi_ec_sync_query(ec);
}
return 0;
}
@@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
{
unsigned long tmp;
int ret = 0;
- pr_debug(PREFIX "transaction start\n");
- /* disable GPE during transaction if storm is detected */
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- acpi_disable_gpe(NULL, ec->gpe);
- }
if (EC_FLAGS_MSI)
udelay(ACPI_EC_MSI_UDELAY);
/* start transaction */
@@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
spin_unlock_irqrestore(&ec->curr_lock, tmp);
ret = ec_poll(ec);
- pr_debug(PREFIX "transaction end\n");
spin_lock_irqsave(&ec->curr_lock, tmp);
ec->curr = NULL;
spin_unlock_irqrestore(&ec->curr_lock, tmp);
- if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
- /* check if we received SCI during transaction */
- ec_check_sci(ec, acpi_ec_read_status(ec));
- /* it is safe to enable GPE outside of transaction */
- acpi_enable_gpe(NULL, ec->gpe);
- } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
- pr_info(PREFIX "GPE storm detected, "
- "transactions will use polling mode\n");
- set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
- }
return ret;
}
@@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
status = -ETIME;
goto end;
}
+ pr_debug(PREFIX "transaction start\n");
+ /* disable GPE during transaction if storm is detected */
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ acpi_disable_gpe(NULL, ec->gpe);
+ }
+
status = acpi_ec_transaction_unlocked(ec, t);
+
+ /* check if we received SCI during transaction */
+ ec_check_sci_sync(ec, acpi_ec_read_status(ec));
+ if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+ msleep(1);
+ /* it is safe to enable GPE outside of transaction */
+ acpi_enable_gpe(NULL, ec->gpe);
+ } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+ pr_info(PREFIX "GPE storm detected, "
+ "transactions will use polling mode\n");
+ set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+ }
+ pr_debug(PREFIX "transaction end\n");
end:
if (ec->global_lock)
acpi_release_global_lock(glk);
@@ -443,7 +445,7 @@ int ec_transaction(u8 command,
EXPORT_SYMBOL(ec_transaction);
-static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
{
int result;
u8 d;
@@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
.wlen = 0, .rlen = 1};
if (!ec || !data)
return -EINVAL;
-
/*
* Query the EC to find out which _Qxx method we need to evaluate.
* Note that successful completion of the query causes the ACPI_EC_SCI
* bit to be cleared (and thus clearing the interrupt source).
*/
-
- result = acpi_ec_transaction(ec, &t);
+ result = acpi_ec_transaction_unlocked(ec, &t);
if (result)
return result;
-
if (!d)
return -ENODATA;
-
*data = d;
return 0;
}
@@ -509,43 +507,79 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
-static void acpi_ec_gpe_query(void *ec_cxt)
+static void acpi_ec_run(void *cxt)
{
- struct acpi_ec *ec = ec_cxt;
- u8 value = 0;
- struct acpi_ec_query_handler *handler, copy;
-
- if (!ec || acpi_ec_query(ec, &value))
+ struct acpi_ec_query_handler *handler = cxt;
+ if (!handler)
return;
- mutex_lock(&ec->lock);
+ pr_debug(PREFIX "start query execution\n");
+ if (handler->func)
+ handler->func(handler->data);
+ else if (handler->handle)
+ acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
+ pr_debug(PREFIX "stop query execution\n");
+ kfree(handler);
+}
+
+static int acpi_ec_sync_query(struct acpi_ec *ec)
+{
+ u8 value = 0;
+ int status;
+ struct acpi_ec_query_handler *handler, *copy;
+ if ((status = acpi_ec_query_unlocked(ec, &value)))
+ return status;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
/* have custom handler for this bit */
- memcpy(&copy, handler, sizeof(copy));
- mutex_unlock(&ec->lock);
- if (copy.func) {
- copy.func(copy.data);
- } else if (copy.handle) {
- acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
- }
- return;
+ copy = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ memcpy(copy, handler, sizeof(*copy));
+ pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+ return acpi_os_execute((copy->func) ?
+ OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
+ acpi_ec_run, copy);
}
}
+ return 0;
+}
+
+static void acpi_ec_gpe_query(void *ec_cxt)
+{
+ struct acpi_ec *ec = ec_cxt;
+ if (!ec)
+ return;
+ mutex_lock(&ec->lock);
+ acpi_ec_sync_query(ec);
mutex_unlock(&ec->lock);
}
+static void acpi_ec_gpe_query(void *ec_cxt);
+
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
+{
+ if (state & ACPI_EC_FLAG_SCI) {
+ if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+ pr_debug(PREFIX "push gpe query to the queue\n");
+ return acpi_os_execute(OSL_NOTIFY_HANDLER,
+ acpi_ec_gpe_query, ec);
+ }
+ }
+ return 0;
+}
+
static u32 acpi_ec_gpe_handler(void *data)
{
struct acpi_ec *ec = data;
- u8 status;
pr_debug(PREFIX "~~~> interrupt\n");
- status = acpi_ec_read_status(ec);
- advance_transaction(ec, status);
- if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
+ advance_transaction(ec, acpi_ec_read_status(ec));
+ if (ec_transaction_done(ec) &&
+ (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
wake_up(&ec->wait);
- ec_check_sci(ec, status);
+ ec_check_sci(ec, acpi_ec_read_status(ec));
+ }
return ACPI_INTERRUPT_HANDLED;
}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index d0d25e2e1ced..1ac678d2c51c 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -435,7 +435,7 @@ acpi_system_write_wakeup_device(struct file *file,
found_dev->wakeup.gpe_device)) {
printk(KERN_WARNING
"ACPI: '%s' and '%s' have the same GPE, "
- "can't disable/enable one seperately\n",
+ "can't disable/enable one separately\n",
dev->pnp.bus_id, found_dev->pnp.bus_id);
dev->wakeup.state.enabled =
found_dev->wakeup.state.enabled;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 9863c98c81ba..e9b7b402dbfb 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -123,6 +123,8 @@ static const struct file_operations acpi_processor_info_fops = {
#endif
DEFINE_PER_CPU(struct acpi_processor *, processors);
+EXPORT_PER_CPU_SYMBOL(processors);
+
struct acpi_processor_errata errata __read_mostly;
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index d9339806df45..fd09229282ea 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -242,7 +242,7 @@ static int smbus_alarm(void *context)
case ACPI_SBS_CHARGER:
case ACPI_SBS_MANAGER:
case ACPI_SBS_BATTERY:
- acpi_os_execute(OSL_GPE_HANDLER,
+ acpi_os_execute(OSL_NOTIFY_HANDLER,
acpi_smbus_callback, hc);
default:;
}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 72e76b4b6538..b765790b32be 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -78,6 +78,13 @@ MODULE_LICENSE("GPL");
static int brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
+/*
+ * By default, we don't allow duplicate ACPI video bus devices
+ * under the same VGA controller
+ */
+static int allow_duplicates;
+module_param(allow_duplicates, bool, 0644);
+
static int register_count = 0;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -2239,11 +2246,47 @@ static int acpi_video_resume(struct acpi_device *device)
return AE_OK;
}
+static acpi_status
+acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
+ void **return_value)
+{
+ struct acpi_device *device = context;
+ struct acpi_device *sibling;
+ int result;
+
+ if (handle == device->handle)
+ return AE_CTRL_TERMINATE;
+
+ result = acpi_bus_get_device(handle, &sibling);
+ if (result)
+ return AE_OK;
+
+ if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
+ return AE_ALREADY_EXISTS;
+
+ return AE_OK;
+}
+
static int acpi_video_bus_add(struct acpi_device *device)
{
struct acpi_video_bus *video;
struct input_dev *input;
int error;
+ acpi_status status;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
+ device->parent->handle, 1,
+ acpi_video_bus_match, NULL,
+ device, NULL);
+ if (status == AE_ALREADY_EXISTS) {
+ printk(KERN_WARNING FW_BUG
+ "Duplicate ACPI video bus devices for the"
+ " same VGA controller, please try module "
+ "parameter \"video.allow_duplicates=1\""
+ "if the current driver doesn't work.\n");
+ if (!allow_duplicates)
+ return -ENODEV;
+ }
video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
if (!video)
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index b8bea100a160..1535bc401e59 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -93,6 +93,9 @@ enum {
AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
AHCI_RX_FIS_SZ,
+ AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
+ AHCI_CMD_TBL_AR_SZ +
+ (AHCI_RX_FIS_SZ * 16),
AHCI_IRQ_ON_SG = (1 << 31),
AHCI_CMD_ATAPI = (1 << 5),
AHCI_CMD_WRITE = (1 << 6),
@@ -170,6 +173,7 @@ enum {
PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
+ PORT_FBS = 0x40, /* FIS-based Switching */
/* PORT_IRQ_{STAT,MASK} bits */
PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
@@ -208,6 +212,7 @@ enum {
PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
+ PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
PORT_CMD_PMP = (1 << 17), /* PMP attached */
PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
@@ -222,6 +227,14 @@ enum {
PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
+ PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
+ PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
+ PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
+ PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
+ PORT_FBS_SDE = (1 << 2), /* FBS single device error */
+ PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
+ PORT_FBS_EN = (1 << 0), /* Enable FBS */
+
/* hpriv->flags bits */
AHCI_HFLAG_NO_NCQ = (1 << 0),
AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
@@ -304,6 +317,9 @@ struct ahci_port_priv {
unsigned int ncq_saw_dmas:1;
unsigned int ncq_saw_sdb:1;
u32 intr_mask; /* interrupts to enable */
+ bool fbs_supported; /* set iff FBS is supported */
+ bool fbs_enabled; /* set iff FBS is enabled */
+ int fbs_last_dev; /* save FBS.DEV of last FIS */
/* enclosure management info per PM slot */
struct ahci_em_priv em_priv[EM_MAX_SLOTS];
};
@@ -315,9 +331,12 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
+static void ahci_enable_fbs(struct ata_port *ap);
+static void ahci_disable_fbs(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
@@ -390,7 +409,7 @@ static struct scsi_host_template ahci_sht = {
static struct ata_port_operations ahci_ops = {
.inherits = &sata_pmp_port_ops,
- .qc_defer = sata_pmp_qc_defer_cmd_switch,
+ .qc_defer = ahci_pmp_qc_defer,
.qc_prep = ahci_qc_prep,
.qc_issue = ahci_qc_issue,
.qc_fill_rtf = ahci_qc_fill_rtf,
@@ -2045,6 +2064,17 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
return si;
}
+static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ahci_port_priv *pp = ap->private_data;
+
+ if (!sata_pmp_attached(ap) || pp->fbs_enabled)
+ return ata_std_qc_defer(qc);
+ else
+ return sata_pmp_qc_defer_cmd_switch(qc);
+}
+
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -2083,6 +2113,31 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
ahci_fill_cmd_slot(pp, qc->tag, opts);
}
+static void ahci_fbs_dec_intr(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int retries = 3;
+
+ DPRINTK("ENTER\n");
+ BUG_ON(!pp->fbs_enabled);
+
+ /* time to wait for DEC is not specified by AHCI spec,
+ * add a retry loop for safety.
+ */
+ writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ while ((fbs & PORT_FBS_DEC) && retries--) {
+ udelay(1);
+ fbs = readl(port_mmio + PORT_FBS);
+ }
+
+ if (fbs & PORT_FBS_DEC)
+ dev_printk(KERN_ERR, ap->host->dev,
+ "failed to clear device error\n");
+}
+
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -2091,12 +2146,26 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
struct ata_link *link = NULL;
struct ata_queued_cmd *active_qc;
struct ata_eh_info *active_ehi;
+ bool fbs_need_dec = false;
u32 serror;
- /* determine active link */
- ata_for_each_link(link, ap, EDGE)
- if (ata_link_active(link))
- break;
+ /* determine active link with error */
+ if (pp->fbs_enabled) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ int pmp = fbs >> PORT_FBS_DWE_OFFSET;
+
+ if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
+ ata_link_online(&ap->pmp_link[pmp])) {
+ link = &ap->pmp_link[pmp];
+ fbs_need_dec = true;
+ }
+
+ } else
+ ata_for_each_link(link, ap, EDGE)
+ if (ata_link_active(link))
+ break;
+
if (!link)
link = &ap->link;
@@ -2153,8 +2222,13 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
}
if (irq_stat & PORT_IRQ_IF_ERR) {
- host_ehi->err_mask |= AC_ERR_ATA_BUS;
- host_ehi->action |= ATA_EH_RESET;
+ if (fbs_need_dec)
+ active_ehi->err_mask |= AC_ERR_DEV;
+ else {
+ host_ehi->err_mask |= AC_ERR_ATA_BUS;
+ host_ehi->action |= ATA_EH_RESET;
+ }
+
ata_ehi_push_desc(host_ehi, "interface fatal error");
}
@@ -2169,7 +2243,10 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
if (irq_stat & PORT_IRQ_FREEZE)
ata_port_freeze(ap);
- else
+ else if (fbs_need_dec) {
+ ata_link_abort(link);
+ ahci_fbs_dec_intr(ap);
+ } else
ata_port_abort(ap);
}
@@ -2222,12 +2299,19 @@ static void ahci_port_intr(struct ata_port *ap)
/* If the 'N' bit in word 0 of the FIS is set,
* we just received asynchronous notification.
* Tell libata about it.
+ *
+ * Lack of SNotification should not appear in
+ * ahci 1.2, so the workaround is unnecessary
+ * when FBS is enabled.
*/
- const __le32 *f = pp->rx_fis + RX_FIS_SDB;
- u32 f0 = le32_to_cpu(f[0]);
-
- if (f0 & (1 << 15))
- sata_async_notification(ap);
+ if (pp->fbs_enabled)
+ WARN_ON_ONCE(1);
+ else {
+ const __le32 *f = pp->rx_fis + RX_FIS_SDB;
+ u32 f0 = le32_to_cpu(f[0]);
+ if (f0 & (1 << 15))
+ sata_async_notification(ap);
+ }
}
}
@@ -2321,6 +2405,15 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_NCQ)
writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
+
+ if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
+ u32 fbs = readl(port_mmio + PORT_FBS);
+ fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
+ fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
+ writel(fbs, port_mmio + PORT_FBS);
+ pp->fbs_last_dev = qc->dev->link->pmp;
+ }
+
writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
ahci_sw_activity(qc->dev->link);
@@ -2333,6 +2426,9 @@ static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
struct ahci_port_priv *pp = qc->ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+ if (pp->fbs_enabled)
+ d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
+
ata_tf_from_fis(d2h_fis, &qc->result_tf);
return true;
}
@@ -2381,6 +2477,71 @@ static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
ahci_kick_engine(ap);
}
+static void ahci_enable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN) {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
+ pp->fbs_enabled = true;
+ pp->fbs_last_dev = -1; /* initialization */
+ } else
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
+
+ ahci_start_engine(ap);
+}
+
+static void ahci_disable_fbs(struct ata_port *ap)
+{
+ struct ahci_port_priv *pp = ap->private_data;
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 fbs;
+ int rc;
+
+ if (!pp->fbs_supported)
+ return;
+
+ fbs = readl(port_mmio + PORT_FBS);
+ if ((fbs & PORT_FBS_EN) == 0) {
+ pp->fbs_enabled = false;
+ return;
+ }
+
+ rc = ahci_stop_engine(ap);
+ if (rc)
+ return;
+
+ writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
+ fbs = readl(port_mmio + PORT_FBS);
+ if (fbs & PORT_FBS_EN)
+ dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
+ else {
+ dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
+ pp->fbs_enabled = false;
+ }
+
+ ahci_start_engine(ap);
+}
+
static void ahci_pmp_attach(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
@@ -2391,6 +2552,8 @@ static void ahci_pmp_attach(struct ata_port *ap)
cmd |= PORT_CMD_PMP;
writel(cmd, port_mmio + PORT_CMD);
+ ahci_enable_fbs(ap);
+
pp->intr_mask |= PORT_IRQ_BAD_PMP;
writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
@@ -2401,6 +2564,8 @@ static void ahci_pmp_detach(struct ata_port *ap)
struct ahci_port_priv *pp = ap->private_data;
u32 cmd;
+ ahci_disable_fbs(ap);
+
cmd = readl(port_mmio + PORT_CMD);
cmd &= ~PORT_CMD_PMP;
writel(cmd, port_mmio + PORT_CMD);
@@ -2492,20 +2657,40 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
static int ahci_port_start(struct ata_port *ap)
{
+ struct ahci_host_priv *hpriv = ap->host->private_data;
struct device *dev = ap->host->dev;
struct ahci_port_priv *pp;
void *mem;
dma_addr_t mem_dma;
+ size_t dma_sz, rx_fis_sz;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
- mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
- GFP_KERNEL);
+ /* check FBS capability */
+ if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = ahci_port_base(ap);
+ u32 cmd = readl(port_mmio + PORT_CMD);
+ if (cmd & PORT_CMD_FBSCP)
+ pp->fbs_supported = true;
+ else
+ dev_printk(KERN_WARNING, dev,
+ "The port is not capable of FBS\n");
+ }
+
+ if (pp->fbs_supported) {
+ dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ * 16;
+ } else {
+ dma_sz = AHCI_PORT_PRIV_DMA_SZ;
+ rx_fis_sz = AHCI_RX_FIS_SZ;
+ }
+
+ mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
if (!mem)
return -ENOMEM;
- memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
+ memset(mem, 0, dma_sz);
/*
* First item in chunk of DMA memory: 32-slot command table,
@@ -2523,8 +2708,8 @@ static int ahci_port_start(struct ata_port *ap)
pp->rx_fis = mem;
pp->rx_fis_dma = mem_dma;
- mem += AHCI_RX_FIS_SZ;
- mem_dma += AHCI_RX_FIS_SZ;
+ mem += rx_fis_sz;
+ mem_dma += rx_fis_sz;
/*
* Third item: data area for storing a single command
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 22ff51bdbc8a..c6b9f5a818fd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2232,7 +2232,7 @@ retry:
* Some drives were very specific about that exact sequence.
*
* Note that ATA4 says lba is mandatory so the second check
- * shoud never trigger.
+ * should never trigger.
*/
if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
err_mask = ata_dev_init_params(dev, id[3], id[6]);
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 0bd48e8f21bd..3ec88f2c8665 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -11,9 +11,7 @@
*
*
* TODO
- * Maybe PLL mode
- * Look into engine reset on timeout errors. Should not be
- * required.
+ * Look into engine reset on timeout errors. Should not be required.
*/
@@ -27,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
-#define DRV_VERSION "0.6.7"
+#define DRV_VERSION "0.6.8"
struct hpt_clock {
u8 xfer_mode;
@@ -207,17 +205,8 @@ static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
{
struct hpt_clock *clocks = ap->host->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- u32 addr2 = 0x51 + 4 * ap->port_no;
+ u32 addr = 0x40 + 4 * adev->devno;
u32 mask, reg;
- u8 fast;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- if (fast & 0x80) {
- fast &= ~0x80;
- pci_write_config_byte(pdev, addr2, fast);
- }
/* determine timing mask and find matching clock entry */
if (mode < XFER_MW_DMA_0)
@@ -240,9 +229,9 @@ static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev,
* on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid
* problems handling I/O errors later.
*/
- pci_read_config_dword(pdev, addr1, &reg);
+ pci_read_config_dword(pdev, addr, &reg);
reg = ((reg & ~mask) | (clocks->timing & mask)) & ~0xc0000000;
- pci_write_config_dword(pdev, addr1, reg);
+ pci_write_config_dword(pdev, addr, reg);
}
/**
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4224cfccedef..228dc1a8992f 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -24,7 +24,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.14"
+#define DRV_VERSION "0.6.15"
struct hpt_clock {
u8 xfer_speed;
@@ -39,25 +39,24 @@ struct hpt_chip {
/* key for bus clock timings
* bit
- * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
- * DMA. cycles = value + 1
- * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
- * DMA. cycles = value + 1
- * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
+ * cycles = value + 1
+ * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
+ * cycles = value + 1
+ * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
* register access.
- * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
+ * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
* register access.
- * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
- * during task file register access.
- * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
- * xfer.
- * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ * 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
+ * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock.
+ * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
+ * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
* register access.
- * 28 UDMA enable
- * 29 DMA enable
- * 30 PIO_MST enable. if set, the chip is in bus master mode during
- * PIO.
- * 31 FIFO enable.
+ * 28 UDMA enable.
+ * 29 DMA enable.
+ * 30 PIO_MST enable. If set, the chip is in bus master mode during
+ * PIO xfer.
+ * 31 FIFO enable. Only for PIO.
*/
static struct hpt_clock hpt37x_timings_33[] = {
@@ -384,20 +383,12 @@ static int hpt37x_pre_reset(struct ata_link *link, unsigned long deadline)
return ata_sff_prereset(link, deadline);
}
-/**
- * hpt370_set_piomode - PIO setup
- * @ap: ATA interface
- * @adev: device on the interface
- *
- * Perform PIO mode setup.
- */
-
-static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void hpt370_set_mode(struct ata_port *ap, struct ata_device *adev,
+ u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
- u32 reg;
- u32 mode;
+ u32 reg, timing, mask;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
@@ -409,11 +400,31 @@ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
fast |= 0x01;
pci_write_config_byte(pdev, addr2, fast);
+ /* Determine timing mask and find matching mode entry */
+ if (mode < XFER_MW_DMA_0)
+ mask = 0xcfc3ffff;
+ else if (mode < XFER_UDMA_0)
+ mask = 0x31c001ff;
+ else
+ mask = 0x303c0000;
+
+ timing = hpt37x_find_mode(ap, mode);
+
pci_read_config_dword(pdev, addr1, &reg);
- mode = hpt37x_find_mode(ap, adev->pio_mode);
- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
- reg &= ~0xCFC3FFFF; /* Strip timing bits */
- pci_write_config_dword(pdev, addr1, reg | mode);
+ reg = (reg & ~mask) | (timing & mask);
+ pci_write_config_dword(pdev, addr1, reg);
+}
+/**
+ * hpt370_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Perform PIO mode setup.
+ */
+
+static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ hpt370_set_mode(ap, adev, adev->pio_mode);
}
/**
@@ -421,33 +432,12 @@ static void hpt370_set_piomode(struct ata_port *ap, struct ata_device *adev)
* @ap: ATA interface
* @adev: Device being configured
*
- * Set up the channel for MWDMA or UDMA modes. Much the same as with
- * PIO, load the mode number and then set MWDMA or UDMA flag.
+ * Set up the channel for MWDMA or UDMA modes.
*/
static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
- u32 reg, mode, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x02;
- fast |= 0x01;
- pci_write_config_byte(pdev, addr2, fast);
-
- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
-
- pci_read_config_dword(pdev, addr1, &reg);
- mode = hpt37x_find_mode(ap, adev->dma_mode);
- mode &= mask;
- reg &= ~mask;
- pci_write_config_dword(pdev, addr1, reg | mode);
+ hpt370_set_mode(ap, adev, adev->dma_mode);
}
/**
@@ -461,24 +451,25 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u8 dma_stat = ioread8(ap->ioaddr.bmdma_addr + 2);
- u8 dma_cmd;
void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+ u8 dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
+ u8 dma_cmd;
- if (dma_stat & 0x01) {
+ if (dma_stat & ATA_DMA_ACTIVE) {
udelay(20);
- dma_stat = ioread8(bmdma + 2);
+ dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
}
- if (dma_stat & 0x01) {
+ if (dma_stat & ATA_DMA_ACTIVE) {
/* Clear the engine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(10);
/* Stop DMA */
- dma_cmd = ioread8(bmdma );
- iowrite8(dma_cmd & 0xFE, bmdma);
+ dma_cmd = ioread8(bmdma + ATA_DMA_CMD);
+ iowrite8(dma_cmd & ~ATA_DMA_START, bmdma + ATA_DMA_CMD);
/* Clear Error */
- dma_stat = ioread8(bmdma + 2);
- iowrite8(dma_stat | 0x06 , bmdma + 2);
+ dma_stat = ioread8(bmdma + ATA_DMA_STATUS);
+ iowrite8(dma_stat | ATA_DMA_INTR | ATA_DMA_ERR,
+ bmdma + ATA_DMA_STATUS);
/* Clear the engine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(10);
@@ -486,20 +477,12 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
ata_bmdma_stop(qc);
}
-/**
- * hpt372_set_piomode - PIO setup
- * @ap: ATA interface
- * @adev: device on the interface
- *
- * Perform PIO mode setup.
- */
-
-static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void hpt372_set_mode(struct ata_port *ap, struct ata_device *adev,
+ u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
- u32 reg;
- u32 mode;
+ u32 reg, timing, mask;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
@@ -510,13 +493,32 @@ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
fast &= ~0x07;
pci_write_config_byte(pdev, addr2, fast);
+ /* Determine timing mask and find matching mode entry */
+ if (mode < XFER_MW_DMA_0)
+ mask = 0xcfc3ffff;
+ else if (mode < XFER_UDMA_0)
+ mask = 0x31c001ff;
+ else
+ mask = 0x303c0000;
+
+ timing = hpt37x_find_mode(ap, mode);
+
pci_read_config_dword(pdev, addr1, &reg);
- mode = hpt37x_find_mode(ap, adev->pio_mode);
+ reg = (reg & ~mask) | (timing & mask);
+ pci_write_config_dword(pdev, addr1, reg);
+}
+
+/**
+ * hpt372_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Perform PIO mode setup.
+ */
- printk("Find mode for %d reports %X\n", adev->pio_mode, mode);
- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
- reg &= ~0xCFC3FFFF; /* Strip timing bits */
- pci_write_config_dword(pdev, addr1, reg | mode);
+static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ hpt372_set_mode(ap, adev, adev->pio_mode);
}
/**
@@ -524,33 +526,12 @@ static void hpt372_set_piomode(struct ata_port *ap, struct ata_device *adev)
* @ap: ATA interface
* @adev: Device being configured
*
- * Set up the channel for MWDMA or UDMA modes. Much the same as with
- * PIO, load the mode number and then set MWDMA or UDMA flag.
+ * Set up the channel for MWDMA or UDMA modes.
*/
static void hpt372_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
- u32 reg, mode, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x07;
- pci_write_config_byte(pdev, addr2, fast);
-
- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
-
- pci_read_config_dword(pdev, addr1, &reg);
- mode = hpt37x_find_mode(ap, adev->dma_mode);
- printk("Find mode for DMA %d reports %X\n", adev->dma_mode, mode);
- mode &= mask;
- reg &= ~mask;
- pci_write_config_dword(pdev, addr1, reg | mode);
+ hpt372_set_mode(ap, adev, adev->dma_mode);
}
/**
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index dd26bc73bd9a..4a291221f277 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -25,7 +25,7 @@
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
-#define DRV_VERSION "0.3.8"
+#define DRV_VERSION "0.3.10"
enum {
HPT_PCI_FAST = (1 << 31),
@@ -45,25 +45,24 @@ struct hpt_chip {
/* key for bus clock timings
* bit
- * 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
- * DMA. cycles = value + 1
- * 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
- * DMA. cycles = value + 1
- * 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
+ * 0:3 data_high_time. Inactive time of DIOW_/DIOR_ for PIO and MW DMA.
+ * cycles = value + 1
+ * 4:8 data_low_time. Active time of DIOW_/DIOR_ for PIO and MW DMA.
+ * cycles = value + 1
+ * 9:12 cmd_high_time. Inactive time of DIOW_/DIOR_ during task file
* register access.
- * 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
+ * 13:17 cmd_low_time. Active time of DIOW_/DIOR_ during task file
* register access.
- * 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
- * during task file register access.
- * 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
- * xfer.
- * 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
+ * 18:20 udma_cycle_time. Clock cycles for UDMA xfer.
+ * 21 CLK frequency for UDMA: 0=ATA clock, 1=dual ATA clock.
+ * 22:24 pre_high_time. Time to initialize 1st cycle for PIO and MW DMA xfer.
+ * 25:27 cmd_pre_high_time. Time to initialize 1st PIO cycle for task file
* register access.
- * 28 UDMA enable
- * 29 DMA enable
- * 30 PIO_MST enable. if set, the chip is in bus master mode during
- * PIO.
- * 31 FIFO enable.
+ * 28 UDMA enable.
+ * 29 DMA enable.
+ * 30 PIO_MST enable. If set, the chip is in bus master mode during
+ * PIO xfer.
+ * 31 FIFO enable. Only for PIO.
*/
/* 66MHz DPLL clocks */
@@ -161,20 +160,12 @@ static int hpt3x2n_pre_reset(struct ata_link *link, unsigned long deadline)
return ata_sff_prereset(link, deadline);
}
-/**
- * hpt3x2n_set_piomode - PIO setup
- * @ap: ATA interface
- * @adev: device on the interface
- *
- * Perform PIO mode setup.
- */
-
-static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
+static void hpt3x2n_set_mode(struct ata_port *ap, struct ata_device *adev,
+ u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
- u32 reg;
- u32 mode;
+ u32 reg, timing, mask;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
@@ -185,11 +176,32 @@ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
fast &= ~0x07;
pci_write_config_byte(pdev, addr2, fast);
+ /* Determine timing mask and find matching mode entry */
+ if (mode < XFER_MW_DMA_0)
+ mask = 0xcfc3ffff;
+ else if (mode < XFER_UDMA_0)
+ mask = 0x31c001ff;
+ else
+ mask = 0x303c0000;
+
+ timing = hpt3x2n_find_mode(ap, mode);
+
pci_read_config_dword(pdev, addr1, &reg);
- mode = hpt3x2n_find_mode(ap, adev->pio_mode);
- mode &= 0xCFC3FFFF; /* Leave DMA bits alone */
- reg &= ~0xCFC3FFFF; /* Strip timing bits */
- pci_write_config_dword(pdev, addr1, reg | mode);
+ reg = (reg & ~mask) | (timing & mask);
+ pci_write_config_dword(pdev, addr1, reg);
+}
+
+/**
+ * hpt3x2n_set_piomode - PIO setup
+ * @ap: ATA interface
+ * @adev: device on the interface
+ *
+ * Perform PIO mode setup.
+ */
+
+static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
+{
+ hpt3x2n_set_mode(ap, adev, adev->pio_mode);
}
/**
@@ -197,32 +209,12 @@ static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
* @ap: ATA interface
* @adev: Device being configured
*
- * Set up the channel for MWDMA or UDMA modes. Much the same as with
- * PIO, load the mode number and then set MWDMA or UDMA flag.
+ * Set up the channel for MWDMA or UDMA modes.
*/
static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 addr1, addr2;
- u32 reg, mode, mask;
- u8 fast;
-
- addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
- addr2 = 0x51 + 4 * ap->port_no;
-
- /* Fast interrupt prediction disable, hold off interrupt disable */
- pci_read_config_byte(pdev, addr2, &fast);
- fast &= ~0x07;
- pci_write_config_byte(pdev, addr2, fast);
-
- mask = adev->dma_mode < XFER_UDMA_0 ? 0x31C001FF : 0x303C0000;
-
- pci_read_config_dword(pdev, addr1, &reg);
- mode = hpt3x2n_find_mode(ap, adev->dma_mode);
- mode &= mask;
- reg &= ~mask;
- pci_write_config_dword(pdev, addr1, reg | mode);
+ hpt3x2n_set_mode(ap, adev, adev->dma_mode);
}
/**
@@ -544,16 +536,16 @@ static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
pci_mhz);
/* Set our private data up. We only need a few flags so we use
it directly */
- if (pci_mhz > 60) {
+ if (pci_mhz > 60)
hpriv = (void *)(PCI66 | USE_DPLL);
- /*
- * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
- * the MISC. register to stretch the UltraDMA Tss timing.
- * NOTE: This register is only writeable via I/O space.
- */
- if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
- outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
- }
+
+ /*
+ * On HPT371N, if ATA clock is 66 MHz we must set bit 2 in
+ * the MISC. register to stretch the UltraDMA Tss timing.
+ * NOTE: This register is only writeable via I/O space.
+ */
+ if (dev->device == PCI_DEVICE_ID_TTI_HPT371)
+ outb(inb(iobase + 0x9c) | 0x04, iobase + 0x9c);
/* Now kick off ATA set up */
return ata_pci_sff_init_one(dev, ppi, &hpt3x2n_sht, hpriv);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc53fed89b1e..f7d6ebaa0418 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2064,12 +2064,10 @@ fore200e_get_esi(struct fore200e* fore200e)
return -EBUSY;
}
- printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
fore200e->name,
(prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
- prom->serial_number & 0xFFFF,
- prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
- prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
+ prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
for (i = 0; i < ESI_LEN; i++) {
fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
@@ -2845,13 +2843,12 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" interrupt line:\t\t%s\n"
" physical base address:\t0x%p\n"
" virtual base address:\t0x%p\n"
- " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
+ " factory address (ESI):\t%pM\n"
" board serial number:\t\t%d\n\n",
fore200e_irq_itoa(fore200e->irq),
(void*)fore200e->phys_base,
fore200e->virt_base,
- fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
- fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
+ fore200e->esi,
fore200e->esi[4] * 256 + fore200e->esi[5]);
return len;
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e33ae0025b12..01f36c08cb52 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3557,10 +3557,7 @@ init_card(struct atm_dev *dev)
if (tmp) {
memcpy(card->atmdev->esi, tmp->dev_addr, 6);
- printk("%s: ESI %02x:%02x:%02x:%02x:%02x:%02x\n",
- card->name, card->atmdev->esi[0], card->atmdev->esi[1],
- card->atmdev->esi[2], card->atmdev->esi[3],
- card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
}
/*
* XXX: </hack>
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cf97c34cbaf1..7fe7c324e7ef 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -998,9 +998,7 @@ static int __devinit eeprom_validate(struct lanai_dev *lanai)
(unsigned int) e[EEPROM_MAC_REV + i]);
return -EIO;
}
- DPRINTK("eeprom: MAC address = %02X:%02X:%02X:%02X:%02X:%02X\n",
- e[EEPROM_MAC + 0], e[EEPROM_MAC + 1], e[EEPROM_MAC + 2],
- e[EEPROM_MAC + 3], e[EEPROM_MAC + 4], e[EEPROM_MAC + 5]);
+ DPRINTK("eeprom: MAC address = %pM\n", &e[EEPROM_MAC]);
/* Verify serial number */
lanai->serialno = eeprom_be4(lanai, EEPROM_SERIAL);
v = eeprom_be4(lanai, EEPROM_SERIAL_REV);
@@ -2483,14 +2481,8 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
return sprintf(page, "revision: board=%d, pci_if=%d\n",
lanai->board_rev, (int) lanai->pci->revision);
if (left-- == 0)
- return sprintf(page, "EEPROM ESI: "
- "%02X:%02X:%02X:%02X:%02X:%02X\n",
- lanai->eeprom[EEPROM_MAC + 0],
- lanai->eeprom[EEPROM_MAC + 1],
- lanai->eeprom[EEPROM_MAC + 2],
- lanai->eeprom[EEPROM_MAC + 3],
- lanai->eeprom[EEPROM_MAC + 4],
- lanai->eeprom[EEPROM_MAC + 5]);
+ return sprintf(page, "EEPROM ESI: %pM\n",
+ &lanai->eeprom[EEPROM_MAC]);
if (left-- == 0)
return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, "
"GPIN=%d\n", (lanai->status & STATUS_SOOL) ? 1 : 0,
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 3da804b1627d..50838407b117 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -807,9 +807,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
}
}
- printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
- card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
- card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
card->atmdev->dev_data = card;
card->atmdev->ci_range.vpi_bits = card->vpibits;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 48adf80926a0..2e7e46eb152c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -25,6 +25,7 @@
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/async.h>
#include "../base.h"
#include "power.h"
@@ -42,6 +43,7 @@
LIST_HEAD(dpm_list);
static DEFINE_MUTEX(dpm_list_mtx);
+static pm_message_t pm_transition;
/*
* Set once the preparation of devices for a PM transition has started, reset
@@ -56,6 +58,7 @@ static bool transition_started;
void device_pm_init(struct device *dev)
{
dev->power.status = DPM_ON;
+ init_completion(&dev->power.completion);
pm_runtime_init(dev);
}
@@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev)
pr_debug("PM: Removing info for %s:%s\n",
dev->bus ? dev->bus->name : "No Bus",
kobject_name(&dev->kobj));
+ complete_all(&dev->power.completion);
mutex_lock(&dpm_list_mtx);
list_del_init(&dev->power.entry);
mutex_unlock(&dpm_list_mtx);
@@ -188,6 +192,31 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
}
/**
+ * dpm_wait - Wait for a PM operation to complete.
+ * @dev: Device to wait for.
+ * @async: If unset, wait only if the device's power.async_suspend flag is set.
+ */
+static void dpm_wait(struct device *dev, bool async)
+{
+ if (!dev)
+ return;
+
+ if (async || (pm_async_enabled && dev->power.async_suspend))
+ wait_for_completion(&dev->power.completion);
+}
+
+static int dpm_wait_fn(struct device *dev, void *async_ptr)
+{
+ dpm_wait(dev, *((bool *)async_ptr));
+ return 0;
+}
+
+static void dpm_wait_for_children(struct device *dev, bool async)
+{
+ device_for_each_child(dev, &async, dpm_wait_fn);
+}
+
+/**
* pm_op - Execute the PM operation appropriate for given PM event.
* @dev: Device to handle.
* @ops: PM operations to choose from.
@@ -271,8 +300,9 @@ static int pm_noirq_op(struct device *dev,
ktime_t calltime, delta, rettime;
if (initcall_debug) {
- pr_info("calling %s_i+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
+ pr_info("calling %s+ @ %i, parent: %s\n",
+ dev_name(dev), task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
calltime = ktime_get();
}
@@ -446,8 +476,8 @@ EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
* legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * dev: Device to resume.
- * cb: Resume callback to execute.
+ * @dev: Device to resume.
+ * @cb: Resume callback to execute.
*/
static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
{
@@ -468,16 +498,20 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being resumed asynchronously.
*/
-static int device_resume(struct device *dev, pm_message_t state)
+static int device_resume(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
+ dpm_wait(dev->parent, async);
down(&dev->sem);
+ dev->power.status = DPM_RESUMING;
+
if (dev->bus) {
if (dev->bus->pm) {
pm_dev_dbg(dev, state, "");
@@ -510,11 +544,30 @@ static int device_resume(struct device *dev, pm_message_t state)
}
End:
up(&dev->sem);
+ complete_all(&dev->power.completion);
TRACE_RESUME(error);
return error;
}
+static void async_resume(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ pm_dev_dbg(dev, pm_transition, "async ");
+ error = device_resume(dev, pm_transition, true);
+ if (error)
+ pm_dev_err(dev, pm_transition, " async", error);
+ put_device(dev);
+}
+
+static bool is_async(struct device *dev)
+{
+ return dev->power.async_suspend && pm_async_enabled
+ && !pm_trace_is_enabled();
+}
+
/**
* dpm_resume - Execute "resume" callbacks for non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -525,21 +578,34 @@ static int device_resume(struct device *dev, pm_message_t state)
static void dpm_resume(pm_message_t state)
{
struct list_head list;
+ struct device *dev;
ktime_t starttime = ktime_get();
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
- while (!list_empty(&dpm_list)) {
- struct device *dev = to_device(dpm_list.next);
+ pm_transition = state;
+
+ list_for_each_entry(dev, &dpm_list, power.entry) {
+ if (dev->power.status < DPM_OFF)
+ continue;
+
+ INIT_COMPLETION(dev->power.completion);
+ if (is_async(dev)) {
+ get_device(dev);
+ async_schedule(async_resume, dev);
+ }
+ }
+ while (!list_empty(&dpm_list)) {
+ dev = to_device(dpm_list.next);
get_device(dev);
- if (dev->power.status >= DPM_OFF) {
+ if (dev->power.status >= DPM_OFF && !is_async(dev)) {
int error;
- dev->power.status = DPM_RESUMING;
mutex_unlock(&dpm_list_mtx);
- error = device_resume(dev, state);
+ pm_dev_dbg(dev, state, "sync ");
+ error = device_resume(dev, state, false);
mutex_lock(&dpm_list_mtx);
if (error)
@@ -554,6 +620,7 @@ static void dpm_resume(pm_message_t state)
}
list_splice(&list, &dpm_list);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
dpm_show_time(starttime, state, NULL);
}
@@ -711,8 +778,9 @@ EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
/**
* legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
- * dev: Device to suspend.
- * cb: Suspend callback to execute.
+ * @dev: Device to suspend.
+ * @state: PM transition of the system being carried out.
+ * @cb: Suspend callback to execute.
*/
static int legacy_suspend(struct device *dev, pm_message_t state,
int (*cb)(struct device *dev, pm_message_t state))
@@ -730,17 +798,24 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
return error;
}
+static int async_error;
+
/**
* device_suspend - Execute "suspend" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
+ * @async: If true, the device is being suspended asynchronously.
*/
-static int device_suspend(struct device *dev, pm_message_t state)
+static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
int error = 0;
+ dpm_wait_for_children(dev, async);
down(&dev->sem);
+ if (async_error)
+ goto End;
+
if (dev->class) {
if (dev->class->pm) {
pm_dev_dbg(dev, state, "class ");
@@ -771,12 +846,46 @@ static int device_suspend(struct device *dev, pm_message_t state)
error = legacy_suspend(dev, state, dev->bus->suspend);
}
}
+
+ if (!error)
+ dev->power.status = DPM_OFF;
+
End:
up(&dev->sem);
+ complete_all(&dev->power.completion);
return error;
}
+static void async_suspend(void *data, async_cookie_t cookie)
+{
+ struct device *dev = (struct device *)data;
+ int error;
+
+ pm_dev_dbg(dev, pm_transition, "async ");
+ error = __device_suspend(dev, pm_transition, true);
+ if (error) {
+ pm_dev_err(dev, pm_transition, " async", error);
+ async_error = error;
+ }
+
+ put_device(dev);
+}
+
+static int device_suspend(struct device *dev)
+{
+ INIT_COMPLETION(dev->power.completion);
+
+ if (pm_async_enabled && dev->power.async_suspend) {
+ get_device(dev);
+ async_schedule(async_suspend, dev);
+ return 0;
+ }
+
+ pm_dev_dbg(dev, pm_transition, "sync ");
+ return __device_suspend(dev, pm_transition, false);
+}
+
/**
* dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
* @state: PM transition of the system being carried out.
@@ -789,13 +898,15 @@ static int dpm_suspend(pm_message_t state)
INIT_LIST_HEAD(&list);
mutex_lock(&dpm_list_mtx);
+ pm_transition = state;
+ async_error = 0;
while (!list_empty(&dpm_list)) {
struct device *dev = to_device(dpm_list.prev);
get_device(dev);
mutex_unlock(&dpm_list_mtx);
- error = device_suspend(dev, state);
+ error = device_suspend(dev);
mutex_lock(&dpm_list_mtx);
if (error) {
@@ -803,13 +914,17 @@ static int dpm_suspend(pm_message_t state)
put_device(dev);
break;
}
- dev->power.status = DPM_OFF;
if (!list_empty(&dev->power.entry))
list_move(&dev->power.entry, &list);
put_device(dev);
+ if (async_error)
+ break;
}
list_splice(&list, dpm_list.prev);
mutex_unlock(&dpm_list_mtx);
+ async_synchronize_full();
+ if (!error)
+ error = async_error;
if (!error)
dpm_show_time(starttime, state, NULL);
return error;
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b8fa1aa5225a..c0bd03c83b9c 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {}
#ifdef CONFIG_PM_SLEEP
-/*
- * main.c
- */
+/* kernel/power/main.c */
+extern int pm_async_enabled;
+/* drivers/base/power/main.c */
extern struct list_head dpm_list; /* The active device list */
static inline struct device *to_device(struct list_head *entry)
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 596aeecfdffe..45d8e67a5612 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -38,6 +38,22 @@
* wakeup events internally (unless they are disabled), keeping
* their hardware in low power modes whenever they're unused. This
* saves runtime power, without requiring system-wide sleep states.
+ *
+ * async - Report/change current async suspend setting for the device
+ *
+ * If set, the PM core will attempt to suspend and resume the device during
+ * system power transitions (e.g. suspend to RAM, hibernation) in parallel
+ * with other devices it doesn't appear to depend on (to the PM core's
+ * knowledge).
+ *
+ * + "enabled\n" to permit the asynchronous suspend/resume of the device
+ * + "disabled\n" to forbid it
+ *
+ * NOTE: It generally is unsafe to permit the asynchronous suspend/resume
+ * of a device unless it is certain that all of the PM dependencies of the
+ * device are known to the PM core. However, for some devices this
+ * attribute is set to "enabled" by bus type code or device drivers and in
+ * that cases it should be safe to leave the default value.
*/
static const char enabled[] = "enabled";
@@ -77,9 +93,40 @@ wake_store(struct device * dev, struct device_attribute *attr,
static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store);
+#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+static ssize_t async_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n",
+ device_async_suspend_enabled(dev) ? enabled : disabled);
+}
+
+static ssize_t async_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ char *cp;
+ int len = n;
+
+ cp = memchr(buf, '\n', n);
+ if (cp)
+ len = cp - buf;
+ if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
+ device_enable_async_suspend(dev);
+ else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
+ device_disable_async_suspend(dev);
+ else
+ return -EINVAL;
+ return n;
+}
+
+static DEVICE_ATTR(async, 0644, async_show, async_store);
+#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */
static struct attribute * power_attrs[] = {
&dev_attr_wakeup.attr,
+#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG
+ &dev_attr_async.attr,
+#endif
NULL,
};
static struct attribute_group pm_attr_group = {
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index ce1fa923c414..7412b5d4f5f3 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -7134,7 +7134,7 @@ static struct DAC960_privdata DAC960_P_privdata = {
.MemoryWindowSize = DAC960_PD_RegisterWindowSize,
};
-static struct pci_device_id DAC960_id_table[] = {
+static const struct pci_device_id DAC960_id_table[] = {
{
.vendor = PCI_VENDOR_ID_MYLEX,
.device = PCI_DEVICE_ID_MYLEX_DAC960_GEM,
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 64a223b0cc22..eabc01a25813 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -986,8 +986,12 @@ aoecmd_cfg_rsp(struct sk_buff *skb)
}
sysminor = SYSMINOR(aoemajor, h->minor);
- if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
- printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
+ if (h->minor >= NPERSHELF) {
+ printk(KERN_INFO "aoe: e%ld.%d: AoE minor address too large\n",
+ aoemajor, (int) h->minor);
+ return;
+ } else if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
+ printk(KERN_INFO "aoe: e%ld.%d: AoE major address too large\n",
aoemajor, (int) h->minor);
return;
}
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index a5af1d6dda8b..e35cf59cbfde 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1470,8 +1470,6 @@ repeat:
void do_fd_request(struct request_queue * q)
{
- unsigned long flags;
-
DPRINT(("do_fd_request for pid %d\n",current->pid));
while( fdc_busy ) sleep_on( &fdc_wait );
fdc_busy = 1;
diff --git a/drivers/block/drbd/Kconfig b/drivers/block/drbd/Kconfig
index f4acd04ebeef..df0983787390 100644
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -3,7 +3,7 @@
#
comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
- depends on !PROC_FS || !INET || !CONNECTOR
+ depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
config BLK_DEV_DRBD
tristate "DRBD Distributed Replicated Block Device support"
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index c97558763430..2bf3a6ef3684 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1275,7 +1275,7 @@ struct bm_extent {
#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_BM
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
@@ -1371,10 +1371,9 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *,
- struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9348f33f6242..e898ad9eb1c3 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1298,6 +1298,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
dev_err(DEV, "Sending state in drbd_io_error() failed\n");
}
+ wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
lc_destroy(mdev->resync);
mdev->resync = NULL;
lc_destroy(mdev->act_log);
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 4e0726aa53b0..1292e0620663 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
* Returns 0 on success, negative return values indicate errors.
* You should call drbd_md_sync() after calling this function.
*/
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
{
sector_t prev_first_sect, prev_size; /* previous meta location */
sector_t la_size;
@@ -541,7 +541,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_ho
/* TODO: should only be some assert here, not (re)init... */
drbd_md_set_sector_offsets(mdev, mdev->ldev);
- size = drbd_new_dev_size(mdev, mdev->ldev);
+ size = drbd_new_dev_size(mdev, mdev->ldev, force);
if (drbd_get_capacity(mdev->this_bdev) != size ||
drbd_bm_capacity(mdev) != size) {
@@ -596,7 +596,7 @@ out:
}
sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
sector_t p_size = mdev->p_size; /* partner's disk size. */
sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
@@ -606,6 +606,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
m_size = drbd_get_max_capacity(bdev);
+ if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
+ dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+ p_size = m_size;
+ }
+
if (p_size && m_size) {
size = min_t(sector_t, p_size, m_size);
} else {
@@ -965,7 +970,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* Prevent shrinking of consistent devices ! */
if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
- drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+ drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
dev_warn(DEV, "refusing to truncate a consistent device\n");
retcode = ERR_DISK_TO_SMALL;
goto force_diskless_dec;
@@ -1052,7 +1057,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
!drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
set_bit(USE_DEGR_WFC_T, &mdev->flags);
- dd = drbd_determin_dev_size(mdev);
+ dd = drbd_determin_dev_size(mdev, 0);
if (dd == dev_size_error) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
@@ -1271,7 +1276,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
goto fail;
}
- if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
+ if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
retcode = ERR_AUTH_ALG_ND;
goto fail;
}
@@ -1504,7 +1509,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
- dd = drbd_determin_dev_size(mdev);
+ dd = drbd_determin_dev_size(mdev, rs.resize_force);
drbd_md_sync(mdev);
put_ldev(mdev);
if (dd == dev_size_error) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 259c1351b152..f22a5283128a 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -878,9 +878,13 @@ retry:
if (mdev->cram_hmac_tfm) {
/* drbd_request_state(mdev, NS(conn, WFAuth)); */
- if (!drbd_do_auth(mdev)) {
+ switch (drbd_do_auth(mdev)) {
+ case -1:
dev_err(DEV, "Authentication of peer failed\n");
return -1;
+ case 0:
+ dev_err(DEV, "Authentication of peer failed, trying again.\n");
+ return 0;
}
}
@@ -1201,10 +1205,11 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
case WO_bdev_flush:
case WO_drain_io:
- D_ASSERT(rv == FE_STILL_LIVE);
- set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
- drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
- rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+ if (rv == FE_STILL_LIVE) {
+ set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
+ drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+ rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+ }
if (rv == FE_RECYCLED)
return TRUE;
@@ -2865,7 +2870,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
/* Never shrink a device with usable data during connect.
But allow online shrinking if we are connected. */
- if (drbd_new_dev_size(mdev, mdev->ldev) <
+ if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
drbd_get_capacity(mdev->this_bdev) &&
mdev->state.disk >= D_OUTDATED &&
mdev->state.conn < C_CONNECTED) {
@@ -2880,7 +2885,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
#undef min_not_zero
if (get_ldev(mdev)) {
- dd = drbd_determin_dev_size(mdev);
+ dd = drbd_determin_dev_size(mdev, 0);
put_ldev(mdev);
if (dd == dev_size_error)
return FALSE;
@@ -3830,10 +3835,17 @@ static int drbd_do_auth(struct drbd_conf *mdev)
{
dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
- return 0;
+ return -1;
}
#else
#define CHALLENGE_LEN 64
+
+/* Return value:
+ 1 - auth succeeded,
+ 0 - failed, try again (network error),
+ -1 - auth failed, don't try again.
+*/
+
static int drbd_do_auth(struct drbd_conf *mdev)
{
char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
@@ -3854,7 +3866,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
(u8 *)mdev->net_conf->shared_secret, key_len);
if (rv) {
dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3877,14 +3889,14 @@ static int drbd_do_auth(struct drbd_conf *mdev)
if (p.length > CHALLENGE_LEN*2) {
dev_err(DEV, "expected AuthChallenge payload too big.\n");
- rv = 0;
+ rv = -1;
goto fail;
}
peers_ch = kmalloc(p.length, GFP_NOIO);
if (peers_ch == NULL) {
dev_err(DEV, "kmalloc of peers_ch failed\n");
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3900,7 +3912,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
response = kmalloc(resp_size, GFP_NOIO);
if (response == NULL) {
dev_err(DEV, "kmalloc of response failed\n");
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3910,7 +3922,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = crypto_hash_digest(&desc, &sg, sg.length, response);
if (rv) {
dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3944,9 +3956,9 @@ static int drbd_do_auth(struct drbd_conf *mdev)
}
right_response = kmalloc(resp_size, GFP_NOIO);
- if (response == NULL) {
+ if (right_response == NULL) {
dev_err(DEV, "kmalloc of right_response failed\n");
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3955,7 +3967,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
if (rv) {
dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
- rv = 0;
+ rv = -1;
goto fail;
}
@@ -3964,6 +3976,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
if (rv)
dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
resp_size, mdev->net_conf->cram_hmac_alg);
+ else
+ rv = -1;
fail:
kfree(peers_ch);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index a7c4184f4a63..7bd7b2f83116 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -409,7 +409,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static void carm_remove_one (struct pci_dev *pdev);
static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-static struct pci_device_id carm_pci_tbl[] = {
+static const struct pci_device_id carm_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
{ } /* terminate list */
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index c739b203fe91..d86d1357ccef 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -393,7 +393,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum);
#define ub_usb_ids usb_storage_usb_ids
#else
-static struct usb_device_id ub_usb_ids[] = {
+static const struct usb_device_id ub_usb_ids[] = {
{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
{ }
};
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 51042f0ba7e1..c17e622f85ee 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -404,7 +404,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
kfree(vblk);
}
-static struct virtio_device_id id_table[] = {
+static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
};
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 05a31e55d278..a84702d1a35e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1050,7 +1050,7 @@ static const struct block_device_operations xlvbd_block_fops =
};
-static struct xenbus_device_id blkfront_ids[] = {
+static const struct xenbus_device_id blkfront_ids[] = {
{ "vbd" },
{ "" }
};
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index e5c5415eb45e..e1c95e208a66 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1227,7 +1227,7 @@ static int __devexit ace_of_remove(struct of_device *op)
}
/* Match table for of_platform binding */
-static struct of_device_id ace_of_match[] __devinitdata = {
+static const struct of_device_id ace_of_match[] __devinitconst = {
{ .compatible = "xlnx,opb-sysace-1.00.b", },
{ .compatible = "xlnx,opb-sysace-1.00.c", },
{ .compatible = "xlnx,xps-sysace-1.00.a", },
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 652367aa6546..058fbccf2f52 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -195,5 +195,16 @@ config BT_MRVL_SDIO
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
-endmenu
+config BT_ATH3K
+ tristate "Atheros firmware download driver"
+ depends on BT_HCIBTUSB
+ select FW_LOADER
+ help
+ Bluetooth firmware download driver.
+ This driver loads the firmware into the Atheros Bluetooth
+ chipset.
+ Say Y here to compile support for "Atheros firmware download driver"
+ into the kernel or say M to compile it as module (ath3k).
+
+endmenu
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index b3f57d2d4eb0..7e5aed598121 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_BT_HCIBTUART) += btuart_cs.o
obj-$(CONFIG_BT_HCIBTUSB) += btusb.o
obj-$(CONFIG_BT_HCIBTSDIO) += btsdio.o
+obj-$(CONFIG_BT_ATH3K) += ath3k.o
obj-$(CONFIG_BT_MRVL) += btmrvl.o
obj-$(CONFIG_BT_MRVL_SDIO) += btmrvl_sdio.o
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
new file mode 100644
index 000000000000..add9485ca5b6
--- /dev/null
+++ b/drivers/bluetooth/ath3k.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/usb.h>
+#include <net/bluetooth/bluetooth.h>
+
+#define VERSION "1.0"
+
+
+static struct usb_device_id ath3k_table[] = {
+ /* Atheros AR3011 */
+ { USB_DEVICE(0x0CF3, 0x3000) },
+ { } /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, ath3k_table);
+
+#define USB_REQ_DFU_DNLOAD 1
+#define BULK_SIZE 4096
+
+struct ath3k_data {
+ struct usb_device *udev;
+ u8 *fw_data;
+ u32 fw_size;
+ u32 fw_sent;
+};
+
+static int ath3k_load_firmware(struct ath3k_data *data,
+ unsigned char *firmware,
+ int count)
+{
+ u8 *send_buf;
+ int err, pipe, len, size, sent = 0;
+
+ BT_DBG("ath3k %p udev %p", data, data->udev);
+
+ pipe = usb_sndctrlpipe(data->udev, 0);
+
+ if ((usb_control_msg(data->udev, pipe,
+ USB_REQ_DFU_DNLOAD,
+ USB_TYPE_VENDOR, 0, 0,
+ firmware, 20, USB_CTRL_SET_TIMEOUT)) < 0) {
+ BT_ERR("Can't change to loading configuration err");
+ return -EBUSY;
+ }
+ sent += 20;
+ count -= 20;
+
+ send_buf = kmalloc(BULK_SIZE, GFP_ATOMIC);
+ if (!send_buf) {
+ BT_ERR("Can't allocate memory chunk for firmware");
+ return -ENOMEM;
+ }
+
+ while (count) {
+ size = min_t(uint, count, BULK_SIZE);
+ pipe = usb_sndbulkpipe(data->udev, 0x02);
+ memcpy(send_buf, firmware + sent, size);
+
+ err = usb_bulk_msg(data->udev, pipe, send_buf, size,
+ &len, 3000);
+
+ if (err || (len != size)) {
+ BT_ERR("Error in firmware loading err = %d,"
+ "len = %d, size = %d", err, len, size);
+ goto error;
+ }
+
+ sent += size;
+ count -= size;
+ }
+
+ kfree(send_buf);
+ return 0;
+
+error:
+ kfree(send_buf);
+ return err;
+}
+
+static int ath3k_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ const struct firmware *firmware;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct ath3k_data *data;
+ int size;
+
+ BT_DBG("intf %p id %p", intf, id);
+
+ if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->udev = udev;
+
+ if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
+ kfree(data);
+ return -EIO;
+ }
+
+ size = max_t(uint, firmware->size, 4096);
+ data->fw_data = kmalloc(size, GFP_KERNEL);
+ if (!data->fw_data) {
+ release_firmware(firmware);
+ kfree(data);
+ return -ENOMEM;
+ }
+
+ memcpy(data->fw_data, firmware->data, firmware->size);
+ data->fw_size = firmware->size;
+ data->fw_sent = 0;
+ release_firmware(firmware);
+
+ usb_set_intfdata(intf, data);
+ if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
+ usb_set_intfdata(intf, NULL);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ath3k_disconnect(struct usb_interface *intf)
+{
+ struct ath3k_data *data = usb_get_intfdata(intf);
+
+ BT_DBG("ath3k_disconnect intf %p", intf);
+
+ kfree(data->fw_data);
+ kfree(data);
+}
+
+static struct usb_driver ath3k_driver = {
+ .name = "ath3k",
+ .probe = ath3k_probe,
+ .disconnect = ath3k_disconnect,
+ .id_table = ath3k_table,
+};
+
+static int __init ath3k_init(void)
+{
+ BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
+ return usb_register(&ath3k_driver);
+}
+
+static void __exit ath3k_exit(void)
+{
+ usb_deregister(&ath3k_driver);
+}
+
+module_init(ath3k_init);
+module_exit(ath3k_exit);
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("ath3k-1.fw");
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index eafd4af0746e..b0c84c19f442 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -39,7 +39,7 @@
#define VERSION "1.2"
-static struct usb_device_id bcm203x_table[] = {
+static const struct usb_device_id bcm203x_table[] = {
/* Broadcom Blutonium (BCM2033) */
{ USB_DEVICE(0x0a5c, 0x2033) },
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index f36defa37764..e397170b4e02 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -975,7 +975,7 @@ static struct sdio_driver bt_mrvl_sdio = {
.remove = btmrvl_sdio_remove,
};
-static int btmrvl_sdio_init_module(void)
+static int __init btmrvl_sdio_init_module(void)
{
if (sdio_register_driver(&bt_mrvl_sdio) != 0) {
BT_ERR("SDIO Driver Registration Failed");
@@ -988,7 +988,7 @@ static int btmrvl_sdio_init_module(void)
return 0;
}
-static void btmrvl_sdio_exit_module(void)
+static void __exit btmrvl_sdio_exit_module(void)
{
/* Set the flag as user is removing this module. */
user_rmmod = 1;
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f957edf7e45d..dbc579e13b2b 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-y += misc.o
obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o selection.o keyboard.o
+obj-$(CONFIG_KDB_KEYBOARD) += kdb_keyboard.o
obj-$(CONFIG_BFIN_JTAG_COMM) += bfin_jtag_comm.o
obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index a56ca080e108..c3ab46da51a3 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -285,18 +285,22 @@ int agp_add_bridge(struct agp_bridge_data *bridge)
{
int error;
- if (agp_off)
- return -ENODEV;
+ if (agp_off) {
+ error = -ENODEV;
+ goto err_put_bridge;
+ }
if (!bridge->dev) {
printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_put_bridge;
}
/* Grab reference on the chipset driver. */
if (!try_module_get(bridge->driver->owner)) {
dev_info(&bridge->dev->dev, "can't lock chipset driver\n");
- return -EINVAL;
+ error = -EINVAL;
+ goto err_put_bridge;
}
error = agp_backend_initialize(bridge);
@@ -326,6 +330,7 @@ frontend_err:
agp_backend_cleanup(bridge);
err_out:
module_put(bridge->driver->owner);
+err_put_bridge:
agp_put_bridge(bridge);
return error;
}
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
index 9047b2714653..58752b70efea 100644
--- a/drivers/char/agp/hp-agp.c
+++ b/drivers/char/agp/hp-agp.c
@@ -488,9 +488,8 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
handle = obj;
do {
status = acpi_get_object_info(handle, &info);
- if (ACPI_SUCCESS(status)) {
+ if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
/* TBD check _CID also */
- info->hardware_id.string[sizeof(info->hardware_id.length)-1] = '\0';
match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
kfree(info);
if (match) {
@@ -509,6 +508,9 @@ zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
handle = parent;
} while (ACPI_SUCCESS(status));
+ if (ACPI_FAILURE(status))
+ return AE_OK; /* found no enclosing IOC */
+
if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
return AE_OK;
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 87060266ef91..6ea1014697d1 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -186,3 +186,15 @@ config HW_RANDOM_MXC_RNGA
module will be called mxc-rnga.
If unsure, say Y.
+
+config HW_RANDOM_NOMADIK
+ tristate "ST-Ericsson Nomadik Random Number Generator support"
+ depends on HW_RANDOM && PLAT_NOMADIK
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator hardware found on ST-Ericsson SoCs (8815 and 8500).
+
+ To compile this driver as a module, choose M here: the
+ module will be called nomadik-rng.
+
+ If unsure, say Y.
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 5eeb1303f0d0..4273308aa1e3 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
+obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
new file mode 100644
index 000000000000..a8b4c4010144
--- /dev/null
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -0,0 +1,103 @@
+/*
+ * Nomadik RNG support
+ * Copyright 2009 Alessandro Rubini
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/amba/bus.h>
+#include <linux/hw_random.h>
+#include <linux/io.h>
+
+static int nmk_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ void __iomem *base = (void __iomem *)rng->priv;
+
+ /*
+ * The register is 32 bits and gives 16 random bits (low half).
+ * A subsequent read will delay the core for 400ns, so we just read
+ * once and accept the very unlikely very small delay, even if wait==0.
+ */
+ *(u16 *)data = __raw_readl(base + 8) & 0xffff;
+ return 2;
+}
+
+/* we have at most one RNG per machine, granted */
+static struct hwrng nmk_rng = {
+ .name = "nomadik",
+ .read = nmk_rng_read,
+};
+
+static int nmk_rng_probe(struct amba_device *dev, struct amba_id *id)
+{
+ void __iomem *base;
+ int ret;
+
+ ret = amba_request_regions(dev, dev->dev.init_name);
+ if (ret)
+ return ret;
+ ret = -ENOMEM;
+ base = ioremap(dev->res.start, resource_size(&dev->res));
+ if (!base)
+ goto out_release;
+ nmk_rng.priv = (unsigned long)base;
+ ret = hwrng_register(&nmk_rng);
+ if (ret)
+ goto out_unmap;
+ return 0;
+
+out_unmap:
+ iounmap(base);
+out_release:
+ amba_release_regions(dev);
+ return ret;
+}
+
+static int nmk_rng_remove(struct amba_device *dev)
+{
+ void __iomem *base = (void __iomem *)nmk_rng.priv;
+ hwrng_unregister(&nmk_rng);
+ iounmap(base);
+ amba_release_regions(dev);
+ return 0;
+}
+
+static struct amba_id nmk_rng_ids[] = {
+ {
+ .id = 0x000805e1,
+ .mask = 0x000fffff, /* top bits are rev and cfg: accept all */
+ },
+ {0, 0},
+};
+
+static struct amba_driver nmk_rng_driver = {
+ .drv = {
+ .owner = THIS_MODULE,
+ .name = "rng",
+ },
+ .probe = nmk_rng_probe,
+ .remove = nmk_rng_remove,
+ .id_table = nmk_rng_ids,
+};
+
+static int __init nmk_rng_init(void)
+{
+ return amba_driver_register(&nmk_rng_driver);
+}
+
+static void __devexit nmk_rng_exit(void)
+{
+ amba_driver_unregister(&nmk_rng_driver);
+}
+
+module_init(nmk_rng_init);
+module_exit(nmk_rng_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/kdb_keyboard.c b/drivers/char/kdb_keyboard.c
new file mode 100644
index 000000000000..95aa10286e40
--- /dev/null
+++ b/drivers/char/kdb_keyboard.c
@@ -0,0 +1,204 @@
+/*
+ * Kernel Debugger Architecture Dependent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/kdb.h>
+#include <linux/keyboard.h>
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include "kdb_keyboard.h"
+
+
+static int kbd_exists;
+
+/*
+ * Check if the keyboard controller has a keypress for us.
+ * Some parts (Enter Release, LED change) are still blocking polled here,
+ * but hopefully they are all short.
+ */
+int kdb_get_kbd_char(void)
+{
+ int scancode, scanstatus;
+ static int shift_lock; /* CAPS LOCK state (0-off, 1-on) */
+ static int shift_key; /* Shift next keypress */
+ static int ctrl_key;
+ u_short keychar;
+
+ if (KDB_FLAG(NO_I8042) || KDB_FLAG(NO_VT_CONSOLE) ||
+ (inb(KBD_STATUS_REG) == 0xff && inb(KBD_DATA_REG) == 0xff)) {
+ kbd_exists = 0;
+ return -1;
+ }
+ kbd_exists = 1;
+
+ if ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+ return -1;
+
+ /*
+ * Fetch the scancode
+ */
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+
+ /*
+ * Ignore mouse events.
+ */
+ if (scanstatus & KBD_STAT_MOUSE_OBF)
+ return -1;
+
+ /*
+ * Ignore release, trigger on make
+ * (except for shift keys, where we want to
+ * keep the shift state so long as the key is
+ * held down).
+ */
+
+ if (((scancode&0x7f) == 0x2a) || ((scancode&0x7f) == 0x36)) {
+ /*
+ * Next key may use shift table
+ */
+ if ((scancode & 0x80) == 0)
+ shift_key = 1;
+ else
+ shift_key = 0;
+ return -1;
+ }
+
+ if ((scancode&0x7f) == 0x1d) {
+ /*
+ * Left ctrl key
+ */
+ if ((scancode & 0x80) == 0)
+ ctrl_key = 1;
+ else
+ ctrl_key = 0;
+ return -1;
+ }
+
+ if ((scancode & 0x80) != 0)
+ return -1;
+
+ scancode &= 0x7f;
+
+ /*
+ * Translate scancode
+ */
+
+ if (scancode == 0x3a) {
+ /*
+ * Toggle caps lock
+ */
+ shift_lock ^= 1;
+
+#ifdef KDB_BLINK_LED
+ kdb_toggleled(0x4);
+#endif
+ return -1;
+ }
+
+ if (scancode == 0x0e) {
+ /*
+ * Backspace
+ */
+ return 8;
+ }
+
+ /* Special Key */
+ switch (scancode) {
+ case 0xF: /* Tab */
+ return 9;
+ case 0x53: /* Del */
+ return 4;
+ case 0x47: /* Home */
+ return 1;
+ case 0x4F: /* End */
+ return 5;
+ case 0x4B: /* Left */
+ return 2;
+ case 0x48: /* Up */
+ return 16;
+ case 0x50: /* Down */
+ return 14;
+ case 0x4D: /* Right */
+ return 6;
+ }
+
+ if (scancode == 0xe0)
+ return -1;
+
+ /*
+ * For Japanese 86/106 keyboards
+ * See comment in drivers/char/pc_keyb.c.
+ * - Masahiro Adegawa
+ */
+ if (scancode == 0x73)
+ scancode = 0x59;
+ else if (scancode == 0x7d)
+ scancode = 0x7c;
+
+ if (!shift_lock && !shift_key && !ctrl_key) {
+ keychar = plain_map[scancode];
+ } else if ((shift_lock || shift_key) && key_maps[1]) {
+ keychar = key_maps[1][scancode];
+ } else if (ctrl_key && key_maps[4]) {
+ keychar = key_maps[4][scancode];
+ } else {
+ keychar = 0x0020;
+ kdb_printf("Unknown state/scancode (%d)\n", scancode);
+ }
+ keychar &= 0x0fff;
+ if (keychar == '\t')
+ keychar = ' ';
+ switch (KTYP(keychar)) {
+ case KT_LETTER:
+ case KT_LATIN:
+ if (isprint(keychar))
+ break; /* printable characters */
+ /* drop through */
+ case KT_SPEC:
+ if (keychar == K_ENTER)
+ break;
+ /* drop through */
+ default:
+ return -1; /* ignore unprintables */
+ }
+
+ if ((scancode & 0x7f) == 0x1c) {
+ /*
+ * enter key. All done. Absorb the release scancode.
+ */
+ while ((inb(KBD_STATUS_REG) & KBD_STAT_OBF) == 0)
+ ;
+
+ /*
+ * Fetch the scancode
+ */
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+
+ while (scanstatus & KBD_STAT_MOUSE_OBF) {
+ scancode = inb(KBD_DATA_REG);
+ scanstatus = inb(KBD_STATUS_REG);
+ }
+
+ if (scancode != 0x9c) {
+ /*
+ * Wasn't an enter-release, why not?
+ */
+ kdb_printf("kdb: expected enter got 0x%x status 0x%x\n",
+ scancode, scanstatus);
+ }
+
+ return 13;
+ }
+
+ return keychar & 0xff;
+}
+EXPORT_SYMBOL_GPL(kdb_get_kbd_char);
diff --git a/drivers/char/kdb_keyboard.h b/drivers/char/kdb_keyboard.h
new file mode 100644
index 000000000000..5541668cce0a
--- /dev/null
+++ b/drivers/char/kdb_keyboard.h
@@ -0,0 +1,143 @@
+/*
+ * include/linux/pc_keyb.h
+ *
+ * PC Keyboard And Keyboard Controller
+ *
+ * (c) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+/*
+ * Configuration Switches
+ */
+
+#undef KBD_REPORT_ERR /* Report keyboard errors */
+#define KBD_REPORT_UNKN /* Report unknown scan codes */
+#define KBD_REPORT_TIMEOUTS /* Report keyboard timeouts */
+#undef KBD_IS_FOCUS_9000 /* We have the brain-damaged
+ * FOCUS-9000 keyboard */
+#undef INITIALIZE_MOUSE /* Define if your PS/2 mouse
+ * needs initialization. */
+
+#define KBD_INIT_TIMEOUT 1000 /* Timeout in ms for initializing the
+ * keyboard */
+#define KBC_TIMEOUT 250 /* Timeout in ms for sending
+ * to keyboard controller */
+#define KBD_TIMEOUT 1000 /* Timeout in ms for keyboard
+ * command acknowledge */
+
+/*
+ * Internal variables of the driver
+ */
+
+extern unsigned char pckbd_read_mask;
+extern unsigned char aux_device_present;
+
+/*
+ * Keyboard Controller Registers on normal PCs.
+ */
+
+#define KBD_STATUS_REG 0x64 /* Status register (R) */
+#define KBD_CNTL_REG 0x64 /* Controller command register (W) */
+#define KBD_DATA_REG 0x60 /* Keyboard data register (R/W) */
+
+/*
+ * Keyboard Controller Commands
+ */
+
+#define KBD_CCMD_READ_MODE 0x20 /* Read mode bits */
+#define KBD_CCMD_WRITE_MODE 0x60 /* Write mode bits */
+#define KBD_CCMD_GET_VERSION 0xA1 /* Get controller version */
+#define KBD_CCMD_MOUSE_DISABLE 0xA7 /* Disable mouse interface */
+#define KBD_CCMD_MOUSE_ENABLE 0xA8 /* Enable mouse interface */
+#define KBD_CCMD_TEST_MOUSE 0xA9 /* Mouse interface test */
+#define KBD_CCMD_SELF_TEST 0xAA /* Controller self test */
+#define KBD_CCMD_KBD_TEST 0xAB /* Keyboard interface test */
+#define KBD_CCMD_KBD_DISABLE 0xAD /* Keyboard interface disable */
+#define KBD_CCMD_KBD_ENABLE 0xAE /* Keyboard interface enable */
+#define KBD_CCMD_WRITE_AUX_OBUF 0xD3 /* Write to output buffer as if
+ initiated by the auxiliary device */
+#define KBD_CCMD_WRITE_MOUSE 0xD4 /* Write the following byte to
+ * the mouse */
+
+/*
+ * Keyboard Commands
+ */
+
+#define KBD_CMD_SET_LEDS 0xED /* Set keyboard leds */
+#define KBD_CMD_SET_RATE 0xF3 /* Set typematic rate */
+#define KBD_CMD_ENABLE 0xF4 /* Enable scanning */
+#define KBD_CMD_DISABLE 0xF5 /* Disable scanning */
+#define KBD_CMD_RESET 0xFF /* Reset */
+
+/*
+ * Keyboard Replies
+ */
+
+#define KBD_REPLY_POR 0xAA /* Power on reset */
+#define KBD_REPLY_ACK 0xFA /* Command ACK */
+#define KBD_REPLY_RESEND 0xFE /* Command NACK, send the cmd again */
+
+/*
+ * Status Register Bits
+ */
+
+#define KBD_STAT_OBF 0x01 /* Keyboard output buffer full */
+#define KBD_STAT_IBF 0x02 /* Keyboard input buffer full */
+#define KBD_STAT_SELFTEST 0x04 /* Self test successful */
+#define KBD_STAT_CMD 0x08 /* Last write was a command
+ * write (0=data) */
+#define KBD_STAT_UNLOCKED 0x10 /* Zero if keyboard locked */
+#define KBD_STAT_MOUSE_OBF 0x20 /* Mouse output buffer full */
+#define KBD_STAT_GTO 0x40 /* General receive/xmit timeout */
+#define KBD_STAT_PERR 0x80 /* Parity error */
+
+#define AUX_STAT_OBF (KBD_STAT_OBF | KBD_STAT_MOUSE_OBF)
+
+/*
+ * Controller Mode Register Bits
+ */
+
+#define KBD_MODE_KBD_INT 0x01 /* Keyboard data generate IRQ1 */
+#define KBD_MODE_MOUSE_INT 0x02 /* Mouse data generate IRQ12 */
+#define KBD_MODE_SYS 0x04 /* The system flag (?) */
+#define KBD_MODE_NO_KEYLOCK 0x08 /* The keylock doesn't affect
+ * the keyboard if set */
+#define KBD_MODE_DISABLE_KBD 0x10 /* Disable keyboard interface */
+#define KBD_MODE_DISABLE_MOUSE 0x20 /* Disable mouse interface */
+#define KBD_MODE_KCC 0x40 /* Scan code conversion to PC format */
+#define KBD_MODE_RFU 0x80
+
+/*
+ * Mouse Commands
+ */
+
+#define AUX_SET_RES 0xE8 /* Set resolution */
+#define AUX_SET_SCALE11 0xE6 /* Set 1:1 scaling */
+#define AUX_SET_SCALE21 0xE7 /* Set 2:1 scaling */
+#define AUX_GET_SCALE 0xE9 /* Get scaling factor */
+#define AUX_SET_STREAM 0xEA /* Set stream mode */
+#define AUX_SET_SAMPLE 0xF3 /* Set sample rate */
+#define AUX_ENABLE_DEV 0xF4 /* Enable aux device */
+#define AUX_DISABLE_DEV 0xF5 /* Disable aux device */
+#define AUX_RESET 0xFF /* Reset aux device */
+#define AUX_ACK 0xFA /* Command byte ACK. */
+
+#define AUX_BUF_SIZE 2048 /* This might be better divisible by
+ three to make overruns stay in sync
+ but then the read function would need
+ a lock etc - ick */
+
+struct aux_queue {
+ unsigned long head;
+ unsigned long tail;
+ wait_queue_head_t proc_list;
+ struct fasync_struct *fasync;
+ unsigned char buf[AUX_BUF_SIZE];
+};
+
+
+/* How to access the keyboard macros on this platform. */
+#define kbd_read_input() inb(KBD_DATA_REG)
+#define kbd_read_status() inb(KBD_STATUS_REG)
+#define kbd_write_output(val) outb(val, KBD_DATA_REG)
+#define kbd_write_command(val) outb(val, KBD_CNTL_REG)
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
index f706b1dffdb3..1db54f62f9e1 100644
--- a/drivers/char/keyboard.c
+++ b/drivers/char/keyboard.c
@@ -1195,6 +1195,11 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
if (keycode < BTN_MISC && printk_ratelimit())
printk(KERN_WARNING "keyboard.c: can't emulate rawmode for keycode %d\n", keycode);
+ if (down)
+ set_bit(keycode, key_down);
+ else
+ clear_bit(keycode, key_down);
+
#ifdef CONFIG_MAGIC_SYSRQ /* Handle the SysRq Hack */
if (keycode == KEY_SYSRQ && (sysrq_down || (down == 1 && sysrq_alt))) {
if (!sysrq_down) {
@@ -1237,11 +1242,6 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
raw_mode = 1;
}
- if (down)
- set_bit(keycode, key_down);
- else
- clear_bit(keycode, key_down);
-
if (rep &&
(!vc_kbd_mode(kbd, VC_REPEAT) ||
(tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) {
@@ -1410,6 +1410,23 @@ static const struct input_device_id kbd_ids[] = {
MODULE_DEVICE_TABLE(input, kbd_ids);
+#ifdef CONFIG_KGDB_KDB
+void kbd_clear_keys(void)
+{
+ int i, j, k;
+
+ for (i = 0; i < ARRAY_SIZE(key_down); i++) {
+ k = i * BITS_PER_LONG;
+ for (j = 0; j < BITS_PER_LONG; j++, k++) {
+ if (test_bit(k, key_down)) {
+ kbd_keycode(k, 0, 0);
+ }
+ }
+ }
+}
+#endif
+
+
static struct input_handler kbd_handler = {
.event = kbd_event,
.connect = kbd_connect,
@@ -1417,6 +1434,9 @@ static struct input_handler kbd_handler = {
.start = kbd_start,
.name = "kbd",
.id_table = kbd_ids,
+#ifdef CONFIG_KGDB_KDB
+ .dbg_clear_keys = kbd_clear_keys,
+#endif
};
int __init kbd_init(void)
diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
index fdbcc9fd6d31..5eb83c3ca20d 100644
--- a/drivers/char/nvram.c
+++ b/drivers/char/nvram.c
@@ -336,14 +336,12 @@ static int nvram_ioctl(struct inode *inode, struct file *file,
static int nvram_open(struct inode *inode, struct file *file)
{
- lock_kernel();
spin_lock(&nvram_state_lock);
if ((nvram_open_cnt && (file->f_flags & O_EXCL)) ||
(nvram_open_mode & NVRAM_EXCL) ||
((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) {
spin_unlock(&nvram_state_lock);
- unlock_kernel();
return -EBUSY;
}
@@ -354,7 +352,6 @@ static int nvram_open(struct inode *inode, struct file *file)
nvram_open_cnt++;
spin_unlock(&nvram_state_lock);
- unlock_kernel();
return 0;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 8258982b49ec..ee69e5258ca2 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1200,7 +1200,7 @@ const struct file_operations urandom_fops = {
void generate_random_uuid(unsigned char uuid_out[16])
{
get_random_bytes(uuid_out, 16);
- /* Set UUID version to 4 --- truely random generation */
+ /* Set UUID version to 4 --- truly random generation */
uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
/* Set the UUID variant to DCE */
uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 67bc2ece7b4b..3fbffd11d989 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1811,19 +1811,21 @@ error_out:
*/
int cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *data;
struct cpufreq_policy policy;
int ret;
- if (!data) {
- ret = -ENODEV;
- goto no_policy;
- }
-
if (unlikely(lock_policy_rwsem_write(cpu))) {
ret = -EINVAL;
goto fail;
}
+ data = cpufreq_cpu_get(cpu);
+
+ if (!data) {
+ dprintk("Update: No policy on cpu: %u\n", cpu);
+ ret = -ENODEV;
+ goto no_policy;
+ }
dprintk("updating policy for CPU %u\n", cpu);
memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1848,11 +1850,10 @@ int cpufreq_update_policy(unsigned int cpu)
ret = __cpufreq_set_policy(data, &policy);
- unlock_policy_rwsem_write(cpu);
-
-fail:
cpufreq_cpu_put(data);
no_policy:
+ unlock_policy_rwsem_write(cpu);
+fail:
return ret;
}
EXPORT_SYMBOL(cpufreq_update_policy);
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b34ade2332b..bd444dc93cf2 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -554,6 +554,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
(dbs_tuners_ins.up_threshold -
dbs_tuners_ins.down_differential);
+ if (freq_next < policy->min)
+ freq_next = policy->min;
+
if (!dbs_tuners_ins.powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 68104434ebb5..73655aeb3a60 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -18,6 +18,7 @@
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
+#include <linux/math64.h>
#define BUCKETS 12
#define RESOLUTION 1024
@@ -169,6 +170,12 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
static void menu_update(struct cpuidle_device *dev);
+/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
+static u64 div_round64(u64 dividend, u32 divisor)
+{
+ return div_u64(dividend + (divisor / 2), divisor);
+}
+
/**
* menu_select - selects the next idle state to enter
* @dev: the CPU
@@ -209,9 +216,8 @@ static int menu_select(struct cpuidle_device *dev)
data->correction_factor[data->bucket] = RESOLUTION * DECAY;
/* Make sure to round up for half microseconds */
- data->predicted_us = DIV_ROUND_CLOSEST(
- data->expected_us * data->correction_factor[data->bucket],
- RESOLUTION * DECAY);
+ data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
+ RESOLUTION * DECAY);
/*
* We want to default to C1 (hlt), not to busy polling
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 4801162919d9..03e71b1a5128 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -135,8 +135,8 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback
*/
- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+ op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+ op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
ret = crypto_cipher_setkey(op->fallback.cip, key, len);
if (ret) {
@@ -263,7 +263,7 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
if (IS_ERR(op->fallback.cip)) {
printk(KERN_ERR "Error allocating fallback algo %s\n", name);
- return PTR_ERR(op->fallback.blk);
+ return PTR_ERR(op->fallback.cip);
}
return 0;
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 55c9c59b3f71..aedef7941b22 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -69,6 +69,9 @@ config EDAC_MM_EDAC
occurred so that a particular failing memory module can be
replaced. If unsure, select 'Y'.
+config EDAC_MCE
+ bool
+
config EDAC_AMD64
tristate "AMD64 (Opteron, Athlon64) K8, F10h, F11h"
depends on EDAC_MM_EDAC && K8_NB && X86_64 && PCI && EDAC_DECODE_MCE
@@ -166,6 +169,16 @@ config EDAC_I5400
Support for error detection and correction the Intel
i5400 MCH chipset (Seaburg).
+config EDAC_I7CORE
+ tristate "Intel i7 Core (Nehalem) processors"
+ depends on EDAC_MM_EDAC && PCI && X86
+ select EDAC_MCE
+ help
+ Support for error detection and correction the Intel
+ i7 Core (Nehalem) Integrated Memory Controller that exists on
+ newer processors like i7 Core, i7 Core Extreme, Xeon 35xx
+ and Xeon 55xx processors.
+
config EDAC_I82860
tristate "Intel 82860"
depends on EDAC_MM_EDAC && PCI && X86_32
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index bc5dc232a0fb..ca6b1bb24ccc 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_EDAC) := edac_stub.o
obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
+obj-$(CONFIG_EDAC_MCE) += edac_mce.o
edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
edac_core-objs += edac_module.o edac_device_sysfs.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_EDAC_CPC925) += cpc925_edac.o
obj-$(CONFIG_EDAC_I5000) += i5000_edac.o
obj-$(CONFIG_EDAC_I5100) += i5100_edac.o
obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
+obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 001b2e797fb3..efca9343d26a 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -341,12 +341,30 @@ struct csrow_info {
struct channel_info *channels;
};
+struct mcidev_sysfs_group {
+ const char *name; /* group name */
+ struct mcidev_sysfs_attribute *mcidev_attr; /* group attributes */
+};
+
+struct mcidev_sysfs_group_kobj {
+ struct list_head list; /* list for all instances within a mc */
+
+ struct kobject kobj; /* kobj for the group */
+
+ struct mcidev_sysfs_group *grp; /* group description table */
+ struct mem_ctl_info *mci; /* the parent */
+};
+
/* mcidev_sysfs_attribute structure
* used for driver sysfs attributes and in mem_ctl_info
* sysfs top level entries
*/
struct mcidev_sysfs_attribute {
- struct attribute attr;
+ /* It should use either attr or grp */
+ struct attribute attr;
+ struct mcidev_sysfs_group *grp; /* Points to a group of attributes */
+
+ /* Ops for show/store values at the attribute - not used on group */
ssize_t (*show)(struct mem_ctl_info *,char *);
ssize_t (*store)(struct mem_ctl_info *, const char *,size_t);
};
@@ -424,6 +442,9 @@ struct mem_ctl_info {
/* edac sysfs device control */
struct kobject edac_mci_kobj;
+ /* list for all grp instances within a mc */
+ struct list_head grp_kobj_list;
+
/* Additional top controller level attributes, but specified
* by the low level driver.
*
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e1d4ce083481..299547c04e51 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -556,6 +556,8 @@ static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr,
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
if (mcidev_attr->show)
return mcidev_attr->show(mem_ctl_info, buffer);
@@ -568,6 +570,8 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
struct mem_ctl_info *mem_ctl_info = to_mci(kobj);
struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
if (mcidev_attr->store)
return mcidev_attr->store(mem_ctl_info, buffer, count);
@@ -725,28 +729,118 @@ void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci)
#define EDAC_DEVICE_SYMLINK "device"
+#define grp_to_mci(k) (container_of(k, struct mcidev_sysfs_group_kobj, kobj)->mci)
+
+/* MCI show/store functions for top most object */
+static ssize_t inst_grp_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
+ if (mcidev_attr->show)
+ return mcidev_attr->show(mem_ctl_info, buffer);
+
+ return -EIO;
+}
+
+static ssize_t inst_grp_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct mem_ctl_info *mem_ctl_info = grp_to_mci(kobj);
+ struct mcidev_sysfs_attribute *mcidev_attr = to_mcidev_attr(attr);
+
+ debugf1("%s() mem_ctl_info %p\n", __func__, mem_ctl_info);
+
+ if (mcidev_attr->store)
+ return mcidev_attr->store(mem_ctl_info, buffer, count);
+
+ return -EIO;
+}
+
+/* No memory to release for this kobj */
+static void edac_inst_grp_release(struct kobject *kobj)
+{
+ struct mcidev_sysfs_group_kobj *grp;
+ struct mem_ctl_info *mci;
+
+ debugf1("%s()\n", __func__);
+
+ grp = container_of(kobj, struct mcidev_sysfs_group_kobj, kobj);
+ mci = grp->mci;
+
+ kobject_put(&mci->edac_mci_kobj);
+}
+
+/* Intermediate show/store table */
+static struct sysfs_ops inst_grp_ops = {
+ .show = inst_grp_show,
+ .store = inst_grp_store
+};
+
+/* the kobj_type instance for a instance group */
+static struct kobj_type ktype_inst_grp = {
+ .release = edac_inst_grp_release,
+ .sysfs_ops = &inst_grp_ops,
+};
+
+
/*
* edac_create_mci_instance_attributes
- * create MC driver specific attributes at the topmost level
- * directory of this mci instance.
+ * create MC driver specific attributes bellow an specified kobj
+ * This routine calls itself recursively, in order to create an entire
+ * object tree.
*/
-static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
+static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci,
+ struct mcidev_sysfs_attribute *sysfs_attrib,
+ struct kobject *kobj)
{
int err;
- struct mcidev_sysfs_attribute *sysfs_attrib;
- /* point to the start of the array and iterate over it
- * adding each attribute listed to this mci instance's kobject
- */
- sysfs_attrib = mci->mc_driver_sysfs_attributes;
+ debugf1("%s()\n", __func__);
+
+ while (sysfs_attrib) {
+ if (sysfs_attrib->grp) {
+ struct mcidev_sysfs_group_kobj *grp_kobj;
+
+ grp_kobj = kzalloc(sizeof(*grp_kobj), GFP_KERNEL);
+ if (!grp_kobj)
+ return -ENOMEM;
+
+ list_add_tail(&grp_kobj->list, &mci->grp_kobj_list);
+
+ grp_kobj->grp = sysfs_attrib->grp;
+ grp_kobj->mci = mci;
+
+ debugf0("%s() grp %s, mci %p\n", __func__,
+ sysfs_attrib->grp->name, mci);
+
+ err = kobject_init_and_add(&grp_kobj->kobj,
+ &ktype_inst_grp,
+ &mci->edac_mci_kobj,
+ sysfs_attrib->grp->name);
+ if (err)
+ return err;
+
+ err = edac_create_mci_instance_attributes(mci,
+ grp_kobj->grp->mcidev_attr,
+ &grp_kobj->kobj);
+
+ if (err)
+ return err;
+ } else if (sysfs_attrib->attr.name) {
+ debugf0("%s() file %s\n", __func__,
+ sysfs_attrib->attr.name);
+
+ err = sysfs_create_file(kobj, &sysfs_attrib->attr);
+ } else
+ break;
- while (sysfs_attrib && sysfs_attrib->attr.name) {
- err = sysfs_create_file(&mci->edac_mci_kobj,
- (struct attribute*) sysfs_attrib);
if (err) {
return err;
}
-
sysfs_attrib++;
}
@@ -758,21 +852,44 @@ static int edac_create_mci_instance_attributes(struct mem_ctl_info *mci)
* remove MC driver specific attributes at the topmost level
* directory of this mci instance.
*/
-static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci)
+static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
+ struct mcidev_sysfs_attribute *sysfs_attrib,
+ struct kobject *kobj, int count)
{
- struct mcidev_sysfs_attribute *sysfs_attrib;
+ struct mcidev_sysfs_group_kobj *grp_kobj, *tmp;
- /* point to the start of the array and iterate over it
- * adding each attribute listed to this mci instance's kobject
- */
- sysfs_attrib = mci->mc_driver_sysfs_attributes;
+ debugf1("%s()\n", __func__);
- /* loop if there are attributes and until we hit a NULL entry */
- while (sysfs_attrib && sysfs_attrib->attr.name) {
- sysfs_remove_file(&mci->edac_mci_kobj,
- (struct attribute *) sysfs_attrib);
+ /*
+ * loop if there are attributes and until we hit a NULL entry
+ * Remove first all the atributes
+ */
+ while (sysfs_attrib) {
+ if (sysfs_attrib->grp) {
+ list_for_each_entry(grp_kobj, &mci->grp_kobj_list,
+ list)
+ if (grp_kobj->grp == sysfs_attrib->grp)
+ edac_remove_mci_instance_attributes(mci,
+ grp_kobj->grp->mcidev_attr,
+ &grp_kobj->kobj, count + 1);
+ } else if (sysfs_attrib->attr.name) {
+ debugf0("%s() file %s\n", __func__,
+ sysfs_attrib->attr.name);
+ sysfs_remove_file(kobj, &sysfs_attrib->attr);
+ } else
+ break;
sysfs_attrib++;
}
+
+ /*
+ * Now that all attributes got removed, it is save to remove all groups
+ */
+ if (!count)
+ list_for_each_entry_safe(grp_kobj, tmp, &mci->grp_kobj_list,
+ list) {
+ debugf0("%s() grp %s\n", __func__, grp_kobj->grp->name);
+ kobject_put(&grp_kobj->kobj);
+ }
}
@@ -793,6 +910,8 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s() idx=%d\n", __func__, mci->mc_idx);
+ INIT_LIST_HEAD(&mci->grp_kobj_list);
+
/* create a symlink for the device */
err = sysfs_create_link(kobj_mci, &mci->dev->kobj,
EDAC_DEVICE_SYMLINK);
@@ -805,7 +924,9 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
* then create them now for the driver.
*/
if (mci->mc_driver_sysfs_attributes) {
- err = edac_create_mci_instance_attributes(mci);
+ err = edac_create_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj);
if (err) {
debugf1("%s() failure to create mci attributes\n",
__func__);
@@ -840,7 +961,8 @@ fail1:
}
/* remove the mci instance's attributes, if any */
- edac_remove_mci_instance_attributes(mci);
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes, &mci->edac_mci_kobj, 0);
/* remove the symlink */
sysfs_remove_link(kobj_mci, EDAC_DEVICE_SYMLINK);
@@ -874,8 +996,9 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
debugf0("%s() remove_mci_instance\n", __func__);
/* remove this mci instance's attribtes */
- edac_remove_mci_instance_attributes(mci);
-
+ edac_remove_mci_instance_attributes(mci,
+ mci->mc_driver_sysfs_attributes,
+ &mci->edac_mci_kobj, 0);
debugf0("%s() unregister this mci kobj\n", __func__);
/* unregister this instance's kobject */
diff --git a/drivers/edac/edac_mce.c b/drivers/edac/edac_mce.c
new file mode 100644
index 000000000000..9ccdc5b140e7
--- /dev/null
+++ b/drivers/edac/edac_mce.c
@@ -0,0 +1,61 @@
+/* Provides edac interface to mcelog events
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2.
+ *
+ * Copyright (c) 2009 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#include <linux/module.h>
+#include <linux/edac_mce.h>
+#include <asm/mce.h>
+
+int edac_mce_enabled;
+EXPORT_SYMBOL_GPL(edac_mce_enabled);
+
+
+/*
+ * Extension interface
+ */
+
+static LIST_HEAD(edac_mce_list);
+static DEFINE_MUTEX(edac_mce_lock);
+
+int edac_mce_register(struct edac_mce *edac_mce)
+{
+ mutex_lock(&edac_mce_lock);
+ list_add_tail(&edac_mce->list, &edac_mce_list);
+ mutex_unlock(&edac_mce_lock);
+ return 0;
+}
+EXPORT_SYMBOL(edac_mce_register);
+
+void edac_mce_unregister(struct edac_mce *edac_mce)
+{
+ mutex_lock(&edac_mce_lock);
+ list_del(&edac_mce->list);
+ mutex_unlock(&edac_mce_lock);
+}
+EXPORT_SYMBOL(edac_mce_unregister);
+
+int edac_mce_parse(struct mce *mce)
+{
+ struct edac_mce *edac_mce;
+
+ list_for_each_entry(edac_mce, &edac_mce_list, list) {
+ if (edac_mce->check_error(edac_mce->priv, mce))
+ return 1;
+ }
+
+ /* Nobody queued the error */
+ return 0;
+}
+EXPORT_SYMBOL_GPL(edac_mce_parse);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("EDAC Driver for mcelog captured errors");
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
new file mode 100644
index 000000000000..e944b63d9f06
--- /dev/null
+++ b/drivers/edac/i7core_edac.c
@@ -0,0 +1,1977 @@
+/* Intel 7 core Memory Controller kernel module (Nehalem)
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2 only.
+ *
+ * Copyright (c) 2009 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ *
+ * Forked and adapted from the i5400_edac driver
+ *
+ * Based on the following public Intel datasheets:
+ * Intel Core i7 Processor Extreme Edition and Intel Core i7 Processor
+ * Datasheet, Volume 2:
+ * http://download.intel.com/design/processor/datashts/320835.pdf
+ * Intel Xeon Processor 5500 Series Datasheet Volume 2
+ * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
+ * also available at:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/edac.h>
+#include <linux/mmzone.h>
+#include <linux/edac_mce.h>
+#include <linux/smp.h>
+#include <asm/processor.h>
+
+#include "edac_core.h"
+
+/*
+ * This is used for Nehalem-EP and Nehalem-EX devices, where the non-core
+ * registers start at bus 255, and are not reported by BIOS.
+ * We currently find devices with only 2 sockets. In order to support more QPI
+ * Quick Path Interconnect, just increment this number.
+ */
+#define MAX_SOCKET_BUSES 2
+
+
+/*
+ * Alter this version for the module when modifications are made
+ */
+#define I7CORE_REVISION " Ver: 1.0.0 " __DATE__
+#define EDAC_MOD_STR "i7core_edac"
+
+/*
+ * Debug macros
+ */
+#define i7core_printk(level, fmt, arg...) \
+ edac_printk(level, "i7core", fmt, ##arg)
+
+#define i7core_mc_printk(mci, level, fmt, arg...) \
+ edac_mc_chipset_printk(mci, level, "i7core", fmt, ##arg)
+
+/*
+ * i7core Memory Controller Registers
+ */
+
+ /* OFFSETS for Device 0 Function 0 */
+
+#define MC_CFG_CONTROL 0x90
+
+ /* OFFSETS for Device 3 Function 0 */
+
+#define MC_CONTROL 0x48
+#define MC_STATUS 0x4c
+#define MC_MAX_DOD 0x64
+
+/*
+ * OFFSETS for Device 3 Function 4, as inicated on Xeon 5500 datasheet:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+
+#define MC_TEST_ERR_RCV1 0x60
+ #define DIMM2_COR_ERR(r) ((r) & 0x7fff)
+
+#define MC_TEST_ERR_RCV0 0x64
+ #define DIMM1_COR_ERR(r) (((r) >> 16) & 0x7fff)
+ #define DIMM0_COR_ERR(r) ((r) & 0x7fff)
+
+/* OFFSETS for Device 3 Function 2, as inicated on Xeon 5500 datasheet */
+#define MC_COR_ECC_CNT_0 0x80
+#define MC_COR_ECC_CNT_1 0x84
+#define MC_COR_ECC_CNT_2 0x88
+#define MC_COR_ECC_CNT_3 0x8c
+#define MC_COR_ECC_CNT_4 0x90
+#define MC_COR_ECC_CNT_5 0x94
+
+#define DIMM_TOP_COR_ERR(r) (((r) >> 16) & 0x7fff)
+#define DIMM_BOT_COR_ERR(r) ((r) & 0x7fff)
+
+
+ /* OFFSETS for Devices 4,5 and 6 Function 0 */
+
+#define MC_CHANNEL_DIMM_INIT_PARAMS 0x58
+ #define THREE_DIMMS_PRESENT (1 << 24)
+ #define SINGLE_QUAD_RANK_PRESENT (1 << 23)
+ #define QUAD_RANK_PRESENT (1 << 22)
+ #define REGISTERED_DIMM (1 << 15)
+
+#define MC_CHANNEL_MAPPER 0x60
+ #define RDLCH(r, ch) ((((r) >> (3 + (ch * 6))) & 0x07) - 1)
+ #define WRLCH(r, ch) ((((r) >> (ch * 6)) & 0x07) - 1)
+
+#define MC_CHANNEL_RANK_PRESENT 0x7c
+ #define RANK_PRESENT_MASK 0xffff
+
+#define MC_CHANNEL_ADDR_MATCH 0xf0
+#define MC_CHANNEL_ERROR_MASK 0xf8
+#define MC_CHANNEL_ERROR_INJECT 0xfc
+ #define INJECT_ADDR_PARITY 0x10
+ #define INJECT_ECC 0x08
+ #define MASK_CACHELINE 0x06
+ #define MASK_FULL_CACHELINE 0x06
+ #define MASK_MSB32_CACHELINE 0x04
+ #define MASK_LSB32_CACHELINE 0x02
+ #define NO_MASK_CACHELINE 0x00
+ #define REPEAT_EN 0x01
+
+ /* OFFSETS for Devices 4,5 and 6 Function 1 */
+
+#define MC_DOD_CH_DIMM0 0x48
+#define MC_DOD_CH_DIMM1 0x4c
+#define MC_DOD_CH_DIMM2 0x50
+ #define RANKOFFSET_MASK ((1 << 12) | (1 << 11) | (1 << 10))
+ #define RANKOFFSET(x) ((x & RANKOFFSET_MASK) >> 10)
+ #define DIMM_PRESENT_MASK (1 << 9)
+ #define DIMM_PRESENT(x) (((x) & DIMM_PRESENT_MASK) >> 9)
+ #define MC_DOD_NUMBANK_MASK ((1 << 8) | (1 << 7))
+ #define MC_DOD_NUMBANK(x) (((x) & MC_DOD_NUMBANK_MASK) >> 7)
+ #define MC_DOD_NUMRANK_MASK ((1 << 6) | (1 << 5))
+ #define MC_DOD_NUMRANK(x) (((x) & MC_DOD_NUMRANK_MASK) >> 5)
+ #define MC_DOD_NUMROW_MASK ((1 << 4) | (1 << 3) | (1 << 2))
+ #define MC_DOD_NUMROW(x) (((x) & MC_DOD_NUMROW_MASK) >> 2)
+ #define MC_DOD_NUMCOL_MASK 3
+ #define MC_DOD_NUMCOL(x) ((x) & MC_DOD_NUMCOL_MASK)
+
+#define MC_RANK_PRESENT 0x7c
+
+#define MC_SAG_CH_0 0x80
+#define MC_SAG_CH_1 0x84
+#define MC_SAG_CH_2 0x88
+#define MC_SAG_CH_3 0x8c
+#define MC_SAG_CH_4 0x90
+#define MC_SAG_CH_5 0x94
+#define MC_SAG_CH_6 0x98
+#define MC_SAG_CH_7 0x9c
+
+#define MC_RIR_LIMIT_CH_0 0x40
+#define MC_RIR_LIMIT_CH_1 0x44
+#define MC_RIR_LIMIT_CH_2 0x48
+#define MC_RIR_LIMIT_CH_3 0x4C
+#define MC_RIR_LIMIT_CH_4 0x50
+#define MC_RIR_LIMIT_CH_5 0x54
+#define MC_RIR_LIMIT_CH_6 0x58
+#define MC_RIR_LIMIT_CH_7 0x5C
+#define MC_RIR_LIMIT_MASK ((1 << 10) - 1)
+
+#define MC_RIR_WAY_CH 0x80
+ #define MC_RIR_WAY_OFFSET_MASK (((1 << 14) - 1) & ~0x7)
+ #define MC_RIR_WAY_RANK_MASK 0x7
+
+/*
+ * i7core structs
+ */
+
+#define NUM_CHANS 3
+#define MAX_DIMMS 3 /* Max DIMMS per channel */
+#define MAX_MCR_FUNC 4
+#define MAX_CHAN_FUNC 3
+
+struct i7core_info {
+ u32 mc_control;
+ u32 mc_status;
+ u32 max_dod;
+ u32 ch_map;
+};
+
+
+struct i7core_inject {
+ int enable;
+
+ u32 section;
+ u32 type;
+ u32 eccmask;
+
+ /* Error address mask */
+ int channel, dimm, rank, bank, page, col;
+};
+
+struct i7core_channel {
+ u32 ranks;
+ u32 dimms;
+};
+
+struct pci_id_descr {
+ int dev;
+ int func;
+ int dev_id;
+ int optional;
+};
+
+struct i7core_dev {
+ struct list_head list;
+ u8 socket;
+ struct pci_dev **pdev;
+ int n_devs;
+ struct mem_ctl_info *mci;
+};
+
+struct i7core_pvt {
+ struct pci_dev *pci_noncore;
+ struct pci_dev *pci_mcr[MAX_MCR_FUNC + 1];
+ struct pci_dev *pci_ch[NUM_CHANS][MAX_CHAN_FUNC + 1];
+
+ struct i7core_dev *i7core_dev;
+
+ struct i7core_info info;
+ struct i7core_inject inject;
+ struct i7core_channel channel[NUM_CHANS];
+
+ int channels; /* Number of active channels */
+
+ int ce_count_available;
+ int csrow_map[NUM_CHANS][MAX_DIMMS];
+
+ /* ECC corrected errors counts per udimm */
+ unsigned long udimm_ce_count[MAX_DIMMS];
+ int udimm_last_ce_count[MAX_DIMMS];
+ /* ECC corrected errors counts per rdimm */
+ unsigned long rdimm_ce_count[NUM_CHANS][MAX_DIMMS];
+ int rdimm_last_ce_count[NUM_CHANS][MAX_DIMMS];
+
+ unsigned int is_registered;
+
+ /* mcelog glue */
+ struct edac_mce edac_mce;
+
+ /* Fifo double buffers */
+ struct mce mce_entry[MCE_LOG_LEN];
+ struct mce mce_outentry[MCE_LOG_LEN];
+
+ /* Fifo in/out counters */
+ unsigned mce_in, mce_out;
+
+ /* Count indicator to show errors not got */
+ unsigned mce_overrun;
+};
+
+/* Static vars */
+static LIST_HEAD(i7core_edac_list);
+static DEFINE_MUTEX(i7core_edac_lock);
+
+#define PCI_DESCR(device, function, device_id) \
+ .dev = (device), \
+ .func = (function), \
+ .dev_id = (device_id)
+
+struct pci_id_descr pci_dev_descr_i7core[] = {
+ /* Memory controller */
+ { PCI_DESCR(3, 0, PCI_DEVICE_ID_INTEL_I7_MCR) },
+ { PCI_DESCR(3, 1, PCI_DEVICE_ID_INTEL_I7_MC_TAD) },
+ /* Exists only for RDIMM */
+ { PCI_DESCR(3, 2, PCI_DEVICE_ID_INTEL_I7_MC_RAS), .optional = 1 },
+ { PCI_DESCR(3, 4, PCI_DEVICE_ID_INTEL_I7_MC_TEST) },
+
+ /* Channel 0 */
+ { PCI_DESCR(4, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL) },
+ { PCI_DESCR(4, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR) },
+ { PCI_DESCR(4, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK) },
+ { PCI_DESCR(4, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC) },
+
+ /* Channel 1 */
+ { PCI_DESCR(5, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL) },
+ { PCI_DESCR(5, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR) },
+ { PCI_DESCR(5, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK) },
+ { PCI_DESCR(5, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC) },
+
+ /* Channel 2 */
+ { PCI_DESCR(6, 0, PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL) },
+ { PCI_DESCR(6, 1, PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR) },
+ { PCI_DESCR(6, 2, PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK) },
+ { PCI_DESCR(6, 3, PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC) },
+
+ /* Generic Non-core registers */
+ /*
+ * This is the PCI device on i7core and on Xeon 35xx (8086:2c41)
+ * On Xeon 55xx, however, it has a different id (8086:2c40). So,
+ * the probing code needs to test for the other address in case of
+ * failure of this one
+ */
+ { PCI_DESCR(0, 0, PCI_DEVICE_ID_INTEL_I7_NONCORE) },
+
+};
+
+/*
+ * pci_device_id table for which devices we are looking for
+ */
+static const struct pci_device_id i7core_pci_tbl[] __devinitdata = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_X58_HUB_MGMT)},
+ {0,} /* 0 terminated list. */
+};
+
+static struct edac_pci_ctl_info *i7core_pci;
+
+/****************************************************************************
+ Anciliary status routines
+ ****************************************************************************/
+
+ /* MC_CONTROL bits */
+#define CH_ACTIVE(pvt, ch) ((pvt)->info.mc_control & (1 << (8 + ch)))
+#define ECCx8(pvt) ((pvt)->info.mc_control & (1 << 1))
+
+ /* MC_STATUS bits */
+#define ECC_ENABLED(pvt) ((pvt)->info.mc_status & (1 << 4))
+#define CH_DISABLED(pvt, ch) ((pvt)->info.mc_status & (1 << ch))
+
+ /* MC_MAX_DOD read functions */
+static inline int numdimms(u32 dimms)
+{
+ return (dimms & 0x3) + 1;
+}
+
+static inline int numrank(u32 rank)
+{
+ static int ranks[4] = { 1, 2, 4, -EINVAL };
+
+ return ranks[rank & 0x3];
+}
+
+static inline int numbank(u32 bank)
+{
+ static int banks[4] = { 4, 8, 16, -EINVAL };
+
+ return banks[bank & 0x3];
+}
+
+static inline int numrow(u32 row)
+{
+ static int rows[8] = {
+ 1 << 12, 1 << 13, 1 << 14, 1 << 15,
+ 1 << 16, -EINVAL, -EINVAL, -EINVAL,
+ };
+
+ return rows[row & 0x7];
+}
+
+static inline int numcol(u32 col)
+{
+ static int cols[8] = {
+ 1 << 10, 1 << 11, 1 << 12, -EINVAL,
+ };
+ return cols[col & 0x3];
+}
+
+static struct i7core_dev *get_i7core_dev(u8 socket)
+{
+ struct i7core_dev *i7core_dev;
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ if (i7core_dev->socket == socket)
+ return i7core_dev;
+ }
+
+ return NULL;
+}
+
+/****************************************************************************
+ Memory check routines
+ ****************************************************************************/
+static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
+ unsigned func)
+{
+ struct i7core_dev *i7core_dev = get_i7core_dev(socket);
+ int i;
+
+ if (!i7core_dev)
+ return NULL;
+
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ if (!i7core_dev->pdev[i])
+ continue;
+
+ if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
+ PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
+ return i7core_dev->pdev[i];
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * i7core_get_active_channels() - gets the number of channels and csrows
+ * @socket: Quick Path Interconnect socket
+ * @channels: Number of channels that will be returned
+ * @csrows: Number of csrows found
+ *
+ * Since EDAC core needs to know in advance the number of available channels
+ * and csrows, in order to allocate memory for csrows/channels, it is needed
+ * to run two similar steps. At the first step, implemented on this function,
+ * it checks the number of csrows/channels present at one socket.
+ * this is used in order to properly allocate the size of mci components.
+ *
+ * It should be noticed that none of the current available datasheets explain
+ * or even mention how csrows are seen by the memory controller. So, we need
+ * to add a fake description for csrows.
+ * So, this driver is attributing one DIMM memory for one csrow.
+ */
+static int i7core_get_active_channels(u8 socket, unsigned *channels,
+ unsigned *csrows)
+{
+ struct pci_dev *pdev = NULL;
+ int i, j;
+ u32 status, control;
+
+ *channels = 0;
+ *csrows = 0;
+
+ pdev = get_pdev_slot_func(socket, 3, 0);
+ if (!pdev) {
+ i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
+ socket);
+ return -ENODEV;
+ }
+
+ /* Device 3 function 0 reads */
+ pci_read_config_dword(pdev, MC_STATUS, &status);
+ pci_read_config_dword(pdev, MC_CONTROL, &control);
+
+ for (i = 0; i < NUM_CHANS; i++) {
+ u32 dimm_dod[3];
+ /* Check if the channel is active */
+ if (!(control & (1 << (8 + i))))
+ continue;
+
+ /* Check if the channel is disabled */
+ if (status & (1 << i))
+ continue;
+
+ pdev = get_pdev_slot_func(socket, i + 4, 1);
+ if (!pdev) {
+ i7core_printk(KERN_ERR, "Couldn't find socket %d "
+ "fn %d.%d!!!\n",
+ socket, i + 4, 1);
+ return -ENODEV;
+ }
+ /* Devices 4-6 function 1 */
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM0, &dimm_dod[0]);
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM1, &dimm_dod[1]);
+ pci_read_config_dword(pdev,
+ MC_DOD_CH_DIMM2, &dimm_dod[2]);
+
+ (*channels)++;
+
+ for (j = 0; j < 3; j++) {
+ if (!DIMM_PRESENT(dimm_dod[j]))
+ continue;
+ (*csrows)++;
+ }
+ }
+
+ debugf0("Number of active channels on socket %d: %d\n",
+ socket, *channels);
+
+ return 0;
+}
+
+static int get_dimm_config(struct mem_ctl_info *mci, int *csrow)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct csrow_info *csr;
+ struct pci_dev *pdev;
+ int i, j;
+ unsigned long last_page = 0;
+ enum edac_type mode;
+ enum mem_type mtype;
+
+ /* Get data from the MC register, function 0 */
+ pdev = pvt->pci_mcr[0];
+ if (!pdev)
+ return -ENODEV;
+
+ /* Device 3 function 0 reads */
+ pci_read_config_dword(pdev, MC_CONTROL, &pvt->info.mc_control);
+ pci_read_config_dword(pdev, MC_STATUS, &pvt->info.mc_status);
+ pci_read_config_dword(pdev, MC_MAX_DOD, &pvt->info.max_dod);
+ pci_read_config_dword(pdev, MC_CHANNEL_MAPPER, &pvt->info.ch_map);
+
+ debugf0("QPI %d control=0x%08x status=0x%08x dod=0x%08x map=0x%08x\n",
+ pvt->i7core_dev->socket, pvt->info.mc_control, pvt->info.mc_status,
+ pvt->info.max_dod, pvt->info.ch_map);
+
+ if (ECC_ENABLED(pvt)) {
+ debugf0("ECC enabled with x%d SDCC\n", ECCx8(pvt) ? 8 : 4);
+ if (ECCx8(pvt))
+ mode = EDAC_S8ECD8ED;
+ else
+ mode = EDAC_S4ECD4ED;
+ } else {
+ debugf0("ECC disabled\n");
+ mode = EDAC_NONE;
+ }
+
+ /* FIXME: need to handle the error codes */
+ debugf0("DOD Max limits: DIMMS: %d, %d-ranked, %d-banked "
+ "x%x x 0x%x\n",
+ numdimms(pvt->info.max_dod),
+ numrank(pvt->info.max_dod >> 2),
+ numbank(pvt->info.max_dod >> 4),
+ numrow(pvt->info.max_dod >> 6),
+ numcol(pvt->info.max_dod >> 9));
+
+ for (i = 0; i < NUM_CHANS; i++) {
+ u32 data, dimm_dod[3], value[8];
+
+ if (!CH_ACTIVE(pvt, i)) {
+ debugf0("Channel %i is not active\n", i);
+ continue;
+ }
+ if (CH_DISABLED(pvt, i)) {
+ debugf0("Channel %i is disabled\n", i);
+ continue;
+ }
+
+ /* Devices 4-6 function 0 */
+ pci_read_config_dword(pvt->pci_ch[i][0],
+ MC_CHANNEL_DIMM_INIT_PARAMS, &data);
+
+ pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ?
+ 4 : 2;
+
+ if (data & REGISTERED_DIMM)
+ mtype = MEM_RDDR3;
+ else
+ mtype = MEM_DDR3;
+#if 0
+ if (data & THREE_DIMMS_PRESENT)
+ pvt->channel[i].dimms = 3;
+ else if (data & SINGLE_QUAD_RANK_PRESENT)
+ pvt->channel[i].dimms = 1;
+ else
+ pvt->channel[i].dimms = 2;
+#endif
+
+ /* Devices 4-6 function 1 */
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM0, &dimm_dod[0]);
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM1, &dimm_dod[1]);
+ pci_read_config_dword(pvt->pci_ch[i][1],
+ MC_DOD_CH_DIMM2, &dimm_dod[2]);
+
+ debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
+ "%d ranks, %cDIMMs\n",
+ i,
+ RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
+ data,
+ pvt->channel[i].ranks,
+ (data & REGISTERED_DIMM) ? 'R' : 'U');
+
+ for (j = 0; j < 3; j++) {
+ u32 banks, ranks, rows, cols;
+ u32 size, npages;
+
+ if (!DIMM_PRESENT(dimm_dod[j]))
+ continue;
+
+ banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
+ ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
+ rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
+ cols = numcol(MC_DOD_NUMCOL(dimm_dod[j]));
+
+ /* DDR3 has 8 I/O banks */
+ size = (rows * cols * banks * ranks) >> (20 - 3);
+
+ pvt->channel[i].dimms++;
+
+ debugf0("\tdimm %d %d Mb offset: %x, "
+ "bank: %d, rank: %d, row: %#x, col: %#x\n",
+ j, size,
+ RANKOFFSET(dimm_dod[j]),
+ banks, ranks, rows, cols);
+
+#if PAGE_SHIFT > 20
+ npages = size >> (PAGE_SHIFT - 20);
+#else
+ npages = size << (20 - PAGE_SHIFT);
+#endif
+
+ csr = &mci->csrows[*csrow];
+ csr->first_page = last_page + 1;
+ last_page += npages;
+ csr->last_page = last_page;
+ csr->nr_pages = npages;
+
+ csr->page_mask = 0;
+ csr->grain = 8;
+ csr->csrow_idx = *csrow;
+ csr->nr_channels = 1;
+
+ csr->channels[0].chan_idx = i;
+ csr->channels[0].ce_count = 0;
+
+ pvt->csrow_map[i][j] = *csrow;
+
+ switch (banks) {
+ case 4:
+ csr->dtype = DEV_X4;
+ break;
+ case 8:
+ csr->dtype = DEV_X8;
+ break;
+ case 16:
+ csr->dtype = DEV_X16;
+ break;
+ default:
+ csr->dtype = DEV_UNKNOWN;
+ }
+
+ csr->edac_mode = mode;
+ csr->mtype = mtype;
+
+ (*csrow)++;
+ }
+
+ pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
+ pci_read_config_dword(pdev, MC_SAG_CH_1, &value[1]);
+ pci_read_config_dword(pdev, MC_SAG_CH_2, &value[2]);
+ pci_read_config_dword(pdev, MC_SAG_CH_3, &value[3]);
+ pci_read_config_dword(pdev, MC_SAG_CH_4, &value[4]);
+ pci_read_config_dword(pdev, MC_SAG_CH_5, &value[5]);
+ pci_read_config_dword(pdev, MC_SAG_CH_6, &value[6]);
+ pci_read_config_dword(pdev, MC_SAG_CH_7, &value[7]);
+ debugf1("\t[%i] DIVBY3\tREMOVED\tOFFSET\n", i);
+ for (j = 0; j < 8; j++)
+ debugf1("\t\t%#x\t%#x\t%#x\n",
+ (value[j] >> 27) & 0x1,
+ (value[j] >> 24) & 0x7,
+ (value[j] && ((1 << 24) - 1)));
+ }
+
+ return 0;
+}
+
+/****************************************************************************
+ Error insertion routines
+ ****************************************************************************/
+
+/* The i7core has independent error injection features per channel.
+ However, to have a simpler code, we don't allow enabling error injection
+ on more than one channel.
+ Also, since a change at an inject parameter will be applied only at enable,
+ we're disabling error injection on all write calls to the sysfs nodes that
+ controls the error code injection.
+ */
+static int disable_inject(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ pvt->inject.enable = 0;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return -ENODEV;
+
+ pci_write_config_dword(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, 0);
+
+ return 0;
+}
+
+/*
+ * i7core inject inject.section
+ *
+ * accept and store error injection inject.section value
+ * bit 0 - refers to the lower 32-byte half cacheline
+ * bit 1 - refers to the upper 32-byte half cacheline
+ */
+static ssize_t i7core_inject_section_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if ((rc < 0) || (value > 3))
+ return -EIO;
+
+ pvt->inject.section = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_section_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.section);
+}
+
+/*
+ * i7core inject.type
+ *
+ * accept and store error injection inject.section value
+ * bit 0 - repeat enable - Enable error repetition
+ * bit 1 - inject ECC error
+ * bit 2 - inject parity error
+ */
+static ssize_t i7core_inject_type_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if ((rc < 0) || (value > 7))
+ return -EIO;
+
+ pvt->inject.type = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_type_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.type);
+}
+
+/*
+ * i7core_inject_inject.eccmask_store
+ *
+ * The type of error (UE/CE) will depend on the inject.eccmask value:
+ * Any bits set to a 1 will flip the corresponding ECC bit
+ * Correctable errors can be injected by flipping 1 bit or the bits within
+ * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
+ * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
+ * uncorrectable error to be injected.
+ */
+static ssize_t i7core_inject_eccmask_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ unsigned long value;
+ int rc;
+
+ if (pvt->inject.enable)
+ disable_inject(mci);
+
+ rc = strict_strtoul(data, 10, &value);
+ if (rc < 0)
+ return -EIO;
+
+ pvt->inject.eccmask = (u32) value;
+ return count;
+}
+
+static ssize_t i7core_inject_eccmask_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ return sprintf(data, "0x%08x\n", pvt->inject.eccmask);
+}
+
+/*
+ * i7core_addrmatch
+ *
+ * The type of error (UE/CE) will depend on the inject.eccmask value:
+ * Any bits set to a 1 will flip the corresponding ECC bit
+ * Correctable errors can be injected by flipping 1 bit or the bits within
+ * a symbol pair (2 consecutive aligned 8-bit pairs - i.e. 7:0 and 15:8 or
+ * 23:16 and 31:24). Flipping bits in two symbol pairs will cause an
+ * uncorrectable error to be injected.
+ */
+
+#define DECLARE_ADDR_MATCH(param, limit) \
+static ssize_t i7core_inject_store_##param( \
+ struct mem_ctl_info *mci, \
+ const char *data, size_t count) \
+{ \
+ struct i7core_pvt *pvt; \
+ long value; \
+ int rc; \
+ \
+ debugf1("%s()\n", __func__); \
+ pvt = mci->pvt_info; \
+ \
+ if (pvt->inject.enable) \
+ disable_inject(mci); \
+ \
+ if (!strcasecmp(data, "any") || !strcasecmp(data, "any\n"))\
+ value = -1; \
+ else { \
+ rc = strict_strtoul(data, 10, &value); \
+ if ((rc < 0) || (value >= limit)) \
+ return -EIO; \
+ } \
+ \
+ pvt->inject.param = value; \
+ \
+ return count; \
+} \
+ \
+static ssize_t i7core_inject_show_##param( \
+ struct mem_ctl_info *mci, \
+ char *data) \
+{ \
+ struct i7core_pvt *pvt; \
+ \
+ pvt = mci->pvt_info; \
+ debugf1("%s() pvt=%p\n", __func__, pvt); \
+ if (pvt->inject.param < 0) \
+ return sprintf(data, "any\n"); \
+ else \
+ return sprintf(data, "%d\n", pvt->inject.param);\
+}
+
+#define ATTR_ADDR_MATCH(param) \
+ { \
+ .attr = { \
+ .name = #param, \
+ .mode = (S_IRUGO | S_IWUSR) \
+ }, \
+ .show = i7core_inject_show_##param, \
+ .store = i7core_inject_store_##param, \
+ }
+
+DECLARE_ADDR_MATCH(channel, 3);
+DECLARE_ADDR_MATCH(dimm, 3);
+DECLARE_ADDR_MATCH(rank, 4);
+DECLARE_ADDR_MATCH(bank, 32);
+DECLARE_ADDR_MATCH(page, 0x10000);
+DECLARE_ADDR_MATCH(col, 0x4000);
+
+static int write_and_test(struct pci_dev *dev, int where, u32 val)
+{
+ u32 read;
+ int count;
+
+ debugf0("setting pci %02x:%02x.%x reg=%02x value=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val);
+
+ for (count = 0; count < 10; count++) {
+ if (count)
+ msleep(100);
+ pci_write_config_dword(dev, where, val);
+ pci_read_config_dword(dev, where, &read);
+
+ if (read == val)
+ return 0;
+ }
+
+ i7core_printk(KERN_ERR, "Error during set pci %02x:%02x.%x reg=%02x "
+ "write=%08x. Read=%08x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+ where, val, read);
+
+ return -EINVAL;
+}
+
+/*
+ * This routine prepares the Memory Controller for error injection.
+ * The error will be injected when some process tries to write to the
+ * memory that matches the given criteria.
+ * The criteria can be set in terms of a mask where dimm, rank, bank, page
+ * and col can be specified.
+ * A -1 value for any of the mask items will make the MCU to ignore
+ * that matching criteria for error injection.
+ *
+ * It should be noticed that the error will only happen after a write operation
+ * on a memory that matches the condition. if REPEAT_EN is not enabled at
+ * inject mask, then it will produce just one error. Otherwise, it will repeat
+ * until the injectmask would be cleaned.
+ *
+ * FIXME: This routine assumes that MAXNUMDIMMS value of MC_MAX_DOD
+ * is reliable enough to check if the MC is using the
+ * three channels. However, this is not clear at the datasheet.
+ */
+static ssize_t i7core_inject_enable_store(struct mem_ctl_info *mci,
+ const char *data, size_t count)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 injectmask;
+ u64 mask = 0;
+ int rc;
+ long enable;
+
+ if (!pvt->pci_ch[pvt->inject.channel][0])
+ return 0;
+
+ rc = strict_strtoul(data, 10, &enable);
+ if ((rc < 0))
+ return 0;
+
+ if (enable) {
+ pvt->inject.enable = 1;
+ } else {
+ disable_inject(mci);
+ return count;
+ }
+
+ /* Sets pvt->inject.dimm mask */
+ if (pvt->inject.dimm < 0)
+ mask |= 1LL << 41;
+ else {
+ if (pvt->channel[pvt->inject.channel].dimms > 2)
+ mask |= (pvt->inject.dimm & 0x3LL) << 35;
+ else
+ mask |= (pvt->inject.dimm & 0x1LL) << 36;
+ }
+
+ /* Sets pvt->inject.rank mask */
+ if (pvt->inject.rank < 0)
+ mask |= 1LL << 40;
+ else {
+ if (pvt->channel[pvt->inject.channel].dimms > 2)
+ mask |= (pvt->inject.rank & 0x1LL) << 34;
+ else
+ mask |= (pvt->inject.rank & 0x3LL) << 34;
+ }
+
+ /* Sets pvt->inject.bank mask */
+ if (pvt->inject.bank < 0)
+ mask |= 1LL << 39;
+ else
+ mask |= (pvt->inject.bank & 0x15LL) << 30;
+
+ /* Sets pvt->inject.page mask */
+ if (pvt->inject.page < 0)
+ mask |= 1LL << 38;
+ else
+ mask |= (pvt->inject.page & 0xffff) << 14;
+
+ /* Sets pvt->inject.column mask */
+ if (pvt->inject.col < 0)
+ mask |= 1LL << 37;
+ else
+ mask |= (pvt->inject.col & 0x3fff);
+
+ /*
+ * bit 0: REPEAT_EN
+ * bits 1-2: MASK_HALF_CACHELINE
+ * bit 3: INJECT_ECC
+ * bit 4: INJECT_ADDR_PARITY
+ */
+
+ injectmask = (pvt->inject.type & 1) |
+ (pvt->inject.section & 0x3) << 1 |
+ (pvt->inject.type & 0x6) << (3 - 1);
+
+ /* Unlock writes to registers - this register is write only */
+ pci_write_config_dword(pvt->pci_noncore,
+ MC_CFG_CONTROL, 0x2);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ADDR_MATCH, mask);
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ADDR_MATCH + 4, mask >> 32L);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_MASK, pvt->inject.eccmask);
+
+ write_and_test(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, injectmask);
+
+ /*
+ * This is something undocumented, based on my tests
+ * Without writing 8 to this register, errors aren't injected. Not sure
+ * why.
+ */
+ pci_write_config_dword(pvt->pci_noncore,
+ MC_CFG_CONTROL, 8);
+
+ debugf0("Error inject addr match 0x%016llx, ecc 0x%08x,"
+ " inject 0x%08x\n",
+ mask, pvt->inject.eccmask, injectmask);
+
+
+ return count;
+}
+
+static ssize_t i7core_inject_enable_show(struct mem_ctl_info *mci,
+ char *data)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 injectmask;
+
+ pci_read_config_dword(pvt->pci_ch[pvt->inject.channel][0],
+ MC_CHANNEL_ERROR_INJECT, &injectmask);
+
+ debugf0("Inject error read: 0x%018x\n", injectmask);
+
+ if (injectmask & 0x0c)
+ pvt->inject.enable = 1;
+
+ return sprintf(data, "%d\n", pvt->inject.enable);
+}
+
+#define DECLARE_COUNTER(param) \
+static ssize_t i7core_show_counter_##param( \
+ struct mem_ctl_info *mci, \
+ char *data) \
+{ \
+ struct i7core_pvt *pvt = mci->pvt_info; \
+ \
+ debugf1("%s() \n", __func__); \
+ if (!pvt->ce_count_available || (pvt->is_registered)) \
+ return sprintf(data, "data unavailable\n"); \
+ return sprintf(data, "%lu\n", \
+ pvt->udimm_ce_count[param]); \
+}
+
+#define ATTR_COUNTER(param) \
+ { \
+ .attr = { \
+ .name = __stringify(udimm##param), \
+ .mode = (S_IRUGO | S_IWUSR) \
+ }, \
+ .show = i7core_show_counter_##param \
+ }
+
+DECLARE_COUNTER(0);
+DECLARE_COUNTER(1);
+DECLARE_COUNTER(2);
+
+/*
+ * Sysfs struct
+ */
+
+
+static struct mcidev_sysfs_attribute i7core_addrmatch_attrs[] = {
+ ATTR_ADDR_MATCH(channel),
+ ATTR_ADDR_MATCH(dimm),
+ ATTR_ADDR_MATCH(rank),
+ ATTR_ADDR_MATCH(bank),
+ ATTR_ADDR_MATCH(page),
+ ATTR_ADDR_MATCH(col),
+ { .attr = { .name = NULL } }
+};
+
+static struct mcidev_sysfs_group i7core_inject_addrmatch = {
+ .name = "inject_addrmatch",
+ .mcidev_attr = i7core_addrmatch_attrs,
+};
+
+static struct mcidev_sysfs_attribute i7core_udimm_counters_attrs[] = {
+ ATTR_COUNTER(0),
+ ATTR_COUNTER(1),
+ ATTR_COUNTER(2),
+};
+
+static struct mcidev_sysfs_group i7core_udimm_counters = {
+ .name = "all_channel_counts",
+ .mcidev_attr = i7core_udimm_counters_attrs,
+};
+
+static struct mcidev_sysfs_attribute i7core_sysfs_attrs[] = {
+ {
+ .attr = {
+ .name = "inject_section",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_section_show,
+ .store = i7core_inject_section_store,
+ }, {
+ .attr = {
+ .name = "inject_type",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_type_show,
+ .store = i7core_inject_type_store,
+ }, {
+ .attr = {
+ .name = "inject_eccmask",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_eccmask_show,
+ .store = i7core_inject_eccmask_store,
+ }, {
+ .grp = &i7core_inject_addrmatch,
+ }, {
+ .attr = {
+ .name = "inject_enable",
+ .mode = (S_IRUGO | S_IWUSR)
+ },
+ .show = i7core_inject_enable_show,
+ .store = i7core_inject_enable_store,
+ },
+ { .attr = { .name = NULL } }, /* Reserved for udimm counters */
+ { .attr = { .name = NULL } }
+};
+
+/****************************************************************************
+ Device initialization routines: put/get, init/exit
+ ****************************************************************************/
+
+/*
+ * i7core_put_devices 'put' all the devices that we have
+ * reserved via 'get'
+ */
+static void i7core_put_devices(struct i7core_dev *i7core_dev)
+{
+ int i;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ struct pci_dev *pdev = i7core_dev->pdev[i];
+ if (!pdev)
+ continue;
+ debugf0("Removing dev %02x:%02x.%d\n",
+ pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ pci_dev_put(pdev);
+ }
+ kfree(i7core_dev->pdev);
+ list_del(&i7core_dev->list);
+ kfree(i7core_dev);
+}
+
+static void i7core_put_all_devices(void)
+{
+ struct i7core_dev *i7core_dev, *tmp;
+
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list)
+ i7core_put_devices(i7core_dev);
+}
+
+static void i7core_xeon_pci_fixup(int dev_id)
+{
+ struct pci_dev *pdev = NULL;
+ int i;
+ /*
+ * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core pci buses
+ * aren't announced by acpi. So, we need to use a legacy scan probing
+ * to detect them
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+ if (unlikely(!pdev)) {
+ for (i = 0; i < MAX_SOCKET_BUSES; i++)
+ pcibios_scan_specific_bus(255-i);
+ }
+}
+
+/*
+ * i7core_get_devices Find and perform 'get' operation on the MCH's
+ * device/functions we want to reference for this driver
+ *
+ * Need to 'get' device 16 func 1 and func 2
+ */
+int i7core_get_onedevice(struct pci_dev **prev, int devno,
+ struct pci_id_descr *dev_descr, unsigned n_devs)
+{
+ struct i7core_dev *i7core_dev;
+
+ struct pci_dev *pdev = NULL;
+ u8 bus = 0;
+ u8 socket = 0;
+
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ dev_descr->dev_id, *prev);
+
+ /*
+ * On Xeon 55xx, the Intel Quckpath Arch Generic Non-core regs
+ * is at addr 8086:2c40, instead of 8086:2c41. So, we need
+ * to probe for the alternate address in case of failure
+ */
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+
+ if (!pdev) {
+ if (*prev) {
+ *prev = pdev;
+ return 0;
+ }
+
+ if (dev_descr->optional)
+ return 0;
+
+ i7core_printk(KERN_ERR,
+ "Device not found: dev %02x.%d PCI ID %04x:%04x\n",
+ dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ /* End of list, leave */
+ return -ENODEV;
+ }
+ bus = pdev->bus->number;
+
+ if (bus == 0x3f)
+ socket = 0;
+ else
+ socket = 255 - bus;
+
+ i7core_dev = get_i7core_dev(socket);
+ if (!i7core_dev) {
+ i7core_dev = kzalloc(sizeof(*i7core_dev), GFP_KERNEL);
+ if (!i7core_dev)
+ return -ENOMEM;
+ i7core_dev->pdev = kzalloc(sizeof(*i7core_dev->pdev) * n_devs,
+ GFP_KERNEL);
+ if (!i7core_dev->pdev)
+ return -ENOMEM;
+ i7core_dev->socket = socket;
+ i7core_dev->n_devs = n_devs;
+ list_add_tail(&i7core_dev->list, &i7core_edac_list);
+ }
+
+ if (i7core_dev->pdev[devno]) {
+ i7core_printk(KERN_ERR,
+ "Duplicated device for "
+ "dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ pci_dev_put(pdev);
+ return -ENODEV;
+ }
+
+ i7core_dev->pdev[devno] = pdev;
+
+ /* Sanity check */
+ if (unlikely(PCI_SLOT(pdev->devfn) != dev_descr->dev ||
+ PCI_FUNC(pdev->devfn) != dev_descr->func)) {
+ i7core_printk(KERN_ERR,
+ "Device PCI ID %04x:%04x "
+ "has dev %02x:%02x.%d instead of dev %02x:%02x.%d\n",
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id,
+ bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ bus, dev_descr->dev, dev_descr->func);
+ return -ENODEV;
+ }
+
+ /* Be sure that the device is enabled */
+ if (unlikely(pci_enable_device(pdev) < 0)) {
+ i7core_printk(KERN_ERR,
+ "Couldn't enable "
+ "dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ bus, dev_descr->dev, dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+ return -ENODEV;
+ }
+
+ debugf0("Detected socket %d dev %02x:%02x.%d PCI ID %04x:%04x\n",
+ socket, bus, dev_descr->dev,
+ dev_descr->func,
+ PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
+
+ *prev = pdev;
+
+ return 0;
+}
+
+static int i7core_get_devices(struct pci_id_descr dev_descr[], unsigned n_devs)
+{
+ int i, rc;
+ struct pci_dev *pdev = NULL;
+
+ for (i = 0; i < n_devs; i++) {
+ pdev = NULL;
+ do {
+ rc = i7core_get_onedevice(&pdev, i, &dev_descr[i],
+ n_devs);
+ if (rc < 0) {
+ i7core_put_all_devices();
+ return -ENODEV;
+ }
+ } while (pdev);
+ }
+
+ return 0;
+}
+
+static int mci_bind_devs(struct mem_ctl_info *mci,
+ struct i7core_dev *i7core_dev)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ struct pci_dev *pdev;
+ int i, func, slot;
+
+ /* Associates i7core_dev and mci for future usage */
+ pvt->i7core_dev = i7core_dev;
+ i7core_dev->mci = mci;
+
+ pvt->is_registered = 0;
+ for (i = 0; i < i7core_dev->n_devs; i++) {
+ pdev = i7core_dev->pdev[i];
+ if (!pdev)
+ continue;
+
+ func = PCI_FUNC(pdev->devfn);
+ slot = PCI_SLOT(pdev->devfn);
+ if (slot == 3) {
+ if (unlikely(func > MAX_MCR_FUNC))
+ goto error;
+ pvt->pci_mcr[func] = pdev;
+ } else if (likely(slot >= 4 && slot < 4 + NUM_CHANS)) {
+ if (unlikely(func > MAX_CHAN_FUNC))
+ goto error;
+ pvt->pci_ch[slot - 4][func] = pdev;
+ } else if (!slot && !func)
+ pvt->pci_noncore = pdev;
+ else
+ goto error;
+
+ debugf0("Associated fn %d.%d, dev = %p, socket %d\n",
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pdev, i7core_dev->socket);
+
+ if (PCI_SLOT(pdev->devfn) == 3 &&
+ PCI_FUNC(pdev->devfn) == 2)
+ pvt->is_registered = 1;
+ }
+
+ /*
+ * Add extra nodes to count errors on udimm
+ * For registered memory, this is not needed, since the counters
+ * are already displayed at the standard locations
+ */
+ if (!pvt->is_registered)
+ i7core_sysfs_attrs[ARRAY_SIZE(i7core_sysfs_attrs)-2].grp =
+ &i7core_udimm_counters;
+
+ return 0;
+
+error:
+ i7core_printk(KERN_ERR, "Device %d, function %d "
+ "is out of the expected range\n",
+ slot, func);
+ return -EINVAL;
+}
+
+/****************************************************************************
+ Error check routines
+ ****************************************************************************/
+static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci,
+ int chan, int dimm, int add)
+{
+ char *msg;
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int row = pvt->csrow_map[chan][dimm], i;
+
+ for (i = 0; i < add; i++) {
+ msg = kasprintf(GFP_KERNEL, "Corrected error "
+ "(Socket=%d channel=%d dimm=%d)",
+ pvt->i7core_dev->socket, chan, dimm);
+
+ edac_mc_handle_fbd_ce(mci, row, 0, msg);
+ kfree (msg);
+ }
+}
+
+static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
+ int chan, int new0, int new1, int new2)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int add0 = 0, add1 = 0, add2 = 0;
+ /* Updates CE counters if it is not the first time here */
+ if (pvt->ce_count_available) {
+ /* Updates CE counters */
+
+ add2 = new2 - pvt->rdimm_last_ce_count[chan][2];
+ add1 = new1 - pvt->rdimm_last_ce_count[chan][1];
+ add0 = new0 - pvt->rdimm_last_ce_count[chan][0];
+
+ if (add2 < 0)
+ add2 += 0x7fff;
+ pvt->rdimm_ce_count[chan][2] += add2;
+
+ if (add1 < 0)
+ add1 += 0x7fff;
+ pvt->rdimm_ce_count[chan][1] += add1;
+
+ if (add0 < 0)
+ add0 += 0x7fff;
+ pvt->rdimm_ce_count[chan][0] += add0;
+ } else
+ pvt->ce_count_available = 1;
+
+ /* Store the new values */
+ pvt->rdimm_last_ce_count[chan][2] = new2;
+ pvt->rdimm_last_ce_count[chan][1] = new1;
+ pvt->rdimm_last_ce_count[chan][0] = new0;
+
+ /*updated the edac core */
+ if (add0 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 0, add0);
+ if (add1 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 1, add1);
+ if (add2 != 0)
+ i7core_rdimm_update_csrow(mci, chan, 2, add2);
+
+}
+
+static void i7core_rdimm_check_mc_ecc_err(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 rcv[3][2];
+ int i, new0, new1, new2;
+
+ /*Read DEV 3: FUN 2: MC_COR_ECC_CNT regs directly*/
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_0,
+ &rcv[0][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_1,
+ &rcv[0][1]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_2,
+ &rcv[1][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_3,
+ &rcv[1][1]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_4,
+ &rcv[2][0]);
+ pci_read_config_dword(pvt->pci_mcr[2], MC_COR_ECC_CNT_5,
+ &rcv[2][1]);
+ for (i = 0 ; i < 3; i++) {
+ debugf3("MC_COR_ECC_CNT%d = 0x%x; MC_COR_ECC_CNT%d = 0x%x\n",
+ (i * 2), rcv[i][0], (i * 2) + 1, rcv[i][1]);
+ /*if the channel has 3 dimms*/
+ if (pvt->channel[i].dimms > 2) {
+ new0 = DIMM_BOT_COR_ERR(rcv[i][0]);
+ new1 = DIMM_TOP_COR_ERR(rcv[i][0]);
+ new2 = DIMM_BOT_COR_ERR(rcv[i][1]);
+ } else {
+ new0 = DIMM_TOP_COR_ERR(rcv[i][0]) +
+ DIMM_BOT_COR_ERR(rcv[i][0]);
+ new1 = DIMM_TOP_COR_ERR(rcv[i][1]) +
+ DIMM_BOT_COR_ERR(rcv[i][1]);
+ new2 = 0;
+ }
+
+ i7core_rdimm_update_ce_count(mci, i, new0, new1, new2);
+ }
+}
+
+/* This function is based on the device 3 function 4 registers as described on:
+ * Intel Xeon Processor 5500 Series Datasheet Volume 2
+ * http://www.intel.com/Assets/PDF/datasheet/321322.pdf
+ * also available at:
+ * http://www.arrownac.com/manufacturers/intel/s/nehalem/5500-datasheet-v2.pdf
+ */
+static void i7core_udimm_check_mc_ecc_err(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ u32 rcv1, rcv0;
+ int new0, new1, new2;
+
+ if (!pvt->pci_mcr[4]) {
+ debugf0("%s MCR registers not found\n", __func__);
+ return;
+ }
+
+ /* Corrected test errors */
+ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV1, &rcv1);
+ pci_read_config_dword(pvt->pci_mcr[4], MC_TEST_ERR_RCV0, &rcv0);
+
+ /* Store the new values */
+ new2 = DIMM2_COR_ERR(rcv1);
+ new1 = DIMM1_COR_ERR(rcv0);
+ new0 = DIMM0_COR_ERR(rcv0);
+
+ /* Updates CE counters if it is not the first time here */
+ if (pvt->ce_count_available) {
+ /* Updates CE counters */
+ int add0, add1, add2;
+
+ add2 = new2 - pvt->udimm_last_ce_count[2];
+ add1 = new1 - pvt->udimm_last_ce_count[1];
+ add0 = new0 - pvt->udimm_last_ce_count[0];
+
+ if (add2 < 0)
+ add2 += 0x7fff;
+ pvt->udimm_ce_count[2] += add2;
+
+ if (add1 < 0)
+ add1 += 0x7fff;
+ pvt->udimm_ce_count[1] += add1;
+
+ if (add0 < 0)
+ add0 += 0x7fff;
+ pvt->udimm_ce_count[0] += add0;
+
+ if (add0 | add1 | add2)
+ i7core_printk(KERN_ERR, "New Corrected error(s): "
+ "dimm0: +%d, dimm1: +%d, dimm2 +%d\n",
+ add0, add1, add2);
+ } else
+ pvt->ce_count_available = 1;
+
+ /* Store the new values */
+ pvt->udimm_last_ce_count[2] = new2;
+ pvt->udimm_last_ce_count[1] = new1;
+ pvt->udimm_last_ce_count[0] = new0;
+}
+
+/*
+ * According with tables E-11 and E-12 of chapter E.3.3 of Intel 64 and IA-32
+ * Architectures Software Developer’s Manual Volume 3B.
+ * Nehalem are defined as family 0x06, model 0x1a
+ *
+ * The MCA registers used here are the following ones:
+ * struct mce field MCA Register
+ * m->status MSR_IA32_MC8_STATUS
+ * m->addr MSR_IA32_MC8_ADDR
+ * m->misc MSR_IA32_MC8_MISC
+ * In the case of Nehalem, the error information is masked at .status and .misc
+ * fields
+ */
+static void i7core_mce_output_error(struct mem_ctl_info *mci,
+ struct mce *m)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ char *type, *optype, *err, *msg;
+ unsigned long error = m->status & 0x1ff0000l;
+ u32 optypenum = (m->status >> 4) & 0x07;
+ u32 core_err_cnt = (m->status >> 38) && 0x7fff;
+ u32 dimm = (m->misc >> 16) & 0x3;
+ u32 channel = (m->misc >> 18) & 0x3;
+ u32 syndrome = m->misc >> 32;
+ u32 errnum = find_first_bit(&error, 32);
+ int csrow;
+
+ if (m->mcgstatus & 1)
+ type = "FATAL";
+ else
+ type = "NON_FATAL";
+
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request";
+ break;
+ case 1:
+ optype = "read error";
+ break;
+ case 2:
+ optype = "write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
+ }
+
+ switch (errnum) {
+ case 16:
+ err = "read ECC error";
+ break;
+ case 17:
+ err = "RAS ECC error";
+ break;
+ case 18:
+ err = "write parity error";
+ break;
+ case 19:
+ err = "redundacy loss";
+ break;
+ case 20:
+ err = "reserved";
+ break;
+ case 21:
+ err = "memory range error";
+ break;
+ case 22:
+ err = "RTID out of range";
+ break;
+ case 23:
+ err = "address parity error";
+ break;
+ case 24:
+ err = "byte enable parity error";
+ break;
+ default:
+ err = "unknown";
+ }
+
+ /* FIXME: should convert addr into bank and rank information */
+ msg = kasprintf(GFP_ATOMIC,
+ "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
+ "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
+ type, (long long) m->addr, m->cpu, dimm, channel,
+ syndrome, core_err_cnt, (long long)m->status,
+ (long long)m->misc, optype, err);
+
+ debugf0("%s", msg);
+
+ csrow = pvt->csrow_map[channel][dimm];
+
+ /* Call the helper to output message */
+ if (m->mcgstatus & 1)
+ edac_mc_handle_fbd_ue(mci, csrow, 0,
+ 0 /* FIXME: should be channel here */, msg);
+ else if (!pvt->is_registered)
+ edac_mc_handle_fbd_ce(mci, csrow,
+ 0 /* FIXME: should be channel here */, msg);
+
+ kfree(msg);
+}
+
+/*
+ * i7core_check_error Retrieve and process errors reported by the
+ * hardware. Called by the Core module.
+ */
+static void i7core_check_error(struct mem_ctl_info *mci)
+{
+ struct i7core_pvt *pvt = mci->pvt_info;
+ int i;
+ unsigned count = 0;
+ struct mce *m;
+
+ /*
+ * MCE first step: Copy all mce errors into a temporary buffer
+ * We use a double buffering here, to reduce the risk of
+ * loosing an error.
+ */
+ smp_rmb();
+ count = (pvt->mce_out + MCE_LOG_LEN - pvt->mce_in)
+ % MCE_LOG_LEN;
+ if (!count)
+ return;
+
+ m = pvt->mce_outentry;
+ if (pvt->mce_in + count > MCE_LOG_LEN) {
+ unsigned l = MCE_LOG_LEN - pvt->mce_in;
+
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * l);
+ smp_wmb();
+ pvt->mce_in = 0;
+ count -= l;
+ m += l;
+ }
+ memcpy(m, &pvt->mce_entry[pvt->mce_in], sizeof(*m) * count);
+ smp_wmb();
+ pvt->mce_in += count;
+
+ smp_rmb();
+ if (pvt->mce_overrun) {
+ i7core_printk(KERN_ERR, "Lost %d memory errors\n",
+ pvt->mce_overrun);
+ smp_wmb();
+ pvt->mce_overrun = 0;
+ }
+
+ /*
+ * MCE second step: parse errors and display
+ */
+ for (i = 0; i < count; i++)
+ i7core_mce_output_error(mci, &pvt->mce_outentry[i]);
+
+ /*
+ * Now, let's increment CE error counts
+ */
+ if (!pvt->is_registered)
+ i7core_udimm_check_mc_ecc_err(mci);
+ else
+ i7core_rdimm_check_mc_ecc_err(mci);
+}
+
+/*
+ * i7core_mce_check_error Replicates mcelog routine to get errors
+ * This routine simply queues mcelog errors, and
+ * return. The error itself should be handled later
+ * by i7core_check_error.
+ * WARNING: As this routine should be called at NMI time, extra care should
+ * be taken to avoid deadlocks, and to be as fast as possible.
+ */
+static int i7core_mce_check_error(void *priv, struct mce *mce)
+{
+ struct mem_ctl_info *mci = priv;
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ /*
+ * Just let mcelog handle it if the error is
+ * outside the memory controller
+ */
+ if (((mce->status & 0xffff) >> 7) != 1)
+ return 0;
+
+ /* Bank 8 registers are the only ones that we know how to handle */
+ if (mce->bank != 8)
+ return 0;
+
+#ifdef CONFIG_SMP
+ /* Only handle if it is the right mc controller */
+ if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
+ return 0;
+#endif
+
+ smp_rmb();
+ if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
+ smp_wmb();
+ pvt->mce_overrun++;
+ return 0;
+ }
+
+ /* Copy memory error at the ringbuffer */
+ memcpy(&pvt->mce_entry[pvt->mce_out], mce, sizeof(*mce));
+ smp_wmb();
+ pvt->mce_out = (pvt->mce_out + 1) % MCE_LOG_LEN;
+
+ /* Handle fatal errors immediately */
+ if (mce->mcgstatus & 1)
+ i7core_check_error(mci);
+
+ /* Advice mcelog that the error were handled */
+ return 1;
+}
+
+static int i7core_register_mci(struct i7core_dev *i7core_dev,
+ int num_channels, int num_csrows)
+{
+ struct mem_ctl_info *mci;
+ struct i7core_pvt *pvt;
+ int csrow = 0;
+ int rc;
+
+ /* allocate a new MC control structure */
+ mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels,
+ i7core_dev->socket);
+ if (unlikely(!mci))
+ return -ENOMEM;
+
+ debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci);
+
+ /* record ptr to the generic device */
+ mci->dev = &i7core_dev->pdev[0]->dev;
+
+ pvt = mci->pvt_info;
+ memset(pvt, 0, sizeof(*pvt));
+
+ /*
+ * FIXME: how to handle RDDR3 at MCI level? It is possible to have
+ * Mixed RDDR3/UDDR3 with Nehalem, provided that they are on different
+ * memory channels
+ */
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE;
+ mci->edac_cap = EDAC_FLAG_NONE;
+ mci->mod_name = "i7core_edac.c";
+ mci->mod_ver = I7CORE_REVISION;
+ mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d",
+ i7core_dev->socket);
+ mci->dev_name = pci_name(i7core_dev->pdev[0]);
+ mci->ctl_page_to_phys = NULL;
+ mci->mc_driver_sysfs_attributes = i7core_sysfs_attrs;
+ /* Set the function pointer to an actual operation function */
+ mci->edac_check = i7core_check_error;
+
+ /* Store pci devices at mci for faster access */
+ rc = mci_bind_devs(mci, i7core_dev);
+ if (unlikely(rc < 0))
+ goto fail;
+
+ /* Get dimm basic config */
+ get_dimm_config(mci, &csrow);
+
+ /* add this new MC control structure to EDAC's list of MCs */
+ if (unlikely(edac_mc_add_mc(mci))) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mc_add_mc()\n", __func__);
+ /* FIXME: perhaps some code should go here that disables error
+ * reporting if we just enabled it
+ */
+
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ /* allocating generic PCI control info */
+ i7core_pci = edac_pci_create_generic_ctl(&i7core_dev->pdev[0]->dev,
+ EDAC_MOD_STR);
+ if (unlikely(!i7core_pci)) {
+ printk(KERN_WARNING
+ "%s(): Unable to create PCI control\n",
+ __func__);
+ printk(KERN_WARNING
+ "%s(): PCI error report via EDAC not setup\n",
+ __func__);
+ }
+
+ /* Default error mask is any memory */
+ pvt->inject.channel = 0;
+ pvt->inject.dimm = -1;
+ pvt->inject.rank = -1;
+ pvt->inject.bank = -1;
+ pvt->inject.page = -1;
+ pvt->inject.col = -1;
+
+ /* Registers on edac_mce in order to receive memory errors */
+ pvt->edac_mce.priv = mci;
+ pvt->edac_mce.check_error = i7core_mce_check_error;
+
+ rc = edac_mce_register(&pvt->edac_mce);
+ if (unlikely(rc < 0)) {
+ debugf0("MC: " __FILE__
+ ": %s(): failed edac_mce_register()\n", __func__);
+ }
+
+fail:
+ edac_mc_free(mci);
+ return rc;
+}
+
+/*
+ * i7core_probe Probe for ONE instance of device to see if it is
+ * present.
+ * return:
+ * 0 for FOUND a device
+ * < 0 for error code
+ */
+static int __devinit i7core_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int dev_idx = id->driver_data;
+ int rc;
+ struct i7core_dev *i7core_dev;
+
+ /*
+ * All memory controllers are allocated at the first pass.
+ */
+ if (unlikely(dev_idx >= 1))
+ return -EINVAL;
+
+ /* get the pci devices we want to reserve for our use */
+ mutex_lock(&i7core_edac_lock);
+
+ rc = i7core_get_devices(pci_dev_descr_i7core,
+ ARRAY_SIZE(pci_dev_descr_i7core));
+ if (unlikely(rc < 0))
+ goto fail0;
+
+ list_for_each_entry(i7core_dev, &i7core_edac_list, list) {
+ int channels;
+ int csrows;
+
+ /* Check the number of active and not disabled channels */
+ rc = i7core_get_active_channels(i7core_dev->socket,
+ &channels, &csrows);
+ if (unlikely(rc < 0))
+ goto fail1;
+
+ rc = i7core_register_mci(i7core_dev, channels, csrows);
+ if (unlikely(rc < 0))
+ goto fail1;
+ }
+
+ i7core_printk(KERN_INFO, "Driver loaded.\n");
+
+ mutex_unlock(&i7core_edac_lock);
+ return 0;
+
+fail1:
+ i7core_put_all_devices();
+fail0:
+ mutex_unlock(&i7core_edac_lock);
+ return rc;
+}
+
+/*
+ * i7core_remove destructor for one instance of device
+ *
+ */
+static void __devexit i7core_remove(struct pci_dev *pdev)
+{
+ struct mem_ctl_info *mci;
+ struct i7core_dev *i7core_dev, *tmp;
+
+ debugf0(__FILE__ ": %s()\n", __func__);
+
+ if (i7core_pci)
+ edac_pci_release_generic_ctl(i7core_pci);
+
+ /*
+ * we have a trouble here: pdev value for removal will be wrong, since
+ * it will point to the X58 register used to detect that the machine
+ * is a Nehalem or upper design. However, due to the way several PCI
+ * devices are grouped together to provide MC functionality, we need
+ * to use a different method for releasing the devices
+ */
+
+ mutex_lock(&i7core_edac_lock);
+ list_for_each_entry_safe(i7core_dev, tmp, &i7core_edac_list, list) {
+ mci = edac_mc_del_mc(&i7core_dev->pdev[0]->dev);
+ if (mci) {
+ struct i7core_pvt *pvt = mci->pvt_info;
+
+ i7core_dev = pvt->i7core_dev;
+ edac_mce_unregister(&pvt->edac_mce);
+ kfree(mci->ctl_name);
+ edac_mc_free(mci);
+ i7core_put_devices(i7core_dev);
+ } else {
+ i7core_printk(KERN_ERR,
+ "Couldn't find mci for socket %d\n",
+ i7core_dev->socket);
+ }
+ }
+ mutex_unlock(&i7core_edac_lock);
+}
+
+MODULE_DEVICE_TABLE(pci, i7core_pci_tbl);
+
+/*
+ * i7core_driver pci_driver structure for this module
+ *
+ */
+static struct pci_driver i7core_driver = {
+ .name = "i7core_edac",
+ .probe = i7core_probe,
+ .remove = __devexit_p(i7core_remove),
+ .id_table = i7core_pci_tbl,
+};
+
+/*
+ * i7core_init Module entry function
+ * Try to initialize this module for its devices
+ */
+static int __init i7core_init(void)
+{
+ int pci_rc;
+
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+
+ /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+ opstate_init();
+
+ i7core_xeon_pci_fixup(pci_dev_descr_i7core[0].dev_id);
+
+ pci_rc = pci_register_driver(&i7core_driver);
+
+ if (pci_rc >= 0)
+ return 0;
+
+ i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
+ pci_rc);
+
+ return pci_rc;
+}
+
+/*
+ * i7core_exit() Module exit function
+ * Unregister the driver
+ */
+static void __exit i7core_exit(void)
+{
+ debugf2("MC: " __FILE__ ": %s()\n", __func__);
+ pci_unregister_driver(&i7core_driver);
+}
+
+module_init(i7core_init);
+module_exit(i7core_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)");
+MODULE_DESCRIPTION("MC Driver for Intel i7 Core memory controllers - "
+ I7CORE_REVISION);
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 9d0dfcbe2c1c..eecd52dc8e98 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -43,7 +43,7 @@
#include "core.h"
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
ci->end = ci->p + (p[0] >> 16);
@@ -59,9 +59,76 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
}
EXPORT_SYMBOL(fw_csr_iterator_next);
+static const u32 *search_leaf(const u32 *directory, int search_key)
+{
+ struct fw_csr_iterator ci;
+ int last_key = 0, key, value;
+
+ fw_csr_iterator_init(&ci, directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (last_key == search_key &&
+ key == (CSR_DESCRIPTOR | CSR_LEAF))
+ return ci.p - 1 + value;
+
+ last_key = key;
+ }
+
+ return NULL;
+}
+
+static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
+{
+ unsigned int quadlets, i;
+ char c;
+
+ if (!size || !buf)
+ return -EINVAL;
+
+ quadlets = min(block[0] >> 16, 256U);
+ if (quadlets < 2)
+ return -ENODATA;
+
+ if (block[1] != 0 || block[2] != 0)
+ /* unknown language/character set */
+ return -ENODATA;
+
+ block += 3;
+ quadlets -= 2;
+ for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
+ c = block[i / 4] >> (24 - 8 * (i % 4));
+ if (c == '\0')
+ break;
+ buf[i] = c;
+ }
+ buf[i] = '\0';
+
+ return i;
+}
+
+/**
+ * fw_csr_string - reads a string from the configuration ROM
+ * @directory: e.g. root directory or unit directory
+ * @key: the key of the preceding directory entry
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
+ *
+ * The string is taken from a minimal ASCII text descriptor leaf after
+ * the immediate entry with @key. The string is zero-terminated.
+ * Returns strlen(buf) or a negative error code.
+ */
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
+{
+ const u32 *leaf = search_leaf(directory, key);
+ if (!leaf)
+ return -ENOENT;
+
+ return textual_leaf_to_string(leaf, buf, size);
+}
+EXPORT_SYMBOL(fw_csr_string);
+
static bool is_fw_unit(struct device *dev);
-static int match_unit_directory(u32 *directory, u32 match_flags,
+static int match_unit_directory(const u32 *directory, u32 match_flags,
const struct ieee1394_device_id *id)
{
struct fw_csr_iterator ci;
@@ -195,7 +262,7 @@ static ssize_t show_immediate(struct device *dev,
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
- u32 *dir;
+ const u32 *dir;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
@@ -226,10 +293,10 @@ static ssize_t show_text_leaf(struct device *dev,
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
- struct fw_csr_iterator ci;
- u32 *dir, *block = NULL, *p, *end;
- int length, key, value, last_key = 0, ret = -ENOENT;
- char *b;
+ const u32 *dir;
+ size_t bufsize;
+ char dummy_buf[2];
+ int ret;
down_read(&fw_device_rwsem);
@@ -238,40 +305,23 @@ static ssize_t show_text_leaf(struct device *dev,
else
dir = fw_device(dev)->config_rom + 5;
- fw_csr_iterator_init(&ci, dir);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (attr->key == last_key &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
+ if (buf) {
+ bufsize = PAGE_SIZE - 1;
+ } else {
+ buf = dummy_buf;
+ bufsize = 1;
}
- if (block == NULL)
- goto out;
-
- length = min(block[0] >> 16, 256U);
- if (length < 3)
- goto out;
+ ret = fw_csr_string(dir, attr->key, buf, bufsize);
- if (block[1] != 0 || block[2] != 0)
- /* Unknown encoding. */
- goto out;
-
- if (buf == NULL) {
- ret = length * 4;
- goto out;
+ if (ret >= 0) {
+ /* Strip trailing whitespace and add newline. */
+ while (ret > 0 && isspace(buf[ret - 1]))
+ ret--;
+ strcpy(buf + ret, "\n");
+ ret++;
}
- b = buf;
- end = &block[length + 1];
- for (p = &block[3]; p < end; p++, b += 4)
- * (u32 *) b = (__force u32) __cpu_to_be32(*p);
-
- /* Strip trailing whitespace and add newline. */
- while (b--, (isspace(*b) || *b == '\0') && b > buf);
- strcpy(b + 1, "\n");
- ret = b + 2 - buf;
- out:
up_read(&fw_device_rwsem);
return ret;
@@ -371,7 +421,7 @@ static ssize_t guid_show(struct device *dev,
return ret;
}
-static int units_sprintf(char *buf, u32 *directory)
+static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -453,7 +503,8 @@ static int read_rom(struct fw_device *device,
*/
static int read_bus_info_block(struct fw_device *device, int generation)
{
- u32 *rom, *stack, *old_rom, *new_rom;
+ const u32 *old_rom, *new_rom;
+ u32 *rom, *stack;
u32 sp, key;
int i, end, length, ret = -1;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index a61571c63c59..6610d2d38802 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2657,7 +2657,7 @@ static int pci_resume(struct pci_dev *dev)
}
#endif
-static struct pci_device_id pci_table[] = {
+static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
{ }
};
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index d485cdd8cbac..7e33b0b1704c 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1014,7 +1014,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
return 0;
}
-static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
+static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
+ const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -1027,7 +1028,7 @@ static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
return 0;
}
-static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
u32 *model, u32 *firmware_revision)
{
struct fw_csr_iterator ci;
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 051d1ebbd287..5aeb3b541c80 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -380,7 +380,6 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
struct ibft_nic *nic = entry->nic;
void *ibft_loc = entry->header;
char *str = buf;
- char *mac;
int val;
if (!nic)
@@ -421,10 +420,7 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
str += sprintf(str, "%d\n", nic->vlan);
break;
case ibft_eth_mac:
- mac = nic->mac;
- str += sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- (u8)mac[0], (u8)mac[1], (u8)mac[2],
- (u8)mac[3], (u8)mac[4], (u8)mac[5]);
+ str += sprintf(str, "%pM\n", nic->mac);
break;
case ibft_eth_hostname:
str += sprintf_string(str, nic->hostname_len,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index a019b49ecc9b..e1fa0abd3246 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -162,6 +162,13 @@ config GPIO_WM831X
Say yes here to access the GPIO signals of WM831x power management
chips from Wolfson Microelectronics.
+config GPIO_WM8350
+ tristate "WM8350 GPIOs"
+ depends on MFD_WM8350
+ help
+ Say yes here to access the GPIO signals of WM8350 power management
+ chips from Wolfson Microelectronics.
+
config GPIO_ADP5520
tristate "GPIO Support for ADP5520 PMIC"
depends on PMIC_ADP5520
@@ -172,6 +179,15 @@ config GPIO_ADP5520
To compile this driver as a module, choose M here: the module will
be called adp5520-gpio.
+config GPIO_ADP5588
+ tristate "ADP5588 I2C GPIO expander"
+ depends on I2C
+ help
+ This option enables support for 18 GPIOs found
+ on Analog Devices ADP5588 GPIO Expanders.
+ To compile this driver as a module, choose M here: the module will be
+ called adp5588-gpio.
+
comment "PCI GPIO expanders:"
config GPIO_CS5535
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 52fe4cf734c7..bf5515d787e6 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -5,6 +5,7 @@ ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
+obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
obj-$(CONFIG_GPIO_MAX7301) += max7301.o
obj-$(CONFIG_GPIO_MAX732X) += max732x.o
@@ -21,3 +22,4 @@ obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
+obj-$(CONFIG_GPIO_WM8350) += wm8350-gpiolib.o
diff --git a/drivers/gpio/adp5588-gpio.c b/drivers/gpio/adp5588-gpio.c
new file mode 100644
index 000000000000..afc097a16b33
--- /dev/null
+++ b/drivers/gpio/adp5588-gpio.c
@@ -0,0 +1,266 @@
+/*
+ * GPIO Chip driver for Analog Devices
+ * ADP5588 I/O Expander and QWERTY Keypad Controller
+ *
+ * Copyright 2009 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+
+#include <linux/i2c/adp5588.h>
+
+#define DRV_NAME "adp5588-gpio"
+#define MAXGPIO 18
+#define ADP_BANK(offs) ((offs) >> 3)
+#define ADP_BIT(offs) (1u << ((offs) & 0x7))
+
+struct adp5588_gpio {
+ struct i2c_client *client;
+ struct gpio_chip gpio_chip;
+ struct mutex lock; /* protect cached dir, dat_out */
+ unsigned gpio_start;
+ uint8_t dat_out[3];
+ uint8_t dir[3];
+};
+
+static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
+{
+ int ret = i2c_smbus_read_byte_data(client, reg);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Read Error\n");
+
+ return ret;
+}
+
+static int adp5588_gpio_write(struct i2c_client *client, u8 reg, u8 val)
+{
+ int ret = i2c_smbus_write_byte_data(client, reg, val);
+
+ if (ret < 0)
+ dev_err(&client->dev, "Write Error\n");
+
+ return ret;
+}
+
+static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
+{
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ return !!(adp5588_gpio_read(dev->client, GPIO_DAT_STAT1 + ADP_BANK(off))
+ & ADP_BIT(off));
+}
+
+static void adp5588_gpio_set_value(struct gpio_chip *chip,
+ unsigned off, int val)
+{
+ unsigned bank, bit;
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ bank = ADP_BANK(off);
+ bit = ADP_BIT(off);
+
+ mutex_lock(&dev->lock);
+ if (val)
+ dev->dat_out[bank] |= bit;
+ else
+ dev->dat_out[bank] &= ~bit;
+
+ adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
+ dev->dat_out[bank]);
+ mutex_unlock(&dev->lock);
+}
+
+static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
+{
+ int ret;
+ unsigned bank;
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ bank = ADP_BANK(off);
+
+ mutex_lock(&dev->lock);
+ dev->dir[bank] &= ~ADP_BIT(off);
+ ret = adp5588_gpio_write(dev->client, GPIO_DIR1 + bank, dev->dir[bank]);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static int adp5588_gpio_direction_output(struct gpio_chip *chip,
+ unsigned off, int val)
+{
+ int ret;
+ unsigned bank, bit;
+ struct adp5588_gpio *dev =
+ container_of(chip, struct adp5588_gpio, gpio_chip);
+
+ bank = ADP_BANK(off);
+ bit = ADP_BIT(off);
+
+ mutex_lock(&dev->lock);
+ dev->dir[bank] |= bit;
+
+ if (val)
+ dev->dat_out[bank] |= bit;
+ else
+ dev->dat_out[bank] &= ~bit;
+
+ ret = adp5588_gpio_write(dev->client, GPIO_DAT_OUT1 + bank,
+ dev->dat_out[bank]);
+ ret |= adp5588_gpio_write(dev->client, GPIO_DIR1 + bank,
+ dev->dir[bank]);
+ mutex_unlock(&dev->lock);
+
+ return ret;
+}
+
+static int __devinit adp5588_gpio_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+ struct adp5588_gpio *dev;
+ struct gpio_chip *gc;
+ int ret, i, revid;
+
+ if (pdata == NULL) {
+ dev_err(&client->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
+ return -EIO;
+ }
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ dev_err(&client->dev, "failed to alloc memory\n");
+ return -ENOMEM;
+ }
+
+ dev->client = client;
+
+ gc = &dev->gpio_chip;
+ gc->direction_input = adp5588_gpio_direction_input;
+ gc->direction_output = adp5588_gpio_direction_output;
+ gc->get = adp5588_gpio_get_value;
+ gc->set = adp5588_gpio_set_value;
+ gc->can_sleep = 1;
+
+ gc->base = pdata->gpio_start;
+ gc->ngpio = MAXGPIO;
+ gc->label = client->name;
+ gc->owner = THIS_MODULE;
+
+ mutex_init(&dev->lock);
+
+
+ ret = adp5588_gpio_read(dev->client, DEV_ID);
+ if (ret < 0)
+ goto err;
+
+ revid = ret & ADP5588_DEVICE_ID_MASK;
+
+ for (i = 0, ret = 0; i <= ADP_BANK(MAXGPIO); i++) {
+ dev->dat_out[i] = adp5588_gpio_read(client, GPIO_DAT_OUT1 + i);
+ dev->dir[i] = adp5588_gpio_read(client, GPIO_DIR1 + i);
+ ret |= adp5588_gpio_write(client, KP_GPIO1 + i, 0);
+ ret |= adp5588_gpio_write(client, GPIO_PULL1 + i,
+ (pdata->pullup_dis_mask >> (8 * i)) & 0xFF);
+
+ if (ret)
+ goto err;
+ }
+
+ ret = gpiochip_add(&dev->gpio_chip);
+ if (ret)
+ goto err;
+
+ dev_info(&client->dev, "gpios %d..%d on a %s Rev. %d\n",
+ gc->base, gc->base + gc->ngpio - 1,
+ client->name, revid);
+
+ if (pdata->setup) {
+ ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
+ if (ret < 0)
+ dev_warn(&client->dev, "setup failed, %d\n", ret);
+ }
+
+ i2c_set_clientdata(client, dev);
+ return 0;
+
+err:
+ kfree(dev);
+ return ret;
+}
+
+static int __devexit adp5588_gpio_remove(struct i2c_client *client)
+{
+ struct adp5588_gpio_platform_data *pdata = client->dev.platform_data;
+ struct adp5588_gpio *dev = i2c_get_clientdata(client);
+ int ret;
+
+ if (pdata->teardown) {
+ ret = pdata->teardown(client,
+ dev->gpio_chip.base, dev->gpio_chip.ngpio,
+ pdata->context);
+ if (ret < 0) {
+ dev_err(&client->dev, "teardown failed %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = gpiochip_remove(&dev->gpio_chip);
+ if (ret) {
+ dev_err(&client->dev, "gpiochip_remove failed %d\n", ret);
+ return ret;
+ }
+
+ kfree(dev);
+ return 0;
+}
+
+static const struct i2c_device_id adp5588_gpio_id[] = {
+ {DRV_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, adp5588_gpio_id);
+
+static struct i2c_driver adp5588_gpio_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = adp5588_gpio_probe,
+ .remove = __devexit_p(adp5588_gpio_remove),
+ .id_table = adp5588_gpio_id,
+};
+
+static int __init adp5588_gpio_init(void)
+{
+ return i2c_add_driver(&adp5588_gpio_driver);
+}
+
+module_init(adp5588_gpio_init);
+
+static void __exit adp5588_gpio_exit(void)
+{
+ i2c_del_driver(&adp5588_gpio_driver);
+}
+
+module_exit(adp5588_gpio_exit);
+
+MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_DESCRIPTION("GPIO ADP5588 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a25ad284a272..350842ad3632 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -858,8 +858,6 @@ int gpio_sysfs_set_active_low(unsigned gpio, int value)
desc = &gpio_desc[gpio];
if (test_bit(FLAG_EXPORT, &desc->flags)) {
- struct device *dev;
-
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev == NULL) {
status = -ENODEV;
diff --git a/drivers/gpio/wm8350-gpiolib.c b/drivers/gpio/wm8350-gpiolib.c
new file mode 100644
index 000000000000..511840d1c7ba
--- /dev/null
+++ b/drivers/gpio/wm8350-gpiolib.c
@@ -0,0 +1,181 @@
+/*
+ * wm835x-gpiolib.c -- gpiolib support for Wolfson WM835x PMICs
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/mfd/core.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#include <linux/mfd/wm8350/core.h>
+#include <linux/mfd/wm8350/gpio.h>
+
+struct wm8350_gpio_data {
+ struct wm8350 *wm8350;
+ struct gpio_chip gpio_chip;
+};
+
+static inline struct wm8350_gpio_data *to_wm8350_gpio(struct gpio_chip *chip)
+{
+ return container_of(chip, struct wm8350_gpio_data, gpio_chip);
+}
+
+static int wm8350_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8350_gpio_data *wm8350_gpio = to_wm8350_gpio(chip);
+ struct wm8350 *wm8350 = wm8350_gpio->wm8350;
+
+ return wm8350_set_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O,
+ 1 << offset);
+}
+
+static int wm8350_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8350_gpio_data *wm8350_gpio = to_wm8350_gpio(chip);
+ struct wm8350 *wm8350 = wm8350_gpio->wm8350;
+ int ret;
+
+ ret = wm8350_reg_read(wm8350, WM8350_GPIO_LEVEL);
+ if (ret < 0)
+ return ret;
+
+ if (ret & (1 << offset))
+ return 1;
+ else
+ return 0;
+}
+
+static void wm8350_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct wm8350_gpio_data *wm8350_gpio = to_wm8350_gpio(chip);
+ struct wm8350 *wm8350 = wm8350_gpio->wm8350;
+
+ if (value)
+ wm8350_set_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+ else
+ wm8350_clear_bits(wm8350, WM8350_GPIO_LEVEL, 1 << offset);
+}
+
+static int wm8350_gpio_direction_out(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct wm8350_gpio_data *wm8350_gpio = to_wm8350_gpio(chip);
+ struct wm8350 *wm8350 = wm8350_gpio->wm8350;
+ int ret;
+
+ ret = wm8350_clear_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O,
+ 1 << offset);
+ if (ret < 0)
+ return ret;
+
+ /* Don't have an atomic direction/value setup */
+ wm8350_gpio_set(chip, offset, value);
+
+ return 0;
+}
+
+static int wm8350_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ struct wm8350_gpio_data *wm8350_gpio = to_wm8350_gpio(chip);
+ struct wm8350 *wm8350 = wm8350_gpio->wm8350;
+
+ if (!wm8350->irq_base)
+ return -EINVAL;
+
+ return wm8350->irq_base + WM8350_IRQ_GPIO(offset);
+}
+
+static struct gpio_chip template_chip = {
+ .label = "wm8350",
+ .owner = THIS_MODULE,
+ .direction_input = wm8350_gpio_direction_in,
+ .get = wm8350_gpio_get,
+ .direction_output = wm8350_gpio_direction_out,
+ .set = wm8350_gpio_set,
+ .to_irq = wm8350_gpio_to_irq,
+ .can_sleep = 1,
+};
+
+static int __devinit wm8350_gpio_probe(struct platform_device *pdev)
+{
+ struct wm8350 *wm8350 = dev_get_drvdata(pdev->dev.parent);
+ struct wm8350_platform_data *pdata = wm8350->dev->platform_data;
+ struct wm8350_gpio_data *wm8350_gpio;
+ int ret;
+
+ wm8350_gpio = kzalloc(sizeof(*wm8350_gpio), GFP_KERNEL);
+ if (wm8350_gpio == NULL)
+ return -ENOMEM;
+
+ wm8350_gpio->wm8350 = wm8350;
+ wm8350_gpio->gpio_chip = template_chip;
+ wm8350_gpio->gpio_chip.ngpio = 13;
+ wm8350_gpio->gpio_chip.dev = &pdev->dev;
+ if (pdata && pdata->gpio_base)
+ wm8350_gpio->gpio_chip.base = pdata->gpio_base;
+ else
+ wm8350_gpio->gpio_chip.base = -1;
+
+ ret = gpiochip_add(&wm8350_gpio->gpio_chip);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
+ ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, wm8350_gpio);
+
+ return ret;
+
+err:
+ kfree(wm8350_gpio);
+ return ret;
+}
+
+static int __devexit wm8350_gpio_remove(struct platform_device *pdev)
+{
+ struct wm8350_gpio_data *wm8350_gpio = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = gpiochip_remove(&wm8350_gpio->gpio_chip);
+ if (ret == 0)
+ kfree(wm8350_gpio);
+
+ return ret;
+}
+
+static struct platform_driver wm8350_gpio_driver = {
+ .driver.name = "wm8350-gpio",
+ .driver.owner = THIS_MODULE,
+ .probe = wm8350_gpio_probe,
+ .remove = __devexit_p(wm8350_gpio_remove),
+};
+
+static int __init wm8350_gpio_init(void)
+{
+ return platform_driver_register(&wm8350_gpio_driver);
+}
+subsys_initcall(wm8350_gpio_init);
+
+static void __exit wm8350_gpio_exit(void)
+{
+ platform_driver_unregister(&wm8350_gpio_driver);
+}
+module_exit(wm8350_gpio_exit);
+
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("GPIO interface for WM8350 PMICs");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8350-gpio");
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 628eae3e9b83..a1fce68e3bbe 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -39,8 +39,7 @@ static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE,
- gart_info->table_mask);
+ PAGE_SIZE);
if (gart_info->table_handle == NULL)
return -ENOMEM;
@@ -112,6 +111,13 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
+ if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+ DRM_ERROR("fail to set dma mask to 0x%Lx\n",
+ gart_info->table_mask);
+ ret = 1;
+ goto done;
+ }
+
ret = drm_ati_alloc_pcigart_table(dev, gart_info);
if (ret) {
DRM_ERROR("cannot allocate PCI GART page!\n");
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3d09e304f6f4..8417cc4c43f1 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -326,7 +326,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+ dmah = drm_pci_alloc(dev, map->size, map->size);
if (!dmah) {
kfree(map);
return -ENOMEM;
@@ -885,7 +885,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
while (entry->buf_count < count) {
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
+ dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
if (!dmah) {
/* Set count correctly so we free the proper amount. */
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 5124401f266a..d91fb8c0b7b3 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
+ { DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
};
static struct drm_prop_enum_list drm_encoder_enum_list[] =
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4231d6db72ec..077313f0d47f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_helper_crtc_in_use);
/**
- * drm_disable_unused_functions - disable unused objects
+ * drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
*
* LOCKING:
@@ -1032,7 +1032,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
/*
* we shouldn't end up with no modes here.
*/
- WARN(!count, "No connectors reported connected with modes\n");
+ printk(KERN_INFO "No connectors reported conncted with modes\n");
drm_setup_crtcs(dev);
@@ -1162,6 +1162,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+ struct drm_crtc_helper_funcs *crtc_funcs;
int ret;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1174,6 +1177,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if (ret == false)
DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+ /* Turn off outputs that were already powered off */
+ if (drm_helper_choose_crtc_dpms(crtc)) {
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+ if(encoder->crtc != crtc)
+ continue;
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ (*encoder_funcs->dpms) (encoder,
+ drm_helper_choose_encoder_dpms(encoder));
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
+ }
+ }
}
/* disable the unused connectors while restoring the modesetting */
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5c9f79877cbf..defcaf108460 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -911,23 +911,27 @@ static int drm_cvt_modes(struct drm_connector *connector,
struct drm_device *dev = connector->dev;
struct cvt_timing *cvt;
const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
for (i = 0; i < 4; i++) {
int uninitialized_var(width), height;
cvt = &(timing->data.other_data.data.cvt[i]);
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
- switch (cvt->code[1] & 0xc0) {
+ if (!memcmp(cvt->code, empty, 3))
+ continue;
+
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
case 0x00:
width = height * 4 / 3;
break;
- case 0x40:
+ case 0x04:
width = height * 16 / 9;
break;
- case 0x80:
+ case 0x08:
width = height * 16 / 10;
break;
- case 0xc0:
+ case 0x0c:
width = height * 15 / 9;
break;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1b49fa055f4f..e00301f88f6b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -29,6 +29,7 @@
*/
#include <linux/sysrq.h>
#include <linux/fb.h>
+#include <linux/kgdb.h>
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
@@ -156,7 +157,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
force = DRM_FORCE_ON;
break;
case 'D':
- if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) &&
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
force = DRM_FORCE_ON;
else
@@ -233,6 +234,80 @@ int drm_fb_helper_parse_command_line(struct drm_device *dev)
return 0;
}
+#define to_fb_helper(ops) (container_of((ops), struct drm_fb_helper, kdb_ops))
+
+static int drm_fb_kdb_enter(struct dbg_kms_console_ops *ops)
+{
+ struct drm_fb_helper *helper = to_fb_helper(ops);
+ struct drm_crtc_helper_funcs *funcs;
+ int i;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set =
+ &helper->crtc_info[i].mode_set;
+
+ if (!mode_set->crtc->enabled)
+ continue;
+
+ funcs = mode_set->crtc->helper_private;
+ funcs->mode_set_base_atomic(mode_set->crtc,
+ mode_set->fb,
+ mode_set->x,
+ mode_set->y);
+
+ }
+ }
+
+ return 0;
+}
+
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
+
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->fb;
+ }
+
+ return NULL;
+}
+
+static int drm_fb_kdb_exit(struct dbg_kms_console_ops *ops)
+{
+ struct drm_fb_helper *helper = to_fb_helper(ops);
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_framebuffer *fb;
+ int i;
+
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ funcs = crtc->helper_private;
+ fb = drm_mode_config_fb(crtc);
+
+ if (!crtc->enabled)
+ continue;
+
+ if (!fb) {
+ DRM_ERROR("no fb to restore??\n");
+ continue;
+ }
+
+ funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+ crtc->y);
+ }
+
+ return 0;
+}
+
bool drm_fb_helper_force_kernel_mode(void)
{
int i = 0;
@@ -321,9 +396,9 @@ static void drm_fb_helper_on(struct fb_info *info)
!crtc->enabled)
continue;
- mutex_lock(&dev->mode_config.mutex);
+ dbg_safe_mutex_lock(&dev->mode_config.mutex);
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
+ dbg_safe_mutex_unlock(&dev->mode_config.mutex);
/* Found a CRTC on this fb, now find encoders */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -331,9 +406,9 @@ static void drm_fb_helper_on(struct fb_info *info)
struct drm_encoder_helper_funcs *encoder_funcs;
encoder_funcs = encoder->helper_private;
- mutex_lock(&dev->mode_config.mutex);
+ dbg_safe_mutex_lock(&dev->mode_config.mutex);
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
- mutex_unlock(&dev->mode_config.mutex);
+ dbg_safe_mutex_unlock(&dev->mode_config.mutex);
}
}
}
@@ -602,15 +677,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
- if (var->pixclock != 0)
+ if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
/* Need to resize the fb object !!! */
- if (var->xres > fb->width || var->yres > fb->height) {
- DRM_ERROR("Requested width/height is greater than current fb "
- "object %dx%d > %dx%d\n", var->xres, var->yres,
- fb->width, fb->height);
- DRM_ERROR("Need resizing code.\n");
+ if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+ DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+ "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+ fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
@@ -745,9 +819,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
modeset->y = var->yoffset;
if (modeset->num_connectors) {
- mutex_lock(&dev->mode_config.mutex);
+ dbg_safe_mutex_lock(&dev->mode_config.mutex);
ret = crtc->funcs->set_config(modeset);
- mutex_unlock(&dev->mode_config.mutex);
+ dbg_safe_mutex_unlock(&dev->mode_config.mutex);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
@@ -924,6 +998,9 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
/* Switch back to kernel console on panic */
/* multi card linked list maybe */
if (list_empty(&kernel_fb_helper_list)) {
+ fb_helper->kdb_ops.activate_console = drm_fb_kdb_enter;
+ fb_helper->kdb_ops.restore_console = drm_fb_kdb_exit;
+ dbg_kms_console_ops_register(&fb_helper->kdb_ops);
printk(KERN_INFO "registered panic notifier\n");
atomic_notifier_chain_register(&panic_notifier_list,
&paniced);
@@ -938,6 +1015,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
{
list_del(&helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
+ dbg_kms_console_ops_unregister(&helper->kdb_ops);
printk(KERN_INFO "unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 7998ee66b317..b98384dbd9a7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs = 0;
}
+EXPORT_SYMBOL(drm_vblank_cleanup);
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
}
dev->vblank_disable_allowed = 0;
-
return 0;
err:
@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
*/
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
+ /* vblank is not initialized (IRQ not installed ?) */
+ if (!dev->num_crtcs)
+ return;
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 577094fb1995..e68ebf92fa2a 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -47,8 +47,7 @@
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
- dma_addr_t maxaddr)
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
#if 1
@@ -63,11 +62,6 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
if (align > size)
return NULL;
- if (pci_set_dma_mask(dev->pdev, maxaddr) != 0) {
- DRM_ERROR("Setting pci dma mask failed\n");
- return NULL;
- }
-
dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
if (!dmah)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 18476bf0b580..9c9998c4dceb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -272,7 +272,7 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
mem = kmap_atomic(pages[page], KM_USER0);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
- kunmap_atomic(pages[page], KM_USER0);
+ kunmap_atomic(mem, KM_USER0);
}
}
@@ -386,34 +386,6 @@ out:
return 0;
}
-static int i915_registers_info(struct seq_file *m, void *data) {
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t reg;
-
-#define DUMP_RANGE(start, end) \
- for (reg=start; reg < end; reg += 4) \
- seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
- DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
- DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
- DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
- DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
- DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
- DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
- DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
- DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
- DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
- DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
- DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
- DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
- DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
- DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
-
- return 0;
-}
-
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -519,7 +491,6 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}
static struct drm_info_list i915_debugfs_list[] = {
- {"i915_regs", i915_registers_info, 0},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 701bfeac7f57..bbe47812e4b6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -123,7 +123,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
- drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
@@ -813,9 +813,13 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PAGEFLIPPING:
value = 1;
break;
+ case I915_PARAM_HAS_EXECBUF2:
+ /* depends on GEM */
+ value = dev_priv->has_gem;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
- param->param);
+ param->param);
return -EINVAL;
}
@@ -1117,7 +1121,8 @@ static void i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *compressed_llb;
- unsigned long cfb_base, ll_base;
+ unsigned long cfb_base;
+ unsigned long ll_base = 0;
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
@@ -1200,14 +1205,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
0xff000000;
- if (IS_MOBILE(dev) || IS_I9XX(dev))
- dev_priv->cursor_needs_physical = true;
- else
- dev_priv->cursor_needs_physical = false;
-
- if (IS_I965G(dev) || IS_G33(dev))
- dev_priv->cursor_needs_physical = false;
-
/* Basic memrange allocator for stolen space (aka vram) */
drm_mm_init(&dev_priv->vram, 0, prealloc_size);
DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
@@ -1257,6 +1254,8 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto destroy_ringbuffer;
+ intel_modeset_init(dev);
+
ret = drm_irq_install(dev);
if (ret)
goto destroy_ringbuffer;
@@ -1271,8 +1270,6 @@ static int i915_load_modeset_init(struct drm_device *dev,
I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
- intel_modeset_init(dev);
-
drm_helper_initial_config(dev);
return 0;
@@ -1360,7 +1357,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
resource_size_t base, size;
- int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+ int ret = 0, mmio_bar;
uint32_t agp_size, prealloc_size, prealloc_start;
/* i915 has 4 more counters */
@@ -1376,8 +1373,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
+ dev_priv->info = (struct intel_device_info *) flags;
/* Add register map (needed for suspend/resume) */
+ mmio_bar = IS_I9XX(dev) ? 0 : 1;
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
@@ -1652,6 +1651,7 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 24286ca168fc..be631cc3e4dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -33,7 +33,6 @@
#include "i915_drm.h"
#include "i915_drv.h"
-#include "drm_pciids.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
@@ -48,8 +47,124 @@ module_param_named(powersave, i915_powersave, int, 0400);
static struct drm_driver driver;
-static struct pci_device_id pciidlist[] = {
- i915_PCI_IDS
+#define INTEL_VGA_DEVICE(id, info) { \
+ .class = PCI_CLASS_DISPLAY_VGA << 8, \
+ .class_mask = 0xffff00, \
+ .vendor = 0x8086, \
+ .device = id, \
+ .subvendor = PCI_ANY_ID, \
+ .subdevice = PCI_ANY_ID, \
+ .driver_data = (unsigned long) info }
+
+const static struct intel_device_info intel_i830_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_845g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i85x_info = {
+ .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i865g_info = {
+ .is_i8xx = 1,
+};
+
+const static struct intel_device_info intel_i915g_info = {
+ .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i915gm_info = {
+ .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945g_info = {
+ .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+const static struct intel_device_info intel_i945gm_info = {
+ .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+ .has_hotplug = 1, .cursor_needs_physical = 1,
+};
+
+const static struct intel_device_info intel_i965g_info = {
+ .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_i965gm_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g33_info = {
+ .is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_g45_info = {
+ .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_gm45_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_pineview_info = {
+ .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_d_info = {
+ .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_pipe_cxsr = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_ironlake_m_info = {
+ .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+ .need_gfx_hws = 1, .has_rc6 = 1,
+ .has_hotplug = 1,
+};
+
+const static struct pci_device_id pciidlist[] = {
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+ INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+ INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ {0, 0, 0}
};
#if defined(CONFIG_DRM_I915_KMS)
@@ -284,6 +399,52 @@ i915_pci_resume(struct pci_dev *pdev)
return i915_resume(dev);
}
+static int
+i915_pm_suspend(struct device *dev)
+{
+ return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+}
+
+static int
+i915_pm_resume(struct device *dev)
+{
+ return i915_pci_resume(to_pci_dev(dev));
+}
+
+static int
+i915_pm_freeze(struct device *dev)
+{
+ return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+}
+
+static int
+i915_pm_thaw(struct device *dev)
+{
+ /* thaw during hibernate, do nothing! */
+ return 0;
+}
+
+static int
+i915_pm_poweroff(struct device *dev)
+{
+ return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+}
+
+static int
+i915_pm_restore(struct device *dev)
+{
+ return i915_pci_resume(to_pci_dev(dev));
+}
+
+const struct dev_pm_ops i915_pm_ops = {
+ .suspend = i915_pm_suspend,
+ .resume = i915_pm_resume,
+ .freeze = i915_pm_freeze,
+ .thaw = i915_pm_thaw,
+ .poweroff = i915_pm_poweroff,
+ .restore = i915_pm_restore,
+};
+
static struct vm_operations_struct i915_gem_vm_ops = {
.fault = i915_gem_fault,
.open = drm_gem_vm_open,
@@ -344,10 +505,7 @@ static struct drm_driver driver = {
.id_table = pciidlist,
.probe = i915_pci_probe,
.remove = i915_pci_remove,
-#ifdef CONFIG_PM
- .resume = i915_pci_resume,
- .suspend = i915_pci_suspend,
-#endif
+ .driver.pm = &i915_pm_ops,
},
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbecac72f5bb..29dd67626967 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -172,9 +172,31 @@ struct drm_i915_display_funcs {
struct intel_overlay;
+struct intel_device_info {
+ u8 is_mobile : 1;
+ u8 is_i8xx : 1;
+ u8 is_i915g : 1;
+ u8 is_i9xx : 1;
+ u8 is_i945gm : 1;
+ u8 is_i965g : 1;
+ u8 is_i965gm : 1;
+ u8 is_g33 : 1;
+ u8 need_gfx_hws : 1;
+ u8 is_g4x : 1;
+ u8 is_pineview : 1;
+ u8 is_ironlake : 1;
+ u8 has_fbc : 1;
+ u8 has_rc6 : 1;
+ u8 has_pipe_cxsr : 1;
+ u8 has_hotplug : 1;
+ u8 cursor_needs_physical : 1;
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
+ const struct intel_device_info *info;
+
int has_gem;
void __iomem *regs;
@@ -232,8 +254,6 @@ typedef struct drm_i915_private {
int hangcheck_count;
uint32_t last_acthd;
- bool cursor_needs_physical;
-
struct drm_mm vram;
unsigned long cfb_size;
@@ -287,8 +307,6 @@ typedef struct drm_i915_private {
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
- u32 saveRENDERSTANDBY;
- u32 savePWRCTXA;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
@@ -561,6 +579,7 @@ typedef struct drm_i915_private {
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
+ struct drm_connector *int_lvds_connector;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -794,6 +813,8 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
@@ -860,6 +881,9 @@ void i915_gem_shrinker_exit(void);
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+ int tiling_mode);
+bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -982,67 +1006,33 @@ extern void g4x_disable_fbc(struct drm_device *dev);
extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
-
-#define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
- (dev)->pci_device == 0x27AE)
-#define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
- (dev)->pci_device == 0x2982 || \
- (dev)->pci_device == 0x2992 || \
- (dev)->pci_device == 0x29A2 || \
- (dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12 || \
- (dev)->pci_device == 0x2A42 || \
- (dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- (dev)->pci_device == 0x0042 || \
- (dev)->pci_device == 0x0046)
-
-#define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
- (dev)->pci_device == 0x2A12)
-
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-
-#define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
- (dev)->pci_device == 0x2E12 || \
- (dev)->pci_device == 0x2E22 || \
- (dev)->pci_device == 0x2E32 || \
- (dev)->pci_device == 0x2E42 || \
- IS_GM45(dev))
-
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
-
-#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
- (dev)->pci_device == 0x29B2 || \
- (dev)->pci_device == 0x29D2 || \
- (IS_PINEVIEW(dev)))
-
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
+#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
-
-#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
- IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
- IS_IRONLAKE(dev))
+#define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
+#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
- IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
- IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
- IS_IRONLAKE(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1054,17 +1044,14 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
-#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-#define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
- (IS_I9XX(dev) || IS_GM45(dev)) && \
- !IS_PINEVIEW(dev) && \
- !IS_IRONLAKE(dev))
-#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8c463cf2050a..2748609f05b3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2021,9 +2021,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
/* blow away mappings if mapped through GTT */
i915_gem_release_mmap(obj);
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
/* Move the object to the CPU domain to ensure that
* any possible CPU writes while it's not in the GTT
* are flushed when we go to remap it. This will
@@ -2039,6 +2036,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
BUG_ON(obj_priv->active);
+ /* release the fence reg _after_ flushing */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ i915_gem_clear_fence_reg(obj);
+
if (obj_priv->agp_mem != NULL) {
drm_unbind_agp(obj_priv->agp_mem);
drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
@@ -2581,9 +2582,6 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
bool retry_alloc = false;
int ret;
- if (dev_priv->mm.suspended)
- return -EBUSY;
-
if (obj_priv->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
@@ -3198,7 +3196,7 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_file *file_priv,
- struct drm_i915_gem_exec_object *entry,
+ struct drm_i915_gem_exec_object2 *entry,
struct drm_i915_gem_relocation_entry *relocs)
{
struct drm_device *dev = obj->dev;
@@ -3206,12 +3204,35 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int i, ret;
void __iomem *reloc_page;
+ bool need_fence;
+
+ need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj_priv->tiling_mode != I915_TILING_NONE;
+
+ /* Check fence reg constraints and rebind if necessary */
+ if (need_fence && !i915_obj_fenceable(dev, obj))
+ i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
if (ret)
return ret;
+ /*
+ * Pre-965 chips need a fence register set up in order to
+ * properly handle blits to/from tiled surfaces.
+ */
+ if (need_fence) {
+ ret = i915_gem_object_get_fence_reg(obj);
+ if (ret != 0) {
+ if (ret != -EBUSY && ret != -ERESTARTSYS)
+ DRM_ERROR("Failure to install fence: %d\n",
+ ret);
+ i915_gem_object_unpin(obj);
+ return ret;
+ }
+ }
+
entry->offset = obj_priv->gtt_offset;
/* Apply the relocations, using the GTT aperture to avoid cache
@@ -3373,7 +3394,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
*/
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
- struct drm_i915_gem_execbuffer *exec,
+ struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset)
{
@@ -3463,7 +3484,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
}
static int
-i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry **relocs)
{
@@ -3478,8 +3499,10 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
- if (*relocs == NULL)
+ if (*relocs == NULL) {
+ DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
return -ENOMEM;
+ }
for (i = 0; i < buffer_count; i++) {
struct drm_i915_gem_relocation_entry __user *user_relocs;
@@ -3503,7 +3526,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
}
static int
-i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
uint32_t buffer_count,
struct drm_i915_gem_relocation_entry *relocs)
{
@@ -3536,7 +3559,7 @@ err:
}
static int
-i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
uint64_t exec_offset)
{
uint32_t exec_start, exec_len;
@@ -3589,18 +3612,18 @@ i915_gem_wait_for_pending_flip(struct drm_device *dev,
}
int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec_list)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_exec_object *exec_list = NULL;
struct drm_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj;
struct drm_i915_gem_object *obj_priv;
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_relocation_entry *relocs;
- int ret, ret2, i, pinned = 0;
+ int ret = 0, ret2, i, pinned = 0;
uint64_t exec_offset;
uint32_t seqno, flush_domains, reloc_index;
int pin_tries, flips;
@@ -3614,25 +3637,13 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
return -EINVAL;
}
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (exec_list == NULL || object_list == NULL) {
- DRM_ERROR("Failed to allocate exec or object list "
- "for %d buffers\n",
+ if (object_list == NULL) {
+ DRM_ERROR("Failed to allocate object list for %d buffers\n",
args->buffer_count);
ret = -ENOMEM;
goto pre_mutex_err;
}
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- goto pre_mutex_err;
- }
if (args->num_cliprects != 0) {
cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
@@ -3884,20 +3895,6 @@ err:
mutex_unlock(&dev->struct_mutex);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec_list,
- sizeof(*exec_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
- }
-
/* Copy the updated relocations out regardless of current error
* state. Failure to update the relocs would mean that the next
* time userland calls execbuf, it would do so with presumed offset
@@ -3914,12 +3911,158 @@ err:
pre_mutex_err:
drm_free_large(object_list);
- drm_free_large(exec_list);
kfree(cliprects);
return ret;
}
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (!IS_I965G(dev))
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = 0;
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ } else {
+ DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
+
int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
{
@@ -3933,19 +4076,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
if (ret)
return ret;
}
- /*
- * Pre-965 chips need a fence register set up in order to
- * properly handle tiled surfaces.
- */
- if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj);
- if (ret != 0) {
- if (ret != -EBUSY && ret != -ERESTARTSYS)
- DRM_ERROR("Failure to install fence: %d\n",
- ret);
- return ret;
- }
- }
+
obj_priv->pin_count++;
/* If the object is not active and not pending a flush,
@@ -4708,7 +4839,7 @@ int i915_gem_init_phys_object(struct drm_device *dev,
phys_obj->id = id;
- phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+ phys_obj->handle = drm_pci_alloc(dev, size, 0);
if (!phys_obj->handle) {
ret = -ENOMEM;
goto kfree_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 30d6af6c09bb..df278b2685bf 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -304,35 +304,39 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
/**
- * Returns the size of the fence for a tiled object of the given size.
+ * Returns whether an object is currently fenceable. If not, it may need
+ * to be unbound and have its pitch adjusted.
*/
-static int
-i915_get_fence_size(struct drm_device *dev, int size)
+bool
+i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
{
- int i;
- int start;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
if (IS_I965G(dev)) {
/* The 965 can have fences at any page boundary. */
- return ALIGN(size, 4096);
+ if (obj->size & 4095)
+ return false;
+ return true;
+ } else if (IS_I9XX(dev)) {
+ if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ return false;
} else {
- /* Align the size to a power of two greater than the smallest
- * fence size.
- */
- if (IS_I9XX(dev))
- start = 1024 * 1024;
- else
- start = 512 * 1024;
+ if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ return false;
+ }
- for (i = start; i < size; i <<= 1)
- ;
+ /* Power of two sized... */
+ if (obj->size & (obj->size - 1))
+ return false;
- return i;
- }
+ /* Objects must be size aligned as well */
+ if (obj_priv->gtt_offset & (obj->size - 1))
+ return false;
+ return true;
}
/* Check pitch constriants for all chips & tiling formats */
-static bool
+bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -384,12 +388,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
if (stride & (stride - 1))
return false;
- /* We don't 0handle the aperture area covered by the fence being bigger
- * than the object size.
- */
- if (i915_get_fence_size(dev, size) != size)
- return false;
-
return true;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 85f4c5de97e2..7cd8110051b6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -313,6 +313,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
dev_priv->mm.irq_gem_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
DRM_WAKEUP(&dev_priv->irq_queue);
+ dev_priv->hangcheck_count = 0;
+ mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
}
if (de_iir & DE_GSE)
@@ -1084,6 +1086,10 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
(void) I915_READ(IER);
}
+/*
+ * Must be called after intel_modeset_init or hotplug interrupts won't be
+ * enabled correctly.
+ */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1106,19 +1112,23 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
- /* Leave other bits alone */
- hotplug_en |= HOTPLUG_EN_MASK;
+ /* Note HDMI and DP share bits */
+ if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+ hotplug_en |= HDMID_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+ hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+ if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
+ hotplug_en |= CRT_HOTPLUG_INT_EN;
+ /* Ignore TV since it's buggy */
+
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
- dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
- TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
- SDVOB_HOTPLUG_INT_STATUS;
- if (IS_G4X(dev)) {
- dev_priv->hotplug_supported_mask |=
- HDMIB_HOTPLUG_INT_STATUS |
- HDMIC_HOTPLUG_INT_STATUS |
- HDMID_HOTPLUG_INT_STATUS;
- }
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 974b3cf70618..149d360d64a3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -879,13 +879,6 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define CRT_FORCE_HOTPLUG_MASK 0xfffffe1f
-#define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
- HDMIC_HOTPLUG_INT_EN | \
- HDMID_HOTPLUG_INT_EN | \
- SDVOB_HOTPLUG_INT_EN | \
- SDVOC_HOTPLUG_INT_EN | \
- CRT_HOTPLUG_INT_EN)
-
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@@ -982,6 +975,8 @@
#define LVDS_PORT_EN (1 << 31)
/* Selects pipe B for LVDS data. Must be set on pre-965. */
#define LVDS_PIPEB_SELECT (1 << 30)
+/* LVDS dithering flag on 965/g4x platform */
+#define LVDS_ENABLE_DITHER (1 << 25)
/* Enable border for unscaled (or aspect-scaled) display */
#define LVDS_BORDER_ENABLE (1 << 15)
/*
@@ -1751,6 +1746,8 @@
/* Display & cursor control */
+/* dithering flag on Ironlake */
+#define PIPE_ENABLE_DITHER (1 << 4)
/* Pipe A */
#define PIPEADSL 0x70000
#define PIPEACONF 0x70008
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d5ebb00a9d49..a3b90c9561dc 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -732,12 +732,6 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
- dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
- }
-
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -793,12 +787,6 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
- /* Render Standby */
- if (I915_HAS_RC6(dev)) {
- I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
- I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
- }
-
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9f3d3e563414..ddefc871edfe 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -548,4 +548,6 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
+
+ dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 52cd9b006da2..037884071a93 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -262,6 +262,14 @@ struct intel_limit {
#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */
+#define IRONLAKE_P_DISPLAY_PORT_MIN 10
+#define IRONLAKE_P_DISPLAY_PORT_MAX 20
+#define IRONLAKE_P2_DISPLAY_PORT_FAST 10
+#define IRONLAKE_P2_DISPLAY_PORT_SLOW 10
+#define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0
+#define IRONLAKE_P1_DISPLAY_PORT_MIN 1
+#define IRONLAKE_P1_DISPLAY_PORT_MAX 2
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
@@ -271,9 +279,6 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock);
static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
@@ -496,7 +501,7 @@ static const intel_limit_t intel_limits_ironlake_sdvo = {
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
.p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .find_pll = intel_g4x_find_best_PLL,
};
static const intel_limit_t intel_limits_ironlake_lvds = {
@@ -511,7 +516,30 @@ static const intel_limit_t intel_limits_ironlake_lvds = {
.p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
.p2_slow = IRONLAKE_P2_LVDS_SLOW,
.p2_fast = IRONLAKE_P2_LVDS_FAST },
- .find_pll = intel_ironlake_find_best_PLL,
+ .find_pll = intel_g4x_find_best_PLL,
+};
+
+static const intel_limit_t intel_limits_ironlake_display_port = {
+ .dot = { .min = IRONLAKE_DOT_MIN,
+ .max = IRONLAKE_DOT_MAX },
+ .vco = { .min = IRONLAKE_VCO_MIN,
+ .max = IRONLAKE_VCO_MAX},
+ .n = { .min = IRONLAKE_N_MIN,
+ .max = IRONLAKE_N_MAX },
+ .m = { .min = IRONLAKE_M_MIN,
+ .max = IRONLAKE_M_MAX },
+ .m1 = { .min = IRONLAKE_M1_MIN,
+ .max = IRONLAKE_M1_MAX },
+ .m2 = { .min = IRONLAKE_M2_MIN,
+ .max = IRONLAKE_M2_MAX },
+ .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN,
+ .max = IRONLAKE_P_DISPLAY_PORT_MAX },
+ .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN,
+ .max = IRONLAKE_P1_DISPLAY_PORT_MAX},
+ .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT,
+ .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW,
+ .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST },
+ .find_pll = intel_find_pll_ironlake_dp,
};
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
@@ -519,6 +547,9 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_ironlake_lvds;
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ HAS_eDP)
+ limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_sdvo;
@@ -791,7 +822,13 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ int lvds_reg;
+
+ if (IS_IRONLAKE(dev))
+ lvds_reg = PCH_LVDS;
+ else
+ lvds_reg = LVDS;
+ if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
@@ -839,6 +876,11 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
intel_clock_t clock;
+
+ /* return directly when it is eDP */
+ if (HAS_eDP)
+ return true;
+
if (target < 200000) {
clock.n = 1;
clock.p1 = 2;
@@ -857,68 +899,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
return true;
}
-static bool
-intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
- int target, int refclk, intel_clock_t *best_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- intel_clock_t clock;
- int err_most = 47;
- int err_min = 10000;
-
- /* eDP has only 2 clock choice, no n/m/p setting */
- if (HAS_eDP)
- return true;
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
- return intel_find_pll_ironlake_dp(limit, crtc, target,
- refclk, best_clock);
-
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
- for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
- /* based on hardware requriment prefer smaller n to precision */
- for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
- /* based on hardware requirment prefere larger m1,m2 */
- for (clock.m1 = limit->m1.max;
- clock.m1 >= limit->m1.min; clock.m1--) {
- for (clock.m2 = limit->m2.max;
- clock.m2 >= limit->m2.min; clock.m2--) {
- int this_err;
-
- intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
- continue;
- this_err = abs((10000 - (target*10000/clock.dot)));
- if (this_err < err_most) {
- *best_clock = clock;
- /* found on first matching */
- goto out;
- } else if (this_err < err_min) {
- *best_clock = clock;
- err_min = this_err;
- }
- }
- }
- }
- }
-out:
- return true;
-}
-
/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -949,6 +929,13 @@ intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
void
intel_wait_for_vblank(struct drm_device *dev)
{
+ if (in_dbg_master()) {
+ /* When in the kernel debugger we cannot sleep */
+ preempt_disable();
+ mdelay(20);
+ preempt_enable();
+ return;
+ }
/* Wait for 20ms, i.e. one cycle at 50hz. */
msleep(20);
}
@@ -1166,6 +1153,10 @@ static void intel_update_fbc(struct drm_crtc *crtc,
goto out_disable;
}
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
if (dev_priv->display.fbc_enabled(crtc)) {
/* We can re-enable it in this case, but need to update pitch */
if (fb->pitch > dev_priv->cfb_pitch)
@@ -1234,6 +1225,98 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
return 0;
}
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+ obj_priv = obj->driver_private;
+
+ dspcntr = I915_READ(dspcntr_reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth\n");
+ return -EINVAL;
+ }
+ if (IS_I965G(dev)) {
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+
+ if (IS_IRONLAKE(dev))
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(dspcntr_reg, dspcntr);
+
+ Start = obj_priv->gtt_offset;
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ I915_WRITE(dspstride, fb->pitch);
+ if (IS_I965G(dev)) {
+ I915_WRITE(dspbase, Offset);
+ I915_READ(dspbase);
+ I915_WRITE(dspsurf, Start);
+ I915_READ(dspsurf);
+ I915_WRITE(dsptileoff, (y << 16) | x);
+ } else {
+ I915_WRITE(dspbase, Start + Offset);
+ I915_READ(dspbase);
+ }
+
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
+ intel_wait_for_vblank(dev);
+ intel_increase_pllclock(crtc, true);
+
+ return 0;
+}
+
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
@@ -1275,17 +1358,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
obj = intel_fb->obj;
obj_priv = obj->driver_private;
- mutex_lock(&dev->struct_mutex);
+ dbg_safe_mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
+ dbg_safe_mutex_unlock(&dev->struct_mutex);
return ret;
}
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret != 0) {
i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
+ dbg_safe_mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1312,7 +1395,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
default:
DRM_ERROR("Unknown color depth\n");
i915_gem_object_unpin(obj);
- mutex_unlock(&dev->struct_mutex);
+ dbg_safe_mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
if (IS_I965G(dev)) {
@@ -1356,7 +1439,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
intel_increase_pllclock(crtc, true);
- mutex_unlock(&dev->struct_mutex);
+ dbg_safe_mutex_unlock(&dev->struct_mutex);
if (!dev->primary->master)
return 0;
@@ -1493,6 +1576,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
u32 temp;
int tries = 5, j, n;
+ u32 pipe_bpc;
+
+ temp = I915_READ(pipeconf_reg);
+ pipe_bpc = temp & PIPE_BPC_MASK;
/* XXX: When our outputs are all unaware of DPMS modes other than off
* and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -1524,6 +1611,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
temp = I915_READ(fdi_rx_reg);
+ /*
+ * make the BPC in FDI Rx be consistent with that in
+ * pipeconf reg.
+ */
+ temp &= ~(0x7 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
FDI_SEL_PCDCLK |
FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
@@ -1666,6 +1759,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
/* enable PCH transcoder */
temp = I915_READ(transconf_reg);
+ /*
+ * make the BPC in transcoder be consistent with
+ * that in pipeconf reg.
+ */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
I915_READ(transconf_reg);
@@ -1745,6 +1844,9 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(fdi_tx_reg);
temp = I915_READ(fdi_rx_reg);
+ /* BPC in FDI rx is consistent with that in pipeconf */
+ temp &= ~(0x07 << 16);
+ temp |= (pipe_bpc << 11);
I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
I915_READ(fdi_rx_reg);
@@ -1789,7 +1891,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
}
-
+ temp = I915_READ(transconf_reg);
+ /* BPC in transcoder is consistent with that in pipeconf */
+ temp &= ~PIPE_BPC_MASK;
+ temp |= pipe_bpc;
+ I915_WRITE(transconf_reg, temp);
+ I915_READ(transconf_reg);
udelay(100);
/* disable PCH DPLL */
@@ -2448,7 +2555,7 @@ static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
* A value of 5us seems to be a good balance; safe for very low end
* platforms but not overly aggressive on lower latency configs.
*/
-const static int latency_ns = 5000;
+static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
@@ -2559,7 +2666,7 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2598,7 +2705,7 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 12000;
+ static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2667,7 +2774,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
if (HAS_FW_BLC(dev) && sr_hdisplay &&
(!planea_clock || !planeb_clock)) {
/* self-refresh has much higher latency */
- const static int sr_latency_ns = 6000;
+ static const int sr_latency_ns = 6000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
line_time_us = ((sr_hdisplay * 1000) / sr_clock);
@@ -2969,6 +3076,18 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(pipeconf_reg);
+ temp &= ~PIPE_BPC_MASK;
+ if (is_lvds) {
+ int lvds_reg = I915_READ(PCH_LVDS);
+ /* the BPC will be 6 if it is 18-bit LVDS panel */
+ if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP)
+ temp |= PIPE_8BPC;
+ else
+ temp |= PIPE_6BPC;
+ } else
+ temp |= PIPE_8BPC;
+ I915_WRITE(pipeconf_reg, temp);
+ I915_READ(pipeconf_reg);
switch (temp & PIPE_BPC_MASK) {
case PIPE_8BPC:
@@ -3195,7 +3314,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
-
+ /* set the dithering flag */
+ if (IS_I965G(dev)) {
+ if (dev_priv->lvds_dither) {
+ if (IS_IRONLAKE(dev))
+ pipeconf |= PIPE_ENABLE_DITHER;
+ else
+ lvds |= LVDS_ENABLE_DITHER;
+ } else {
+ if (IS_IRONLAKE(dev))
+ pipeconf &= ~PIPE_ENABLE_DITHER;
+ else
+ lvds &= ~LVDS_ENABLE_DITHER;
+ }
+ }
I915_WRITE(lvds_reg, lvds);
I915_READ(lvds_reg);
}
@@ -3385,7 +3517,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
- if (!dev_priv->cursor_needs_physical) {
+ if (!dev_priv->info->cursor_needs_physical) {
ret = i915_gem_object_pin(bo, PAGE_SIZE);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
@@ -3420,7 +3552,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
I915_WRITE(base, addr);
if (intel_crtc->cursor_bo) {
- if (dev_priv->cursor_needs_physical) {
+ if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
@@ -3779,125 +3911,6 @@ static void intel_gpu_idle_timer(unsigned long arg)
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
-void intel_increase_renderclock(struct drm_device *dev, bool schedule)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- /* Restore render clock frequency to original value */
- if (IS_G4X(dev) || IS_I9XX(dev))
- pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
- else if (IS_I85X(dev))
- pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
- DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
- /* Schedule downclock */
- if (schedule)
- mod_timer(&dev_priv->idle_timer, jiffies +
- msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-}
-
-void intel_decrease_renderclock(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (IS_IRONLAKE(dev))
- return;
-
- if (!dev_priv->render_reclock_avail) {
- DRM_DEBUG_DRIVER("not reclocking render clock\n");
- return;
- }
-
- if (IS_G4X(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
- gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I965G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
- gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I945G(dev) || IS_I945GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
- gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I915G(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
- gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- } else if (IS_I85X(dev)) {
- u16 hpllcc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
- /* Up to maximum... */
- hpllcc &= ~GC_CLOCK_CONTROL_MASK;
- hpllcc |= GC_CLOCK_133_200;
-
- pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
- }
- DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
-}
-
-/* Note that no increase function is needed for this - increase_renderclock()
- * will also rewrite these bits
- */
-void intel_decrease_displayclock(struct drm_device *dev)
-{
- if (IS_IRONLAKE(dev))
- return;
-
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
- IS_I915GM(dev)) {
- u16 gcfgc;
-
- /* Adjust render clock... */
- pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
- /* Down to minimum... */
- gcfgc &= ~0xf0;
- gcfgc |= 0x80;
-
- pci_write_config_word(dev->pdev, GCFGC, gcfgc);
- }
-}
-
#define CRTC_IDLE_TIMEOUT 1000 /* ms */
static void intel_crtc_idle_timer(unsigned long arg)
@@ -4011,12 +4024,6 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
- /* GPU isn't processing, downclock it. */
- if (!dev_priv->busy) {
- intel_decrease_renderclock(dev);
- intel_decrease_displayclock(dev);
- }
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4050,13 +4057,11 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy) {
+ if (!dev_priv->busy)
dev_priv->busy = true;
- intel_increase_renderclock(dev, true);
- } else {
+ else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- }
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
@@ -4243,6 +4248,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
.prepare = intel_crtc_prepare,
.commit = intel_crtc_commit,
.load_lut = intel_crtc_load_lut,
@@ -4400,29 +4406,43 @@ static void intel_setup_outputs(struct drm_device *dev)
bool found = false;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOB\n");
found = intel_sdvo_init(dev, SDVOB);
- if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+ if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
intel_hdmi_init(dev, SDVOB);
+ }
- if (!found && SUPPORTS_INTEGRATED_DP(dev))
+ if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_B\n");
intel_dp_init(dev, DP_B);
+ }
}
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(SDVOB) & SDVO_DETECTED)
+ if (I915_READ(SDVOB) & SDVO_DETECTED) {
+ DRM_DEBUG_KMS("probing SDVOC\n");
found = intel_sdvo_init(dev, SDVOC);
+ }
if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
- if (SUPPORTS_INTEGRATED_HDMI(dev))
+ if (SUPPORTS_INTEGRATED_HDMI(dev)) {
+ DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
intel_hdmi_init(dev, SDVOC);
- if (SUPPORTS_INTEGRATED_DP(dev))
+ }
+ if (SUPPORTS_INTEGRATED_DP(dev)) {
+ DRM_DEBUG_KMS("probing DP_C\n");
intel_dp_init(dev, DP_C);
+ }
}
- if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
+ if (SUPPORTS_INTEGRATED_DP(dev) &&
+ (I915_READ(DP_D) & DP_DETECTED)) {
+ DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
+ }
} else if (IS_I8XX(dev))
intel_dvo_init(dev);
@@ -4527,6 +4547,42 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_changed = intelfb_probe,
};
+static struct drm_gem_object *
+intel_alloc_power_context(struct drm_device *dev)
+{
+ struct drm_gem_object *pwrctx;
+ int ret;
+
+ pwrctx = drm_gem_object_alloc(dev, 4096);
+ if (!pwrctx) {
+ DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
+ return NULL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_object_pin(pwrctx, 4096);
+ if (ret) {
+ DRM_ERROR("failed to pin power context: %d\n", ret);
+ goto err_unref;
+ }
+
+ ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1);
+ if (ret) {
+ DRM_ERROR("failed to set-domain on power context: %d\n", ret);
+ goto err_unpin;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return pwrctx;
+
+err_unpin:
+ i915_gem_object_unpin(pwrctx);
+err_unref:
+ drm_gem_object_unreference(pwrctx);
+ mutex_unlock(&dev->struct_mutex);
+ return NULL;
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4579,42 +4635,27 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (I915_HAS_RC6(dev)) {
- struct drm_gem_object *pwrctx;
- struct drm_i915_gem_object *obj_priv;
- int ret;
+ if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
+ struct drm_i915_gem_object *obj_priv = NULL;
if (dev_priv->pwrctx) {
obj_priv = dev_priv->pwrctx->driver_private;
} else {
- pwrctx = drm_gem_object_alloc(dev, 4096);
- if (!pwrctx) {
- DRM_DEBUG("failed to alloc power context, "
- "RC6 disabled\n");
- goto out;
- }
+ struct drm_gem_object *pwrctx;
- ret = i915_gem_object_pin(pwrctx, 4096);
- if (ret) {
- DRM_ERROR("failed to pin power context: %d\n",
- ret);
- drm_gem_object_unreference(pwrctx);
- goto out;
+ pwrctx = intel_alloc_power_context(dev);
+ if (pwrctx) {
+ dev_priv->pwrctx = pwrctx;
+ obj_priv = pwrctx->driver_private;
}
-
- i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
- dev_priv->pwrctx = pwrctx;
- obj_priv = pwrctx->driver_private;
}
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
- I915_WRITE(MCHBAR_RENDER_STANDBY,
- I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ if (obj_priv) {
+ I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
+ }
}
-
-out:
- return;
}
/* Set up chip specific display functions */
@@ -4770,7 +4811,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
del_timer_sync(&intel_crtc->idle_timer);
}
- intel_increase_renderclock(dev, false);
del_timer_sync(&dev_priv->idle_timer);
if (dev_priv->display.disable_fbc)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4e7aa8b7b938..1349d9fd01c4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1402,14 +1402,20 @@ intel_dp_init(struct drm_device *dev, int output_reg)
break;
case DP_B:
case PCH_DP_B:
+ dev_priv->hotplug_supported_mask |=
+ HDMIB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case DP_C:
case PCH_DP_C:
+ dev_priv->hotplug_supported_mask |=
+ HDMIC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case DP_D:
case PCH_DP_D:
+ dev_priv->hotplug_supported_mask |=
+ HDMID_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f04dbbe7d400..06431941b233 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -303,21 +303,26 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
if (sdvox_reg == SDVOB) {
intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == SDVOC) {
intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIB) {
intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
"HDMIB");
+ dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMIC) {
intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
"HDMIC");
+ dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
} else if (sdvox_reg == HDMID) {
intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
"HDMID");
+ dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
}
if (!intel_output->ddc_bus)
goto err_connector;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3118ce274e67..f4b4aa242df1 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -608,6 +608,13 @@ static const struct dmi_system_id bad_lid_status[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"),
},
},
+ {
+ .ident = "PC-81005",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MALATA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"),
+ },
+ },
{ }
};
@@ -679,7 +686,14 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
+ struct drm_connector *connector = dev_priv->int_lvds_connector;
+ /*
+ * check and update the status of LVDS connector after receiving
+ * the LID nofication event.
+ */
+ if (connector)
+ connector->status = connector->funcs->detect(connector);
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
@@ -854,65 +868,6 @@ static const struct dmi_system_id intel_no_lvds[] = {
{ } /* terminating entry */
};
-#ifdef CONFIG_ACPI
-/*
- * check_lid_device -- check whether @handle is an ACPI LID device.
- * @handle: ACPI device handle
- * @level : depth in the ACPI namespace tree
- * @context: the number of LID device when we find the device
- * @rv: a return value to fill if desired (Not use)
- */
-static acpi_status
-check_lid_device(acpi_handle handle, u32 level, void *context,
- void **return_value)
-{
- struct acpi_device *acpi_dev;
- int *lid_present = context;
-
- acpi_dev = NULL;
- /* Get the acpi device for device handle */
- if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) {
- /* If there is no ACPI device for handle, return */
- return AE_OK;
- }
-
- if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7))
- *lid_present = 1;
-
- return AE_OK;
-}
-
-/**
- * check whether there exists the ACPI LID device by enumerating the ACPI
- * device tree.
- */
-static int intel_lid_present(void)
-{
- int lid_present = 0;
-
- if (acpi_disabled) {
- /* If ACPI is disabled, there is no ACPI device tree to
- * check, so assume the LID device would have been present.
- */
- return 1;
- }
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- check_lid_device, NULL, &lid_present, NULL);
-
- return lid_present;
-}
-#else
-static int intel_lid_present(void)
-{
- /* In the absence of ACPI built in, assume that the LID device would
- * have been present.
- */
- return 1;
-}
-#endif
-
/**
* intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
* @dev: drm device
@@ -1031,12 +986,8 @@ void intel_lvds_init(struct drm_device *dev)
if (dmi_check_system(intel_no_lvds))
return;
- /*
- * Assume LVDS is present if there's an ACPI lid device or if the
- * device is present in the VBT.
- */
- if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
- DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
+ if (!lvds_is_present_in_vbt(dev)) {
+ DRM_DEBUG_KMS("LVDS is not present in VBT\n");
return;
}
@@ -1180,6 +1131,8 @@ out:
DRM_DEBUG_KMS("lid notifier registration failed\n");
dev_priv->lid_notifier.notifier_call = NULL;
}
+ /* keep the LVDS connector */
+ dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
return;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 24a3dc99716c..de5144c8c153 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2662,6 +2662,7 @@ static void intel_sdvo_create_enhance_property(struct drm_connector *connector)
bool intel_sdvo_init(struct drm_device *dev, int output_device)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_output *intel_output;
struct intel_sdvo_priv *sdvo_priv;
@@ -2708,10 +2709,12 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOB/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
} else {
intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA,
"SDVOC/VGA DDC BUS");
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
}
if (intel_output->ddc_bus == NULL)
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 552ec110b741..1d5b9b7b033f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1840,6 +1840,8 @@ intel_tv_init(struct drm_device *dev)
drm_connector_attach_property(connector,
dev->mode_config.tv_bottom_margin_property,
tv_priv->margin[TV_MARGIN_BOTTOM]);
+
+ dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS;
out:
drm_sysfs_connector_add(connector);
}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index b1bc1ea182b8..1175429da102 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG
via debugfs.
menu "I2C encoder or helper chips"
- depends on DRM && I2C
+ depends on DRM && DRM_KMS_HELPER && I2C
config DRM_I2C_CH7006
tristate "Chrontel ch7006 TV encoder"
- depends on DRM_NOUVEAU
- default m
+ default m if DRM_NOUVEAU
help
Support for Chrontel ch7006 and similar TV encoders, found
on some nVidia video cards.
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 0cad6d834eb2..e342a418d434 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -33,10 +33,13 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+#include <linux/log2.h>
+
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = dev_priv->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (unlikely(nvbo->gem))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
+ if (nvbo->tile)
+ nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
+
spin_lock(&dev_priv->ttm.bo_list_lock);
list_del(&nvbo->head);
spin_unlock(&dev_priv->ttm.bo_list_lock);
kfree(nvbo);
}
+static void
+nouveau_bo_fixup_align(struct drm_device *dev,
+ uint32_t tile_mode, uint32_t tile_flags,
+ int *align, int *size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+ * Some of the tile_flags have a periodic structure of N*4096 bytes,
+ * align to to that as well as the page size. Overallocate memory to
+ * avoid corruption of other buffer objects.
+ */
+ if (dev_priv->card_type == NV_50) {
+ uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
+ int i;
+
+ switch (tile_flags) {
+ case 0x1800:
+ case 0x2800:
+ case 0x4800:
+ case 0x7a00:
+ *size = roundup(*size, block_size);
+ if (is_power_of_2(block_size)) {
+ *size += 3 * block_size;
+ for (i = 1; i < 10; i++) {
+ *align = 12 * i * block_size;
+ if (!(*align % 65536))
+ break;
+ }
+ } else {
+ *size += 6 * block_size;
+ for (i = 1; i < 10; i++) {
+ *align = 8 * i * block_size;
+ if (!(*align % 65536))
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ } else {
+ if (tile_mode) {
+ if (dev_priv->chipset >= 0x40) {
+ *align = 65536;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x30) {
+ *align = 32768;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x20) {
+ *align = 16384;
+ *size = roundup(*size, 64 * tile_mode);
+
+ } else if (dev_priv->chipset >= 0x10) {
+ *align = 16384;
+ *size = roundup(*size, 32 * tile_mode);
+ }
+ }
+ }
+
+ /* ALIGN works only on powers of two. */
+ *size = roundup(*size, PAGE_SIZE);
+
+ if (dev_priv->card_type == NV_50) {
+ *size = roundup(*size, 65536);
+ *align = max(65536, *align);
+ }
+}
+
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
- int ret, n = 0;
+ int ret = 0;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
- /*
- * Some of the tile_flags have a periodic structure of N*4096 bytes,
- * align to to that as well as the page size. Overallocate memory to
- * avoid corruption of other buffer objects.
- */
- switch (tile_flags) {
- case 0x1800:
- case 0x2800:
- case 0x4800:
- case 0x7a00:
- if (dev_priv->chipset >= 0xA0) {
- /* This is based on high end cards with 448 bits
- * memory bus, could be different elsewhere.*/
- size += 6 * 28672;
- /* 8 * 28672 is the actual alignment requirement,
- * but we must also align to page size. */
- align = 2 * 8 * 28672;
- } else if (dev_priv->chipset >= 0x90) {
- size += 3 * 16384;
- align = 12 * 16834;
- } else {
- size += 3 * 8192;
- /* 12 * 8192 is the actual alignment requirement,
- * but we must also align to page size. */
- align = 2 * 12 * 8192;
- }
- break;
- default:
- break;
- }
-
+ nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
align >>= PAGE_SHIFT;
- size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
- if (dev_priv->card_type == NV_50) {
- size = (size + 65535) & ~65535;
- if (align < (65536 / PAGE_SIZE))
- align = (65536 / PAGE_SIZE);
- }
-
- if (flags & TTM_PL_FLAG_VRAM)
- nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
- if (flags & TTM_PL_FLAG_TT)
- nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
nvbo->placement.fpfn = 0;
nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
- nvbo->placement.placement = nvbo->placements;
- nvbo->placement.busy_placement = nvbo->placements;
- nvbo->placement.num_placement = n;
- nvbo->placement.num_busy_placement = n;
+ nouveau_bo_placement_set(nvbo, flags);
nvbo->channel = chan;
- nouveau_bo_placement_set(nvbo, flags);
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement, align, 0,
false, NULL, size, nouveau_bo_del_ttm);
@@ -421,6 +454,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
* TTM_PL_{VRAM,TT} directly.
*/
+
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -455,11 +489,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
}
static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
- struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+ int no_wait, struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct ttm_mem_reg *old_mem = &bo->mem;
struct nouveau_channel *chan;
uint64_t src_offset, dst_offset;
uint32_t page_count;
@@ -547,7 +582,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = &placement_memtype;
+ placement.placement = placement.busy_placement = &placement_memtype;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -559,7 +594,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
if (ret)
goto out;
@@ -585,7 +620,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
placement.fpfn = placement.lpfn = 0;
placement.num_placement = placement.num_busy_placement = 1;
- placement.placement = &placement_memtype;
+ placement.placement = placement.busy_placement = &placement_memtype;
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
@@ -597,7 +632,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
if (ret)
goto out;
@@ -612,52 +647,106 @@ out:
}
static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+ struct nouveau_tile_reg **new_tile)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_device *dev = dev_priv->dev;
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ uint64_t offset;
int ret;
- if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
- !nvbo->no_vm) {
- uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+ /* Nothing to do. */
+ *new_tile = NULL;
+ return 0;
+ }
+
+ offset = new_mem->mm_node->start << PAGE_SHIFT;
+ if (dev_priv->card_type == NV_50) {
ret = nv50_mem_vm_bind_linear(dev,
offset + dev_priv->vm_vram_base,
new_mem->size, nvbo->tile_flags,
offset);
if (ret)
return ret;
+
+ } else if (dev_priv->card_type >= NV_10) {
+ *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+ nvbo->tile_mode);
}
+ return 0;
+}
+
+static void
+nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
+ struct nouveau_tile_reg *new_tile,
+ struct nouveau_tile_reg **old_tile)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = dev_priv->dev;
+
+ if (dev_priv->card_type >= NV_10 &&
+ dev_priv->card_type < NV_50) {
+ if (*old_tile)
+ nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
+
+ *old_tile = new_tile;
+ }
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+ bool no_wait, struct ttm_mem_reg *new_mem)
+{
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_tile_reg *new_tile = NULL;
+ int ret = 0;
+
+ ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+ if (ret)
+ return ret;
+
+ /* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
- !dev_priv->channel)
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ !dev_priv->channel) {
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ goto out;
+ }
+ /* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
BUG_ON(bo->mem.mm_node != NULL);
bo->mem = *new_mem;
new_mem->mm_node = NULL;
- return 0;
+ goto out;
}
- if (new_mem->mem_type == TTM_PL_SYSTEM) {
- if (old_mem->mem_type == TTM_PL_SYSTEM)
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
- if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- } else {
- if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
- return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
- }
+ /* Hardware assisted copy. */
+ if (new_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ else if (old_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ else
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
- return 0;
+ if (!ret)
+ goto out;
+
+ /* Fallback to software copy. */
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+out:
+ if (ret)
+ nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+ else
+ nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+
+ return ret;
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 9aaa972f8822..343d718a9667 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return ret;
}
+ nouveau_dma_pre_init(chan);
+
/* Locate channel's user control regs */
if (dev_priv->card_type < NV_40)
user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
return 0;
}
-int
-nouveau_channel_idle(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_engine *engine = &dev_priv->engine;
- uint32_t caches;
- int idle;
-
- if (!chan) {
- NV_ERROR(dev, "no channel...\n");
- return 1;
- }
-
- caches = nv_rd32(dev, NV03_PFIFO_CACHES);
- nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
-
- if (engine->fifo.channel_id(dev) != chan->id) {
- struct nouveau_gpuobj *ramfc =
- chan->ramfc ? chan->ramfc->gpuobj : NULL;
-
- if (!ramfc) {
- NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
- return 1;
- }
-
- engine->instmem.prepare_access(dev, false);
- if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
- idle = 0;
- else
- idle = 1;
- engine->instmem.finish_access(dev);
- } else {
- idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, caches);
- return idle;
-}
-
/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
@@ -414,7 +375,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
init->subchan[0].grclass = 0x0039;
else
init->subchan[0].grclass = 0x5039;
- init->nr_subchan = 1;
+ init->subchan[1].handle = NvSw;
+ init->subchan[1].grclass = NV_SW;
+ init->nr_subchan = 2;
/* Named memory object area */
ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 703553687b20..7afbe8b40d51 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -29,12 +29,22 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"
+void
+nouveau_dma_pre_init(struct nouveau_channel *chan)
+{
+ chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ chan->dma.put = 0;
+ chan->dma.cur = chan->dma.put;
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+}
+
int
nouveau_dma_init(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *m2mf = NULL;
+ struct nouveau_gpuobj *nvsw = NULL;
int ret, i;
/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
if (ret)
return ret;
+ /* Create an NV_SW object for various sync purposes */
+ ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+ if (ret)
+ return ret;
+
/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
}
- /* Initialise DMA vars */
- chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
- chan->dma.put = 0;
- chan->dma.cur = chan->dma.put;
- chan->dma.free = chan->dma.max - chan->dma.cur;
-
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
OUT_RING(chan, NvNotify0);
+ /* Initialise NV_SW */
+ ret = RING_SPACE(chan, 2);
+ if (ret)
+ return ret;
+ BEGIN_RING(chan, NvSubSw, 0, 1);
+ OUT_RING(chan, NvSw);
+
/* Sit back and pray the channel works.. */
FIRE_RING(chan);
@@ -113,7 +133,7 @@ READ_GET(struct nouveau_channel *chan, uint32_t *get)
val = nvchan_rd32(chan, chan->user_get);
if (val < chan->pushbuf_base ||
- val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
+ val > chan->pushbuf_base + (chan->dma.max << 2)) {
/* meaningless to dma_wait() except to know whether the
* GPU has stalled or not
*/
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index 04e85d8f757e..dabfd655f93e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -46,10 +46,11 @@
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
NvSubM2MF = 0,
- NvSub2D = 1,
- NvSubCtxSurf2D = 1,
- NvSubGdiRect = 2,
- NvSubImageBlit = 3
+ NvSubSw = 1,
+ NvSub2D = 2,
+ NvSubCtxSurf2D = 2,
+ NvSubGdiRect = 3,
+ NvSubImageBlit = 4
};
/* Object handles. */
@@ -67,6 +68,7 @@ enum {
NvClipRect = 0x8000000b,
NvGdiRect = 0x8000000c,
NvImageBlit = 0x8000000d,
+ NvSw = 0x8000000e,
/* G80+ display objects */
NvEvoVRAM = 0x01000000,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 5f8cbb79c499..026419fe8791 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -59,11 +59,19 @@ struct nouveau_grctx;
#define MAX_NUM_DCB_ENTRIES 16
#define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_TILE_NR 15
#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
#define NV50_VM_BLOCK (512*1024*1024ULL)
#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
+struct nouveau_tile_reg {
+ struct nouveau_fence *fence;
+ uint32_t addr;
+ uint32_t size;
+ bool used;
+};
+
struct nouveau_bo {
struct ttm_buffer_object bo;
struct ttm_placement placement;
@@ -83,6 +91,7 @@ struct nouveau_bo {
uint32_t tile_mode;
uint32_t tile_flags;
+ struct nouveau_tile_reg *tile;
struct drm_gem_object *gem;
struct drm_file *cpu_filp;
@@ -277,8 +286,13 @@ struct nouveau_timer_engine {
};
struct nouveau_fb_engine {
+ int num_tiles;
+
int (*init)(struct drm_device *dev);
void (*takedown)(struct drm_device *dev);
+
+ void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch);
};
struct nouveau_fifo_engine {
@@ -292,6 +306,8 @@ struct nouveau_fifo_engine {
void (*disable)(struct drm_device *);
void (*enable)(struct drm_device *);
bool (*reassign)(struct drm_device *, bool enable);
+ bool (*cache_flush)(struct drm_device *dev);
+ bool (*cache_pull)(struct drm_device *dev, bool enable);
int (*channel_id)(struct drm_device *);
@@ -330,6 +346,9 @@ struct nouveau_pgraph_engine {
void (*destroy_context)(struct nouveau_channel *);
int (*load_context)(struct nouveau_channel *);
int (*unload_context)(struct drm_device *);
+
+ void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch);
};
struct nouveau_engine {
@@ -548,6 +567,12 @@ struct drm_nouveau_private {
unsigned long sg_handle;
} gart_info;
+ /* nv10-nv40 tiling regions */
+ struct {
+ struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+ spinlock_t lock;
+ } tile;
+
/* G8x/G9x virtual address space */
uint64_t vm_gart_base;
uint64_t vm_gart_size;
@@ -685,6 +710,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
+ uint32_t addr,
+ uint32_t size,
+ uint32_t pitch);
+extern void nv10_mem_expire_tiling(struct drm_device *dev,
+ struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence);
extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
uint32_t size, uint32_t flags,
uint64_t phys);
@@ -713,7 +745,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t fb_ctxdma, uint32_t tt_ctxdma);
extern void nouveau_channel_free(struct nouveau_channel *);
-extern int nouveau_channel_idle(struct nouveau_channel *chan);
/* nouveau_object.c */
extern int nouveau_gpuobj_early_init(struct drm_device *);
@@ -756,6 +787,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
uint32_t *o_ret);
extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
+ struct nouveau_gpuobj **);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -804,6 +837,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
#endif
/* nouveau_dma.c */
+extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int size);
@@ -879,16 +913,22 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
+extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
+extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern bool nv04_fifo_cache_flush(struct drm_device *);
+extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
extern int nv04_fifo_channel_id(struct drm_device *);
extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -941,6 +981,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
extern void nv10_graph_context_switch(struct drm_device *);
+extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv20_graph.c */
extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -952,6 +994,8 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
+extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv40_graph.c */
extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -963,6 +1007,8 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+ uint32_t, uint32_t);
/* nv50_graph.c */
extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -1030,8 +1076,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
/* nv04_dac.c */
extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
- struct drm_connector *connector);
+extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
extern int nv04_dac_output_offset(struct drm_encoder *encoder);
extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
@@ -1049,9 +1094,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
/* nv17_tv.c */
extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
- struct drm_connector *connector,
- uint32_t pin_mask);
/* nv04_display.c */
extern int nv04_display_create(struct drm_device *);
@@ -1290,14 +1332,14 @@ nv_two_reg_pll(struct drm_device *dev)
return false;
}
-#define NV50_NVSW 0x0000506e
-#define NV50_NVSW_DMA_SEMAPHORE 0x00000060
-#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064
-#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068
-#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c
-#define NV50_NVSW_DMA_VBLSEM 0x0000018c
-#define NV50_NVSW_VBLSEM_OFFSET 0x00000400
-#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
-#define NV50_NVSW_VBLSEM_RELEASE 0x00000408
+#define NV_SW 0x0000506e
+#define NV_SW_DMA_SEMAPHORE 0x00000060
+#define NV_SW_SEMAPHORE_OFFSET 0x00000064
+#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
+#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
+#define NV_SW_DMA_VBLSEM 0x0000018c
+#define NV_SW_VBLSEM_OFFSET 0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
+#define NV_SW_VBLSEM_RELEASE 0x00000408
#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 84af25c238b6..0b05c869e0e7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -64,8 +64,7 @@ nouveau_fbcon_sync(struct fb_info *info)
return 0;
if (RING_SPACE(chan, 4)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
}
if (ret) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
@@ -212,11 +210,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
mode_cmd.bpp = surface_bpp;
mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
- mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+ mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
mode_cmd.depth = surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ size = roundup(size, PAGE_SIZE);
ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
0, 0x0000, false, true, &nvbo);
@@ -380,3 +378,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
return 0;
}
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info)
+{
+ struct nouveau_fbcon_par *par = info->par;
+ struct drm_device *dev = par->dev;
+
+ NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ info->flags |= FBINFO_HWACCEL_DISABLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 8531140fedbc..462e0b87b4bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -43,5 +43,6 @@ void nouveau_fbcon_zfill(struct drm_device *dev);
int nv04_fbcon_accel_init(struct fb_info *info);
int nv50_fbcon_accel_init(struct fb_info *info);
+void nouveau_fbcon_gpu_lockup(struct fb_info *info);
#endif /* __NV50_FBCON_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index dacac9a0842a..faddf53ff9ed 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
list_add_tail(&fence->entry, &chan->fence.pending);
spin_unlock_irqrestore(&chan->fence.lock, flags);
- BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+ BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
FIRE_RING(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 18fd8ac9fca7..2009db2426c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
}
struct validate_op {
- struct nouveau_fence *fence;
struct list_head vram_list;
struct list_head gart_list;
struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
}
static void
-validate_fini(struct validate_op *op, bool success)
+validate_fini(struct validate_op *op, struct nouveau_fence* fence)
{
- struct nouveau_fence *fence = op->fence;
-
- if (unlikely(!success))
- op->fence = NULL;
-
- validate_fini_list(&op->vram_list, op->fence);
- validate_fini_list(&op->gart_list, op->fence);
- validate_fini_list(&op->both_list, op->fence);
- nouveau_fence_unref((void *)&fence);
+ validate_fini_list(&op->vram_list, fence);
+ validate_fini_list(&op->gart_list, fence);
+ validate_fini_list(&op->both_list, fence);
}
static int
@@ -420,10 +413,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
INIT_LIST_HEAD(&op->gart_list);
INIT_LIST_HEAD(&op->both_list);
- ret = nouveau_fence_new(chan, &op->fence, false);
- if (ret)
- return ret;
-
if (nr_buffers == 0)
return 0;
@@ -541,6 +530,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
struct nouveau_channel *chan;
struct validate_op op;
+ struct nouveau_fence* fence = 0;
uint32_t *pushbuf = NULL;
int ret = 0, do_reloc = 0, i;
@@ -597,7 +587,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
OUT_RINGp(chan, pushbuf, req->nr_dwords);
- ret = nouveau_fence_emit(op.fence);
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
@@ -605,7 +595,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
if (nouveau_gem_pushbuf_sync(chan)) {
- ret = nouveau_fence_wait(op.fence, NULL, false, false);
+ ret = nouveau_fence_wait(fence, NULL, false, false);
if (ret) {
for (i = 0; i < req->nr_dwords; i++)
NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -614,7 +604,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
}
out:
- validate_fini(&op, ret == 0);
+ validate_fini(&op, fence);
+ nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(pushbuf);
kfree(bo);
@@ -634,6 +625,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
struct nouveau_bo *pbbo;
struct validate_op op;
+ struct nouveau_fence* fence = 0;
int i, ret = 0, do_reloc = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -772,7 +764,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
OUT_RING(chan, 0);
}
- ret = nouveau_fence_emit(op.fence);
+ ret = nouveau_fence_new(chan, &fence, true);
if (ret) {
NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
WIND_RING(chan);
@@ -780,7 +772,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
}
out:
- validate_fini(&op, ret == 0);
+ validate_fini(&op, fence);
+ nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(bo);
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 370c72c968d1..919a619ca7fa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -635,6 +635,7 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
if ((nv_rd32(dev, 0x400500) & isb) != isb)
nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 5158a12f7844..fb9bdd6edf1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -192,6 +192,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
}
/*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = addr;
+ tile->size = size;
+ tile->used = !!pitch;
+ nouveau_fence_unref((void **)&tile->fence);
+
+ if (!pfifo->cache_flush(dev))
+ return;
+
+ pfifo->reassign(dev, false);
+ pfifo->cache_flush(dev);
+ pfifo->cache_pull(dev, false);
+
+ nouveau_wait_for_idle(dev);
+
+ pgraph->set_region_tiling(dev, i, addr, size, pitch);
+ pfb->set_region_tiling(dev, i, addr, size, pitch);
+
+ pfifo->cache_pull(dev, true);
+ pfifo->reassign(dev, true);
+}
+
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+ uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
+ int i;
+
+ spin_lock(&dev_priv->tile.lock);
+
+ for (i = 0; i < pfb->num_tiles; i++) {
+ if (tile[i].used)
+ /* Tile region in use. */
+ continue;
+
+ if (tile[i].fence &&
+ !nouveau_fence_signalled(tile[i].fence, NULL))
+ /* Pending tile region. */
+ continue;
+
+ if (max(tile[i].addr, addr) <
+ min(tile[i].addr + tile[i].size, addr + size))
+ /* Kill an intersecting tile region. */
+ nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+
+ if (pitch && !found) {
+ /* Free tile region. */
+ nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
+ found = &tile[i];
+ }
+ }
+
+ spin_unlock(&dev_priv->tile.lock);
+
+ return found;
+}
+
+void
+nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
+ struct nouveau_fence *fence)
+{
+ if (fence) {
+ /* Mark it as pending. */
+ tile->fence = fence;
+ nouveau_fence_ref(fence);
+ }
+
+ tile->used = false;
+}
+
+/*
* NV50 VM helpers
*/
int
@@ -513,6 +599,7 @@ nouveau_mem_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
spin_lock_init(&dev_priv->ttm.bo_list_lock);
+ spin_lock_init(&dev_priv->tile.lock);
dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 93379bb81bea..6c2cf81716df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -881,7 +881,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
return 0;
}
-static int
+int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
struct nouveau_gpuobj **gpuobj_ret)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index fa1b0e7165b9..251f1b3b38b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -349,19 +349,19 @@
#define NV04_PGRAPH_BLEND 0x00400824
#define NV04_PGRAPH_STORED_FMT 0x00400830
#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
-#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
-#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
-#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
-#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
+#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
#define NV04_PGRAPH_U_RAM 0x00400D00
-#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
-#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
-#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
-#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
+#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index e76ec2d207a9..09b9a46dfc0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv04_fifo_channel_id;
engine->fifo.create_context = nv04_fifo_create_context;
engine->fifo.destroy_context = nv04_fifo_destroy_context;
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv10_graph_grclass;
engine->graph.init = nv10_graph_init;
engine->graph.takedown = nv10_graph_takedown;
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv10_graph_load_context;
engine->graph.unload_context = nv10_graph_unload_context;
+ engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv20_graph_grclass;
engine->graph.init = nv20_graph_init;
engine->graph.takedown = nv20_graph_takedown;
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.fifo_access = nv04_graph_fifo_access;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv30_graph_grclass;
engine->graph.init = nv30_graph_init;
engine->graph.takedown = nv20_graph_takedown;
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv20_graph_destroy_context;
engine->graph.load_context = nv20_graph_load_context;
engine->graph.unload_context = nv20_graph_unload_context;
+ engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv10_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv10_fifo_create_context;
engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
+ engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
engine->graph.grclass = nv40_graph_grclass;
engine->graph.init = nv40_graph_init;
engine->graph.takedown = nv40_graph_takedown;
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.unload_context = nv40_graph_unload_context;
+ engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
engine->fifo.channels = 32;
engine->fifo.init = nv40_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.disable = nv04_fifo_disable;
engine->fifo.enable = nv04_fifo_enable;
engine->fifo.reassign = nv04_fifo_reassign;
+ engine->fifo.cache_flush = nv04_fifo_cache_flush;
+ engine->fifo.cache_pull = nv04_fifo_cache_pull;
engine->fifo.channel_id = nv10_fifo_channel_id;
engine->fifo.create_context = nv40_fifo_create_context;
engine->fifo.destroy_context = nv40_fifo_destroy_context;
@@ -624,7 +642,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->chipset = (reg0 & 0xff00000) >> 20;
/* NV04 or NV05 */
} else if ((reg0 & 0xff00fff0) == 0x20004000) {
- dev_priv->chipset = 0x04;
+ if (reg0 & 0x00f00000)
+ dev_priv->chipset = 0x05;
+ else
+ dev_priv->chipset = 0x04;
} else
dev_priv->chipset = 0xff;
@@ -704,8 +725,8 @@ static void nouveau_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- /* In the case of an error dev_priv may not be be allocated yet */
- if (dev_priv && dev_priv->card_type)
+ /* In the case of an error dev_priv may not be allocated yet */
+ if (dev_priv)
nouveau_card_takedown(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 187eb84e4da5..c385d50f041b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,45 +28,17 @@
#include "nouveau_drv.h"
-static struct vm_operations_struct nouveau_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static int
-nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo = vma->vm_private_data;
- int ret;
-
- if (unlikely(bo == NULL))
- return VM_FAULT_NOPAGE;
-
- ret = ttm_vm_ops->fault(vma, vmf);
- return ret;
-}
-
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct drm_nouveau_private *dev_priv =
file_priv->minor->dev->dev_private;
- int ret;
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
return drm_mmap(filp, vma);
- ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
- if (unlikely(ret != 0))
- return ret;
-
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- nouveau_ttm_vm_ops = *ttm_vm_ops;
- nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
- }
-
- vma->vm_ops = &nouveau_ttm_vm_ops;
- return 0;
+ return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index d9f32879ba38..d0e038d28948 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -212,16 +212,15 @@ out:
return connector_status_disconnected;
}
-enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
- uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
- int head, present = 0;
+ int head;
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
if (dcb->type == OUTPUT_TV) {
@@ -287,13 +286,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
msleep(5);
- temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
-
- if (dcb->type == OUTPUT_TV)
- present = (nv17_tv_detect(encoder, connector, temp)
- == connector_status_connected);
- else
- present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
+ sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -310,15 +303,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
- if (present) {
- NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
+ return sample;
+}
+
+static enum drm_connector_status
+nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ uint32_t sample = nv17_dac_sample_load(encoder);
+
+ if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+ NV_INFO(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
return connector_status_connected;
+ } else {
+ return connector_status_disconnected;
}
-
- return connector_status_disconnected;
}
-
static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 09a31071ee58..d910873c1368 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -62,14 +61,12 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
struct drm_device *dev = par->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
- uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
if (info->state != FBINFO_STATE_RUNNING)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -80,7 +77,11 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
- OUT_RING(chan, color);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING(chan, rect->color);
BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
OUT_RING(chan, (rect->dx << 16) | rect->dy);
OUT_RING(chan, (rect->width << 16) | rect->height);
@@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int iter_len = dsize > 128 ? 128 : dsize;
if (RING_SPACE(chan, iter_len + 1)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
cfb_imageblit(info, image);
return;
}
@@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct drm_device *dev = par->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->channel;
+ const int sub = NvSubCtxSurf2D;
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info)
return ret;
if (RING_SPACE(chan, 49)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
return 0;
}
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvCtxSurf2D);
- BEGIN_RING(chan, 1, 0x0184, 2);
+ BEGIN_RING(chan, sub, 0x0184, 2);
OUT_RING(chan, NvDmaFB);
OUT_RING(chan, NvDmaFB);
- BEGIN_RING(chan, 1, 0x0300, 4);
+ BEGIN_RING(chan, sub, 0x0300, 4);
OUT_RING(chan, surface_fmt);
OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvRop);
- BEGIN_RING(chan, 1, 0x0300, 1);
+ BEGIN_RING(chan, sub, 0x0300, 1);
OUT_RING(chan, 0x55);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvImagePatt);
- BEGIN_RING(chan, 1, 0x0300, 8);
+ BEGIN_RING(chan, sub, 0x0300, 8);
OUT_RING(chan, pattern_fmt);
#ifdef __BIG_ENDIAN
OUT_RING(chan, 2);
@@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, ~0);
OUT_RING(chan, ~0);
- BEGIN_RING(chan, 1, 0x0000, 1);
+ BEGIN_RING(chan, sub, 0x0000, 1);
OUT_RING(chan, NvClipRect);
- BEGIN_RING(chan, 1, 0x0300, 2);
+ BEGIN_RING(chan, sub, 0x0300, 2);
OUT_RING(chan, 0);
OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 0c3cd53c7313..f31347b8c9b0 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
return (reassign == 1);
}
+bool
+nv04_fifo_cache_flush(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ uint64_t start = ptimer->read(dev);
+
+ do {
+ if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
+ return true;
+
+ } while (ptimer->read(dev) - start < 100000000);
+
+ NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
+
+ return false;
+}
+
+bool
+nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
+{
+ uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+
+ if (enable) {
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
+ } else {
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+ }
+
+ return !!(pull & 1);
+}
+
int
nv04_fifo_channel_id(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index d561d773c0f4..e260986ea65a 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -28,6 +28,10 @@
#include "nouveau_drv.h"
static uint32_t nv04_graph_ctx_regs[] = {
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
NV04_PGRAPH_CTX_SWITCH1,
NV04_PGRAPH_CTX_SWITCH2,
NV04_PGRAPH_CTX_SWITCH3,
@@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = {
NV04_PGRAPH_PATT_COLOR0,
NV04_PGRAPH_PATT_COLOR1,
NV04_PGRAPH_PATT_COLORRAM+0x00,
- NV04_PGRAPH_PATT_COLORRAM+0x01,
- NV04_PGRAPH_PATT_COLORRAM+0x02,
- NV04_PGRAPH_PATT_COLORRAM+0x03,
NV04_PGRAPH_PATT_COLORRAM+0x04,
- NV04_PGRAPH_PATT_COLORRAM+0x05,
- NV04_PGRAPH_PATT_COLORRAM+0x06,
- NV04_PGRAPH_PATT_COLORRAM+0x07,
NV04_PGRAPH_PATT_COLORRAM+0x08,
- NV04_PGRAPH_PATT_COLORRAM+0x09,
- NV04_PGRAPH_PATT_COLORRAM+0x0A,
- NV04_PGRAPH_PATT_COLORRAM+0x0B,
- NV04_PGRAPH_PATT_COLORRAM+0x0C,
- NV04_PGRAPH_PATT_COLORRAM+0x0D,
- NV04_PGRAPH_PATT_COLORRAM+0x0E,
- NV04_PGRAPH_PATT_COLORRAM+0x0F,
+ NV04_PGRAPH_PATT_COLORRAM+0x0c,
NV04_PGRAPH_PATT_COLORRAM+0x10,
- NV04_PGRAPH_PATT_COLORRAM+0x11,
- NV04_PGRAPH_PATT_COLORRAM+0x12,
- NV04_PGRAPH_PATT_COLORRAM+0x13,
NV04_PGRAPH_PATT_COLORRAM+0x14,
- NV04_PGRAPH_PATT_COLORRAM+0x15,
- NV04_PGRAPH_PATT_COLORRAM+0x16,
- NV04_PGRAPH_PATT_COLORRAM+0x17,
NV04_PGRAPH_PATT_COLORRAM+0x18,
- NV04_PGRAPH_PATT_COLORRAM+0x19,
- NV04_PGRAPH_PATT_COLORRAM+0x1A,
- NV04_PGRAPH_PATT_COLORRAM+0x1B,
- NV04_PGRAPH_PATT_COLORRAM+0x1C,
- NV04_PGRAPH_PATT_COLORRAM+0x1D,
- NV04_PGRAPH_PATT_COLORRAM+0x1E,
- NV04_PGRAPH_PATT_COLORRAM+0x1F,
+ NV04_PGRAPH_PATT_COLORRAM+0x1c,
NV04_PGRAPH_PATT_COLORRAM+0x20,
- NV04_PGRAPH_PATT_COLORRAM+0x21,
- NV04_PGRAPH_PATT_COLORRAM+0x22,
- NV04_PGRAPH_PATT_COLORRAM+0x23,
NV04_PGRAPH_PATT_COLORRAM+0x24,
- NV04_PGRAPH_PATT_COLORRAM+0x25,
- NV04_PGRAPH_PATT_COLORRAM+0x26,
- NV04_PGRAPH_PATT_COLORRAM+0x27,
NV04_PGRAPH_PATT_COLORRAM+0x28,
- NV04_PGRAPH_PATT_COLORRAM+0x29,
- NV04_PGRAPH_PATT_COLORRAM+0x2A,
- NV04_PGRAPH_PATT_COLORRAM+0x2B,
- NV04_PGRAPH_PATT_COLORRAM+0x2C,
- NV04_PGRAPH_PATT_COLORRAM+0x2D,
- NV04_PGRAPH_PATT_COLORRAM+0x2E,
- NV04_PGRAPH_PATT_COLORRAM+0x2F,
+ NV04_PGRAPH_PATT_COLORRAM+0x2c,
NV04_PGRAPH_PATT_COLORRAM+0x30,
- NV04_PGRAPH_PATT_COLORRAM+0x31,
- NV04_PGRAPH_PATT_COLORRAM+0x32,
- NV04_PGRAPH_PATT_COLORRAM+0x33,
NV04_PGRAPH_PATT_COLORRAM+0x34,
- NV04_PGRAPH_PATT_COLORRAM+0x35,
- NV04_PGRAPH_PATT_COLORRAM+0x36,
- NV04_PGRAPH_PATT_COLORRAM+0x37,
NV04_PGRAPH_PATT_COLORRAM+0x38,
- NV04_PGRAPH_PATT_COLORRAM+0x39,
- NV04_PGRAPH_PATT_COLORRAM+0x3A,
- NV04_PGRAPH_PATT_COLORRAM+0x3B,
- NV04_PGRAPH_PATT_COLORRAM+0x3C,
- NV04_PGRAPH_PATT_COLORRAM+0x3D,
- NV04_PGRAPH_PATT_COLORRAM+0x3E,
- NV04_PGRAPH_PATT_COLORRAM+0x3F,
+ NV04_PGRAPH_PATT_COLORRAM+0x3c,
+ NV04_PGRAPH_PATT_COLORRAM+0x40,
+ NV04_PGRAPH_PATT_COLORRAM+0x44,
+ NV04_PGRAPH_PATT_COLORRAM+0x48,
+ NV04_PGRAPH_PATT_COLORRAM+0x4c,
+ NV04_PGRAPH_PATT_COLORRAM+0x50,
+ NV04_PGRAPH_PATT_COLORRAM+0x54,
+ NV04_PGRAPH_PATT_COLORRAM+0x58,
+ NV04_PGRAPH_PATT_COLORRAM+0x5c,
+ NV04_PGRAPH_PATT_COLORRAM+0x60,
+ NV04_PGRAPH_PATT_COLORRAM+0x64,
+ NV04_PGRAPH_PATT_COLORRAM+0x68,
+ NV04_PGRAPH_PATT_COLORRAM+0x6c,
+ NV04_PGRAPH_PATT_COLORRAM+0x70,
+ NV04_PGRAPH_PATT_COLORRAM+0x74,
+ NV04_PGRAPH_PATT_COLORRAM+0x78,
+ NV04_PGRAPH_PATT_COLORRAM+0x7c,
+ NV04_PGRAPH_PATT_COLORRAM+0x80,
+ NV04_PGRAPH_PATT_COLORRAM+0x84,
+ NV04_PGRAPH_PATT_COLORRAM+0x88,
+ NV04_PGRAPH_PATT_COLORRAM+0x8c,
+ NV04_PGRAPH_PATT_COLORRAM+0x90,
+ NV04_PGRAPH_PATT_COLORRAM+0x94,
+ NV04_PGRAPH_PATT_COLORRAM+0x98,
+ NV04_PGRAPH_PATT_COLORRAM+0x9c,
+ NV04_PGRAPH_PATT_COLORRAM+0xa0,
+ NV04_PGRAPH_PATT_COLORRAM+0xa4,
+ NV04_PGRAPH_PATT_COLORRAM+0xa8,
+ NV04_PGRAPH_PATT_COLORRAM+0xac,
+ NV04_PGRAPH_PATT_COLORRAM+0xb0,
+ NV04_PGRAPH_PATT_COLORRAM+0xb4,
+ NV04_PGRAPH_PATT_COLORRAM+0xb8,
+ NV04_PGRAPH_PATT_COLORRAM+0xbc,
+ NV04_PGRAPH_PATT_COLORRAM+0xc0,
+ NV04_PGRAPH_PATT_COLORRAM+0xc4,
+ NV04_PGRAPH_PATT_COLORRAM+0xc8,
+ NV04_PGRAPH_PATT_COLORRAM+0xcc,
+ NV04_PGRAPH_PATT_COLORRAM+0xd0,
+ NV04_PGRAPH_PATT_COLORRAM+0xd4,
+ NV04_PGRAPH_PATT_COLORRAM+0xd8,
+ NV04_PGRAPH_PATT_COLORRAM+0xdc,
+ NV04_PGRAPH_PATT_COLORRAM+0xe0,
+ NV04_PGRAPH_PATT_COLORRAM+0xe4,
+ NV04_PGRAPH_PATT_COLORRAM+0xe8,
+ NV04_PGRAPH_PATT_COLORRAM+0xec,
+ NV04_PGRAPH_PATT_COLORRAM+0xf0,
+ NV04_PGRAPH_PATT_COLORRAM+0xf4,
+ NV04_PGRAPH_PATT_COLORRAM+0xf8,
+ NV04_PGRAPH_PATT_COLORRAM+0xfc,
NV04_PGRAPH_PATTERN,
0x0040080c,
NV04_PGRAPH_PATTERN_SHAPE,
@@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = {
0x004004f8,
0x0040047c,
0x004004fc,
- 0x0040053c,
- 0x00400544,
- 0x00400540,
- 0x00400548,
- 0x00400560,
- 0x00400568,
- 0x00400564,
- 0x0040056c,
0x00400534,
0x00400538,
0x00400514,
@@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = {
0x00400500,
0x00400504,
NV04_PGRAPH_VALID1,
- NV04_PGRAPH_VALID2
-
-
+ NV04_PGRAPH_VALID2,
+ NV04_PGRAPH_DEBUG_3
};
struct graph_state {
@@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev)
pgraph->fifo_access(dev, true);
}
+static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+ if (nv04_graph_ctx_regs[i] == reg)
+ return &ctx->nv04[i];
+ }
+
+ return NULL;
+}
+
int nv04_graph_create_context(struct nouveau_channel *chan)
{
struct graph_state *pgraph_ctx;
@@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
if (pgraph_ctx == NULL)
return -ENOMEM;
- /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
- pgraph_ctx->nv04[0] = 0x0001ffff;
- /* is it really needed ??? */
-#if 0
- dev_priv->fifos[channel].pgraph_ctx[1] =
- nv_rd32(dev, NV_PGRAPH_DEBUG_4);
- dev_priv->fifos[channel].pgraph_ctx[2] =
- nv_rd32(dev, 0x004006b0);
-#endif
+ *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
return 0;
}
@@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
+
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
+
tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+
return 0;
}
@@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev)
nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= dev_priv->engine.fifo.channels << 24;
+ tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
/* These don't belong here, they're part of a per-channel context */
@@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
struct drm_device *dev = chan->dev;
- uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
+ uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
uint32_t tmp;
@@ -547,7 +551,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
return 0;
}
-static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
{ 0x0150, nv04_graph_mthd_set_ref },
{}
};
@@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
};
struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
- { 0x0039, false, nv04_graph_mthds_m2mf },
+ { 0x0039, false, NULL },
{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
@@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
{ 0x0053, false, NULL }, /* surf3d */
{ 0x0054, false, NULL }, /* tex_tri */
{ 0x0055, false, NULL }, /* multitex_tri */
+ { 0x506e, true, nv04_graph_mthds_sw },
{}
};
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
index 79e2d104d70a..cc5cda44e501 100644
--- a/drivers/gpu/drm/nouveau/nv10_fb.c
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -3,17 +3,37 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch) {
+ if (dev_priv->card_type >= NV_20)
+ addr |= 1;
+ else
+ addr |= 1 << 31;
+ }
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), addr);
+}
+
int
nv10_fb_init(struct drm_device *dev)
{
- uint32_t fb_bar_size;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
int i;
- fb_bar_size = drm_get_resource_len(dev, 0) - 1;
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, NV10_PFB_TILE(i), 0);
- nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
- }
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index 6870e0ee2e7e..fcf2cdd19493 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -807,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
chan->pgraph_ctx = NULL;
}
+void
+nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1 << 31;
+
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+}
+
int nv10_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -838,17 +852,9 @@ int nv10_graph_init(struct drm_device *dev)
} else
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, NV10_PGRAPH_TILE(i),
- nv_rd32(dev, NV10_PFB_TILE(i)));
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
- nv_rd32(dev, NV10_PFB_TSTATUS(i)));
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 81c01353a9f9..58b917c3341b 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -33,13 +33,103 @@
#include "nouveau_hw.h"
#include "nv17_tv.h"
-enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
- struct drm_connector *connector,
- uint32_t pin_mask)
+static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+ uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
+ fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
+ uint32_t sample = 0;
+ int head;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+ testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
+ if (dev_priv->vbios->tvdactestval)
+ testval = dev_priv->vbios->tvdactestval;
+
+ dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+ head = (dacclk & 0x100) >> 8;
+
+ /* Save the previous state. */
+ gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+ fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
+ fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
+ fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
+ fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+ test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+ ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
+ ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
+ ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
+
+ /* Prepare the DAC for load detection. */
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
+
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+ NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
+ NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+ NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
+ NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
+
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+ (dacclk & ~0xff) | 0x22);
+ msleep(1);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+ (dacclk & ~0xff) | 0x21);
+
+ NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
+ NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
+
+ /* Sample pin 0x4 (usually S-video luma). */
+ NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
+ msleep(20);
+ sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+ & 0x4 << 28;
+
+ /* Sample the remaining pins. */
+ NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
+ msleep(20);
+ sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+ & 0xa << 28;
+
+ /* Restore the previous state. */
+ NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
+ NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
+ NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
+ NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
+ NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
+ nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
+
+ return sample;
+}
+
+static enum drm_connector_status
+nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_mode_config *conf = &dev->mode_config;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+ struct dcb_entry *dcb = tv_enc->base.dcb;
- tv_enc->pin_mask = pin_mask >> 28 & 0xe;
+ if (dev_priv->chipset == 0x42 ||
+ dev_priv->chipset == 0x43)
+ tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
+ else
+ tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
switch (tv_enc->pin_mask) {
case 0x2:
@@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
break;
case 0xe:
- if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+ if (dcb->tvconf.has_component_output)
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
else
tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
@@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
}
drm_connector_property_set_value(connector,
- encoder->dev->mode_config.tv_subconnector_property,
- tv_enc->subconnector);
+ conf->tv_subconnector_property,
+ tv_enc->subconnector);
- return tv_enc->subconnector ? connector_status_connected :
- connector_status_disconnected;
+ if (tv_enc->subconnector) {
+ NV_INFO(dev, "Load detected on output %c\n",
+ '@' + ffs(dcb->or));
+ return connector_status_connected;
+ } else {
+ return connector_status_disconnected;
+ }
}
static const struct {
@@ -633,7 +728,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
.prepare = nv17_tv_prepare,
.commit = nv17_tv_commit,
.mode_set = nv17_tv_mode_set,
- .detect = nv17_dac_detect,
+ .detect = nv17_tv_detect,
};
static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index 18ba74f19703..d6fc0a82f03d 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev)
nouveau_wait_for_idle(dev);
}
+void
+nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1;
+
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+}
+
int
nv20_graph_init(struct drm_device *dev)
{
@@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
}
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, 0x00400904 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- nv_wr32(dev, 0x00400908 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- /* which is NV40_PGRAPH_TSIZE0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- nv_wr32(dev, 0x00400900 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- /* which is NV40_PGRAPH_TILE0(i) ?? */
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+
for (i = 0; i < 8; i++) {
nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
@@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x4000c0, 0x00000016);
- /* copy tile info from PFB */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- nv_wr32(dev, 0x00400904 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TLIMIT(i)));
- /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
- nv_wr32(dev, 0x00400908 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TSIZE(i)));
- /* which is NV40_PGRAPH_TSIZE0(i) ?? */
- nv_wr32(dev, 0x00400900 + i * 0x10,
- nv_rd32(dev, NV10_PFB_TILE(i)));
- /* which is NV40_PGRAPH_TILE0(i) ?? */
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
index ca1d27107a8e..3cd07d8d5bd7 100644
--- a/drivers/gpu/drm/nouveau/nv40_fb.c
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -3,12 +3,37 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+void
+nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1;
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), addr);
+ break;
+
+ default:
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
+ nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
+ nv_wr32(dev, NV40_PFB_TILE(i), addr);
+ break;
+ }
+}
+
int
nv40_fb_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t fb_bar_size, tmp;
- int num_tiles;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t tmp;
int i;
/* This is strictly a NV4x register (don't know about NV5x). */
@@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev)
case 0x45:
tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
- num_tiles = NV10_PFB_TILE__SIZE;
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
break;
case 0x46: /* G72 */
case 0x47: /* G70 */
case 0x49: /* G71 */
case 0x4b: /* G73 */
case 0x4c: /* C51 (G7X version) */
- num_tiles = NV40_PFB_TILE__SIZE_1;
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
break;
default:
- num_tiles = NV40_PFB_TILE__SIZE_0;
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
break;
}
- fb_bar_size = drm_get_resource_len(dev, 0) - 1;
- switch (dev_priv->chipset) {
- case 0x40:
- for (i = 0; i < num_tiles; i++) {
- nv_wr32(dev, NV10_PFB_TILE(i), 0);
- nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
- }
- break;
- default:
- for (i = 0; i < num_tiles; i++) {
- nv_wr32(dev, NV40_PFB_TILE(i), 0);
- nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
- }
- break;
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 2b332bb55acf..53e8afe1dcd1 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -181,6 +181,48 @@ nv40_graph_unload_context(struct drm_device *dev)
return ret;
}
+void
+nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t limit = max(1u, addr + size) - 1;
+
+ if (pitch)
+ addr |= 1;
+
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x4a:
+ case 0x4e:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ break;
+
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ break;
+
+ default:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+ break;
+ }
+}
+
/*
* G70 0x47
* G71 0x49
@@ -195,7 +237,8 @@ nv40_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv =
(struct drm_nouveau_private *)dev->dev_private;
- uint32_t vramsz, tmp;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t vramsz;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -292,74 +335,9 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x400b38, 0x2ffff800);
nv_wr32(dev, 0x400b3c, 0x00006000);
- /* copy tile info from PFB */
- switch (dev_priv->chipset) {
- case 0x40: /* vanilla NV40 */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
- tmp = nv_rd32(dev, NV10_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- case 0x44:
- case 0x4a:
- case 0x4e: /* NV44-based cores don't have 0x406900? */
- for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- }
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b: /* G7X-based cores */
- for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- default: /* everything else */
- for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
- tmp = nv_rd32(dev, NV40_PFB_TILE(i));
- nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
- nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
- nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
- tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
- nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
- nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
- }
- break;
- }
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
/* begin RAM config */
vramsz = drm_get_resource_len(dev, 0) - 1;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a9263d92a231..90f0bf59fbcd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
int pxclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_connector *nv_connector = NULL;
+ struct drm_encoder *encoder;
struct nvbios *bios = &dev_priv->VBIOS;
uint32_t mc, script = 0, or;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb != dcbent)
+ continue;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ break;
+ }
+
or = ffs(dcbent->or) - 1;
mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
switch (dcbent->type) {
@@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
} else
if (bios->fp.strapless_is_24bit & 1)
script |= 0x0200;
+
+ if (nv_connector && nv_connector->edid &&
+ (nv_connector->edid->revision >= 4) &&
+ (nv_connector->edid->input & 0x70) >= 0x20)
+ script |= 0x0200;
}
if (nouveau_uscript_lvds >= 0) {
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 6bcc6d39e9b0..e4f279ee61cf 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
OUT_RING(chan, 1);
}
BEGIN_RING(chan, NvSub2D, 0x0588, 1);
- OUT_RING(chan, rect->color);
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+ info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+ OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+ else
+ OUT_RING(chan, rect->color);
BEGIN_RING(chan, NvSub2D, 0x0600, 4);
OUT_RING(chan, rect->dx);
OUT_RING(chan, rect->dy);
@@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
return;
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
}
if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
}
if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
int push = dwords > 2047 ? 2047 : dwords;
if (RING_SPACE(chan, push + 1)) {
- NV_ERROR(dev,
- "GPU lockup - switching to software fbcon\n");
- info->flags |= FBINFO_HWACCEL_DISABLED;
+ nouveau_fbcon_gpu_lockup(info);
cfb_imageblit(info, image);
return;
}
@@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
ret = RING_SPACE(chan, 59);
if (ret) {
- NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+ nouveau_fbcon_gpu_lockup(info);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index b7282284f080..39caf167587d 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -384,8 +384,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
nv_ro32(dev, cache, (ptr * 2) + 1));
}
- nv_wr32(dev, 0x3210, cnt << 2);
- nv_wr32(dev, 0x3270, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
/* guessing that all the 0x34xx regs aren't on NV50 */
if (!IS_G80) {
@@ -398,8 +398,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
return 0;
}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5f5fe75e6af..1cc7b937b1ea 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(call if_changed,mkregtable)
@@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h
$(obj)/r300.o: $(obj)/r300_reg_safe.h
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
diff --git a/drivers/gpu/drm/radeon/ObjectID.h b/drivers/gpu/drm/radeon/ObjectID.h
index 6d0183c61d3b..c714179d1bfa 100644
--- a/drivers/gpu/drm/radeon/ObjectID.h
+++ b/drivers/gpu/drm/radeon/ObjectID.h
@@ -1,5 +1,5 @@
/*
-* Copyright 2006-2007 Advanced Micro Devices, Inc.
+* Copyright 2006-2007 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -41,14 +41,14 @@
/****************************************************/
/* Encoder Object ID Definition */
/****************************************************/
-#define ENCODER_OBJECT_ID_NONE 0x00
+#define ENCODER_OBJECT_ID_NONE 0x00
/* Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_INTERNAL_LVDS 0x01
#define ENCODER_OBJECT_ID_INTERNAL_TMDS1 0x02
#define ENCODER_OBJECT_ID_INTERNAL_TMDS2 0x03
#define ENCODER_OBJECT_ID_INTERNAL_DAC1 0x04
-#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2 0x05 /* TV/CV DAC */
#define ENCODER_OBJECT_ID_INTERNAL_SDVOA 0x06
#define ENCODER_OBJECT_ID_INTERNAL_SDVOB 0x07
@@ -56,11 +56,11 @@
#define ENCODER_OBJECT_ID_SI170B 0x08
#define ENCODER_OBJECT_ID_CH7303 0x09
#define ENCODER_OBJECT_ID_CH7301 0x0A
-#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1 0x0B /* This belongs to Radeon Class Display Hardware */
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOA 0x0C
#define ENCODER_OBJECT_ID_EXTERNAL_SDVOB 0x0D
#define ENCODER_OBJECT_ID_TITFP513 0x0E
-#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1 0x0F /* not used for Radeon */
#define ENCODER_OBJECT_ID_VT1623 0x10
#define ENCODER_OBJECT_ID_HDMI_SI1930 0x11
#define ENCODER_OBJECT_ID_HDMI_INTERNAL 0x12
@@ -68,9 +68,9 @@
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 0x13
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 0x14
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 0x15
-#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
-#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
-#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 0x16 /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178 0X17 /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA 0x18 /* MVPU FPGA chip */
#define ENCODER_OBJECT_ID_INTERNAL_DDI 0x19
#define ENCODER_OBJECT_ID_VT1625 0x1A
#define ENCODER_OBJECT_ID_HDMI_SI1932 0x1B
@@ -86,7 +86,7 @@
/****************************************************/
/* Connector Object ID Definition */
/****************************************************/
-#define CONNECTOR_OBJECT_ID_NONE 0x00
+#define CONNECTOR_OBJECT_ID_NONE 0x00
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I 0x01
#define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I 0x02
#define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D 0x03
@@ -96,7 +96,7 @@
#define CONNECTOR_OBJECT_ID_SVIDEO 0x07
#define CONNECTOR_OBJECT_ID_YPbPr 0x08
#define CONNECTOR_OBJECT_ID_D_CONNECTOR 0x09
-#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_9PIN_DIN 0x0A /* Supports both CV & TV */
#define CONNECTOR_OBJECT_ID_SCART 0x0B
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_A 0x0C
#define CONNECTOR_OBJECT_ID_HDMI_TYPE_B 0x0D
@@ -106,6 +106,8 @@
#define CONNECTOR_OBJECT_ID_CROSSFIRE 0x11
#define CONNECTOR_OBJECT_ID_HARDCODE_DVI 0x12
#define CONNECTOR_OBJECT_ID_DISPLAYPORT 0x13
+#define CONNECTOR_OBJECT_ID_eDP 0x14
+#define CONNECTOR_OBJECT_ID_MXM 0x15
/* deleted */
@@ -116,6 +118,14 @@
#define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL 0x01
/****************************************************/
+/* Generic Object ID Definition */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE 0x00
+#define GENERIC_OBJECT_ID_GLSYNC 0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE 0x02
+#define GENERIC_OBJECT_ID_MXM_OPM 0x03
+
+/****************************************************/
/* Graphics Object ENUM ID Definition */
/****************************************************/
#define GRAPH_OBJECT_ENUM_ID1 0x01
@@ -124,6 +134,7 @@
#define GRAPH_OBJECT_ENUM_ID4 0x04
#define GRAPH_OBJECT_ENUM_ID5 0x05
#define GRAPH_OBJECT_ENUM_ID6 0x06
+#define GRAPH_OBJECT_ENUM_ID7 0x07
/****************************************************/
/* Graphics Object ID Bit definition */
@@ -133,35 +144,35 @@
#define RESERVED1_ID_MASK 0x0800
#define OBJECT_TYPE_MASK 0x7000
#define RESERVED2_ID_MASK 0x8000
-
+
#define OBJECT_ID_SHIFT 0x00
#define ENUM_ID_SHIFT 0x08
#define OBJECT_TYPE_SHIFT 0x0C
+
/****************************************************/
/* Graphics Object family definition */
/****************************************************/
-#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
- (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
- GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+ GRAPHICS_OBJECT_ID << OBJECT_ID_SHIFT)
/****************************************************/
/* GPU Object ID definition - Shared with BIOS */
/****************************************************/
-#define GPU_ENUM_ID1 (GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+#define GPU_ENUM_ID1 ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
/****************************************************/
/* Encoder Object ID definition - Shared with BIOS */
/****************************************************/
/*
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 0x2101
#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 0x2102
#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 0x2103
#define ENCODER_INTERNAL_DAC1_ENUM_ID1 0x2104
#define ENCODER_INTERNAL_DAC2_ENUM_ID1 0x2105
#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 0x2106
#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 0x2107
-#define ENCODER_SIL170B_ENUM_ID1 0x2108
+#define ENCODER_SIL170B_ENUM_ID1 0x2108
#define ENCODER_CH7303_ENUM_ID1 0x2109
#define ENCODER_CH7301_ENUM_ID1 0x210A
#define ENCODER_INTERNAL_DVO1_ENUM_ID1 0x210B
@@ -175,8 +186,8 @@
#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 0x2113
#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 0x2114
#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 0x2115
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
-#define ENCODER_SI178_ENUM_ID1 0x2117
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 0x2116
+#define ENCODER_SI178_ENUM_ID1 0x2117
#define ENCODER_MVPU_FPGA_ENUM_ID1 0x2118
#define ENCODER_INTERNAL_DDI_ENUM_ID1 0x2119
#define ENCODER_VT1625_ENUM_ID1 0x211A
@@ -185,205 +196,169 @@
#define ENCODER_DP_DP501_ENUM_ID1 0x211D
#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 0x211E
*/
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_SIL170B_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7303_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7301_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_TITFP513_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1623_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1930_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
-
-#define ENCODER_SI178_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
-
-#define ENCODER_MVPU_FPGA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1625_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1932_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_DP501_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_AN9801_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1625_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
@@ -406,167 +381,253 @@
#define CONNECTOR_7PIN_DIN_ENUM_ID1 0x310F
#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 0x3110
*/
-#define CONNECTOR_LVDS_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_COMPOSITE_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SVIDEO_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_YPbPr_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SCART_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
- (GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
- CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+#define CONNECTOR_LVDS_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+#define CONNECTOR_7PIN_DIN_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+ CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT) //Mapping to MXM_DAC
/****************************************************/
/* Router Object ID definition - Shared with BIOS */
/****************************************************/
-#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
- (GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
- GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
- ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
/* deleted */
/****************************************************/
+/* Generic Object ID definition - Shared with BIOS */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1 (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+ GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+ GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
+/****************************************************/
/* Object Cap definition - Shared with BIOS */
/****************************************************/
#define GRAPHICS_OBJECT_CAP_I2C 0x00000001L
#define GRAPHICS_OBJECT_CAP_TABLE_ID 0x00000002L
+
#define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID 0x01
#define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID 0x02
#define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID 0x03
@@ -575,4 +636,8 @@
#pragma pack()
#endif
-#endif /*GRAPHICTYPE */
+#endif /*GRAPHICTYPE */
+
+
+
+
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 91ad0d1c1b17..2a88029f6a1e 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2275,7 +2275,7 @@ typedef struct _ATOM_LCD_RTS_RECORD {
UCHAR ucRTSValue;
} ATOM_LCD_RTS_RECORD;
-/* !! If the record below exits, it shoud always be the first record for easy use in command table!!! */
+/* !! If the record below exits, it should always be the first record for easy use in command table!!! */
typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
UCHAR ucRecordType;
USHORT usLCDCap;
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 0d63c4436e7c..3eb0ca5b3d73 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -468,7 +468,8 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
radeon_connector = to_radeon_connector(connector);
@@ -582,7 +583,8 @@ void dp_link_train(struct drm_encoder *encoder,
u8 train_set[4];
int i;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
return;
if (!radeon_encoder->enc_priv)
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 0d79577c1576..607241c6a8a9 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename)
fseek(file, 0, SEEK_SET);
/* get header */
- if (fgets(buf, 1024, file) == NULL)
+ if (fgets(buf, 1024, file) == NULL) {
+ fclose(file);
return -1;
+ }
/* first line will contain the last register
* and gpu name */
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 71727460968f..8760d66e058a 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -131,7 +131,8 @@ void r100_hpd_init(struct radeon_device *rdev)
break;
}
}
- r100_irq_set(rdev);
+ if (rdev->irq.installed)
+ r100_irq_set(rdev);
}
void r100_hpd_fini(struct radeon_device *rdev)
@@ -243,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev)
{
uint32_t tmp = 0;
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
if (rdev->irq.sw_int) {
tmp |= RADEON_SW_INT_ENABLE;
}
@@ -356,6 +362,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
@@ -1713,14 +1724,6 @@ void r100_gpu_init(struct radeon_device *rdev)
r100_hdp_reset(rdev);
}
-void r100_hdp_flush(struct radeon_device *rdev)
-{
- u32 tmp;
- tmp = RREG32(RADEON_HOST_PATH_CNTL);
- tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
- WREG32(RADEON_HOST_PATH_CNTL, tmp);
-}
-
void r100_hdp_reset(struct radeon_device *rdev)
{
uint32_t tmp;
@@ -3313,6 +3316,7 @@ static int r100_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -3371,6 +3375,7 @@ void r100_fini(struct radeon_device *rdev)
radeon_gem_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3f2cc9e2e8d9..0051d11b907c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
#include "rv350d.h"
#include "r300_reg_safe.h"
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: r300 family seems to dislike write to HOST_PATH_CNTL
+ * using MMIO to flush host path read cache, this lead to HARDLOCKUP.
+ * However, scheduling such write to the ring seems harmless, i suspect
+ * the CP read collide with the flush somehow, or maybe the MC, hard to
+ * tell. (Jerome Glisse)
+ */
/*
* rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(0x1720, 0));
radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+ RADEON_HDP_READ_BUFFER_INVALIDATE);
+ radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
@@ -1258,6 +1271,7 @@ static int r300_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
@@ -1322,6 +1336,7 @@ void r300_fini(struct radeon_device *rdev)
rv370_pcie_gart_fini(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_irq_kms_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c05a7270cf0c..053404e71a9d 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -30,7 +30,15 @@
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
+#include "r100d.h"
#include "r420d.h"
+#include "r420_reg_safe.h"
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+ rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+ rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
int r420_mc_init(struct radeon_device *rdev)
{
@@ -165,6 +173,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
}
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+ /* RV410 and R420 can lock up if CP DMA to host memory happens
+ * while the 2D engine is busy.
+ *
+ * The proper workaround is to queue a RESYNC at the beginning
+ * of the CP init, apparently.
+ */
+ radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
+ radeon_ring_write(rdev, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+ /* Catch the RESYNC we dispatched all the way back,
+ * at the very beginning of the CP init.
+ */
+ radeon_ring_lock(rdev, 8);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev);
+ radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
static int r420_startup(struct radeon_device *rdev)
{
int r;
@@ -190,12 +226,14 @@ static int r420_startup(struct radeon_device *rdev)
r420_pipes_init(rdev);
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
return r;
}
+ r420_cp_errata_init(rdev);
r = r100_wb_init(rdev);
if (r) {
dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
@@ -238,6 +276,7 @@ int r420_resume(struct radeon_device *rdev)
int r420_suspend(struct radeon_device *rdev)
{
+ r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
r100_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -346,7 +385,7 @@ int r420_init(struct radeon_device *rdev)
if (r)
return r;
}
- r300_set_reg_safe(rdev);
+ r420_set_reg_safe(rdev);
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0f3843b6dac7..9a189072f2b9 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index a0ac3c134b1b..c0651991c3e4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
}
}
}
- r600_irq_set(rdev);
+ if (rdev->irq.installed)
+ r600_irq_set(rdev);
}
void r600_hpd_fini(struct radeon_device *rdev)
@@ -726,6 +727,10 @@ int r600_mc_init(struct radeon_device *rdev)
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
+ if (rdev->flags & RADEON_IS_IGP)
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
return 0;
}
@@ -1384,11 +1389,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_PORT_DATA);
}
-void r600_hdp_flush(struct radeon_device *rdev)
-{
- WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-}
-
/*
* CP & Ring
*/
@@ -1785,6 +1785,8 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+ radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
@@ -2089,8 +2091,7 @@ void r600_fini(struct radeon_device *rdev)
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- if (rdev->flags & RADEON_IS_AGP)
- radeon_agp_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
@@ -2461,6 +2462,10 @@ int r600_irq_set(struct radeon_device *rdev)
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ return -EINVAL;
+ }
/* don't enable anything if the ih is disabled */
if (!rdev->ih.enabled)
return 0;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9aecafb51b66..8787ea89dc6e 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -577,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 5; /* fence emit for VB IB */
+ ring_size += 7; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 5; /* fence emit for done copy */
+ ring_size += 7; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
WARN_ON(r);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 53b55608102b..eb5f99b9469d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -319,10 +319,12 @@ struct radeon_mc {
u64 real_vram_size;
int vram_mtrr;
bool vram_is_ddr;
+ bool igp_sideport_enabled;
};
int radeon_mc_setup(struct radeon_device *rdev);
-
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
/*
* GPU scratch registers structures, functions & helpers
@@ -654,7 +656,6 @@ struct radeon_asic {
uint32_t offset, uint32_t obj_size);
int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
- void (*hdp_flush)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -667,11 +668,14 @@ struct radeon_asic {
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
+ u32 hdp_cntl;
};
struct r300_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
+ u32 resync_scratch;
+ u32 hdp_cntl;
};
struct r600_asic {
@@ -1007,7 +1011,6 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
-#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 54bf49a6d676..220f454ea9fa 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -252,10 +252,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
void radeon_agp_fini(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
- drm_agp_release(rdev->ddev);
- }
+ if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+ drm_agp_release(rdev->ddev);
}
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index eb29217bbf1d..f2fbd2e4e9df 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -77,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
-void r100_hdp_flush(struct radeon_device *rdev);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -114,7 +113,6 @@ static struct radeon_asic r100_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
@@ -174,7 +172,6 @@ static struct radeon_asic r300_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
@@ -218,7 +215,6 @@ static struct radeon_asic r420_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
@@ -267,7 +263,6 @@ static struct radeon_asic rs400_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
@@ -324,7 +319,6 @@ static struct radeon_asic rs600_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.bandwidth_update = &rs600_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
@@ -372,7 +366,6 @@ static struct radeon_asic rs690_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
@@ -424,7 +417,6 @@ static struct radeon_asic rv515_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
@@ -467,7 +459,6 @@ static struct radeon_asic r520_asic = {
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r100_hdp_flush,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
@@ -508,7 +499,6 @@ int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
-void r600_hdp_flush(struct radeon_device *rdev);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -544,7 +534,6 @@ static struct radeon_asic r600_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
@@ -589,7 +578,6 @@ static struct radeon_asic rv770_asic = {
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
- .hdp_flush = &r600_hdp_flush,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 321044bef71c..fa82ca74324e 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -114,6 +114,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
i2c.i2c_id = gpio->sucI2cId.ucAccess;
i2c.valid = true;
+ break;
}
}
@@ -345,7 +346,9 @@ const int object_connector_convert[] = {
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
DRM_MODE_CONNECTOR_Unknown,
- DRM_MODE_CONNECTOR_DisplayPort
+ DRM_MODE_CONNECTOR_DisplayPort,
+ DRM_MODE_CONNECTOR_eDP,
+ DRM_MODE_CONNECTOR_Unknown
};
bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
@@ -935,6 +938,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
return false;
}
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+};
+
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+ &crev, &data_offset);
+
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (igp_info) {
+ switch (crev) {
+ case 1:
+ if (igp_info->info.ucMemoryType & 0xf0)
+ return true;
+ break;
+ case 2:
+ if (igp_info->info_2.ucMemoryType & 0x0f)
+ return true;
+ break;
+ default:
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ break;
+ }
+ }
+ return false;
+}
+
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
struct radeon_encoder_int_tmds *tmds)
{
@@ -1026,6 +1066,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
ss->range = ss_info->asSS_Info[i].ucSS_Range;
ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+ break;
}
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index fd94dbca33ac..7914455c96ca 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -595,6 +595,48 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
return false;
}
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 igp_info;
+
+ igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+ if (igp_info) {
+ if (RBIOS16(igp_info + 0x4))
+ return true;
+ }
+ return false;
+}
+
+static const uint32_t default_primarydac_adj[CHIP_LAST] = {
+ 0x00000808, /* r100 */
+ 0x00000808, /* rv100 */
+ 0x00000808, /* rs100 */
+ 0x00000808, /* rv200 */
+ 0x00000808, /* rs200 */
+ 0x00000808, /* r200 */
+ 0x00000808, /* rv250 */
+ 0x00000000, /* rs300 */
+ 0x00000808, /* rv280 */
+ 0x00000808, /* r300 */
+ 0x00000808, /* r350 */
+ 0x00000808, /* rv350 */
+ 0x00000808, /* rv380 */
+ 0x00000808, /* r420 */
+ 0x00000808, /* r423 */
+ 0x00000808, /* rv410 */
+ 0x00000000, /* rs400 */
+ 0x00000000, /* rs480 */
+};
+
+static void radeon_legacy_get_primary_dac_info_from_table(struct radeon_device *rdev,
+ struct radeon_encoder_primary_dac *p_dac)
+{
+ p_dac->ps2_pdac_adj = default_primarydac_adj[rdev->family];
+ return;
+}
+
struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
radeon_encoder
*encoder)
@@ -604,20 +646,20 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
uint16_t dac_info;
uint8_t rev, bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
+ int found = 0;
- if (rdev->bios == NULL)
+ p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac),
+ GFP_KERNEL);
+
+ if (!p_dac)
return NULL;
+ if (rdev->bios == NULL)
+ goto out;
+
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
- p_dac =
- kzalloc(sizeof(struct radeon_encoder_primary_dac),
- GFP_KERNEL);
-
- if (!p_dac)
- return NULL;
-
rev = RBIOS8(dac_info) & 0x3;
if (rev < 2) {
bg = RBIOS8(dac_info + 0x2) & 0xf;
@@ -628,9 +670,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
-
+ found = 1;
}
+out:
+ if (!found) /* fallback to defaults */
+ radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
+
return p_dac;
}
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 20161567dbff..9da10dd5df80 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_needs_link_train(radeon_connector)) {
if (connector->encoder)
dp_link_train(connector->encoder, connector);
@@ -615,7 +617,7 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
ret = connector_status_connected;
}
} else {
- if (radeon_connector->dac_load_detect) {
+ if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
@@ -967,7 +969,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
}
sink_type = radeon_dp_getsinktype(radeon_connector);
- if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
if (radeon_dp_getdpcd(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
@@ -992,7 +995,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
/* XXX check mode bandwidth */
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return radeon_dp_mode_valid_helper(radeon_connector, mode);
else
return MODE_OK;
@@ -1145,6 +1149,7 @@ radeon_add_atom_connector(struct drm_device *dev,
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
if (!radeon_dig_connector)
goto failed;
@@ -1157,10 +1162,16 @@ radeon_add_atom_connector(struct drm_device *dev,
goto failed;
if (i2c_bus->valid) {
/* add DP i2c bus */
- radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+ else
+ radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
goto failed;
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
+ else
+ radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
if (!radeon_connector->ddc_bus)
goto failed;
}
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 0b2f9c2ad2c1..06123ba31d31 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2145,6 +2145,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
&master_priv->sarea);
if (ret) {
DRM_ERROR("SAREA setup failed\n");
+ kfree(master_priv);
return ret;
}
master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 7c6848096bcd..0c51f8e46613 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -733,16 +733,18 @@ void radeon_device_fini(struct radeon_device *rdev)
*/
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
- struct radeon_device *rdev = dev->dev_private;
+ struct radeon_device *rdev;
struct drm_crtc *crtc;
int r;
- if (dev == NULL || rdev == NULL) {
+ if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
}
if (state.event == PM_EVENT_PRETHAW) {
return 0;
}
+ rdev = dev->dev_private;
+
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 91d72b70abc9..0ec491ead2ff 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
"INTERNAL_UNIPHY2",
};
-static const char *connector_names[13] = {
+static const char *connector_names[15] = {
"Unknown",
"VGA",
"DVI-I",
@@ -248,6 +248,8 @@ static const char *connector_names[13] = {
"DisplayPort",
"HDMI-A",
"HDMI-B",
+ "TV",
+ "eDP",
};
static const char *hpd_names[7] = {
@@ -329,8 +331,11 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
ret = radeon_get_atom_connector_info_from_object_table(dev);
else
ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
- } else
+ } else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
+ if (ret == false)
+ ret = radeon_get_legacy_connector_info_from_table(dev);
+ }
} else {
if (!ASIC_IS_AVIVO(rdev))
ret = radeon_get_legacy_connector_info_from_table(dev);
@@ -349,7 +354,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
int ret = 0;
- if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
if (dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ccba95f83d11..82eb551970b9 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -596,21 +596,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
radeon_dig_connector = radeon_connector->con_priv;
- if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
break;
- case CONNECTOR_DVI_A:
- case CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
break;
- case CONNECTOR_STV:
- case CONNECTOR_CTV:
- case CONNECTOR_DIN:
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
/*return ATOM_ENCODER_MODE_CV;*/
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 4cdd8b4f7549..8495d4e32e18 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,16 +140,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
bool radeon_fence_signaled(struct radeon_fence *fence)
{
- struct radeon_device *rdev = fence->rdev;
unsigned long irq_flags;
bool signaled = false;
- if (rdev->gpu_lockup) {
+ if (!fence)
return true;
- }
- if (fence == NULL) {
+
+ if (fence->rdev->gpu_lockup)
return true;
- }
+
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shuting down report all fence as signaled */
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 60df2d7e7e4c..0e1325e18534 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -131,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
printk(KERN_ERR "Failed to wait for object !\n");
return r;
}
- radeon_hdp_flush(robj->rdev);
}
return 0;
}
@@ -312,7 +311,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(gobj);
mutex_unlock(&dev->struct_mutex);
- radeon_hdp_flush(robj->rdev);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
index b79ecc4a7cc4..2f349a300195 100644
--- a/drivers/gpu/drm/radeon/radeon_irq.c
+++ b/drivers/gpu/drm/radeon/radeon_irq.c
@@ -289,16 +289,16 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr
drm_radeon_irq_emit_t *emit = data;
int result;
- if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
- return -EINVAL;
-
- LOCK_TEST_WITH_RETURN(dev, file_priv);
-
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
+ return -EINVAL;
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
result = radeon_emit_irq(dev);
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9223296fe37b..3cfd60fd0083 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -97,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
rdev->irq.sw_int = false;
for (i = 0; i < 2; i++) {
rdev->irq.crtc_vblank_int[i] = false;
+ rdev->irq.hpd[i] = false;
}
radeon_irq_set(rdev);
}
@@ -128,17 +129,22 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
DRM_INFO("radeon: using MSI.\n");
}
}
- drm_irq_install(rdev->ddev);
rdev->irq.installed = true;
+ r = drm_irq_install(rdev->ddev);
+ if (r) {
+ rdev->irq.installed = false;
+ return r;
+ }
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
+ drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
- rdev->irq.installed = false;
drm_irq_uninstall(rdev->ddev);
+ rdev->irq.installed = false;
if (rdev->msi_enabled)
pci_disable_msi(rdev->pdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 3a12bb0c0563..417684daef4c 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
unsigned pix_to_tv;
};
-static const uint16_t hor_timing_NTSC[] = {
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x003f,
0x0263,
@@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
0
};
-static const uint16_t vert_timing_NTSC[] = {
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200d,
0x1006,
@@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
0
};
-static const uint16_t hor_timing_PAL[] = {
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
0x0007,
0x0058,
0x027c,
@@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
0
};
-static const uint16_t vert_timing_PAL[] = {
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
0x2001,
0x200c,
0x1005,
@@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
}
flicker_removal = (tmp + 500) / 1000;
- if (flicker_removal < 3)
- flicker_removal = 3;
- for (i = 0; i < 6; ++i) {
+ if (flicker_removal < 2)
+ flicker_removal = 2;
+ for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
if (flicker_removal == SLOPE_limit[i])
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 402369db5ba0..91cb041cb40d 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,32 +46,6 @@ struct radeon_device;
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
-enum radeon_connector_type {
- CONNECTOR_NONE,
- CONNECTOR_VGA,
- CONNECTOR_DVI_I,
- CONNECTOR_DVI_D,
- CONNECTOR_DVI_A,
- CONNECTOR_STV,
- CONNECTOR_CTV,
- CONNECTOR_LVDS,
- CONNECTOR_DIGITAL,
- CONNECTOR_SCART,
- CONNECTOR_HDMI_TYPE_A,
- CONNECTOR_HDMI_TYPE_B,
- CONNECTOR_0XC,
- CONNECTOR_0XD,
- CONNECTOR_DIN,
- CONNECTOR_DISPLAY_PORT,
- CONNECTOR_UNSUPPORTED
-};
-
-enum radeon_dvi_type {
- DVI_AUTO,
- DVI_DIGITAL,
- DVI_ANALOG
-};
-
enum radeon_rmx_type {
RMX_OFF,
RMX_FULL,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d9ffe1f56e8f..4e636de877b2 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -221,8 +221,9 @@ int radeon_bo_unpin(struct radeon_bo *bo)
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
if (rdev->flags & RADEON_IS_IGP) {
- /* Useless to evict on IGP chips */
- return 0;
+ if (rdev->mc.igp_sideport_enabled == false)
+ /* Useless to evict on IGP chips */
+ return 0;
}
return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 067167cb39ca..1982a87386a1 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1065,7 +1065,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
/* judging by the first tile offset needed, could possibly
directly address/clear 4x4 tiles instead of 8x2 * 4x4
macro tiles, though would still need clear mask for
- right/bottom if truely 4x4 granularity is desired ? */
+ right/bottom if truly 4x4 granularity is desired ? */
OUT_RING(tileoffset * 16);
/* the number of tiles to clear */
OUT_RING(nrtilesx + 1);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
new file mode 100644
index 000000000000..989f7a020832
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -0,0 +1,795 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A4 SC_HYPERZ_EN
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F1C ZB_BW_CNTL
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F30 ZB_ZMASK_OFFSET
+0x4F34 ZB_ZMASK_PITCH
+0x4F38 ZB_ZMASK_WRINDEX
+0x4F3C ZB_ZMASK_DWORD
+0x4F40 ZB_ZMASK_RDINDEX
+0x4F44 ZB_HIZ_OFFSET
+0x4F48 ZB_HIZ_WRINDEX
+0x4F4C ZB_HIZ_DWORD
+0x4F50 ZB_HIZ_RDINDEX
+0x4F54 ZB_HIZ_PITCH
+0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 8e3c0b807add..6801b865d1c4 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -153,7 +153,7 @@ rs600 0x6d40
0x42A4 SU_POLY_OFFSET_FRONT_SCALE
0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
0x42AC SU_POLY_OFFSET_BACK_SCALE
-0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
0x42B4 SU_POLY_OFFSET_ENABLE
0x42B8 SU_CULL_MODE
0x42C0 SU_DEPTH_SCALE
@@ -291,6 +291,8 @@ rs600 0x6d40
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
0x46C0 US_ALU_RGB_ADDR_0
0x46C4 US_ALU_RGB_ADDR_1
0x46C8 US_ALU_RGB_ADDR_2
@@ -547,6 +549,70 @@ rs600 0x6d40
0x4AB4 US_ALU_ALPHA_INST_61
0x4AB8 US_ALU_ALPHA_INST_62
0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 0102a0d5735c..38abf63bf2cd 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -161,7 +161,12 @@ rv515 0x6d40
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411c PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
0x4208 GA_POINT_S1
@@ -171,6 +176,7 @@ rv515 0x6d40
0x4230 GA_POINT_MINMAX
0x4234 GA_LINE_CNTL
0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
0x4260 GA_LINE_STIPPLE_VALUE
0x4264 GA_LINE_S0
0x4268 GA_LINE_S1
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 368415df5f3a..9f5418983e2a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -356,6 +356,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -395,6 +396,7 @@ static int rs400_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
r100_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 4f8ea4260572..d5255751e7b3 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xffffffffUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -134,7 +135,8 @@ void rs600_hpd_init(struct radeon_device *rdev)
break;
}
}
- rs600_irq_set(rdev);
+ if (rdev->irq.installed)
+ rs600_irq_set(rdev);
}
void rs600_hpd_fini(struct radeon_device *rdev)
@@ -315,6 +317,11 @@ int rs600_irq_set(struct radeon_device *rdev)
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+ if (!rdev->irq.installed) {
+ WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+ WREG32(R_000040_GEN_INT_CNTL, 0);
+ return -EINVAL;
+ }
if (rdev->irq.sw_int) {
tmp |= S_000040_SW_INT_EN(1);
}
@@ -396,7 +403,7 @@ int rs600_irq_process(struct radeon_device *rdev)
}
while (status || r500_disp_int) {
/* SW interrupt */
- if (G_000040_SW_INT_EN(status))
+ if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
@@ -553,6 +560,7 @@ static int rs600_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1e22f52d6039..cd31da913771 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -172,6 +172,7 @@ static int rs690_mc_init(struct radeon_device *rdev)
rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
rdev->mc.gtt_location = 0xFFFFFFFFUL;
r = radeon_mc_setup(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
if (r)
return r;
return 0;
@@ -625,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev)
return r;
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 59632a506b46..62756717b044 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev)
}
/* Enable IRQ */
rs600_irq_set(rdev);
+ rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
/* 1M ring buffer */
r = r100_cp_init(rdev, 1024 * 1024);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 3bcb66e52786..59c71245fb91 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1096,8 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_clocks_fini(rdev);
- if (rdev->flags & RADEON_IS_AGP)
- radeon_agp_fini(rdev);
+ radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
kfree(rdev->bios);
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 24d90ea246ce..5f73774164d8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -55,6 +55,12 @@ source "drivers/hid/usbhid/Kconfig"
menu "Special HID drivers"
depends on HID
+config HID_3M_PCT
+ tristate "3M PCT"
+ depends on USB_HID
+ ---help---
+ Support for 3M PCT touch screens.
+
config HID_A4TECH
tristate "A4 tech" if EMBEDDED
depends on USB_HID
@@ -241,6 +247,12 @@ config HID_SONY
---help---
Support for Sony PS3 controller.
+config HID_STANTUM
+ tristate "Stantum"
+ depends on USB_HID
+ ---help---
+ Support for Stantum multitouch panel.
+
config HID_SUNPLUS
tristate "Sunplus" if EMBEDDED
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 0de2dff5542c..f8dc5bac79bd 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -19,6 +19,7 @@ ifdef CONFIG_LOGIRUMBLEPAD2_FF
hid-logitech-objs += hid-lg2ff.o
endif
+obj-$(CONFIG_HID_3M_PCT) += hid-3m-pct.o
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_SAMSUNG) += hid-samsung.o
obj-$(CONFIG_HID_SMARTJOYPLUS) += hid-sjoy.o
obj-$(CONFIG_HID_SONY) += hid-sony.o
+obj-$(CONFIG_HID_STANTUM) += hid-stantum.o
obj-$(CONFIG_HID_SUNPLUS) += hid-sunplus.o
obj-$(CONFIG_HID_GREENASIA) += hid-gaff.o
obj-$(CONFIG_HID_THRUSTMASTER) += hid-tmff.o
diff --git a/drivers/hid/hid-3m-pct.c b/drivers/hid/hid-3m-pct.c
new file mode 100644
index 000000000000..6d11e3dbbbff
--- /dev/null
+++ b/drivers/hid/hid-3m-pct.c
@@ -0,0 +1,291 @@
+/*
+ * HID driver for 3M PCT multitouch panels
+ *
+ * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+
+MODULE_VERSION("0.6");
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("3M PCT multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct mmm_finger {
+ __s32 x, y;
+ __u8 rank;
+ bool touch, valid;
+};
+
+struct mmm_data {
+ struct mmm_finger f[10];
+ __u8 curid, num;
+ bool touch, valid;
+};
+
+static int mmm_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_BUTTON:
+ return -1;
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ /* we do not want to map these: no input-oriented meaning */
+ case 0x14:
+ case 0x23:
+ case HID_DG_INPUTMODE:
+ case HID_DG_DEVICEINDEX:
+ case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ return -1;
+ case HID_DG_TIPSWITCH:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+ }
+ /* let hid-input decide for the others */
+ return 0;
+
+ case 0xff000000:
+ /* we do not want to map these: no input-oriented meaning */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mmm_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole packet has been received and processed,
+ * so that it can decide what to send to the input layer.
+ */
+static void mmm_filter_event(struct mmm_data *md, struct input_dev *input)
+{
+ struct mmm_finger *oldest = 0;
+ bool pressed = false, released = false;
+ int i;
+
+ /*
+ * we need to iterate on all fingers to decide if we have a press
+ * or a release event in our touchscreen emulation.
+ */
+ for (i = 0; i < 10; ++i) {
+ struct mmm_finger *f = &md->f[i];
+ if (!f->valid) {
+ /* this finger is just placeholder data, ignore */
+ } else if (f->touch) {
+ /* this finger is on the screen */
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, i);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, f->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, f->y);
+ input_mt_sync(input);
+ /*
+ * touchscreen emulation: maintain the age rank
+ * of this finger, decide if we have a press
+ */
+ if (f->rank == 0) {
+ f->rank = ++(md->num);
+ if (f->rank == 1)
+ pressed = true;
+ }
+ if (f->rank == 1)
+ oldest = f;
+ } else {
+ /* this finger took off the screen */
+ /* touchscreen emulation: maintain age rank of others */
+ int j;
+
+ for (j = 0; j < 10; ++j) {
+ struct mmm_finger *g = &md->f[j];
+ if (g->rank > f->rank) {
+ g->rank--;
+ if (g->rank == 1)
+ oldest = g;
+ }
+ }
+ f->rank = 0;
+ --(md->num);
+ if (md->num == 0)
+ released = true;
+ }
+ f->valid = 0;
+ }
+
+ /* touchscreen emulation */
+ if (oldest) {
+ if (pressed)
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ input_event(input, EV_ABS, ABS_X, oldest->x);
+ input_event(input, EV_ABS, ABS_Y, oldest->y);
+ } else if (released) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ }
+}
+
+/*
+ * this function is called upon all reports
+ * so that we can accumulate contact point information,
+ * and call input_mt_sync after each point.
+ */
+static int mmm_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct mmm_data *md = hid_get_drvdata(hid);
+ /*
+ * strangely, this function can be called before
+ * field->hidinput is initialized!
+ */
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+ switch (usage->hid) {
+ case HID_DG_TIPSWITCH:
+ md->touch = value;
+ break;
+ case HID_DG_CONFIDENCE:
+ md->valid = value;
+ break;
+ case HID_DG_CONTACTID:
+ if (md->valid) {
+ md->curid = value;
+ md->f[value].touch = md->touch;
+ md->f[value].valid = 1;
+ }
+ break;
+ case HID_GD_X:
+ if (md->valid)
+ md->f[md->curid].x = value;
+ break;
+ case HID_GD_Y:
+ if (md->valid)
+ md->f[md->curid].y = value;
+ break;
+ case HID_DG_CONTACTCOUNT:
+ mmm_filter_event(md, input);
+ break;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int mmm_probe(struct hid_device *hdev, const struct hid_device_id *id)
+{
+ int ret;
+ struct mmm_data *md;
+
+ md = kzalloc(sizeof(struct mmm_data), GFP_KERNEL);
+ if (!md) {
+ dev_err(&hdev->dev, "cannot allocate 3M data\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, md);
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (ret)
+ kfree(md);
+ return ret;
+}
+
+static void mmm_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id mmm_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, mmm_devices);
+
+static const struct hid_usage_id mmm_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver mmm_driver = {
+ .name = "3m-pct",
+ .id_table = mmm_devices,
+ .probe = mmm_probe,
+ .remove = mmm_remove,
+ .input_mapping = mmm_input_mapping,
+ .input_mapped = mmm_input_mapped,
+ .usage_table = mmm_grabbed_usages,
+ .event = mmm_event,
+};
+
+static int __init mmm_init(void)
+{
+ return hid_register_driver(&mmm_driver);
+}
+
+static void __exit mmm_exit(void)
+{
+ hid_unregister_driver(&mmm_driver);
+}
+
+module_init(mmm_init);
+module_exit(mmm_exit);
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 4b96e7a898cf..78286b184ace 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -40,6 +40,11 @@ module_param(fnmode, uint, 0644);
MODULE_PARM_DESC(fnmode, "Mode of fn key on Apple keyboards (0 = disabled, "
"[1] = fkeyslast, 2 = fkeysfirst)");
+static unsigned int iso_layout = 1;
+module_param(iso_layout, uint, 0644);
+MODULE_PARM_DESC(iso_layout, "Enable/Disable hardcoded ISO-layout of the keyboard. "
+ "(0 = disabled, [1] = enabled)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -199,11 +204,13 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
}
}
- if (asc->quirks & APPLE_ISO_KEYBOARD) {
- trans = apple_find_translation(apple_iso_keyboard, usage->code);
- if (trans) {
- input_event(input, usage->type, trans->to, value);
- return 1;
+ if (iso_layout) {
+ if (asc->quirks & APPLE_ISO_KEYBOARD) {
+ trans = apple_find_translation(apple_iso_keyboard, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
}
}
@@ -431,6 +438,13 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN |
+ APPLE_ISO_KEYBOARD },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 80792d38d25c..ca9106587876 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -387,7 +387,8 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
__u32 data;
unsigned n;
- if (item->size == 0) {
+ /* Local delimiter could have value 0, which allows size to be 0 */
+ if (item->size == 0 && item->tag != HID_LOCAL_ITEM_TAG_DELIMITER) {
dbg_hid("item data expected for local item\n");
return -1;
}
@@ -1248,6 +1249,7 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
/* a list of devices for which there is a specialized driver on HID bus */
static const struct hid_device_id hid_blacklist[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_3M, USB_DEVICE_ID_3M1968) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ATV_IRCONTROL) },
@@ -1285,6 +1287,9 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING3_JIS) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
@@ -1338,6 +1343,7 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
@@ -1553,6 +1559,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
@@ -1657,8 +1664,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 3839340e293a..e8f0b1142250 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -18,6 +18,9 @@
#ifndef HID_IDS_H_FILE
#define HID_IDS_H_FILE
+#define USB_VENDOR_ID_3M 0x0596
+#define USB_DEVICE_ID_3M1968 0x0500
+
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
#define USB_DEVICE_ID_A4TECH_X5_005D 0x000a
@@ -88,6 +91,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI 0x0236
#define USB_DEVICE_ID_APPLE_WELLSPRING3_ISO 0x0237
#define USB_DEVICE_ID_APPLE_WELLSPRING3_JIS 0x0238
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_ATV_IRCONTROL 0x8241
@@ -166,6 +172,9 @@
#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
+#define USB_VENDOR_ID_ETT 0x0664
+#define USB_DEVICE_ID_TC5UH 0x0309
+
#define USB_VENDOR_ID_EZKEY 0x0518
#define USB_DEVICE_ID_BTC_8193 0x0002
@@ -390,16 +399,15 @@
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST 0x0034
#define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST 0x0046
+#define USB_VENDOR_ID_STANTUM 0x1f87
+#define USB_DEVICE_ID_MTP 0x0002
+
#define USB_VENDOR_ID_SUN 0x0430
#define USB_DEVICE_ID_RARITAN_KVM_DONGLE 0xcdab
#define USB_VENDOR_ID_SUNPLUS 0x04fc
#define USB_DEVICE_ID_SUNPLUS_WDESKTOP 0x05d8
-#define USB_VENDOR_ID_TENX 0x1130
-#define USB_DEVICE_ID_TENX_IBUDDY1 0x0001
-#define USB_DEVICE_ID_TENX_IBUDDY2 0x0002
-
#define USB_VENDOR_ID_THRUSTMASTER 0x044f
#define USB_VENDOR_ID_TOPMAX 0x0663
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 5862b0f3b55d..dad7aae9c975 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -198,7 +198,12 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
switch (field->application) {
case HID_GD_MOUSE:
case HID_GD_POINTER: code += 0x110; break;
- case HID_GD_JOYSTICK: code += 0x120; break;
+ case HID_GD_JOYSTICK:
+ if (code <= 0xf)
+ code += BTN_JOYSTICK;
+ else
+ code += BTN_TRIGGER_HAPPY;
+ break;
case HID_GD_GAMEPAD: code += 0x130; break;
default:
switch (field->physical) {
diff --git a/drivers/hid/hid-samsung.c b/drivers/hid/hid-samsung.c
index 5b222eed0692..510dd1340597 100644
--- a/drivers/hid/hid-samsung.c
+++ b/drivers/hid/hid-samsung.c
@@ -39,7 +39,17 @@
*
* 3. 135 byte report descriptor
* Report #4 has an array field with logical range 0..17 instead of 1..14.
+ *
+ * 4. 171 byte report descriptor
+ * Report #3 has an array field with logical range 0..1 instead of 1..3.
*/
+static inline void samsung_dev_trace(struct hid_device *hdev,
+ unsigned int rsize)
+{
+ dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
+ "descriptor\n", rsize);
+}
+
static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int rsize)
{
@@ -47,8 +57,7 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc[177] == 0x75 && rdesc[178] == 0x30 &&
rdesc[179] == 0x95 && rdesc[180] == 0x01 &&
rdesc[182] == 0x40) {
- dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
- "descriptor\n", 184);
+ samsung_dev_trace(hdev, 184);
rdesc[176] = 0xff;
rdesc[178] = 0x08;
rdesc[180] = 0x06;
@@ -56,17 +65,21 @@ static void samsung_report_fixup(struct hid_device *hdev, __u8 *rdesc,
} else
if (rsize == 203 && rdesc[192] == 0x15 && rdesc[193] == 0x0 &&
rdesc[194] == 0x25 && rdesc[195] == 0x12) {
- dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
- "descriptor\n", 203);
+ samsung_dev_trace(hdev, 203);
rdesc[193] = 0x1;
rdesc[195] = 0xf;
} else
if (rsize == 135 && rdesc[124] == 0x15 && rdesc[125] == 0x0 &&
rdesc[126] == 0x25 && rdesc[127] == 0x11) {
- dev_info(&hdev->dev, "fixing up Samsung IrDA %d byte report "
- "descriptor\n", 135);
+ samsung_dev_trace(hdev, 135);
rdesc[125] = 0x1;
rdesc[127] = 0xe;
+ } else
+ if (rsize == 171 && rdesc[160] == 0x15 && rdesc[161] == 0x0 &&
+ rdesc[162] == 0x25 && rdesc[163] == 0x01) {
+ samsung_dev_trace(hdev, 171);
+ rdesc[161] = 0x1;
+ rdesc[163] = 0x3;
}
}
diff --git a/drivers/hid/hid-stantum.c b/drivers/hid/hid-stantum.c
new file mode 100644
index 000000000000..add965dab932
--- /dev/null
+++ b/drivers/hid/hid-stantum.c
@@ -0,0 +1,283 @@
+/*
+ * HID driver for Stantum multitouch panels
+ *
+ * Copyright (c) 2009 Stephane Chatty <chatty@enac.fr>
+ *
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+
+MODULE_VERSION("0.6");
+MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
+MODULE_DESCRIPTION("Stantum HID multitouch panels");
+MODULE_LICENSE("GPL");
+
+#include "hid-ids.h"
+
+struct stantum_data {
+ __s32 x, y, z, w, h; /* x, y, pressure, width, height */
+ __u16 id; /* touch id */
+ bool valid; /* valid finger data, or just placeholder? */
+ bool first; /* first finger in the HID packet? */
+ bool activity; /* at least one active finger so far? */
+};
+
+static int stantum_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ switch (usage->hid & HID_USAGE_PAGE) {
+
+ case HID_UP_GENDESK:
+ switch (usage->hid) {
+ case HID_GD_X:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_X);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_X,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ case HID_GD_Y:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_POSITION_Y);
+ /* touchscreen emulation */
+ input_set_abs_params(hi->input, ABS_Y,
+ field->logical_minimum,
+ field->logical_maximum, 0, 0);
+ return 1;
+ }
+ return 0;
+
+ case HID_UP_DIGITIZER:
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ case HID_DG_CONFIDENCE:
+ case HID_DG_INPUTMODE:
+ case HID_DG_DEVICEINDEX:
+ case HID_DG_CONTACTCOUNT:
+ case HID_DG_CONTACTMAX:
+ case HID_DG_TIPPRESSURE:
+ return -1;
+
+ case HID_DG_TIPSWITCH:
+ /* touchscreen emulation */
+ hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
+ return 1;
+
+ case HID_DG_WIDTH:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MAJOR);
+ return 1;
+ case HID_DG_HEIGHT:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TOUCH_MINOR);
+ input_set_abs_params(hi->input, ABS_MT_ORIENTATION,
+ 1, 1, 0, 0);
+ return 1;
+ case HID_DG_CONTACTID:
+ hid_map_usage(hi, usage, bit, max,
+ EV_ABS, ABS_MT_TRACKING_ID);
+ return 1;
+
+ }
+ return 0;
+
+ case 0xff000000:
+ /* no input-oriented meaning */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int stantum_input_mapped(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ if (usage->type == EV_KEY || usage->type == EV_ABS)
+ clear_bit(usage->code, *bit);
+
+ return 0;
+}
+
+/*
+ * this function is called when a whole finger has been parsed,
+ * so that it can decide what to send to the input layer.
+ */
+static void stantum_filter_event(struct stantum_data *sd,
+ struct input_dev *input)
+{
+ bool wide;
+
+ if (!sd->valid) {
+ /*
+ * touchscreen emulation: if the first finger is not valid and
+ * there previously was finger activity, this is a release
+ */
+ if (sd->first && sd->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 0);
+ sd->activity = false;
+ }
+ return;
+ }
+
+ input_event(input, EV_ABS, ABS_MT_TRACKING_ID, sd->id);
+ input_event(input, EV_ABS, ABS_MT_POSITION_X, sd->x);
+ input_event(input, EV_ABS, ABS_MT_POSITION_Y, sd->y);
+
+ wide = (sd->w > sd->h);
+ input_event(input, EV_ABS, ABS_MT_ORIENTATION, wide);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, wide ? sd->w : sd->h);
+ input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, wide ? sd->h : sd->w);
+
+#if 0
+ /* MT_PRESSURE does not exist yet */
+ input_event(input, EV_ABS, ABS_MT_PRESSURE, sd->z);
+#endif
+
+ input_mt_sync(input);
+ sd->valid = false;
+ sd->first = false;
+
+ /* touchscreen emulation */
+ if (sd->first) {
+ if (!sd->activity) {
+ input_event(input, EV_KEY, BTN_TOUCH, 1);
+ sd->activity = true;
+ }
+ input_event(input, EV_ABS, ABS_X, sd->x);
+ input_event(input, EV_ABS, ABS_Y, sd->y);
+ }
+}
+
+
+static int stantum_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ struct stantum_data *sd = hid_get_drvdata(hid);
+
+ if (hid->claimed & HID_CLAIMED_INPUT) {
+ struct input_dev *input = field->hidinput->input;
+
+ switch (usage->hid) {
+ case HID_DG_INRANGE:
+ /* this is the last field in a finger */
+ stantum_filter_event(sd, input);
+ break;
+ case HID_DG_WIDTH:
+ sd->w = value;
+ break;
+ case HID_DG_HEIGHT:
+ sd->h = value;
+ break;
+ case HID_GD_X:
+ sd->x = value;
+ break;
+ case HID_GD_Y:
+ sd->y = value;
+ break;
+ case HID_DG_TIPPRESSURE:
+ sd->z = value;
+ break;
+ case HID_DG_CONTACTID:
+ sd->id = value;
+ break;
+ case HID_DG_CONFIDENCE:
+ sd->valid = !!value;
+ break;
+ case 0xff000002:
+ /* this comes only before the first finger */
+ sd->first = true;
+ break;
+
+ default:
+ /* ignore the others */
+ return 1;
+ }
+ }
+
+ /* we have handled the hidinput part, now remains hiddev */
+ if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
+ hid->hiddev_hid_event(hid, field, usage, value);
+
+ return 1;
+}
+
+static int stantum_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int ret;
+ struct stantum_data *sd;
+
+ sd = kmalloc(sizeof(struct stantum_data), GFP_KERNEL);
+ if (!sd) {
+ dev_err(&hdev->dev, "cannot allocate Stantum data\n");
+ return -ENOMEM;
+ }
+ sd->valid = false;
+ sd->first = false;
+ sd->activity = false;
+ hid_set_drvdata(hdev, sd);
+
+ ret = hid_parse(hdev);
+ if (!ret)
+ ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+
+ if (ret)
+ kfree(sd);
+
+ return ret;
+}
+
+static void stantum_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ kfree(hid_get_drvdata(hdev));
+ hid_set_drvdata(hdev, NULL);
+}
+
+static const struct hid_device_id stantum_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_STANTUM, USB_DEVICE_ID_MTP) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, stantum_devices);
+
+static const struct hid_usage_id stantum_grabbed_usages[] = {
+ { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
+static struct hid_driver stantum_driver = {
+ .name = "stantum",
+ .id_table = stantum_devices,
+ .probe = stantum_probe,
+ .remove = stantum_remove,
+ .input_mapping = stantum_input_mapping,
+ .input_mapped = stantum_input_mapped,
+ .usage_table = stantum_grabbed_usages,
+ .event = stantum_event,
+};
+
+static int __init stantum_init(void)
+{
+ return hid_register_driver(&stantum_driver);
+}
+
+static void __exit stantum_exit(void)
+{
+ hid_unregister_driver(&stantum_driver);
+}
+
+module_init(stantum_init);
+module_exit(stantum_exit);
+
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 747542172242..12dcda529201 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -142,6 +142,7 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
wdata->butstate = rw;
input_report_key(input, BTN_0, rw & 0x02);
input_report_key(input, BTN_1, rw & 0x01);
+ input_report_key(input, BTN_TOOL_FINGER, 0xf0);
input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
input_sync(input);
}
@@ -196,6 +197,9 @@ static int wacom_probe(struct hid_device *hdev,
/* Pad */
input->evbit[0] |= BIT(EV_MSC);
input->mscbit[0] |= BIT(MSC_SERIAL);
+ set_bit(BTN_0, input->keybit);
+ set_bit(BTN_1, input->keybit);
+ set_bit(BTN_TOOL_FINGER, input->keybit);
/* Distance, rubber and mouse */
input->absbit[0] |= BIT(ABS_DISTANCE);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index e2997a8d5e1b..ae516a44940a 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1342,7 +1342,7 @@ static int hid_reset_resume(struct usb_interface *intf)
#endif /* CONFIG_PM */
-static struct usb_device_id hid_usb_ids [] = {
+static const struct usb_device_id hid_usb_ids[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
.bInterfaceClass = USB_INTERFACE_CLASS_HID },
{ } /* Terminating entry */
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 46c3c566307e..68cf87749a42 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -392,7 +392,7 @@ config SENSORS_GL520SM
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
- depends on X86 && EXPERIMENTAL
+ depends on X86 && PCI && EXPERIMENTAL
help
If you say yes here you get support for the temperature
sensor inside your CPU. Most of the family 6 CPUs
@@ -792,6 +792,16 @@ config SENSORS_ADS7828
This driver can also be built as a module. If so, the module
will be called ads7828.
+config SENSORS_AMC6821
+ tristate "Texas Instruments AMC6821"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Texas Instruments
+ AMC6821 hardware monitoring chips.
+
+ This driver can also be build as a module. If so, the module
+ will be called amc6821.
+
config SENSORS_THMC50
tristate "Texas Instruments THMC50 / Analog Devices ADM1022"
depends on I2C && EXPERIMENTAL
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 450c8e894277..4bc215c0953f 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_SENSORS_SIS5595) += sis5595.o
obj-$(CONFIG_SENSORS_SMSC47B397)+= smsc47b397.o
obj-$(CONFIG_SENSORS_SMSC47M1) += smsc47m1.o
obj-$(CONFIG_SENSORS_SMSC47M192)+= smsc47m192.o
+obj-$(CONFIG_SENSORS_AMC6821) += amc6821.o
obj-$(CONFIG_SENSORS_THMC50) += thmc50.o
obj-$(CONFIG_SENSORS_TMP401) += tmp401.o
obj-$(CONFIG_SENSORS_TMP421) += tmp421.o
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index a1a7ef14b519..a31e77c776ae 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -94,7 +94,7 @@ static const unsigned short normal_i2c[] = { 0x58, 0x5C, I2C_CLIENT_END };
#define ADT7462_PIN24_SHIFT 6
#define ADT7462_PIN26_VOLT_INPUT 0x08
#define ADT7462_PIN25_VOLT_INPUT 0x20
-#define ADT7462_PIN28_SHIFT 6 /* cfg3 */
+#define ADT7462_PIN28_SHIFT 4 /* cfg3 */
#define ADT7462_PIN28_VOLT 0x5
#define ADT7462_REG_ALARM1 0xB8
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
new file mode 100644
index 000000000000..1c89d922d619
--- /dev/null
+++ b/drivers/hwmon/amc6821.c
@@ -0,0 +1,1116 @@
+/*
+ amc6821.c - Part of lm_sensors, Linux kernel modules for hardware
+ monitoring
+ Copyright (C) 2009 T. Mertelj <tomaz.mertelj@guest.arnes.si>
+
+ Based on max6650.c:
+ Copyright (C) 2007 Hans J. Koch <hjk@linutronix.de>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+
+#include <linux/kernel.h> /* Needed for KERN_INFO */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+
+/*
+ * Addresses to scan.
+ */
+
+static const unsigned short normal_i2c[] = {0x18, 0x19, 0x1a, 0x2c, 0x2d, 0x2e,
+ 0x4c, 0x4d, 0x4e, I2C_CLIENT_END};
+
+
+
+/*
+ * Insmod parameters
+ */
+
+static int pwminv = 0; /*Inverted PWM output. */
+module_param(pwminv, int, S_IRUGO);
+
+static int init = 1; /*Power-on initialization.*/
+module_param(init, int, S_IRUGO);
+
+
+enum chips { amc6821 };
+
+#define AMC6821_REG_DEV_ID 0x3D
+#define AMC6821_REG_COMP_ID 0x3E
+#define AMC6821_REG_CONF1 0x00
+#define AMC6821_REG_CONF2 0x01
+#define AMC6821_REG_CONF3 0x3F
+#define AMC6821_REG_CONF4 0x04
+#define AMC6821_REG_STAT1 0x02
+#define AMC6821_REG_STAT2 0x03
+#define AMC6821_REG_TDATA_LOW 0x08
+#define AMC6821_REG_TDATA_HI 0x09
+#define AMC6821_REG_LTEMP_HI 0x0A
+#define AMC6821_REG_RTEMP_HI 0x0B
+#define AMC6821_REG_LTEMP_LIMIT_MIN 0x15
+#define AMC6821_REG_LTEMP_LIMIT_MAX 0x14
+#define AMC6821_REG_RTEMP_LIMIT_MIN 0x19
+#define AMC6821_REG_RTEMP_LIMIT_MAX 0x18
+#define AMC6821_REG_LTEMP_CRIT 0x1B
+#define AMC6821_REG_RTEMP_CRIT 0x1D
+#define AMC6821_REG_PSV_TEMP 0x1C
+#define AMC6821_REG_DCY 0x22
+#define AMC6821_REG_LTEMP_FAN_CTRL 0x24
+#define AMC6821_REG_RTEMP_FAN_CTRL 0x25
+#define AMC6821_REG_DCY_LOW_TEMP 0x21
+
+#define AMC6821_REG_TACH_LLIMITL 0x10
+#define AMC6821_REG_TACH_LLIMITH 0x11
+#define AMC6821_REG_TACH_HLIMITL 0x12
+#define AMC6821_REG_TACH_HLIMITH 0x13
+
+#define AMC6821_CONF1_START 0x01
+#define AMC6821_CONF1_FAN_INT_EN 0x02
+#define AMC6821_CONF1_FANIE 0x04
+#define AMC6821_CONF1_PWMINV 0x08
+#define AMC6821_CONF1_FAN_FAULT_EN 0x10
+#define AMC6821_CONF1_FDRC0 0x20
+#define AMC6821_CONF1_FDRC1 0x40
+#define AMC6821_CONF1_THERMOVIE 0x80
+
+#define AMC6821_CONF2_PWM_EN 0x01
+#define AMC6821_CONF2_TACH_MODE 0x02
+#define AMC6821_CONF2_TACH_EN 0x04
+#define AMC6821_CONF2_RTFIE 0x08
+#define AMC6821_CONF2_LTOIE 0x10
+#define AMC6821_CONF2_RTOIE 0x20
+#define AMC6821_CONF2_PSVIE 0x40
+#define AMC6821_CONF2_RST 0x80
+
+#define AMC6821_CONF3_THERM_FAN_EN 0x80
+#define AMC6821_CONF3_REV_MASK 0x0F
+
+#define AMC6821_CONF4_OVREN 0x10
+#define AMC6821_CONF4_TACH_FAST 0x20
+#define AMC6821_CONF4_PSPR 0x40
+#define AMC6821_CONF4_MODE 0x80
+
+#define AMC6821_STAT1_RPM_ALARM 0x01
+#define AMC6821_STAT1_FANS 0x02
+#define AMC6821_STAT1_RTH 0x04
+#define AMC6821_STAT1_RTL 0x08
+#define AMC6821_STAT1_R_THERM 0x10
+#define AMC6821_STAT1_RTF 0x20
+#define AMC6821_STAT1_LTH 0x40
+#define AMC6821_STAT1_LTL 0x80
+
+#define AMC6821_STAT2_RTC 0x08
+#define AMC6821_STAT2_LTC 0x10
+#define AMC6821_STAT2_LPSV 0x20
+#define AMC6821_STAT2_L_THERM 0x40
+#define AMC6821_STAT2_THERM_IN 0x80
+
+enum {IDX_TEMP1_INPUT = 0, IDX_TEMP1_MIN, IDX_TEMP1_MAX,
+ IDX_TEMP1_CRIT, IDX_TEMP2_INPUT, IDX_TEMP2_MIN,
+ IDX_TEMP2_MAX, IDX_TEMP2_CRIT,
+ TEMP_IDX_LEN, };
+
+static const u8 temp_reg[] = {AMC6821_REG_LTEMP_HI,
+ AMC6821_REG_LTEMP_LIMIT_MIN,
+ AMC6821_REG_LTEMP_LIMIT_MAX,
+ AMC6821_REG_LTEMP_CRIT,
+ AMC6821_REG_RTEMP_HI,
+ AMC6821_REG_RTEMP_LIMIT_MIN,
+ AMC6821_REG_RTEMP_LIMIT_MAX,
+ AMC6821_REG_RTEMP_CRIT, };
+
+enum {IDX_FAN1_INPUT = 0, IDX_FAN1_MIN, IDX_FAN1_MAX,
+ FAN1_IDX_LEN, };
+
+static const u8 fan_reg_low[] = {AMC6821_REG_TDATA_LOW,
+ AMC6821_REG_TACH_LLIMITL,
+ AMC6821_REG_TACH_HLIMITL, };
+
+
+static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI,
+ AMC6821_REG_TACH_LLIMITH,
+ AMC6821_REG_TACH_HLIMITH, };
+
+static int amc6821_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id);
+static int amc6821_detect(
+ struct i2c_client *client,
+ struct i2c_board_info *info);
+static int amc6821_init_client(struct i2c_client *client);
+static int amc6821_remove(struct i2c_client *client);
+static struct amc6821_data *amc6821_update_device(struct device *dev);
+
+/*
+ * Driver data (common to all clients)
+ */
+
+static const struct i2c_device_id amc6821_id[] = {
+ { "amc6821", amc6821 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, amc6821_id);
+
+static struct i2c_driver amc6821_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "amc6821",
+ },
+ .probe = amc6821_probe,
+ .remove = amc6821_remove,
+ .id_table = amc6821_id,
+ .detect = amc6821_detect,
+ .address_list = normal_i2c,
+};
+
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct amc6821_data {
+ struct device *hwmon_dev;
+ struct mutex update_lock;
+ char valid; /* zero until following fields are valid */
+ unsigned long last_updated; /* in jiffies */
+
+ /* register values */
+ int temp[TEMP_IDX_LEN];
+
+ u16 fan[FAN1_IDX_LEN];
+ u8 fan1_div;
+
+ u8 pwm1;
+ u8 temp1_auto_point_temp[3];
+ u8 temp2_auto_point_temp[3];
+ u8 pwm1_auto_point_pwm[3];
+ u8 pwm1_enable;
+ u8 pwm1_auto_channels_temp;
+
+ u8 stat1;
+ u8 stat2;
+};
+
+
+static ssize_t get_temp(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr(devattr)->index;
+
+ return sprintf(buf, "%d\n", data->temp[ix] * 1000);
+}
+
+
+
+static ssize_t set_temp(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ int ix = to_sensor_dev_attr(attr)->index;
+ long val;
+
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ val = SENSORS_LIMIT(val / 1000, -128, 127);
+
+ mutex_lock(&data->update_lock);
+ data->temp[ix] = val;
+ if (i2c_smbus_write_byte_data(client, temp_reg[ix], data->temp[ix])) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+
+static ssize_t get_temp_alarm(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr(devattr)->index;
+ u8 flag;
+
+ switch (ix) {
+ case IDX_TEMP1_MIN:
+ flag = data->stat1 & AMC6821_STAT1_LTL;
+ break;
+ case IDX_TEMP1_MAX:
+ flag = data->stat1 & AMC6821_STAT1_LTH;
+ break;
+ case IDX_TEMP1_CRIT:
+ flag = data->stat2 & AMC6821_STAT2_LTC;
+ break;
+ case IDX_TEMP2_MIN:
+ flag = data->stat1 & AMC6821_STAT1_RTL;
+ break;
+ case IDX_TEMP2_MAX:
+ flag = data->stat1 & AMC6821_STAT1_RTH;
+ break;
+ case IDX_TEMP2_CRIT:
+ flag = data->stat2 & AMC6821_STAT2_RTC;
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
+ return -EINVAL;
+ }
+ if (flag)
+ return sprintf(buf, "1");
+ else
+ return sprintf(buf, "0");
+}
+
+
+
+
+static ssize_t get_temp2_fault(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ if (data->stat1 & AMC6821_STAT1_RTF)
+ return sprintf(buf, "1");
+ else
+ return sprintf(buf, "0");
+}
+
+static ssize_t get_pwm1(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1);
+}
+
+static ssize_t set_pwm1(
+ struct device *dev,
+ struct device_attribute *devattr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->pwm1 = SENSORS_LIMIT(val , 0, 255);
+ i2c_smbus_write_byte_data(client, AMC6821_REG_DCY, data->pwm1);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t get_pwm1_enable(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1_enable);
+}
+
+static ssize_t set_pwm1_enable(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int config = strict_strtol(buf, 10, &val);
+ if (config)
+ return config;
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return -EIO;
+ }
+
+ switch (val) {
+ case 1:
+ config &= ~AMC6821_CONF1_FDRC0;
+ config &= ~AMC6821_CONF1_FDRC1;
+ break;
+ case 2:
+ config &= ~AMC6821_CONF1_FDRC0;
+ config |= AMC6821_CONF1_FDRC1;
+ break;
+ case 3:
+ config |= AMC6821_CONF1_FDRC0;
+ config |= AMC6821_CONF1_FDRC1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ mutex_lock(&data->update_lock);
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ count = -EIO;
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+static ssize_t get_pwm1_auto_channels_temp(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1_auto_channels_temp);
+}
+
+
+static ssize_t get_temp_auto_point_temp(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int ix = to_sensor_dev_attr_2(devattr)->index;
+ int nr = to_sensor_dev_attr_2(devattr)->nr;
+ struct amc6821_data *data = amc6821_update_device(dev);
+ switch (nr) {
+ case 1:
+ return sprintf(buf, "%d\n",
+ data->temp1_auto_point_temp[ix] * 1000);
+ break;
+ case 2:
+ return sprintf(buf, "%d\n",
+ data->temp2_auto_point_temp[ix] * 1000);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
+ return -EINVAL;
+ }
+}
+
+
+static ssize_t get_pwm1_auto_point_pwm(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ int ix = to_sensor_dev_attr(devattr)->index;
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->pwm1_auto_point_pwm[ix]);
+}
+
+
+static inline ssize_t set_slope_register(struct i2c_client *client,
+ u8 reg,
+ u8 dpwm,
+ u8 *ptemp)
+{
+ int dt;
+ u8 tmp;
+
+ dt = ptemp[2]-ptemp[1];
+ for (tmp = 4; tmp > 0; tmp--) {
+ if (dt * (0x20 >> tmp) >= dpwm)
+ break;
+ }
+ tmp |= (ptemp[1] & 0x7C) << 1;
+ if (i2c_smbus_write_byte_data(client,
+ reg, tmp)) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+
+
+static ssize_t set_temp_auto_point_temp(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr_2(attr)->index;
+ int nr = to_sensor_dev_attr_2(attr)->nr;
+ u8 *ptemp;
+ u8 reg;
+ int dpwm;
+ long val;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ switch (nr) {
+ case 1:
+ ptemp = data->temp1_auto_point_temp;
+ reg = AMC6821_REG_LTEMP_FAN_CTRL;
+ break;
+ case 2:
+ ptemp = data->temp2_auto_point_temp;
+ reg = AMC6821_REG_RTEMP_FAN_CTRL;
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->nr (%d).\n", nr);
+ return -EINVAL;
+ }
+
+ data->valid = 0;
+ mutex_lock(&data->update_lock);
+ switch (ix) {
+ case 0:
+ ptemp[0] = SENSORS_LIMIT(val / 1000, 0,
+ data->temp1_auto_point_temp[1]);
+ ptemp[0] = SENSORS_LIMIT(ptemp[0], 0,
+ data->temp2_auto_point_temp[1]);
+ ptemp[0] = SENSORS_LIMIT(ptemp[0], 0, 63);
+ if (i2c_smbus_write_byte_data(
+ client,
+ AMC6821_REG_PSV_TEMP,
+ ptemp[0])) {
+ dev_err(&client->dev,
+ "Register write error, aborting.\n");
+ count = -EIO;
+ }
+ goto EXIT;
+ break;
+ case 1:
+ ptemp[1] = SENSORS_LIMIT(
+ val / 1000,
+ (ptemp[0] & 0x7C) + 4,
+ 124);
+ ptemp[1] &= 0x7C;
+ ptemp[2] = SENSORS_LIMIT(
+ ptemp[2], ptemp[1] + 1,
+ 255);
+ break;
+ case 2:
+ ptemp[2] = SENSORS_LIMIT(
+ val / 1000,
+ ptemp[1]+1,
+ 255);
+ break;
+ default:
+ dev_dbg(dev, "Unknown attr->index (%d).\n", ix);
+ count = -EINVAL;
+ goto EXIT;
+ }
+ dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
+ if (set_slope_register(client, reg, dpwm, ptemp))
+ count = -EIO;
+
+EXIT:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+static ssize_t set_pwm1_auto_point_pwm(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ int dpwm;
+ long val;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&data->update_lock);
+ data->pwm1_auto_point_pwm[1] = SENSORS_LIMIT(val, 0, 254);
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_DCY_LOW_TEMP,
+ data->pwm1_auto_point_pwm[1])) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ goto EXIT;
+ }
+ dpwm = data->pwm1_auto_point_pwm[2] - data->pwm1_auto_point_pwm[1];
+ if (set_slope_register(client, AMC6821_REG_LTEMP_FAN_CTRL, dpwm,
+ data->temp1_auto_point_temp)) {
+ count = -EIO;
+ goto EXIT;
+ }
+ if (set_slope_register(client, AMC6821_REG_RTEMP_FAN_CTRL, dpwm,
+ data->temp2_auto_point_temp)) {
+ count = -EIO;
+ goto EXIT;
+ }
+
+EXIT:
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t get_fan(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ int ix = to_sensor_dev_attr(devattr)->index;
+ if (0 == data->fan[ix])
+ return sprintf(buf, "0");
+ return sprintf(buf, "%d\n", (int)(6000000 / data->fan[ix]));
+}
+
+
+
+static ssize_t get_fan1_fault(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ if (data->stat1 & AMC6821_STAT1_FANS)
+ return sprintf(buf, "1");
+ else
+ return sprintf(buf, "0");
+}
+
+
+
+static ssize_t set_fan(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int ix = to_sensor_dev_attr(attr)->index;
+ int ret = strict_strtol(buf, 10, &val);
+ if (ret)
+ return ret;
+ val = 1 > val ? 0xFFFF : 6000000/val;
+
+ mutex_lock(&data->update_lock);
+ data->fan[ix] = (u16) SENSORS_LIMIT(val, 1, 0xFFFF);
+ if (i2c_smbus_write_byte_data(client, fan_reg_low[ix],
+ data->fan[ix] & 0xFF)) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ goto EXIT;
+ }
+ if (i2c_smbus_write_byte_data(client,
+ fan_reg_hi[ix], data->fan[ix] >> 8)) {
+ dev_err(&client->dev, "Register write error, aborting.\n");
+ count = -EIO;
+ }
+EXIT:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+static ssize_t get_fan1_div(
+ struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct amc6821_data *data = amc6821_update_device(dev);
+ return sprintf(buf, "%d\n", data->fan1_div);
+}
+
+static ssize_t set_fan1_div(
+ struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ long val;
+ int config = strict_strtol(buf, 10, &val);
+ if (config)
+ return config;
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return -EIO;
+ }
+ mutex_lock(&data->update_lock);
+ switch (val) {
+ case 2:
+ config &= ~AMC6821_CONF4_PSPR;
+ data->fan1_div = 2;
+ break;
+ case 4:
+ config |= AMC6821_CONF4_PSPR;
+ data->fan1_div = 4;
+ break;
+ default:
+ mutex_unlock(&data->update_lock);
+ count = -EINVAL;
+ goto EXIT;
+ }
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ count = -EIO;
+ }
+EXIT:
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ get_temp, NULL, IDX_TEMP1_INPUT);
+static SENSOR_DEVICE_ATTR(temp1_min, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP1_MIN);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP1_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP1_CRIT);
+static SENSOR_DEVICE_ATTR(temp1_min_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP1_MIN);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP1_MAX);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP1_CRIT);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
+ get_temp, NULL, IDX_TEMP2_INPUT);
+static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP2_MIN);
+static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP2_MAX);
+static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO | S_IWUSR, get_temp,
+ set_temp, IDX_TEMP2_CRIT);
+static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO,
+ get_temp2_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp2_min_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP2_MIN);
+static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP2_MAX);
+static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO,
+ get_temp_alarm, NULL, IDX_TEMP2_CRIT);
+static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, IDX_FAN1_INPUT);
+static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO | S_IWUSR,
+ get_fan, set_fan, IDX_FAN1_MIN);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO | S_IWUSR,
+ get_fan, set_fan, IDX_FAN1_MAX);
+static SENSOR_DEVICE_ATTR(fan1_fault, S_IRUGO, get_fan1_fault, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_div, S_IRUGO | S_IWUSR,
+ get_fan1_div, set_fan1_div, 0);
+
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm1, set_pwm1, 0);
+static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+ get_pwm1_enable, set_pwm1_enable, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO,
+ get_pwm1_auto_point_pwm, NULL, 0);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
+ get_pwm1_auto_point_pwm, set_pwm1_auto_point_pwm, 1);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO,
+ get_pwm1_auto_point_pwm, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IRUGO,
+ get_pwm1_auto_channels_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point1_temp, S_IRUGO,
+ get_temp_auto_point_temp, NULL, 1, 0);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 1);
+static SENSOR_DEVICE_ATTR_2(temp1_auto_point3_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 1, 2);
+
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 0);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 1);
+static SENSOR_DEVICE_ATTR_2(temp2_auto_point3_temp, S_IWUSR | S_IRUGO,
+ get_temp_auto_point_temp, set_temp_auto_point_temp, 2, 2);
+
+
+
+static struct attribute *amc6821_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_min.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_input.dev_attr.attr,
+ &sensor_dev_attr_temp2_min.dev_attr.attr,
+ &sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit.dev_attr.attr,
+ &sensor_dev_attr_temp2_min_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp2_fault.dev_attr.attr,
+ &sensor_dev_attr_fan1_input.dev_attr.attr,
+ &sensor_dev_attr_fan1_min.dev_attr.attr,
+ &sensor_dev_attr_fan1_max.dev_attr.attr,
+ &sensor_dev_attr_fan1_fault.dev_attr.attr,
+ &sensor_dev_attr_fan1_div.dev_attr.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
+ &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_temp1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_temp2_auto_point3_temp.dev_attr.attr,
+ NULL
+};
+
+static struct attribute_group amc6821_attr_grp = {
+ .attrs = amc6821_attrs,
+};
+
+
+
+/* Return 0 if detection is successful, -ENODEV otherwise */
+static int amc6821_detect(
+ struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int address = client->addr;
+ int dev_id, comp_id;
+
+ dev_dbg(&adapter->dev, "amc6821_detect called.\n");
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_dbg(&adapter->dev,
+ "amc6821: I2C bus doesn't support byte mode, "
+ "skipping.\n");
+ return -ENODEV;
+ }
+
+ dev_id = i2c_smbus_read_byte_data(client, AMC6821_REG_DEV_ID);
+ comp_id = i2c_smbus_read_byte_data(client, AMC6821_REG_COMP_ID);
+ if (dev_id != 0x21 || comp_id != 0x49) {
+ dev_dbg(&adapter->dev,
+ "amc6821: detection failed at 0x%02x.\n",
+ address);
+ return -ENODEV;
+ }
+
+ /* Bit 7 of the address register is ignored, so we can check the
+ ID registers again */
+ dev_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_DEV_ID);
+ comp_id = i2c_smbus_read_byte_data(client, 0x80 | AMC6821_REG_COMP_ID);
+ if (dev_id != 0x21 || comp_id != 0x49) {
+ dev_dbg(&adapter->dev,
+ "amc6821: detection failed at 0x%02x.\n",
+ address);
+ return -ENODEV;
+ }
+
+ dev_info(&adapter->dev, "amc6821: chip found at 0x%02x.\n", address);
+ strlcpy(info->type, "amc6821", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int amc6821_probe(
+ struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct amc6821_data *data;
+ int err;
+
+ data = kzalloc(sizeof(struct amc6821_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&client->dev, "out of memory.\n");
+ return -ENOMEM;
+ }
+
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->update_lock);
+
+ /*
+ * Initialize the amc6821 chip
+ */
+ err = amc6821_init_client(client);
+ if (err)
+ goto err_free;
+
+ err = sysfs_create_group(&client->dev.kobj, &amc6821_attr_grp);
+ if (err)
+ goto err_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (!IS_ERR(data->hwmon_dev))
+ return 0;
+
+ err = PTR_ERR(data->hwmon_dev);
+ dev_err(&client->dev, "error registering hwmon device.\n");
+ sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp);
+err_free:
+ kfree(data);
+ return err;
+}
+
+static int amc6821_remove(struct i2c_client *client)
+{
+ struct amc6821_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &amc6821_attr_grp);
+
+ kfree(data);
+
+ return 0;
+}
+
+
+static int amc6821_init_client(struct i2c_client *client)
+{
+ int config;
+ int err = -EIO;
+
+ if (init) {
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ config |= AMC6821_CONF4_MODE;
+
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF4,
+ config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF3);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ dev_info(&client->dev, "Revision %d\n", config & 0x0f);
+
+ config &= ~AMC6821_CONF3_THERM_FAN_EN;
+
+ if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF3,
+ config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF2);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ config &= ~AMC6821_CONF2_RTFIE;
+ config &= ~AMC6821_CONF2_LTOIE;
+ config &= ~AMC6821_CONF2_RTOIE;
+ if (i2c_smbus_write_byte_data(client,
+ AMC6821_REG_CONF2, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+
+ config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+
+ if (config < 0) {
+ dev_err(&client->dev,
+ "Error reading configuration register, aborting.\n");
+ return err;
+ }
+
+ config &= ~AMC6821_CONF1_THERMOVIE;
+ config &= ~AMC6821_CONF1_FANIE;
+ config |= AMC6821_CONF1_START;
+ if (pwminv)
+ config |= AMC6821_CONF1_PWMINV;
+ else
+ config &= ~AMC6821_CONF1_PWMINV;
+
+ if (i2c_smbus_write_byte_data(
+ client, AMC6821_REG_CONF1, config)) {
+ dev_err(&client->dev,
+ "Configuration register write error, aborting.\n");
+ return err;
+ }
+ }
+ return 0;
+}
+
+
+static struct amc6821_data *amc6821_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct amc6821_data *data = i2c_get_clientdata(client);
+ int timeout = HZ;
+ u8 reg;
+ int i;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + timeout) ||
+ !data->valid) {
+
+ for (i = 0; i < TEMP_IDX_LEN; i++)
+ data->temp[i] = i2c_smbus_read_byte_data(client,
+ temp_reg[i]);
+
+ data->stat1 = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_STAT1);
+ data->stat2 = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_STAT2);
+
+ data->pwm1 = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_DCY);
+ for (i = 0; i < FAN1_IDX_LEN; i++) {
+ data->fan[i] = i2c_smbus_read_byte_data(
+ client,
+ fan_reg_low[i]);
+ data->fan[i] += i2c_smbus_read_byte_data(
+ client,
+ fan_reg_hi[i]) << 8;
+ }
+ data->fan1_div = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_CONF4);
+ data->fan1_div = data->fan1_div & AMC6821_CONF4_PSPR ? 4 : 2;
+
+ data->pwm1_auto_point_pwm[0] = 0;
+ data->pwm1_auto_point_pwm[2] = 255;
+ data->pwm1_auto_point_pwm[1] = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_DCY_LOW_TEMP);
+
+ data->temp1_auto_point_temp[0] =
+ i2c_smbus_read_byte_data(client,
+ AMC6821_REG_PSV_TEMP);
+ data->temp2_auto_point_temp[0] =
+ data->temp1_auto_point_temp[0];
+ reg = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_LTEMP_FAN_CTRL);
+ data->temp1_auto_point_temp[1] = (reg & 0xF8) >> 1;
+ reg &= 0x07;
+ reg = 0x20 >> reg;
+ if (reg > 0)
+ data->temp1_auto_point_temp[2] =
+ data->temp1_auto_point_temp[1] +
+ (data->pwm1_auto_point_pwm[2] -
+ data->pwm1_auto_point_pwm[1]) / reg;
+ else
+ data->temp1_auto_point_temp[2] = 255;
+
+ reg = i2c_smbus_read_byte_data(client,
+ AMC6821_REG_RTEMP_FAN_CTRL);
+ data->temp2_auto_point_temp[1] = (reg & 0xF8) >> 1;
+ reg &= 0x07;
+ reg = 0x20 >> reg;
+ if (reg > 0)
+ data->temp2_auto_point_temp[2] =
+ data->temp2_auto_point_temp[1] +
+ (data->pwm1_auto_point_pwm[2] -
+ data->pwm1_auto_point_pwm[1]) / reg;
+ else
+ data->temp2_auto_point_temp[2] = 255;
+
+ reg = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1);
+ reg = (reg >> 5) & 0x3;
+ switch (reg) {
+ case 0: /*open loop: software sets pwm1*/
+ data->pwm1_auto_channels_temp = 0;
+ data->pwm1_enable = 1;
+ break;
+ case 2: /*closed loop: remote T (temp2)*/
+ data->pwm1_auto_channels_temp = 2;
+ data->pwm1_enable = 2;
+ break;
+ case 3: /*closed loop: local and remote T (temp2)*/
+ data->pwm1_auto_channels_temp = 3;
+ data->pwm1_enable = 3;
+ break;
+ case 1: /*semi-open loop: software sets rpm, chip controls pwm1,
+ *currently not implemented
+ */
+ data->pwm1_auto_channels_temp = 0;
+ data->pwm1_enable = 0;
+ break;
+ }
+
+ data->last_updated = jiffies;
+ data->valid = 1;
+ }
+ mutex_unlock(&data->update_lock);
+ return data;
+}
+
+
+static int __init amc6821_init(void)
+{
+ return i2c_add_driver(&amc6821_driver);
+}
+
+static void __exit amc6821_exit(void)
+{
+ i2c_del_driver(&amc6821_driver);
+}
+
+module_init(amc6821_init);
+module_exit(amc6821_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("T. Mertelj <tomaz.mertelj@guest.arnes.si>");
+MODULE_DESCRIPTION("Texas Instruments amc6821 hwmon driver");
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index 5a3ee00c0e7d..6811346c1c62 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -5,6 +5,7 @@
* See COPYING in the top level directory of the kernel tree.
*/
+#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/hwmon.h>
#include <linux/list.h>
@@ -101,6 +102,11 @@ struct atk_data {
int temperature_count;
int fan_count;
struct list_head sensor_list;
+
+ struct {
+ struct dentry *root;
+ u32 id;
+ } debugfs;
};
@@ -624,6 +630,187 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value)
return err;
}
+#ifdef CONFIG_DEBUG_FS
+static int atk_debugfs_gitm_get(void *p, u64 *val)
+{
+ struct atk_data *data = p;
+ union acpi_object *ret;
+ struct atk_acpi_ret_buffer *buf;
+ int err = 0;
+
+ if (!data->read_handle)
+ return -ENODEV;
+
+ if (!data->debugfs.id)
+ return -EINVAL;
+
+ ret = atk_gitm(data, data->debugfs.id);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ buf = (struct atk_acpi_ret_buffer *)ret->buffer.pointer;
+ if (buf->flags)
+ *val = buf->value;
+ else
+ err = -EIO;
+
+ return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(atk_debugfs_gitm,
+ atk_debugfs_gitm_get,
+ NULL,
+ "0x%08llx\n")
+
+static int atk_acpi_print(char *buf, size_t sz, union acpi_object *obj)
+{
+ int ret = 0;
+
+ switch (obj->type) {
+ case ACPI_TYPE_INTEGER:
+ ret = snprintf(buf, sz, "0x%08llx\n", obj->integer.value);
+ break;
+ case ACPI_TYPE_STRING:
+ ret = snprintf(buf, sz, "%s\n", obj->string.pointer);
+ break;
+ }
+
+ return ret;
+}
+
+static void atk_pack_print(char *buf, size_t sz, union acpi_object *pack)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pack->package.count; i++) {
+ union acpi_object *obj = &pack->package.elements[i];
+
+ ret = atk_acpi_print(buf, sz, obj);
+ if (ret >= sz)
+ break;
+ buf += ret;
+ sz -= ret;
+ }
+}
+
+static int atk_debugfs_ggrp_open(struct inode *inode, struct file *file)
+{
+ struct atk_data *data = inode->i_private;
+ char *buf = NULL;
+ union acpi_object *ret;
+ u8 cls;
+ int i;
+
+ if (!data->enumerate_handle)
+ return -ENODEV;
+ if (!data->debugfs.id)
+ return -EINVAL;
+
+ cls = (data->debugfs.id & 0xff000000) >> 24;
+ ret = atk_ggrp(data, cls);
+ if (IS_ERR(ret))
+ return PTR_ERR(ret);
+
+ for (i = 0; i < ret->package.count; i++) {
+ union acpi_object *pack = &ret->package.elements[i];
+ union acpi_object *id;
+
+ if (pack->type != ACPI_TYPE_PACKAGE)
+ continue;
+ if (!pack->package.count)
+ continue;
+ id = &pack->package.elements[0];
+ if (id->integer.value == data->debugfs.id) {
+ /* Print the package */
+ buf = kzalloc(512, GFP_KERNEL);
+ if (!buf) {
+ ACPI_FREE(ret);
+ return -ENOMEM;
+ }
+ atk_pack_print(buf, 512, pack);
+ break;
+ }
+ }
+ ACPI_FREE(ret);
+
+ if (!buf)
+ return -EINVAL;
+
+ file->private_data = buf;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t atk_debugfs_ggrp_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char *str = file->private_data;
+ size_t len = strlen(str);
+
+ return simple_read_from_buffer(buf, count, pos, str, len);
+}
+
+static int atk_debugfs_ggrp_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations atk_debugfs_ggrp_fops = {
+ .read = atk_debugfs_ggrp_read,
+ .open = atk_debugfs_ggrp_open,
+ .release = atk_debugfs_ggrp_release,
+};
+
+static void atk_debugfs_init(struct atk_data *data)
+{
+ struct dentry *d;
+ struct dentry *f;
+
+ data->debugfs.id = 0;
+
+ d = debugfs_create_dir("asus_atk0110", NULL);
+ if (!d || IS_ERR(d))
+ return;
+
+ f = debugfs_create_x32("id", S_IRUSR | S_IWUSR, d, &data->debugfs.id);
+ if (!f || IS_ERR(f))
+ goto cleanup;
+
+ f = debugfs_create_file("gitm", S_IRUSR, d, data,
+ &atk_debugfs_gitm);
+ if (!f || IS_ERR(f))
+ goto cleanup;
+
+ f = debugfs_create_file("ggrp", S_IRUSR, d, data,
+ &atk_debugfs_ggrp_fops);
+ if (!f || IS_ERR(f))
+ goto cleanup;
+
+ data->debugfs.root = d;
+
+ return;
+cleanup:
+ debugfs_remove_recursive(d);
+}
+
+static void atk_debugfs_cleanup(struct atk_data *data)
+{
+ debugfs_remove_recursive(data->debugfs.root);
+}
+
+#else /* CONFIG_DEBUG_FS */
+
+static void atk_debugfs_init(struct atk_data *data)
+{
+}
+
+static void atk_debugfs_cleanup(struct atk_data *data)
+{
+}
+#endif
+
static int atk_add_sensor(struct atk_data *data, union acpi_object *obj)
{
struct device *dev = &data->acpi_dev->dev;
@@ -1047,76 +1234,75 @@ remove:
return err;
}
-static int atk_check_old_if(struct atk_data *data)
+static int atk_probe_if(struct atk_data *data)
{
struct device *dev = &data->acpi_dev->dev;
acpi_handle ret;
acpi_status status;
+ int err = 0;
/* RTMP: read temperature */
status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_TMP, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->rtmp_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_OLD_READ_TMP " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->rtmp_handle = ret;
/* RVLT: read voltage */
status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_VLT, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->rvlt_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_OLD_READ_VLT " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->rvlt_handle = ret;
/* RFAN: read fan status */
status = acpi_get_handle(data->atk_handle, METHOD_OLD_READ_FAN, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->rfan_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_OLD_READ_FAN " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->rfan_handle = ret;
-
- return 0;
-}
-
-static int atk_check_new_if(struct atk_data *data)
-{
- struct device *dev = &data->acpi_dev->dev;
- acpi_handle ret;
- acpi_status status;
/* Enumeration */
status = acpi_get_handle(data->atk_handle, METHOD_ENUMERATE, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->enumerate_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_ENUMERATE " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->enumerate_handle = ret;
/* De-multiplexer (read) */
status = acpi_get_handle(data->atk_handle, METHOD_READ, &ret);
- if (status != AE_OK) {
+ if (ACPI_SUCCESS(status))
+ data->read_handle = ret;
+ else
dev_dbg(dev, "method " METHOD_READ " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->read_handle = ret;
/* De-multiplexer (write) */
status = acpi_get_handle(data->atk_handle, METHOD_WRITE, &ret);
- if (status != AE_OK) {
- dev_dbg(dev, "method " METHOD_READ " not found: %s\n",
+ if (ACPI_SUCCESS(status))
+ data->write_handle = ret;
+ else
+ dev_dbg(dev, "method " METHOD_WRITE " not found: %s\n",
acpi_format_exception(status));
- return -ENODEV;
- }
- data->write_handle = ret;
- return 0;
+ /* Check for hwmon methods: first check "old" style methods; note that
+ * both may be present: in this case we stick to the old interface;
+ * analysis of multiple DSDTs indicates that when both interfaces
+ * are present the new one (GGRP/GITM) is not functional.
+ */
+ if (data->rtmp_handle && data->rvlt_handle && data->rfan_handle)
+ data->old_interface = true;
+ else if (data->enumerate_handle && data->read_handle &&
+ data->write_handle)
+ data->old_interface = false;
+ else
+ err = -ENODEV;
+
+ return err;
}
static int atk_add(struct acpi_device *device)
@@ -1155,28 +1341,19 @@ static int atk_add(struct acpi_device *device)
}
ACPI_FREE(buf.pointer);
- /* Check for hwmon methods: first check "old" style methods; note that
- * both may be present: in this case we stick to the old interface;
- * analysis of multiple DSDTs indicates that when both interfaces
- * are present the new one (GGRP/GITM) is not functional.
- */
- err = atk_check_old_if(data);
- if (!err) {
- dev_dbg(&device->dev, "Using old hwmon interface\n");
- data->old_interface = true;
- } else {
- err = atk_check_new_if(data);
- if (err)
- goto out;
-
- dev_dbg(&device->dev, "Using new hwmon interface\n");
- data->old_interface = false;
+ err = atk_probe_if(data);
+ if (err) {
+ dev_err(&device->dev, "No usable hwmon interface detected\n");
+ goto out;
}
- if (data->old_interface)
+ if (data->old_interface) {
+ dev_dbg(&device->dev, "Using old hwmon interface\n");
err = atk_enumerate_old_hwmon(data);
- else
+ } else {
+ dev_dbg(&device->dev, "Using new hwmon interface\n");
err = atk_enumerate_new_hwmon(data);
+ }
if (err < 0)
goto out;
if (err == 0) {
@@ -1190,6 +1367,8 @@ static int atk_add(struct acpi_device *device)
if (err)
goto cleanup;
+ atk_debugfs_init(data);
+
device->driver_data = data;
return 0;
cleanup:
@@ -1208,6 +1387,8 @@ static int atk_remove(struct acpi_device *device, int type)
device->driver_data = NULL;
+ atk_debugfs_cleanup(data);
+
atk_remove_files(data);
atk_free_sensors(data);
hwmon_device_unregister(data->hwmon_dev);
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index caef39cda8c8..2d7bceeed0bc 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -33,6 +33,7 @@
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
+#include <linux/pci.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -161,6 +162,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
int usemsr_ee = 1;
int err;
u32 eax, edx;
+ struct pci_dev *host_bridge;
/* Early chips have no MSR for TjMax */
@@ -168,11 +170,21 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
usemsr_ee = 0;
}
- /* Atoms seems to have TjMax at 90C */
+ /* Atom CPUs */
if (c->x86_model == 0x1c) {
usemsr_ee = 0;
- tjmax = 90000;
+
+ host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
+
+ if (host_bridge && host_bridge->vendor == PCI_VENDOR_ID_INTEL
+ && (host_bridge->device == 0xa000 /* NM10 based nettop */
+ || host_bridge->device == 0xa010)) /* NM10 based netbook */
+ tjmax = 100000;
+ else
+ tjmax = 90000;
+
+ pci_dev_put(host_bridge);
}
if ((c->x86_model > 0xe) && (usemsr_ee)) {
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index d8a26d16d948..099a2138cdf6 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -33,6 +33,16 @@ static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+/* CPUID function 0x80000001, ebx */
+#define CPUID_PKGTYPE_MASK 0xf0000000
+#define CPUID_PKGTYPE_F 0x00000000
+#define CPUID_PKGTYPE_AM2R2_AM3 0x10000000
+
+/* DRAM controller (PCI function 2) */
+#define REG_DCT0_CONFIG_HIGH 0x094
+#define DDR3_MODE 0x00000100
+
+/* miscellaneous (PCI function 3) */
#define REG_HARDWARE_THERMAL_CONTROL 0x64
#define HTC_ENABLE 0x00000001
@@ -85,13 +95,28 @@ static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_temp_crit, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, show_temp_crit, NULL, 1);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static bool __devinit has_erratum_319(void)
+static bool __devinit has_erratum_319(struct pci_dev *pdev)
{
+ u32 pkg_type, reg_dram_cfg;
+
+ if (boot_cpu_data.x86 != 0x10)
+ return false;
+
/*
- * Erratum 319: The thermal sensor of older Family 10h processors
- * (B steppings) may be unreliable.
+ * Erratum 319: The thermal sensor of Socket F/AM2+ processors
+ * may be unreliable.
*/
- return boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model <= 2;
+ pkg_type = cpuid_ebx(0x80000001) & CPUID_PKGTYPE_MASK;
+ if (pkg_type == CPUID_PKGTYPE_F)
+ return true;
+ if (pkg_type != CPUID_PKGTYPE_AM2R2_AM3)
+ return false;
+
+ /* Differentiate between AM2+ (bad) and AM3 (good) */
+ pci_bus_read_config_dword(pdev->bus,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 2),
+ REG_DCT0_CONFIG_HIGH, &reg_dram_cfg);
+ return !(reg_dram_cfg & DDR3_MODE);
}
static int __devinit k10temp_probe(struct pci_dev *pdev,
@@ -99,9 +124,10 @@ static int __devinit k10temp_probe(struct pci_dev *pdev,
{
struct device *hwmon_dev;
u32 reg_caps, reg_htc;
+ int unreliable = has_erratum_319(pdev);
int err;
- if (has_erratum_319() && !force) {
+ if (unreliable && !force) {
dev_err(&pdev->dev,
"unreliable CPU thermal sensor; monitoring disabled\n");
err = -ENODEV;
@@ -139,7 +165,7 @@ static int __devinit k10temp_probe(struct pci_dev *pdev,
}
dev_set_drvdata(&pdev->dev, hwmon_dev);
- if (has_erratum_319() && force)
+ if (unreliable && force)
dev_warn(&pdev->dev,
"unreliable CPU thermal sensor; check erratum 319\n");
return 0;
@@ -169,7 +195,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
dev_set_drvdata(&pdev->dev, NULL);
}
-static struct pci_device_id k10temp_id_table[] = {
+static const struct pci_device_id k10temp_id_table[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
{}
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 1fe995111841..0ceb6d6200a3 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -136,7 +136,7 @@ static SENSOR_DEVICE_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 1, 0);
static SENSOR_DEVICE_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 1, 1);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static struct pci_device_id k8temp_ids[] = {
+static const struct pci_device_id k8temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
{ 0 },
};
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 12f2e7086560..79c2931e3008 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -697,7 +697,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
return data;
}
-static struct pci_device_id sis5595_pci_ids[] = {
+static const struct pci_device_id sis5595_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index 39e82a492f26..f397ce7ad598 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -767,7 +767,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
return data;
}
-static struct pci_device_id via686a_pci_ids[] = {
+static const struct pci_device_id via686a_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) },
{ 0, }
};
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 470a1226ba2b..d47b4c9949c2 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -697,7 +697,7 @@ static struct platform_driver vt8231_driver = {
.remove = __devexit_p(vt8231_remove),
};
-static struct pci_device_id vt8231_pci_ids[] = {
+static const struct pci_device_id vt8231_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 8de7d7b87bb0..bd8f1e4d9e6c 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -480,7 +480,7 @@ static struct i2c_adapter ali1535_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali1535_ids[] = {
+static const struct pci_device_id ali1535_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ },
};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index f70f46582c6c..a409cfcf0629 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -87,9 +87,9 @@ static int ali1563_transaction(struct i2c_adapter * a, int size)
outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
timeout = ALI1563_MAX_TIMEOUT;
- do
+ do {
msleep(1);
- while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout);
+ } while (((data = inb_p(SMB_HST_STS)) & HST_STS_BUSY) && --timeout);
dev_dbg(&a->dev, "Transaction (post): STS=%02x, CNTL1=%02x, "
"CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
@@ -157,9 +157,9 @@ static int ali1563_block_start(struct i2c_adapter * a)
outb_p(inb_p(SMB_HST_CNTL2) | HST_CNTL2_START, SMB_HST_CNTL2);
timeout = ALI1563_MAX_TIMEOUT;
- do
+ do {
msleep(1);
- while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout);
+ } while (!((data = inb_p(SMB_HST_STS)) & HST_STS_DONE) && --timeout);
dev_dbg(&a->dev, "Block (post): STS=%02x, CNTL1=%02x, "
"CNTL2=%02x, CMD=%02x, ADD=%02x, DAT0=%02x, DAT1=%02x\n",
@@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev)
ali1563_shutdown(dev);
}
-static struct pci_device_id __devinitdata ali1563_id_table[] = {
+static const struct pci_device_id ali1563_id_table[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
{},
};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index e7e3205f1286..659f63f5e4af 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali15x3_ids[] = {
+static const struct pci_device_id ali15x3_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 8f0b90ef8c76..c5a9fa488e7f 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@ static const char* chipname[] = {
"nVidia nForce", "AMD8111",
};
-static struct pci_device_id amd756_ids[] = {
+static const struct pci_device_id amd756_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 5b4ad86ca166..d0dc970d7370 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -351,7 +351,7 @@ static const struct i2c_algorithm smbus_algorithm = {
};
-static struct pci_device_id amd8111_ids[] = {
+static const struct pci_device_id amd8111_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index bec9b845dd16..c767295ad1fb 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = {
.algo_data = &hydra_bit_data,
};
-static struct pci_device_id hydra_ids[] = {
+static const struct pci_device_id hydra_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index df6ab553f975..cefe80c0f44c 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -561,7 +561,7 @@ static struct i2c_adapter i801_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id i801_ids[] = {
+static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index dba6eb053e2f..69c22f79f231 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -256,7 +256,7 @@ static struct i2c_adapter sch_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sch_ids[] = {
+static const struct pci_device_id sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ec11d1c4e77b..4a700587ef18 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -308,7 +308,7 @@ static struct i2c_algorithm smbus_algorithm = {
};
-static struct pci_device_id nforce2_ids[] = {
+static const struct pci_device_id nforce2_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index adf0fbb902f0..0d20ff46a518 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -400,7 +400,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev)
kfree(smbus);
}
-static struct pci_device_id pasemi_smb_ids[] = {
+static const struct pci_device_id pasemi_smb_ids[] = {
{ PCI_DEVICE(0x1959, 0xa003) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 0ed68e2ccd22..f7346a9bd95f 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -75,7 +75,7 @@ static int pca_isa_waitforcompletion(void *pd)
unsigned long timeout;
if (irq > -1) {
- ret = wait_event_interruptible_timeout(pca_wait,
+ ret = wait_event_timeout(pca_wait,
pca_isa_readbyte(pd, I2C_PCA_CON)
& I2C_PCA_CON_SI, pca_isa_ops.timeout);
} else {
@@ -96,7 +96,7 @@ static void pca_isa_resetchip(void *pd)
}
static irqreturn_t pca_handler(int this_irq, void *dev_id) {
- wake_up_interruptible(&pca_wait);
+ wake_up(&pca_wait);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index c4df9d411cd5..5b2213df5ed0 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -84,7 +84,7 @@ static int i2c_pca_pf_waitforcompletion(void *pd)
unsigned long timeout;
if (i2c->irq) {
- ret = wait_event_interruptible_timeout(i2c->wait,
+ ret = wait_event_timeout(i2c->wait,
i2c->algo_data.read_byte(i2c, I2C_PCA_CON)
& I2C_PCA_CON_SI, i2c->adap.timeout);
} else {
@@ -122,7 +122,7 @@ static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id)
if ((i2c->algo_data.read_byte(i2c, I2C_PCA_CON) & I2C_PCA_CON_SI) == 0)
return IRQ_NONE;
- wake_up_interruptible(&i2c->wait);
+ wake_up(&i2c->wait);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 1e245e9cad31..ee9da6fcf69a 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -324,12 +324,12 @@ static int piix4_transaction(void)
else
msleep(1);
- while ((timeout++ < MAX_TIMEOUT) &&
+ while ((++timeout < MAX_TIMEOUT) &&
((temp = inb_p(SMBHSTSTS)) & 0x01))
msleep(1);
/* If the SMBus is still busy, we give up */
- if (timeout >= MAX_TIMEOUT) {
+ if (timeout == MAX_TIMEOUT) {
dev_err(&piix4_adapter.dev, "SMBus Timeout!\n");
result = -ETIMEDOUT;
}
@@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id piix4_ids[] = {
+static const struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 5d1c2603a130..2b0bd0b042d6 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -20,15 +20,15 @@
#include <linux/platform_device.h>
#include <linux/i2c-pnx.h>
#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+
#include <mach/hardware.h>
#include <mach/i2c.h>
-#include <asm/irq.h>
-#include <asm/uaccess.h>
#define I2C_PNX_TIMEOUT 10 /* msec */
#define I2C_PNX_SPEED_KHZ 100
#define I2C_PNX_REGION_SIZE 0x100
-#define PNX_DEFAULT_FREQ 13 /* MHz */
static inline int wait_timeout(long timeout, struct i2c_pnx_algo_data *data)
{
@@ -50,22 +50,21 @@ static inline int wait_reset(long timeout, struct i2c_pnx_algo_data *data)
return (timeout <= 0);
}
-static inline void i2c_pnx_arm_timer(struct i2c_adapter *adap)
+static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
{
- struct i2c_pnx_algo_data *data = adap->algo_data;
- struct timer_list *timer = &data->mif.timer;
- int expires = I2C_PNX_TIMEOUT / (1000 / HZ);
+ struct timer_list *timer = &alg_data->mif.timer;
+ unsigned long expires = msecs_to_jiffies(I2C_PNX_TIMEOUT);
if (expires <= 1)
expires = 2;
del_timer_sync(timer);
- dev_dbg(&adap->dev, "Timer armed at %lu plus %u jiffies.\n",
+ dev_dbg(&alg_data->adapter.dev, "Timer armed at %lu plus %lu jiffies.\n",
jiffies, expires);
timer->expires = jiffies + expires;
- timer->data = (unsigned long)adap;
+ timer->data = (unsigned long)&alg_data;
add_timer(timer);
}
@@ -77,34 +76,34 @@ static inline void i2c_pnx_arm_timer(struct i2c_adapter *adap)
*
* Generate a START signal in the desired mode.
*/
-static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap)
+static int i2c_pnx_start(unsigned char slave_addr,
+ struct i2c_pnx_algo_data *alg_data)
{
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
-
- dev_dbg(&adap->dev, "%s(): addr 0x%x mode %d\n", __func__,
+ dev_dbg(&alg_data->adapter.dev, "%s(): addr 0x%x mode %d\n", __func__,
slave_addr, alg_data->mif.mode);
/* Check for 7 bit slave addresses only */
if (slave_addr & ~0x7f) {
- dev_err(&adap->dev, "%s: Invalid slave address %x. "
- "Only 7-bit addresses are supported\n",
- adap->name, slave_addr);
+ dev_err(&alg_data->adapter.dev,
+ "%s: Invalid slave address %x. Only 7-bit addresses are supported\n",
+ alg_data->adapter.name, slave_addr);
return -EINVAL;
}
/* First, make sure bus is idle */
if (wait_timeout(I2C_PNX_TIMEOUT, alg_data)) {
/* Somebody else is monopolizing the bus */
- dev_err(&adap->dev, "%s: Bus busy. Slave addr = %02x, "
- "cntrl = %x, stat = %x\n",
- adap->name, slave_addr,
- ioread32(I2C_REG_CTL(alg_data)),
- ioread32(I2C_REG_STS(alg_data)));
+ dev_err(&alg_data->adapter.dev,
+ "%s: Bus busy. Slave addr = %02x, cntrl = %x, stat = %x\n",
+ alg_data->adapter.name, slave_addr,
+ ioread32(I2C_REG_CTL(alg_data)),
+ ioread32(I2C_REG_STS(alg_data)));
return -EBUSY;
} else if (ioread32(I2C_REG_STS(alg_data)) & mstatus_afi) {
/* Sorry, we lost the bus */
- dev_err(&adap->dev, "%s: Arbitration failure. "
- "Slave addr = %02x\n", adap->name, slave_addr);
+ dev_err(&alg_data->adapter.dev,
+ "%s: Arbitration failure. Slave addr = %02x\n",
+ alg_data->adapter.name, slave_addr);
return -EIO;
}
@@ -115,14 +114,14 @@ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap)
iowrite32(ioread32(I2C_REG_STS(alg_data)) | mstatus_tdi | mstatus_afi,
I2C_REG_STS(alg_data));
- dev_dbg(&adap->dev, "%s(): sending %#x\n", __func__,
+ dev_dbg(&alg_data->adapter.dev, "%s(): sending %#x\n", __func__,
(slave_addr << 1) | start_bit | alg_data->mif.mode);
/* Write the slave address, START bit and R/W bit */
iowrite32((slave_addr << 1) | start_bit | alg_data->mif.mode,
I2C_REG_TX(alg_data));
- dev_dbg(&adap->dev, "%s(): exit\n", __func__);
+ dev_dbg(&alg_data->adapter.dev, "%s(): exit\n", __func__);
return 0;
}
@@ -133,13 +132,12 @@ static int i2c_pnx_start(unsigned char slave_addr, struct i2c_adapter *adap)
*
* Generate a STOP signal to terminate the master transaction.
*/
-static void i2c_pnx_stop(struct i2c_adapter *adap)
+static void i2c_pnx_stop(struct i2c_pnx_algo_data *alg_data)
{
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
/* Only 1 msec max timeout due to interrupt context */
long timeout = 1000;
- dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
+ dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n",
__func__, ioread32(I2C_REG_STS(alg_data)));
/* Write a STOP bit to TX FIFO */
@@ -153,7 +151,7 @@ static void i2c_pnx_stop(struct i2c_adapter *adap)
timeout--;
}
- dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
+ dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n",
__func__, ioread32(I2C_REG_STS(alg_data)));
}
@@ -163,12 +161,11 @@ static void i2c_pnx_stop(struct i2c_adapter *adap)
*
* Sends one byte of data to the slave
*/
-static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
+static int i2c_pnx_master_xmit(struct i2c_pnx_algo_data *alg_data)
{
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
u32 val;
- dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
+ dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n",
__func__, ioread32(I2C_REG_STS(alg_data)));
if (alg_data->mif.len > 0) {
@@ -184,15 +181,15 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
alg_data->mif.len--;
iowrite32(val, I2C_REG_TX(alg_data));
- dev_dbg(&adap->dev, "%s(): xmit %#x [%d]\n", __func__,
- val, alg_data->mif.len + 1);
+ dev_dbg(&alg_data->adapter.dev, "%s(): xmit %#x [%d]\n",
+ __func__, val, alg_data->mif.len + 1);
if (alg_data->mif.len == 0) {
if (alg_data->last) {
/* Wait until the STOP is seen. */
if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
- dev_err(&adap->dev, "The bus is still "
- "active after timeout\n");
+ dev_err(&alg_data->adapter.dev,
+ "The bus is still active after timeout\n");
}
/* Disable master interrupts */
iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
@@ -201,14 +198,15 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
del_timer_sync(&alg_data->mif.timer);
- dev_dbg(&adap->dev, "%s(): Waking up xfer routine.\n",
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): Waking up xfer routine.\n",
__func__);
complete(&alg_data->mif.complete);
}
} else if (alg_data->mif.len == 0) {
/* zero-sized transfer */
- i2c_pnx_stop(adap);
+ i2c_pnx_stop(alg_data);
/* Disable master interrupts. */
iowrite32(ioread32(I2C_REG_CTL(alg_data)) &
@@ -217,13 +215,14 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
/* Stop timer. */
del_timer_sync(&alg_data->mif.timer);
- dev_dbg(&adap->dev, "%s(): Waking up xfer routine after "
- "zero-xfer.\n", __func__);
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): Waking up xfer routine after zero-xfer.\n",
+ __func__);
complete(&alg_data->mif.complete);
}
- dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
+ dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n",
__func__, ioread32(I2C_REG_STS(alg_data)));
return 0;
@@ -235,21 +234,21 @@ static int i2c_pnx_master_xmit(struct i2c_adapter *adap)
*
* Reads one byte data from the slave
*/
-static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
+static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data)
{
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
unsigned int val = 0;
u32 ctl = 0;
- dev_dbg(&adap->dev, "%s(): entering: stat = %04x.\n",
+ dev_dbg(&alg_data->adapter.dev, "%s(): entering: stat = %04x.\n",
__func__, ioread32(I2C_REG_STS(alg_data)));
/* Check, whether there is already data,
* or we didn't 'ask' for it yet.
*/
if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) {
- dev_dbg(&adap->dev, "%s(): Write dummy data to fill "
- "Rx-fifo...\n", __func__);
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): Write dummy data to fill Rx-fifo...\n",
+ __func__);
if (alg_data->mif.len == 1) {
/* Last byte, do not acknowledge next rcv. */
@@ -281,16 +280,16 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
if (alg_data->mif.len > 0) {
val = ioread32(I2C_REG_RX(alg_data));
*alg_data->mif.buf++ = (u8) (val & 0xff);
- dev_dbg(&adap->dev, "%s(): rcv 0x%x [%d]\n", __func__, val,
- alg_data->mif.len);
+ dev_dbg(&alg_data->adapter.dev, "%s(): rcv 0x%x [%d]\n",
+ __func__, val, alg_data->mif.len);
alg_data->mif.len--;
if (alg_data->mif.len == 0) {
if (alg_data->last)
/* Wait until the STOP is seen. */
if (wait_timeout(I2C_PNX_TIMEOUT, alg_data))
- dev_err(&adap->dev, "The bus is still "
- "active after timeout\n");
+ dev_err(&alg_data->adapter.dev,
+ "The bus is still active after timeout\n");
/* Disable master interrupts */
ctl = ioread32(I2C_REG_CTL(alg_data));
@@ -304,7 +303,7 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
}
}
- dev_dbg(&adap->dev, "%s(): exiting: stat = %04x.\n",
+ dev_dbg(&alg_data->adapter.dev, "%s(): exiting: stat = %04x.\n",
__func__, ioread32(I2C_REG_STS(alg_data)));
return 0;
@@ -312,11 +311,11 @@ static int i2c_pnx_master_rcv(struct i2c_adapter *adap)
static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
{
+ struct i2c_pnx_algo_data *alg_data = dev_id;
u32 stat, ctl;
- struct i2c_adapter *adap = dev_id;
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
- dev_dbg(&adap->dev, "%s(): mstat = %x mctrl = %x, mode = %d\n",
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): mstat = %x mctrl = %x, mode = %d\n",
__func__,
ioread32(I2C_REG_STS(alg_data)),
ioread32(I2C_REG_CTL(alg_data)),
@@ -339,10 +338,10 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
complete(&alg_data->mif.complete);
} else if (stat & mstatus_nai) {
/* Slave did not acknowledge, generate a STOP */
- dev_dbg(&adap->dev, "%s(): "
- "Slave did not acknowledge, generating a STOP.\n",
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): Slave did not acknowledge, generating a STOP.\n",
__func__);
- i2c_pnx_stop(adap);
+ i2c_pnx_stop(alg_data);
/* Disable master interrupts. */
ctl = ioread32(I2C_REG_CTL(alg_data));
@@ -368,9 +367,9 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
*/
if ((stat & mstatus_drmi) || !(stat & mstatus_rfe)) {
if (alg_data->mif.mode == I2C_SMBUS_WRITE) {
- i2c_pnx_master_xmit(adap);
+ i2c_pnx_master_xmit(alg_data);
} else if (alg_data->mif.mode == I2C_SMBUS_READ) {
- i2c_pnx_master_rcv(adap);
+ i2c_pnx_master_rcv(alg_data);
}
}
}
@@ -379,7 +378,8 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
stat = ioread32(I2C_REG_STS(alg_data));
iowrite32(stat | mstatus_tdi | mstatus_afi, I2C_REG_STS(alg_data));
- dev_dbg(&adap->dev, "%s(): exiting, stat = %x ctrl = %x.\n",
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): exiting, stat = %x ctrl = %x.\n",
__func__, ioread32(I2C_REG_STS(alg_data)),
ioread32(I2C_REG_CTL(alg_data)));
@@ -388,14 +388,13 @@ static irqreturn_t i2c_pnx_interrupt(int irq, void *dev_id)
static void i2c_pnx_timeout(unsigned long data)
{
- struct i2c_adapter *adap = (struct i2c_adapter *)data;
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
+ struct i2c_pnx_algo_data *alg_data = (struct i2c_pnx_algo_data *)data;
u32 ctl;
- dev_err(&adap->dev, "Master timed out. stat = %04x, cntrl = %04x. "
- "Resetting master...\n",
- ioread32(I2C_REG_STS(alg_data)),
- ioread32(I2C_REG_CTL(alg_data)));
+ dev_err(&alg_data->adapter.dev,
+ "Master timed out. stat = %04x, cntrl = %04x. Resetting master...\n",
+ ioread32(I2C_REG_STS(alg_data)),
+ ioread32(I2C_REG_CTL(alg_data)));
/* Reset master and disable interrupts */
ctl = ioread32(I2C_REG_CTL(alg_data));
@@ -409,15 +408,14 @@ static void i2c_pnx_timeout(unsigned long data)
complete(&alg_data->mif.complete);
}
-static inline void bus_reset_if_active(struct i2c_adapter *adap)
+static inline void bus_reset_if_active(struct i2c_pnx_algo_data *alg_data)
{
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
u32 stat;
if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_active) {
- dev_err(&adap->dev,
+ dev_err(&alg_data->adapter.dev,
"%s: Bus is still active after xfer. Reset it...\n",
- adap->name);
+ alg_data->adapter.name);
iowrite32(ioread32(I2C_REG_CTL(alg_data)) | mcntrl_reset,
I2C_REG_CTL(alg_data));
wait_reset(I2C_PNX_TIMEOUT, alg_data);
@@ -451,10 +449,11 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
struct i2c_pnx_algo_data *alg_data = adap->algo_data;
u32 stat = ioread32(I2C_REG_STS(alg_data));
- dev_dbg(&adap->dev, "%s(): entering: %d messages, stat = %04x.\n",
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): entering: %d messages, stat = %04x.\n",
__func__, num, ioread32(I2C_REG_STS(alg_data)));
- bus_reset_if_active(adap);
+ bus_reset_if_active(alg_data);
/* Process transactions in a loop. */
for (i = 0; rc >= 0 && i < num; i++) {
@@ -464,9 +463,9 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
addr = pmsg->addr;
if (pmsg->flags & I2C_M_TEN) {
- dev_err(&adap->dev,
+ dev_err(&alg_data->adapter.dev,
"%s: 10 bits addr not supported!\n",
- adap->name);
+ alg_data->adapter.name);
rc = -EINVAL;
break;
}
@@ -478,11 +477,10 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
alg_data->mif.ret = 0;
alg_data->last = (i == num - 1);
- dev_dbg(&adap->dev, "%s(): mode %d, %d bytes\n", __func__,
- alg_data->mif.mode,
- alg_data->mif.len);
+ dev_dbg(&alg_data->adapter.dev, "%s(): mode %d, %d bytes\n",
+ __func__, alg_data->mif.mode, alg_data->mif.len);
- i2c_pnx_arm_timer(adap);
+ i2c_pnx_arm_timer(alg_data);
/* initialize the completion var */
init_completion(&alg_data->mif.complete);
@@ -493,7 +491,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
I2C_REG_CTL(alg_data));
/* Put start-code and slave-address on the bus. */
- rc = i2c_pnx_start(addr, adap);
+ rc = i2c_pnx_start(addr, alg_data);
if (rc < 0)
break;
@@ -502,31 +500,32 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (!(rc = alg_data->mif.ret))
completed++;
- dev_dbg(&adap->dev, "%s(): Complete, return code = %d.\n",
+ dev_dbg(&alg_data->adapter.dev,
+ "%s(): Complete, return code = %d.\n",
__func__, rc);
/* Clear TDI and AFI bits in case they are set. */
if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_tdi) {
- dev_dbg(&adap->dev,
+ dev_dbg(&alg_data->adapter.dev,
"%s: TDI still set... clearing now.\n",
- adap->name);
+ alg_data->adapter.name);
iowrite32(stat, I2C_REG_STS(alg_data));
}
if ((stat = ioread32(I2C_REG_STS(alg_data))) & mstatus_afi) {
- dev_dbg(&adap->dev,
+ dev_dbg(&alg_data->adapter.dev,
"%s: AFI still set... clearing now.\n",
- adap->name);
+ alg_data->adapter.name);
iowrite32(stat, I2C_REG_STS(alg_data));
}
}
- bus_reset_if_active(adap);
+ bus_reset_if_active(alg_data);
/* Cleanup to be sure... */
alg_data->mif.buf = NULL;
alg_data->mif.len = 0;
- dev_dbg(&adap->dev, "%s(): exiting, stat = %x\n",
+ dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n",
__func__, ioread32(I2C_REG_STS(alg_data)));
if (completed != num)
@@ -545,69 +544,92 @@ static struct i2c_algorithm pnx_algorithm = {
.functionality = i2c_pnx_func,
};
+#ifdef CONFIG_PM
static int i2c_pnx_controller_suspend(struct platform_device *pdev,
pm_message_t state)
{
- struct i2c_pnx_data *i2c_pnx = platform_get_drvdata(pdev);
- return i2c_pnx->suspend(pdev, state);
+ struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
+
+ /* FIXME: shouldn't this be clk_disable? */
+ clk_enable(alg_data->clk);
+
+ return 0;
}
static int i2c_pnx_controller_resume(struct platform_device *pdev)
{
- struct i2c_pnx_data *i2c_pnx = platform_get_drvdata(pdev);
- return i2c_pnx->resume(pdev);
+ struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
+
+ return clk_enable(alg_data->clk);
}
+#else
+#define i2c_pnx_controller_suspend NULL
+#define i2c_pnx_controller_resume NULL
+#endif
static int __devinit i2c_pnx_probe(struct platform_device *pdev)
{
unsigned long tmp;
int ret = 0;
struct i2c_pnx_algo_data *alg_data;
- int freq_mhz;
+ unsigned long freq;
struct i2c_pnx_data *i2c_pnx = pdev->dev.platform_data;
- if (!i2c_pnx || !i2c_pnx->adapter) {
+ if (!i2c_pnx || !i2c_pnx->name) {
dev_err(&pdev->dev, "%s: no platform data supplied\n",
__func__);
ret = -EINVAL;
goto out;
}
- platform_set_drvdata(pdev, i2c_pnx);
-
- if (i2c_pnx->calculate_input_freq)
- freq_mhz = i2c_pnx->calculate_input_freq(pdev);
- else {
- freq_mhz = PNX_DEFAULT_FREQ;
- dev_info(&pdev->dev, "Setting bus frequency to default value: "
- "%d MHz\n", freq_mhz);
+ alg_data = kzalloc(sizeof(*alg_data), GFP_KERNEL);
+ if (!alg_data) {
+ ret = -ENOMEM;
+ goto err_kzalloc;
}
- i2c_pnx->adapter->algo = &pnx_algorithm;
+ platform_set_drvdata(pdev, alg_data);
+
+ strlcpy(alg_data->adapter.name, i2c_pnx->name,
+ sizeof(alg_data->adapter.name));
+ alg_data->adapter.dev.parent = &pdev->dev;
+ alg_data->adapter.algo = &pnx_algorithm;
+ alg_data->adapter.algo_data = alg_data;
+ alg_data->adapter.nr = pdev->id;
+ alg_data->i2c_pnx = i2c_pnx;
+
+ alg_data->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(alg_data->clk)) {
+ ret = PTR_ERR(alg_data->clk);
+ goto out_drvdata;
+ }
- alg_data = i2c_pnx->adapter->algo_data;
init_timer(&alg_data->mif.timer);
alg_data->mif.timer.function = i2c_pnx_timeout;
- alg_data->mif.timer.data = (unsigned long)i2c_pnx->adapter;
+ alg_data->mif.timer.data = (unsigned long)alg_data;
/* Register I/O resource */
- if (!request_mem_region(alg_data->base, I2C_PNX_REGION_SIZE,
+ if (!request_mem_region(i2c_pnx->base, I2C_PNX_REGION_SIZE,
pdev->name)) {
dev_err(&pdev->dev,
"I/O region 0x%08x for I2C already in use.\n",
- alg_data->base);
+ i2c_pnx->base);
ret = -ENODEV;
- goto out_drvdata;
+ goto out_clkget;
}
- if (!(alg_data->ioaddr =
- (u32)ioremap(alg_data->base, I2C_PNX_REGION_SIZE))) {
+ alg_data->ioaddr = ioremap(i2c_pnx->base, I2C_PNX_REGION_SIZE);
+ if (!alg_data->ioaddr) {
dev_err(&pdev->dev, "Couldn't ioremap I2C I/O region\n");
ret = -ENOMEM;
goto out_release;
}
- i2c_pnx->set_clock_run(pdev);
+ ret = clk_enable(alg_data->clk);
+ if (ret)
+ goto out_unmap;
+
+ freq = clk_get_rate(alg_data->clk);
/*
* Clock Divisor High This value is the number of system clocks
@@ -620,45 +642,47 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
* the deglitching filter length.
*/
- tmp = ((freq_mhz * 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;
+ tmp = ((freq / 1000) / I2C_PNX_SPEED_KHZ) / 2 - 2;
iowrite32(tmp, I2C_REG_CKH(alg_data));
iowrite32(tmp, I2C_REG_CKL(alg_data));
iowrite32(mcntrl_reset, I2C_REG_CTL(alg_data));
if (wait_reset(I2C_PNX_TIMEOUT, alg_data)) {
ret = -ENODEV;
- goto out_unmap;
+ goto out_clock;
}
init_completion(&alg_data->mif.complete);
- ret = request_irq(alg_data->irq, i2c_pnx_interrupt,
- 0, pdev->name, i2c_pnx->adapter);
+ ret = request_irq(i2c_pnx->irq, i2c_pnx_interrupt,
+ 0, pdev->name, alg_data);
if (ret)
goto out_clock;
/* Register this adapter with the I2C subsystem */
- i2c_pnx->adapter->dev.parent = &pdev->dev;
- i2c_pnx->adapter->nr = pdev->id;
- ret = i2c_add_numbered_adapter(i2c_pnx->adapter);
+ ret = i2c_add_numbered_adapter(&alg_data->adapter);
if (ret < 0) {
dev_err(&pdev->dev, "I2C: Failed to add bus\n");
goto out_irq;
}
dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
- i2c_pnx->adapter->name, alg_data->base, alg_data->irq);
+ alg_data->adapter.name, i2c_pnx->base, i2c_pnx->irq);
return 0;
out_irq:
- free_irq(alg_data->irq, i2c_pnx->adapter);
+ free_irq(i2c_pnx->irq, alg_data);
out_clock:
- i2c_pnx->set_clock_stop(pdev);
+ clk_disable(alg_data->clk);
out_unmap:
- iounmap((void *)alg_data->ioaddr);
+ iounmap(alg_data->ioaddr);
out_release:
- release_mem_region(alg_data->base, I2C_PNX_REGION_SIZE);
+ release_mem_region(i2c_pnx->base, I2C_PNX_REGION_SIZE);
+out_clkget:
+ clk_put(alg_data->clk);
out_drvdata:
+ kfree(alg_data);
+err_kzalloc:
platform_set_drvdata(pdev, NULL);
out:
return ret;
@@ -666,15 +690,16 @@ out:
static int __devexit i2c_pnx_remove(struct platform_device *pdev)
{
- struct i2c_pnx_data *i2c_pnx = platform_get_drvdata(pdev);
- struct i2c_adapter *adap = i2c_pnx->adapter;
- struct i2c_pnx_algo_data *alg_data = adap->algo_data;
-
- free_irq(alg_data->irq, i2c_pnx->adapter);
- i2c_del_adapter(adap);
- i2c_pnx->set_clock_stop(pdev);
- iounmap((void *)alg_data->ioaddr);
- release_mem_region(alg_data->base, I2C_PNX_REGION_SIZE);
+ struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev);
+ struct i2c_pnx_data *i2c_pnx = alg_data->i2c_pnx;
+
+ free_irq(i2c_pnx->irq, alg_data);
+ i2c_del_adapter(&alg_data->adapter);
+ clk_disable(alg_data->clk);
+ iounmap(alg_data->ioaddr);
+ release_mem_region(i2c_pnx->base, I2C_PNX_REGION_SIZE);
+ clk_put(alg_data->clk);
+ kfree(alg_data);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 365e0becaf12..e8148f58c207 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -33,6 +33,7 @@ struct acpi_smbus_cmi {
u8 cap_info:1;
u8 cap_read:1;
u8 cap_write:1;
+ struct smbus_methods_t *methods;
};
static const struct smbus_methods_t smbus_methods = {
@@ -41,8 +42,18 @@ static const struct smbus_methods_t smbus_methods = {
.mt_sbw = "_SBW",
};
+/* Some IBM BIOSes omit the leading underscore */
+static const struct smbus_methods_t ibm_smbus_methods = {
+ .mt_info = "SBI_",
+ .mt_sbr = "SBR_",
+ .mt_sbw = "SBW_",
+};
+
+/* The define below will go away in the final version */
+#define ACPI_SMBUS_IBM_HID "SMBUSIBM"
static const struct acpi_device_id acpi_smbus_cmi_ids[] = {
- {"SMBUS01", 0},
+ {"SMBUS01", (kernel_ulong_t)&smbus_methods},
+ {ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods},
{"", 0}
};
@@ -150,11 +161,11 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
if (read_write == I2C_SMBUS_READ) {
protocol |= ACPI_SMBUS_PRTCL_READ;
- method = smbus_methods.mt_sbr;
+ method = smbus_cmi->methods->mt_sbr;
input.count = 3;
} else {
protocol |= ACPI_SMBUS_PRTCL_WRITE;
- method = smbus_methods.mt_sbw;
+ method = smbus_cmi->methods->mt_sbw;
input.count = 5;
}
@@ -290,13 +301,13 @@ static int acpi_smbus_cmi_add_cap(struct acpi_smbus_cmi *smbus_cmi,
union acpi_object *obj;
acpi_status status;
- if (!strcmp(name, smbus_methods.mt_info)) {
+ if (!strcmp(name, smbus_cmi->methods->mt_info)) {
status = acpi_evaluate_object(smbus_cmi->handle,
- smbus_methods.mt_info,
+ smbus_cmi->methods->mt_info,
NULL, &buffer);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Evaluating %s: %i",
- smbus_methods.mt_info, status));
+ smbus_cmi->methods->mt_info, status));
return -EIO;
}
@@ -319,9 +330,9 @@ static int acpi_smbus_cmi_add_cap(struct acpi_smbus_cmi *smbus_cmi,
kfree(buffer.pointer);
smbus_cmi->cap_info = 1;
- } else if (!strcmp(name, smbus_methods.mt_sbr))
+ } else if (!strcmp(name, smbus_cmi->methods->mt_sbr))
smbus_cmi->cap_read = 1;
- else if (!strcmp(name, smbus_methods.mt_sbw))
+ else if (!strcmp(name, smbus_cmi->methods->mt_sbw))
smbus_cmi->cap_write = 1;
else
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Unsupported CMI method: %s\n",
@@ -349,6 +360,7 @@ static acpi_status acpi_smbus_cmi_query_methods(acpi_handle handle, u32 level,
static int acpi_smbus_cmi_add(struct acpi_device *device)
{
struct acpi_smbus_cmi *smbus_cmi;
+ const struct acpi_device_id *id;
smbus_cmi = kzalloc(sizeof(struct acpi_smbus_cmi), GFP_KERNEL);
if (!smbus_cmi)
@@ -362,6 +374,11 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
smbus_cmi->cap_read = 0;
smbus_cmi->cap_write = 0;
+ for (id = acpi_smbus_cmi_ids; id->id[0]; id++)
+ if (!strcmp(id->id, acpi_device_hid(device)))
+ smbus_cmi->methods =
+ (struct smbus_methods_t *) id->driver_data;
+
acpi_walk_namespace(ACPI_TYPE_METHOD, smbus_cmi->handle, 1,
acpi_smbus_cmi_query_methods, NULL, smbus_cmi, NULL);
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 844569f7d8b7..55a71370c79b 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis5595_ids[] __devinitdata = {
+static const struct pci_device_id sis5595_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 68cff7af7013..2309c7f1bde2 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -468,7 +468,7 @@ static struct i2c_adapter sis630_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis630_ids[] __devinitdata = {
+static const struct pci_device_id sis630_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ 0, }
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 1649963b00dc..d43d8f8943dd 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis96x_ids[] = {
+static const struct pci_device_id sis96x_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index b1c050ff311d..19434f86c2cc 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -136,7 +136,7 @@ static const struct i2c_algorithm usb_algorithm = {
* Future Technology Devices International Ltd., later a pair was
* bought from EZPrototypes
*/
-static struct usb_device_id i2c_tiny_usb_table [] = {
+static const struct usb_device_id i2c_tiny_usb_table[] = {
{ USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
{ USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
{ } /* Terminating entry */
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 8b24f192103a..de78283bddbe 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = {
};
-static struct pci_device_id vt586b_ids[] __devinitdata = {
+static const struct pci_device_id vt586b_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index e4b1543015af..d57292e5dae0 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -165,10 +165,10 @@ static int vt596_transaction(u8 size)
do {
msleep(1);
temp = inb_p(SMBHSTSTS);
- } while ((temp & 0x01) && (timeout++ < MAX_TIMEOUT));
+ } while ((temp & 0x01) && (++timeout < MAX_TIMEOUT));
/* If the SMBus is still busy, we give up */
- if (timeout >= MAX_TIMEOUT) {
+ if (timeout == MAX_TIMEOUT) {
result = -ETIMEDOUT;
dev_err(&vt596_adapter.dev, "SMBus timeout!\n");
}
@@ -444,7 +444,7 @@ release_region:
return error;
}
-static struct pci_device_id vt596_ids[] = {
+static const struct pci_device_id vt596_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
.driver_data = SMBBA1 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 0ac2f90ab840..d610e995bbfd 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -248,7 +248,7 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
NULL
};
-const static struct dev_pm_ops i2c_device_pm_ops = {
+static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
};
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 87cef0c440ad..349a67bf1a36 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -56,8 +56,8 @@ static inline void auide_insw(unsigned long port, void *addr, u32 count)
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
- if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1,
- DDMA_FLAGS_NOIE)) {
+ if (!au1xxx_dbdma_put_dest(ahwif->rx_chan, virt_to_phys(addr),
+ count << 1, DDMA_FLAGS_NOIE)) {
printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
return;
}
@@ -74,8 +74,8 @@ static inline void auide_outsw(unsigned long port, void *addr, u32 count)
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
- if(!put_source_flags(ahwif->tx_chan, (void*)addr,
- count << 1, DDMA_FLAGS_NOIE)) {
+ if (!au1xxx_dbdma_put_source(ahwif->tx_chan, virt_to_phys(addr),
+ count << 1, DDMA_FLAGS_NOIE)) {
printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
return;
}
@@ -246,17 +246,14 @@ static int auide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd)
flags = DDMA_FLAGS_NOIE;
if (iswrite) {
- if(!put_source_flags(ahwif->tx_chan,
- (void*) sg_virt(sg),
- tc, flags)) {
+ if (!au1xxx_dbdma_put_source(ahwif->tx_chan,
+ sg_phys(sg), tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__func__, __LINE__);
}
- } else
- {
- if(!put_dest_flags(ahwif->rx_chan,
- (void*) sg_virt(sg),
- tc, flags)) {
+ } else {
+ if (!au1xxx_dbdma_put_dest(ahwif->rx_chan,
+ sg_phys(sg), tc, flags)) {
printk(KERN_ERR "%s failed %d\n",
__func__, __LINE__);
}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 6a0e62542167..b07232880ec9 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1365,7 +1365,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
* supported here, and not in the corresponding block interface. Our own
* ide-tape ioctls are supported on both interfaces.
*/
-static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
+static long do_idetape_chrdev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct ide_tape_obj *tape = file->private_data;
@@ -1420,6 +1420,16 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
}
}
+static long idetape_chrdev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ lock_kernel();
+ ret = do_idetape_chrdev_ioctl(file, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
/*
* Do a mode sense page 0 with block descriptor and if it succeeds set the tape
* block size with the reported value.
@@ -1888,7 +1898,7 @@ static const struct file_operations idetape_fops = {
.owner = THIS_MODULE,
.read = idetape_chrdev_read,
.write = idetape_chrdev_write,
- .ioctl = idetape_chrdev_ioctl,
+ .unlocked_ioctl = idetape_chrdev_ioctl,
.open = idetape_chrdev_open,
.release = idetape_chrdev_release,
};
diff --git a/drivers/ieee1394/pcilynx.c b/drivers/ieee1394/pcilynx.c
index 9555fd253865..bf47fee79808 100644
--- a/drivers/ieee1394/pcilynx.c
+++ b/drivers/ieee1394/pcilynx.c
@@ -1452,7 +1452,7 @@ static int __devinit add_card(struct pci_dev *dev,
PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
} else {
PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
- /* FIXME: probably we shoud rewrite the max_rec, max_ROM(1394a),
+ /* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
* generation(1394a) and link_spd(1394a) field and recalculate
* the CRC */
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fbdd73106000..cc9b5940fa97 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2083,7 +2083,7 @@ static int cma_get_port(struct rdma_id_private *id_priv)
static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
struct sockaddr *addr)
{
-#if defined(CONFIG_IPv6) || defined(CONFIG_IPV6_MODULE)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct sockaddr_in6 *sin6;
if (addr->sa_family != AF_INET6)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 66b41351910a..d94388b81a40 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1371,15 +1371,8 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
tim.mac_addr = req->dst_mac;
tim.vlan_tag = ntohs(req->vlan_tag);
if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- printk(KERN_ERR
- "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
- __func__,
- req->dst_mac[0],
- req->dst_mac[1],
- req->dst_mac[2],
- req->dst_mac[3],
- req->dst_mac[4],
- req->dst_mac[5]);
+ printk(KERN_ERR "%s bad dst mac %pM\n",
+ __func__, req->dst_mac);
goto reject;
}
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 989555cee883..2a97c964b9ef 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1752,7 +1752,7 @@ int mlx4_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.send_cq)) {
+ if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
err = -ENOMEM;
*bad_wr = wr;
goto out;
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index d42565258fb7..cf8085bcbd6d 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -74,6 +74,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
struct mlx4_ib_dev *dev = to_mdev(pd->device);
struct mlx4_ib_srq *srq;
struct mlx4_wqe_srq_next_seg *next;
+ struct mlx4_wqe_data_seg *scatter;
int desc_size;
int buf_size;
int err;
@@ -149,6 +150,11 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
next = get_wqe(srq, i);
next->next_wqe_index =
cpu_to_be16((i + 1) & (srq->msrq.max - 1));
+
+ for (scatter = (void *) (next + 1);
+ (void *) scatter < (void *) next + desc_size;
+ ++scatter)
+ scatter->lkey = cpu_to_be32(MLX4_INVALID_LKEY);
}
err = mlx4_mtt_init(dev->dev, srq->buf.npages, srq->buf.page_shift,
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index dee6706038aa..258c639571b5 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -59,7 +59,8 @@ static void evdev_pass_event(struct evdev_client *client,
client->head &= EVDEV_BUFFER_SIZE - 1;
spin_unlock(&client->buffer_lock);
- kill_fasync(&client->fasync, SIGIO, POLL_IN);
+ if (event->type == EV_SYN)
+ kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
/*
diff --git a/drivers/input/gameport/emu10k1-gp.c b/drivers/input/gameport/emu10k1-gp.c
index b04930f7ea7d..7392992da424 100644
--- a/drivers/input/gameport/emu10k1-gp.c
+++ b/drivers/input/gameport/emu10k1-gp.c
@@ -46,7 +46,7 @@ struct emu {
int size;
};
-static struct pci_device_id emu_tbl[] = {
+static const struct pci_device_id emu_tbl[] = {
{ 0x1102, 0x7002, PCI_ANY_ID, PCI_ANY_ID }, /* SB Live gameport */
{ 0x1102, 0x7003, PCI_ANY_ID, PCI_ANY_ID }, /* Audigy gameport */
diff --git a/drivers/input/gameport/fm801-gp.c b/drivers/input/gameport/fm801-gp.c
index 8a1810f88b9e..14d3f3e208a2 100644
--- a/drivers/input/gameport/fm801-gp.c
+++ b/drivers/input/gameport/fm801-gp.c
@@ -140,7 +140,7 @@ static void __devexit fm801_gp_remove(struct pci_dev *pci)
}
}
-static struct pci_device_id fm801_gp_id_table[] = {
+static const struct pci_device_id fm801_gp_id_table[] = {
{ PCI_VENDOR_ID_FORTEMEDIA, PCI_DEVICE_ID_FM801_GP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0 }
};
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index ac11be08585e..7e18bcf05a66 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -11,6 +11,8 @@
* the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/ioport.h>
@@ -190,9 +192,8 @@ static int gameport_bind_driver(struct gameport *gameport, struct gameport_drive
error = device_bind_driver(&gameport->dev);
if (error) {
- printk(KERN_WARNING
- "gameport: device_bind_driver() failed "
- "for %s (%s) and %s, error: %d\n",
+ dev_warn(&gameport->dev,
+ "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
gameport->phys, gameport->name,
drv->description, error);
drv->disconnect(gameport);
@@ -209,9 +210,9 @@ static void gameport_find_driver(struct gameport *gameport)
error = device_attach(&gameport->dev);
if (error < 0)
- printk(KERN_WARNING
- "gameport: device_attach() failed for %s (%s), error: %d\n",
- gameport->phys, gameport->name, error);
+ dev_warn(&gameport->dev,
+ "device_attach() failed for %s (%s), error: %d\n",
+ gameport->phys, gameport->name, error);
}
@@ -262,17 +263,14 @@ static int gameport_queue_event(void *object, struct module *owner,
event = kmalloc(sizeof(struct gameport_event), GFP_ATOMIC);
if (!event) {
- printk(KERN_ERR
- "gameport: Not enough memory to queue event %d\n",
- event_type);
+ pr_err("Not enough memory to queue event %d\n", event_type);
retval = -ENOMEM;
goto out;
}
if (!try_module_get(owner)) {
- printk(KERN_WARNING
- "gameport: Can't get module reference, dropping event %d\n",
- event_type);
+ pr_warning("Can't get module reference, dropping event %d\n",
+ event_type);
kfree(event);
retval = -EINVAL;
goto out;
@@ -298,14 +296,12 @@ static void gameport_free_event(struct gameport_event *event)
static void gameport_remove_duplicate_events(struct gameport_event *event)
{
- struct list_head *node, *next;
- struct gameport_event *e;
+ struct gameport_event *e, *next;
unsigned long flags;
spin_lock_irqsave(&gameport_event_lock, flags);
- list_for_each_safe(node, next, &gameport_event_list) {
- e = list_entry(node, struct gameport_event, node);
+ list_for_each_entry_safe(e, next, &gameport_event_list, node) {
if (event->object == e->object) {
/*
* If this event is of different type we should not
@@ -315,7 +311,7 @@ static void gameport_remove_duplicate_events(struct gameport_event *event)
if (event->type != e->type)
break;
- list_del_init(node);
+ list_del_init(&e->node);
gameport_free_event(e);
}
}
@@ -325,23 +321,18 @@ static void gameport_remove_duplicate_events(struct gameport_event *event)
static struct gameport_event *gameport_get_event(void)
{
- struct gameport_event *event;
- struct list_head *node;
+ struct gameport_event *event = NULL;
unsigned long flags;
spin_lock_irqsave(&gameport_event_lock, flags);
- if (list_empty(&gameport_event_list)) {
- spin_unlock_irqrestore(&gameport_event_lock, flags);
- return NULL;
+ if (!list_empty(&gameport_event_list)) {
+ event = list_first_entry(&gameport_event_list,
+ struct gameport_event, node);
+ list_del_init(&event->node);
}
- node = gameport_event_list.next;
- event = list_entry(node, struct gameport_event, node);
- list_del_init(node);
-
spin_unlock_irqrestore(&gameport_event_lock, flags);
-
return event;
}
@@ -360,16 +351,14 @@ static void gameport_handle_event(void)
if ((event = gameport_get_event())) {
switch (event->type) {
- case GAMEPORT_REGISTER_PORT:
- gameport_add_port(event->object);
- break;
- case GAMEPORT_ATTACH_DRIVER:
- gameport_attach_driver(event->object);
- break;
+ case GAMEPORT_REGISTER_PORT:
+ gameport_add_port(event->object);
+ break;
- default:
- break;
+ case GAMEPORT_ATTACH_DRIVER:
+ gameport_attach_driver(event->object);
+ break;
}
gameport_remove_duplicate_events(event);
@@ -385,16 +374,14 @@ static void gameport_handle_event(void)
*/
static void gameport_remove_pending_events(void *object)
{
- struct list_head *node, *next;
- struct gameport_event *event;
+ struct gameport_event *event, *next;
unsigned long flags;
spin_lock_irqsave(&gameport_event_lock, flags);
- list_for_each_safe(node, next, &gameport_event_list) {
- event = list_entry(node, struct gameport_event, node);
+ list_for_each_entry_safe(event, next, &gameport_event_list, node) {
if (event->object == object) {
- list_del_init(node);
+ list_del_init(&event->node);
gameport_free_event(event);
}
}
@@ -441,7 +428,6 @@ static int gameport_thread(void *nothing)
kthread_should_stop() || !list_empty(&gameport_event_list));
} while (!kthread_should_stop());
- printk(KERN_DEBUG "gameport: kgameportd exiting\n");
return 0;
}
@@ -453,6 +439,7 @@ static int gameport_thread(void *nothing)
static ssize_t gameport_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
struct gameport *gameport = to_gameport_port(dev);
+
return sprintf(buf, "%s\n", gameport->name);
}
@@ -521,7 +508,8 @@ static void gameport_init_port(struct gameport *gameport)
mutex_init(&gameport->drv_mutex);
device_initialize(&gameport->dev);
- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
+ dev_set_name(&gameport->dev, "gameport%lu",
+ (unsigned long)atomic_inc_return(&gameport_no) - 1);
gameport->dev.bus = &gameport_bus;
gameport->dev.release = gameport_release_port;
if (gameport->parent)
@@ -550,19 +538,17 @@ static void gameport_add_port(struct gameport *gameport)
list_add_tail(&gameport->node, &gameport_list);
if (gameport->io)
- printk(KERN_INFO "gameport: %s is %s, io %#x, speed %dkHz\n",
- gameport->name, gameport->phys, gameport->io, gameport->speed);
+ dev_info(&gameport->dev, "%s is %s, io %#x, speed %dkHz\n",
+ gameport->name, gameport->phys, gameport->io, gameport->speed);
else
- printk(KERN_INFO "gameport: %s is %s, speed %dkHz\n",
+ dev_info(&gameport->dev, "%s is %s, speed %dkHz\n",
gameport->name, gameport->phys, gameport->speed);
error = device_add(&gameport->dev);
if (error)
- printk(KERN_ERR
- "gameport: device_add() failed for %s (%s), error: %d\n",
+ dev_err(&gameport->dev,
+ "device_add() failed for %s (%s), error: %d\n",
gameport->phys, gameport->name, error);
- else
- gameport->registered = 1;
}
/*
@@ -584,10 +570,8 @@ static void gameport_destroy_port(struct gameport *gameport)
gameport->parent = NULL;
}
- if (gameport->registered) {
+ if (device_is_registered(&gameport->dev))
device_del(&gameport->dev);
- gameport->registered = 0;
- }
list_del_init(&gameport->node);
@@ -705,8 +689,7 @@ static void gameport_attach_driver(struct gameport_driver *drv)
error = driver_attach(&drv->driver);
if (error)
- printk(KERN_ERR
- "gameport: driver_attach() failed for %s, error: %d\n",
+ pr_err("driver_attach() failed for %s, error: %d\n",
drv->driver.name, error);
}
@@ -727,8 +710,7 @@ int __gameport_register_driver(struct gameport_driver *drv, struct module *owner
error = driver_register(&drv->driver);
if (error) {
- printk(KERN_ERR
- "gameport: driver_register() failed for %s, error: %d\n",
+ pr_err("driver_register() failed for %s, error: %d\n",
drv->driver.name, error);
return error;
}
@@ -828,7 +810,7 @@ static int __init gameport_init(void)
error = bus_register(&gameport_bus);
if (error) {
- printk(KERN_ERR "gameport: failed to register gameport bus, error: %d\n", error);
+ pr_err("failed to register gameport bus, error: %d\n", error);
return error;
}
@@ -836,7 +818,7 @@ static int __init gameport_init(void)
if (IS_ERR(gameport_task)) {
bus_unregister(&gameport_bus);
error = PTR_ERR(gameport_task);
- printk(KERN_ERR "gameport: Failed to start kgameportd, error: %d\n", error);
+ pr_err("Failed to start kgameportd, error: %d\n", error);
return error;
}
diff --git a/drivers/input/gameport/ns558.c b/drivers/input/gameport/ns558.c
index db556b71ddda..7c217848613e 100644
--- a/drivers/input/gameport/ns558.c
+++ b/drivers/input/gameport/ns558.c
@@ -166,7 +166,7 @@ static int ns558_isa_probe(int io)
#ifdef CONFIG_PNP
-static struct pnp_device_id pnp_devids[] = {
+static const struct pnp_device_id pnp_devids[] = {
{ .id = "@P@0001", .driver_data = 0 }, /* ALS 100 */
{ .id = "@P@0020", .driver_data = 0 }, /* ALS 200 */
{ .id = "@P@1001", .driver_data = 0 }, /* ALS 100+ */
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ab060710688f..5d95aef035c0 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -613,12 +613,12 @@ static int input_default_setkeycode(struct input_dev *dev,
}
}
- clear_bit(old_keycode, dev->keybit);
- set_bit(keycode, dev->keybit);
+ __clear_bit(old_keycode, dev->keybit);
+ __set_bit(keycode, dev->keybit);
for (i = 0; i < dev->keycodemax; i++) {
if (input_fetch_keycode(dev, i) == old_keycode) {
- set_bit(old_keycode, dev->keybit);
+ __set_bit(old_keycode, dev->keybit);
break; /* Setting the bit twice is useless, so break */
}
}
@@ -676,6 +676,9 @@ int input_set_keycode(struct input_dev *dev, int scancode, int keycode)
if (retval)
goto out;
+ /* Make sure KEY_RESERVED did not get enabled. */
+ __clear_bit(KEY_RESERVED, dev->keybit);
+
/*
* Simulate keyup event if keycode is not present
* in the keymap anymore
@@ -1494,6 +1497,25 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
}
EXPORT_SYMBOL(input_set_capability);
+#define INPUT_CLEANSE_BITMASK(dev, type, bits) \
+ do { \
+ if (!test_bit(EV_##type, dev->evbit)) \
+ memset(dev->bits##bit, 0, \
+ sizeof(dev->bits##bit)); \
+ } while (0)
+
+static void input_cleanse_bitmasks(struct input_dev *dev)
+{
+ INPUT_CLEANSE_BITMASK(dev, KEY, key);
+ INPUT_CLEANSE_BITMASK(dev, REL, rel);
+ INPUT_CLEANSE_BITMASK(dev, ABS, abs);
+ INPUT_CLEANSE_BITMASK(dev, MSC, msc);
+ INPUT_CLEANSE_BITMASK(dev, LED, led);
+ INPUT_CLEANSE_BITMASK(dev, SND, snd);
+ INPUT_CLEANSE_BITMASK(dev, FF, ff);
+ INPUT_CLEANSE_BITMASK(dev, SW, sw);
+}
+
/**
* input_register_device - register device with input core
* @dev: device to be registered
@@ -1513,13 +1535,19 @@ int input_register_device(struct input_dev *dev)
const char *path;
int error;
+ /* Every input device generates EV_SYN/SYN_REPORT events. */
__set_bit(EV_SYN, dev->evbit);
+ /* KEY_RESERVED is not supposed to be transmitted to userspace. */
+ __clear_bit(KEY_RESERVED, dev->keybit);
+
+ /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
+ input_cleanse_bitmasks(dev);
+
/*
* If delay and period are pre-set by the driver, then autorepeating
* is handled by the driver itself and we don't do it in input.c.
*/
-
init_timer(&dev->timer);
if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) {
dev->timer.data = (long) dev;
@@ -1635,6 +1663,21 @@ int input_register_handler(struct input_handler *handler)
}
EXPORT_SYMBOL(input_register_handler);
+#ifdef CONFIG_KGDB_KDB
+/* input_db_clear_keys - Clear any keyboards if they have a call back,
+ * after returning from the kernel debugger
+ */
+void input_dbg_clear_keys(void)
+{
+ struct input_handler *handler;
+
+ list_for_each_entry(handler, &input_handler_list, node)
+ if (handler->dbg_clear_keys)
+ handler->dbg_clear_keys();
+}
+EXPORT_SYMBOL_GPL(input_dbg_clear_keys);
+#endif
+
/**
* input_unregister_handler - unregisters an input handler
* @handler: handler to be unregistered
diff --git a/drivers/input/joystick/gf2k.c b/drivers/input/joystick/gf2k.c
index 67c207f5b1a1..45ac70eae0aa 100644
--- a/drivers/input/joystick/gf2k.c
+++ b/drivers/input/joystick/gf2k.c
@@ -277,7 +277,7 @@ static int gf2k_connect(struct gameport *gameport, struct gameport_driver *drv)
}
#ifdef RESET_WORKS
- if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) ||
+ if ((gf2k->id != (GB(19,2,0) | GB(15,3,2) | GB(12,3,5))) &&
(gf2k->id != (GB(31,2,0) | GB(27,3,2) | GB(24,3,5)))) {
err = -ENODEV;
goto fail2;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 482cb1204e43..66be6901619d 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -86,9 +86,8 @@
/* xbox d-pads should map to buttons, as is required for DDR pads
but we map them to axes when possible to simplify things */
-#define MAP_DPAD_TO_BUTTONS 0
-#define MAP_DPAD_TO_AXES 1
-#define MAP_DPAD_UNKNOWN 2
+#define MAP_DPAD_TO_BUTTONS (1 << 0)
+#define MAP_TRIGGERS_TO_BUTTONS (1 << 1)
#define XTYPE_XBOX 0
#define XTYPE_XBOX360 1
@@ -99,57 +98,61 @@ static int dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
+static int triggers_to_buttons;
+module_param(triggers_to_buttons, bool, S_IRUGO);
+MODULE_PARM_DESC(triggers_to_buttons, "Map triggers to buttons rather than axes for unknown pads");
+
static const struct xpad_device {
u16 idVendor;
u16 idProduct;
char *name;
- u8 dpad_mapping;
+ u8 mapping;
u8 xtype;
} xpad_device[] = {
- { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x045e, 0x0287, "Microsoft Xbox Controller S", MAP_DPAD_TO_AXES, XTYPE_XBOX },
+ { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX },
+ { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX },
+ { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX },
+ { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX },
{ 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W },
{ 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
- { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x046d, 0xc242, "Logitech Chillstream Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
- { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0738, 0x4516, "Mad Catz Control Pad", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0738, 0x4522, "Mad Catz LumiCON", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0738, 0x4526, "Mad Catz Control Pad Pro", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0738, 0x4536, "Mad Catz MicroCON", MAP_DPAD_TO_AXES, XTYPE_XBOX },
+ { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+ { 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 },
+ { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX },
+ { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX },
+ { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX },
+ { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX },
+ { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX },
+ { 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX },
+ { 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX },
+ { 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX },
{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
- { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
- { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
+ { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
+ { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
- { 0x0c12, 0x8802, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0c12, 0x8810, "Zeroplus Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0e4c, 0x1097, "Radica Gamester Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0e4c, 0x2390, "Radica Games Jtech Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0e6f, 0x0005, "Eclipse wireless Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0e6f, 0x0006, "Edge wireless Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0e6f, 0x0006, "Pelican 'TSZ' Wired Xbox 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
- { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0f30, 0x0202, "Joytech Advanced Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
+ { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX },
+ { 0x0c12, 0x8810, "Zeroplus Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX },
+ { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX },
+ { 0x0e6f, 0x0006, "Pelican 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX },
+ { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
+ { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
+ { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
{ 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
- { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
+ { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 },
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
- { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
- { 0x045e, 0x028e, "Microsoft X-Box 360 pad", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
+ { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
+ { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
- { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_DPAD_TO_AXES, XTYPE_XBOX360 },
- { 0xffff, 0xffff, "Chinese-made Xbox Controller", MAP_DPAD_TO_AXES, XTYPE_XBOX },
- { 0x0000, 0x0000, "Generic X-Box pad", MAP_DPAD_UNKNOWN, XTYPE_UNKNOWN }
+ { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX },
+ { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN }
};
/* buttons shared with xbox and xbox360 */
@@ -165,13 +168,20 @@ static const signed short xpad_btn[] = {
-1 /* terminating entry */
};
-/* only used if MAP_DPAD_TO_BUTTONS */
+/* used when dpad is mapped to nuttons */
static const signed short xpad_btn_pad[] = {
BTN_LEFT, BTN_RIGHT, /* d-pad left, right */
BTN_0, BTN_1, /* d-pad up, down (XXX names??) */
-1 /* terminating entry */
};
+/* used when triggers are mapped to buttons */
+static const signed short xpad_btn_triggers[] = {
+ BTN_TL2, BTN_TR2, /* triggers left/right */
+ -1
+};
+
+
static const signed short xpad360_btn[] = { /* buttons for x360 controller */
BTN_TL, BTN_TR, /* Button LB/RB */
BTN_MODE, /* The big X button */
@@ -181,16 +191,21 @@ static const signed short xpad360_btn[] = { /* buttons for x360 controller */
static const signed short xpad_abs[] = {
ABS_X, ABS_Y, /* left stick */
ABS_RX, ABS_RY, /* right stick */
- ABS_Z, ABS_RZ, /* triggers left/right */
-1 /* terminating entry */
};
-/* only used if MAP_DPAD_TO_AXES */
+/* used when dpad is mapped to axes */
static const signed short xpad_abs_pad[] = {
ABS_HAT0X, ABS_HAT0Y, /* d-pad axes */
-1 /* terminating entry */
};
+/* used when triggers are mapped to axes */
+static const signed short xpad_abs_triggers[] = {
+ ABS_Z, ABS_RZ, /* triggers left/right */
+ -1
+};
+
/* Xbox 360 has a vendor-specific class, so we cannot match it with only
* USB_INTERFACE_INFO (also specifically refused by USB subsystem), so we
* match against vendor id as well. Wired Xbox 360 devices have protocol 1,
@@ -246,7 +261,7 @@ struct usb_xpad {
char phys[64]; /* physical device path */
- int dpad_mapping; /* map d-pad to buttons or to axes */
+ int mapping; /* map d-pad to buttons or to axes */
int xtype; /* type of xbox device */
};
@@ -277,20 +292,25 @@ static void xpad_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *d
~(__s16) le16_to_cpup((__le16 *)(data + 18)));
/* triggers left/right */
- input_report_abs(dev, ABS_Z, data[10]);
- input_report_abs(dev, ABS_RZ, data[11]);
+ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+ input_report_key(dev, BTN_TL2, data[10]);
+ input_report_key(dev, BTN_TR2, data[11]);
+ } else {
+ input_report_abs(dev, ABS_Z, data[10]);
+ input_report_abs(dev, ABS_RZ, data[11]);
+ }
/* digital pad */
- if (xpad->dpad_mapping == MAP_DPAD_TO_AXES) {
- input_report_abs(dev, ABS_HAT0X,
- !!(data[2] & 0x08) - !!(data[2] & 0x04));
- input_report_abs(dev, ABS_HAT0Y,
- !!(data[2] & 0x02) - !!(data[2] & 0x01));
- } else /* xpad->dpad_mapping == MAP_DPAD_TO_BUTTONS */ {
+ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
input_report_key(dev, BTN_LEFT, data[2] & 0x04);
input_report_key(dev, BTN_RIGHT, data[2] & 0x08);
input_report_key(dev, BTN_0, data[2] & 0x01); /* up */
input_report_key(dev, BTN_1, data[2] & 0x02); /* down */
+ } else {
+ input_report_abs(dev, ABS_HAT0X,
+ !!(data[2] & 0x08) - !!(data[2] & 0x04));
+ input_report_abs(dev, ABS_HAT0Y,
+ !!(data[2] & 0x02) - !!(data[2] & 0x01));
}
/* start/back buttons and stick press left/right */
@@ -328,17 +348,17 @@ static void xpad360_process_packet(struct usb_xpad *xpad,
struct input_dev *dev = xpad->dev;
/* digital pad */
- if (xpad->dpad_mapping == MAP_DPAD_TO_AXES) {
- input_report_abs(dev, ABS_HAT0X,
- !!(data[2] & 0x08) - !!(data[2] & 0x04));
- input_report_abs(dev, ABS_HAT0Y,
- !!(data[2] & 0x02) - !!(data[2] & 0x01));
- } else if (xpad->dpad_mapping == MAP_DPAD_TO_BUTTONS) {
+ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
/* dpad as buttons (right, left, down, up) */
input_report_key(dev, BTN_LEFT, data[2] & 0x04);
input_report_key(dev, BTN_RIGHT, data[2] & 0x08);
input_report_key(dev, BTN_0, data[2] & 0x01); /* up */
input_report_key(dev, BTN_1, data[2] & 0x02); /* down */
+ } else {
+ input_report_abs(dev, ABS_HAT0X,
+ !!(data[2] & 0x08) - !!(data[2] & 0x04));
+ input_report_abs(dev, ABS_HAT0Y,
+ !!(data[2] & 0x02) - !!(data[2] & 0x01));
}
/* start/back buttons */
@@ -371,8 +391,13 @@ static void xpad360_process_packet(struct usb_xpad *xpad,
~(__s16) le16_to_cpup((__le16 *)(data + 12)));
/* triggers left/right */
- input_report_abs(dev, ABS_Z, data[4]);
- input_report_abs(dev, ABS_RZ, data[5]);
+ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+ input_report_key(dev, BTN_TL2, data[4]);
+ input_report_key(dev, BTN_TR2, data[5]);
+ } else {
+ input_report_abs(dev, ABS_Z, data[4]);
+ input_report_abs(dev, ABS_RZ, data[5]);
+ }
input_sync(dev);
}
@@ -446,7 +471,7 @@ static void xpad_irq_in(struct urb *urb)
}
exit:
- retval = usb_submit_urb (urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
err ("%s - usb_submit_urb failed with result %d",
__func__, retval);
@@ -571,7 +596,7 @@ static int xpad_play_effect(struct input_dev *dev, void *data,
xpad->odata[6] = 0x00;
xpad->odata[7] = 0x00;
xpad->irq_out->transfer_buffer_length = 8;
- usb_submit_urb(xpad->irq_out, GFP_KERNEL);
+ usb_submit_urb(xpad->irq_out, GFP_ATOMIC);
}
return 0;
@@ -712,11 +737,11 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs)
input_set_abs_params(input_dev, abs, -32768, 32767, 16, 128);
break;
case ABS_Z:
- case ABS_RZ: /* the triggers */
+ case ABS_RZ: /* the triggers (if mapped to axes) */
input_set_abs_params(input_dev, abs, 0, 255, 0, 0);
break;
case ABS_HAT0X:
- case ABS_HAT0Y: /* the d-pad (only if MAP_DPAD_TO_AXES) */
+ case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes */
input_set_abs_params(input_dev, abs, -1, 1, 0, 0);
break;
}
@@ -752,10 +777,9 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
goto fail2;
xpad->udev = udev;
- xpad->dpad_mapping = xpad_device[i].dpad_mapping;
+ xpad->mapping = xpad_device[i].mapping;
xpad->xtype = xpad_device[i].xtype;
- if (xpad->dpad_mapping == MAP_DPAD_UNKNOWN)
- xpad->dpad_mapping = !dpad_to_buttons;
+
if (xpad->xtype == XTYPE_UNKNOWN) {
if (intf->cur_altsetting->desc.bInterfaceClass == USB_CLASS_VENDOR_SPEC) {
if (intf->cur_altsetting->desc.bInterfaceProtocol == 129)
@@ -764,7 +788,13 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
xpad->xtype = XTYPE_XBOX360;
} else
xpad->xtype = XTYPE_XBOX;
+
+ if (dpad_to_buttons)
+ xpad->mapping |= MAP_DPAD_TO_BUTTONS;
+ if (triggers_to_buttons)
+ xpad->mapping |= MAP_TRIGGERS_TO_BUTTONS;
}
+
xpad->dev = input_dev;
usb_make_path(udev, xpad->phys, sizeof(xpad->phys));
strlcat(xpad->phys, "/input0", sizeof(xpad->phys));
@@ -781,25 +811,37 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- /* set up buttons */
+ /* set up standard buttons and axes */
for (i = 0; xpad_common_btn[i] >= 0; i++)
- set_bit(xpad_common_btn[i], input_dev->keybit);
- if ((xpad->xtype == XTYPE_XBOX360) || (xpad->xtype == XTYPE_XBOX360W))
- for (i = 0; xpad360_btn[i] >= 0; i++)
- set_bit(xpad360_btn[i], input_dev->keybit);
- else
- for (i = 0; xpad_btn[i] >= 0; i++)
- set_bit(xpad_btn[i], input_dev->keybit);
- if (xpad->dpad_mapping == MAP_DPAD_TO_BUTTONS)
- for (i = 0; xpad_btn_pad[i] >= 0; i++)
- set_bit(xpad_btn_pad[i], input_dev->keybit);
+ __set_bit(xpad_common_btn[i], input_dev->keybit);
- /* set up axes */
for (i = 0; xpad_abs[i] >= 0; i++)
xpad_set_up_abs(input_dev, xpad_abs[i]);
- if (xpad->dpad_mapping == MAP_DPAD_TO_AXES)
+
+ /* Now set up model-specific ones */
+ if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W) {
+ for (i = 0; xpad360_btn[i] >= 0; i++)
+ __set_bit(xpad360_btn[i], input_dev->keybit);
+ } else {
+ for (i = 0; xpad_btn[i] >= 0; i++)
+ __set_bit(xpad_btn[i], input_dev->keybit);
+ }
+
+ if (xpad->mapping & MAP_DPAD_TO_BUTTONS) {
+ for (i = 0; xpad_btn_pad[i] >= 0; i++)
+ __set_bit(xpad_btn_pad[i], input_dev->keybit);
+ } else {
for (i = 0; xpad_abs_pad[i] >= 0; i++)
xpad_set_up_abs(input_dev, xpad_abs_pad[i]);
+ }
+
+ if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) {
+ for (i = 0; xpad_btn_triggers[i] >= 0; i++)
+ __set_bit(xpad_btn_triggers[i], input_dev->keybit);
+ } else {
+ for (i = 0; xpad_abs_triggers[i] >= 0; i++)
+ xpad_set_up_abs(input_dev, xpad_abs_triggers[i]);
+ }
error = xpad_init_output(intf, xpad);
if (error)
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 1f5e2ce327d6..326875be192e 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -40,26 +40,26 @@ module_param_named(set, atkbd_set, int, 0);
MODULE_PARM_DESC(set, "Select keyboard code set (2 = default, 3 = PS/2 native)");
#if defined(__i386__) || defined(__x86_64__) || defined(__hppa__)
-static int atkbd_reset;
+static bool atkbd_reset;
#else
-static int atkbd_reset = 1;
+static bool atkbd_reset = true;
#endif
module_param_named(reset, atkbd_reset, bool, 0);
MODULE_PARM_DESC(reset, "Reset keyboard during initialization");
-static int atkbd_softrepeat;
+static bool atkbd_softrepeat;
module_param_named(softrepeat, atkbd_softrepeat, bool, 0);
MODULE_PARM_DESC(softrepeat, "Use software keyboard repeat");
-static int atkbd_softraw = 1;
+static bool atkbd_softraw = true;
module_param_named(softraw, atkbd_softraw, bool, 0);
MODULE_PARM_DESC(softraw, "Use software generated rawmode");
-static int atkbd_scroll;
+static bool atkbd_scroll;
module_param_named(scroll, atkbd_scroll, bool, 0);
MODULE_PARM_DESC(scroll, "Enable scroll-wheel on MS Office and similar keyboards");
-static int atkbd_extra;
+static bool atkbd_extra;
module_param_named(extra, atkbd_extra, bool, 0);
MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards");
@@ -206,18 +206,18 @@ struct atkbd {
unsigned short keycode[ATKBD_KEYMAP_SIZE];
DECLARE_BITMAP(force_release_mask, ATKBD_KEYMAP_SIZE);
unsigned char set;
- unsigned char translated;
- unsigned char extra;
- unsigned char write;
- unsigned char softrepeat;
- unsigned char softraw;
- unsigned char scroll;
- unsigned char enabled;
+ bool translated;
+ bool extra;
+ bool write;
+ bool softrepeat;
+ bool softraw;
+ bool scroll;
+ bool enabled;
/* Accessed only from interrupt */
unsigned char emul;
- unsigned char resend;
- unsigned char release;
+ bool resend;
+ bool release;
unsigned long xl_bit;
unsigned int last;
unsigned long time;
@@ -225,8 +225,10 @@ struct atkbd {
struct delayed_work event_work;
unsigned long event_jiffies;
- struct mutex event_mutex;
unsigned long event_mask;
+
+ /* Serializes reconnect(), attr->set() and event work */
+ struct mutex mutex;
};
/*
@@ -299,18 +301,18 @@ static const unsigned int xl_table[] = {
* Checks if we should mangle the scancode to extract 'release' bit
* in translated mode.
*/
-static int atkbd_need_xlate(unsigned long xl_bit, unsigned char code)
+static bool atkbd_need_xlate(unsigned long xl_bit, unsigned char code)
{
int i;
if (code == ATKBD_RET_EMUL0 || code == ATKBD_RET_EMUL1)
- return 0;
+ return false;
for (i = 0; i < ARRAY_SIZE(xl_table); i++)
if (code == xl_table[i])
return test_bit(i, &xl_bit);
- return 1;
+ return true;
}
/*
@@ -357,7 +359,7 @@ static unsigned int atkbd_compat_scancode(struct atkbd *atkbd, unsigned int code
*/
static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
- unsigned int flags)
+ unsigned int flags)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
struct input_dev *dev = atkbd->dev;
@@ -366,20 +368,18 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
int value;
unsigned short keycode;
-#ifdef ATKBD_DEBUG
- printk(KERN_DEBUG "atkbd.c: Received %02x flags %02x\n", data, flags);
-#endif
+ dev_dbg(&serio->dev, "Received %02x flags %02x\n", data, flags);
#if !defined(__i386__) && !defined (__x86_64__)
if ((flags & (SERIO_FRAME | SERIO_PARITY)) && (~flags & SERIO_TIMEOUT) && !atkbd->resend && atkbd->write) {
- printk(KERN_WARNING "atkbd.c: frame/parity error: %02x\n", flags);
+ dev_warn(&serio->dev, "Frame/parity error: %02x\n", flags);
serio_write(serio, ATKBD_CMD_RESEND);
- atkbd->resend = 1;
+ atkbd->resend = true;
goto out;
}
if (!flags && data == ATKBD_RET_ACK)
- atkbd->resend = 0;
+ atkbd->resend = false;
#endif
if (unlikely(atkbd->ps2dev.flags & PS2_FLAG_ACK))
@@ -410,32 +410,32 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
}
switch (code) {
- case ATKBD_RET_BAT:
- atkbd->enabled = 0;
- serio_reconnect(atkbd->ps2dev.serio);
- goto out;
- case ATKBD_RET_EMUL0:
- atkbd->emul = 1;
- goto out;
- case ATKBD_RET_EMUL1:
- atkbd->emul = 2;
- goto out;
- case ATKBD_RET_RELEASE:
- atkbd->release = 1;
- goto out;
- case ATKBD_RET_ACK:
- case ATKBD_RET_NAK:
- if (printk_ratelimit())
- printk(KERN_WARNING "atkbd.c: Spurious %s on %s. "
- "Some program might be trying access hardware directly.\n",
- data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
- goto out;
- case ATKBD_RET_ERR:
- atkbd->err_count++;
-#ifdef ATKBD_DEBUG
- printk(KERN_DEBUG "atkbd.c: Keyboard on %s reports too many keys pressed.\n", serio->phys);
-#endif
- goto out;
+ case ATKBD_RET_BAT:
+ atkbd->enabled = false;
+ serio_reconnect(atkbd->ps2dev.serio);
+ goto out;
+ case ATKBD_RET_EMUL0:
+ atkbd->emul = 1;
+ goto out;
+ case ATKBD_RET_EMUL1:
+ atkbd->emul = 2;
+ goto out;
+ case ATKBD_RET_RELEASE:
+ atkbd->release = true;
+ goto out;
+ case ATKBD_RET_ACK:
+ case ATKBD_RET_NAK:
+ if (printk_ratelimit())
+ dev_warn(&serio->dev,
+ "Spurious %s on %s. "
+ "Some program might be trying access hardware directly.\n",
+ data == ATKBD_RET_ACK ? "ACK" : "NAK", serio->phys);
+ goto out;
+ case ATKBD_RET_ERR:
+ atkbd->err_count++;
+ dev_dbg(&serio->dev, "Keyboard on %s reports too many keys pressed.\n",
+ serio->phys);
+ goto out;
}
code = atkbd_compat_scancode(atkbd, code);
@@ -449,71 +449,72 @@ static irqreturn_t atkbd_interrupt(struct serio *serio, unsigned char data,
input_event(dev, EV_MSC, MSC_SCAN, code);
switch (keycode) {
- case ATKBD_KEY_NULL:
- break;
- case ATKBD_KEY_UNKNOWN:
- printk(KERN_WARNING
- "atkbd.c: Unknown key %s (%s set %d, code %#x on %s).\n",
- atkbd->release ? "released" : "pressed",
- atkbd->translated ? "translated" : "raw",
- atkbd->set, code, serio->phys);
- printk(KERN_WARNING
- "atkbd.c: Use 'setkeycodes %s%02x <keycode>' to make it known.\n",
- code & 0x80 ? "e0" : "", code & 0x7f);
- input_sync(dev);
- break;
- case ATKBD_SCR_1:
- scroll = 1 - atkbd->release * 2;
- break;
- case ATKBD_SCR_2:
- scroll = 2 - atkbd->release * 4;
- break;
- case ATKBD_SCR_4:
- scroll = 4 - atkbd->release * 8;
- break;
- case ATKBD_SCR_8:
- scroll = 8 - atkbd->release * 16;
- break;
- case ATKBD_SCR_CLICK:
- click = !atkbd->release;
- break;
- case ATKBD_SCR_LEFT:
- hscroll = -1;
- break;
- case ATKBD_SCR_RIGHT:
- hscroll = 1;
- break;
- default:
- if (atkbd->release) {
- value = 0;
- atkbd->last = 0;
- } else if (!atkbd->softrepeat && test_bit(keycode, dev->key)) {
- /* Workaround Toshiba laptop multiple keypress */
- value = time_before(jiffies, atkbd->time) && atkbd->last == code ? 1 : 2;
- } else {
- value = 1;
- atkbd->last = code;
- atkbd->time = jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]) / 2;
- }
-
- input_event(dev, EV_KEY, keycode, value);
- input_sync(dev);
+ case ATKBD_KEY_NULL:
+ break;
+ case ATKBD_KEY_UNKNOWN:
+ dev_warn(&serio->dev,
+ "Unknown key %s (%s set %d, code %#x on %s).\n",
+ atkbd->release ? "released" : "pressed",
+ atkbd->translated ? "translated" : "raw",
+ atkbd->set, code, serio->phys);
+ dev_warn(&serio->dev,
+ "Use 'setkeycodes %s%02x <keycode>' to make it known.\n",
+ code & 0x80 ? "e0" : "", code & 0x7f);
+ input_sync(dev);
+ break;
+ case ATKBD_SCR_1:
+ scroll = 1;
+ break;
+ case ATKBD_SCR_2:
+ scroll = 2;
+ break;
+ case ATKBD_SCR_4:
+ scroll = 4;
+ break;
+ case ATKBD_SCR_8:
+ scroll = 8;
+ break;
+ case ATKBD_SCR_CLICK:
+ click = !atkbd->release;
+ break;
+ case ATKBD_SCR_LEFT:
+ hscroll = -1;
+ break;
+ case ATKBD_SCR_RIGHT:
+ hscroll = 1;
+ break;
+ default:
+ if (atkbd->release) {
+ value = 0;
+ atkbd->last = 0;
+ } else if (!atkbd->softrepeat && test_bit(keycode, dev->key)) {
+ /* Workaround Toshiba laptop multiple keypress */
+ value = time_before(jiffies, atkbd->time) && atkbd->last == code ? 1 : 2;
+ } else {
+ value = 1;
+ atkbd->last = code;
+ atkbd->time = jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]) / 2;
+ }
+
+ input_event(dev, EV_KEY, keycode, value);
+ input_sync(dev);
- if (value && test_bit(code, atkbd->force_release_mask)) {
- input_report_key(dev, keycode, 0);
- input_sync(dev);
- }
+ if (value && test_bit(code, atkbd->force_release_mask)) {
+ input_report_key(dev, keycode, 0);
+ input_sync(dev);
+ }
}
if (atkbd->scroll) {
if (click != -1)
input_report_key(dev, BTN_MIDDLE, click);
- input_report_rel(dev, REL_WHEEL, scroll);
+ input_report_rel(dev, REL_WHEEL,
+ atkbd->release ? -scroll : scroll);
input_report_rel(dev, REL_HWHEEL, hscroll);
input_sync(dev);
}
- atkbd->release = 0;
+ atkbd->release = false;
out:
return IRQ_HANDLED;
}
@@ -577,7 +578,7 @@ static void atkbd_event_work(struct work_struct *work)
{
struct atkbd *atkbd = container_of(work, struct atkbd, event_work.work);
- mutex_lock(&atkbd->event_mutex);
+ mutex_lock(&atkbd->mutex);
if (!atkbd->enabled) {
/*
@@ -596,7 +597,7 @@ static void atkbd_event_work(struct work_struct *work)
atkbd_set_repeat_rate(atkbd);
}
- mutex_unlock(&atkbd->event_mutex);
+ mutex_unlock(&atkbd->mutex);
}
/*
@@ -612,7 +613,7 @@ static void atkbd_schedule_event_work(struct atkbd *atkbd, int event_bit)
atkbd->event_jiffies = jiffies;
set_bit(event_bit, &atkbd->event_mask);
- wmb();
+ mb();
schedule_delayed_work(&atkbd->event_work, delay);
}
@@ -632,17 +633,18 @@ static int atkbd_event(struct input_dev *dev,
switch (type) {
- case EV_LED:
- atkbd_schedule_event_work(atkbd, ATKBD_LED_EVENT_BIT);
- return 0;
+ case EV_LED:
+ atkbd_schedule_event_work(atkbd, ATKBD_LED_EVENT_BIT);
+ return 0;
- case EV_REP:
- if (!atkbd->softrepeat)
- atkbd_schedule_event_work(atkbd, ATKBD_REP_EVENT_BIT);
- return 0;
- }
+ case EV_REP:
+ if (!atkbd->softrepeat)
+ atkbd_schedule_event_work(atkbd, ATKBD_REP_EVENT_BIT);
+ return 0;
- return -1;
+ default:
+ return -1;
+ }
}
/*
@@ -653,7 +655,7 @@ static int atkbd_event(struct input_dev *dev,
static inline void atkbd_enable(struct atkbd *atkbd)
{
serio_pause_rx(atkbd->ps2dev.serio);
- atkbd->enabled = 1;
+ atkbd->enabled = true;
serio_continue_rx(atkbd->ps2dev.serio);
}
@@ -665,7 +667,7 @@ static inline void atkbd_enable(struct atkbd *atkbd)
static inline void atkbd_disable(struct atkbd *atkbd)
{
serio_pause_rx(atkbd->ps2dev.serio);
- atkbd->enabled = 0;
+ atkbd->enabled = false;
serio_continue_rx(atkbd->ps2dev.serio);
}
@@ -686,7 +688,9 @@ static int atkbd_probe(struct atkbd *atkbd)
if (atkbd_reset)
if (ps2_command(ps2dev, NULL, ATKBD_CMD_RESET_BAT))
- printk(KERN_WARNING "atkbd.c: keyboard reset failed on %s\n", ps2dev->serio->phys);
+ dev_warn(&ps2dev->serio->dev,
+ "keyboard reset failed on %s\n",
+ ps2dev->serio->phys);
/*
* Then we check the keyboard ID. We should get 0xab83 under normal conditions.
@@ -716,8 +720,9 @@ static int atkbd_probe(struct atkbd *atkbd)
atkbd->id = (param[0] << 8) | param[1];
if (atkbd->id == 0xaca1 && atkbd->translated) {
- printk(KERN_ERR "atkbd.c: NCD terminal keyboards are only supported on non-translating\n");
- printk(KERN_ERR "atkbd.c: controllers. Use i8042.direct=1 to disable translation.\n");
+ dev_err(&ps2dev->serio->dev,
+ "NCD terminal keyboards are only supported on non-translating controlelrs. "
+ "Use i8042.direct=1 to disable translation.\n");
return -1;
}
@@ -735,7 +740,7 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
struct ps2dev *ps2dev = &atkbd->ps2dev;
unsigned char param[2];
- atkbd->extra = 0;
+ atkbd->extra = false;
/*
* For known special keyboards we can go ahead and set the correct set.
* We check for NCD PS/2 Sun, NorthGate OmniKey 101 and
@@ -754,7 +759,7 @@ static int atkbd_select_set(struct atkbd *atkbd, int target_set, int allow_extra
if (allow_extra) {
param[0] = 0x71;
if (!ps2_command(ps2dev, param, ATKBD_CMD_EX_ENABLE)) {
- atkbd->extra = 1;
+ atkbd->extra = true;
return 2;
}
}
@@ -819,7 +824,8 @@ static int atkbd_activate(struct atkbd *atkbd)
*/
if (ps2_command(ps2dev, NULL, ATKBD_CMD_ENABLE)) {
- printk(KERN_ERR "atkbd.c: Failed to enable keyboard on %s\n",
+ dev_err(&ps2dev->serio->dev,
+ "Failed to enable keyboard on %s\n",
ps2dev->serio->phys);
return -1;
}
@@ -849,13 +855,20 @@ static void atkbd_disconnect(struct serio *serio)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
+ sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
+
atkbd_disable(atkbd);
- /* make sure we don't have a command in flight */
+ input_unregister_device(atkbd->dev);
+
+ /*
+ * Make sure we don't have a command in flight.
+ * Note that since atkbd->enabled is false event work will keep
+ * rescheduling itself until it gets canceled and will not try
+ * accessing freed input device or serio port.
+ */
cancel_delayed_work_sync(&atkbd->event_work);
- sysfs_remove_group(&serio->dev.kobj, &atkbd_attribute_group);
- input_unregister_device(atkbd->dev);
serio_close(serio);
serio_set_drvdata(serio, NULL);
kfree(atkbd);
@@ -1087,16 +1100,18 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->dev = dev;
ps2_init(&atkbd->ps2dev, serio);
INIT_DELAYED_WORK(&atkbd->event_work, atkbd_event_work);
- mutex_init(&atkbd->event_mutex);
+ mutex_init(&atkbd->mutex);
switch (serio->id.type) {
- case SERIO_8042_XL:
- atkbd->translated = 1;
- case SERIO_8042:
- if (serio->write)
- atkbd->write = 1;
- break;
+ case SERIO_8042_XL:
+ atkbd->translated = true;
+ /* Fall through */
+
+ case SERIO_8042:
+ if (serio->write)
+ atkbd->write = true;
+ break;
}
atkbd->softraw = atkbd_softraw;
@@ -1104,7 +1119,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv)
atkbd->scroll = atkbd_scroll;
if (atkbd->softrepeat)
- atkbd->softraw = 1;
+ atkbd->softraw = true;
serio_set_drvdata(serio, atkbd);
@@ -1160,19 +1175,24 @@ static int atkbd_reconnect(struct serio *serio)
{
struct atkbd *atkbd = serio_get_drvdata(serio);
struct serio_driver *drv = serio->drv;
+ int retval = -1;
if (!atkbd || !drv) {
- printk(KERN_DEBUG "atkbd: reconnect request, but serio is disconnected, ignoring...\n");
+ dev_dbg(&serio->dev,
+ "reconnect request, but serio is disconnected, ignoring...\n");
return -1;
}
+ mutex_lock(&atkbd->mutex);
+
atkbd_disable(atkbd);
if (atkbd->write) {
if (atkbd_probe(atkbd))
- return -1;
+ goto out;
+
if (atkbd->set != atkbd_select_set(atkbd, atkbd->set, atkbd->extra))
- return -1;
+ goto out;
atkbd_activate(atkbd);
@@ -1190,8 +1210,11 @@ static int atkbd_reconnect(struct serio *serio)
}
atkbd_enable(atkbd);
+ retval = 0;
- return 0;
+ out:
+ mutex_unlock(&atkbd->mutex);
+ return retval;
}
static struct serio_device_id atkbd_serio_ids[] = {
@@ -1235,47 +1258,28 @@ static ssize_t atkbd_attr_show_helper(struct device *dev, char *buf,
ssize_t (*handler)(struct atkbd *, char *))
{
struct serio *serio = to_serio_port(dev);
- int retval;
-
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &atkbd_drv) {
- retval = -ENODEV;
- goto out;
- }
-
- retval = handler((struct atkbd *)serio_get_drvdata(serio), buf);
+ struct atkbd *atkbd = serio_get_drvdata(serio);
-out:
- serio_unpin_driver(serio);
- return retval;
+ return handler(atkbd, buf);
}
static ssize_t atkbd_attr_set_helper(struct device *dev, const char *buf, size_t count,
ssize_t (*handler)(struct atkbd *, const char *, size_t))
{
struct serio *serio = to_serio_port(dev);
- struct atkbd *atkbd;
+ struct atkbd *atkbd = serio_get_drvdata(serio);
int retval;
- retval = serio_pin_driver(serio);
+ retval = mutex_lock_interruptible(&atkbd->mutex);
if (retval)
return retval;
- if (serio->drv != &atkbd_drv) {
- retval = -ENODEV;
- goto out;
- }
-
- atkbd = serio_get_drvdata(serio);
atkbd_disable(atkbd);
retval = handler(atkbd, buf, count);
atkbd_enable(atkbd);
-out:
- serio_unpin_driver(serio);
+ mutex_unlock(&atkbd->mutex);
+
return retval;
}
@@ -1289,7 +1293,8 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
struct input_dev *old_dev, *new_dev;
unsigned long value;
int err;
- unsigned char old_extra, old_set;
+ bool old_extra;
+ unsigned char old_set;
if (!atkbd->write)
return -EIO;
@@ -1372,7 +1377,7 @@ static ssize_t atkbd_set_scroll(struct atkbd *atkbd, const char *buf, size_t cou
struct input_dev *old_dev, *new_dev;
unsigned long value;
int err;
- unsigned char old_scroll;
+ bool old_scroll;
if (strict_strtoul(buf, 10, &value) || value > 1)
return -EINVAL;
@@ -1416,7 +1421,8 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
struct input_dev *old_dev, *new_dev;
unsigned long value;
int err;
- unsigned char old_set, old_extra;
+ unsigned char old_set;
+ bool old_extra;
if (!atkbd->write)
return -EIO;
@@ -1466,7 +1472,7 @@ static ssize_t atkbd_set_softrepeat(struct atkbd *atkbd, const char *buf, size_t
struct input_dev *old_dev, *new_dev;
unsigned long value;
int err;
- unsigned char old_softrepeat, old_softraw;
+ bool old_softrepeat, old_softraw;
if (!atkbd->write)
return -EIO;
@@ -1486,7 +1492,7 @@ static ssize_t atkbd_set_softrepeat(struct atkbd *atkbd, const char *buf, size_t
atkbd->dev = new_dev;
atkbd->softrepeat = value;
if (atkbd->softrepeat)
- atkbd->softraw = 1;
+ atkbd->softraw = true;
atkbd_set_device_attrs(atkbd);
err = input_register_device(atkbd->dev);
@@ -1516,7 +1522,7 @@ static ssize_t atkbd_set_softraw(struct atkbd *atkbd, const char *buf, size_t co
struct input_dev *old_dev, *new_dev;
unsigned long value;
int err;
- unsigned char old_softraw;
+ bool old_softraw;
if (strict_strtoul(buf, 10, &value) || value > 1)
return -EINVAL;
diff --git a/drivers/input/keyboard/davinci_keyscan.c b/drivers/input/keyboard/davinci_keyscan.c
index 6e52d855f637..d410d7a52f1d 100644
--- a/drivers/input/keyboard/davinci_keyscan.c
+++ b/drivers/input/keyboard/davinci_keyscan.c
@@ -174,6 +174,14 @@ static int __init davinci_ks_probe(struct platform_device *pdev)
struct davinci_ks_platform_data *pdata = pdev->dev.platform_data;
int error, i;
+ if (pdata->device_enable) {
+ error = pdata->device_enable(dev);
+ if (error < 0) {
+ dev_dbg(dev, "device enable function failed\n");
+ return error;
+ }
+ }
+
if (!pdata->keymap) {
dev_dbg(dev, "no keymap from pdata\n");
return -EINVAL;
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c
index 191cc51d6cf8..31f30087b591 100644
--- a/drivers/input/keyboard/qt2160.c
+++ b/drivers/input/keyboard/qt2160.c
@@ -362,7 +362,7 @@ static int __devexit qt2160_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id qt2160_idtable[] = {
+static const struct i2c_device_id qt2160_idtable[] = {
{ "qt2160", 0, },
{ }
};
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c
index 71b82434264d..a8d2b8db4e35 100644
--- a/drivers/input/misc/apanel.c
+++ b/drivers/input/misc/apanel.c
@@ -149,7 +149,7 @@ static void apanel_shutdown(struct i2c_client *client)
apanel_remove(client);
}
-static struct i2c_device_id apanel_id[] = {
+static const struct i2c_device_id apanel_id[] = {
{ "fujitsu_apanel", 0 },
{ }
};
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 0d1d33468b43..4f8fe0886b2a 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -139,6 +139,7 @@ struct tp_finger {
/* trackpad finger data size, empirically at least ten fingers */
#define SIZEOF_FINGER sizeof(struct tp_finger)
#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER)
+#define MAX_FINGER_ORIENTATION 16384
/* device-specific parameters */
struct bcm5974_param {
@@ -284,6 +285,26 @@ static void setup_events_to_report(struct input_dev *input_dev,
input_set_abs_params(input_dev, ABS_Y,
0, cfg->y.dim, cfg->y.fuzz, 0);
+ /* finger touch area */
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ /* finger approach area */
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR,
+ cfg->w.devmin, cfg->w.devmax, 0, 0);
+ /* finger orientation */
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION,
+ -MAX_FINGER_ORIENTATION,
+ MAX_FINGER_ORIENTATION, 0, 0);
+ /* finger position */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X,
+ cfg->x.devmin, cfg->x.devmax, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
+ cfg->y.devmin, cfg->y.devmax, 0, 0);
+
__set_bit(EV_KEY, input_dev->evbit);
__set_bit(BTN_TOUCH, input_dev->keybit);
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
@@ -310,13 +331,29 @@ static int report_bt_state(struct bcm5974 *dev, int size)
return 0;
}
+static void report_finger_data(struct input_dev *input,
+ const struct bcm5974_config *cfg,
+ const struct tp_finger *f)
+{
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, raw2int(f->force_major));
+ input_report_abs(input, ABS_MT_TOUCH_MINOR, raw2int(f->force_minor));
+ input_report_abs(input, ABS_MT_WIDTH_MAJOR, raw2int(f->size_major));
+ input_report_abs(input, ABS_MT_WIDTH_MINOR, raw2int(f->size_minor));
+ input_report_abs(input, ABS_MT_ORIENTATION,
+ MAX_FINGER_ORIENTATION - raw2int(f->orientation));
+ input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x));
+ input_report_abs(input, ABS_MT_POSITION_Y,
+ cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y));
+ input_mt_sync(input);
+}
+
/* report trackpad data as logical trackpad state */
static int report_tp_state(struct bcm5974 *dev, int size)
{
const struct bcm5974_config *c = &dev->cfg;
const struct tp_finger *f;
struct input_dev *input = dev->input;
- int raw_p, raw_w, raw_x, raw_y, raw_n;
+ int raw_p, raw_w, raw_x, raw_y, raw_n, i;
int ptest, origin, ibt = 0, nmin = 0, nmax = 0;
int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0;
@@ -329,6 +366,11 @@ static int report_tp_state(struct bcm5974 *dev, int size)
/* always track the first finger; when detached, start over */
if (raw_n) {
+
+ /* report raw trackpad data */
+ for (i = 0; i < raw_n; i++)
+ report_finger_data(input, c, &f[i]);
+
raw_p = raw2int(f->force_major);
raw_w = raw2int(f->size_major);
raw_x = raw2int(f->abs_x);
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 90be30e93556..9169d1591c1f 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -68,10 +68,6 @@ module_param(post_interrupt_delay, int, 0644);
MODULE_PARM_DESC(post_interrupt_delay,
"delay (ms) before recal after recal interrupt detected");
-static int autorecal = 1;
-module_param(autorecal, int, 0644);
-MODULE_PARM_DESC(autorecal, "enable recalibration in the driver");
-
/*
* When the touchpad gets ultra-sensitive, one can keep their finger 1/2"
* above the pad and still have it send packets. This causes a jump cursor
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 401ac6b6edd4..cabf4e1caacc 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -627,8 +627,15 @@ static int psmouse_extensions(struct psmouse *psmouse,
synaptics_hardware = true;
if (max_proto > PSMOUSE_IMEX) {
- if (!set_properties || synaptics_init(psmouse) == 0)
+/*
+ * Try activating protocol, but check if support is enabled first, since
+ * we try detecting Synaptics even when protocol is disabled.
+ */
+ if (synaptics_supported() &&
+ (!set_properties || synaptics_init(psmouse) == 0)) {
return PSMOUSE_SYNAPTICS;
+ }
+
/*
* Some Synaptics touchpads can emulate extended protocols (like IMPS/2).
* Unfortunately Logitech/Genius probes confuse some firmware versions so
@@ -1450,24 +1457,10 @@ ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *de
struct serio *serio = to_serio_port(dev);
struct psmouse_attribute *attr = to_psmouse_attr(devattr);
struct psmouse *psmouse;
- int retval;
-
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &psmouse_drv) {
- retval = -ENODEV;
- goto out;
- }
psmouse = serio_get_drvdata(serio);
- retval = attr->show(psmouse, attr->data, buf);
-
-out:
- serio_unpin_driver(serio);
- return retval;
+ return attr->show(psmouse, attr->data, buf);
}
ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *devattr,
@@ -1478,18 +1471,9 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
struct psmouse *psmouse, *parent = NULL;
int retval;
- retval = serio_pin_driver(serio);
- if (retval)
- return retval;
-
- if (serio->drv != &psmouse_drv) {
- retval = -ENODEV;
- goto out_unpin;
- }
-
retval = mutex_lock_interruptible(&psmouse_mutex);
if (retval)
- goto out_unpin;
+ goto out;
psmouse = serio_get_drvdata(serio);
@@ -1519,8 +1503,7 @@ ssize_t psmouse_attr_set_helper(struct device *dev, struct device_attribute *dev
out_unlock:
mutex_unlock(&psmouse_mutex);
- out_unpin:
- serio_unpin_driver(serio);
+ out:
return retval;
}
@@ -1582,9 +1565,7 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
}
mutex_unlock(&psmouse_mutex);
- serio_unpin_driver(serio);
serio_unregister_child_port(serio);
- serio_pin_driver_uninterruptible(serio);
mutex_lock(&psmouse_mutex);
if (serio->drv != &psmouse_drv) {
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 05689e732191..d3f5243fa093 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -743,6 +743,11 @@ int synaptics_init(struct psmouse *psmouse)
return -1;
}
+bool synaptics_supported(void)
+{
+ return true;
+}
+
#else /* CONFIG_MOUSE_PS2_SYNAPTICS */
void __init synaptics_module_init(void)
@@ -754,5 +759,10 @@ int synaptics_init(struct psmouse *psmouse)
return -ENOSYS;
}
+bool synaptics_supported(void)
+{
+ return false;
+}
+
#endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 838e7f2c9b30..f0f40a331dc8 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -109,5 +109,6 @@ void synaptics_module_init(void);
int synaptics_detect(struct psmouse *psmouse, bool set_properties);
int synaptics_init(struct psmouse *psmouse);
void synaptics_reset(struct psmouse *psmouse);
+bool synaptics_supported(void);
#endif /* _SYNAPTICS_H */
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 64b688daf48a..2a5982e532f8 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -524,6 +524,13 @@ static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = {
*/
static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{
+ /* Acer Aspire 5610 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"),
+ },
+ },
+ {
/* Acer Aspire 5630 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d84a36e545f6..33f3541aaf96 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -430,7 +430,7 @@ static bool i8042_filter(unsigned char data, unsigned char str,
}
if (i8042_platform_filter && i8042_platform_filter(data, str, serio)) {
- dbg("Filtered out by platfrom filter\n");
+ dbg("Filtered out by platform filter\n");
return true;
}
diff --git a/drivers/input/serio/pcips2.c b/drivers/input/serio/pcips2.c
index 1dacbe0d9348..797314be7af2 100644
--- a/drivers/input/serio/pcips2.c
+++ b/drivers/input/serio/pcips2.c
@@ -186,7 +186,7 @@ static void __devexit pcips2_remove(struct pci_dev *dev)
pci_disable_device(dev);
}
-static struct pci_device_id pcips2_ids[] = {
+static const struct pci_device_id pcips2_ids[] = {
{
.vendor = 0x14f2, /* MOBILITY */
.device = 0x0123, /* Keyboard */
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index e0f30186d513..c3b626e9eae7 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -26,6 +26,8 @@
* Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
@@ -119,11 +121,10 @@ static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
error = device_bind_driver(&serio->dev);
if (error) {
- printk(KERN_WARNING
- "serio: device_bind_driver() failed "
- "for %s (%s) and %s, error: %d\n",
- serio->phys, serio->name,
- drv->description, error);
+ dev_warn(&serio->dev,
+ "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
+ serio->phys, serio->name,
+ drv->description, error);
serio_disconnect_driver(serio);
serio->dev.driver = NULL;
return error;
@@ -138,9 +139,9 @@ static void serio_find_driver(struct serio *serio)
error = device_attach(&serio->dev);
if (error < 0)
- printk(KERN_WARNING
- "serio: device_attach() failed for %s (%s), error: %d\n",
- serio->phys, serio->name, error);
+ dev_warn(&serio->dev,
+ "device_attach() failed for %s (%s), error: %d\n",
+ serio->phys, serio->name, error);
}
@@ -194,17 +195,14 @@ static int serio_queue_event(void *object, struct module *owner,
event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
if (!event) {
- printk(KERN_ERR
- "serio: Not enough memory to queue event %d\n",
- event_type);
+ pr_err("Not enough memory to queue event %d\n", event_type);
retval = -ENOMEM;
goto out;
}
if (!try_module_get(owner)) {
- printk(KERN_WARNING
- "serio: Can't get module reference, dropping event %d\n",
- event_type);
+ pr_warning("Can't get module reference, dropping event %d\n",
+ event_type);
kfree(event);
retval = -EINVAL;
goto out;
@@ -230,14 +228,12 @@ static void serio_free_event(struct serio_event *event)
static void serio_remove_duplicate_events(struct serio_event *event)
{
- struct list_head *node, *next;
- struct serio_event *e;
+ struct serio_event *e, *next;
unsigned long flags;
spin_lock_irqsave(&serio_event_lock, flags);
- list_for_each_safe(node, next, &serio_event_list) {
- e = list_entry(node, struct serio_event, node);
+ list_for_each_entry_safe(e, next, &serio_event_list, node) {
if (event->object == e->object) {
/*
* If this event is of different type we should not
@@ -247,7 +243,7 @@ static void serio_remove_duplicate_events(struct serio_event *event)
if (event->type != e->type)
break;
- list_del_init(node);
+ list_del_init(&e->node);
serio_free_event(e);
}
}
@@ -258,23 +254,18 @@ static void serio_remove_duplicate_events(struct serio_event *event)
static struct serio_event *serio_get_event(void)
{
- struct serio_event *event;
- struct list_head *node;
+ struct serio_event *event = NULL;
unsigned long flags;
spin_lock_irqsave(&serio_event_lock, flags);
- if (list_empty(&serio_event_list)) {
- spin_unlock_irqrestore(&serio_event_lock, flags);
- return NULL;
+ if (!list_empty(&serio_event_list)) {
+ event = list_first_entry(&serio_event_list,
+ struct serio_event, node);
+ list_del_init(&event->node);
}
- node = serio_event_list.next;
- event = list_entry(node, struct serio_event, node);
- list_del_init(node);
-
spin_unlock_irqrestore(&serio_event_lock, flags);
-
return event;
}
@@ -287,29 +278,27 @@ static void serio_handle_event(void)
while ((event = serio_get_event())) {
switch (event->type) {
- case SERIO_REGISTER_PORT:
- serio_add_port(event->object);
- break;
- case SERIO_RECONNECT_PORT:
- serio_reconnect_port(event->object);
- break;
+ case SERIO_REGISTER_PORT:
+ serio_add_port(event->object);
+ break;
- case SERIO_RESCAN_PORT:
- serio_disconnect_port(event->object);
- serio_find_driver(event->object);
- break;
+ case SERIO_RECONNECT_PORT:
+ serio_reconnect_port(event->object);
+ break;
- case SERIO_RECONNECT_CHAIN:
- serio_reconnect_chain(event->object);
- break;
+ case SERIO_RESCAN_PORT:
+ serio_disconnect_port(event->object);
+ serio_find_driver(event->object);
+ break;
- case SERIO_ATTACH_DRIVER:
- serio_attach_driver(event->object);
- break;
+ case SERIO_RECONNECT_CHAIN:
+ serio_reconnect_chain(event->object);
+ break;
- default:
- break;
+ case SERIO_ATTACH_DRIVER:
+ serio_attach_driver(event->object);
+ break;
}
serio_remove_duplicate_events(event);
@@ -325,16 +314,14 @@ static void serio_handle_event(void)
*/
static void serio_remove_pending_events(void *object)
{
- struct list_head *node, *next;
- struct serio_event *event;
+ struct serio_event *event, *next;
unsigned long flags;
spin_lock_irqsave(&serio_event_lock, flags);
- list_for_each_safe(node, next, &serio_event_list) {
- event = list_entry(node, struct serio_event, node);
+ list_for_each_entry_safe(event, next, &serio_event_list, node) {
if (event->object == object) {
- list_del_init(node);
+ list_del_init(&event->node);
serio_free_event(event);
}
}
@@ -380,7 +367,6 @@ static int serio_thread(void *nothing)
kthread_should_stop() || !list_empty(&serio_event_list));
} while (!kthread_should_stop());
- printk(KERN_DEBUG "serio: kseriod exiting\n");
return 0;
}
@@ -445,6 +431,11 @@ static struct attribute_group serio_id_attr_group = {
.attrs = serio_device_id_attrs,
};
+static const struct attribute_group *serio_device_attr_groups[] = {
+ &serio_id_attr_group,
+ NULL
+};
+
static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct serio *serio = to_serio_port(dev);
@@ -532,6 +523,7 @@ static void serio_init_port(struct serio *serio)
(long)atomic_inc_return(&serio_no) - 1);
serio->dev.bus = &serio_bus;
serio->dev.release = serio_release_port;
+ serio->dev.groups = serio_device_attr_groups;
if (serio->parent) {
serio->dev.parent = &serio->parent->dev;
serio->depth = serio->parent->depth + 1;
@@ -555,21 +547,15 @@ static void serio_add_port(struct serio *serio)
}
list_add_tail(&serio->node, &serio_list);
+
if (serio->start)
serio->start(serio);
+
error = device_add(&serio->dev);
if (error)
- printk(KERN_ERR
- "serio: device_add() failed for %s (%s), error: %d\n",
+ dev_err(&serio->dev,
+ "device_add() failed for %s (%s), error: %d\n",
serio->phys, serio->name, error);
- else {
- serio->registered = true;
- error = sysfs_create_group(&serio->dev.kobj, &serio_id_attr_group);
- if (error)
- printk(KERN_ERR
- "serio: sysfs_create_group() failed for %s (%s), error: %d\n",
- serio->phys, serio->name, error);
- }
}
/*
@@ -596,11 +582,8 @@ static void serio_destroy_port(struct serio *serio)
serio->parent = NULL;
}
- if (serio->registered) {
- sysfs_remove_group(&serio->dev.kobj, &serio_id_attr_group);
+ if (device_is_registered(&serio->dev))
device_del(&serio->dev);
- serio->registered = false;
- }
list_del_init(&serio->node);
serio_remove_pending_events(serio);
@@ -798,9 +781,8 @@ static void serio_attach_driver(struct serio_driver *drv)
error = driver_attach(&drv->driver);
if (error)
- printk(KERN_WARNING
- "serio: driver_attach() failed for %s with error %d\n",
- drv->driver.name, error);
+ pr_warning("driver_attach() failed for %s with error %d\n",
+ drv->driver.name, error);
}
int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
@@ -820,8 +802,7 @@ int __serio_register_driver(struct serio_driver *drv, struct module *owner, cons
error = driver_register(&drv->driver);
if (error) {
- printk(KERN_ERR
- "serio: driver_register() failed for %s, error: %d\n",
+ pr_err("driver_register() failed for %s, error: %d\n",
drv->driver.name, error);
return error;
}
@@ -987,7 +968,7 @@ irqreturn_t serio_interrupt(struct serio *serio,
if (likely(serio->drv)) {
ret = serio->drv->interrupt(serio, data, dfl);
- } else if (!dfl && serio->registered) {
+ } else if (!dfl && device_is_registered(&serio->dev)) {
serio_rescan(serio);
ret = IRQ_HANDLED;
}
@@ -1018,7 +999,7 @@ static int __init serio_init(void)
error = bus_register(&serio_bus);
if (error) {
- printk(KERN_ERR "serio: failed to register serio bus, error: %d\n", error);
+ pr_err("Failed to register serio bus, error: %d\n", error);
return error;
}
@@ -1026,7 +1007,7 @@ static int __init serio_init(void)
if (IS_ERR(serio_task)) {
bus_unregister(&serio_bus);
error = PTR_ERR(serio_task);
- printk(KERN_ERR "serio: Failed to start kseriod, error: %d\n", error);
+ pr_err("Failed to start kseriod, error: %d\n", error);
return error;
}
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index ebb22f88c842..78c64fb8a4b0 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -354,7 +354,7 @@ static int __devexit xps2_of_remove(struct of_device *of_dev)
}
/* Match table for of_platform binding */
-static struct of_device_id xps2_of_match[] __devinitdata = {
+static const struct of_device_id xps2_of_match[] __devinitconst = {
{ .compatible = "xlnx,xps-ps2-1.00.a", },
{ /* end of list */ },
};
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 7d005a3616d7..4be039d7dcad 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -362,7 +362,7 @@ static const int macroKeyEvents[] = {
};
/***********************************************************************
- * Map values to strings and back. Every map shoudl have the following
+ * Map values to strings and back. Every map should have the following
* as its last element: { NULL, AIPTEK_INVALID_VALUE }.
*/
#define AIPTEK_INVALID_VALUE -1
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 3d32d3f4e486..866a9ee1af1a 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -92,7 +92,7 @@ Scott Hill shill@gtcocalcomp.com
/* DATA STRUCTURES */
/* Device table */
-static struct usb_device_id gtco_usbid_table [] = {
+static const struct usb_device_id gtco_usbid_table[] = {
{ USB_DEVICE(VENDOR_ID_GTCO, PID_400) },
{ USB_DEVICE(VENDOR_ID_GTCO, PID_401) },
{ USB_DEVICE(VENDOR_ID_GTCO, PID_1000) },
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
new file mode 100644
index 000000000000..56254d2a1f6e
--- /dev/null
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -0,0 +1,241 @@
+/*
+ * Touchscreen driver for Marvell 88PM860x
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/mfd/88pm860x.h>
+
+#define MEAS_LEN (8)
+#define ACCURATE_BIT (12)
+
+/* touch register */
+#define MEAS_EN3 (0x52)
+
+#define MEAS_TSIX_1 (0x8D)
+#define MEAS_TSIX_2 (0x8E)
+#define MEAS_TSIY_1 (0x8F)
+#define MEAS_TSIY_2 (0x90)
+#define MEAS_TSIZ1_1 (0x91)
+#define MEAS_TSIZ1_2 (0x92)
+#define MEAS_TSIZ2_1 (0x93)
+#define MEAS_TSIZ2_2 (0x94)
+
+/* bit definitions of touch */
+#define MEAS_PD_EN (1 << 3)
+#define MEAS_TSIX_EN (1 << 4)
+#define MEAS_TSIY_EN (1 << 5)
+#define MEAS_TSIZ1_EN (1 << 6)
+#define MEAS_TSIZ2_EN (1 << 7)
+
+struct pm860x_touch {
+ struct input_dev *idev;
+ struct i2c_client *i2c;
+ struct pm860x_chip *chip;
+ int irq;
+ int res_x; /* resistor of Xplate */
+};
+
+static irqreturn_t pm860x_touch_handler(int irq, void *data)
+{
+ struct pm860x_touch *touch = data;
+ struct pm860x_chip *chip = touch->chip;
+ unsigned char buf[MEAS_LEN];
+ int x, y, pen_down;
+ int z1, z2, rt = 0;
+ int ret;
+
+ pm860x_mask_irq(chip, irq);
+ ret = pm860x_bulk_read(touch->i2c, MEAS_TSIX_1, MEAS_LEN, buf);
+ if (ret < 0)
+ goto out;
+
+ pen_down = buf[1] & (1 << 6);
+ x = ((buf[0] & 0xFF) << 4) | (buf[1] & 0x0F);
+ y = ((buf[2] & 0xFF) << 4) | (buf[3] & 0x0F);
+ z1 = ((buf[4] & 0xFF) << 4) | (buf[5] & 0x0F);
+ z2 = ((buf[6] & 0xFF) << 4) | (buf[7] & 0x0F);
+
+ if (pen_down) {
+ if ((x != 0) && (z1 != 0) && (touch->res_x != 0)) {
+ rt = z2 / z1 - 1;
+ rt = (rt * touch->res_x * x) >> ACCURATE_BIT;
+ dev_dbg(chip->dev, "z1:%d, z2:%d, rt:%d\n",
+ z1, z2, rt);
+ }
+ input_report_abs(touch->idev, ABS_X, x);
+ input_report_abs(touch->idev, ABS_Y, y);
+ input_report_abs(touch->idev, ABS_PRESSURE, rt);
+ input_report_key(touch->idev, BTN_TOUCH, 1);
+ dev_dbg(chip->dev, "pen down at [%d, %d].\n", x, y);
+ } else {
+ input_report_abs(touch->idev, ABS_PRESSURE, 0);
+ input_report_key(touch->idev, BTN_TOUCH, 0);
+ dev_dbg(chip->dev, "pen release\n");
+ }
+ input_sync(touch->idev);
+ pm860x_unmask_irq(chip, irq);
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int pm860x_touch_open(struct input_dev *dev)
+{
+ struct pm860x_touch *touch = input_get_drvdata(dev);
+ struct pm860x_chip *chip = touch->chip;
+ int data, ret;
+
+ data = MEAS_PD_EN | MEAS_TSIX_EN | MEAS_TSIY_EN
+ | MEAS_TSIZ1_EN | MEAS_TSIZ2_EN;
+ ret = pm860x_set_bits(touch->i2c, MEAS_EN3, data, data);
+ if (ret < 0)
+ goto out;
+ pm860x_unmask_irq(chip, touch->irq);
+ return 0;
+out:
+ return ret;
+}
+
+static void pm860x_touch_close(struct input_dev *dev)
+{
+ struct pm860x_touch *touch = input_get_drvdata(dev);
+ struct pm860x_chip *chip = touch->chip;
+ int data;
+
+ data = MEAS_PD_EN | MEAS_TSIX_EN | MEAS_TSIY_EN
+ | MEAS_TSIZ1_EN | MEAS_TSIZ2_EN;
+ pm860x_set_bits(touch->i2c, MEAS_EN3, data, 0);
+ pm860x_mask_irq(chip, touch->irq);
+}
+
+static int __devinit pm860x_touch_probe(struct platform_device *pdev)
+{
+ struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm860x_platform_data *pm860x_pdata = \
+ pdev->dev.parent->platform_data;
+ struct pm860x_touch_pdata *pdata = NULL;
+ struct pm860x_touch *touch;
+ int irq, ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No IRQ resource!\n");
+ return -EINVAL;
+ }
+
+ if (!pm860x_pdata) {
+ dev_err(&pdev->dev, "platform data is missing\n");
+ return -EINVAL;
+ }
+
+ pdata = pm860x_pdata->touch;
+ if (!pdata) {
+ dev_err(&pdev->dev, "touchscreen data is missing\n");
+ return -EINVAL;
+ }
+
+ touch = kzalloc(sizeof(struct pm860x_touch), GFP_KERNEL);
+ if (touch == NULL)
+ return -ENOMEM;
+ dev_set_drvdata(&pdev->dev, touch);
+
+ touch->idev = input_allocate_device();
+ if (touch->idev == NULL) {
+ dev_err(&pdev->dev, "Failed to allocate input device!\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ touch->idev->name = "88pm860x-touch";
+ touch->idev->phys = "88pm860x/input0";
+ touch->idev->id.bustype = BUS_I2C;
+ touch->idev->dev.parent = &pdev->dev;
+ touch->idev->open = pm860x_touch_open;
+ touch->idev->close = pm860x_touch_close;
+ touch->chip = chip;
+ touch->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
+ touch->irq = irq;
+ touch->res_x = pdata->res_x;
+ input_set_drvdata(touch->idev, touch);
+
+ ret = pm860x_request_irq(chip, irq, pm860x_touch_handler, touch);
+ if (ret < 0)
+ goto out_irq;
+
+ __set_bit(EV_ABS, touch->idev->evbit);
+ __set_bit(ABS_X, touch->idev->absbit);
+ __set_bit(ABS_Y, touch->idev->absbit);
+ __set_bit(ABS_PRESSURE, touch->idev->absbit);
+ __set_bit(EV_SYN, touch->idev->evbit);
+ __set_bit(EV_KEY, touch->idev->evbit);
+ __set_bit(BTN_TOUCH, touch->idev->keybit);
+
+ input_set_abs_params(touch->idev, ABS_X, 0, 1 << ACCURATE_BIT, 0, 0);
+ input_set_abs_params(touch->idev, ABS_Y, 0, 1 << ACCURATE_BIT, 0, 0);
+ input_set_abs_params(touch->idev, ABS_PRESSURE, 0, 1 << ACCURATE_BIT,
+ 0, 0);
+
+ ret = input_register_device(touch->idev);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to register touch!\n");
+ goto out_rg;
+ }
+
+ platform_set_drvdata(pdev, touch);
+ return 0;
+out_rg:
+ pm860x_free_irq(chip, irq);
+out_irq:
+ input_free_device(touch->idev);
+out:
+ kfree(touch);
+ return ret;
+}
+
+static int __devexit pm860x_touch_remove(struct platform_device *pdev)
+{
+ struct pm860x_touch *touch = platform_get_drvdata(pdev);
+
+ input_unregister_device(touch->idev);
+ pm860x_free_irq(touch->chip, touch->irq);
+ platform_set_drvdata(pdev, NULL);
+ kfree(touch);
+ return 0;
+}
+
+static struct platform_driver pm860x_touch_driver = {
+ .driver = {
+ .name = "88pm860x-touch",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm860x_touch_probe,
+ .remove = __devexit_p(pm860x_touch_remove),
+};
+
+static int __init pm860x_touch_init(void)
+{
+ return platform_driver_register(&pm860x_touch_driver);
+}
+module_init(pm860x_touch_init);
+
+static void __exit pm860x_touch_exit(void)
+{
+ platform_driver_unregister(&pm860x_touch_driver);
+}
+module_exit(pm860x_touch_exit);
+
+MODULE_DESCRIPTION("Touchscreen driver for Marvell Semiconductor 88PM860x");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:88pm860x-touch");
+
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index dfafc76da4fb..77db185a5b49 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -11,6 +11,18 @@ menuconfig INPUT_TOUCHSCREEN
if INPUT_TOUCHSCREEN
+config TOUCHSCREEN_88PM860X
+ tristate "Marvell 88PM860x touchscreen"
+ depends on MFD_88PM860X
+ help
+ Say Y here if you have a 88PM860x PMIC and want to enable
+ support for the built-in touchscreen.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called 88pm860x-ts.
+
config TOUCHSCREEN_ADS7846
tristate "ADS7846/TSC2046 and ADS7843 based touchscreens"
depends on SPI_MASTER
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index d61a3b4def9a..7fef7d5cca23 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -6,6 +6,7 @@
wm97xx-ts-y := wm97xx-core.o
+obj-$(CONFIG_TOUCHSCREEN_88PM860X) += 88pm860x-ts.o
obj-$(CONFIG_TOUCHSCREEN_AD7877) += ad7877.o
obj-$(CONFIG_TOUCHSCREEN_AD7879) += ad7879.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
index 8f38c5e55ce6..486d31ba9c09 100644
--- a/drivers/input/touchscreen/elo.c
+++ b/drivers/input/touchscreen/elo.c
@@ -72,45 +72,49 @@ static void elo_process_data_10(struct elo *elo, unsigned char data)
struct input_dev *dev = elo->dev;
elo->data[elo->idx] = data;
- switch (elo->idx++) {
- case 0:
- elo->csum = 0xaa;
- if (data != ELO10_LEAD_BYTE) {
- pr_debug("elo: unsynchronized data: 0x%02x\n", data);
- elo->idx = 0;
- }
- break;
- case 9:
+ switch (elo->idx++) {
+ case 0:
+ elo->csum = 0xaa;
+ if (data != ELO10_LEAD_BYTE) {
+ dev_dbg(&elo->serio->dev,
+ "unsynchronized data: 0x%02x\n", data);
elo->idx = 0;
- if (data != elo->csum) {
- pr_debug("elo: bad checksum: 0x%02x, expected 0x%02x\n",
- data, elo->csum);
- break;
- }
- if (elo->data[1] != elo->expected_packet) {
- if (elo->data[1] != ELO10_TOUCH_PACKET)
- pr_debug("elo: unexpected packet: 0x%02x\n",
- elo->data[1]);
- break;
- }
- if (likely(elo->data[1] == ELO10_TOUCH_PACKET)) {
- input_report_abs(dev, ABS_X, (elo->data[4] << 8) | elo->data[3]);
- input_report_abs(dev, ABS_Y, (elo->data[6] << 8) | elo->data[5]);
- if (elo->data[2] & ELO10_PRESSURE)
- input_report_abs(dev, ABS_PRESSURE,
- (elo->data[8] << 8) | elo->data[7]);
- input_report_key(dev, BTN_TOUCH, elo->data[2] & ELO10_TOUCH);
- input_sync(dev);
- } else if (elo->data[1] == ELO10_ACK_PACKET) {
- if (elo->data[2] == '0')
- elo->expected_packet = ELO10_TOUCH_PACKET;
- complete(&elo->cmd_done);
- } else {
- memcpy(elo->response, &elo->data[1], ELO10_PACKET_LEN);
- elo->expected_packet = ELO10_ACK_PACKET;
- }
+ }
+ break;
+
+ case 9:
+ elo->idx = 0;
+ if (data != elo->csum) {
+ dev_dbg(&elo->serio->dev,
+ "bad checksum: 0x%02x, expected 0x%02x\n",
+ data, elo->csum);
+ break;
+ }
+ if (elo->data[1] != elo->expected_packet) {
+ if (elo->data[1] != ELO10_TOUCH_PACKET)
+ dev_dbg(&elo->serio->dev,
+ "unexpected packet: 0x%02x\n",
+ elo->data[1]);
break;
+ }
+ if (likely(elo->data[1] == ELO10_TOUCH_PACKET)) {
+ input_report_abs(dev, ABS_X, (elo->data[4] << 8) | elo->data[3]);
+ input_report_abs(dev, ABS_Y, (elo->data[6] << 8) | elo->data[5]);
+ if (elo->data[2] & ELO10_PRESSURE)
+ input_report_abs(dev, ABS_PRESSURE,
+ (elo->data[8] << 8) | elo->data[7]);
+ input_report_key(dev, BTN_TOUCH, elo->data[2] & ELO10_TOUCH);
+ input_sync(dev);
+ } else if (elo->data[1] == ELO10_ACK_PACKET) {
+ if (elo->data[2] == '0')
+ elo->expected_packet = ELO10_TOUCH_PACKET;
+ complete(&elo->cmd_done);
+ } else {
+ memcpy(elo->response, &elo->data[1], ELO10_PACKET_LEN);
+ elo->expected_packet = ELO10_ACK_PACKET;
+ }
+ break;
}
elo->csum += data;
}
@@ -123,42 +127,53 @@ static void elo_process_data_6(struct elo *elo, unsigned char data)
switch (elo->idx++) {
- case 0: if ((data & 0xc0) != 0xc0) elo->idx = 0; break;
- case 1: if ((data & 0xc0) != 0x80) elo->idx = 0; break;
- case 2: if ((data & 0xc0) != 0x40) elo->idx = 0; break;
-
- case 3:
- if (data & 0xc0) {
- elo->idx = 0;
- break;
- }
+ case 0:
+ if ((data & 0xc0) != 0xc0)
+ elo->idx = 0;
+ break;
- input_report_abs(dev, ABS_X, ((elo->data[0] & 0x3f) << 6) | (elo->data[1] & 0x3f));
- input_report_abs(dev, ABS_Y, ((elo->data[2] & 0x3f) << 6) | (elo->data[3] & 0x3f));
+ case 1:
+ if ((data & 0xc0) != 0x80)
+ elo->idx = 0;
+ break;
- if (elo->id == 2) {
- input_report_key(dev, BTN_TOUCH, 1);
- input_sync(dev);
- elo->idx = 0;
- }
+ case 2:
+ if ((data & 0xc0) != 0x40)
+ elo->idx = 0;
+ break;
+ case 3:
+ if (data & 0xc0) {
+ elo->idx = 0;
break;
+ }
- case 4:
- if (data) {
- input_sync(dev);
- elo->idx = 0;
- }
- break;
+ input_report_abs(dev, ABS_X, ((elo->data[0] & 0x3f) << 6) | (elo->data[1] & 0x3f));
+ input_report_abs(dev, ABS_Y, ((elo->data[2] & 0x3f) << 6) | (elo->data[3] & 0x3f));
- case 5:
- if ((data & 0xf0) == 0) {
- input_report_abs(dev, ABS_PRESSURE, elo->data[5]);
- input_report_key(dev, BTN_TOUCH, !!elo->data[5]);
- }
+ if (elo->id == 2) {
+ input_report_key(dev, BTN_TOUCH, 1);
input_sync(dev);
elo->idx = 0;
- break;
+ }
+
+ break;
+
+ case 4:
+ if (data) {
+ input_sync(dev);
+ elo->idx = 0;
+ }
+ break;
+
+ case 5:
+ if ((data & 0xf0) == 0) {
+ input_report_abs(dev, ABS_PRESSURE, elo->data[5]);
+ input_report_key(dev, BTN_TOUCH, !!elo->data[5]);
+ }
+ input_sync(dev);
+ elo->idx = 0;
+ break;
}
}
@@ -170,17 +185,17 @@ static void elo_process_data_3(struct elo *elo, unsigned char data)
switch (elo->idx++) {
- case 0:
- if ((data & 0x7f) != 0x01)
- elo->idx = 0;
- break;
- case 2:
- input_report_key(dev, BTN_TOUCH, !(elo->data[1] & 0x80));
- input_report_abs(dev, ABS_X, elo->data[1]);
- input_report_abs(dev, ABS_Y, elo->data[2]);
- input_sync(dev);
+ case 0:
+ if ((data & 0x7f) != 0x01)
elo->idx = 0;
- break;
+ break;
+ case 2:
+ input_report_key(dev, BTN_TOUCH, !(elo->data[1] & 0x80));
+ input_report_abs(dev, ABS_X, elo->data[1]);
+ input_report_abs(dev, ABS_Y, elo->data[2]);
+ input_sync(dev);
+ elo->idx = 0;
+ break;
}
}
@@ -189,19 +204,19 @@ static irqreturn_t elo_interrupt(struct serio *serio,
{
struct elo *elo = serio_get_drvdata(serio);
- switch(elo->id) {
- case 0:
- elo_process_data_10(elo, data);
- break;
-
- case 1:
- case 2:
- elo_process_data_6(elo, data);
- break;
-
- case 3:
- elo_process_data_3(elo, data);
- break;
+ switch (elo->id) {
+ case 0:
+ elo_process_data_10(elo, data);
+ break;
+
+ case 1:
+ case 2:
+ elo_process_data_6(elo, data);
+ break;
+
+ case 3:
+ elo_process_data_3(elo, data);
+ break;
}
return IRQ_HANDLED;
@@ -261,10 +276,10 @@ static int elo_setup_10(struct elo *elo)
if (packet[3] & ELO10_PRESSURE)
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
- printk(KERN_INFO "elo: %sTouch touchscreen, fw: %02x.%02x, "
- "features: 0x%02x, controller: 0x%02x\n",
- elo_types[(packet[1] -'0') & 0x03],
- packet[5], packet[4], packet[3], packet[7]);
+ dev_info(&elo->serio->dev,
+ "%sTouch touchscreen, fw: %02x.%02x, features: 0x%02x, controller: 0x%02x\n",
+ elo_types[(packet[1] -'0') & 0x03],
+ packet[5], packet[4], packet[3], packet[7]);
return 0;
}
@@ -330,24 +345,24 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
switch (elo->id) {
- case 0: /* 10-byte protocol */
- if (elo_setup_10(elo))
- goto fail3;
+ case 0: /* 10-byte protocol */
+ if (elo_setup_10(elo))
+ goto fail3;
- break;
+ break;
- case 1: /* 6-byte protocol */
- input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
+ case 1: /* 6-byte protocol */
+ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0);
- case 2: /* 4-byte protocol */
- input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 96, 4000, 0, 0);
- break;
+ case 2: /* 4-byte protocol */
+ input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 96, 4000, 0, 0);
+ break;
- case 3: /* 3-byte protocol */
- input_set_abs_params(input_dev, ABS_X, 0, 255, 0, 0);
- input_set_abs_params(input_dev, ABS_Y, 0, 255, 0, 0);
- break;
+ case 3: /* 3-byte protocol */
+ input_set_abs_params(input_dev, ABS_X, 0, 255, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, 255, 0, 0);
+ break;
}
err = input_register_device(elo->dev);
diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c
index 7ef0d1420d3c..be23780e8a3e 100644
--- a/drivers/input/touchscreen/tsc2007.c
+++ b/drivers/input/touchscreen/tsc2007.c
@@ -358,7 +358,7 @@ static int __devexit tsc2007_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id tsc2007_idtable[] = {
+static const struct i2c_device_id tsc2007_idtable[] = {
{ "tsc2007", 0 },
{ }
};
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 09a5e7341bd5..b1b99e931f80 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -144,7 +144,7 @@ enum {
.bInterfaceClass = USB_INTERFACE_CLASS_HID, \
.bInterfaceProtocol = USB_INTERFACE_PROTOCOL_MOUSE
-static struct usb_device_id usbtouch_devices[] = {
+static const struct usb_device_id usbtouch_devices[] = {
#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
/* ignore the HID capable devices, handled by usbhid */
{USB_DEVICE_HID_CLASS(0x0eef, 0x0001), .driver_info = DEVTYPE_IGNORE},
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index c721c0a23eb8..d30436fee476 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -321,7 +321,7 @@ InitWait:
}
}
-static struct xenbus_device_id xenkbd_ids[] = {
+static const struct xenbus_device_id xenkbd_ids[] = {
{ "vkbd" },
{ "" }
};
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index a6624ad252c5..1a1420d7a828 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -3152,7 +3152,7 @@ static void
hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx,
int slot_rx, int bank_rx)
{
- if (slot_rx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
+ if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) {
/* disable PCM */
mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0);
return;
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 09095c747110..f0bc6fa95809 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -1712,13 +1712,13 @@ mISDNisar_init(struct isar_hw *isar, void *hw)
}
EXPORT_SYMBOL(mISDNisar_init);
-static int isar_mod_init(void)
+static int __init isar_mod_init(void)
{
pr_notice("mISDN: ISAR driver Rev. %s\n", ISAR_REV);
return 0;
}
-static void isar_mod_cleanup(void)
+static void __exit isar_mod_cleanup(void)
{
pr_notice("mISDN: ISAR module unloaded\n");
}
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 43ff4d3b046e..6eac588e0a37 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1114,7 +1114,7 @@ static struct Bprotocol DSP = {
.create = dspcreate
};
-static int dsp_init(void)
+static int __init dsp_init(void)
{
int err;
int tics;
@@ -1212,7 +1212,7 @@ static int dsp_init(void)
}
-static void dsp_cleanup(void)
+static void __exit dsp_cleanup(void)
{
mISDN_unregister_Bprotocol(&DSP);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index f1e8af54dff0..0843fcf8b381 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1509,7 +1509,7 @@ l1oip_init(void)
printk(KERN_DEBUG "%s: interface %d is %s with %s.\n",
__func__, l1oip_cnt, pri ? "PRI" : "BRI",
bundle ? "bundled IP packet for all B-channels" :
- "seperate IP packets for every B-channel");
+ "separate IP packets for every B-channel");
hc = kzalloc(sizeof(struct l1oip), GFP_ATOMIC);
if (!hc) {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 8a0e1ec95e4a..e0b64312e66a 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -17,6 +17,13 @@ config LEDS_CLASS
comment "LED drivers"
+config LEDS_88PM860X
+ tristate "LED Support for Marvell 88PM860x PMIC"
+ depends on LEDS_CLASS && MFD_88PM860X
+ help
+ This option enables support for on-chip LED drivers found on Marvell
+ Semiconductor 88PM8606 PMIC.
+
config LEDS_ATMEL_PWM
tristate "LED Support using Atmel PWM outputs"
depends on LEDS_CLASS && ATMEL_PWM
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 9e63869d7c0d..d76fb32b77c0 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o
obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
# LED Platform Drivers
+obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
new file mode 100644
index 000000000000..d196073a6aeb
--- /dev/null
+++ b/drivers/leds/leds-88pm860x.c
@@ -0,0 +1,325 @@
+/*
+ * LED driver for Marvell 88PM860x
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/workqueue.h>
+#include <linux/mfd/88pm860x.h>
+
+#define LED_PWM_SHIFT (3)
+#define LED_PWM_MASK (0x1F)
+#define LED_CURRENT_MASK (0x07 << 5)
+
+#define LED_BLINK_ON_MASK (0x07)
+#define LED_BLINK_PERIOD_MASK (0x0F << 3)
+#define LED_BLINK_MASK (0x7F)
+
+#define LED_BLINK_ON(x) ((x & 0x7) * 66 + 66)
+#define LED_BLINK_PERIOD(x) ((x & 0xF) * 530 + 930)
+#define LED_BLINK_ON_MIN LED_BLINK_ON(0)
+#define LED_BLINK_ON_MAX LED_BLINK_ON(0x7)
+#define LED_BLINK_PERIOD_MIN LED_BLINK_PERIOD(0)
+#define LED_BLINK_PERIOD_MAX LED_BLINK_PERIOD(0xE)
+#define LED_TO_ON(x) ((x - 66) / 66)
+#define LED_TO_PERIOD(x) ((x - 930) / 530)
+
+#define LED1_BLINK_EN (1 << 1)
+#define LED2_BLINK_EN (1 << 2)
+
+enum {
+ SET_BRIGHTNESS,
+ SET_BLINK,
+};
+
+struct pm860x_led {
+ struct led_classdev cdev;
+ struct i2c_client *i2c;
+ struct work_struct work;
+ struct pm860x_chip *chip;
+ struct mutex lock;
+ char name[MFD_NAME_SIZE];
+
+ int port;
+ int iset;
+ int command;
+ int offset;
+ unsigned char brightness;
+ unsigned char current_brightness;
+
+ int blink_data;
+ int blink_time;
+ int blink_on;
+ int blink_off;
+};
+
+/* return offset of color register */
+static inline int __led_off(int port)
+{
+ int ret = -EINVAL;
+
+ switch (port) {
+ case PM8606_LED1_RED:
+ case PM8606_LED1_GREEN:
+ case PM8606_LED1_BLUE:
+ ret = port - PM8606_LED1_RED + PM8606_RGB1B;
+ break;
+ case PM8606_LED2_RED:
+ case PM8606_LED2_GREEN:
+ case PM8606_LED2_BLUE:
+ ret = port - PM8606_LED2_RED + PM8606_RGB2B;
+ break;
+ }
+ return ret;
+}
+
+/* return offset of blink register */
+static inline int __blink_off(int port)
+{
+ int ret = -EINVAL;
+
+ switch (port) {
+ case PM8606_LED1_RED:
+ case PM8606_LED1_GREEN:
+ case PM8606_LED1_BLUE:
+ ret = PM8606_RGB1A;
+ case PM8606_LED2_RED:
+ case PM8606_LED2_GREEN:
+ case PM8606_LED2_BLUE:
+ ret = PM8606_RGB2A;
+ }
+ return ret;
+}
+
+static inline int __blink_ctl_mask(int port)
+{
+ int ret = -EINVAL;
+
+ switch (port) {
+ case PM8606_LED1_RED:
+ case PM8606_LED1_GREEN:
+ case PM8606_LED1_BLUE:
+ ret = LED1_BLINK_EN;
+ break;
+ case PM8606_LED2_RED:
+ case PM8606_LED2_GREEN:
+ case PM8606_LED2_BLUE:
+ ret = LED2_BLINK_EN;
+ break;
+ }
+ return ret;
+}
+
+static int __led_set(struct pm860x_led *led, int command)
+{
+ struct pm860x_chip *chip = led->chip;
+ int mask, ret;
+
+ mutex_lock(&led->lock);
+ switch (command) {
+ case SET_BRIGHTNESS:
+ if ((led->current_brightness == 0) && led->brightness) {
+ if (led->iset) {
+ ret = pm860x_set_bits(led->i2c, led->offset,
+ LED_CURRENT_MASK, led->iset);
+ if (ret < 0)
+ goto out;
+ }
+ } else if (led->brightness == 0) {
+ ret = pm860x_set_bits(led->i2c, led->offset,
+ LED_CURRENT_MASK, 0);
+ if (ret < 0)
+ goto out;
+ }
+ ret = pm860x_set_bits(led->i2c, led->offset, LED_PWM_MASK,
+ led->brightness);
+ if (ret < 0)
+ goto out;
+ led->current_brightness = led->brightness;
+ dev_dbg(chip->dev, "Update LED. (reg:%d, brightness:%d)\n",
+ led->offset, led->brightness);
+ break;
+ case SET_BLINK:
+ ret = pm860x_set_bits(led->i2c, led->offset,
+ LED_BLINK_MASK, led->blink_data);
+ if (ret < 0)
+ goto out;
+
+ mask = __blink_ctl_mask(led->port);
+ ret = pm860x_set_bits(led->i2c, PM8606_WLED3B, mask, mask);
+ if (ret < 0)
+ goto out;
+ dev_dbg(chip->dev, "LED blink delay on:%dms, delay off:%dms\n",
+ led->blink_on, led->blink_off);
+ break;
+ }
+out:
+ mutex_unlock(&led->lock);
+ return 0;
+}
+
+static void pm860x_led_work(struct work_struct *work)
+{
+ struct pm860x_led *led;
+
+ led = container_of(work, struct pm860x_led, work);
+ __led_set(led, led->command);
+}
+
+static void pm860x_led_set(struct led_classdev *cdev,
+ enum led_brightness value)
+{
+ struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev);
+
+ data->offset = __led_off(data->port);
+ data->brightness = value >> 3;
+ data->command = SET_BRIGHTNESS;
+ schedule_work(&data->work);
+}
+
+static int pm860x_led_blink(struct led_classdev *cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct pm860x_led *data = container_of(cdev, struct pm860x_led, cdev);
+ int period, on;
+
+ on = *delay_on;
+ if ((on < LED_BLINK_ON_MIN) || (on > LED_BLINK_ON_MAX))
+ return -EINVAL;
+
+ on = LED_TO_ON(on);
+ on = LED_BLINK_ON(on);
+
+ period = on + *delay_off;
+ if ((period < LED_BLINK_PERIOD_MIN) || (period > LED_BLINK_PERIOD_MAX))
+ return -EINVAL;
+ period = LED_TO_PERIOD(period);
+ period = LED_BLINK_PERIOD(period);
+
+ data->offset = __blink_off(data->port);
+ data->blink_on = on;
+ data->blink_off = period - data->blink_on;
+ data->blink_data = (period << 3) | data->blink_on;
+ data->command = SET_BLINK;
+ schedule_work(&data->work);
+
+ return 0;
+}
+
+static int __check_device(struct pm860x_led_pdata *pdata, char *name)
+{
+ struct pm860x_led_pdata *p = pdata;
+ int ret = -EINVAL;
+
+ while (p && p->id) {
+ if ((p->id != PM8606_ID_LED) || (p->flags < 0))
+ break;
+
+ if (!strncmp(name, pm860x_led_name[p->flags],
+ MFD_NAME_SIZE)) {
+ ret = (int)p->flags;
+ break;
+ }
+ p++;
+ }
+ return ret;
+}
+
+static int pm860x_led_probe(struct platform_device *pdev)
+{
+ struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm860x_platform_data *pm860x_pdata;
+ struct pm860x_led_pdata *pdata;
+ struct pm860x_led *data;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No I/O resource!\n");
+ return -EINVAL;
+ }
+
+ if (pdev->dev.parent->platform_data) {
+ pm860x_pdata = pdev->dev.parent->platform_data;
+ pdata = pm860x_pdata->led;
+ } else
+ pdata = NULL;
+
+ data = kzalloc(sizeof(struct pm860x_led), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ strncpy(data->name, res->name, MFD_NAME_SIZE);
+ dev_set_drvdata(&pdev->dev, data);
+ data->chip = chip;
+ data->i2c = (chip->id == CHIP_PM8606) ? chip->client : chip->companion;
+ data->iset = pdata->iset;
+ data->port = __check_device(pdata, data->name);
+ if (data->port < 0)
+ return -EINVAL;
+
+ data->current_brightness = 0;
+ data->cdev.name = data->name;
+ data->cdev.brightness_set = pm860x_led_set;
+ data->cdev.blink_set = pm860x_led_blink;
+ mutex_init(&data->lock);
+ INIT_WORK(&data->work, pm860x_led_work);
+
+ ret = led_classdev_register(chip->dev, &data->cdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
+ goto out;
+ }
+ return 0;
+out:
+ kfree(data);
+ return ret;
+}
+
+static int pm860x_led_remove(struct platform_device *pdev)
+{
+ struct pm860x_led *data = platform_get_drvdata(pdev);
+
+ led_classdev_unregister(&data->cdev);
+ kfree(data);
+
+ return 0;
+}
+
+static struct platform_driver pm860x_led_driver = {
+ .driver = {
+ .name = "88pm860x-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm860x_led_probe,
+ .remove = pm860x_led_remove,
+};
+
+static int __devinit pm860x_led_init(void)
+{
+ return platform_driver_register(&pm860x_led_driver);
+}
+module_init(pm860x_led_init);
+
+static void __devexit pm860x_led_exit(void)
+{
+ platform_driver_unregister(&pm860x_led_driver);
+}
+module_exit(pm860x_led_exit);
+
+MODULE_DESCRIPTION("LED driver for Marvell PM860x");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:88pm860x-led");
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index be625475cf6d..4b22feb01a0c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -503,16 +503,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
return 0;
}
- if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
- DMWARN("%s: target device %s is misaligned: "
+ if (bdev_stack_limits(limits, bdev, start) < 0)
+ DMWARN("%s: adding target device %s caused an alignment inconsistency: "
"physical_block_size=%u, logical_block_size=%u, "
"alignment_offset=%u, start=%llu",
dm_device_name(ti->table->md), bdevname(bdev, b),
q->limits.physical_block_size,
q->limits.logical_block_size,
q->limits.alignment_offset,
- (unsigned long long) start << 9);
-
+ (unsigned long long) start << SECTOR_SHIFT);
/*
* Check if merge fn is supported.
@@ -1026,9 +1025,9 @@ combine_limits:
* for the table.
*/
if (blk_stack_limits(limits, &ti_limits, 0) < 0)
- DMWARN("%s: target device "
+ DMWARN("%s: adding target device "
"(start sect %llu len %llu) "
- "is misaligned",
+ "caused an alignment inconsistency",
dm_device_name(table->md),
(unsigned long long) ti->begin,
(unsigned long long) ti->len);
@@ -1080,15 +1079,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
/*
- * Each target device in the table has a data area that should normally
- * be aligned such that the DM device's alignment_offset is 0.
- * FIXME: Propagate alignment_offsets up the stack and warn of
- * sub-optimal or inconsistent settings.
- */
- limits->alignment_offset = 0;
- limits->misaligned = 0;
-
- /*
* Copy table's limits to the DM device's request_queue
*/
q->limits = *limits;
diff --git a/drivers/media/IR/Makefile b/drivers/media/IR/Makefile
index df5ddb4bbbf7..171890e7a41d 100644
--- a/drivers/media/IR/Makefile
+++ b/drivers/media/IR/Makefile
@@ -1,5 +1,5 @@
ir-common-objs := ir-functions.o ir-keymaps.o
-ir-core-objs := ir-keytable.o
+ir-core-objs := ir-keytable.o ir-sysfs.o
obj-$(CONFIG_IR_CORE) += ir-core.o
obj-$(CONFIG_VIDEO_IR) += ir-common.o
diff --git a/drivers/media/IR/ir-functions.c b/drivers/media/IR/ir-functions.c
index 776a136616d6..ab06919ad5fc 100644
--- a/drivers/media/IR/ir-functions.c
+++ b/drivers/media/IR/ir-functions.c
@@ -52,7 +52,7 @@ static void ir_input_key_event(struct input_dev *dev, struct ir_input_state *ir)
/* -------------------------------------------------------------------------- */
int ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
- int ir_type)
+ const u64 ir_type)
{
ir->ir_type = ir_type;
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index bff7a5356037..c3e36b708bc7 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -65,7 +65,7 @@ exit:
* In order to reduce the quantity of table resizes, it has a minimum
* table size of IR_TAB_MIN_SIZE.
*/
-int ir_roundup_tablesize(int n_elems)
+static int ir_roundup_tablesize(int n_elems)
{
size_t size;
@@ -81,7 +81,6 @@ int ir_roundup_tablesize(int n_elems)
return n_elems;
}
-EXPORT_SYMBOL_GPL(ir_roundup_tablesize);
/**
* ir_copy_table() - copies a keytable, discarding the unused entries
@@ -89,9 +88,11 @@ EXPORT_SYMBOL_GPL(ir_roundup_tablesize);
* @origin: origin table
*
* Copies all entries where the keycode is not KEY_UNKNOWN/KEY_RESERVED
+ * Also copies table size and table protocol.
+ * NOTE: It shouldn't copy the lock field
*/
-int ir_copy_table(struct ir_scancode_table *destin,
+static int ir_copy_table(struct ir_scancode_table *destin,
const struct ir_scancode_table *origin)
{
int i, j = 0;
@@ -105,12 +106,12 @@ int ir_copy_table(struct ir_scancode_table *destin,
j++;
}
destin->size = j;
+ destin->ir_type = origin->ir_type;
IR_dprintk(1, "Copied %d scancodes to the new keycode table\n", destin->size);
return 0;
}
-EXPORT_SYMBOL_GPL(ir_copy_table);
/**
* ir_getkeycode() - get a keycode at the evdev scancode ->keycode table
@@ -399,12 +400,14 @@ EXPORT_SYMBOL_GPL(ir_g_keycode_from_table);
* @input_dev: the struct input_dev descriptor of the device
* @rc_tab: the struct ir_scancode_table table of scancode/keymap
*
- * This routine is used to initialize the input infrastructure to work with
- * an IR.
- * It should be called before registering the IR device.
+ * This routine is used to initialize the input infrastructure
+ * to work with an IR.
+ * It will register the input/evdev interface for the device and
+ * register the syfs code for IR class
*/
int ir_input_register(struct input_dev *input_dev,
- struct ir_scancode_table *rc_tab)
+ const struct ir_scancode_table *rc_tab,
+ const struct ir_dev_props *props)
{
struct ir_input_dev *ir_dev;
struct ir_scancode *keymap = rc_tab->scan;
@@ -417,7 +420,7 @@ int ir_input_register(struct input_dev *input_dev,
if (!ir_dev)
return -ENOMEM;
- spin_lock_init(&rc_tab->lock);
+ spin_lock_init(&ir_dev->rc_tab.lock);
ir_dev->rc_tab.size = ir_roundup_tablesize(rc_tab->size);
ir_dev->rc_tab.scan = kzalloc(ir_dev->rc_tab.size *
@@ -430,6 +433,7 @@ int ir_input_register(struct input_dev *input_dev,
ir_dev->rc_tab.size * sizeof(ir_dev->rc_tab.scan));
ir_copy_table(&ir_dev->rc_tab, rc_tab);
+ ir_dev->props = props;
/* set the bits for the keys */
IR_dprintk(1, "key map size: %d\n", rc_tab->size);
@@ -447,16 +451,31 @@ int ir_input_register(struct input_dev *input_dev,
input_set_drvdata(input_dev, ir_dev);
rc = input_register_device(input_dev);
+ if (rc < 0)
+ goto err;
+
+ rc = ir_register_class(input_dev);
if (rc < 0) {
- kfree(rc_tab->scan);
- kfree(ir_dev);
- input_set_drvdata(input_dev, NULL);
+ input_unregister_device(input_dev);
+ goto err;
}
+ return 0;
+
+err:
+ kfree(rc_tab->scan);
+ kfree(ir_dev);
+ input_set_drvdata(input_dev, NULL);
return rc;
}
EXPORT_SYMBOL_GPL(ir_input_register);
+/**
+ * ir_input_unregister() - unregisters IR and frees resources
+ * @input_dev: the struct input_dev descriptor of the device
+
+ * This routine is used to free memory and de-register interfaces.
+ */
void ir_input_unregister(struct input_dev *dev)
{
struct ir_input_dev *ir_dev = input_get_drvdata(dev);
@@ -472,6 +491,8 @@ void ir_input_unregister(struct input_dev *dev)
kfree(rc_tab->scan);
rc_tab->scan = NULL;
+ ir_unregister_class(dev);
+
kfree(ir_dev);
input_unregister_device(dev);
}
diff --git a/drivers/media/IR/ir-sysfs.c b/drivers/media/IR/ir-sysfs.c
new file mode 100644
index 000000000000..6ec7f89d5142
--- /dev/null
+++ b/drivers/media/IR/ir-sysfs.c
@@ -0,0 +1,210 @@
+/* ir-register.c - handle IR scancode->keycode tables
+ *
+ * Copyright (C) 2009 by Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/input.h>
+#include <linux/device.h>
+#include <media/ir-core.h>
+
+#define IRRCV_NUM_DEVICES 256
+
+/* bit array to represent IR sysfs device number */
+static unsigned long ir_core_dev_number;
+
+/* class for /sys/class/irrcv */
+static struct class *ir_input_class;
+
+/**
+ * show_protocol() - shows the current IR protocol
+ * @d: the device descriptor
+ * @mattr: the device attribute struct (unused)
+ * @buf: a pointer to the output buffer
+ *
+ * This routine is a callback routine for input read the IR protocol type.
+ * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * It returns the protocol name, as understood by the driver.
+ */
+static ssize_t show_protocol(struct device *d,
+ struct device_attribute *mattr, char *buf)
+{
+ char *s;
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ u64 ir_type = ir_dev->rc_tab.ir_type;
+
+ IR_dprintk(1, "Current protocol is %lld\n", (long long)ir_type);
+
+ /* FIXME: doesn't support multiple protocols at the same time */
+ if (ir_type == IR_TYPE_UNKNOWN)
+ s = "Unknown";
+ else if (ir_type == IR_TYPE_RC5)
+ s = "RC-5";
+ else if (ir_type == IR_TYPE_PD)
+ s = "Pulse/distance";
+ else if (ir_type == IR_TYPE_NEC)
+ s = "NEC";
+ else
+ s = "Other";
+
+ return sprintf(buf, "%s\n", s);
+}
+
+/**
+ * store_protocol() - shows the current IR protocol
+ * @d: the device descriptor
+ * @mattr: the device attribute struct (unused)
+ * @buf: a pointer to the input buffer
+ * @len: length of the input buffer
+ *
+ * This routine is a callback routine for changing the IR protocol type.
+ * it is trigged by reading /sys/class/irrcv/irrcv?/current_protocol.
+ * It changes the IR the protocol name, if the IR type is recognized
+ * by the driver.
+ * If an unknown protocol name is used, returns -EINVAL.
+ */
+static ssize_t store_protocol(struct device *d,
+ struct device_attribute *mattr,
+ const char *data,
+ size_t len)
+{
+ struct ir_input_dev *ir_dev = dev_get_drvdata(d);
+ u64 ir_type = IR_TYPE_UNKNOWN;
+ int rc = -EINVAL;
+ unsigned long flags;
+ char *buf;
+
+ buf = strsep((char **) &data, "\n");
+
+ if (!strcasecmp(buf, "rc-5"))
+ ir_type = IR_TYPE_RC5;
+ else if (!strcasecmp(buf, "pd"))
+ ir_type = IR_TYPE_PD;
+ else if (!strcasecmp(buf, "nec"))
+ ir_type = IR_TYPE_NEC;
+
+ if (ir_type == IR_TYPE_UNKNOWN) {
+ IR_dprintk(1, "Error setting protocol to %lld\n",
+ (long long)ir_type);
+ return -EINVAL;
+ }
+
+ if (ir_dev->props && ir_dev->props->change_protocol)
+ rc = ir_dev->props->change_protocol(ir_dev->props->priv,
+ ir_type);
+
+ if (rc < 0) {
+ IR_dprintk(1, "Error setting protocol to %lld\n",
+ (long long)ir_type);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&ir_dev->rc_tab.lock, flags);
+ ir_dev->rc_tab.ir_type = ir_type;
+ spin_unlock_irqrestore(&ir_dev->rc_tab.lock, flags);
+
+ IR_dprintk(1, "Current protocol is %lld\n",
+ (long long)ir_type);
+
+ return len;
+}
+
+/*
+ * Static device attribute struct with the sysfs attributes for IR's
+ */
+static DEVICE_ATTR(current_protocol, S_IRUGO | S_IWUSR,
+ show_protocol, store_protocol);
+
+static struct attribute *ir_dev_attrs[] = {
+ &dev_attr_current_protocol.attr,
+};
+
+/**
+ * ir_register_class() - creates the sysfs for /sys/class/irrcv/irrcv?
+ * @input_dev: the struct input_dev descriptor of the device
+ *
+ * This routine is used to register the syfs code for IR class
+ */
+int ir_register_class(struct input_dev *input_dev)
+{
+ int rc;
+ struct kobject *kobj;
+
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ int devno = find_first_zero_bit(&ir_core_dev_number,
+ IRRCV_NUM_DEVICES);
+
+ if (unlikely(devno < 0))
+ return devno;
+
+ ir_dev->attr.attrs = ir_dev_attrs;
+ ir_dev->class_dev = device_create(ir_input_class, NULL,
+ input_dev->dev.devt, ir_dev,
+ "irrcv%d", devno);
+ kobj = &ir_dev->class_dev->kobj;
+
+ printk(KERN_WARNING "Creating IR device %s\n", kobject_name(kobj));
+ rc = sysfs_create_group(kobj, &ir_dev->attr);
+ if (unlikely(rc < 0)) {
+ device_destroy(ir_input_class, input_dev->dev.devt);
+ return -ENOMEM;
+ }
+
+ ir_dev->devno = devno;
+ set_bit(devno, &ir_core_dev_number);
+
+ return 0;
+};
+
+/**
+ * ir_unregister_class() - removes the sysfs for sysfs for
+ * /sys/class/irrcv/irrcv?
+ * @input_dev: the struct input_dev descriptor of the device
+ *
+ * This routine is used to unregister the syfs code for IR class
+ */
+void ir_unregister_class(struct input_dev *input_dev)
+{
+ struct ir_input_dev *ir_dev = input_get_drvdata(input_dev);
+ struct kobject *kobj;
+
+ clear_bit(ir_dev->devno, &ir_core_dev_number);
+
+ kobj = &ir_dev->class_dev->kobj;
+
+ sysfs_remove_group(kobj, &ir_dev->attr);
+ device_destroy(ir_input_class, input_dev->dev.devt);
+
+ kfree(ir_dev->attr.name);
+}
+
+/*
+ * Init/exit code for the module. Basically, creates/removes /sys/class/irrcv
+ */
+
+static int __init ir_core_init(void)
+{
+ ir_input_class = class_create(THIS_MODULE, "irrcv");
+ if (IS_ERR(ir_input_class)) {
+ printk(KERN_ERR "ir_core: unable to register irrcv class\n");
+ return PTR_ERR(ir_input_class);
+ }
+
+ return 0;
+}
+
+static void __exit ir_core_exit(void)
+{
+ class_destroy(ir_input_class);
+}
+
+module_init(ir_core_init);
+module_exit(ir_core_exit);
diff --git a/drivers/media/common/tuners/tuner-xc2028.c b/drivers/media/common/tuners/tuner-xc2028.c
index f270e605da83..72baaf13207c 100644
--- a/drivers/media/common/tuners/tuner-xc2028.c
+++ b/drivers/media/common/tuners/tuner-xc2028.c
@@ -99,6 +99,7 @@ struct xc2028_data {
if (size != _rc) \
tuner_info("i2c output error: rc = %d (should be %d)\n",\
_rc, (int)size); \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -118,6 +119,7 @@ struct xc2028_data {
if (isize != _rc) \
tuner_err("i2c input error: rc = %d (should be %d)\n", \
_rc, (int)isize); \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -129,7 +131,7 @@ struct xc2028_data {
_val, sizeof(_val)))) { \
tuner_err("Error on line %d: %d\n", __LINE__, _rc); \
} else \
- msleep(10); \
+ msleep(priv->ctrl.msleep); \
_rc; \
})
@@ -808,10 +810,20 @@ check_device:
hwmodel, (version & 0xf000) >> 12, (version & 0xf00) >> 8,
(version & 0xf0) >> 4, version & 0xf);
+
+ if (priv->ctrl.read_not_reliable)
+ goto read_not_reliable;
+
/* Check firmware version against what we downloaded. */
if (priv->firm_version != ((version & 0xf0) << 4 | (version & 0x0f))) {
- tuner_err("Incorrect readback of firmware version.\n");
- goto fail;
+ if (!priv->ctrl.read_not_reliable) {
+ tuner_err("Incorrect readback of firmware version.\n");
+ goto fail;
+ } else {
+ tuner_err("Returned an incorrect version. However, "
+ "read is not reliable enough. Ignoring it.\n");
+ hwmodel = 3028;
+ }
}
/* Check that the tuner hardware model remains consistent over time. */
@@ -825,6 +837,7 @@ check_device:
goto fail;
}
+read_not_reliable:
memcpy(&priv->cur_fw, &new_fw, sizeof(priv->cur_fw));
/*
@@ -957,6 +970,7 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
The reset CLK is needed only with tm6000.
Driver should work fine even if this fails.
*/
+ msleep(priv->ctrl.msleep);
do_tuner_callback(fe, XC2028_RESET_CLK, 1);
msleep(10);
diff --git a/drivers/media/common/tuners/tuner-xc2028.h b/drivers/media/common/tuners/tuner-xc2028.h
index a90c35d50add..9778c96a5006 100644
--- a/drivers/media/common/tuners/tuner-xc2028.h
+++ b/drivers/media/common/tuners/tuner-xc2028.h
@@ -33,12 +33,14 @@ enum firmware_type {
struct xc2028_ctrl {
char *fname;
int max_len;
+ int msleep;
unsigned int scode_table;
unsigned int mts :1;
unsigned int input1:1;
unsigned int vhfbw7:1;
unsigned int uhfbw8:1;
unsigned int disable_power_mgmt:1;
+ unsigned int read_not_reliable:1;
unsigned int demod;
enum firmware_type type:2;
};
diff --git a/drivers/media/dvb/Kconfig b/drivers/media/dvb/Kconfig
index 35d0817126e9..cf8f65f309da 100644
--- a/drivers/media/dvb/Kconfig
+++ b/drivers/media/dvb/Kconfig
@@ -72,6 +72,10 @@ comment "Supported Earthsoft PT1 Adapters"
depends on DVB_CORE && PCI && I2C
source "drivers/media/dvb/pt1/Kconfig"
+comment "Supported Mantis Adapters"
+ depends on DVB_CORE && PCI && I2C
+ source "drivers/media/dvb/mantis/Kconfig"
+
comment "Supported DVB Frontends"
depends on DVB_CORE
source "drivers/media/dvb/frontends/Kconfig"
diff --git a/drivers/media/dvb/Makefile b/drivers/media/dvb/Makefile
index 16d262ddb45d..c12922c3659b 100644
--- a/drivers/media/dvb/Makefile
+++ b/drivers/media/dvb/Makefile
@@ -2,6 +2,18 @@
# Makefile for the kernel multimedia device drivers.
#
-obj-y := dvb-core/ frontends/ ttpci/ ttusb-dec/ ttusb-budget/ b2c2/ bt8xx/ dvb-usb/ pluto2/ siano/ dm1105/ pt1/
+obj-y := dvb-core/ \
+ frontends/ \
+ ttpci/ \
+ ttusb-dec/ \
+ ttusb-budget/ \
+ b2c2/ \
+ bt8xx/ \
+ dvb-usb/ \
+ pluto2/ \
+ siano/ \
+ dm1105/ \
+ pt1/ \
+ mantis/
obj-$(CONFIG_DVB_FIREDTV) += firewire/
diff --git a/drivers/media/dvb/bt8xx/bt878.c b/drivers/media/dvb/bt8xx/bt878.c
index a24c125331f0..2a0886ad787f 100644
--- a/drivers/media/dvb/bt8xx/bt878.c
+++ b/drivers/media/dvb/bt8xx/bt878.c
@@ -582,7 +582,7 @@ static int bt878_pci_driver_registered;
/* Module management functions */
/*******************************/
-static int bt878_init_module(void)
+static int __init bt878_init_module(void)
{
bt878_num = 0;
bt878_pci_driver_registered = 0;
@@ -600,7 +600,7 @@ static int bt878_init_module(void)
return pci_register_driver(&bt878_pci_driver);
}
-static void bt878_cleanup_module(void)
+static void __exit bt878_cleanup_module(void)
{
if (bt878_pci_driver_registered) {
bt878_pci_driver_registered = 0;
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c
index f0f483ac8b89..aadf803c261c 100644
--- a/drivers/media/dvb/dm1105/dm1105.c
+++ b/drivers/media/dvb/dm1105/dm1105.c
@@ -578,7 +578,7 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105)
{
struct input_dev *input_dev;
struct ir_scancode_table *ir_codes = &ir_codes_dm1105_nec_table;
- int ir_type = IR_TYPE_OTHER;
+ u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
input_dev = input_allocate_device();
@@ -611,7 +611,7 @@ int __devinit dm1105_ir_init(struct dm1105dvb *dm1105)
INIT_WORK(&dm1105->ir.work, dm1105_emit_key);
- err = ir_input_register(input_dev, ir_codes);
+ err = ir_input_register(input_dev, ir_codes, NULL);
return err;
}
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 8b8558fcb042..da6552d32cfe 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -949,11 +949,8 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
(*secfilter)->filter_mask[10] = mac_mask[1];
(*secfilter)->filter_mask[11]=mac_mask[0];
- dprintk("%s: filter mac=%02x %02x %02x %02x %02x %02x\n",
- dev->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- dprintk("%s: filter mask=%02x %02x %02x %02x %02x %02x\n",
- dev->name, mac_mask[0], mac_mask[1], mac_mask[2],
- mac_mask[3], mac_mask[4], mac_mask[5]);
+ dprintk("%s: filter mac=%pM\n", dev->name, mac);
+ dprintk("%s: filter mask=%pM\n", dev->name, mac_mask);
return 0;
}
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index 05fb28e9c69e..a7b8405c291e 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -1184,6 +1184,9 @@ static struct atbm8830_config mygica_d689_atbm8830_cfg = {
.osc_clk_freq = 30400, /* in kHz */
.if_freq = 0, /* zero IF */
.zif_swap_iq = 1,
+ .agc_min = 0x2E,
+ .agc_max = 0x90,
+ .agc_hold_loop = 0,
};
static int cxusb_mygica_d689_frontend_attach(struct dvb_usb_adapter *adap)
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 64132c0cf80d..83a35524a82a 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -1,6 +1,7 @@
/* DVB USB framework compliant Linux driver for the
* DVBWorld DVB-S 2101, 2102, DVB-S2 2104, DVB-C 3101,
-* TeVii S600, S630, S650 Cards
+* TeVii S600, S630, S650,
+* Prof 1100, 7500 Cards
* Copyright (C) 2008,2009 Igor M. Liplianin (liplianin@me.by)
*
* This program is free software; you can redistribute it and/or modify it
@@ -469,6 +470,7 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
+ struct usb_device *udev = d->udev;
int ret = 0;
int len, i, j;
@@ -488,8 +490,13 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
}
case (DW2102_VOLTAGE_CTRL): {
u8 obuf[2];
+
+ obuf[0] = 1;
+ obuf[1] = msg[j].buf[1];/* off-on */
+ ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
+ obuf, 2, DW210X_WRITE_MSG);
obuf[0] = 3;
- obuf[1] = msg[j].buf[0];
+ obuf[1] = msg[j].buf[0];/* 13v-18v */
ret = dw210x_op_rw(d->udev, 0x8a, 0, 0,
obuf, 2, DW210X_WRITE_MSG);
break;
@@ -527,6 +534,17 @@ static int s6x0_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
i += 16;
len -= 16;
} while (len > 0);
+ } else if ((udev->descriptor.idProduct == 0x7500)
+ && (j < (num - 1))) {
+ /* write register addr before read */
+ u8 obuf[msg[j].len + 2];
+ obuf[0] = msg[j + 1].len;
+ obuf[1] = (msg[j].addr << 1);
+ memcpy(obuf + 2, msg[j].buf, msg[j].len);
+ ret = dw210x_op_rw(d->udev, 0x92, 0, 0,
+ obuf, msg[j].len + 2,
+ DW210X_WRITE_MSG);
+ break;
} else {
/* write registers */
u8 obuf[msg[j].len + 2];
@@ -651,18 +669,25 @@ static int s6x0_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
static int dw210x_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
{
- static u8 command_13v[1] = {0x00};
- static u8 command_18v[1] = {0x01};
- struct i2c_msg msg[] = {
- {.addr = DW2102_VOLTAGE_CTRL, .flags = 0,
- .buf = command_13v, .len = 1},
+ static u8 command_13v[] = {0x00, 0x01};
+ static u8 command_18v[] = {0x01, 0x01};
+ static u8 command_off[] = {0x00, 0x00};
+ struct i2c_msg msg = {
+ .addr = DW2102_VOLTAGE_CTRL,
+ .flags = 0,
+ .buf = command_off,
+ .len = 2,
};
struct dvb_usb_adapter *udev_adap =
(struct dvb_usb_adapter *)(fe->dvb->priv);
if (voltage == SEC_VOLTAGE_18)
- msg[0].buf = command_18v;
- i2c_transfer(&udev_adap->dev->i2c_adap, msg, 1);
+ msg.buf = command_18v;
+ else if (voltage == SEC_VOLTAGE_13)
+ msg.buf = command_13v;
+
+ i2c_transfer(&udev_adap->dev->i2c_adap, &msg, 1);
+
return 0;
}
@@ -735,6 +760,18 @@ static struct stv6110_config dw2104_stv6110_config = {
.clk_div = 1,
};
+static struct stv0900_config prof_7500_stv0900_config = {
+ .demod_address = 0x6a,
+ .demod_mode = 0,
+ .xtal = 27000000,
+ .clkmode = 3,/* 0-CLKI, 2-XTALI, else AUTO */
+ .diseqc_mode = 2,/* 2/3 PWM */
+ .tun1_maddress = 0,/* 0x60 */
+ .tun1_adc = 0,/* 2 Vpp */
+ .path1_mode = 3,
+ .tun1_type = 3,
+};
+
static int dw2104_frontend_attach(struct dvb_usb_adapter *d)
{
struct dvb_tuner_ops *tuner_ops = NULL;
@@ -882,6 +919,19 @@ static int s6x0_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
+static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
+{
+ d->fe = dvb_attach(stv0900_attach, &prof_7500_stv0900_config,
+ &d->dev->i2c_adap, 0);
+ if (d->fe == NULL)
+ return -EIO;
+ d->fe->ops.set_voltage = dw210x_set_voltage;
+
+ info("Attached STV0900+STB6100A!\n");
+
+ return 0;
+}
+
static int dw2102_tuner_attach(struct dvb_usb_adapter *adap)
{
dvb_attach(dvb_pll_attach, adap->fe, 0x60,
@@ -1073,6 +1123,7 @@ static struct usb_device_id dw2102_table[] = {
{USB_DEVICE(0x9022, USB_PID_TEVII_S630)},
{USB_DEVICE(0x3011, USB_PID_PROF_1100)},
{USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
+ {USB_DEVICE(0x3034, 0x7500)},
{ }
};
@@ -1387,9 +1438,30 @@ static struct dvb_usb_device_properties s6x0_properties = {
}
};
+struct dvb_usb_device_properties *p7500;
+static struct dvb_usb_device_description d7500 = {
+ "Prof 7500 USB DVB-S2",
+ {&dw2102_table[9], NULL},
+ {NULL},
+};
+
static int dw2102_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+
+ p7500 = kzalloc(sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
+ if (!p7500)
+ return -ENOMEM;
+ /* copy default structure */
+ memcpy(p7500, &s6x0_properties,
+ sizeof(struct dvb_usb_device_properties));
+ /* fill only different fields */
+ p7500->firmware = "dvb-usb-p7500.fw";
+ p7500->devices[0] = d7500;
+ p7500->rc_key_map = tbs_rc_keys;
+ p7500->rc_key_map_size = ARRAY_SIZE(tbs_rc_keys);
+ p7500->adapter->frontend_attach = prof_7500_frontend_attach;
+
if (0 == dvb_usb_device_init(intf, &dw2102_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &dw2104_properties,
@@ -1397,6 +1469,8 @@ static int dw2102_probe(struct usb_interface *intf,
0 == dvb_usb_device_init(intf, &dw3101_properties,
THIS_MODULE, NULL, adapter_nr) ||
0 == dvb_usb_device_init(intf, &s6x0_properties,
+ THIS_MODULE, NULL, adapter_nr) ||
+ 0 == dvb_usb_device_init(intf, p7500,
THIS_MODULE, NULL, adapter_nr))
return 0;
@@ -1431,6 +1505,6 @@ MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
" DVB-C 3101 USB2.0,"
" TeVii S600, S630, S650, S660 USB2.0,"
- " Prof 1100 USB2.0 devices");
+ " Prof 1100, 7500 USB2.0 devices");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/firewire/firedtv-1394.c b/drivers/media/dvb/firewire/firedtv-1394.c
index 7c5459c27b75..c3e0ec2dcfca 100644
--- a/drivers/media/dvb/firewire/firedtv-1394.c
+++ b/drivers/media/dvb/firewire/firedtv-1394.c
@@ -90,13 +90,14 @@ static inline struct node_entry *node_of(struct firedtv *fdtv)
return container_of(fdtv->device, struct unit_directory, device)->ne;
}
-static int node_lock(struct firedtv *fdtv, u64 addr, __be32 data[])
+static int node_lock(struct firedtv *fdtv, u64 addr, void *data)
{
+ quadlet_t *d = data;
int ret;
- ret = hpsb_node_lock(node_of(fdtv), addr, EXTCODE_COMPARE_SWAP,
- (__force quadlet_t *)&data[1], (__force quadlet_t)data[0]);
- data[0] = data[1];
+ ret = hpsb_node_lock(node_of(fdtv), addr,
+ EXTCODE_COMPARE_SWAP, &d[1], d[0]);
+ d[0] = d[1];
return ret;
}
@@ -192,9 +193,13 @@ static int node_probe(struct device *dev)
int kv_len, err;
void *kv_str;
- kv_len = (ud->model_name_kv->value.leaf.len - 2) * sizeof(quadlet_t);
- kv_str = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(ud->model_name_kv);
-
+ if (ud->model_name_kv) {
+ kv_len = (ud->model_name_kv->value.leaf.len - 2) * 4;
+ kv_str = CSR1212_TEXTUAL_DESCRIPTOR_LEAF_DATA(ud->model_name_kv);
+ } else {
+ kv_len = 0;
+ kv_str = NULL;
+ }
fdtv = fdtv_alloc(dev, &fdtv_1394_backend, kv_str, kv_len);
if (!fdtv)
return -ENOMEM;
diff --git a/drivers/media/dvb/firewire/firedtv-avc.c b/drivers/media/dvb/firewire/firedtv-avc.c
index 50c42a4b972b..1b31bebc27d6 100644
--- a/drivers/media/dvb/firewire/firedtv-avc.c
+++ b/drivers/media/dvb/firewire/firedtv-avc.c
@@ -74,7 +74,6 @@
#define EN50221_TAG_CA_INFO 0x9f8031
struct avc_command_frame {
- int length;
u8 ctype;
u8 subunit;
u8 opcode;
@@ -82,13 +81,27 @@ struct avc_command_frame {
};
struct avc_response_frame {
- int length;
u8 response;
u8 subunit;
u8 opcode;
u8 operand[509];
};
+#define LAST_OPERAND (509 - 1)
+
+static inline void clear_operands(struct avc_command_frame *c, int from, int to)
+{
+ memset(&c->operand[from], 0, to - from + 1);
+}
+
+static void pad_operands(struct avc_command_frame *c, int from)
+{
+ int to = ALIGN(from, 4);
+
+ if (from <= to && to <= LAST_OPERAND)
+ clear_operands(c, from, to);
+}
+
#define AVC_DEBUG_READ_DESCRIPTOR 0x0001
#define AVC_DEBUG_DSIT 0x0002
#define AVC_DEBUG_DSD 0x0004
@@ -202,78 +215,65 @@ static void debug_pmt(char *msg, int length)
16, 1, msg, length, false);
}
-static int __avc_write(struct firedtv *fdtv,
- const struct avc_command_frame *c, struct avc_response_frame *r)
+static int avc_write(struct firedtv *fdtv)
{
int err, retry;
- if (r)
- fdtv->avc_reply_received = false;
+ fdtv->avc_reply_received = false;
for (retry = 0; retry < 6; retry++) {
if (unlikely(avc_debug))
- debug_fcp(&c->ctype, c->length);
+ debug_fcp(fdtv->avc_data, fdtv->avc_data_length);
err = fdtv->backend->write(fdtv, FCP_COMMAND_REGISTER,
- (void *)&c->ctype, c->length);
+ fdtv->avc_data, fdtv->avc_data_length);
if (err) {
- fdtv->avc_reply_received = true;
dev_err(fdtv->device, "FCP command write failed\n");
+
return err;
}
- if (!r)
- return 0;
-
/*
* AV/C specs say that answers should be sent within 150 ms.
* Time out after 200 ms.
*/
if (wait_event_timeout(fdtv->avc_wait,
fdtv->avc_reply_received,
- msecs_to_jiffies(200)) != 0) {
- r->length = fdtv->response_length;
- memcpy(&r->response, fdtv->response, r->length);
-
+ msecs_to_jiffies(200)) != 0)
return 0;
- }
}
dev_err(fdtv->device, "FCP response timed out\n");
+
return -ETIMEDOUT;
}
-static int avc_write(struct firedtv *fdtv,
- const struct avc_command_frame *c, struct avc_response_frame *r)
+static bool is_register_rc(struct avc_response_frame *r)
{
- int ret;
-
- if (mutex_lock_interruptible(&fdtv->avc_mutex))
- return -EINTR;
-
- ret = __avc_write(fdtv, c, r);
-
- mutex_unlock(&fdtv->avc_mutex);
- return ret;
+ return r->opcode == AVC_OPCODE_VENDOR &&
+ r->operand[0] == SFE_VENDOR_DE_COMPANYID_0 &&
+ r->operand[1] == SFE_VENDOR_DE_COMPANYID_1 &&
+ r->operand[2] == SFE_VENDOR_DE_COMPANYID_2 &&
+ r->operand[3] == SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL;
}
int avc_recv(struct firedtv *fdtv, void *data, size_t length)
{
- struct avc_response_frame *r =
- data - offsetof(struct avc_response_frame, response);
+ struct avc_response_frame *r = data;
if (unlikely(avc_debug))
debug_fcp(data, length);
- if (length >= 8 &&
- r->operand[0] == SFE_VENDOR_DE_COMPANYID_0 &&
- r->operand[1] == SFE_VENDOR_DE_COMPANYID_1 &&
- r->operand[2] == SFE_VENDOR_DE_COMPANYID_2 &&
- r->operand[3] == SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL) {
- if (r->response == AVC_RESPONSE_CHANGED) {
- fdtv_handle_rc(fdtv,
- r->operand[4] << 8 | r->operand[5]);
+ if (length >= 8 && is_register_rc(r)) {
+ switch (r->response) {
+ case AVC_RESPONSE_CHANGED:
+ fdtv_handle_rc(fdtv, r->operand[4] << 8 | r->operand[5]);
schedule_work(&fdtv->remote_ctrl_work);
- } else if (r->response != AVC_RESPONSE_INTERIM) {
+ break;
+ case AVC_RESPONSE_INTERIM:
+ if (is_register_rc((void *)fdtv->avc_data))
+ goto wake;
+ break;
+ default:
dev_info(fdtv->device,
"remote control result = %d\n", r->response);
}
@@ -285,9 +285,9 @@ int avc_recv(struct firedtv *fdtv, void *data, size_t length)
return -EIO;
}
- memcpy(fdtv->response, data, length);
- fdtv->response_length = length;
-
+ memcpy(fdtv->avc_data, data, length);
+ fdtv->avc_data_length = length;
+wake:
fdtv->avc_reply_received = true;
wake_up(&fdtv->avc_wait);
@@ -318,10 +318,11 @@ static int add_pid_filter(struct firedtv *fdtv, u8 *operand)
* tuning command for setting the relative LNB frequency
* (not supported by the AVC standard)
*/
-static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
- struct dvb_frontend_parameters *params,
- struct avc_command_frame *c)
+static int avc_tuner_tuneqpsk(struct firedtv *fdtv,
+ struct dvb_frontend_parameters *params)
{
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+
c->opcode = AVC_OPCODE_VENDOR;
c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
@@ -370,16 +371,18 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
c->operand[13] = 0x1;
c->operand[14] = 0xff;
c->operand[15] = 0xff;
- c->length = 20;
+
+ return 16;
} else {
- c->length = 16;
+ return 13;
}
}
-static void avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
- struct dvb_frontend_parameters *params,
- struct avc_command_frame *c)
+static int avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
+ struct dvb_frontend_parameters *params)
{
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+
c->opcode = AVC_OPCODE_DSD;
c->operand[0] = 0; /* source plug */
@@ -440,15 +443,14 @@ static void avc_tuner_dsd_dvb_c(struct firedtv *fdtv,
c->operand[20] = 0x00;
c->operand[21] = 0x00;
- /* Add PIDs to filter */
- c->length = ALIGN(22 + add_pid_filter(fdtv, &c->operand[22]) + 3, 4);
+ return 22 + add_pid_filter(fdtv, &c->operand[22]);
}
-static void avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
- struct dvb_frontend_parameters *params,
- struct avc_command_frame *c)
+static int avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
+ struct dvb_frontend_parameters *params)
{
struct dvb_ofdm_parameters *ofdm = &params->u.ofdm;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
c->opcode = AVC_OPCODE_DSD;
@@ -543,55 +545,58 @@ static void avc_tuner_dsd_dvb_t(struct firedtv *fdtv,
c->operand[15] = 0x00; /* network_ID[0] */
c->operand[16] = 0x00; /* network_ID[1] */
- /* Add PIDs to filter */
- c->length = ALIGN(17 + add_pid_filter(fdtv, &c->operand[17]) + 3, 4);
+ return 17 + add_pid_filter(fdtv, &c->operand[17]);
}
int avc_tuner_dsd(struct firedtv *fdtv,
struct dvb_frontend_parameters *params)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ int pos, ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
switch (fdtv->type) {
case FIREDTV_DVB_S:
- case FIREDTV_DVB_S2: avc_tuner_tuneqpsk(fdtv, params, c); break;
- case FIREDTV_DVB_C: avc_tuner_dsd_dvb_c(fdtv, params, c); break;
- case FIREDTV_DVB_T: avc_tuner_dsd_dvb_t(fdtv, params, c); break;
+ case FIREDTV_DVB_S2: pos = avc_tuner_tuneqpsk(fdtv, params); break;
+ case FIREDTV_DVB_C: pos = avc_tuner_dsd_dvb_c(fdtv, params); break;
+ case FIREDTV_DVB_T: pos = avc_tuner_dsd_dvb_t(fdtv, params); break;
default:
BUG();
}
+ pad_operands(c, pos);
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
-
- msleep(500);
+ fdtv->avc_data_length = ALIGN(3 + pos, 4);
+ ret = avc_write(fdtv);
#if 0
- /* FIXME: */
- /* u8 *status was an out-parameter of avc_tuner_dsd, unused by caller */
+ /*
+ * FIXME:
+ * u8 *status was an out-parameter of avc_tuner_dsd, unused by caller.
+ * Check for AVC_RESPONSE_ACCEPTED here instead?
+ */
if (status)
*status = r->operand[2];
#endif
- return 0;
+ mutex_unlock(&fdtv->avc_mutex);
+
+ if (ret == 0)
+ msleep(500);
+
+ return ret;
}
int avc_tuner_set_pids(struct firedtv *fdtv, unsigned char pidc, u16 pid[])
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
- int pos, k;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ int ret, pos, k;
if (pidc > 16 && pidc != 0xff)
return -EINVAL;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -614,24 +619,27 @@ int avc_tuner_set_pids(struct firedtv *fdtv, unsigned char pidc, u16 pid[])
c->operand[pos++] = 0x00; /* tableID */
c->operand[pos++] = 0x00; /* filter_length */
}
+ pad_operands(c, pos);
- c->length = ALIGN(3 + pos, 4);
+ fdtv->avc_data_length = ALIGN(3 + pos, 4);
+ ret = avc_write(fdtv);
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ /* FIXME: check response code? */
- msleep(50);
- return 0;
+ mutex_unlock(&fdtv->avc_mutex);
+
+ if (ret == 0)
+ msleep(50);
+
+ return ret;
}
int avc_tuner_get_ts(struct firedtv *fdtv)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
- int sl;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ int ret, sl;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -646,26 +654,33 @@ int avc_tuner_get_ts(struct firedtv *fdtv)
c->operand[4] = 0x00; /* antenna number */
c->operand[5] = 0x0; /* system_specific_search_flags */
c->operand[6] = sl; /* system_specific_multiplex selection_length */
- c->operand[7] = 0x00; /* valid_flags [0] */
- c->operand[8] = 0x00; /* valid_flags [1] */
- c->operand[7 + sl] = 0x00; /* nr_of_dsit_sel_specs (always 0) */
+ /*
+ * operand[7]: valid_flags[0]
+ * operand[8]: valid_flags[1]
+ * operand[7 + sl]: nr_of_dsit_sel_specs (always 0)
+ */
+ clear_operands(c, 7, 24);
- c->length = fdtv->type == FIREDTV_DVB_T ? 24 : 28;
+ fdtv->avc_data_length = fdtv->type == FIREDTV_DVB_T ? 24 : 28;
+ ret = avc_write(fdtv);
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ /* FIXME: check response code? */
- msleep(250);
- return 0;
+ mutex_unlock(&fdtv->avc_mutex);
+
+ if (ret == 0)
+ msleep(250);
+
+ return ret;
}
int avc_identify_subunit(struct firedtv *fdtv)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
+ int ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -678,31 +693,34 @@ int avc_identify_subunit(struct firedtv *fdtv)
c->operand[4] = 0x08; /* length lowbyte */
c->operand[5] = 0x00; /* offset highbyte */
c->operand[6] = 0x0d; /* offset lowbyte */
+ clear_operands(c, 7, 8); /* padding */
- c->length = 12;
-
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
if ((r->response != AVC_RESPONSE_STABLE &&
r->response != AVC_RESPONSE_ACCEPTED) ||
(r->operand[3] << 8) + r->operand[4] != 8) {
dev_err(fdtv->device, "cannot read subunit identifier\n");
- return -EINVAL;
+ ret = -EINVAL;
}
- return 0;
+out:
+ mutex_unlock(&fdtv->avc_mutex);
+
+ return ret;
}
#define SIZEOF_ANTENNA_INPUT_INFO 22
int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
- int length;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
+ int length, ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -710,27 +728,30 @@ int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat)
c->operand[0] = DESCRIPTOR_TUNER_STATUS;
c->operand[1] = 0xff; /* read_result_status */
- c->operand[2] = 0x00; /* reserved */
- c->operand[3] = 0; /* SIZEOF_ANTENNA_INPUT_INFO >> 8; */
- c->operand[4] = 0; /* SIZEOF_ANTENNA_INPUT_INFO & 0xff; */
- c->operand[5] = 0x00;
- c->operand[6] = 0x00;
-
- c->length = 12;
-
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ /*
+ * operand[2]: reserved
+ * operand[3]: SIZEOF_ANTENNA_INPUT_INFO >> 8
+ * operand[4]: SIZEOF_ANTENNA_INPUT_INFO & 0xff
+ */
+ clear_operands(c, 2, 31);
+
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
if (r->response != AVC_RESPONSE_STABLE &&
r->response != AVC_RESPONSE_ACCEPTED) {
dev_err(fdtv->device, "cannot read tuner status\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
length = r->operand[9];
if (r->operand[1] != 0x10 || length != SIZEOF_ANTENNA_INPUT_INFO) {
dev_err(fdtv->device, "got invalid tuner status\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
stat->active_system = r->operand[10];
@@ -766,20 +787,21 @@ int avc_tuner_status(struct firedtv *fdtv, struct firedtv_tuner_status *stat)
stat->ca_dvb_flag = r->operand[31] >> 3 & 1;
stat->ca_error_flag = r->operand[31] >> 2 & 1;
stat->ca_initialization_status = r->operand[31] >> 1 & 1;
+out:
+ mutex_unlock(&fdtv->avc_mutex);
- return 0;
+ return ret;
}
int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
char conttone, char nrdiseq,
struct dvb_diseqc_master_cmd *diseqcmd)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
- int i, j, k;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
+ int pos, j, k, ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -789,41 +811,41 @@ int avc_lnb_control(struct firedtv *fdtv, char voltage, char burst,
c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
c->operand[3] = SFE_VENDOR_OPCODE_LNB_CONTROL;
-
c->operand[4] = voltage;
c->operand[5] = nrdiseq;
- i = 6;
-
+ pos = 6;
for (j = 0; j < nrdiseq; j++) {
- c->operand[i++] = diseqcmd[j].msg_len;
+ c->operand[pos++] = diseqcmd[j].msg_len;
for (k = 0; k < diseqcmd[j].msg_len; k++)
- c->operand[i++] = diseqcmd[j].msg[k];
+ c->operand[pos++] = diseqcmd[j].msg[k];
}
+ c->operand[pos++] = burst;
+ c->operand[pos++] = conttone;
+ pad_operands(c, pos);
- c->operand[i++] = burst;
- c->operand[i++] = conttone;
-
- c->length = ALIGN(3 + i, 4);
-
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ fdtv->avc_data_length = ALIGN(3 + pos, 4);
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
if (r->response != AVC_RESPONSE_ACCEPTED) {
dev_err(fdtv->device, "LNB control failed\n");
- return -EINVAL;
+ ret = -EINVAL;
}
+out:
+ mutex_unlock(&fdtv->avc_mutex);
- return 0;
+ return ret;
}
int avc_register_remote_control(struct firedtv *fdtv)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ int ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_NOTIFY;
c->subunit = AVC_SUBUNIT_TYPE_UNIT | 7;
@@ -833,10 +855,16 @@ int avc_register_remote_control(struct firedtv *fdtv)
c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
c->operand[3] = SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL;
+ c->operand[4] = 0; /* padding */
+
+ fdtv->avc_data_length = 8;
+ ret = avc_write(fdtv);
- c->length = 8;
+ /* FIXME: check response code? */
- return avc_write(fdtv, c, NULL);
+ mutex_unlock(&fdtv->avc_mutex);
+
+ return ret;
}
void avc_remote_ctrl_work(struct work_struct *work)
@@ -851,11 +879,10 @@ void avc_remote_ctrl_work(struct work_struct *work)
#if 0 /* FIXME: unused */
int avc_tuner_host2ca(struct firedtv *fdtv)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ int ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -867,15 +894,16 @@ int avc_tuner_host2ca(struct firedtv *fdtv)
c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
c->operand[4] = 0; /* slot */
c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
- c->operand[6] = 0; /* more/last */
- c->operand[7] = 0; /* length */
+ clear_operands(c, 6, 8);
- c->length = 12;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ /* FIXME: check response code? */
- return 0;
+ mutex_unlock(&fdtv->avc_mutex);
+
+ return ret;
}
#endif
@@ -906,12 +934,11 @@ static int get_ca_object_length(struct avc_response_frame *r)
int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
- int pos;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
+ int pos, ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_STATUS;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -923,11 +950,12 @@ int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
c->operand[4] = 0; /* slot */
c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
+ clear_operands(c, 6, LAST_OPERAND);
- c->length = 12;
-
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
/* FIXME: check response code and validate response data */
@@ -939,18 +967,19 @@ int avc_ca_app_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
app_info[4] = 0x01;
memcpy(&app_info[5], &r->operand[pos], 5 + r->operand[pos + 4]);
*len = app_info[3] + 4;
+out:
+ mutex_unlock(&fdtv->avc_mutex);
- return 0;
+ return ret;
}
int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
- int pos;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
+ int pos, ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_STATUS;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -962,11 +991,14 @@ int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
c->operand[4] = 0; /* slot */
c->operand[5] = SFE_VENDOR_TAG_CA_APPLICATION_INFO; /* ca tag */
+ clear_operands(c, 6, LAST_OPERAND);
- c->length = 12;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ /* FIXME: check response code and validate response data */
pos = get_ca_object_pos(r);
app_info[0] = (EN50221_TAG_CA_INFO >> 16) & 0xff;
@@ -976,17 +1008,18 @@ int avc_ca_info(struct firedtv *fdtv, char *app_info, unsigned int *len)
app_info[4] = r->operand[pos + 0];
app_info[5] = r->operand[pos + 1];
*len = app_info[3] + 4;
+out:
+ mutex_unlock(&fdtv->avc_mutex);
- return 0;
+ return ret;
}
int avc_ca_reset(struct firedtv *fdtv)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ int ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -1002,19 +1035,20 @@ int avc_ca_reset(struct firedtv *fdtv)
c->operand[7] = 1; /* length */
c->operand[8] = 0; /* force hardware reset */
- c->length = 12;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ /* FIXME: check response code? */
- return 0;
+ mutex_unlock(&fdtv->avc_mutex);
+
+ return ret;
}
int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
int list_management;
int program_info_length;
int pmt_cmd_id;
@@ -1022,11 +1056,12 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
int write_pos;
int es_info_length;
int crc32_csum;
+ int ret;
if (unlikely(avc_debug & AVC_DEBUG_APPLICATION_PMT))
debug_pmt(msg, length);
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_CONTROL;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -1058,7 +1093,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[12] = 0x02; /* Table id=2 */
c->operand[13] = 0x80; /* Section syntax + length */
- /* c->operand[14] = XXXprogram_info_length + 12; */
+
c->operand[15] = msg[1]; /* Program number */
c->operand[16] = msg[2];
c->operand[17] = 0x01; /* Version number=0 + current/next=1 */
@@ -1106,12 +1141,7 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
write_pos += es_info_length;
}
}
-
- /* CRC */
- c->operand[write_pos++] = 0x00;
- c->operand[write_pos++] = 0x00;
- c->operand[write_pos++] = 0x00;
- c->operand[write_pos++] = 0x00;
+ write_pos += 4; /* CRC */
c->operand[7] = 0x82;
c->operand[8] = (write_pos - 10) >> 8;
@@ -1123,28 +1153,31 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[write_pos - 3] = (crc32_csum >> 16) & 0xff;
c->operand[write_pos - 2] = (crc32_csum >> 8) & 0xff;
c->operand[write_pos - 1] = (crc32_csum >> 0) & 0xff;
+ pad_operands(c, write_pos);
- c->length = ALIGN(3 + write_pos, 4);
-
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ fdtv->avc_data_length = ALIGN(3 + write_pos, 4);
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
if (r->response != AVC_RESPONSE_ACCEPTED) {
dev_err(fdtv->device,
"CA PMT failed with response 0x%x\n", r->response);
- return -EFAULT;
+ ret = -EFAULT;
}
+out:
+ mutex_unlock(&fdtv->avc_mutex);
- return 0;
+ return ret;
}
int avc_ca_get_time_date(struct firedtv *fdtv, int *interval)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
+ int ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_STATUS;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -1156,28 +1189,28 @@ int avc_ca_get_time_date(struct firedtv *fdtv, int *interval)
c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
c->operand[4] = 0; /* slot */
c->operand[5] = SFE_VENDOR_TAG_CA_DATE_TIME; /* ca tag */
- c->operand[6] = 0; /* more/last */
- c->operand[7] = 0; /* length */
+ clear_operands(c, 6, LAST_OPERAND);
- c->length = 12;
-
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
/* FIXME: check response code and validate response data */
*interval = r->operand[get_ca_object_pos(r)];
+out:
+ mutex_unlock(&fdtv->avc_mutex);
- return 0;
+ return ret;
}
int avc_ca_enter_menu(struct firedtv *fdtv)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer; /* FIXME: unused */
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ int ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_STATUS;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -1189,24 +1222,25 @@ int avc_ca_enter_menu(struct firedtv *fdtv)
c->operand[3] = SFE_VENDOR_OPCODE_HOST2CA;
c->operand[4] = 0; /* slot */
c->operand[5] = SFE_VENDOR_TAG_CA_ENTER_MENU;
- c->operand[6] = 0; /* more/last */
- c->operand[7] = 0; /* length */
+ clear_operands(c, 6, 8);
- c->length = 12;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ /* FIXME: check response code? */
- return 0;
+ mutex_unlock(&fdtv->avc_mutex);
+
+ return ret;
}
int avc_ca_get_mmi(struct firedtv *fdtv, char *mmi_object, unsigned int *len)
{
- char buffer[sizeof(struct avc_command_frame)];
- struct avc_command_frame *c = (void *)buffer;
- struct avc_response_frame *r = (void *)buffer;
+ struct avc_command_frame *c = (void *)fdtv->avc_data;
+ struct avc_response_frame *r = (void *)fdtv->avc_data;
+ int ret;
- memset(c, 0, sizeof(*c));
+ mutex_lock(&fdtv->avc_mutex);
c->ctype = AVC_CTYPE_STATUS;
c->subunit = AVC_SUBUNIT_TYPE_TUNER | fdtv->subunit;
@@ -1218,20 +1252,21 @@ int avc_ca_get_mmi(struct firedtv *fdtv, char *mmi_object, unsigned int *len)
c->operand[3] = SFE_VENDOR_OPCODE_CA2HOST;
c->operand[4] = 0; /* slot */
c->operand[5] = SFE_VENDOR_TAG_CA_MMI;
- c->operand[6] = 0; /* more/last */
- c->operand[7] = 0; /* length */
+ clear_operands(c, 6, LAST_OPERAND);
- c->length = 12;
-
- if (avc_write(fdtv, c, r) < 0)
- return -EIO;
+ fdtv->avc_data_length = 12;
+ ret = avc_write(fdtv);
+ if (ret < 0)
+ goto out;
/* FIXME: check response code and validate response data */
*len = get_ca_object_length(r);
memcpy(mmi_object, &r->operand[get_ca_object_pos(r)], *len);
+out:
+ mutex_unlock(&fdtv->avc_mutex);
- return 0;
+ return ret;
}
#define CMP_OUTPUT_PLUG_CONTROL_REG_0 0xfffff0000904ULL
@@ -1240,14 +1275,14 @@ static int cmp_read(struct firedtv *fdtv, u64 addr, __be32 *data)
{
int ret;
- if (mutex_lock_interruptible(&fdtv->avc_mutex))
- return -EINTR;
+ mutex_lock(&fdtv->avc_mutex);
ret = fdtv->backend->read(fdtv, addr, data);
if (ret < 0)
dev_err(fdtv->device, "CMP: read I/O error\n");
mutex_unlock(&fdtv->avc_mutex);
+
return ret;
}
@@ -1255,14 +1290,19 @@ static int cmp_lock(struct firedtv *fdtv, u64 addr, __be32 data[])
{
int ret;
- if (mutex_lock_interruptible(&fdtv->avc_mutex))
- return -EINTR;
+ mutex_lock(&fdtv->avc_mutex);
+
+ /* data[] is stack-allocated and should not be DMA-mapped. */
+ memcpy(fdtv->avc_data, data, 8);
- ret = fdtv->backend->lock(fdtv, addr, data);
+ ret = fdtv->backend->lock(fdtv, addr, fdtv->avc_data);
if (ret < 0)
dev_err(fdtv->device, "CMP: lock I/O error\n");
+ else
+ memcpy(data, fdtv->avc_data, 8);
mutex_unlock(&fdtv->avc_mutex);
+
return ret;
}
diff --git a/drivers/media/dvb/firewire/firedtv-dvb.c b/drivers/media/dvb/firewire/firedtv-dvb.c
index fc9996c13e13..079e8c5b0475 100644
--- a/drivers/media/dvb/firewire/firedtv-dvb.c
+++ b/drivers/media/dvb/firewire/firedtv-dvb.c
@@ -277,7 +277,6 @@ struct firedtv *fdtv_alloc(struct device *dev,
mutex_init(&fdtv->avc_mutex);
init_waitqueue_head(&fdtv->avc_wait);
- fdtv->avc_reply_received = true;
mutex_init(&fdtv->demux_mutex);
INIT_WORK(&fdtv->remote_ctrl_work, avc_remote_ctrl_work);
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 6223bf01efe9..75afe4f81e33 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -41,7 +41,7 @@ static int node_req(struct firedtv *fdtv, u64 addr, void *data, size_t len,
return rcode != RCODE_COMPLETE ? -EIO : 0;
}
-static int node_lock(struct firedtv *fdtv, u64 addr, __be32 data[])
+static int node_lock(struct firedtv *fdtv, u64 addr, void *data)
{
return node_req(fdtv, addr, data, 8, TCODE_LOCK_COMPARE_SWAP);
}
@@ -239,47 +239,18 @@ static const struct fw_address_region fcp_region = {
};
/* Adjust the template string if models with longer names appear. */
-#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4))
-
-static size_t model_name(u32 *directory, __be32 *buffer)
-{
- struct fw_csr_iterator ci;
- int i, length, key, value, last_key = 0;
- u32 *block = NULL;
-
- fw_csr_iterator_init(&ci, directory);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (last_key == CSR_MODEL &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
- }
-
- if (block == NULL)
- return 0;
-
- length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN);
- if (length <= 0)
- return 0;
-
- /* fast-forward to text string */
- block += 3;
-
- for (i = 0; i < length; i++)
- buffer[i] = cpu_to_be32(block[i]);
-
- return length * 4;
-}
+#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????")
static int node_probe(struct device *dev)
{
struct firedtv *fdtv;
- __be32 name[MAX_MODEL_NAME_LEN];
+ char name[MAX_MODEL_NAME_LEN];
int name_len, err;
- name_len = model_name(fw_unit(dev)->directory, name);
+ name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
+ name, sizeof(name));
- fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len);
+ fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
if (!fdtv)
return -ENOMEM;
diff --git a/drivers/media/dvb/firewire/firedtv.h b/drivers/media/dvb/firewire/firedtv.h
index 35080dbb3c66..78cc28f36914 100644
--- a/drivers/media/dvb/firewire/firedtv.h
+++ b/drivers/media/dvb/firewire/firedtv.h
@@ -73,7 +73,7 @@ struct input_dev;
struct firedtv;
struct firedtv_backend {
- int (*lock)(struct firedtv *fdtv, u64 addr, __be32 data[]);
+ int (*lock)(struct firedtv *fdtv, u64 addr, void *data);
int (*read)(struct firedtv *fdtv, u64 addr, void *data);
int (*write)(struct firedtv *fdtv, u64 addr, void *data, size_t len);
int (*start_iso)(struct firedtv *fdtv);
@@ -114,8 +114,8 @@ struct firedtv {
unsigned long channel_active;
u16 channel_pid[16];
- size_t response_length;
- u8 response[512];
+ int avc_data_length;
+ u8 avc_data[512];
};
/* firedtv-1394.c */
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index a3b8b697349b..cd7f9b7cbffa 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -208,6 +208,14 @@ config DVB_DS3000
help
A DVB-S/S2 tuner module. Say Y when you want to support this frontend.
+config DVB_MB86A16
+ tristate "Fujitsu MB86A16 based"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ A DVB-S/DSS Direct Conversion reveiver.
+ Say Y when you want to support this frontend.
+
comment "DVB-T (terrestrial) frontends"
depends on DVB_CORE
@@ -587,6 +595,17 @@ config DVB_ATBM8830
help
A DMB-TH tuner module. Say Y when you want to support this frontend.
+config DVB_TDA665x
+ tristate "TDA665x tuner"
+ depends on DVB_CORE && I2C
+ default m if DVB_FE_CUSTOMISE
+ help
+ Support for tuner modules based on Philips TDA6650/TDA6651 chips.
+ Say Y when you want to support this chip.
+
+ Currently supported tuners:
+ * Panasonic ENV57H12D5 (ET-50DT)
+
comment "Tools to develop new frontends"
config DVB_DUMMY_FE
diff --git a/drivers/media/dvb/frontends/Makefile b/drivers/media/dvb/frontends/Makefile
index 47575cc7b699..874e8ada4d1d 100644
--- a/drivers/media/dvb/frontends/Makefile
+++ b/drivers/media/dvb/frontends/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_DVB_TDA10048) += tda10048.o
obj-$(CONFIG_DVB_TUNER_CX24113) += cx24113.o
obj-$(CONFIG_DVB_S5H1411) += s5h1411.o
obj-$(CONFIG_DVB_LGS8GL5) += lgs8gl5.o
+obj-$(CONFIG_DVB_TDA665x) += tda665x.o
obj-$(CONFIG_DVB_LGS8GXX) += lgs8gxx.o
obj-$(CONFIG_DVB_ATBM8830) += atbm8830.o
obj-$(CONFIG_DVB_DUMMY_FE) += dvb_dummy_fe.o
@@ -80,3 +81,4 @@ obj-$(CONFIG_DVB_STV6110x) += stv6110x.o
obj-$(CONFIG_DVB_ISL6423) += isl6423.o
obj-$(CONFIG_DVB_EC100) += ec100.o
obj-$(CONFIG_DVB_DS3000) += ds3000.o
+obj-$(CONFIG_DVB_MB86A16) += mb86a16.o
diff --git a/drivers/media/dvb/frontends/atbm8830.c b/drivers/media/dvb/frontends/atbm8830.c
index 59881a5944eb..43aac2f85c2e 100644
--- a/drivers/media/dvb/frontends/atbm8830.c
+++ b/drivers/media/dvb/frontends/atbm8830.c
@@ -170,6 +170,19 @@ static int is_locked(struct atbm_state *priv, u8 *locked)
return 0;
}
+static int set_agc_config(struct atbm_state *priv,
+ u8 min, u8 max, u8 hold_loop)
+{
+ /* no effect if both min and max are zero */
+ if (!min && !max)
+ return 0;
+
+ atbm8830_write_reg(priv, REG_AGC_MIN, min);
+ atbm8830_write_reg(priv, REG_AGC_MAX, max);
+ atbm8830_write_reg(priv, REG_AGC_HOLD_LOOP, hold_loop);
+
+ return 0;
+}
static int set_static_channel_mode(struct atbm_state *priv)
{
@@ -227,6 +240,9 @@ static int atbm8830_init(struct dvb_frontend *fe)
/*Set IF frequency*/
set_if_freq(priv, cfg->if_freq);
+ /*Set AGC Config*/
+ set_agc_config(priv, cfg->agc_min, cfg->agc_max,
+ cfg->agc_hold_loop);
/*Set static channel mode*/
set_static_channel_mode(priv);
diff --git a/drivers/media/dvb/frontends/dib8000.c b/drivers/media/dvb/frontends/dib8000.c
index 6f6fa29d9ea4..2aa97dd6a8af 100644
--- a/drivers/media/dvb/frontends/dib8000.c
+++ b/drivers/media/dvb/frontends/dib8000.c
@@ -1999,6 +1999,8 @@ static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_par
struct dib8000_state *state = fe->demodulator_priv;
int time, ret;
+ fe->dtv_property_cache.delivery_system = SYS_ISDBT;
+
dib8000_set_output_mode(state, OUTMODE_HIGH_Z);
if (fe->ops.tuner_ops.set_params)
diff --git a/drivers/media/dvb/frontends/dib8000.h b/drivers/media/dvb/frontends/dib8000.h
index d99619ae983c..b1ee20799639 100644
--- a/drivers/media/dvb/frontends/dib8000.h
+++ b/drivers/media/dvb/frontends/dib8000.h
@@ -100,7 +100,7 @@ static inline int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_
static inline enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
- return CT_SHUTDOWN,
+ return CT_SHUTDOWN;
}
static inline void dib8000_pwm_agc_reset(struct dvb_frontend *fe)
{
diff --git a/drivers/media/dvb/frontends/lgdt3305.h b/drivers/media/dvb/frontends/lgdt3305.h
index 4fa6e52d1fe8..9cb11c9cae53 100644
--- a/drivers/media/dvb/frontends/lgdt3305.h
+++ b/drivers/media/dvb/frontends/lgdt3305.h
@@ -54,13 +54,13 @@ struct lgdt3305_config {
u16 usref_qam256; /* default: 0x2a80 */
/* disable i2c repeater - 0:repeater enabled 1:repeater disabled */
- int deny_i2c_rptr:1;
+ unsigned int deny_i2c_rptr:1;
/* spectral inversion - 0:disabled 1:enabled */
- int spectral_inversion:1;
+ unsigned int spectral_inversion:1;
/* use RF AGC loop - 0:disabled 1:enabled */
- int rf_agc_loop:1;
+ unsigned int rf_agc_loop:1;
enum lgdt3305_mpeg_mode mpeg_mode;
enum lgdt3305_tp_clock_edge tpclk_edge;
diff --git a/drivers/media/dvb/frontends/mb86a16.c b/drivers/media/dvb/frontends/mb86a16.c
new file mode 100644
index 000000000000..d05f7500e0c5
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16.c
@@ -0,0 +1,1878 @@
+/*
+ Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include "dvb_frontend.h"
+#include "mb86a16.h"
+#include "mb86a16_priv.h"
+
+unsigned int verbose = 5;
+module_param(verbose, int, 0644);
+
+#define ABS(x) ((x) < 0 ? (-x) : (x))
+
+struct mb86a16_state {
+ struct i2c_adapter *i2c_adap;
+ const struct mb86a16_config *config;
+ struct dvb_frontend frontend;
+
+ /* tuning parameters */
+ int frequency;
+ int srate;
+
+ /* Internal stuff */
+ int master_clk;
+ int deci;
+ int csel;
+ int rsel;
+};
+
+#define MB86A16_ERROR 0
+#define MB86A16_NOTICE 1
+#define MB86A16_INFO 2
+#define MB86A16_DEBUG 3
+
+#define dprintk(x, y, z, format, arg...) do { \
+ if (z) { \
+ if ((x > MB86A16_ERROR) && (x > y)) \
+ printk(KERN_ERR "%s: " format "\n", __func__, ##arg); \
+ else if ((x > MB86A16_NOTICE) && (x > y)) \
+ printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \
+ else if ((x > MB86A16_INFO) && (x > y)) \
+ printk(KERN_INFO "%s: " format "\n", __func__, ##arg); \
+ else if ((x > MB86A16_DEBUG) && (x > y)) \
+ printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg); \
+ } else { \
+ if (x > y) \
+ printk(format, ##arg); \
+ } \
+} while (0)
+
+#define TRACE_IN dprintk(verbose, MB86A16_DEBUG, 1, "-->()")
+#define TRACE_OUT dprintk(verbose, MB86A16_DEBUG, 1, "()-->")
+
+static int mb86a16_write(struct mb86a16_state *state, u8 reg, u8 val)
+{
+ int ret;
+ u8 buf[] = { reg, val };
+
+ struct i2c_msg msg = {
+ .addr = state->config->demod_address,
+ .flags = 0,
+ .buf = buf,
+ .len = 2
+ };
+
+ dprintk(verbose, MB86A16_DEBUG, 1,
+ "writing to [0x%02x],Reg[0x%02x],Data[0x%02x]",
+ state->config->demod_address, buf[0], buf[1]);
+
+ ret = i2c_transfer(state->i2c_adap, &msg, 1);
+
+ return (ret != 1) ? -EREMOTEIO : 0;
+}
+
+static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val)
+{
+ int ret;
+ u8 b0[] = { reg };
+ u8 b1[] = { 0 };
+
+ struct i2c_msg msg[] = {
+ {
+ .addr = state->config->demod_address,
+ .flags = 0,
+ .buf = b0,
+ .len = 1
+ }, {
+ .addr = state->config->demod_address,
+ .flags = I2C_M_RD,
+ .buf = b1,
+ .len = 1
+ }
+ };
+ ret = i2c_transfer(state->i2c_adap, msg, 2);
+ if (ret != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=0x%i)",
+ reg, ret);
+
+ return -EREMOTEIO;
+ }
+ *val = b1[0];
+
+ return ret;
+}
+
+static int CNTM_set(struct mb86a16_state *state,
+ unsigned char timint1,
+ unsigned char timint2,
+ unsigned char cnext)
+{
+ unsigned char val;
+
+ val = (timint1 << 4) | (timint2 << 2) | cnext;
+ if (mb86a16_write(state, MB86A16_CNTMR, val) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int smrt_set(struct mb86a16_state *state, int rate)
+{
+ int tmp ;
+ int m ;
+ unsigned char STOFS0, STOFS1;
+
+ m = 1 << state->deci;
+ tmp = (8192 * state->master_clk - 2 * m * rate * 8192 + state->master_clk / 2) / state->master_clk;
+
+ STOFS0 = tmp & 0x0ff;
+ STOFS1 = (tmp & 0xf00) >> 8;
+
+ if (mb86a16_write(state, MB86A16_SRATE1, (state->deci << 2) |
+ (state->csel << 1) |
+ state->rsel) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_SRATE2, STOFS0) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_SRATE3, STOFS1) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -1;
+}
+
+static int srst(struct mb86a16_state *state)
+{
+ if (mb86a16_write(state, MB86A16_RESET, 0x04) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+
+}
+
+static int afcex_data_set(struct mb86a16_state *state,
+ unsigned char AFCEX_L,
+ unsigned char AFCEX_H)
+{
+ if (mb86a16_write(state, MB86A16_AFCEXL, AFCEX_L) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_AFCEXH, AFCEX_H) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+
+ return -1;
+}
+
+static int afcofs_data_set(struct mb86a16_state *state,
+ unsigned char AFCEX_L,
+ unsigned char AFCEX_H)
+{
+ if (mb86a16_write(state, 0x58, AFCEX_L) < 0)
+ goto err;
+ if (mb86a16_write(state, 0x59, AFCEX_H) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int stlp_set(struct mb86a16_state *state,
+ unsigned char STRAS,
+ unsigned char STRBS)
+{
+ if (mb86a16_write(state, MB86A16_STRFILTCOEF1, (STRBS << 3) | (STRAS)) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int Vi_set(struct mb86a16_state *state, unsigned char ETH, unsigned char VIA)
+{
+ if (mb86a16_write(state, MB86A16_VISET2, 0x04) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_VISET3, 0xf5) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int initial_set(struct mb86a16_state *state)
+{
+ if (stlp_set(state, 5, 7))
+ goto err;
+
+ udelay(100);
+ if (afcex_data_set(state, 0, 0))
+ goto err;
+
+ udelay(100);
+ if (afcofs_data_set(state, 0, 0))
+ goto err;
+
+ udelay(100);
+ if (mb86a16_write(state, MB86A16_CRLFILTCOEF1, 0x16) < 0)
+ goto err;
+ if (mb86a16_write(state, 0x2f, 0x21) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_VIMAG, 0x38) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS1, 0x00) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS2, 0x1c) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS3, 0x20) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS4, 0x1e) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_FAGCS5, 0x23) < 0)
+ goto err;
+ if (mb86a16_write(state, 0x54, 0xff) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_TSOUT, 0x00) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int S01T_set(struct mb86a16_state *state,
+ unsigned char s1t,
+ unsigned s0t)
+{
+ if (mb86a16_write(state, 0x33, (s1t << 3) | s0t) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+
+static int EN_set(struct mb86a16_state *state,
+ int cren,
+ int afcen)
+{
+ unsigned char val;
+
+ val = 0x7a | (cren << 7) | (afcen << 2);
+ if (mb86a16_write(state, 0x49, val) < 0)
+ goto err;
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int AFCEXEN_set(struct mb86a16_state *state,
+ int afcexen,
+ int smrt)
+{
+ unsigned char AFCA ;
+
+ if (smrt > 18875)
+ AFCA = 4;
+ else if (smrt > 9375)
+ AFCA = 3;
+ else if (smrt > 2250)
+ AFCA = 2;
+ else
+ AFCA = 1;
+
+ if (mb86a16_write(state, 0x2a, 0x02 | (afcexen << 5) | (AFCA << 2)) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int DAGC_data_set(struct mb86a16_state *state,
+ unsigned char DAGCA,
+ unsigned char DAGCW)
+{
+ if (mb86a16_write(state, 0x2d, (DAGCA << 3) | DAGCW) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static void smrt_info_get(struct mb86a16_state *state, int rate)
+{
+ if (rate >= 37501) {
+ state->deci = 0; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 30001) {
+ state->deci = 0; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 26251) {
+ state->deci = 0; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 22501) {
+ state->deci = 0; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 18751) {
+ state->deci = 1; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 15001) {
+ state->deci = 1; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 13126) {
+ state->deci = 1; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 11251) {
+ state->deci = 1; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 9376) {
+ state->deci = 2; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 7501) {
+ state->deci = 2; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 6563) {
+ state->deci = 2; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 5626) {
+ state->deci = 2; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 4688) {
+ state->deci = 3; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 3751) {
+ state->deci = 3; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 3282) {
+ state->deci = 3; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 2814) {
+ state->deci = 3; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 2344) {
+ state->deci = 4; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 1876) {
+ state->deci = 4; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 1641) {
+ state->deci = 4; state->csel = 1; state->rsel = 0;
+ } else if (rate >= 1407) {
+ state->deci = 4; state->csel = 1; state->rsel = 1;
+ } else if (rate >= 1172) {
+ state->deci = 5; state->csel = 0; state->rsel = 0;
+ } else if (rate >= 939) {
+ state->deci = 5; state->csel = 0; state->rsel = 1;
+ } else if (rate >= 821) {
+ state->deci = 5; state->csel = 1; state->rsel = 0;
+ } else {
+ state->deci = 5; state->csel = 1; state->rsel = 1;
+ }
+
+ if (state->csel == 0)
+ state->master_clk = 92000;
+ else
+ state->master_clk = 61333;
+
+}
+
+static int signal_det(struct mb86a16_state *state,
+ int smrt,
+ unsigned char *SIG)
+{
+
+ int ret ;
+ int smrtd ;
+ int wait_sym ;
+
+ u32 wait_t;
+ unsigned char S[3] ;
+ int i ;
+
+ if (*SIG > 45) {
+ if (CNTM_set(state, 2, 1, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
+ return -1;
+ }
+ wait_sym = 40000;
+ } else {
+ if (CNTM_set(state, 3, 1, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
+ return -1;
+ }
+ wait_sym = 80000;
+ }
+ for (i = 0; i < 3; i++) {
+ if (i == 0)
+ smrtd = smrt * 98 / 100;
+ else if (i == 1)
+ smrtd = smrt;
+ else
+ smrtd = smrt * 102 / 100;
+ smrt_info_get(state, smrtd);
+ smrt_set(state, smrtd);
+ srst(state);
+ wait_t = (wait_sym + 99 * smrtd / 100) / smrtd;
+ if (wait_t == 0)
+ wait_t = 1;
+ msleep_interruptible(10);
+ if (mb86a16_read(state, 0x37, &(S[i])) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+ }
+ if ((S[1] > S[0] * 112 / 100) &&
+ (S[1] > S[2] * 112 / 100)) {
+
+ ret = 1;
+ } else {
+ ret = 0;
+ }
+ *SIG = S[1];
+
+ if (CNTM_set(state, 0, 1, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
+ return -1;
+ }
+
+ return ret;
+}
+
+static int rf_val_set(struct mb86a16_state *state,
+ int f,
+ int smrt,
+ unsigned char R)
+{
+ unsigned char C, F, B;
+ int M;
+ unsigned char rf_val[5];
+ int ack = -1;
+
+ if (smrt > 37750)
+ C = 1;
+ else if (smrt > 18875)
+ C = 2;
+ else if (smrt > 5500)
+ C = 3;
+ else
+ C = 4;
+
+ if (smrt > 30500)
+ F = 3;
+ else if (smrt > 9375)
+ F = 1;
+ else if (smrt > 4625)
+ F = 0;
+ else
+ F = 2;
+
+ if (f < 1060)
+ B = 0;
+ else if (f < 1175)
+ B = 1;
+ else if (f < 1305)
+ B = 2;
+ else if (f < 1435)
+ B = 3;
+ else if (f < 1570)
+ B = 4;
+ else if (f < 1715)
+ B = 5;
+ else if (f < 1845)
+ B = 6;
+ else if (f < 1980)
+ B = 7;
+ else if (f < 2080)
+ B = 8;
+ else
+ B = 9;
+
+ M = f * (1 << R) / 2;
+
+ rf_val[0] = 0x01 | (C << 3) | (F << 1);
+ rf_val[1] = (R << 5) | ((M & 0x1f000) >> 12);
+ rf_val[2] = (M & 0x00ff0) >> 4;
+ rf_val[3] = ((M & 0x0000f) << 4) | B;
+
+ /* Frequency Set */
+ if (mb86a16_write(state, 0x21, rf_val[0]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x22, rf_val[1]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x23, rf_val[2]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x24, rf_val[3]) < 0)
+ ack = 0;
+ if (mb86a16_write(state, 0x25, 0x01) < 0)
+ ack = 0;
+ if (ack == 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "RF Setup - I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int afcerr_chk(struct mb86a16_state *state)
+{
+ unsigned char AFCM_L, AFCM_H ;
+ int AFCM ;
+ int afcm, afcerr ;
+
+ if (mb86a16_read(state, 0x0e, &AFCM_L) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x0f, &AFCM_H) != 2)
+ goto err;
+
+ AFCM = (AFCM_H << 8) + AFCM_L;
+
+ if (AFCM > 2048)
+ afcm = AFCM - 4096;
+ else
+ afcm = AFCM;
+ afcerr = afcm * state->master_clk / 8192;
+
+ return afcerr;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int dagcm_val_get(struct mb86a16_state *state)
+{
+ int DAGCM;
+ unsigned char DAGCM_H, DAGCM_L;
+
+ if (mb86a16_read(state, 0x45, &DAGCM_L) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x46, &DAGCM_H) != 2)
+ goto err;
+
+ DAGCM = (DAGCM_H << 8) + DAGCM_L;
+
+ return DAGCM;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+ u8 stat, stat2;
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ *status = 0;
+
+ if (mb86a16_read(state, MB86A16_SIG1, &stat) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_SIG2, &stat2) != 2)
+ goto err;
+ if ((stat > 25) && (stat2 > 25))
+ *status |= FE_HAS_SIGNAL;
+ if ((stat > 45) && (stat2 > 45))
+ *status |= FE_HAS_CARRIER;
+
+ if (mb86a16_read(state, MB86A16_STATUS, &stat) != 2)
+ goto err;
+
+ if (stat & 0x01)
+ *status |= FE_HAS_SYNC;
+ if (stat & 0x01)
+ *status |= FE_HAS_VITERBI;
+
+ if (mb86a16_read(state, MB86A16_FRAMESYNC, &stat) != 2)
+ goto err;
+
+ if ((stat & 0x0f) && (*status & FE_HAS_VITERBI))
+ *status |= FE_HAS_LOCK;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int sync_chk(struct mb86a16_state *state,
+ unsigned char *VIRM)
+{
+ unsigned char val;
+ int sync;
+
+ if (mb86a16_read(state, 0x0d, &val) != 2)
+ goto err;
+
+ dprintk(verbose, MB86A16_INFO, 1, "Status = %02x,", val);
+ sync = val & 0x01;
+ *VIRM = (val & 0x1c) >> 2;
+
+ return sync;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+
+}
+
+static int freqerr_chk(struct mb86a16_state *state,
+ int fTP,
+ int smrt,
+ int unit)
+{
+ unsigned char CRM, AFCML, AFCMH;
+ unsigned char temp1, temp2, temp3;
+ int crm, afcm, AFCM;
+ int crrerr, afcerr; /* kHz */
+ int frqerr; /* MHz */
+ int afcen, afcexen = 0;
+ int R, M, fOSC, fOSC_OFS;
+
+ if (mb86a16_read(state, 0x43, &CRM) != 2)
+ goto err;
+
+ if (CRM > 127)
+ crm = CRM - 256;
+ else
+ crm = CRM;
+
+ crrerr = smrt * crm / 256;
+ if (mb86a16_read(state, 0x49, &temp1) != 2)
+ goto err;
+
+ afcen = (temp1 & 0x04) >> 2;
+ if (afcen == 0) {
+ if (mb86a16_read(state, 0x2a, &temp1) != 2)
+ goto err;
+ afcexen = (temp1 & 0x20) >> 5;
+ }
+
+ if (afcen == 1) {
+ if (mb86a16_read(state, 0x0e, &AFCML) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x0f, &AFCMH) != 2)
+ goto err;
+ } else if (afcexen == 1) {
+ if (mb86a16_read(state, 0x2b, &AFCML) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x2c, &AFCMH) != 2)
+ goto err;
+ }
+ if ((afcen == 1) || (afcexen == 1)) {
+ smrt_info_get(state, smrt);
+ AFCM = ((AFCMH & 0x01) << 8) + AFCML;
+ if (AFCM > 255)
+ afcm = AFCM - 512;
+ else
+ afcm = AFCM;
+
+ afcerr = afcm * state->master_clk / 8192;
+ } else
+ afcerr = 0;
+
+ if (mb86a16_read(state, 0x22, &temp1) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x23, &temp2) != 2)
+ goto err;
+ if (mb86a16_read(state, 0x24, &temp3) != 2)
+ goto err;
+
+ R = (temp1 & 0xe0) >> 5;
+ M = ((temp1 & 0x1f) << 12) + (temp2 << 4) + (temp3 >> 4);
+ if (R == 0)
+ fOSC = 2 * M;
+ else
+ fOSC = M;
+
+ fOSC_OFS = fOSC - fTP;
+
+ if (unit == 0) { /* MHz */
+ if (crrerr + afcerr + fOSC_OFS * 1000 >= 0)
+ frqerr = (crrerr + afcerr + fOSC_OFS * 1000 + 500) / 1000;
+ else
+ frqerr = (crrerr + afcerr + fOSC_OFS * 1000 - 500) / 1000;
+ } else { /* kHz */
+ frqerr = crrerr + afcerr + fOSC_OFS * 1000;
+ }
+
+ return frqerr;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static unsigned char vco_dev_get(struct mb86a16_state *state, int smrt)
+{
+ unsigned char R;
+
+ if (smrt > 9375)
+ R = 0;
+ else
+ R = 1;
+
+ return R;
+}
+
+static void swp_info_get(struct mb86a16_state *state,
+ int fOSC_start,
+ int smrt,
+ int v, int R,
+ int swp_ofs,
+ int *fOSC,
+ int *afcex_freq,
+ unsigned char *AFCEX_L,
+ unsigned char *AFCEX_H)
+{
+ int AFCEX ;
+ int crnt_swp_freq ;
+
+ crnt_swp_freq = fOSC_start * 1000 + v * swp_ofs;
+
+ if (R == 0)
+ *fOSC = (crnt_swp_freq + 1000) / 2000 * 2;
+ else
+ *fOSC = (crnt_swp_freq + 500) / 1000;
+
+ if (*fOSC >= crnt_swp_freq)
+ *afcex_freq = *fOSC * 1000 - crnt_swp_freq;
+ else
+ *afcex_freq = crnt_swp_freq - *fOSC * 1000;
+
+ AFCEX = *afcex_freq * 8192 / state->master_clk;
+ *AFCEX_L = AFCEX & 0x00ff;
+ *AFCEX_H = (AFCEX & 0x0f00) >> 8;
+}
+
+
+static int swp_freq_calcuation(struct mb86a16_state *state, int i, int v, int *V, int vmax, int vmin,
+ int SIGMIN, int fOSC, int afcex_freq, int swp_ofs, unsigned char *SIG1)
+{
+ int swp_freq ;
+
+ if ((i % 2 == 1) && (v <= vmax)) {
+ /* positive v (case 1) */
+ if ((v - 1 == vmin) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v - 1) >= 0) &&
+ (*(V + 30 + v - 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v - 1) > SIGMIN)) {
+
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
+ *SIG1 = *(V + 30 + v - 1);
+ } else if ((v == vmax) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v - 1) >= 0) &&
+ (*(V + 30 + v) > *(V + 30 + v - 1)) &&
+ (*(V + 30 + v) > SIGMIN)) {
+ /* (case 2) */
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else if ((*(V + 30 + v) > 0) &&
+ (*(V + 30 + v - 1) > 0) &&
+ (*(V + 30 + v - 2) > 0) &&
+ (*(V + 30 + v - 3) > 0) &&
+ (*(V + 30 + v - 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v - 2) > *(V + 30 + v - 3)) &&
+ ((*(V + 30 + v - 1) > SIGMIN) ||
+ (*(V + 30 + v - 2) > SIGMIN))) {
+ /* (case 3) */
+ if (*(V + 30 + v - 1) >= *(V + 30 + v - 2)) {
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
+ *SIG1 = *(V + 30 + v - 1);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs * 2;
+ *SIG1 = *(V + 30 + v - 2);
+ }
+ } else if ((v == vmax) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v - 1) >= 0) &&
+ (*(V + 30 + v - 2) >= 0) &&
+ (*(V + 30 + v) > *(V + 30 + v - 2)) &&
+ (*(V + 30 + v - 1) > *(V + 30 + v - 2)) &&
+ ((*(V + 30 + v) > SIGMIN) ||
+ (*(V + 30 + v - 1) > SIGMIN))) {
+ /* (case 4) */
+ if (*(V + 30 + v) >= *(V + 30 + v - 1)) {
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
+ *SIG1 = *(V + 30 + v - 1);
+ }
+ } else {
+ swp_freq = -1 ;
+ }
+ } else if ((i % 2 == 0) && (v >= vmin)) {
+ /* Negative v (case 1) */
+ if ((*(V + 30 + v) > 0) &&
+ (*(V + 30 + v + 1) > 0) &&
+ (*(V + 30 + v + 2) > 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v + 1) > SIGMIN)) {
+
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ } else if ((v + 1 == vmax) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 1) > SIGMIN)) {
+ /* (case 2) */
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v);
+ } else if ((v == vmin) &&
+ (*(V + 30 + v) > 0) &&
+ (*(V + 30 + v + 1) > 0) &&
+ (*(V + 30 + v + 2) > 0) &&
+ (*(V + 30 + v) > *(V + 30 + v + 1)) &&
+ (*(V + 30 + v) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v) > SIGMIN)) {
+ /* (case 3) */
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else if ((*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 2) >= 0) &&
+ (*(V + 30 + v + 3) >= 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 2) > *(V + 30 + v + 3)) &&
+ ((*(V + 30 + v + 1) > SIGMIN) ||
+ (*(V + 30 + v + 2) > SIGMIN))) {
+ /* (case 4) */
+ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
+ *SIG1 = *(V + 30 + v + 2);
+ }
+ } else if ((*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 2) >= 0) &&
+ (*(V + 30 + v + 3) >= 0) &&
+ (*(V + 30 + v) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
+ (*(V + 30 + v) > *(V + 30 + v + 3)) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v + 3)) &&
+ ((*(V + 30 + v) > SIGMIN) ||
+ (*(V + 30 + v + 1) > SIGMIN))) {
+ /* (case 5) */
+ if (*(V + 30 + v) >= *(V + 30 + v + 1)) {
+ swp_freq = fOSC * 1000 + afcex_freq;
+ *SIG1 = *(V + 30 + v);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ }
+ } else if ((v + 2 == vmin) &&
+ (*(V + 30 + v) >= 0) &&
+ (*(V + 30 + v + 1) >= 0) &&
+ (*(V + 30 + v + 2) >= 0) &&
+ (*(V + 30 + v + 1) > *(V + 30 + v)) &&
+ (*(V + 30 + v + 2) > *(V + 30 + v)) &&
+ ((*(V + 30 + v + 1) > SIGMIN) ||
+ (*(V + 30 + v + 2) > SIGMIN))) {
+ /* (case 6) */
+ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
+ *SIG1 = *(V + 30 + v + 1);
+ } else {
+ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
+ *SIG1 = *(V + 30 + v + 2);
+ }
+ } else if ((vmax == 0) && (vmin == 0) && (*(V + 30 + v) > SIGMIN)) {
+ swp_freq = fOSC * 1000;
+ *SIG1 = *(V + 30 + v);
+ } else
+ swp_freq = -1;
+ } else
+ swp_freq = -1;
+
+ return swp_freq;
+}
+
+static void swp_info_get2(struct mb86a16_state *state,
+ int smrt,
+ int R,
+ int swp_freq,
+ int *afcex_freq,
+ int *fOSC,
+ unsigned char *AFCEX_L,
+ unsigned char *AFCEX_H)
+{
+ int AFCEX ;
+
+ if (R == 0)
+ *fOSC = (swp_freq + 1000) / 2000 * 2;
+ else
+ *fOSC = (swp_freq + 500) / 1000;
+
+ if (*fOSC >= swp_freq)
+ *afcex_freq = *fOSC * 1000 - swp_freq;
+ else
+ *afcex_freq = swp_freq - *fOSC * 1000;
+
+ AFCEX = *afcex_freq * 8192 / state->master_clk;
+ *AFCEX_L = AFCEX & 0x00ff;
+ *AFCEX_H = (AFCEX & 0x0f00) >> 8;
+}
+
+static void afcex_info_get(struct mb86a16_state *state,
+ int afcex_freq,
+ unsigned char *AFCEX_L,
+ unsigned char *AFCEX_H)
+{
+ int AFCEX ;
+
+ AFCEX = afcex_freq * 8192 / state->master_clk;
+ *AFCEX_L = AFCEX & 0x00ff;
+ *AFCEX_H = (AFCEX & 0x0f00) >> 8;
+}
+
+static int SEQ_set(struct mb86a16_state *state, unsigned char loop)
+{
+ /* SLOCK0 = 0 */
+ if (mb86a16_write(state, 0x32, 0x02 | (loop << 2)) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int iq_vt_set(struct mb86a16_state *state, unsigned char IQINV)
+{
+ /* Viterbi Rate, IQ Settings */
+ if (mb86a16_write(state, 0x06, 0xdf | (IQINV << 5)) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int FEC_srst(struct mb86a16_state *state)
+{
+ if (mb86a16_write(state, MB86A16_RESET, 0x02) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int S2T_set(struct mb86a16_state *state, unsigned char S2T)
+{
+ if (mb86a16_write(state, 0x34, 0x70 | S2T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+static int S45T_set(struct mb86a16_state *state, unsigned char S4T, unsigned char S5T)
+{
+ if (mb86a16_write(state, 0x35, 0x00 | (S5T << 4) | S4T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
+
+static int mb86a16_set_fe(struct mb86a16_state *state)
+{
+ u8 agcval, cnmval;
+
+ int i, j;
+ int fOSC = 0;
+ int fOSC_start = 0;
+ int wait_t;
+ int fcp;
+ int swp_ofs;
+ int V[60];
+ u8 SIG1MIN;
+
+ unsigned char CREN, AFCEN, AFCEXEN;
+ unsigned char SIG1;
+ unsigned char TIMINT1, TIMINT2, TIMEXT;
+ unsigned char S0T, S1T;
+ unsigned char S2T;
+/* unsigned char S2T, S3T; */
+ unsigned char S4T, S5T;
+ unsigned char AFCEX_L, AFCEX_H;
+ unsigned char R;
+ unsigned char VIRM;
+ unsigned char ETH, VIA;
+ unsigned char junk;
+
+ int loop;
+ int ftemp;
+ int v, vmax, vmin;
+ int vmax_his, vmin_his;
+ int swp_freq, prev_swp_freq[20];
+ int prev_freq_num;
+ int signal_dupl;
+ int afcex_freq;
+ int signal;
+ int afcerr;
+ int temp_freq, delta_freq;
+ int dagcm[4];
+ int smrt_d;
+/* int freq_err; */
+ int n;
+ int ret = -1;
+ int sync;
+
+ dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
+
+ fcp = 3000;
+ swp_ofs = state->srate / 4;
+
+ for (i = 0; i < 60; i++)
+ V[i] = -1;
+
+ for (i = 0; i < 20; i++)
+ prev_swp_freq[i] = 0;
+
+ SIG1MIN = 25;
+
+ for (n = 0; ((n < 3) && (ret == -1)); n++) {
+ SEQ_set(state, 0);
+ iq_vt_set(state, 0);
+
+ CREN = 0;
+ AFCEN = 0;
+ AFCEXEN = 1;
+ TIMINT1 = 0;
+ TIMINT2 = 1;
+ TIMEXT = 2;
+ S1T = 0;
+ S0T = 0;
+
+ if (initial_set(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "initial set failed");
+ return -1;
+ }
+ if (DAGC_data_set(state, 3, 2) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
+ return -1;
+ }
+ if (EN_set(state, CREN, AFCEN) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
+ return -1; /* (0, 0) */
+ }
+ if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1; /* (1, smrt) = (1, symbolrate) */
+ }
+ if (CNTM_set(state, TIMINT1, TIMINT2, TIMEXT) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "CNTM set error");
+ return -1; /* (0, 1, 2) */
+ }
+ if (S01T_set(state, S1T, S0T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
+ return -1; /* (0, 0) */
+ }
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt info get error");
+ return -1;
+ }
+
+ R = vco_dev_get(state, state->srate);
+ if (R == 1)
+ fOSC_start = state->frequency;
+
+ else if (R == 0) {
+ if (state->frequency % 2 == 0) {
+ fOSC_start = state->frequency;
+ } else {
+ fOSC_start = state->frequency + 1;
+ if (fOSC_start > 2150)
+ fOSC_start = state->frequency - 1;
+ }
+ }
+ loop = 1;
+ ftemp = fOSC_start * 1000;
+ vmax = 0 ;
+ while (loop == 1) {
+ ftemp = ftemp + swp_ofs;
+ vmax++;
+
+ /* Upper bound */
+ if (ftemp > 2150000) {
+ loop = 0;
+ vmax--;
+ } else {
+ if ((ftemp == 2150000) ||
+ (ftemp - state->frequency * 1000 >= fcp + state->srate / 4))
+ loop = 0;
+ }
+ }
+
+ loop = 1;
+ ftemp = fOSC_start * 1000;
+ vmin = 0 ;
+ while (loop == 1) {
+ ftemp = ftemp - swp_ofs;
+ vmin--;
+
+ /* Lower bound */
+ if (ftemp < 950000) {
+ loop = 0;
+ vmin++;
+ } else {
+ if ((ftemp == 950000) ||
+ (state->frequency * 1000 - ftemp >= fcp + state->srate / 4))
+ loop = 0;
+ }
+ }
+
+ wait_t = (8000 + state->srate / 2) / state->srate;
+ if (wait_t == 0)
+ wait_t = 1;
+
+ i = 0;
+ j = 0;
+ prev_freq_num = 0;
+ loop = 1;
+ signal = 0;
+ vmax_his = 0;
+ vmin_his = 0;
+ v = 0;
+
+ while (loop == 1) {
+ swp_info_get(state, fOSC_start, state->srate,
+ v, R, swp_ofs, &fOSC,
+ &afcex_freq, &AFCEX_L, &AFCEX_H);
+
+ udelay(100);
+ if (rf_val_set(state, fOSC, state->srate, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ udelay(100);
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ if (srst(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "srst error");
+ return -1;
+ }
+ msleep_interruptible(wait_t);
+
+ if (mb86a16_read(state, 0x37, &SIG1) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -1;
+ }
+ V[30 + v] = SIG1 ;
+ swp_freq = swp_freq_calcuation(state, i, v, V, vmax, vmin,
+ SIG1MIN, fOSC, afcex_freq,
+ swp_ofs, &SIG1); /* changed */
+
+ signal_dupl = 0;
+ for (j = 0; j < prev_freq_num; j++) {
+ if ((ABS(prev_swp_freq[j] - swp_freq)) < (swp_ofs * 3 / 2)) {
+ signal_dupl = 1;
+ dprintk(verbose, MB86A16_INFO, 1, "Probably Duplicate Signal, j = %d", j);
+ }
+ }
+ if ((signal_dupl == 0) && (swp_freq > 0) && (ABS(swp_freq - state->frequency * 1000) < fcp + state->srate / 6)) {
+ dprintk(verbose, MB86A16_DEBUG, 1, "------ Signal detect ------ [swp_freq=[%07d, srate=%05d]]", swp_freq, state->srate);
+ prev_swp_freq[prev_freq_num] = swp_freq;
+ prev_freq_num++;
+ swp_info_get2(state, state->srate, R, swp_freq,
+ &afcex_freq, &fOSC,
+ &AFCEX_L, &AFCEX_H);
+
+ if (rf_val_set(state, fOSC, state->srate, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ signal = signal_det(state, state->srate, &SIG1);
+ if (signal == 1) {
+ dprintk(verbose, MB86A16_ERROR, 1, "***** Signal Found *****");
+ loop = 0;
+ } else {
+ dprintk(verbose, MB86A16_ERROR, 1, "!!!!! No signal !!!!!, try again...");
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ }
+ }
+ if (v > vmax)
+ vmax_his = 1 ;
+ if (v < vmin)
+ vmin_his = 1 ;
+ i++;
+
+ if ((i % 2 == 1) && (vmax_his == 1))
+ i++;
+ if ((i % 2 == 0) && (vmin_his == 1))
+ i++;
+
+ if (i % 2 == 1)
+ v = (i + 1) / 2;
+ else
+ v = -i / 2;
+
+ if ((vmax_his == 1) && (vmin_his == 1))
+ loop = 0 ;
+ }
+
+ if (signal == 1) {
+ dprintk(verbose, MB86A16_INFO, 1, " Start Freq Error Check");
+ S1T = 7 ;
+ S0T = 1 ;
+ CREN = 0 ;
+ AFCEN = 1 ;
+ AFCEXEN = 0 ;
+
+ if (S01T_set(state, S1T, S0T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
+ return -1;
+ }
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ if (EN_set(state, CREN, AFCEN) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
+ return -1;
+ }
+ if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1;
+ }
+ afcex_info_get(state, afcex_freq, &AFCEX_L, &AFCEX_H);
+ if (afcofs_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCOFS data set error");
+ return -1;
+ }
+ if (srst(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "srst error");
+ return -1;
+ }
+ /* delay 4~200 */
+ wait_t = 200000 / state->master_clk + 200000 / state->srate;
+ msleep(wait_t);
+ afcerr = afcerr_chk(state);
+ if (afcerr == -1)
+ return -1;
+
+ swp_freq = fOSC * 1000 + afcerr ;
+ AFCEXEN = 1 ;
+ if (state->srate >= 1500)
+ smrt_d = state->srate / 3;
+ else
+ smrt_d = state->srate / 2;
+ smrt_info_get(state, smrt_d);
+ if (smrt_set(state, smrt_d) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ if (AFCEXEN_set(state, AFCEXEN, smrt_d) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1;
+ }
+ R = vco_dev_get(state, smrt_d);
+ if (DAGC_data_set(state, 2, 0) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
+ return -1;
+ }
+ for (i = 0; i < 3; i++) {
+ temp_freq = swp_freq + (i - 1) * state->srate / 8;
+ swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ wait_t = 200000 / state->master_clk + 40000 / smrt_d;
+ msleep(wait_t);
+ dagcm[i] = dagcm_val_get(state);
+ }
+ if ((dagcm[0] > dagcm[1]) &&
+ (dagcm[0] > dagcm[2]) &&
+ (dagcm[0] - dagcm[1] > 2 * (dagcm[2] - dagcm[1]))) {
+
+ temp_freq = swp_freq - 2 * state->srate / 8;
+ swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
+ return -1;
+ }
+ wait_t = 200000 / state->master_clk + 40000 / smrt_d;
+ msleep(wait_t);
+ dagcm[3] = dagcm_val_get(state);
+ if (dagcm[3] > dagcm[1])
+ delta_freq = (dagcm[2] - dagcm[0] + dagcm[1] - dagcm[3]) * state->srate / 300;
+ else
+ delta_freq = 0;
+ } else if ((dagcm[2] > dagcm[1]) &&
+ (dagcm[2] > dagcm[0]) &&
+ (dagcm[2] - dagcm[1] > 2 * (dagcm[0] - dagcm[1]))) {
+
+ temp_freq = swp_freq + 2 * state->srate / 8;
+ swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
+ return -1;
+ }
+ wait_t = 200000 / state->master_clk + 40000 / smrt_d;
+ msleep(wait_t);
+ dagcm[3] = dagcm_val_get(state);
+ if (dagcm[3] > dagcm[1])
+ delta_freq = (dagcm[2] - dagcm[0] + dagcm[3] - dagcm[1]) * state->srate / 300;
+ else
+ delta_freq = 0 ;
+
+ } else {
+ delta_freq = 0 ;
+ }
+ dprintk(verbose, MB86A16_INFO, 1, "SWEEP Frequency = %d", swp_freq);
+ swp_freq += delta_freq;
+ dprintk(verbose, MB86A16_INFO, 1, "Adjusting .., DELTA Freq = %d, SWEEP Freq=%d", delta_freq, swp_freq);
+ if (ABS(state->frequency * 1000 - swp_freq) > 3800) {
+ dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL !");
+ } else {
+
+ S1T = 0;
+ S0T = 3;
+ CREN = 1;
+ AFCEN = 0;
+ AFCEXEN = 1;
+
+ if (S01T_set(state, S1T, S0T) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
+ return -1;
+ }
+ if (DAGC_data_set(state, 0, 0) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
+ return -1;
+ }
+ R = vco_dev_get(state, state->srate);
+ smrt_info_get(state, state->srate);
+ if (smrt_set(state, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
+ return -1;
+ }
+ if (EN_set(state, CREN, AFCEN) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
+ return -1;
+ }
+ if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
+ return -1;
+ }
+ swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
+ if (rf_val_set(state, fOSC, state->srate, R) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
+ return -1;
+ }
+ if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
+ return -1;
+ }
+ if (srst(state) < 0) {
+ dprintk(verbose, MB86A16_ERROR, 1, "srst error");
+ return -1;
+ }
+ wait_t = 7 + (10000 + state->srate / 2) / state->srate;
+ if (wait_t == 0)
+ wait_t = 1;
+ msleep_interruptible(wait_t);
+ if (mb86a16_read(state, 0x37, &SIG1) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ if (SIG1 > 110) {
+ S2T = 4; S4T = 1; S5T = 6; ETH = 4; VIA = 6;
+ wait_t = 7 + (917504 + state->srate / 2) / state->srate;
+ } else if (SIG1 > 105) {
+ S2T = 4; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (1048576 + state->srate / 2) / state->srate;
+ } else if (SIG1 > 85) {
+ S2T = 5; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (1310720 + state->srate / 2) / state->srate;
+ } else if (SIG1 > 65) {
+ S2T = 6; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (1572864 + state->srate / 2) / state->srate;
+ } else {
+ S2T = 7; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
+ wait_t = 7 + (2097152 + state->srate / 2) / state->srate;
+ }
+ wait_t *= 2; /* FOS */
+ S2T_set(state, S2T);
+ S45T_set(state, S4T, S5T);
+ Vi_set(state, ETH, VIA);
+ srst(state);
+ msleep_interruptible(wait_t);
+ sync = sync_chk(state, &VIRM);
+ dprintk(verbose, MB86A16_INFO, 1, "-------- Viterbi=[%d] SYNC=[%d] ---------", VIRM, sync);
+ if (VIRM) {
+ if (VIRM == 4) {
+ /* 5/6 */
+ if (SIG1 > 110)
+ wait_t = (786432 + state->srate / 2) / state->srate;
+ else
+ wait_t = (1572864 + state->srate / 2) / state->srate;
+ if (state->srate < 5000)
+ /* FIXME ! , should be a long wait ! */
+ msleep_interruptible(wait_t);
+ else
+ msleep_interruptible(wait_t);
+
+ if (sync_chk(state, &junk) == 0) {
+ iq_vt_set(state, 1);
+ FEC_srst(state);
+ }
+ }
+ /* 1/2, 2/3, 3/4, 7/8 */
+ if (SIG1 > 110)
+ wait_t = (786432 + state->srate / 2) / state->srate;
+ else
+ wait_t = (1572864 + state->srate / 2) / state->srate;
+ msleep_interruptible(wait_t);
+ SEQ_set(state, 1);
+ } else {
+ dprintk(verbose, MB86A16_INFO, 1, "NO -- SYNC");
+ SEQ_set(state, 1);
+ ret = -1;
+ }
+ }
+ } else {
+ dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL");
+ ret = -1;
+ }
+
+ sync = sync_chk(state, &junk);
+ if (sync) {
+ dprintk(verbose, MB86A16_INFO, 1, "******* SYNC *******");
+ freqerr_chk(state, state->frequency, state->srate, 1);
+ ret = 0;
+ break;
+ }
+ }
+
+ mb86a16_read(state, 0x15, &agcval);
+ mb86a16_read(state, 0x26, &cnmval);
+ dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval);
+
+ return ret;
+}
+
+static int mb86a16_send_diseqc_msg(struct dvb_frontend *fe,
+ struct dvb_diseqc_master_cmd *cmd)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+ int i;
+ u8 regs;
+
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
+ goto err;
+
+ regs = 0x18;
+
+ if (cmd->msg_len > 5 || cmd->msg_len < 4)
+ return -EINVAL;
+
+ for (i = 0; i < cmd->msg_len; i++) {
+ if (mb86a16_write(state, regs, cmd->msg[i]) < 0)
+ goto err;
+
+ regs++;
+ }
+ i += 0x90;
+
+ msleep_interruptible(10);
+
+ if (mb86a16_write(state, MB86A16_DCC1, i) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ switch (burst) {
+ case SEC_MINI_A:
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
+ MB86A16_DCC1_TBEN |
+ MB86A16_DCC1_TBO) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+ break;
+ case SEC_MINI_B:
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
+ MB86A16_DCC1_TBEN) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+ break;
+ }
+
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ switch (tone) {
+ case SEC_TONE_ON:
+ if (mb86a16_write(state, MB86A16_TONEOUT2, 0x00) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
+ MB86A16_DCC1_CTOE) < 0)
+
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
+ goto err;
+ break;
+ case SEC_TONE_OFF:
+ if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
+ goto err;
+ if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
+ goto err;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ state->frequency = p->frequency / 1000;
+ state->srate = p->u.qpsk.symbol_rate / 1000;
+
+ if (!mb86a16_set_fe(state)) {
+ dprintk(verbose, MB86A16_ERROR, 1, "Succesfully acquired LOCK");
+ return DVBFE_ALGO_SEARCH_SUCCESS;
+ }
+
+ dprintk(verbose, MB86A16_ERROR, 1, "Lock acquisition failed!");
+ return DVBFE_ALGO_SEARCH_FAILED;
+}
+
+static void mb86a16_release(struct dvb_frontend *fe)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+ kfree(state);
+}
+
+static int mb86a16_init(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int mb86a16_sleep(struct dvb_frontend *fe)
+{
+ return 0;
+}
+
+static int mb86a16_read_ber(struct dvb_frontend *fe, u32 *ber)
+{
+ u8 ber_mon, ber_tab, ber_lsb, ber_mid, ber_msb, ber_tim, ber_rst;
+ u32 timer;
+
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ *ber = 0;
+ if (mb86a16_read(state, MB86A16_BERMON, &ber_mon) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERTAB, &ber_tab) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERLSB, &ber_lsb) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERMID, &ber_mid) != 2)
+ goto err;
+ if (mb86a16_read(state, MB86A16_BERMSB, &ber_msb) != 2)
+ goto err;
+ /* BER monitor invalid when BER_EN = 0 */
+ if (ber_mon & 0x04) {
+ /* coarse, fast calculation */
+ *ber = ber_tab & 0x1f;
+ dprintk(verbose, MB86A16_DEBUG, 1, "BER coarse=[0x%02x]", *ber);
+ if (ber_mon & 0x01) {
+ /*
+ * BER_SEL = 1, The monitored BER is the estimated
+ * value with a Reed-Solomon decoder error amount at
+ * the deinterleaver output.
+ * monitored BER is expressed as a 20 bit output in total
+ */
+ ber_rst = ber_mon >> 3;
+ *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
+ if (ber_rst == 0)
+ timer = 12500000;
+ if (ber_rst == 1)
+ timer = 25000000;
+ if (ber_rst == 2)
+ timer = 50000000;
+ if (ber_rst == 3)
+ timer = 100000000;
+
+ *ber /= timer;
+ dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
+ } else {
+ /*
+ * BER_SEL = 0, The monitored BER is the estimated
+ * value with a Viterbi decoder error amount at the
+ * QPSK demodulator output.
+ * monitored BER is expressed as a 24 bit output in total
+ */
+ ber_tim = ber_mon >> 1;
+ *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
+ if (ber_tim == 0)
+ timer = 16;
+ if (ber_tim == 1)
+ timer = 24;
+
+ *ber /= 2 ^ timer;
+ dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
+ }
+ }
+ return 0;
+err:
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+}
+
+static int mb86a16_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
+{
+ u8 agcm = 0;
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ *strength = 0;
+ if (mb86a16_read(state, MB86A16_AGCM, &agcm) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ *strength = ((0xff - agcm) * 100) / 256;
+ dprintk(verbose, MB86A16_DEBUG, 1, "Signal strength=[%d %%]", (u8) *strength);
+ *strength = (0xffff - 0xff) + agcm;
+
+ return 0;
+}
+
+struct cnr {
+ u8 cn_reg;
+ u8 cn_val;
+};
+
+static const struct cnr cnr_tab[] = {
+ { 35, 2 },
+ { 40, 3 },
+ { 50, 4 },
+ { 60, 5 },
+ { 70, 6 },
+ { 80, 7 },
+ { 92, 8 },
+ { 103, 9 },
+ { 115, 10 },
+ { 138, 12 },
+ { 162, 15 },
+ { 180, 18 },
+ { 185, 19 },
+ { 189, 20 },
+ { 195, 22 },
+ { 199, 24 },
+ { 201, 25 },
+ { 202, 26 },
+ { 203, 27 },
+ { 205, 28 },
+ { 208, 30 }
+};
+
+static int mb86a16_read_snr(struct dvb_frontend *fe, u16 *snr)
+{
+ struct mb86a16_state *state = fe->demodulator_priv;
+ int i = 0;
+ int low_tide = 2, high_tide = 30, q_level;
+ u8 cn;
+
+ *snr = 0;
+ if (mb86a16_read(state, 0x26, &cn) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cnr_tab); i++) {
+ if (cn < cnr_tab[i].cn_reg) {
+ *snr = cnr_tab[i].cn_val;
+ break;
+ }
+ }
+ q_level = (*snr * 100) / (high_tide - low_tide);
+ dprintk(verbose, MB86A16_ERROR, 1, "SNR (Quality) = [%d dB], Level=%d %%", *snr, q_level);
+ *snr = (0xffff - 0xff) + *snr;
+
+ return 0;
+}
+
+static int mb86a16_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
+{
+ u8 dist;
+ struct mb86a16_state *state = fe->demodulator_priv;
+
+ if (mb86a16_read(state, MB86A16_DISTMON, &dist) != 2) {
+ dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
+ return -EREMOTEIO;
+ }
+ *ucblocks = dist;
+
+ return 0;
+}
+
+static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
+{
+ return DVBFE_ALGO_CUSTOM;
+}
+
+static struct dvb_frontend_ops mb86a16_ops = {
+ .info = {
+ .name = "Fujitsu MB86A16 DVB-S",
+ .type = FE_QPSK,
+ .frequency_min = 950000,
+ .frequency_max = 2150000,
+ .frequency_stepsize = 3000,
+ .frequency_tolerance = 0,
+ .symbol_rate_min = 1000000,
+ .symbol_rate_max = 45000000,
+ .symbol_rate_tolerance = 500,
+ .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
+ FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
+ FE_CAN_FEC_7_8 | FE_CAN_QPSK |
+ FE_CAN_FEC_AUTO
+ },
+ .release = mb86a16_release,
+
+ .get_frontend_algo = mb86a16_frontend_algo,
+ .search = mb86a16_search,
+ .read_status = mb86a16_read_status,
+ .init = mb86a16_init,
+ .sleep = mb86a16_sleep,
+ .read_status = mb86a16_read_status,
+
+ .read_ber = mb86a16_read_ber,
+ .read_signal_strength = mb86a16_read_signal_strength,
+ .read_snr = mb86a16_read_snr,
+ .read_ucblocks = mb86a16_read_ucblocks,
+
+ .diseqc_send_master_cmd = mb86a16_send_diseqc_msg,
+ .diseqc_send_burst = mb86a16_send_diseqc_burst,
+ .set_tone = mb86a16_set_tone,
+};
+
+struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ u8 dev_id = 0;
+ struct mb86a16_state *state = NULL;
+
+ state = kmalloc(sizeof(struct mb86a16_state), GFP_KERNEL);
+ if (state == NULL)
+ goto error;
+
+ state->config = config;
+ state->i2c_adap = i2c_adap;
+
+ mb86a16_read(state, 0x7f, &dev_id);
+ if (dev_id != 0xfe)
+ goto error;
+
+ memcpy(&state->frontend.ops, &mb86a16_ops, sizeof(struct dvb_frontend_ops));
+ state->frontend.demodulator_priv = state;
+ state->frontend.ops.set_voltage = state->config->set_voltage;
+
+ return &state->frontend;
+error:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(mb86a16_attach);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Manu Abraham");
diff --git a/drivers/media/dvb/frontends/mb86a16.h b/drivers/media/dvb/frontends/mb86a16.h
new file mode 100644
index 000000000000..6ea8c376394f
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16.h
@@ -0,0 +1,52 @@
+/*
+ Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MB86A16_H
+#define __MB86A16_H
+
+#include <linux/dvb/frontend.h>
+#include "dvb_frontend.h"
+
+
+struct mb86a16_config {
+ u8 demod_address;
+
+ int (*set_voltage)(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
+};
+
+
+
+#if defined(CONFIG_DVB_MB86A16) || (defined(CONFIG_DVB_MB86A16_MODULE) && defined(MODULE))
+
+extern struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
+ struct i2c_adapter *i2c_adap);
+
+#else
+
+static inline struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
+ struct i2c_adapter *i2c_adap)
+{
+ printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_MB86A16 */
+
+#endif /* __MB86A16_H */
diff --git a/drivers/media/dvb/frontends/mb86a16_priv.h b/drivers/media/dvb/frontends/mb86a16_priv.h
new file mode 100644
index 000000000000..360a35acfe84
--- /dev/null
+++ b/drivers/media/dvb/frontends/mb86a16_priv.h
@@ -0,0 +1,151 @@
+/*
+ Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MB86A16_PRIV_H
+#define __MB86A16_PRIV_H
+
+#define MB86A16_TSOUT 0x00
+#define MB86A16_TSOUT_HIZSEL (0x01 << 5)
+#define MB86A16_TSOUT_HIZCNTI (0x01 << 4)
+#define MB86A16_TSOUT_MODE (0x01 << 3)
+#define MB86A16_TSOUT_ORDER (0x01 << 2)
+#define MB86A16_TSOUT_ERROR (0x01 << 1)
+#define Mb86A16_TSOUT_EDGE (0x01 << 0)
+
+#define MB86A16_FEC 0x01
+#define MB86A16_FEC_FSYNC (0x01 << 5)
+#define MB86A16_FEC_PCKB8 (0x01 << 4)
+#define MB86A16_FEC_DVDS (0x01 << 3)
+#define MB86A16_FEC_EREN (0x01 << 2)
+#define Mb86A16_FEC_RSEN (0x01 << 1)
+#define MB86A16_FEC_DIEN (0x01 << 0)
+
+#define MB86A16_AGC 0x02
+#define MB86A16_AGC_AGMD (0x01 << 6)
+#define MB86A16_AGC_AGCW (0x0f << 2)
+#define MB86A16_AGC_AGCP (0x01 << 1)
+#define MB86A16_AGC_AGCR (0x01 << 0)
+
+#define MB86A16_SRATE1 0x03
+#define MB86A16_SRATE1_DECI (0x07 << 2)
+#define MB86A16_SRATE1_CSEL (0x01 << 1)
+#define MB86A16_SRATE1_RSEL (0x01 << 0)
+
+#define MB86A16_SRATE2 0x04
+#define MB86A16_SRATE2_STOFSL (0xff << 0)
+
+#define MB86A16_SRATE3 0x05
+#define MB86A16_SRATE2_STOFSH (0xff << 0)
+
+#define MB86A16_VITERBI 0x06
+#define MB86A16_FRAMESYNC 0x07
+#define MB86A16_CRLFILTCOEF1 0x08
+#define MB86A16_CRLFILTCOEF2 0x09
+#define MB86A16_STRFILTCOEF1 0x0a
+#define MB86A16_STRFILTCOEF2 0x0b
+#define MB86A16_RESET 0x0c
+#define MB86A16_STATUS 0x0d
+#define MB86A16_AFCML 0x0e
+#define MB86A16_AFCMH 0x0f
+#define MB86A16_BERMON 0x10
+#define MB86A16_BERTAB 0x11
+#define MB86A16_BERLSB 0x12
+#define MB86A16_BERMID 0x13
+#define MB86A16_BERMSB 0x14
+#define MB86A16_AGCM 0x15
+
+#define MB86A16_DCC1 0x16
+#define MB86A16_DCC1_DISTA (0x01 << 7)
+#define MB86A16_DCC1_PRTY (0x01 << 6)
+#define MB86A16_DCC1_CTOE (0x01 << 5)
+#define MB86A16_DCC1_TBEN (0x01 << 4)
+#define MB86A16_DCC1_TBO (0x01 << 3)
+#define MB86A16_DCC1_NUM (0x07 << 0)
+
+#define MB86A16_DCC2 0x17
+#define MB86A16_DCC2_DCBST (0x01 << 0)
+
+#define MB86A16_DCC3 0x18
+#define MB86A16_DCC3_CODE0 (0xff << 0)
+
+#define MB86A16_DCC4 0x19
+#define MB86A16_DCC4_CODE1 (0xff << 0)
+
+#define MB86A16_DCC5 0x1a
+#define MB86A16_DCC5_CODE2 (0xff << 0)
+
+#define MB86A16_DCC6 0x1b
+#define MB86A16_DCC6_CODE3 (0xff << 0)
+
+#define MB86A16_DCC7 0x1c
+#define MB86A16_DCC7_CODE4 (0xff << 0)
+
+#define MB86A16_DCC8 0x1d
+#define MB86A16_DCC8_CODE5 (0xff << 0)
+
+#define MB86A16_DCCOUT 0x1e
+#define MB86A16_DCCOUT_DISEN (0x01 << 0)
+
+#define MB86A16_TONEOUT1 0x1f
+#define MB86A16_TONE_TDIVL (0xff << 0)
+
+#define MB86A16_TONEOUT2 0x20
+#define MB86A16_TONE_TMD (0x03 << 2)
+#define MB86A16_TONE_TDIVH (0x03 << 0)
+
+#define MB86A16_FREQ1 0x21
+#define MB86A16_FREQ2 0x22
+#define MB86A16_FREQ3 0x23
+#define MB86A16_FREQ4 0x24
+#define MB86A16_FREQSET 0x25
+#define MB86A16_CNM 0x26
+#define MB86A16_PORT0 0x27
+#define MB86A16_PORT1 0x28
+#define MB86A16_DRCFILT 0x29
+#define MB86A16_AFC 0x2a
+#define MB86A16_AFCEXL 0x2b
+#define MB86A16_AFCEXH 0x2c
+#define MB86A16_DAGC 0x2d
+#define MB86A16_SEQMODE 0x32
+#define MB86A16_S0S1T 0x33
+#define MB86A16_S2S3T 0x34
+#define MB86A16_S4S5T 0x35
+#define MB86A16_CNTMR 0x36
+#define MB86A16_SIG1 0x37
+#define MB86A16_SIG2 0x38
+#define MB86A16_VIMAG 0x39
+#define MB86A16_VISET1 0x3a
+#define MB86A16_VISET2 0x3b
+#define MB86A16_VISET3 0x3c
+#define MB86A16_FAGCS1 0x3d
+#define MB86A16_FAGCS2 0x3e
+#define MB86A16_FAGCS3 0x3f
+#define MB86A16_FAGCS4 0x40
+#define MB86A16_FAGCS5 0x41
+#define MB86A16_FAGCS6 0x42
+#define MB86A16_CRM 0x43
+#define MB86A16_STRM 0x44
+#define MB86A16_DAGCML 0x45
+#define MB86A16_DAGCMH 0x46
+#define MB86A16_QPSKTST 0x49
+#define MB86A16_DISTMON 0x52
+#define MB86A16_VERSION 0x7f
+
+#endif /* __MB86A16_PRIV_H */
diff --git a/drivers/media/dvb/frontends/stv0900.h b/drivers/media/dvb/frontends/stv0900.h
index 29c3fa85c227..e3e35d1ce838 100644
--- a/drivers/media/dvb/frontends/stv0900.h
+++ b/drivers/media/dvb/frontends/stv0900.h
@@ -49,6 +49,8 @@ struct stv0900_config {
u8 tun2_maddress;
u8 tun1_adc;/* 1 for stv6110, 2 for stb6100 */
u8 tun2_adc;
+ u8 tun1_type;/* for now 3 for stb6100 auto, else - software */
+ u8 tun2_type;
/* Set device param to start dma */
int (*set_ts_params)(struct dvb_frontend *fe, int is_punctured);
};
diff --git a/drivers/media/dvb/frontends/stv0900_core.c b/drivers/media/dvb/frontends/stv0900_core.c
index 8762c86044a5..115dc01c2234 100644
--- a/drivers/media/dvb/frontends/stv0900_core.c
+++ b/drivers/media/dvb/frontends/stv0900_core.c
@@ -567,6 +567,46 @@ void stv0900_set_bandwidth(struct dvb_frontend *fe, u32 bandwidth)
}
}
+u32 stv0900_get_freq_auto(struct stv0900_internal *intp, int demod)
+{
+ u32 freq, round;
+ /* Formulat :
+ Tuner_Frequency(MHz) = Regs / 64
+ Tuner_granularity(MHz) = Regs / 2048
+ real_Tuner_Frequency = Tuner_Frequency(MHz) - Tuner_granularity(MHz)
+ */
+ freq = (stv0900_get_bits(intp, TUN_RFFREQ2) << 10) +
+ (stv0900_get_bits(intp, TUN_RFFREQ1) << 2) +
+ stv0900_get_bits(intp, TUN_RFFREQ0);
+
+ freq = (freq * 1000) / 64;
+
+ round = (stv0900_get_bits(intp, TUN_RFRESTE1) >> 2) +
+ stv0900_get_bits(intp, TUN_RFRESTE0);
+
+ round = (round * 1000) / 2048;
+
+ return freq + round;
+}
+
+void stv0900_set_tuner_auto(struct stv0900_internal *intp, u32 Frequency,
+ u32 Bandwidth, int demod)
+{
+ u32 tunerFrequency;
+ /* Formulat:
+ Tuner_frequency_reg= Frequency(MHz)*64
+ */
+ tunerFrequency = (Frequency * 64) / 1000;
+
+ stv0900_write_bits(intp, TUN_RFFREQ2, (tunerFrequency >> 10));
+ stv0900_write_bits(intp, TUN_RFFREQ1, (tunerFrequency >> 2) & 0xff);
+ stv0900_write_bits(intp, TUN_RFFREQ0, (tunerFrequency & 0x03));
+ /* Low Pass Filter = BW /2 (MHz)*/
+ stv0900_write_bits(intp, TUN_BW, Bandwidth / 2000000);
+ /* Tuner Write trig */
+ stv0900_write_reg(intp, TNRLD, 1);
+}
+
static s32 stv0900_get_rf_level(struct stv0900_internal *intp,
const struct stv0900_table *lookup,
enum fe_stv0900_demod_num demod)
@@ -1329,7 +1369,6 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
enum fe_stv0900_error error = STV0900_NO_ERROR;
enum fe_stv0900_error demodError = STV0900_NO_ERROR;
struct stv0900_internal *intp = NULL;
-
int selosci, i;
struct stv0900_inode *temp_int = find_inode(state->i2c_adap,
@@ -1404,6 +1443,27 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
stv0900_write_bits(intp, F0900_P1_RST_HWARE, 0);
}
+ intp->tuner_type[0] = p_init->tuner1_type;
+ intp->tuner_type[1] = p_init->tuner2_type;
+ /* tuner init */
+ switch (p_init->tuner1_type) {
+ case 3: /*FE_AUTO_STB6100:*/
+ stv0900_write_reg(intp, R0900_P1_TNRCFG, 0x3c);
+ stv0900_write_reg(intp, R0900_P1_TNRCFG2, 0x86);
+ stv0900_write_reg(intp, R0900_P1_TNRCFG3, 0x18);
+ stv0900_write_reg(intp, R0900_P1_TNRXTAL, 27); /* 27MHz */
+ stv0900_write_reg(intp, R0900_P1_TNRSTEPS, 0x05);
+ stv0900_write_reg(intp, R0900_P1_TNRGAIN, 0x17);
+ stv0900_write_reg(intp, R0900_P1_TNRADJ, 0x1f);
+ stv0900_write_reg(intp, R0900_P1_TNRCTL2, 0x0);
+ stv0900_write_bits(intp, F0900_P1_TUN_TYPE, 3);
+ break;
+ /* case FE_SW_TUNER: */
+ default:
+ stv0900_write_bits(intp, F0900_P1_TUN_TYPE, 6);
+ break;
+ }
+
stv0900_write_bits(intp, F0900_P1_TUN_MADDRESS, p_init->tun1_maddress);
switch (p_init->tuner1_adc) {
case 1:
@@ -1413,6 +1473,27 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
break;
}
+ stv0900_write_reg(intp, R0900_P1_TNRLD, 1); /* hw tuner */
+
+ /* tuner init */
+ switch (p_init->tuner2_type) {
+ case 3: /*FE_AUTO_STB6100:*/
+ stv0900_write_reg(intp, R0900_P2_TNRCFG, 0x3c);
+ stv0900_write_reg(intp, R0900_P2_TNRCFG2, 0x86);
+ stv0900_write_reg(intp, R0900_P2_TNRCFG3, 0x18);
+ stv0900_write_reg(intp, R0900_P2_TNRXTAL, 27); /* 27MHz */
+ stv0900_write_reg(intp, R0900_P2_TNRSTEPS, 0x05);
+ stv0900_write_reg(intp, R0900_P2_TNRGAIN, 0x17);
+ stv0900_write_reg(intp, R0900_P2_TNRADJ, 0x1f);
+ stv0900_write_reg(intp, R0900_P2_TNRCTL2, 0x0);
+ stv0900_write_bits(intp, F0900_P2_TUN_TYPE, 3);
+ break;
+ /* case FE_SW_TUNER: */
+ default:
+ stv0900_write_bits(intp, F0900_P2_TUN_TYPE, 6);
+ break;
+ }
+
stv0900_write_bits(intp, F0900_P2_TUN_MADDRESS, p_init->tun2_maddress);
switch (p_init->tuner2_adc) {
case 1:
@@ -1422,6 +1503,8 @@ static enum fe_stv0900_error stv0900_init_internal(struct dvb_frontend *fe,
break;
}
+ stv0900_write_reg(intp, R0900_P2_TNRLD, 1); /* hw tuner */
+
stv0900_write_bits(intp, F0900_P1_TUN_IQSWAP, p_init->tun1_iq_inv);
stv0900_write_bits(intp, F0900_P2_TUN_IQSWAP, p_init->tun2_iq_inv);
stv0900_set_mclk(intp, 135000000);
@@ -1824,10 +1907,12 @@ struct dvb_frontend *stv0900_attach(const struct stv0900_config *config,
init_params.tun1_maddress = config->tun1_maddress;
init_params.tun1_iq_inv = STV0900_IQ_NORMAL;
init_params.tuner1_adc = config->tun1_adc;
+ init_params.tuner1_type = config->tun1_type;
init_params.path2_ts_clock = config->path2_mode;
init_params.ts_config = config->ts_config_regs;
init_params.tun2_maddress = config->tun2_maddress;
init_params.tuner2_adc = config->tun2_adc;
+ init_params.tuner2_type = config->tun2_type;
init_params.tun2_iq_inv = STV0900_IQ_SWAPPED;
err_stv0900 = stv0900_init_internal(&state->frontend,
diff --git a/drivers/media/dvb/frontends/stv0900_priv.h b/drivers/media/dvb/frontends/stv0900_priv.h
index d8ba8a984abe..b62b0f0a4fef 100644
--- a/drivers/media/dvb/frontends/stv0900_priv.h
+++ b/drivers/media/dvb/frontends/stv0900_priv.h
@@ -247,6 +247,7 @@ struct stv0900_init_params{
u8 tun1_maddress;
int tuner1_adc;
+ int tuner1_type;
/* IQ from the tuner1 to the demod */
enum stv0900_iq_inversion tun1_iq_inv;
@@ -254,6 +255,7 @@ struct stv0900_init_params{
u8 tun2_maddress;
int tuner2_adc;
+ int tuner2_type;
/* IQ from the tuner2 to the demod */
enum stv0900_iq_inversion tun2_iq_inv;
@@ -309,6 +311,8 @@ struct stv0900_internal{
s32 bw[2];
s32 symbol_rate[2];
s32 srch_range[2];
+ /* for software/auto tuner */
+ int tuner_type[2];
/* algorithm for search Blind, Cold or Warm*/
enum fe_stv0900_search_algo srch_algo[2];
@@ -394,4 +398,11 @@ extern enum
fe_stv0900_tracking_standard stv0900_get_standard(struct dvb_frontend *fe,
enum fe_stv0900_demod_num demod);
+extern u32
+stv0900_get_freq_auto(struct stv0900_internal *intp, int demod);
+
+extern void
+stv0900_set_tuner_auto(struct stv0900_internal *intp, u32 Frequency,
+ u32 Bandwidth, int demod);
+
#endif
diff --git a/drivers/media/dvb/frontends/stv0900_reg.h b/drivers/media/dvb/frontends/stv0900_reg.h
index 7b8edf192e97..731afe93a823 100644
--- a/drivers/media/dvb/frontends/stv0900_reg.h
+++ b/drivers/media/dvb/frontends/stv0900_reg.h
@@ -3174,17 +3174,21 @@ extern s32 shiftx(s32 x, int demod, s32 shift);
#define R0900_P1_TNRRF1 0xf4e9
#define TNRRF1 REGx(R0900_P1_TNRRF1)
#define F0900_P1_TUN_RFFREQ2 0xf4e900ff
+#define TUN_RFFREQ2 FLDx(F0900_P1_TUN_RFFREQ2)
/*P1_TNRRF0*/
#define R0900_P1_TNRRF0 0xf4ea
#define TNRRF0 REGx(R0900_P1_TNRRF0)
#define F0900_P1_TUN_RFFREQ1 0xf4ea00ff
+#define TUN_RFFREQ1 FLDx(F0900_P1_TUN_RFFREQ1)
/*P1_TNRBW*/
#define R0900_P1_TNRBW 0xf4eb
#define TNRBW REGx(R0900_P1_TNRBW)
#define F0900_P1_TUN_RFFREQ0 0xf4eb00c0
+#define TUN_RFFREQ0 FLDx(F0900_P1_TUN_RFFREQ0)
#define F0900_P1_TUN_BW 0xf4eb003f
+#define TUN_BW FLDx(F0900_P1_TUN_BW)
/*P1_TNRADJ*/
#define R0900_P1_TNRADJ 0xf4ec
@@ -3234,11 +3238,13 @@ extern s32 shiftx(s32 x, int demod, s32 shift);
#define F0900_P1_TUN_I2CLOCKED 0xf4f60010
#define F0900_P1_TUN_PROGDONE 0xf4f6000c
#define F0900_P1_TUN_RFRESTE1 0xf4f60003
+#define TUN_RFRESTE1 FLDx(F0900_P1_TUN_RFRESTE1)
/*P1_TNRRESTE*/
#define R0900_P1_TNRRESTE 0xf4f7
#define TNRRESTE REGx(R0900_P1_TNRRESTE)
#define F0900_P1_TUN_RFRESTE0 0xf4f700ff
+#define TUN_RFRESTE0 FLDx(F0900_P1_TUN_RFRESTE0)
/*P1_SMAPCOEF7*/
#define R0900_P1_SMAPCOEF7 0xf500
diff --git a/drivers/media/dvb/frontends/stv0900_sw.c b/drivers/media/dvb/frontends/stv0900_sw.c
index b8da87fa637f..5161c2884426 100644
--- a/drivers/media/dvb/frontends/stv0900_sw.c
+++ b/drivers/media/dvb/frontends/stv0900_sw.c
@@ -606,7 +606,12 @@ static int stv0900_get_demod_cold_lock(struct dvb_frontend *fe,
tuner_freq -= (current_step * currier_step);
if (intp->chip_id <= 0x20) {
- stv0900_set_tuner(fe, tuner_freq, intp->bw[d]);
+ if (intp->tuner_type[d] == 3)
+ stv0900_set_tuner_auto(intp, tuner_freq,
+ intp->bw[d], demod);
+ else
+ stv0900_set_tuner(fe, tuner_freq, intp->bw[d]);
+
stv0900_write_reg(intp, DMDISTATE, 0x1c);
stv0900_write_reg(intp, CFRINIT1, 0);
stv0900_write_reg(intp, CFRINIT0, 0);
@@ -976,8 +981,16 @@ static void stv0900_track_optimization(struct dvb_frontend *fe)
intp->rolloff) + 10000000;
if ((intp->chip_id >= 0x20) || (blind_tun_sw == 1)) {
- if (intp->srch_algo[demod] != STV0900_WARM_START)
- stv0900_set_bandwidth(fe, intp->bw[demod]);
+ if (intp->srch_algo[demod] != STV0900_WARM_START) {
+ if (intp->tuner_type[demod] == 3)
+ stv0900_set_tuner_auto(intp,
+ intp->freq[demod],
+ intp->bw[demod],
+ demod);
+ else
+ stv0900_set_bandwidth(fe,
+ intp->bw[demod]);
+ }
}
if ((intp->srch_algo[demod] == STV0900_BLIND_SEARCH) ||
@@ -1202,7 +1215,11 @@ fe_stv0900_signal_type stv0900_get_signal_params(struct dvb_frontend *fe)
}
result->standard = stv0900_get_standard(fe, d);
- result->frequency = stv0900_get_tuner_freq(fe);
+ if (intp->tuner_type[demod] == 3)
+ result->frequency = stv0900_get_freq_auto(intp, d);
+ else
+ result->frequency = stv0900_get_tuner_freq(fe);
+
offsetFreq = stv0900_get_carr_freq(intp, intp->mclk, d) / 1000;
result->frequency += offsetFreq;
result->symbol_rate = stv0900_get_symbol_rate(intp, intp->mclk, d);
@@ -1239,7 +1256,11 @@ fe_stv0900_signal_type stv0900_get_signal_params(struct dvb_frontend *fe)
if ((intp->srch_algo[d] == STV0900_BLIND_SEARCH) ||
(intp->symbol_rate[d] < 10000000)) {
offsetFreq = result->frequency - intp->freq[d];
- intp->freq[d] = stv0900_get_tuner_freq(fe);
+ if (intp->tuner_type[demod] == 3)
+ intp->freq[d] = stv0900_get_freq_auto(intp, d);
+ else
+ intp->freq[d] = stv0900_get_tuner_freq(fe);
+
if (ABS(offsetFreq) <= ((intp->srch_range[d] / 2000) + 500))
range = STV0900_RANGEOK;
else if (ABS(offsetFreq) <=
@@ -1481,7 +1502,12 @@ static u32 stv0900_search_srate_coarse(struct dvb_frontend *fe)
else
tuner_freq -= (current_step * currier_step);
- stv0900_set_tuner(fe, tuner_freq, intp->bw[demod]);
+ if (intp->tuner_type[demod] == 3)
+ stv0900_set_tuner_auto(intp, tuner_freq,
+ intp->bw[demod], demod);
+ else
+ stv0900_set_tuner(fe, tuner_freq,
+ intp->bw[demod]);
}
}
@@ -1875,7 +1901,11 @@ enum fe_stv0900_signal_type stv0900_algo(struct dvb_frontend *fe)
}
- stv0900_set_tuner(fe, intp->freq[demod], intp->bw[demod]);
+ if (intp->tuner_type[demod] == 3)
+ stv0900_set_tuner_auto(intp, intp->freq[demod],
+ intp->bw[demod], demod);
+ else
+ stv0900_set_tuner(fe, intp->freq[demod], intp->bw[demod]);
agc1_power = MAKEWORD(stv0900_get_bits(intp, AGCIQ_VALUE1),
stv0900_get_bits(intp, AGCIQ_VALUE0));
diff --git a/drivers/media/dvb/frontends/tda10021.c b/drivers/media/dvb/frontends/tda10021.c
index 6c1dbf9288d8..6ca533ea0f0e 100644
--- a/drivers/media/dvb/frontends/tda10021.c
+++ b/drivers/media/dvb/frontends/tda10021.c
@@ -426,6 +426,10 @@ struct dvb_frontend* tda10021_attach(const struct tda1002x_config* config,
id = tda10021_readreg(state, 0x1a);
if ((id & 0xf0) != 0x70) goto error;
+ /* Don't claim TDA10023 */
+ if (id == 0x7d)
+ goto error;
+
printk("TDA10021: i2c-addr = 0x%02x, id = 0x%02x\n",
state->config->demod_address, id);
diff --git a/drivers/media/dvb/frontends/tda665x.c b/drivers/media/dvb/frontends/tda665x.c
new file mode 100644
index 000000000000..87d52739c828
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda665x.c
@@ -0,0 +1,257 @@
+/*
+ TDA665x tuner driver
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "dvb_frontend.h"
+#include "tda665x.h"
+
+struct tda665x_state {
+ struct dvb_frontend *fe;
+ struct i2c_adapter *i2c;
+ const struct tda665x_config *config;
+
+ u32 frequency;
+ u32 bandwidth;
+};
+
+static int tda665x_read(struct tda665x_state *state, u8 *buf)
+{
+ const struct tda665x_config *config = state->config;
+ int err = 0;
+ struct i2c_msg msg = { .addr = config->addr, .flags = I2C_M_RD, .buf = buf, .len = 2 };
+
+ err = i2c_transfer(state->i2c, &msg, 1);
+ if (err != 1)
+ goto exit;
+
+ return err;
+exit:
+ printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
+ return err;
+}
+
+static int tda665x_write(struct tda665x_state *state, u8 *buf, u8 length)
+{
+ const struct tda665x_config *config = state->config;
+ int err = 0;
+ struct i2c_msg msg = { .addr = config->addr, .flags = 0, .buf = buf, .len = length };
+
+ err = i2c_transfer(state->i2c, &msg, 1);
+ if (err != 1)
+ goto exit;
+
+ return err;
+exit:
+ printk(KERN_ERR "%s: I/O Error err=<%d>\n", __func__, err);
+ return err;
+}
+
+static int tda665x_get_state(struct dvb_frontend *fe,
+ enum tuner_param param,
+ struct tuner_state *tstate)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+ int err = 0;
+
+ switch (param) {
+ case DVBFE_TUNER_FREQUENCY:
+ tstate->frequency = state->frequency;
+ break;
+ case DVBFE_TUNER_BANDWIDTH:
+ break;
+ default:
+ printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int tda665x_get_status(struct dvb_frontend *fe, u32 *status)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+ u8 result = 0;
+ int err = 0;
+
+ *status = 0;
+
+ err = tda665x_read(state, &result);
+ if (err < 0)
+ goto exit;
+
+ if ((result >> 6) & 0x01) {
+ printk(KERN_DEBUG "%s: Tuner Phase Locked\n", __func__);
+ *status = 1;
+ }
+
+ return err;
+exit:
+ printk(KERN_ERR "%s: I/O Error\n", __func__);
+ return err;
+}
+
+static int tda665x_set_state(struct dvb_frontend *fe,
+ enum tuner_param param,
+ struct tuner_state *tstate)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+ const struct tda665x_config *config = state->config;
+ u32 frequency, status = 0;
+ u8 buf[4];
+ int err = 0;
+
+ if (param & DVBFE_TUNER_FREQUENCY) {
+
+ frequency = tstate->frequency;
+ if ((frequency < config->frequency_max) || (frequency > config->frequency_min)) {
+ printk(KERN_ERR "%s: Frequency beyond limits, frequency=%d\n", __func__, frequency);
+ return -EINVAL;
+ }
+
+ frequency += config->frequency_offst;
+ frequency *= config->ref_multiplier;
+ frequency += config->ref_divider >> 1;
+ frequency /= config->ref_divider;
+
+ buf[0] = (u8) (frequency & 0x7f00) >> 8;
+ buf[1] = (u8) (frequency & 0x00ff) >> 0;
+ buf[2] = 0x80 | 0x40 | 0x02;
+ buf[3] = 0x00;
+
+ /* restore frequency */
+ frequency = tstate->frequency;
+
+ if (frequency < 153000000) {
+ /* VHF-L */
+ buf[3] |= 0x01; /* fc, Low Band, 47 - 153 MHz */
+ if (frequency < 68000000)
+ buf[3] |= 0x40; /* 83uA */
+ if (frequency < 1040000000)
+ buf[3] |= 0x60; /* 122uA */
+ if (frequency < 1250000000)
+ buf[3] |= 0x80; /* 163uA */
+ else
+ buf[3] |= 0xa0; /* 254uA */
+ } else if (frequency < 438000000) {
+ /* VHF-H */
+ buf[3] |= 0x02; /* fc, Mid Band, 153 - 438 MHz */
+ if (frequency < 230000000)
+ buf[3] |= 0x40;
+ if (frequency < 300000000)
+ buf[3] |= 0x60;
+ else
+ buf[3] |= 0x80;
+ } else {
+ /* UHF */
+ buf[3] |= 0x04; /* fc, High Band, 438 - 862 MHz */
+ if (frequency < 470000000)
+ buf[3] |= 0x60;
+ if (frequency < 526000000)
+ buf[3] |= 0x80;
+ else
+ buf[3] |= 0xa0;
+ }
+
+ /* Set params */
+ err = tda665x_write(state, buf, 5);
+ if (err < 0)
+ goto exit;
+
+ /* sleep for some time */
+ printk(KERN_DEBUG "%s: Waiting to Phase LOCK\n", __func__);
+ msleep(20);
+ /* check status */
+ err = tda665x_get_status(fe, &status);
+ if (err < 0)
+ goto exit;
+
+ if (status == 1) {
+ printk(KERN_DEBUG "%s: Tuner Phase locked: status=%d\n", __func__, status);
+ state->frequency = frequency; /* cache successful state */
+ } else {
+ printk(KERN_ERR "%s: No Phase lock: status=%d\n", __func__, status);
+ }
+ } else {
+ printk(KERN_ERR "%s: Unknown parameter (param=%d)\n", __func__, param);
+ return -EINVAL;
+ }
+
+ return 0;
+exit:
+ printk(KERN_ERR "%s: I/O Error\n", __func__);
+ return err;
+}
+
+static int tda665x_release(struct dvb_frontend *fe)
+{
+ struct tda665x_state *state = fe->tuner_priv;
+
+ fe->tuner_priv = NULL;
+ kfree(state);
+ return 0;
+}
+
+static struct dvb_tuner_ops tda665x_ops = {
+
+ .set_state = tda665x_set_state,
+ .get_state = tda665x_get_state,
+ .get_status = tda665x_get_status,
+ .release = tda665x_release
+};
+
+struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ const struct tda665x_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct tda665x_state *state = NULL;
+ struct dvb_tuner_info *info;
+
+ state = kzalloc(sizeof(struct tda665x_state), GFP_KERNEL);
+ if (state == NULL)
+ goto exit;
+
+ state->config = config;
+ state->i2c = i2c;
+ state->fe = fe;
+ fe->tuner_priv = state;
+ fe->ops.tuner_ops = tda665x_ops;
+ info = &fe->ops.tuner_ops.info;
+
+ memcpy(info->name, config->name, sizeof(config->name));
+ info->frequency_min = config->frequency_min;
+ info->frequency_max = config->frequency_max;
+ info->frequency_step = config->frequency_offst;
+
+ printk(KERN_DEBUG "%s: Attaching TDA665x (%s) tuner\n", __func__, info->name);
+
+ return fe;
+
+exit:
+ kfree(state);
+ return NULL;
+}
+EXPORT_SYMBOL(tda665x_attach);
+
+MODULE_DESCRIPTION("TDA665x driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/tda665x.h b/drivers/media/dvb/frontends/tda665x.h
new file mode 100644
index 000000000000..ec7927aa75ae
--- /dev/null
+++ b/drivers/media/dvb/frontends/tda665x.h
@@ -0,0 +1,52 @@
+/*
+ TDA665x tuner driver
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __TDA665x_H
+#define __TDA665x_H
+
+struct tda665x_config {
+ char name[128];
+
+ u8 addr;
+ u32 frequency_min;
+ u32 frequency_max;
+ u32 frequency_offst;
+ u32 ref_multiplier;
+ u32 ref_divider;
+};
+
+#if defined(CONFIG_DVB_TDA665x) || (defined(CONFIG_DVB_TDA665x_MODULE) && defined(MODULE))
+
+extern struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ const struct tda665x_config *config,
+ struct i2c_adapter *i2c);
+
+#else
+
+static inline struct dvb_frontend *tda665x_attach(struct dvb_frontend *fe,
+ const struct tda665x_config *config,
+ struct i2c_adapter *i2c)
+{
+ printk(KERN_WARNING "%s: Driver disabled by Kconfig\n", __func__);
+ return NULL;
+}
+
+#endif /* CONFIG_DVB_TDA665x */
+
+#endif /* __TDA665x_H */
diff --git a/drivers/media/dvb/mantis/Kconfig b/drivers/media/dvb/mantis/Kconfig
new file mode 100644
index 000000000000..f9219cd7bb0c
--- /dev/null
+++ b/drivers/media/dvb/mantis/Kconfig
@@ -0,0 +1,32 @@
+config MANTIS_CORE
+ tristate "Mantis/Hopper PCI bridge based devices"
+ depends on PCI && I2C
+
+ help
+ Support for PCI cards based on the Mantis and Hopper PCi bridge.
+
+ Say Y if you own such a device and want to use it.
+
+config DVB_MANTIS
+ tristate "MANTIS based cards"
+ depends on MANTIS_CORE && DVB_CORE && PCI && I2C
+ select DVB_MB86A16
+ select DVB_ZL10353
+ select DVB_STV0299
+ select DVB_PLL
+ help
+ Support for PCI cards based on the Mantis PCI bridge.
+ Say Y when you have a Mantis based DVB card and want to use it.
+
+ If unsure say N.
+
+config DVB_HOPPER
+ tristate "HOPPER based cards"
+ depends on MANTIS_CORE && DVB_CORE && PCI && I2C
+ select DVB_ZL10353
+ select DVB_PLL
+ help
+ Support for PCI cards based on the Hopper PCI bridge.
+ Say Y when you have a Hopper based DVB card and want to use it.
+
+ If unsure say N
diff --git a/drivers/media/dvb/mantis/Makefile b/drivers/media/dvb/mantis/Makefile
new file mode 100644
index 000000000000..98dc5cd258ac
--- /dev/null
+++ b/drivers/media/dvb/mantis/Makefile
@@ -0,0 +1,28 @@
+mantis_core-objs := mantis_ioc.o \
+ mantis_uart.o \
+ mantis_dma.o \
+ mantis_pci.o \
+ mantis_i2c.o \
+ mantis_dvb.o \
+ mantis_evm.o \
+ mantis_hif.o \
+ mantis_ca.o \
+ mantis_pcmcia.o \
+ mantis_input.o
+
+mantis-objs := mantis_cards.o \
+ mantis_vp1033.o \
+ mantis_vp1034.o \
+ mantis_vp1041.o \
+ mantis_vp2033.o \
+ mantis_vp2040.o \
+ mantis_vp3030.o
+
+hopper-objs := hopper_cards.o \
+ hopper_vp3028.o
+
+obj-$(CONFIG_MANTIS_CORE) += mantis_core.o
+obj-$(CONFIG_DVB_MANTIS) += mantis.o
+obj-$(CONFIG_DVB_HOPPER) += hopper.o
+
+EXTRA_CFLAGS = -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
diff --git a/drivers/media/dvb/mantis/hopper_cards.c b/drivers/media/dvb/mantis/hopper_cards.c
new file mode 100644
index 000000000000..d073c61e3c0d
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_cards.c
@@ -0,0 +1,275 @@
+/*
+ Hopper PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "hopper_vp3028.h"
+#include "mantis_dma.h"
+#include "mantis_dvb.h"
+#include "mantis_uart.h"
+#include "mantis_ioc.h"
+#include "mantis_pci.h"
+#include "mantis_i2c.h"
+#include "mantis_reg.h"
+
+static unsigned int verbose;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)");
+
+#define DRIVER_NAME "Hopper"
+
+static char *label[10] = {
+ "DMA",
+ "IRQ-0",
+ "IRQ-1",
+ "OCERR",
+ "PABRT",
+ "RIPRR",
+ "PPERR",
+ "FTRGT",
+ "RISCI",
+ "RACK"
+};
+
+static int devs;
+
+static irqreturn_t hopper_irq_handler(int irq, void *dev_id)
+{
+ u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
+ u32 rst_stat = 0, rst_mask = 0;
+
+ struct mantis_pci *mantis;
+ struct mantis_ca *ca;
+
+ mantis = (struct mantis_pci *) dev_id;
+ if (unlikely(mantis == NULL)) {
+ dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ return IRQ_NONE;
+ }
+ ca = mantis->mantis_ca;
+
+ stat = mmread(MANTIS_INT_STAT);
+ mask = mmread(MANTIS_INT_MASK);
+ mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
+ if (!(stat & mask))
+ return IRQ_NONE;
+
+ rst_mask = MANTIS_GPIF_WRACK |
+ MANTIS_GPIF_OTHERR |
+ MANTIS_SBUF_WSTO |
+ MANTIS_GPIF_EXTIRQ;
+
+ rst_stat = mmread(MANTIS_GPIF_STATUS);
+ rst_stat &= rst_mask;
+ mmwrite(rst_stat, MANTIS_GPIF_STATUS);
+
+ mantis->mantis_int_stat = stat;
+ mantis->mantis_int_mask = mask;
+ dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
+ if (stat & MANTIS_INT_RISCEN) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
+ }
+ if (stat & MANTIS_INT_IRQ0) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
+ mantis->gpif_status = rst_stat;
+ wake_up(&ca->hif_write_wq);
+ schedule_work(&ca->hif_evm_work);
+ }
+ if (stat & MANTIS_INT_IRQ1) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
+ schedule_work(&mantis->uart_work);
+ }
+ if (stat & MANTIS_INT_OCERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
+ }
+ if (stat & MANTIS_INT_PABORT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
+ }
+ if (stat & MANTIS_INT_RIPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
+ }
+ if (stat & MANTIS_INT_PPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
+ }
+ if (stat & MANTIS_INT_FTRGT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
+ }
+ if (stat & MANTIS_INT_RISCI) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
+ mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
+ tasklet_schedule(&mantis->tasklet);
+ }
+ if (stat & MANTIS_INT_I2CDONE) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
+ wake_up(&mantis->i2c_wq);
+ }
+ mmwrite(stat, MANTIS_INT_STAT);
+ stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE |
+ MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 |
+ MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 |
+ MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 |
+ MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 |
+ MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 |
+ MANTIS_INT_IRQ0 | MANTIS_INT_OCERR |
+ MANTIS_INT_PABORT | MANTIS_INT_RIPERR |
+ MANTIS_INT_PPERR | MANTIS_INT_FTRGT |
+ MANTIS_INT_RISCI);
+
+ if (stat)
+ dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);
+
+ dprintk(MANTIS_DEBUG, 0, "\n");
+ return IRQ_HANDLED;
+}
+
+static int __devinit hopper_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+{
+ struct mantis_pci *mantis;
+ struct mantis_hwconfig *config;
+ int err = 0;
+
+ mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
+ if (mantis == NULL) {
+ printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ err = -ENOMEM;
+ goto fail0;
+ }
+
+ mantis->num = devs;
+ mantis->verbose = verbose;
+ mantis->pdev = pdev;
+ config = (struct mantis_hwconfig *) pci_id->driver_data;
+ config->irq_handler = &hopper_irq_handler;
+ mantis->hwconfig = config;
+
+ err = mantis_pci_init(mantis);
+ if (err) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_stream_control(mantis, STREAM_TO_HIF);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_i2c_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_get_mac(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_dma_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
+ goto fail3;
+ }
+
+ err = mantis_dvb_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
+ goto fail4;
+ }
+ devs++;
+
+ return err;
+
+fail4:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
+ mantis_dma_exit(mantis);
+
+fail3:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
+ mantis_i2c_exit(mantis);
+
+fail2:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
+ mantis_pci_exit(mantis);
+
+fail1:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
+ kfree(mantis);
+
+fail0:
+ return err;
+}
+
+static void __devexit hopper_pci_remove(struct pci_dev *pdev)
+{
+ struct mantis_pci *mantis = pci_get_drvdata(pdev);
+
+ if (mantis) {
+ mantis_dvb_exit(mantis);
+ mantis_dma_exit(mantis);
+ mantis_i2c_exit(mantis);
+ mantis_pci_exit(mantis);
+ kfree(mantis);
+ }
+ return;
+
+}
+
+static struct pci_device_id hopper_pci_table[] = {
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3028_DVB_T, &vp3028_config),
+ { }
+};
+
+static struct pci_driver hopper_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = hopper_pci_table,
+ .probe = hopper_pci_probe,
+ .remove = hopper_pci_remove,
+};
+
+static int __devinit hopper_init(void)
+{
+ return pci_register_driver(&hopper_pci_driver);
+}
+
+static void __devexit hopper_exit(void)
+{
+ return pci_unregister_driver(&hopper_pci_driver);
+}
+
+module_init(hopper_init);
+module_exit(hopper_exit);
+
+MODULE_DESCRIPTION("HOPPER driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/hopper_vp3028.c b/drivers/media/dvb/mantis/hopper_vp3028.c
new file mode 100644
index 000000000000..96674c78e86b
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_vp3028.c
@@ -0,0 +1,88 @@
+/*
+ Hopper VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "zl10353.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "hopper_vp3028.h"
+
+struct zl10353_config hopper_vp3028_config = {
+ .demod_address = 0x0f,
+};
+
+#define MANTIS_MODEL_NAME "VP-3028"
+#define MANTIS_DEV_TYPE "DVB-T"
+
+static int vp3028_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ int err = 0;
+
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ err = mantis_frontend_power(mantis, POWER_ON);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ msleep(250);
+ dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
+ fe = zl10353_attach(&hopper_vp3028_config, adapter);
+
+ if (!fe)
+ return -1;
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp3028_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp3028_frontend_init,
+ .power = GPIF_A00,
+ .reset = GPIF_A03,
+};
diff --git a/drivers/media/dvb/mantis/hopper_vp3028.h b/drivers/media/dvb/mantis/hopper_vp3028.h
new file mode 100644
index 000000000000..57239498bc87
--- /dev/null
+++ b/drivers/media/dvb/mantis/hopper_vp3028.h
@@ -0,0 +1,30 @@
+/*
+ Hopper VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP3028_H
+#define __MANTIS_VP3028_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_3028_DVB_T 0x0028
+
+extern struct mantis_hwconfig vp3028_config;
+
+#endif /* __MANTIS_VP3028_H */
diff --git a/drivers/media/dvb/mantis/mantis_ca.c b/drivers/media/dvb/mantis/mantis_ca.c
new file mode 100644
index 000000000000..403ce043d00e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ca.c
@@ -0,0 +1,207 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_link.h"
+#include "mantis_hif.h"
+#include "mantis_reg.h"
+
+#include "mantis_ca.h"
+
+static int mantis_ca_read_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Read", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_read_mem(ca, addr);
+}
+
+static int mantis_ca_write_attr_mem(struct dvb_ca_en50221 *en50221, int slot, int addr, u8 data)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request Attribute Mem Write", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_write_mem(ca, addr, data);
+}
+
+static int mantis_ca_read_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Read", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_read_iom(ca, addr);
+}
+
+static int mantis_ca_write_cam_ctl(struct dvb_ca_en50221 *en50221, int slot, u8 addr, u8 data)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Request CAM control Write", slot);
+
+ if (slot != 0)
+ return -EINVAL;
+
+ return mantis_hif_write_iom(ca, addr, data);
+}
+
+static int mantis_ca_slot_reset(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot RESET", slot);
+ udelay(500); /* Wait.. */
+ mmwrite(0xda, MANTIS_PCMCIA_RESET); /* Leading edge assert */
+ udelay(500);
+ mmwrite(0x00, MANTIS_PCMCIA_RESET); /* Trailing edge deassert */
+ msleep(1000);
+ dvb_ca_en50221_camready_irq(&ca->en50221, 0);
+
+ return 0;
+}
+
+static int mantis_ca_slot_shutdown(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Slot shutdown", slot);
+
+ return 0;
+}
+
+static int mantis_ts_control(struct dvb_ca_en50221 *en50221, int slot)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): TS control", slot);
+/* mantis_set_direction(mantis, 1); */ /* Enable TS through CAM */
+
+ return 0;
+}
+
+static int mantis_slot_status(struct dvb_ca_en50221 *en50221, int slot, int open)
+{
+ struct mantis_ca *ca = en50221->data;
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Slot(%d): Poll Slot status", slot);
+
+ if (ca->slot_state == MODULE_INSERTED) {
+ dprintk(MANTIS_DEBUG, 1, "CA Module present and ready");
+ return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY;
+ } else {
+ dprintk(MANTIS_DEBUG, 1, "CA Module not present or not ready");
+ }
+
+ return 0;
+}
+
+int mantis_ca_init(struct mantis_pci *mantis)
+{
+ struct dvb_adapter *dvb_adapter = &mantis->dvb_adapter;
+ struct mantis_ca *ca;
+ int ca_flags = 0, result;
+
+ dprintk(MANTIS_DEBUG, 1, "Initializing Mantis CA");
+ ca = kzalloc(sizeof(struct mantis_ca), GFP_KERNEL);
+ if (!ca) {
+ dprintk(MANTIS_ERROR, 1, "Out of memory!, exiting ..");
+ result = -ENOMEM;
+ goto err;
+ }
+
+ ca->ca_priv = mantis;
+ mantis->mantis_ca = ca;
+ ca_flags = DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE;
+ /* register CA interface */
+ ca->en50221.owner = THIS_MODULE;
+ ca->en50221.read_attribute_mem = mantis_ca_read_attr_mem;
+ ca->en50221.write_attribute_mem = mantis_ca_write_attr_mem;
+ ca->en50221.read_cam_control = mantis_ca_read_cam_ctl;
+ ca->en50221.write_cam_control = mantis_ca_write_cam_ctl;
+ ca->en50221.slot_reset = mantis_ca_slot_reset;
+ ca->en50221.slot_shutdown = mantis_ca_slot_shutdown;
+ ca->en50221.slot_ts_enable = mantis_ts_control;
+ ca->en50221.poll_slot_status = mantis_slot_status;
+ ca->en50221.data = ca;
+
+ mutex_init(&ca->ca_lock);
+
+ init_waitqueue_head(&ca->hif_data_wq);
+ init_waitqueue_head(&ca->hif_opdone_wq);
+ init_waitqueue_head(&ca->hif_write_wq);
+
+ dprintk(MANTIS_ERROR, 1, "Registering EN50221 device");
+ result = dvb_ca_en50221_init(dvb_adapter, &ca->en50221, ca_flags, 1);
+ if (result != 0) {
+ dprintk(MANTIS_ERROR, 1, "EN50221: Initialization failed <%d>", result);
+ goto err;
+ }
+ dprintk(MANTIS_ERROR, 1, "Registered EN50221 device");
+ mantis_evmgr_init(ca);
+ return 0;
+err:
+ kfree(ca);
+ return result;
+}
+EXPORT_SYMBOL_GPL(mantis_ca_init);
+
+void mantis_ca_exit(struct mantis_pci *mantis)
+{
+ struct mantis_ca *ca = mantis->mantis_ca;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis CA exit");
+
+ mantis_evmgr_exit(ca);
+ dprintk(MANTIS_ERROR, 1, "Unregistering EN50221 device");
+ if (ca)
+ dvb_ca_en50221_release(&ca->en50221);
+
+ kfree(ca);
+}
+EXPORT_SYMBOL_GPL(mantis_ca_exit);
diff --git a/drivers/media/dvb/mantis/mantis_ca.h b/drivers/media/dvb/mantis/mantis_ca.h
new file mode 100644
index 000000000000..dc63e55f7eca
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ca.h
@@ -0,0 +1,27 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_CA_H
+#define __MANTIS_CA_H
+
+extern int mantis_ca_init(struct mantis_pci *mantis);
+extern void mantis_ca_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_CA_H */
diff --git a/drivers/media/dvb/mantis/mantis_cards.c b/drivers/media/dvb/mantis/mantis_cards.c
new file mode 100644
index 000000000000..16f1708fd3bc
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_cards.c
@@ -0,0 +1,305 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/irq.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+
+#include "mantis_vp1033.h"
+#include "mantis_vp1034.h"
+#include "mantis_vp1041.h"
+#include "mantis_vp2033.h"
+#include "mantis_vp2040.h"
+#include "mantis_vp3030.h"
+
+#include "mantis_dma.h"
+#include "mantis_ca.h"
+#include "mantis_dvb.h"
+#include "mantis_uart.h"
+#include "mantis_ioc.h"
+#include "mantis_pci.h"
+#include "mantis_i2c.h"
+#include "mantis_reg.h"
+
+static unsigned int verbose;
+module_param(verbose, int, 0644);
+MODULE_PARM_DESC(verbose, "verbose startup messages, default is 1 (yes)");
+
+static int devs;
+
+#define DRIVER_NAME "Mantis"
+
+static char *label[10] = {
+ "DMA",
+ "IRQ-0",
+ "IRQ-1",
+ "OCERR",
+ "PABRT",
+ "RIPRR",
+ "PPERR",
+ "FTRGT",
+ "RISCI",
+ "RACK"
+};
+
+static irqreturn_t mantis_irq_handler(int irq, void *dev_id)
+{
+ u32 stat = 0, mask = 0, lstat = 0, mstat = 0;
+ u32 rst_stat = 0, rst_mask = 0;
+
+ struct mantis_pci *mantis;
+ struct mantis_ca *ca;
+
+ mantis = (struct mantis_pci *) dev_id;
+ if (unlikely(mantis == NULL)) {
+ dprintk(MANTIS_ERROR, 1, "Mantis == NULL");
+ return IRQ_NONE;
+ }
+ ca = mantis->mantis_ca;
+
+ stat = mmread(MANTIS_INT_STAT);
+ mask = mmread(MANTIS_INT_MASK);
+ mstat = lstat = stat & ~MANTIS_INT_RISCSTAT;
+ if (!(stat & mask))
+ return IRQ_NONE;
+
+ rst_mask = MANTIS_GPIF_WRACK |
+ MANTIS_GPIF_OTHERR |
+ MANTIS_SBUF_WSTO |
+ MANTIS_GPIF_EXTIRQ;
+
+ rst_stat = mmread(MANTIS_GPIF_STATUS);
+ rst_stat &= rst_mask;
+ mmwrite(rst_stat, MANTIS_GPIF_STATUS);
+
+ mantis->mantis_int_stat = stat;
+ mantis->mantis_int_mask = mask;
+ dprintk(MANTIS_DEBUG, 0, "\n-- Stat=<%02x> Mask=<%02x> --", stat, mask);
+ if (stat & MANTIS_INT_RISCEN) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[0]);
+ }
+ if (stat & MANTIS_INT_IRQ0) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[1]);
+ mantis->gpif_status = rst_stat;
+ wake_up(&ca->hif_write_wq);
+ schedule_work(&ca->hif_evm_work);
+ }
+ if (stat & MANTIS_INT_IRQ1) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[2]);
+ schedule_work(&mantis->uart_work);
+ }
+ if (stat & MANTIS_INT_OCERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[3]);
+ }
+ if (stat & MANTIS_INT_PABORT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[4]);
+ }
+ if (stat & MANTIS_INT_RIPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[5]);
+ }
+ if (stat & MANTIS_INT_PPERR) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[6]);
+ }
+ if (stat & MANTIS_INT_FTRGT) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[7]);
+ }
+ if (stat & MANTIS_INT_RISCI) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[8]);
+ mantis->finished_block = (stat & MANTIS_INT_RISCSTAT) >> 28;
+ tasklet_schedule(&mantis->tasklet);
+ }
+ if (stat & MANTIS_INT_I2CDONE) {
+ dprintk(MANTIS_DEBUG, 0, "<%s>", label[9]);
+ wake_up(&mantis->i2c_wq);
+ }
+ mmwrite(stat, MANTIS_INT_STAT);
+ stat &= ~(MANTIS_INT_RISCEN | MANTIS_INT_I2CDONE |
+ MANTIS_INT_I2CRACK | MANTIS_INT_PCMCIA7 |
+ MANTIS_INT_PCMCIA6 | MANTIS_INT_PCMCIA5 |
+ MANTIS_INT_PCMCIA4 | MANTIS_INT_PCMCIA3 |
+ MANTIS_INT_PCMCIA2 | MANTIS_INT_PCMCIA1 |
+ MANTIS_INT_PCMCIA0 | MANTIS_INT_IRQ1 |
+ MANTIS_INT_IRQ0 | MANTIS_INT_OCERR |
+ MANTIS_INT_PABORT | MANTIS_INT_RIPERR |
+ MANTIS_INT_PPERR | MANTIS_INT_FTRGT |
+ MANTIS_INT_RISCI);
+
+ if (stat)
+ dprintk(MANTIS_DEBUG, 0, "<Unknown> Stat=<%02x> Mask=<%02x>", stat, mask);
+
+ dprintk(MANTIS_DEBUG, 0, "\n");
+ return IRQ_HANDLED;
+}
+
+static int __devinit mantis_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
+{
+ struct mantis_pci *mantis;
+ struct mantis_hwconfig *config;
+ int err = 0;
+
+ mantis = kzalloc(sizeof(struct mantis_pci), GFP_KERNEL);
+ if (mantis == NULL) {
+ printk(KERN_ERR "%s ERROR: Out of memory\n", __func__);
+ err = -ENOMEM;
+ goto fail0;
+ }
+
+ mantis->num = devs;
+ mantis->verbose = verbose;
+ mantis->pdev = pdev;
+ config = (struct mantis_hwconfig *) pci_id->driver_data;
+ config->irq_handler = &mantis_irq_handler;
+ mantis->hwconfig = config;
+
+ err = mantis_pci_init(mantis);
+ if (err) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI initialization failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_stream_control(mantis, STREAM_TO_HIF);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis stream control failed <%d>", err);
+ goto fail1;
+ }
+
+ err = mantis_i2c_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C initialization failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_get_mac(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis MAC address read failed <%d>", err);
+ goto fail2;
+ }
+
+ err = mantis_dma_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA initialization failed <%d>", err);
+ goto fail3;
+ }
+
+ err = mantis_dvb_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DVB initialization failed <%d>", err);
+ goto fail4;
+ }
+ err = mantis_uart_init(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis UART initialization failed <%d>", err);
+ goto fail6;
+ }
+
+ devs++;
+
+ return err;
+
+
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis UART exit! <%d>", err);
+ mantis_uart_exit(mantis);
+
+fail6:
+fail4:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis DMA exit! <%d>", err);
+ mantis_dma_exit(mantis);
+
+fail3:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis I2C exit! <%d>", err);
+ mantis_i2c_exit(mantis);
+
+fail2:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis PCI exit! <%d>", err);
+ mantis_pci_exit(mantis);
+
+fail1:
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis free! <%d>", err);
+ kfree(mantis);
+
+fail0:
+ return err;
+}
+
+static void __devexit mantis_pci_remove(struct pci_dev *pdev)
+{
+ struct mantis_pci *mantis = pci_get_drvdata(pdev);
+
+ if (mantis) {
+
+ mantis_uart_exit(mantis);
+ mantis_dvb_exit(mantis);
+ mantis_dma_exit(mantis);
+ mantis_i2c_exit(mantis);
+ mantis_pci_exit(mantis);
+ kfree(mantis);
+ }
+ return;
+}
+
+static struct pci_device_id mantis_pci_table[] = {
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1033_DVB_S, &vp1033_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1034_DVB_S, &vp1034_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_1041_DVB_S2, &vp1041_config),
+ MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_10, &vp1041_config),
+ MAKE_ENTRY(TECHNISAT, SKYSTAR_HD2_20, &vp1041_config),
+ MAKE_ENTRY(TERRATEC, CINERGY_S2_PCI_HD, &vp1041_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_2033_DVB_C, &vp2033_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_2040_DVB_C, &vp2040_config),
+ MAKE_ENTRY(TECHNISAT, CABLESTAR_HD2, &vp2040_config),
+ MAKE_ENTRY(TERRATEC, CINERGY_C, &vp2033_config),
+ MAKE_ENTRY(TWINHAN_TECHNOLOGIES, MANTIS_VP_3030_DVB_T, &vp3030_config),
+ { }
+};
+
+static struct pci_driver mantis_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = mantis_pci_table,
+ .probe = mantis_pci_probe,
+ .remove = mantis_pci_remove,
+};
+
+static int __devinit mantis_init(void)
+{
+ return pci_register_driver(&mantis_pci_driver);
+}
+
+static void __devexit mantis_exit(void)
+{
+ return pci_unregister_driver(&mantis_pci_driver);
+}
+
+module_init(mantis_init);
+module_exit(mantis_exit);
+
+MODULE_DESCRIPTION("MANTIS driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/mantis_common.h b/drivers/media/dvb/mantis/mantis_common.h
new file mode 100644
index 000000000000..d0b645a483c9
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_common.h
@@ -0,0 +1,179 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_COMMON_H
+#define __MANTIS_COMMON_H
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+#include "mantis_uart.h"
+
+#include "mantis_link.h"
+
+#define MANTIS_ERROR 0
+#define MANTIS_NOTICE 1
+#define MANTIS_INFO 2
+#define MANTIS_DEBUG 3
+#define MANTIS_TMG 9
+
+#define dprintk(y, z, format, arg...) do { \
+ if (z) { \
+ if ((mantis->verbose > MANTIS_ERROR) && (mantis->verbose > y)) \
+ printk(KERN_ERR "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_NOTICE) && (mantis->verbose > y)) \
+ printk(KERN_NOTICE "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_INFO) && (mantis->verbose > y)) \
+ printk(KERN_INFO "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_DEBUG) && (mantis->verbose > y)) \
+ printk(KERN_DEBUG "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ else if ((mantis->verbose > MANTIS_TMG) && (mantis->verbose > y)) \
+ printk(KERN_DEBUG "%s (%d): " format "\n" , __func__ , mantis->num , ##arg); \
+ } else { \
+ if (mantis->verbose > y) \
+ printk(format , ##arg); \
+ } \
+} while(0)
+
+#define mwrite(dat, addr) writel((dat), addr)
+#define mread(addr) readl(addr)
+
+#define mmwrite(dat, addr) mwrite((dat), (mantis->mmio + (addr)))
+#define mmread(addr) mread(mantis->mmio + (addr))
+
+#define MANTIS_TS_188 0
+#define MANTIS_TS_204 1
+
+#define TWINHAN_TECHNOLOGIES 0x1822
+#define MANTIS 0x4e35
+
+#define TECHNISAT 0x1ae4
+#define TERRATEC 0x153b
+
+#define MAKE_ENTRY(__subven, __subdev, __configptr) { \
+ .vendor = TWINHAN_TECHNOLOGIES, \
+ .device = MANTIS, \
+ .subvendor = (__subven), \
+ .subdevice = (__subdev), \
+ .driver_data = (unsigned long) (__configptr) \
+}
+
+enum mantis_i2c_mode {
+ MANTIS_PAGE_MODE = 0,
+ MANTIS_BYTE_MODE,
+};
+
+struct mantis_pci;
+
+struct mantis_hwconfig {
+ char *model_name;
+ char *dev_type;
+ u32 ts_size;
+
+ enum mantis_baud baud_rate;
+ enum mantis_parity parity;
+ u32 bytes;
+
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ int (*frontend_init)(struct mantis_pci *mantis, struct dvb_frontend *fe);
+
+ u8 power;
+ u8 reset;
+
+ enum mantis_i2c_mode i2c_mode;
+};
+
+struct mantis_pci {
+ unsigned int verbose;
+
+ /* PCI stuff */
+ u16 vendor_id;
+ u16 device_id;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+
+ u8 latency;
+
+ struct pci_dev *pdev;
+
+ unsigned long mantis_addr;
+ void __iomem *mmio;
+
+ u8 irq;
+ u8 revision;
+
+ unsigned int num;
+
+ /* RISC Core */
+ u32 finished_block;
+ u32 last_block;
+ u32 line_bytes;
+ u32 line_count;
+ u32 risc_pos;
+ u8 *buf_cpu;
+ dma_addr_t buf_dma;
+ u32 *risc_cpu;
+ dma_addr_t risc_dma;
+
+ struct tasklet_struct tasklet;
+
+ struct i2c_adapter adapter;
+ int i2c_rc;
+ wait_queue_head_t i2c_wq;
+ struct mutex i2c_lock;
+
+ /* DVB stuff */
+ struct dvb_adapter dvb_adapter;
+ struct dvb_frontend *fe;
+ struct dvb_demux demux;
+ struct dmxdev dmxdev;
+ struct dmx_frontend fe_hw;
+ struct dmx_frontend fe_mem;
+ struct dvb_net dvbnet;
+
+ u8 feeds;
+
+ struct mantis_hwconfig *hwconfig;
+
+ u32 mantis_int_stat;
+ u32 mantis_int_mask;
+
+ /* board specific */
+ u8 mac_address[8];
+ u32 sub_vendor_id;
+ u32 sub_device_id;
+
+ /* A12 A13 A14 */
+ u32 gpio_status;
+
+ u32 gpif_status;
+
+ struct mantis_ca *mantis_ca;
+
+ wait_queue_head_t uart_wq;
+ struct work_struct uart_work;
+ spinlock_t uart_lock;
+
+ struct input_dev *rc;
+};
+
+#define MANTIS_HIF_STATUS (mantis->gpio_status)
+
+#endif /* __MANTIS_COMMON_H */
diff --git a/drivers/media/dvb/mantis/mantis_core.c b/drivers/media/dvb/mantis/mantis_core.c
new file mode 100644
index 000000000000..8113b23ce448
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_core.c
@@ -0,0 +1,238 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include "mantis_common.h"
+#include "mantis_core.h"
+#include "mantis_vp1033.h"
+#include "mantis_vp1034.h"
+#include "mantis_vp1041.h"
+#include "mantis_vp2033.h"
+#include "mantis_vp2040.h"
+#include "mantis_vp3030.h"
+
+static int read_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
+{
+ int err;
+ struct i2c_msg msg[] = {
+ {
+ .addr = 0x50,
+ .flags = 0,
+ .buf = data,
+ .len = 1
+ }, {
+ .addr = 0x50,
+ .flags = I2C_M_RD,
+ .buf = data,
+ .len = length
+ },
+ };
+
+ err = i2c_transfer(&mantis->adapter, msg, 2);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1,
+ "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
+ err, data[0], data[1]);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int write_eeprom_byte(struct mantis_pci *mantis, u8 *data, u8 length)
+{
+ int err;
+
+ struct i2c_msg msg = {
+ .addr = 0x50,
+ .flags = 0,
+ .buf = data,
+ .len = length
+ };
+
+ err = i2c_transfer(&mantis->adapter, &msg, 1);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1,
+ "ERROR: i2c write: < err=%i length=0x%02x d0=0x%02x, d1=0x%02x >",
+ err, length, data[0], data[1]);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int get_mac_address(struct mantis_pci *mantis)
+{
+ int err;
+
+ mantis->mac_address[0] = 0x08;
+ err = read_eeprom_byte(mantis, &mantis->mac_address[0], 6);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "Mantis EEPROM read error");
+
+ return err;
+ }
+ dprintk(verbose, MANTIS_ERROR, 0,
+ " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+ mantis->mac_address[0], mantis->mac_address[1],
+ mantis->mac_address[2], mantis->mac_address[3],
+ mantis->mac_address[4], mantis->mac_address[5]);
+
+ return 0;
+}
+
+#define MANTIS_MODEL_UNKNOWN "UNKNOWN"
+#define MANTIS_DEV_UNKNOWN "UNKNOWN"
+
+struct mantis_hwconfig unknown_device = {
+ .model_name = MANTIS_MODEL_UNKNOWN,
+ .dev_type = MANTIS_DEV_UNKNOWN,
+};
+
+static void mantis_load_config(struct mantis_pci *mantis)
+{
+ switch (mantis->subsystem_device) {
+ case MANTIS_VP_1033_DVB_S: /* VP-1033 */
+ mantis->hwconfig = &vp1033_mantis_config;
+ break;
+ case MANTIS_VP_1034_DVB_S: /* VP-1034 */
+ mantis->hwconfig = &vp1034_mantis_config;
+ break;
+ case MANTIS_VP_1041_DVB_S2: /* VP-1041 */
+ case TECHNISAT_SKYSTAR_HD2:
+ mantis->hwconfig = &vp1041_mantis_config;
+ break;
+ case MANTIS_VP_2033_DVB_C: /* VP-2033 */
+ mantis->hwconfig = &vp2033_mantis_config;
+ break;
+ case MANTIS_VP_2040_DVB_C: /* VP-2040 */
+ case TERRATEC_CINERGY_C_PCI: /* VP-2040 clone */
+ case TECHNISAT_CABLESTAR_HD2:
+ mantis->hwconfig = &vp2040_mantis_config;
+ break;
+ case MANTIS_VP_3030_DVB_T: /* VP-3030 */
+ mantis->hwconfig = &vp3030_mantis_config;
+ break;
+ default:
+ mantis->hwconfig = &unknown_device;
+ break;
+ }
+}
+
+int mantis_core_init(struct mantis_pci *mantis)
+{
+ int err = 0;
+
+ mantis_load_config(mantis);
+ dprintk(verbose, MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
+ mantis->hwconfig->model_name, mantis->hwconfig->dev_type,
+ mantis->pdev->bus->number, PCI_SLOT(mantis->pdev->devfn), PCI_FUNC(mantis->pdev->devfn));
+ dprintk(verbose, MANTIS_ERROR, 0, " Mantis Rev %d [%04x:%04x], ",
+ mantis->revision,
+ mantis->subsystem_vendor, mantis->subsystem_device);
+ dprintk(verbose, MANTIS_ERROR, 0,
+ "irq: %d, latency: %d\n memory: 0x%lx, mmio: 0x%p\n",
+ mantis->pdev->irq, mantis->latency,
+ mantis->mantis_addr, mantis->mantis_mmio);
+
+ err = mantis_i2c_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "Mantis I2C init failed");
+ return err;
+ }
+ err = get_mac_address(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "get MAC address failed");
+ return err;
+ }
+ err = mantis_dma_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_ERROR, 1, "Mantis DMA init failed");
+ return err;
+ }
+ err = mantis_dvb_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_DEBUG, 1, "Mantis DVB init failed");
+ return err;
+ }
+ err = mantis_uart_init(mantis);
+ if (err < 0) {
+ dprintk(verbose, MANTIS_DEBUG, 1, "Mantis UART init failed");
+ return err;
+ }
+
+ return 0;
+}
+
+int mantis_core_exit(struct mantis_pci *mantis)
+{
+ mantis_dma_stop(mantis);
+ dprintk(verbose, MANTIS_ERROR, 1, "DMA engine stopping");
+
+ mantis_uart_exit(mantis);
+ dprintk(verbose, MANTIS_ERROR, 1, "UART exit failed");
+
+ if (mantis_dma_exit(mantis) < 0)
+ dprintk(verbose, MANTIS_ERROR, 1, "DMA exit failed");
+ if (mantis_dvb_exit(mantis) < 0)
+ dprintk(verbose, MANTIS_ERROR, 1, "DVB exit failed");
+ if (mantis_i2c_exit(mantis) < 0)
+ dprintk(verbose, MANTIS_ERROR, 1, "I2C adapter delete.. failed");
+
+ return 0;
+}
+
+/* Turn the given bit on or off. */
+void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
+{
+ u32 cur;
+
+ cur = mmread(MANTIS_GPIF_ADDR);
+ if (value)
+ mantis->gpio_status = cur | (1 << bitpos);
+ else
+ mantis->gpio_status = cur & (~(1 << bitpos));
+
+ mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
+ mmwrite(0x00, MANTIS_GPIF_DOUT);
+ udelay(100);
+}
+
+/* direction = 0 , no CI passthrough ; 1 , CI passthrough */
+void mantis_set_direction(struct mantis_pci *mantis, int direction)
+{
+ u32 reg;
+
+ reg = mmread(0x28);
+ dprintk(verbose, MANTIS_DEBUG, 1, "TS direction setup");
+ if (direction == 0x01) {
+ /* to CI */
+ reg |= 0x04;
+ mmwrite(reg, 0x28);
+ reg &= 0xff - 0x04;
+ mmwrite(reg, 0x28);
+ } else {
+ reg &= 0xff - 0x04;
+ mmwrite(reg, 0x28);
+ reg |= 0x04;
+ mmwrite(reg, 0x28);
+ }
+}
diff --git a/drivers/media/dvb/mantis/mantis_core.h b/drivers/media/dvb/mantis/mantis_core.h
new file mode 100644
index 000000000000..833ee42e694e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_core.h
@@ -0,0 +1,57 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_CORE_H
+#define __MANTIS_CORE_H
+
+#include "mantis_common.h"
+
+
+#define FE_TYPE_SAT 0
+#define FE_TYPE_CAB 1
+#define FE_TYPE_TER 2
+
+#define FE_TYPE_TS204 0
+#define FE_TYPE_TS188 1
+
+
+struct vendorname {
+ u8 *sub_vendor_name;
+ u32 sub_vendor_id;
+};
+
+struct devicetype {
+ u8 *sub_device_name;
+ u32 sub_device_id;
+ u8 device_type;
+ u32 type_flags;
+};
+
+
+extern int mantis_dma_init(struct mantis_pci *mantis);
+extern int mantis_dma_exit(struct mantis_pci *mantis);
+extern void mantis_dma_start(struct mantis_pci *mantis);
+extern void mantis_dma_stop(struct mantis_pci *mantis);
+extern int mantis_i2c_init(struct mantis_pci *mantis);
+extern int mantis_i2c_exit(struct mantis_pci *mantis);
+extern int mantis_core_init(struct mantis_pci *mantis);
+extern int mantis_core_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_CORE_H */
diff --git a/drivers/media/dvb/mantis/mantis_dma.c b/drivers/media/dvb/mantis/mantis_dma.c
new file mode 100644
index 000000000000..46202a4012aa
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dma.c
@@ -0,0 +1,256 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <asm/page.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+
+#include <asm/irq.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_dma.h"
+
+#define RISC_WRITE (0x01 << 28)
+#define RISC_JUMP (0x07 << 28)
+#define RISC_IRQ (0x01 << 24)
+
+#define RISC_STATUS(status) ((((~status) & 0x0f) << 20) | ((status & 0x0f) << 16))
+#define RISC_FLUSH() (mantis->risc_pos = 0)
+#define RISC_INSTR(opcode) (mantis->risc_cpu[mantis->risc_pos++] = cpu_to_le32(opcode))
+
+#define MANTIS_BUF_SIZE (64 * 1024)
+#define MANTIS_BLOCK_BYTES (MANTIS_BUF_SIZE >> 4)
+#define MANTIS_BLOCK_COUNT (1 << 4)
+#define MANTIS_RISC_SIZE PAGE_SIZE
+
+int mantis_dma_exit(struct mantis_pci *mantis)
+{
+ if (mantis->buf_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "DMA=0x%lx cpu=0x%p size=%d",
+ (unsigned long) mantis->buf_dma,
+ mantis->buf_cpu,
+ MANTIS_BUF_SIZE);
+
+ pci_free_consistent(mantis->pdev, MANTIS_BUF_SIZE,
+ mantis->buf_cpu, mantis->buf_dma);
+
+ mantis->buf_cpu = NULL;
+ }
+ if (mantis->risc_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "RISC=0x%lx cpu=0x%p size=%lx",
+ (unsigned long) mantis->risc_dma,
+ mantis->risc_cpu,
+ MANTIS_RISC_SIZE);
+
+ pci_free_consistent(mantis->pdev, MANTIS_RISC_SIZE,
+ mantis->risc_cpu, mantis->risc_dma);
+
+ mantis->risc_cpu = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_dma_exit);
+
+static inline int mantis_alloc_buffers(struct mantis_pci *mantis)
+{
+ if (!mantis->buf_cpu) {
+ mantis->buf_cpu = pci_alloc_consistent(mantis->pdev,
+ MANTIS_BUF_SIZE,
+ &mantis->buf_dma);
+ if (!mantis->buf_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "DMA buffer allocation failed");
+
+ goto err;
+ }
+ dprintk(MANTIS_ERROR, 1,
+ "DMA=0x%lx cpu=0x%p size=%d",
+ (unsigned long) mantis->buf_dma,
+ mantis->buf_cpu, MANTIS_BUF_SIZE);
+ }
+ if (!mantis->risc_cpu) {
+ mantis->risc_cpu = pci_alloc_consistent(mantis->pdev,
+ MANTIS_RISC_SIZE,
+ &mantis->risc_dma);
+
+ if (!mantis->risc_cpu) {
+ dprintk(MANTIS_ERROR, 1,
+ "RISC program allocation failed");
+
+ mantis_dma_exit(mantis);
+
+ goto err;
+ }
+ dprintk(MANTIS_ERROR, 1,
+ "RISC=0x%lx cpu=0x%p size=%lx",
+ (unsigned long) mantis->risc_dma,
+ mantis->risc_cpu, MANTIS_RISC_SIZE);
+ }
+
+ return 0;
+err:
+ dprintk(MANTIS_ERROR, 1, "Out of memory (?) .....");
+ return -ENOMEM;
+}
+
+static inline int mantis_calc_lines(struct mantis_pci *mantis)
+{
+ mantis->line_bytes = MANTIS_BLOCK_BYTES;
+ mantis->line_count = MANTIS_BLOCK_COUNT;
+
+ while (mantis->line_bytes > 4095) {
+ mantis->line_bytes >>= 1;
+ mantis->line_count <<= 1;
+ }
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis RISC block bytes=[%d], line bytes=[%d], line count=[%d]",
+ MANTIS_BLOCK_BYTES, mantis->line_bytes, mantis->line_count);
+
+ if (mantis->line_count > 255) {
+ dprintk(MANTIS_ERROR, 1, "Buffer size error");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mantis_dma_init(struct mantis_pci *mantis)
+{
+ int err = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis DMA init");
+ if (mantis_alloc_buffers(mantis) < 0) {
+ dprintk(MANTIS_ERROR, 1, "Error allocating DMA buffer");
+
+ /* Stop RISC Engine */
+ mmwrite(0, MANTIS_DMA_CTL);
+
+ goto err;
+ }
+ err = mantis_calc_lines(mantis);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "Mantis calc lines failed");
+
+ goto err;
+ }
+
+ return 0;
+err:
+ return err;
+}
+EXPORT_SYMBOL_GPL(mantis_dma_init);
+
+static inline void mantis_risc_program(struct mantis_pci *mantis)
+{
+ u32 buf_pos = 0;
+ u32 line;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis create RISC program");
+ RISC_FLUSH();
+
+ dprintk(MANTIS_DEBUG, 1, "risc len lines %u, bytes per line %u",
+ mantis->line_count, mantis->line_bytes);
+
+ for (line = 0; line < mantis->line_count; line++) {
+ dprintk(MANTIS_DEBUG, 1, "RISC PROG line=[%d]", line);
+ if (!(buf_pos % MANTIS_BLOCK_BYTES)) {
+ RISC_INSTR(RISC_WRITE |
+ RISC_IRQ |
+ RISC_STATUS(((buf_pos / MANTIS_BLOCK_BYTES) +
+ (MANTIS_BLOCK_COUNT - 1)) %
+ MANTIS_BLOCK_COUNT) |
+ mantis->line_bytes);
+ } else {
+ RISC_INSTR(RISC_WRITE | mantis->line_bytes);
+ }
+ RISC_INSTR(mantis->buf_dma + buf_pos);
+ buf_pos += mantis->line_bytes;
+ }
+ RISC_INSTR(RISC_JUMP);
+ RISC_INSTR(mantis->risc_dma);
+}
+
+void mantis_dma_start(struct mantis_pci *mantis)
+{
+ dprintk(MANTIS_DEBUG, 1, "Mantis Start DMA engine");
+
+ mantis_risc_program(mantis);
+ mmwrite(mantis->risc_dma, MANTIS_RISC_START);
+ mmwrite(mmread(MANTIS_GPIF_ADDR) | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
+
+ mmwrite(0, MANTIS_DMA_CTL);
+ mantis->last_block = mantis->finished_block = 0;
+
+ mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_RISCI, MANTIS_INT_MASK);
+
+ mmwrite(MANTIS_FIFO_EN | MANTIS_DCAP_EN
+ | MANTIS_RISC_EN, MANTIS_DMA_CTL);
+
+}
+
+void mantis_dma_stop(struct mantis_pci *mantis)
+{
+ u32 stat = 0, mask = 0;
+
+ stat = mmread(MANTIS_INT_STAT);
+ mask = mmread(MANTIS_INT_MASK);
+ dprintk(MANTIS_DEBUG, 1, "Mantis Stop DMA engine");
+
+ mmwrite((mmread(MANTIS_GPIF_ADDR) & (~(MANTIS_GPIF_HIFRDWRN))), MANTIS_GPIF_ADDR);
+
+ mmwrite((mmread(MANTIS_DMA_CTL) & ~(MANTIS_FIFO_EN |
+ MANTIS_DCAP_EN |
+ MANTIS_RISC_EN)), MANTIS_DMA_CTL);
+
+ mmwrite(mmread(MANTIS_INT_STAT), MANTIS_INT_STAT);
+
+ mmwrite(mmread(MANTIS_INT_MASK) & ~(MANTIS_INT_RISCI |
+ MANTIS_INT_RISCEN), MANTIS_INT_MASK);
+}
+
+
+void mantis_dma_xfer(unsigned long data)
+{
+ struct mantis_pci *mantis = (struct mantis_pci *) data;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+
+ while (mantis->last_block != mantis->finished_block) {
+ dprintk(MANTIS_DEBUG, 1, "last block=[%d] finished block=[%d]",
+ mantis->last_block, mantis->finished_block);
+
+ (config->ts_size ? dvb_dmx_swfilter_204 : dvb_dmx_swfilter)
+ (&mantis->demux, &mantis->buf_cpu[mantis->last_block * MANTIS_BLOCK_BYTES], MANTIS_BLOCK_BYTES);
+ mantis->last_block = (mantis->last_block + 1) % MANTIS_BLOCK_COUNT;
+ }
+}
diff --git a/drivers/media/dvb/mantis/mantis_dma.h b/drivers/media/dvb/mantis/mantis_dma.h
new file mode 100644
index 000000000000..6be00fa82094
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dma.h
@@ -0,0 +1,30 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_DMA_H
+#define __MANTIS_DMA_H
+
+extern int mantis_dma_init(struct mantis_pci *mantis);
+extern int mantis_dma_exit(struct mantis_pci *mantis);
+extern void mantis_dma_start(struct mantis_pci *mantis);
+extern void mantis_dma_stop(struct mantis_pci *mantis);
+extern void mantis_dma_xfer(unsigned long data);
+
+#endif /* __MANTIS_DMA_H */
diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c
new file mode 100644
index 000000000000..99d82eec3b03
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dvb.c
@@ -0,0 +1,296 @@
+/*
+ Mantis PCI bridge driver
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_dma.h"
+#include "mantis_ca.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+int mantis_frontend_power(struct mantis_pci *mantis, enum mantis_power power)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+
+ switch (power) {
+ case POWER_ON:
+ dprintk(MANTIS_DEBUG, 1, "Power ON");
+ gpio_set_bits(mantis, config->power, POWER_ON);
+ msleep(100);
+ gpio_set_bits(mantis, config->power, POWER_ON);
+ msleep(100);
+ break;
+
+ case POWER_OFF:
+ dprintk(MANTIS_DEBUG, 1, "Power OFF");
+ gpio_set_bits(mantis, config->power, POWER_OFF);
+ msleep(100);
+ break;
+
+ default:
+ dprintk(MANTIS_DEBUG, 1, "Unknown state <%02x>", power);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_frontend_power);
+
+void mantis_frontend_soft_reset(struct mantis_pci *mantis)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+
+ dprintk(MANTIS_DEBUG, 1, "Frontend RESET");
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+ msleep(100);
+
+ return;
+}
+EXPORT_SYMBOL_GPL(mantis_frontend_soft_reset);
+
+static int mantis_frontend_shutdown(struct mantis_pci *mantis)
+{
+ int err;
+
+ mantis_frontend_soft_reset(mantis);
+ err = mantis_frontend_power(mantis, POWER_OFF);
+ if (err != 0) {
+ dprintk(MANTIS_ERROR, 1, "Frontend POWER OFF failed! <%d>", err);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mantis_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct mantis_pci *mantis = dvbdmx->priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis DVB Start feed");
+ if (!dvbdmx->dmx.frontend) {
+ dprintk(MANTIS_DEBUG, 1, "no frontend ?");
+ return -EINVAL;
+ }
+
+ mantis->feeds++;
+ dprintk(MANTIS_DEBUG, 1, "mantis start feed, feeds=%d", mantis->feeds);
+
+ if (mantis->feeds == 1) {
+ dprintk(MANTIS_DEBUG, 1, "mantis start feed & dma");
+ mantis_dma_start(mantis);
+ }
+
+ return mantis->feeds;
+}
+
+static int mantis_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
+{
+ struct dvb_demux *dvbdmx = dvbdmxfeed->demux;
+ struct mantis_pci *mantis = dvbdmx->priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis DVB Stop feed");
+ if (!dvbdmx->dmx.frontend) {
+ dprintk(MANTIS_DEBUG, 1, "no frontend ?");
+ return -EINVAL;
+ }
+
+ mantis->feeds--;
+ if (mantis->feeds == 0) {
+ dprintk(MANTIS_DEBUG, 1, "mantis stop feed and dma");
+ mantis_dma_stop(mantis);
+ }
+
+ return 0;
+}
+
+int __devinit mantis_dvb_init(struct mantis_pci *mantis)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ int result = -1;
+
+ dprintk(MANTIS_DEBUG, 1, "dvb_register_adapter");
+
+ result = dvb_register_adapter(&mantis->dvb_adapter,
+ "Mantis DVB adapter",
+ THIS_MODULE,
+ &mantis->pdev->dev,
+ adapter_nr);
+
+ if (result < 0) {
+
+ dprintk(MANTIS_ERROR, 1, "Error registering adapter");
+ return -ENODEV;
+ }
+
+ mantis->dvb_adapter.priv = mantis;
+ mantis->demux.dmx.capabilities = DMX_TS_FILTERING |
+ DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+
+ mantis->demux.priv = mantis;
+ mantis->demux.filternum = 256;
+ mantis->demux.feednum = 256;
+ mantis->demux.start_feed = mantis_dvb_start_feed;
+ mantis->demux.stop_feed = mantis_dvb_stop_feed;
+ mantis->demux.write_to_decoder = NULL;
+
+ dprintk(MANTIS_DEBUG, 1, "dvb_dmx_init");
+ result = dvb_dmx_init(&mantis->demux);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+
+ goto err0;
+ }
+
+ mantis->dmxdev.filternum = 256;
+ mantis->dmxdev.demux = &mantis->demux.dmx;
+ mantis->dmxdev.capabilities = 0;
+ dprintk(MANTIS_DEBUG, 1, "dvb_dmxdev_init");
+
+ result = dvb_dmxdev_init(&mantis->dmxdev, &mantis->dvb_adapter);
+ if (result < 0) {
+
+ dprintk(MANTIS_ERROR, 1, "dvb_dmxdev_init failed, ERROR=%d", result);
+ goto err1;
+ }
+
+ mantis->fe_hw.source = DMX_FRONTEND_0;
+ result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+ if (result < 0) {
+
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+ goto err2;
+ }
+
+ mantis->fe_mem.source = DMX_MEMORY_FE;
+ result = mantis->demux.dmx.add_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+ goto err3;
+ }
+
+ result = mantis->demux.dmx.connect_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "dvb_dmx_init failed, ERROR=%d", result);
+ goto err4;
+ }
+
+ dvb_net_init(&mantis->dvb_adapter, &mantis->dvbnet, &mantis->demux.dmx);
+ tasklet_init(&mantis->tasklet, mantis_dma_xfer, (unsigned long) mantis);
+ if (mantis->hwconfig) {
+ result = config->frontend_init(mantis, mantis->fe);
+ if (result < 0) {
+ dprintk(MANTIS_ERROR, 1, "!!! NO Frontends found !!!");
+ goto err5;
+ } else {
+ if (mantis->fe == NULL) {
+ dprintk(MANTIS_ERROR, 1, "FE <NULL>");
+ goto err5;
+ }
+
+ if (dvb_register_frontend(&mantis->dvb_adapter, mantis->fe)) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Frontend registration failed");
+
+ if (mantis->fe->ops.release)
+ mantis->fe->ops.release(mantis->fe);
+
+ mantis->fe = NULL;
+ goto err5;
+ }
+ }
+ }
+
+ return 0;
+
+ /* Error conditions .. */
+err5:
+ tasklet_kill(&mantis->tasklet);
+ dvb_net_release(&mantis->dvbnet);
+ dvb_unregister_frontend(mantis->fe);
+ dvb_frontend_detach(mantis->fe);
+err4:
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+
+err3:
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+
+err2:
+ dvb_dmxdev_release(&mantis->dmxdev);
+
+err1:
+ dvb_dmx_release(&mantis->demux);
+
+err0:
+ dvb_unregister_adapter(&mantis->dvb_adapter);
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mantis_dvb_init);
+
+int __devexit mantis_dvb_exit(struct mantis_pci *mantis)
+{
+ int err;
+
+ if (mantis->fe) {
+ /* mantis_ca_exit(mantis); */
+ err = mantis_frontend_shutdown(mantis);
+ if (err != 0)
+ dprintk(MANTIS_ERROR, 1, "Frontend exit while POWER ON! <%d>", err);
+ dvb_unregister_frontend(mantis->fe);
+ dvb_frontend_detach(mantis->fe);
+ }
+
+ tasklet_kill(&mantis->tasklet);
+ dvb_net_release(&mantis->dvbnet);
+
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem);
+ mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_hw);
+
+ dvb_dmxdev_release(&mantis->dmxdev);
+ dvb_dmx_release(&mantis->demux);
+
+ dprintk(MANTIS_DEBUG, 1, "dvb_unregister_adapter");
+ dvb_unregister_adapter(&mantis->dvb_adapter);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_dvb_exit);
diff --git a/drivers/media/dvb/mantis/mantis_dvb.h b/drivers/media/dvb/mantis/mantis_dvb.h
new file mode 100644
index 000000000000..464199db304e
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_dvb.h
@@ -0,0 +1,35 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_DVB_H
+#define __MANTIS_DVB_H
+
+enum mantis_power {
+ POWER_OFF = 0,
+ POWER_ON = 1
+};
+
+extern int mantis_frontend_power(struct mantis_pci *mantis, enum mantis_power power);
+extern void mantis_frontend_soft_reset(struct mantis_pci *mantis);
+
+extern int mantis_dvb_init(struct mantis_pci *mantis);
+extern int mantis_dvb_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_DVB_H */
diff --git a/drivers/media/dvb/mantis/mantis_evm.c b/drivers/media/dvb/mantis/mantis_evm.c
new file mode 100644
index 000000000000..a7b369a439d6
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_evm.c
@@ -0,0 +1,117 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_link.h"
+#include "mantis_hif.h"
+#include "mantis_reg.h"
+
+static void mantis_hifevm_work(struct work_struct *work)
+{
+ struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_stat, gpif_mask;
+
+ gpif_stat = mmread(MANTIS_GPIF_STATUS);
+ gpif_mask = mmread(MANTIS_GPIF_IRQCFG);
+
+ if (gpif_stat & MANTIS_GPIF_DETSTAT) {
+ if (gpif_stat & MANTIS_CARD_PLUGIN) {
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): CAM Plugin", mantis->num);
+ mmwrite(0xdada0000, MANTIS_CARD_RESET);
+ mantis_event_cam_plugin(ca);
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_INSERTED);
+ }
+ } else {
+ if (gpif_stat & MANTIS_CARD_PLUGOUT) {
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): CAM Unplug", mantis->num);
+ mmwrite(0xdada0000, MANTIS_CARD_RESET);
+ mantis_event_cam_unplug(ca);
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_REMOVED);
+ }
+ }
+
+ if (mantis->gpif_status & MANTIS_GPIF_EXTIRQ)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Ext IRQ", mantis->num);
+
+ if (mantis->gpif_status & MANTIS_SBUF_WSTO)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Timeout", mantis->num);
+
+ if (mantis->gpif_status & MANTIS_GPIF_OTHERR)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Alignment Error", mantis->num);
+
+ if (gpif_stat & MANTIS_SBUF_OVFLW)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Overflow", mantis->num);
+
+ if (gpif_stat & MANTIS_GPIF_BRRDY)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Read Ready", mantis->num);
+
+ if (gpif_stat & MANTIS_GPIF_INTSTAT)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): GPIF IRQ", mantis->num);
+
+ if (gpif_stat & MANTIS_SBUF_EMPTY)
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer Empty", mantis->num);
+
+ if (gpif_stat & MANTIS_SBUF_OPDONE) {
+ dprintk(MANTIS_DEBUG, 1, "Event Mgr: Adapter(%d) Slot(0): Smart Buffer operation complete", mantis->num);
+ ca->sbuf_status = MANTIS_SBUF_DATA_AVAIL;
+ ca->hif_event = MANTIS_SBUF_OPDONE;
+ wake_up(&ca->hif_opdone_wq);
+ }
+}
+
+int mantis_evmgr_init(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Initializing Mantis Host I/F Event manager");
+ INIT_WORK(&ca->hif_evm_work, mantis_hifevm_work);
+ mantis_pcmcia_init(ca);
+ schedule_work(&ca->hif_evm_work);
+ mantis_hif_init(ca);
+ return 0;
+}
+
+void mantis_evmgr_exit(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ dprintk(MANTIS_DEBUG, 1, "Mantis Host I/F Event manager exiting");
+ flush_scheduled_work();
+ mantis_hif_exit(ca);
+ mantis_pcmcia_exit(ca);
+}
diff --git a/drivers/media/dvb/mantis/mantis_hif.c b/drivers/media/dvb/mantis/mantis_hif.c
new file mode 100644
index 000000000000..7477dac628b4
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_hif.c
@@ -0,0 +1,240 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+
+#include "mantis_hif.h"
+#include "mantis_link.h" /* temporary due to physical layer stuff */
+
+#include "mantis_reg.h"
+
+
+static int mantis_hif_sbuf_opdone_wait(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ int rc = 0;
+
+ if (wait_event_timeout(ca->hif_opdone_wq,
+ ca->hif_event & MANTIS_SBUF_OPDONE,
+ msecs_to_jiffies(500)) == -ERESTARTSYS) {
+
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Smart buffer operation timeout !", mantis->num);
+ rc = -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "Smart Buffer Operation complete");
+ ca->hif_event &= ~MANTIS_SBUF_OPDONE;
+ return rc;
+}
+
+static int mantis_hif_write_wait(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 opdone = 0, timeout = 0;
+ int rc = 0;
+
+ if (wait_event_timeout(ca->hif_write_wq,
+ mantis->gpif_status & MANTIS_GPIF_WRACK,
+ msecs_to_jiffies(500)) == -ERESTARTSYS) {
+
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): Write ACK timed out !", mantis->num);
+ rc = -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "Write Acknowledged");
+ mantis->gpif_status &= ~MANTIS_GPIF_WRACK;
+ while (!opdone) {
+ opdone = (mmread(MANTIS_GPIF_STATUS) & MANTIS_SBUF_OPDONE);
+ udelay(500);
+ timeout++;
+ if (timeout > 100) {
+ dprintk(MANTIS_ERROR, 1, "Adater(%d) Slot(0): Write operation timed out!", mantis->num);
+ rc = -ETIMEDOUT;
+ break;
+ }
+ }
+ dprintk(MANTIS_DEBUG, 1, "HIF Write success");
+ return rc;
+}
+
+
+int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 hif_addr = 0, data, count = 4;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Read", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr &= ~MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(hif_addr, MANTIS_GPIF_BRADDR);
+ mmwrite(count, MANTIS_GPIF_BRBYTES);
+ udelay(20);
+ mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
+
+ if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): GPIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ data = mmread(MANTIS_GPIF_DIN);
+ mutex_unlock(&ca->ca_lock);
+ dprintk(MANTIS_DEBUG, 1, "Mem Read: 0x%02x", data);
+ return (data >> 24) & 0xff;
+}
+
+int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data)
+{
+ struct mantis_slot *slot = ca->slot;
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 hif_addr = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF Mem Write", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_HIFRDWRN;
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr &= ~MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(slot->slave_cfg, MANTIS_GPIF_CFGSLA); /* Slot0 alone for now */
+ mmwrite(hif_addr, MANTIS_GPIF_ADDR);
+ mmwrite(data, MANTIS_GPIF_DOUT);
+
+ if (mantis_hif_write_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "Mem Write: (0x%02x to 0x%02x)", data, addr);
+ mutex_unlock(&ca->ca_lock);
+
+ return 0;
+}
+
+int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 data, hif_addr = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Read", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr |= MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(hif_addr, MANTIS_GPIF_BRADDR);
+ mmwrite(1, MANTIS_GPIF_BRBYTES);
+ udelay(20);
+ mmwrite(hif_addr | MANTIS_GPIF_HIFRDWRN, MANTIS_GPIF_ADDR);
+
+ if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ data = mmread(MANTIS_GPIF_DIN);
+ dprintk(MANTIS_DEBUG, 1, "I/O Read: 0x%02x", data);
+ udelay(50);
+ mutex_unlock(&ca->ca_lock);
+
+ return (u8) data;
+}
+
+int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 hif_addr = 0;
+
+ dprintk(MANTIS_DEBUG, 1, "Adapter(%d) Slot(0): Request HIF I/O Write", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ hif_addr &= ~MANTIS_GPIF_PCMCIAREG;
+ hif_addr &= ~MANTIS_GPIF_HIFRDWRN;
+ hif_addr |= MANTIS_GPIF_PCMCIAIOM;
+ hif_addr |= MANTIS_HIF_STATUS;
+ hif_addr |= addr;
+
+ mmwrite(hif_addr, MANTIS_GPIF_ADDR);
+ mmwrite(data, MANTIS_GPIF_DOUT);
+
+ if (mantis_hif_write_wait(ca) != 0) {
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Slot(0): HIF Smart Buffer operation failed", mantis->num);
+ mutex_unlock(&ca->ca_lock);
+ return -EREMOTEIO;
+ }
+ dprintk(MANTIS_DEBUG, 1, "I/O Write: (0x%02x to 0x%02x)", data, addr);
+ mutex_unlock(&ca->ca_lock);
+ udelay(50);
+
+ return 0;
+}
+
+int mantis_hif_init(struct mantis_ca *ca)
+{
+ struct mantis_slot *slot = ca->slot;
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 irqcfg;
+
+ slot[0].slave_cfg = 0x70773028;
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Initializing Mantis Host Interface", mantis->num);
+
+ mutex_lock(&ca->ca_lock);
+ irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ irqcfg = MANTIS_MASK_BRRDY |
+ MANTIS_MASK_WRACK |
+ MANTIS_MASK_EXTIRQ |
+ MANTIS_MASK_WSTO |
+ MANTIS_MASK_OTHERR |
+ MANTIS_MASK_OVFLW;
+
+ mmwrite(irqcfg, MANTIS_GPIF_IRQCFG);
+ mutex_unlock(&ca->ca_lock);
+
+ return 0;
+}
+
+void mantis_hif_exit(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+ u32 irqcfg;
+
+ dprintk(MANTIS_ERROR, 1, "Adapter(%d) Exiting Mantis Host Interface", mantis->num);
+ mutex_lock(&ca->ca_lock);
+ irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ irqcfg &= ~MANTIS_MASK_BRRDY;
+ mmwrite(irqcfg, MANTIS_GPIF_IRQCFG);
+ mutex_unlock(&ca->ca_lock);
+}
diff --git a/drivers/media/dvb/mantis/mantis_hif.h b/drivers/media/dvb/mantis/mantis_hif.h
new file mode 100644
index 000000000000..9094f9ed2362
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_hif.h
@@ -0,0 +1,29 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_HIF_H
+#define __MANTIS_HIF_H
+
+#define MANTIS_HIF_MEMRD 1
+#define MANTIS_HIF_MEMWR 2
+#define MANTIS_HIF_IOMRD 3
+#define MANTIS_HIF_IOMWR 4
+
+#endif /* __MANTIS_HIF_H */
diff --git a/drivers/media/dvb/mantis/mantis_i2c.c b/drivers/media/dvb/mantis/mantis_i2c.c
new file mode 100644
index 000000000000..7870bcf9689a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_i2c.c
@@ -0,0 +1,267 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <asm/io.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/i2c.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_i2c.h"
+
+#define TRIALS 10000
+
+static int mantis_i2c_read(struct mantis_pci *mantis, const struct i2c_msg *msg)
+{
+ u32 rxd, i, stat, trials;
+
+ dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <R>[ ",
+ __func__, msg->addr);
+
+ for (i = 0; i < msg->len; i++) {
+ rxd = (msg->addr << 25) | (1 << 24)
+ | MANTIS_I2C_RATE_3
+ | MANTIS_I2C_STOP
+ | MANTIS_I2C_PGMODE;
+
+ if (i == (msg->len - 1))
+ rxd &= ~MANTIS_I2C_STOP;
+
+ mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
+ mmwrite(rxd, MANTIS_I2CDATA_CTL);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CDONE)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CRACK)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
+
+ rxd = mmread(MANTIS_I2CDATA_CTL);
+ msg->buf[i] = (u8)((rxd >> 8) & 0xFF);
+ dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
+ }
+ dprintk(MANTIS_INFO, 0, "]\n");
+
+ return 0;
+}
+
+static int mantis_i2c_write(struct mantis_pci *mantis, const struct i2c_msg *msg)
+{
+ int i;
+ u32 txd = 0, stat, trials;
+
+ dprintk(MANTIS_INFO, 0, " %s: Address=[0x%02x] <W>[ ",
+ __func__, msg->addr);
+
+ for (i = 0; i < msg->len; i++) {
+ dprintk(MANTIS_INFO, 0, "%02x ", msg->buf[i]);
+ txd = (msg->addr << 25) | (msg->buf[i] << 8)
+ | MANTIS_I2C_RATE_3
+ | MANTIS_I2C_STOP
+ | MANTIS_I2C_PGMODE;
+
+ if (i == (msg->len - 1))
+ txd &= ~MANTIS_I2C_STOP;
+
+ mmwrite(MANTIS_INT_I2CDONE, MANTIS_INT_STAT);
+ mmwrite(txd, MANTIS_I2CDATA_CTL);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CDONE)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CDONE: trials=%d\n", trials);
+
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CRACK)
+ break;
+ }
+
+ dprintk(MANTIS_TMG, 0, "I2CRACK: trials=%d\n", trials);
+ }
+ dprintk(MANTIS_INFO, 0, "]\n");
+
+ return 0;
+}
+
+static int mantis_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
+{
+ int ret = 0, i = 0, trials;
+ u32 stat, data, txd;
+ struct mantis_pci *mantis;
+ struct mantis_hwconfig *config;
+
+ mantis = i2c_get_adapdata(adapter);
+ BUG_ON(!mantis);
+ config = mantis->hwconfig;
+ BUG_ON(!config);
+
+ dprintk(MANTIS_DEBUG, 1, "Messages:%d", num);
+ mutex_lock(&mantis->i2c_lock);
+
+ while (i < num) {
+ /* Byte MODE */
+ if ((config->i2c_mode & MANTIS_BYTE_MODE) &&
+ ((i + 1) < num) &&
+ (msgs[i].len < 2) &&
+ (msgs[i + 1].len < 2) &&
+ (msgs[i + 1].flags & I2C_M_RD)) {
+
+ dprintk(MANTIS_DEBUG, 0, " Byte MODE:\n");
+
+ /* Read operation */
+ txd = msgs[i].addr << 25 | (0x1 << 24)
+ | (msgs[i].buf[0] << 16)
+ | MANTIS_I2C_RATE_3;
+
+ mmwrite(txd, MANTIS_I2CDATA_CTL);
+ /* wait for xfer completion */
+ for (trials = 0; trials < TRIALS; trials++) {
+ stat = mmread(MANTIS_INT_STAT);
+ if (stat & MANTIS_INT_I2CDONE)
+ break;
+ }
+
+ /* check for xfer completion */
+ if (stat & MANTIS_INT_I2CDONE) {
+ /* check xfer was acknowledged */
+ if (stat & MANTIS_INT_I2CRACK) {
+ data = mmread(MANTIS_I2CDATA_CTL);
+ msgs[i + 1].buf[0] = (data >> 8) & 0xff;
+ dprintk(MANTIS_DEBUG, 0, " Byte <%d> RXD=0x%02x [%02x]\n", 0x0, data, msgs[i + 1].buf[0]);
+ } else {
+ /* I/O error */
+ dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
+ ret = -EIO;
+ break;
+ }
+ } else {
+ /* I/O error */
+ dprintk(MANTIS_ERROR, 1, " I/O error, LINE:%d", __LINE__);
+ ret = -EIO;
+ break;
+ }
+ i += 2; /* Write/Read operation in one go */
+ }
+
+ if (i < num) {
+ if (msgs[i].flags & I2C_M_RD)
+ ret = mantis_i2c_read(mantis, &msgs[i]);
+ else
+ ret = mantis_i2c_write(mantis, &msgs[i]);
+
+ i++;
+ if (ret < 0)
+ goto bail_out;
+ }
+
+ }
+
+ mutex_unlock(&mantis->i2c_lock);
+
+ return num;
+
+bail_out:
+ mutex_unlock(&mantis->i2c_lock);
+ return ret;
+}
+
+static u32 mantis_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+static struct i2c_algorithm mantis_algo = {
+ .master_xfer = mantis_i2c_xfer,
+ .functionality = mantis_i2c_func,
+};
+
+int __devinit mantis_i2c_init(struct mantis_pci *mantis)
+{
+ u32 intstat, intmask;
+ struct i2c_adapter *i2c_adapter = &mantis->adapter;
+ struct pci_dev *pdev = mantis->pdev;
+
+ init_waitqueue_head(&mantis->i2c_wq);
+ mutex_init(&mantis->i2c_lock);
+ strncpy(i2c_adapter->name, "Mantis I2C", sizeof(i2c_adapter->name));
+ i2c_set_adapdata(i2c_adapter, mantis);
+
+ i2c_adapter->owner = THIS_MODULE;
+ i2c_adapter->class = I2C_CLASS_TV_DIGITAL;
+ i2c_adapter->algo = &mantis_algo;
+ i2c_adapter->algo_data = NULL;
+ i2c_adapter->timeout = 500;
+ i2c_adapter->retries = 3;
+ i2c_adapter->dev.parent = &pdev->dev;
+
+ mantis->i2c_rc = i2c_add_adapter(i2c_adapter);
+ if (mantis->i2c_rc < 0)
+ return mantis->i2c_rc;
+
+ dprintk(MANTIS_DEBUG, 1, "Initializing I2C ..");
+
+ intstat = mmread(MANTIS_INT_STAT);
+ intmask = mmread(MANTIS_INT_MASK);
+ mmwrite(intstat, MANTIS_INT_STAT);
+ dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
+ intmask = mmread(MANTIS_INT_MASK);
+ mmwrite((intmask & ~MANTIS_INT_I2CDONE), MANTIS_INT_MASK);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_i2c_init);
+
+int mantis_i2c_exit(struct mantis_pci *mantis)
+{
+ u32 intmask;
+
+ dprintk(MANTIS_DEBUG, 1, "Disabling I2C interrupt");
+ intmask = mmread(MANTIS_INT_MASK);
+ mmwrite((intmask & ~MANTIS_INT_I2CDONE), MANTIS_INT_MASK);
+
+ dprintk(MANTIS_DEBUG, 1, "Removing I2C adapter");
+ return i2c_del_adapter(&mantis->adapter);
+}
+EXPORT_SYMBOL_GPL(mantis_i2c_exit);
diff --git a/drivers/media/dvb/mantis/mantis_i2c.h b/drivers/media/dvb/mantis/mantis_i2c.h
new file mode 100644
index 000000000000..1342df2faed8
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_i2c.h
@@ -0,0 +1,30 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_I2C_H
+#define __MANTIS_I2C_H
+
+#define I2C_STOP (1 << 0)
+#define I2C_READ (1 << 1)
+
+extern int mantis_i2c_init(struct mantis_pci *mantis);
+extern int mantis_i2c_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_I2C_H */
diff --git a/drivers/media/dvb/mantis/mantis_input.c b/drivers/media/dvb/mantis/mantis_input.c
new file mode 100644
index 000000000000..4675a3b53c7d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_input.c
@@ -0,0 +1,148 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/input.h>
+#include <media/ir-common.h>
+#include <linux/pci.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_uart.h"
+
+static struct ir_scancode mantis_ir_table[] = {
+ { 0x29, KEY_POWER },
+ { 0x28, KEY_FAVORITES },
+ { 0x30, KEY_TEXT },
+ { 0x17, KEY_INFO }, /* Preview */
+ { 0x23, KEY_EPG },
+ { 0x3b, KEY_F22 }, /* Record List */
+ { 0x3c, KEY_1 },
+ { 0x3e, KEY_2 },
+ { 0x39, KEY_3 },
+ { 0x36, KEY_4 },
+ { 0x22, KEY_5 },
+ { 0x20, KEY_6 },
+ { 0x32, KEY_7 },
+ { 0x26, KEY_8 },
+ { 0x24, KEY_9 },
+ { 0x2a, KEY_0 },
+
+ { 0x33, KEY_CANCEL },
+ { 0x2c, KEY_BACK },
+ { 0x15, KEY_CLEAR },
+ { 0x3f, KEY_TAB },
+ { 0x10, KEY_ENTER },
+ { 0x14, KEY_UP },
+ { 0x0d, KEY_RIGHT },
+ { 0x0e, KEY_DOWN },
+ { 0x11, KEY_LEFT },
+
+ { 0x21, KEY_VOLUMEUP },
+ { 0x35, KEY_VOLUMEDOWN },
+ { 0x3d, KEY_CHANNELDOWN },
+ { 0x3a, KEY_CHANNELUP },
+ { 0x2e, KEY_RECORD },
+ { 0x2b, KEY_PLAY },
+ { 0x13, KEY_PAUSE },
+ { 0x25, KEY_STOP },
+
+ { 0x1f, KEY_REWIND },
+ { 0x2d, KEY_FASTFORWARD },
+ { 0x1e, KEY_PREVIOUS }, /* Replay |< */
+ { 0x1d, KEY_NEXT }, /* Skip >| */
+
+ { 0x0b, KEY_CAMERA }, /* Capture */
+ { 0x0f, KEY_LANGUAGE }, /* SAP */
+ { 0x18, KEY_MODE }, /* PIP */
+ { 0x12, KEY_ZOOM }, /* Full screen */
+ { 0x1c, KEY_SUBTITLE },
+ { 0x2f, KEY_MUTE },
+ { 0x16, KEY_F20 }, /* L/R */
+ { 0x38, KEY_F21 }, /* Hibernate */
+
+ { 0x37, KEY_SWITCHVIDEOMODE }, /* A/V */
+ { 0x31, KEY_AGAIN }, /* Recall */
+ { 0x1a, KEY_KPPLUS }, /* Zoom+ */
+ { 0x19, KEY_KPMINUS }, /* Zoom- */
+ { 0x27, KEY_RED },
+ { 0x0C, KEY_GREEN },
+ { 0x01, KEY_YELLOW },
+ { 0x00, KEY_BLUE },
+};
+
+struct ir_scancode_table ir_mantis = {
+ .scan = mantis_ir_table,
+ .size = ARRAY_SIZE(mantis_ir_table),
+};
+EXPORT_SYMBOL_GPL(ir_mantis);
+
+int mantis_input_init(struct mantis_pci *mantis)
+{
+ struct input_dev *rc;
+ struct ir_input_state rc_state;
+ char name[80], dev[80];
+ int err;
+
+ rc = input_allocate_device();
+ if (!rc) {
+ dprintk(MANTIS_ERROR, 1, "Input device allocate failed");
+ return -ENOMEM;
+ }
+
+ sprintf(name, "Mantis %s IR receiver", mantis->hwconfig->model_name);
+ sprintf(dev, "pci-%s/ir0", pci_name(mantis->pdev));
+
+ rc->name = name;
+ rc->phys = dev;
+
+ ir_input_init(rc, &rc_state, IR_TYPE_OTHER);
+
+ rc->id.bustype = BUS_PCI;
+ rc->id.vendor = mantis->vendor_id;
+ rc->id.product = mantis->device_id;
+ rc->id.version = 1;
+ rc->dev = mantis->pdev->dev;
+
+ err = ir_input_register(rc, &ir_mantis, NULL);
+ if (err) {
+ dprintk(MANTIS_ERROR, 1, "IR device registration failed, ret = %d", err);
+ input_free_device(rc);
+ return -ENODEV;
+ }
+
+ mantis->rc = rc;
+
+ return 0;
+}
+
+int mantis_exit(struct mantis_pci *mantis)
+{
+ struct input_dev *rc = mantis->rc;
+
+ ir_input_unregister(rc);
+
+ return 0;
+}
diff --git a/drivers/media/dvb/mantis/mantis_ioc.c b/drivers/media/dvb/mantis/mantis_ioc.c
new file mode 100644
index 000000000000..de148ded52d8
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ioc.c
@@ -0,0 +1,130 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_ioc.h"
+
+static int read_eeprom_bytes(struct mantis_pci *mantis, u8 reg, u8 *data, u8 length)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+ int err;
+ u8 buf = reg;
+
+ struct i2c_msg msg[] = {
+ { .addr = 0x50, .flags = 0, .buf = &buf, .len = 1 },
+ { .addr = 0x50, .flags = I2C_M_RD, .buf = data, .len = length },
+ };
+
+ err = i2c_transfer(adapter, msg, 2);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: i2c read: < err=%i d0=0x%02x d1=0x%02x >",
+ err, data[0], data[1]);
+
+ return err;
+ }
+
+ return 0;
+}
+int mantis_get_mac(struct mantis_pci *mantis)
+{
+ int err;
+ u8 mac_addr[6] = {0};
+
+ err = read_eeprom_bytes(mantis, 0x08, mac_addr, 6);
+ if (err < 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Mantis EEPROM read error <%d>", err);
+
+ return err;
+ }
+
+ dprintk(MANTIS_ERROR, 0,
+ " MAC Address=[%02x:%02x:%02x:%02x:%02x:%02x]\n",
+ mac_addr[0],
+ mac_addr[1],
+ mac_addr[2],
+ mac_addr[3],
+ mac_addr[4],
+ mac_addr[5]);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_get_mac);
+
+/* Turn the given bit on or off. */
+void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value)
+{
+ u32 cur;
+
+ dprintk(MANTIS_DEBUG, 1, "Set Bit <%d> to <%d>", bitpos, value);
+ cur = mmread(MANTIS_GPIF_ADDR);
+ if (value)
+ mantis->gpio_status = cur | (1 << bitpos);
+ else
+ mantis->gpio_status = cur & (~(1 << bitpos));
+
+ dprintk(MANTIS_DEBUG, 1, "GPIO Value <%02x>", mantis->gpio_status);
+ mmwrite(mantis->gpio_status, MANTIS_GPIF_ADDR);
+ mmwrite(0x00, MANTIS_GPIF_DOUT);
+}
+EXPORT_SYMBOL_GPL(gpio_set_bits);
+
+int mantis_stream_control(struct mantis_pci *mantis, enum mantis_stream_control stream_ctl)
+{
+ u32 reg;
+
+ reg = mmread(MANTIS_CONTROL);
+ switch (stream_ctl) {
+ case STREAM_TO_HIF:
+ dprintk(MANTIS_DEBUG, 1, "Set stream to HIF");
+ reg &= 0xff - MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ reg |= MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ break;
+
+ case STREAM_TO_CAM:
+ dprintk(MANTIS_DEBUG, 1, "Set stream to CAM");
+ reg |= MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ reg &= 0xff - MANTIS_BYPASS;
+ mmwrite(reg, MANTIS_CONTROL);
+ break;
+ default:
+ dprintk(MANTIS_ERROR, 1, "Unknown MODE <%02x>", stream_ctl);
+ return -1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_stream_control);
diff --git a/drivers/media/dvb/mantis/mantis_ioc.h b/drivers/media/dvb/mantis/mantis_ioc.h
new file mode 100644
index 000000000000..188fe5a81614
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_ioc.h
@@ -0,0 +1,51 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_IOC_H
+#define __MANTIS_IOC_H
+
+#define GPIF_A00 0x00
+#define GPIF_A01 0x01
+#define GPIF_A02 0x02
+#define GPIF_A03 0x03
+#define GPIF_A04 0x04
+#define GPIF_A05 0x05
+#define GPIF_A06 0x06
+#define GPIF_A07 0x07
+#define GPIF_A08 0x08
+#define GPIF_A09 0x09
+#define GPIF_A10 0x0a
+#define GPIF_A11 0x0b
+
+#define GPIF_A12 0x0c
+#define GPIF_A13 0x0d
+#define GPIF_A14 0x0e
+
+enum mantis_stream_control {
+ STREAM_TO_HIF = 0,
+ STREAM_TO_CAM
+};
+
+extern int mantis_get_mac(struct mantis_pci *mantis);
+extern void gpio_set_bits(struct mantis_pci *mantis, u32 bitpos, u8 value);
+
+extern int mantis_stream_control(struct mantis_pci *mantis, enum mantis_stream_control stream_ctl);
+
+#endif /* __MANTIS_IOC_H */
diff --git a/drivers/media/dvb/mantis/mantis_link.h b/drivers/media/dvb/mantis/mantis_link.h
new file mode 100644
index 000000000000..2a814774a001
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_link.h
@@ -0,0 +1,83 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_LINK_H
+#define __MANTIS_LINK_H
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include "dvb_ca_en50221.h"
+
+enum mantis_sbuf_status {
+ MANTIS_SBUF_DATA_AVAIL = 1,
+ MANTIS_SBUF_DATA_EMPTY = 2,
+ MANTIS_SBUF_DATA_OVFLW = 3
+};
+
+struct mantis_slot {
+ u32 timeout;
+ u32 slave_cfg;
+ u32 bar;
+};
+
+/* Physical layer */
+enum mantis_slot_state {
+ MODULE_INSERTED = 3,
+ MODULE_XTRACTED = 4
+};
+
+struct mantis_ca {
+ struct mantis_slot slot[4];
+
+ struct work_struct hif_evm_work;
+
+ u32 hif_event;
+ wait_queue_head_t hif_opdone_wq;
+ wait_queue_head_t hif_brrdyw_wq;
+ wait_queue_head_t hif_data_wq;
+ wait_queue_head_t hif_write_wq; /* HIF Write op */
+
+ enum mantis_sbuf_status sbuf_status;
+
+ enum mantis_slot_state slot_state;
+
+ void *ca_priv;
+
+ struct dvb_ca_en50221 en50221;
+ struct mutex ca_lock;
+};
+
+/* CA */
+extern void mantis_event_cam_plugin(struct mantis_ca *ca);
+extern void mantis_event_cam_unplug(struct mantis_ca *ca);
+extern int mantis_pcmcia_init(struct mantis_ca *ca);
+extern void mantis_pcmcia_exit(struct mantis_ca *ca);
+extern int mantis_evmgr_init(struct mantis_ca *ca);
+extern void mantis_evmgr_exit(struct mantis_ca *ca);
+
+/* HIF */
+extern int mantis_hif_init(struct mantis_ca *ca);
+extern void mantis_hif_exit(struct mantis_ca *ca);
+extern int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr);
+extern int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data);
+extern int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr);
+extern int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data);
+
+#endif /* __MANTIS_LINK_H */
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c
new file mode 100644
index 000000000000..6c7534af6b44
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pci.c
@@ -0,0 +1,177 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <linux/kmod.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+
+#include <asm/irq.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include <asm/irq.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_pci.h"
+
+#define DRIVER_NAME "Mantis Core"
+
+int __devinit mantis_pci_init(struct mantis_pci *mantis)
+{
+ u8 revision, latency;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ struct pci_dev *pdev = mantis->pdev;
+ int err, ret = 0;
+
+ dprintk(MANTIS_ERROR, 0, "found a %s PCI %s device on (%02x:%02x.%x),\n",
+ config->model_name,
+ config->dev_type,
+ mantis->pdev->bus->number,
+ PCI_SLOT(mantis->pdev->devfn),
+ PCI_FUNC(mantis->pdev->devfn));
+
+ err = pci_enable_device(pdev);
+ if (err != 0) {
+ ret = -ENODEV;
+ dprintk(MANTIS_ERROR, 1, "ERROR: PCI enable failed <%i>", err);
+ goto fail0;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err != 0) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: Unable to obtain 32 bit DMA <%i>", err);
+ ret = -ENOMEM;
+ goto fail1;
+ }
+
+ pci_set_master(pdev);
+
+ if (!request_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0),
+ DRIVER_NAME)) {
+
+ dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 Request failed !");
+ ret = -ENODEV;
+ goto fail1;
+ }
+
+ mantis->mmio = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+ if (!mantis->mmio) {
+ dprintk(MANTIS_ERROR, 1, "ERROR: BAR0 remap failed !");
+ ret = -ENODEV;
+ goto fail2;
+ }
+
+ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency);
+ pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+ mantis->latency = latency;
+ mantis->revision = revision;
+
+ dprintk(MANTIS_ERROR, 0, " Mantis Rev %d [%04x:%04x], ",
+ mantis->revision,
+ mantis->pdev->subsystem_vendor,
+ mantis->pdev->subsystem_device);
+
+ dprintk(MANTIS_ERROR, 0,
+ "irq: %d, latency: %d\n memory: 0x%lx, mmio: 0x%p\n",
+ mantis->pdev->irq,
+ mantis->latency,
+ mantis->mantis_addr,
+ mantis->mmio);
+
+ err = request_irq(pdev->irq,
+ config->irq_handler,
+ IRQF_SHARED,
+ DRIVER_NAME,
+ mantis);
+
+ if (err != 0) {
+
+ dprintk(MANTIS_ERROR, 1, "ERROR: IRQ registration failed ! <%d>", err);
+ ret = -ENODEV;
+ goto fail3;
+ }
+
+ pci_set_drvdata(pdev, mantis);
+ return ret;
+
+ /* Error conditions */
+fail3:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> I/O unmap", ret);
+ if (mantis->mmio)
+ iounmap(mantis->mmio);
+
+fail2:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> releasing regions", ret);
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+
+fail1:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> disabling device", ret);
+ pci_disable_device(pdev);
+
+fail0:
+ dprintk(MANTIS_ERROR, 1, "ERROR: <%d> exiting", ret);
+ pci_set_drvdata(pdev, NULL);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mantis_pci_init);
+
+void mantis_pci_exit(struct mantis_pci *mantis)
+{
+ struct pci_dev *pdev = mantis->pdev;
+
+ dprintk(MANTIS_NOTICE, 1, " mem: 0x%p", mantis->mmio);
+ free_irq(pdev->irq, mantis);
+ if (mantis->mmio) {
+ iounmap(mantis->mmio);
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ }
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL_GPL(mantis_pci_exit);
+
+MODULE_DESCRIPTION("Mantis PCI DTV bridge driver");
+MODULE_AUTHOR("Manu Abraham");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/mantis/mantis_pci.h b/drivers/media/dvb/mantis/mantis_pci.h
new file mode 100644
index 000000000000..65f004519086
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pci.h
@@ -0,0 +1,27 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_PCI_H
+#define __MANTIS_PCI_H
+
+extern int mantis_pci_init(struct mantis_pci *mantis);
+extern void mantis_pci_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_PCI_H */
diff --git a/drivers/media/dvb/mantis/mantis_pcmcia.c b/drivers/media/dvb/mantis/mantis_pcmcia.c
new file mode 100644
index 000000000000..5cb545b913f6
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_pcmcia.c
@@ -0,0 +1,120 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_link.h" /* temporary due to physical layer stuff */
+#include "mantis_reg.h"
+
+/*
+ * If Slot state is already PLUG_IN event and we are called
+ * again, definitely it is jitter alone
+ */
+void mantis_event_cam_plugin(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_irqcfg;
+
+ if (ca->slot_state == MODULE_XTRACTED) {
+ dprintk(MANTIS_DEBUG, 1, "Event: CAM Plugged IN: Adapter(%d) Slot(0)", mantis->num);
+ udelay(50);
+ mmwrite(0xda000000, MANTIS_CARD_RESET);
+ gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ gpif_irqcfg |= MANTIS_MASK_PLUGOUT;
+ gpif_irqcfg &= ~MANTIS_MASK_PLUGIN;
+ mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
+ udelay(500);
+ ca->slot_state = MODULE_INSERTED;
+ }
+ udelay(100);
+}
+
+/*
+ * If Slot state is already UN_PLUG event and we are called
+ * again, definitely it is jitter alone
+ */
+void mantis_event_cam_unplug(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_irqcfg;
+
+ if (ca->slot_state == MODULE_INSERTED) {
+ dprintk(MANTIS_DEBUG, 1, "Event: CAM Unplugged: Adapter(%d) Slot(0)", mantis->num);
+ udelay(50);
+ mmwrite(0x00da0000, MANTIS_CARD_RESET);
+ gpif_irqcfg = mmread(MANTIS_GPIF_IRQCFG);
+ gpif_irqcfg |= MANTIS_MASK_PLUGIN;
+ gpif_irqcfg &= ~MANTIS_MASK_PLUGOUT;
+ mmwrite(gpif_irqcfg, MANTIS_GPIF_IRQCFG);
+ udelay(500);
+ ca->slot_state = MODULE_XTRACTED;
+ }
+ udelay(100);
+}
+
+int mantis_pcmcia_init(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ u32 gpif_stat, card_stat;
+
+ mmwrite(mmread(MANTIS_INT_MASK) | MANTIS_INT_IRQ0, MANTIS_INT_MASK);
+ gpif_stat = mmread(MANTIS_GPIF_STATUS);
+ card_stat = mmread(MANTIS_GPIF_IRQCFG);
+
+ if (gpif_stat & MANTIS_GPIF_DETSTAT) {
+ dprintk(MANTIS_DEBUG, 1, "CAM found on Adapter(%d) Slot(0)", mantis->num);
+ mmwrite(card_stat | MANTIS_MASK_PLUGOUT, MANTIS_GPIF_IRQCFG);
+ ca->slot_state = MODULE_INSERTED;
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_INSERTED);
+ } else {
+ dprintk(MANTIS_DEBUG, 1, "Empty Slot on Adapter(%d) Slot(0)", mantis->num);
+ mmwrite(card_stat | MANTIS_MASK_PLUGIN, MANTIS_GPIF_IRQCFG);
+ ca->slot_state = MODULE_XTRACTED;
+ dvb_ca_en50221_camchange_irq(&ca->en50221,
+ 0,
+ DVB_CA_EN50221_CAMCHANGE_REMOVED);
+ }
+
+ return 0;
+}
+
+void mantis_pcmcia_exit(struct mantis_ca *ca)
+{
+ struct mantis_pci *mantis = ca->ca_priv;
+
+ mmwrite(mmread(MANTIS_GPIF_STATUS) & (~MANTIS_CARD_PLUGOUT | ~MANTIS_CARD_PLUGIN), MANTIS_GPIF_STATUS);
+ mmwrite(mmread(MANTIS_INT_MASK) & ~MANTIS_INT_IRQ0, MANTIS_INT_MASK);
+}
diff --git a/drivers/media/dvb/mantis/mantis_reg.h b/drivers/media/dvb/mantis/mantis_reg.h
new file mode 100644
index 000000000000..7761f9dc7fe0
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_reg.h
@@ -0,0 +1,197 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_REG_H
+#define __MANTIS_REG_H
+
+/* Interrupts */
+#define MANTIS_INT_STAT 0x00
+#define MANTIS_INT_MASK 0x04
+
+#define MANTIS_INT_RISCSTAT (0x0f << 28)
+#define MANTIS_INT_RISCEN (0x01 << 27)
+#define MANTIS_INT_I2CRACK (0x01 << 26)
+
+/* #define MANTIS_INT_GPIF (0xff << 12) */
+
+#define MANTIS_INT_PCMCIA7 (0x01 << 19)
+#define MANTIS_INT_PCMCIA6 (0x01 << 18)
+#define MANTIS_INT_PCMCIA5 (0x01 << 17)
+#define MANTIS_INT_PCMCIA4 (0x01 << 16)
+#define MANTIS_INT_PCMCIA3 (0x01 << 15)
+#define MANTIS_INT_PCMCIA2 (0x01 << 14)
+#define MANTIS_INT_PCMCIA1 (0x01 << 13)
+#define MANTIS_INT_PCMCIA0 (0x01 << 12)
+#define MANTIS_INT_IRQ1 (0x01 << 11)
+#define MANTIS_INT_IRQ0 (0x01 << 10)
+#define MANTIS_INT_OCERR (0x01 << 8)
+#define MANTIS_INT_PABORT (0x01 << 7)
+#define MANTIS_INT_RIPERR (0x01 << 6)
+#define MANTIS_INT_PPERR (0x01 << 5)
+#define MANTIS_INT_FTRGT (0x01 << 3)
+#define MANTIS_INT_RISCI (0x01 << 1)
+#define MANTIS_INT_I2CDONE (0x01 << 0)
+
+/* DMA */
+#define MANTIS_DMA_CTL 0x08
+#define MANTIS_GPIF_RD (0xff << 24)
+#define MANTIS_GPIF_WR (0xff << 16)
+#define MANTIS_CPU_DO (0x01 << 10)
+#define MANTIS_DRV_DO (0x01 << 9)
+#define MANTIS_I2C_RD (0x01 << 7)
+#define MANTIS_I2C_WR (0x01 << 6)
+#define MANTIS_DCAP_MODE (0x01 << 5)
+#define MANTIS_FIFO_TP_4 (0x00 << 3)
+#define MANTIS_FIFO_TP_8 (0x01 << 3)
+#define MANTIS_FIFO_TP_16 (0x02 << 3)
+#define MANTIS_FIFO_EN (0x01 << 2)
+#define MANTIS_DCAP_EN (0x01 << 1)
+#define MANTIS_RISC_EN (0x01 << 0)
+
+/* DEBUG */
+#define MANTIS_DEBUGREG 0x0c
+#define MANTIS_DATINV (0x0e << 7)
+#define MANTIS_TOP_DEBUGSEL (0x07 << 4)
+#define MANTIS_PCMCIA_DEBUGSEL (0x0f << 0)
+
+#define MANTIS_RISC_START 0x10
+#define MANTIS_RISC_PC 0x14
+
+/* I2C */
+#define MANTIS_I2CDATA_CTL 0x18
+#define MANTIS_I2C_RATE_1 (0x00 << 6)
+#define MANTIS_I2C_RATE_2 (0x01 << 6)
+#define MANTIS_I2C_RATE_3 (0x02 << 6)
+#define MANTIS_I2C_RATE_4 (0x03 << 6)
+#define MANTIS_I2C_STOP (0x01 << 5)
+#define MANTIS_I2C_PGMODE (0x01 << 3)
+
+/* DATA */
+#define MANTIS_CMD_DATA_R1 0x20
+#define MANTIS_CMD_DATA_3 (0xff << 24)
+#define MANTIS_CMD_DATA_2 (0xff << 16)
+#define MANTIS_CMD_DATA_1 (0xff << 8)
+#define MANTIS_CMD_DATA_0 (0xff << 0)
+
+#define MANTIS_CMD_DATA_R2 0x24
+#define MANTIS_CMD_DATA_7 (0xff << 24)
+#define MANTIS_CMD_DATA_6 (0xff << 16)
+#define MANTIS_CMD_DATA_5 (0xff << 8)
+#define MANTIS_CMD_DATA_4 (0xff << 0)
+
+#define MANTIS_CONTROL 0x28
+#define MANTIS_DET (0x01 << 7)
+#define MANTIS_DAT_CF_EN (0x01 << 6)
+#define MANTIS_ACS (0x03 << 4)
+#define MANTIS_VCCEN (0x01 << 3)
+#define MANTIS_BYPASS (0x01 << 2)
+#define MANTIS_MRST (0x01 << 1)
+#define MANTIS_CRST_INT (0x01 << 0)
+
+#define MANTIS_GPIF_CFGSLA 0x84
+#define MANTIS_GPIF_WAITSMPL (0x07 << 28)
+#define MANTIS_GPIF_BYTEADDRSUB (0x01 << 25)
+#define MANTIS_GPIF_WAITPOL (0x01 << 24)
+#define MANTIS_GPIF_NCDELAY (0x07 << 20)
+#define MANTIS_GPIF_RW2CSDELAY (0x07 << 16)
+#define MANTIS_GPIF_SLFTIMEDMODE (0x01 << 15)
+#define MANTIS_GPIF_SLFTIMEDDELY (0x7f << 8)
+#define MANTIS_GPIF_DEVTYPE (0x07 << 4)
+#define MANTIS_GPIF_BIGENDIAN (0x01 << 3)
+#define MANTIS_GPIF_FETCHCMD (0x03 << 1)
+#define MANTIS_GPIF_HWORDDEV (0x01 << 0)
+
+#define MANTIS_GPIF_WSTOPER 0x90
+#define MANTIS_GPIF_WSTOPERWREN3 (0x01 << 31)
+#define MANTIS_GPIF_PARBOOTN (0x01 << 29)
+#define MANTIS_GPIF_WSTOPERSLID3 (0x1f << 24)
+#define MANTIS_GPIF_WSTOPERWREN2 (0x01 << 23)
+#define MANTIS_GPIF_WSTOPERSLID2 (0x1f << 16)
+#define MANTIS_GPIF_WSTOPERWREN1 (0x01 << 15)
+#define MANTIS_GPIF_WSTOPERSLID1 (0x1f << 8)
+#define MANTIS_GPIF_WSTOPERWREN0 (0x01 << 7)
+#define MANTIS_GPIF_WSTOPERSLID0 (0x1f << 0)
+
+#define MANTIS_GPIF_CS2RW 0x94
+#define MANTIS_GPIF_CS2RWWREN3 (0x01 << 31)
+#define MANTIS_GPIF_CS2RWDELY3 (0x3f << 24)
+#define MANTIS_GPIF_CS2RWWREN2 (0x01 << 23)
+#define MANTIS_GPIF_CS2RWDELY2 (0x3f << 16)
+#define MANTIS_GPIF_CS2RWWREN1 (0x01 << 15)
+#define MANTIS_GPIF_CS2RWDELY1 (0x3f << 8)
+#define MANTIS_GPIF_CS2RWWREN0 (0x01 << 7)
+#define MANTIS_GPIF_CS2RWDELY0 (0x3f << 0)
+
+#define MANTIS_GPIF_IRQCFG 0x98
+#define MANTIS_GPIF_IRQPOL (0x01 << 8)
+#define MANTIS_MASK_WRACK (0x01 << 7)
+#define MANTIS_MASK_BRRDY (0x01 << 6)
+#define MANTIS_MASK_OVFLW (0x01 << 5)
+#define MANTIS_MASK_OTHERR (0x01 << 4)
+#define MANTIS_MASK_WSTO (0x01 << 3)
+#define MANTIS_MASK_EXTIRQ (0x01 << 2)
+#define MANTIS_MASK_PLUGIN (0x01 << 1)
+#define MANTIS_MASK_PLUGOUT (0x01 << 0)
+
+#define MANTIS_GPIF_STATUS 0x9c
+#define MANTIS_SBUF_KILLOP (0x01 << 15)
+#define MANTIS_SBUF_OPDONE (0x01 << 14)
+#define MANTIS_SBUF_EMPTY (0x01 << 13)
+#define MANTIS_GPIF_DETSTAT (0x01 << 9)
+#define MANTIS_GPIF_INTSTAT (0x01 << 8)
+#define MANTIS_GPIF_WRACK (0x01 << 7)
+#define MANTIS_GPIF_BRRDY (0x01 << 6)
+#define MANTIS_SBUF_OVFLW (0x01 << 5)
+#define MANTIS_GPIF_OTHERR (0x01 << 4)
+#define MANTIS_SBUF_WSTO (0x01 << 3)
+#define MANTIS_GPIF_EXTIRQ (0x01 << 2)
+#define MANTIS_CARD_PLUGIN (0x01 << 1)
+#define MANTIS_CARD_PLUGOUT (0x01 << 0)
+
+#define MANTIS_GPIF_BRADDR 0xa0
+#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
+#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
+#define MANTIS_GPIF_BR_ADDR (0xfffffff << 0)
+
+#define MANTIS_GPIF_BRBYTES 0xa4
+#define MANTIS_GPIF_BRCNT (0xfff << 0)
+
+#define MANTIS_PCMCIA_RESET 0xa8
+#define MANTIS_PCMCIA_RSTVAL (0xff << 0)
+
+#define MANTIS_CARD_RESET 0xac
+
+#define MANTIS_GPIF_ADDR 0xb0
+#define MANTIS_GPIF_HIFRDWRN (0x01 << 31)
+#define MANTIS_GPIF_PCMCIAREG (0x01 << 27)
+#define MANTIS_GPIF_PCMCIAIOM (0x01 << 26)
+#define MANTIS_GPIF_HIFADDR (0xfffffff << 0)
+
+#define MANTIS_GPIF_DOUT 0xb4
+#define MANTIS_GPIF_HIFDOUT (0xfffffff << 0)
+
+#define MANTIS_GPIF_DIN 0xb8
+#define MANTIS_GPIF_HIFDIN (0xfffffff << 0)
+
+#define MANTIS_GPIF_SPARE 0xbc
+#define MANTIS_GPIF_LOGICRD (0xffff << 16)
+#define MANTIS_GPIF_LOGICRW (0xffff << 0)
+
+#endif /* __MANTIS_REG_H */
diff --git a/drivers/media/dvb/mantis/mantis_uart.c b/drivers/media/dvb/mantis/mantis_uart.c
new file mode 100644
index 000000000000..7d2f2398fa8b
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_uart.c
@@ -0,0 +1,186 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_reg.h"
+#include "mantis_uart.h"
+
+struct mantis_uart_params {
+ enum mantis_baud baud_rate;
+ enum mantis_parity parity;
+};
+
+static struct {
+ char string[7];
+} rates[5] = {
+ { "9600" },
+ { "19200" },
+ { "38400" },
+ { "57600" },
+ { "115200" }
+};
+
+static struct {
+ char string[5];
+} parity[3] = {
+ { "NONE" },
+ { "ODD" },
+ { "EVEN" }
+};
+
+#define UART_MAX_BUF 16
+
+int mantis_uart_read(struct mantis_pci *mantis, u8 *data)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ u32 stat = 0, i;
+
+ /* get data */
+ for (i = 0; i < (config->bytes + 1); i++) {
+
+ stat = mmread(MANTIS_UART_STAT);
+
+ if (stat & MANTIS_UART_RXFIFO_FULL) {
+ dprintk(MANTIS_ERROR, 1, "RX Fifo FULL");
+ }
+ data[i] = mmread(MANTIS_UART_RXD) & 0x3f;
+
+ dprintk(MANTIS_DEBUG, 1, "Reading ... <%02x>", data[i] & 0x3f);
+
+ if (data[i] & (1 << 7)) {
+ dprintk(MANTIS_ERROR, 1, "UART framing error");
+ return -EINVAL;
+ }
+ if (data[i] & (1 << 6)) {
+ dprintk(MANTIS_ERROR, 1, "UART parity error");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void mantis_uart_work(struct work_struct *work)
+{
+ struct mantis_pci *mantis = container_of(work, struct mantis_pci, uart_work);
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ u8 buf[16];
+ int i;
+
+ mantis_uart_read(mantis, buf);
+
+ for (i = 0; i < (config->bytes + 1); i++)
+ dprintk(MANTIS_INFO, 1, "UART BUF:%d <%02x> ", i, buf[i]);
+
+ dprintk(MANTIS_DEBUG, 0, "\n");
+}
+
+static int mantis_uart_setup(struct mantis_pci *mantis,
+ struct mantis_uart_params *params)
+{
+ u32 reg;
+
+ mmwrite((mmread(MANTIS_UART_CTL) | (params->parity & 0x3)), MANTIS_UART_CTL);
+
+ reg = mmread(MANTIS_UART_BAUD);
+
+ switch (params->baud_rate) {
+ case MANTIS_BAUD_9600:
+ reg |= 0xd8;
+ break;
+ case MANTIS_BAUD_19200:
+ reg |= 0x6c;
+ break;
+ case MANTIS_BAUD_38400:
+ reg |= 0x36;
+ break;
+ case MANTIS_BAUD_57600:
+ reg |= 0x23;
+ break;
+ case MANTIS_BAUD_115200:
+ reg |= 0x11;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mmwrite(reg, MANTIS_UART_BAUD);
+
+ return 0;
+}
+
+int mantis_uart_init(struct mantis_pci *mantis)
+{
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ struct mantis_uart_params params;
+
+ /* default parity: */
+ params.baud_rate = config->baud_rate;
+ params.parity = config->parity;
+ dprintk(MANTIS_INFO, 1, "Initializing UART @ %sbps parity:%s",
+ rates[params.baud_rate].string,
+ parity[params.parity].string);
+
+ init_waitqueue_head(&mantis->uart_wq);
+ spin_lock_init(&mantis->uart_lock);
+
+ INIT_WORK(&mantis->uart_work, mantis_uart_work);
+
+ /* disable interrupt */
+ mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
+
+ mantis_uart_setup(mantis, &params);
+
+ /* default 1 byte */
+ mmwrite((mmread(MANTIS_UART_BAUD) | (config->bytes << 8)), MANTIS_UART_BAUD);
+
+ /* flush buffer */
+ mmwrite((mmread(MANTIS_UART_CTL) | MANTIS_UART_RXFLUSH), MANTIS_UART_CTL);
+
+ /* enable interrupt */
+ mmwrite(mmread(MANTIS_INT_MASK) | 0x800, MANTIS_INT_MASK);
+ mmwrite(mmread(MANTIS_UART_CTL) | MANTIS_UART_RXINT, MANTIS_UART_CTL);
+
+ schedule_work(&mantis->uart_work);
+ dprintk(MANTIS_DEBUG, 1, "UART succesfully initialized");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mantis_uart_init);
+
+void mantis_uart_exit(struct mantis_pci *mantis)
+{
+ /* disable interrupt */
+ mmwrite(mmread(MANTIS_UART_CTL) & 0xffef, MANTIS_UART_CTL);
+}
+EXPORT_SYMBOL_GPL(mantis_uart_exit);
diff --git a/drivers/media/dvb/mantis/mantis_uart.h b/drivers/media/dvb/mantis/mantis_uart.h
new file mode 100644
index 000000000000..ffb62a0a5a13
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_uart.h
@@ -0,0 +1,58 @@
+/*
+ Mantis PCI bridge driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_UART_H
+#define __MANTIS_UART_H
+
+#define MANTIS_UART_CTL 0xe0
+#define MANTIS_UART_RXINT (1 << 4)
+#define MANTIS_UART_RXFLUSH (1 << 2)
+
+#define MANTIS_UART_RXD 0xe8
+#define MANTIS_UART_BAUD 0xec
+
+#define MANTIS_UART_STAT 0xf0
+#define MANTIS_UART_RXFIFO_DATA (1 << 7)
+#define MANTIS_UART_RXFIFO_EMPTY (1 << 6)
+#define MANTIS_UART_RXFIFO_FULL (1 << 3)
+#define MANTIS_UART_FRAME_ERR (1 << 2)
+#define MANTIS_UART_PARITY_ERR (1 << 1)
+#define MANTIS_UART_RXTHRESH_INT (1 << 0)
+
+enum mantis_baud {
+ MANTIS_BAUD_9600 = 0,
+ MANTIS_BAUD_19200,
+ MANTIS_BAUD_38400,
+ MANTIS_BAUD_57600,
+ MANTIS_BAUD_115200
+};
+
+enum mantis_parity {
+ MANTIS_PARITY_NONE = 0,
+ MANTIS_PARITY_EVEN,
+ MANTIS_PARITY_ODD,
+};
+
+struct mantis_pci;
+
+extern int mantis_uart_init(struct mantis_pci *mantis);
+extern void mantis_uart_exit(struct mantis_pci *mantis);
+
+#endif /* __MANTIS_UART_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1033.c b/drivers/media/dvb/mantis/mantis_vp1033.c
new file mode 100644
index 000000000000..4a723bda0031
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1033.c
@@ -0,0 +1,212 @@
+/*
+ Mantis VP-1033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "stv0299.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp1033.h"
+#include "mantis_reg.h"
+
+u8 lgtdqcs001f_inittab[] = {
+ 0x01, 0x15,
+ 0x02, 0x00,
+ 0x03, 0x00,
+ 0x04, 0x2a,
+ 0x05, 0x85,
+ 0x06, 0x02,
+ 0x07, 0x00,
+ 0x08, 0x00,
+ 0x0c, 0x01,
+ 0x0d, 0x81,
+ 0x0e, 0x44,
+ 0x0f, 0x94,
+ 0x10, 0x3c,
+ 0x11, 0x84,
+ 0x12, 0xb9,
+ 0x13, 0xb5,
+ 0x14, 0x4f,
+ 0x15, 0xc9,
+ 0x16, 0x80,
+ 0x17, 0x36,
+ 0x18, 0xfb,
+ 0x19, 0xcf,
+ 0x1a, 0xbc,
+ 0x1c, 0x2b,
+ 0x1d, 0x27,
+ 0x1e, 0x00,
+ 0x1f, 0x0b,
+ 0x20, 0xa1,
+ 0x21, 0x60,
+ 0x22, 0x00,
+ 0x23, 0x00,
+ 0x28, 0x00,
+ 0x29, 0x28,
+ 0x2a, 0x14,
+ 0x2b, 0x0f,
+ 0x2c, 0x09,
+ 0x2d, 0x05,
+ 0x31, 0x1f,
+ 0x32, 0x19,
+ 0x33, 0xfc,
+ 0x34, 0x13,
+ 0xff, 0xff,
+};
+
+#define MANTIS_MODEL_NAME "VP-1033"
+#define MANTIS_DEV_TYPE "DVB-S/DSS"
+
+int lgtdqcs001f_tuner_set(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *params)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 buf[4];
+ u32 div;
+
+
+ struct i2c_msg msg = {.addr = 0x61, .flags = 0, .buf = buf, .len = sizeof(buf)};
+
+ div = params->frequency / 250;
+
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = 0x83;
+ buf[3] = 0xc0;
+
+ if (params->frequency < 1531000)
+ buf[3] |= 0x04;
+ else
+ buf[3] &= ~0x04;
+ if (i2c_transfer(adapter, &msg, 1) < 0) {
+ dprintk(MANTIS_ERROR, 1, "Write: I2C Transfer failed");
+ return -EIO;
+ }
+ msleep_interruptible(100);
+
+ return 0;
+}
+
+int lgtdqcs001f_set_symbol_rate(struct dvb_frontend *fe,
+ u32 srate, u32 ratio)
+{
+ u8 aclk = 0;
+ u8 bclk = 0;
+
+ if (srate < 1500000) {
+ aclk = 0xb7;
+ bclk = 0x47;
+ } else if (srate < 3000000) {
+ aclk = 0xb7;
+ bclk = 0x4b;
+ } else if (srate < 7000000) {
+ aclk = 0xb7;
+ bclk = 0x4f;
+ } else if (srate < 14000000) {
+ aclk = 0xb7;
+ bclk = 0x53;
+ } else if (srate < 30000000) {
+ aclk = 0xb6;
+ bclk = 0x53;
+ } else if (srate < 45000000) {
+ aclk = 0xb4;
+ bclk = 0x51;
+ }
+ stv0299_writereg(fe, 0x13, aclk);
+ stv0299_writereg(fe, 0x14, bclk);
+
+ stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff);
+ stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff);
+ stv0299_writereg(fe, 0x21, ratio & 0xf0);
+
+ return 0;
+}
+
+struct stv0299_config lgtdqcs001f_config = {
+ .demod_address = 0x68,
+ .inittab = lgtdqcs001f_inittab,
+ .mclk = 88000000UL,
+ .invert = 0,
+ .skip_reinit = 0,
+ .volt13_op0_op1 = STV0299_VOLT13_OP0,
+ .min_delay_ms = 100,
+ .set_symbol_rate = lgtdqcs001f_set_symbol_rate,
+};
+
+static int vp1033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for STV0299 (DVB-S)");
+ fe = stv0299_attach(&lgtdqcs001f_config, adapter);
+
+ if (fe) {
+ fe->ops.tuner_ops.set_params = lgtdqcs001f_tuner_set;
+ dprintk(MANTIS_ERROR, 1, "found STV0299 DVB-S frontend @ 0x%02x",
+ lgtdqcs001f_config.demod_address);
+
+ dprintk(MANTIS_ERROR, 1, "Mantis DVB-S STV0299 frontend attach success");
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp1033_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp1033_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp1033.h b/drivers/media/dvb/mantis/mantis_vp1033.h
new file mode 100644
index 000000000000..7daaa1bf127d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1033.h
@@ -0,0 +1,30 @@
+/*
+ Mantis VP-1033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP1033_H
+#define __MANTIS_VP1033_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_1033_DVB_S 0x0016
+
+extern struct mantis_hwconfig vp1033_config;
+
+#endif /* __MANTIS_VP1033_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1034.c b/drivers/media/dvb/mantis/mantis_vp1034.c
new file mode 100644
index 000000000000..8e6ae558ee57
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1034.c
@@ -0,0 +1,119 @@
+/*
+ Mantis VP-1034 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mb86a16.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp1034.h"
+#include "mantis_reg.h"
+
+struct mb86a16_config vp1034_mb86a16_config = {
+ .demod_address = 0x08,
+ .set_voltage = vp1034_set_voltage,
+};
+
+#define MANTIS_MODEL_NAME "VP-1034"
+#define MANTIS_DEV_TYPE "DVB-S/DSS"
+
+int vp1034_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+
+ switch (voltage) {
+ case SEC_VOLTAGE_13:
+ dprintk(MANTIS_ERROR, 1, "Polarization=[13V]");
+ gpio_set_bits(mantis, 13, 1);
+ gpio_set_bits(mantis, 14, 0);
+ break;
+ case SEC_VOLTAGE_18:
+ dprintk(MANTIS_ERROR, 1, "Polarization=[18V]");
+ gpio_set_bits(mantis, 13, 1);
+ gpio_set_bits(mantis, 14, 1);
+ break;
+ case SEC_VOLTAGE_OFF:
+ dprintk(MANTIS_ERROR, 1, "Frontend (dummy) POWERDOWN");
+ break;
+ default:
+ dprintk(MANTIS_ERROR, 1, "Invalid = (%d)", (u32) voltage);
+ return -EINVAL;
+ }
+ mmwrite(0x00, MANTIS_GPIF_DOUT);
+
+ return 0;
+}
+
+static int vp1034_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for MB86A16 (DVB-S/DSS)");
+ fe = mb86a16_attach(&vp1034_mb86a16_config, adapter);
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found MB86A16 DVB-S/DSS frontend @0x%02x",
+ vp1034_mb86a16_config.demod_address);
+
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp1034_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp1034_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp1034.h b/drivers/media/dvb/mantis/mantis_vp1034.h
new file mode 100644
index 000000000000..323f38ef8e3d
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1034.h
@@ -0,0 +1,33 @@
+/*
+ Mantis VP-1034 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP1034_H
+#define __MANTIS_VP1034_H
+
+#include "dvb_frontend.h"
+#include "mantis_common.h"
+
+
+#define MANTIS_VP_1034_DVB_S 0x0014
+
+extern struct mantis_hwconfig vp1034_config;
+extern int vp1034_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage);
+
+#endif /* __MANTIS_VP1034_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.c b/drivers/media/dvb/mantis/mantis_vp1041.c
new file mode 100644
index 000000000000..515346dd31d0
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1041.c
@@ -0,0 +1,358 @@
+/*
+ Mantis VP-1041 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp1041.h"
+#include "stb0899_reg.h"
+#include "stb0899_drv.h"
+#include "stb0899_cfg.h"
+#include "stb6100_cfg.h"
+#include "stb6100.h"
+#include "lnbp21.h"
+
+#define MANTIS_MODEL_NAME "VP-1041"
+#define MANTIS_DEV_TYPE "DSS/DVB-S/DVB-S2"
+
+static const struct stb0899_s1_reg vp1041_stb0899_s1_init_1[] = {
+
+ /* 0x0000000b, *//* SYSREG */
+ { STB0899_DEV_ID , 0x30 },
+ { STB0899_DISCNTRL1 , 0x32 },
+ { STB0899_DISCNTRL2 , 0x80 },
+ { STB0899_DISRX_ST0 , 0x04 },
+ { STB0899_DISRX_ST1 , 0x00 },
+ { STB0899_DISPARITY , 0x00 },
+ { STB0899_DISFIFO , 0x00 },
+ { STB0899_DISSTATUS , 0x20 },
+ { STB0899_DISF22 , 0x99 },
+ { STB0899_DISF22RX , 0xa8 },
+ /* SYSREG ? */
+ { STB0899_ACRPRESC , 0x11 },
+ { STB0899_ACRDIV1 , 0x0a },
+ { STB0899_ACRDIV2 , 0x05 },
+ { STB0899_DACR1 , 0x00 },
+ { STB0899_DACR2 , 0x00 },
+ { STB0899_OUTCFG , 0x00 },
+ { STB0899_MODECFG , 0x00 },
+ { STB0899_IRQSTATUS_3 , 0xfe },
+ { STB0899_IRQSTATUS_2 , 0x03 },
+ { STB0899_IRQSTATUS_1 , 0x7c },
+ { STB0899_IRQSTATUS_0 , 0xf4 },
+ { STB0899_IRQMSK_3 , 0xf3 },
+ { STB0899_IRQMSK_2 , 0xfc },
+ { STB0899_IRQMSK_1 , 0xff },
+ { STB0899_IRQMSK_0 , 0xff },
+ { STB0899_IRQCFG , 0x00 },
+ { STB0899_I2CCFG , 0x88 },
+ { STB0899_I2CRPT , 0x58 },
+ { STB0899_IOPVALUE5 , 0x00 },
+ { STB0899_IOPVALUE4 , 0x33 },
+ { STB0899_IOPVALUE3 , 0x6d },
+ { STB0899_IOPVALUE2 , 0x90 },
+ { STB0899_IOPVALUE1 , 0x60 },
+ { STB0899_IOPVALUE0 , 0x00 },
+ { STB0899_GPIO00CFG , 0x82 },
+ { STB0899_GPIO01CFG , 0x82 },
+ { STB0899_GPIO02CFG , 0x82 },
+ { STB0899_GPIO03CFG , 0x82 },
+ { STB0899_GPIO04CFG , 0x82 },
+ { STB0899_GPIO05CFG , 0x82 },
+ { STB0899_GPIO06CFG , 0x82 },
+ { STB0899_GPIO07CFG , 0x82 },
+ { STB0899_GPIO08CFG , 0x82 },
+ { STB0899_GPIO09CFG , 0x82 },
+ { STB0899_GPIO10CFG , 0x82 },
+ { STB0899_GPIO11CFG , 0x82 },
+ { STB0899_GPIO12CFG , 0x82 },
+ { STB0899_GPIO13CFG , 0x82 },
+ { STB0899_GPIO14CFG , 0x82 },
+ { STB0899_GPIO15CFG , 0x82 },
+ { STB0899_GPIO16CFG , 0x82 },
+ { STB0899_GPIO17CFG , 0x82 },
+ { STB0899_GPIO18CFG , 0x82 },
+ { STB0899_GPIO19CFG , 0x82 },
+ { STB0899_GPIO20CFG , 0x82 },
+ { STB0899_SDATCFG , 0xb8 },
+ { STB0899_SCLTCFG , 0xba },
+ { STB0899_AGCRFCFG , 0x1c }, /* 0x11 */
+ { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */
+ { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */
+ { STB0899_DIRCLKCFG , 0x82 },
+ { STB0899_CLKOUT27CFG , 0x7e },
+ { STB0899_STDBYCFG , 0x82 },
+ { STB0899_CS0CFG , 0x82 },
+ { STB0899_CS1CFG , 0x82 },
+ { STB0899_DISEQCOCFG , 0x20 },
+ { STB0899_GPIO32CFG , 0x82 },
+ { STB0899_GPIO33CFG , 0x82 },
+ { STB0899_GPIO34CFG , 0x82 },
+ { STB0899_GPIO35CFG , 0x82 },
+ { STB0899_GPIO36CFG , 0x82 },
+ { STB0899_GPIO37CFG , 0x82 },
+ { STB0899_GPIO38CFG , 0x82 },
+ { STB0899_GPIO39CFG , 0x82 },
+ { STB0899_NCOARSE , 0x17 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */
+ { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */
+ { STB0899_FILTCTRL , 0x00 },
+ { STB0899_SYSCTRL , 0x01 },
+ { STB0899_STOPCLK1 , 0x20 },
+ { STB0899_STOPCLK2 , 0x00 },
+ { STB0899_INTBUFSTATUS , 0x00 },
+ { STB0899_INTBUFCTRL , 0x0a },
+ { 0xffff , 0xff },
+};
+
+static const struct stb0899_s1_reg vp1041_stb0899_s1_init_3[] = {
+ { STB0899_DEMOD , 0x00 },
+ { STB0899_RCOMPC , 0xc9 },
+ { STB0899_AGC1CN , 0x01 },
+ { STB0899_AGC1REF , 0x10 },
+ { STB0899_RTC , 0x23 },
+ { STB0899_TMGCFG , 0x4e },
+ { STB0899_AGC2REF , 0x34 },
+ { STB0899_TLSR , 0x84 },
+ { STB0899_CFD , 0xf7 },
+ { STB0899_ACLC , 0x87 },
+ { STB0899_BCLC , 0x94 },
+ { STB0899_EQON , 0x41 },
+ { STB0899_LDT , 0xf1 },
+ { STB0899_LDT2 , 0xe3 },
+ { STB0899_EQUALREF , 0xb4 },
+ { STB0899_TMGRAMP , 0x10 },
+ { STB0899_TMGTHD , 0x30 },
+ { STB0899_IDCCOMP , 0xfd },
+ { STB0899_QDCCOMP , 0xff },
+ { STB0899_POWERI , 0x0c },
+ { STB0899_POWERQ , 0x0f },
+ { STB0899_RCOMP , 0x6c },
+ { STB0899_AGCIQIN , 0x80 },
+ { STB0899_AGC2I1 , 0x06 },
+ { STB0899_AGC2I2 , 0x00 },
+ { STB0899_TLIR , 0x30 },
+ { STB0899_RTF , 0x7f },
+ { STB0899_DSTATUS , 0x00 },
+ { STB0899_LDI , 0xbc },
+ { STB0899_CFRM , 0xea },
+ { STB0899_CFRL , 0x31 },
+ { STB0899_NIRM , 0x2b },
+ { STB0899_NIRL , 0x80 },
+ { STB0899_ISYMB , 0x1d },
+ { STB0899_QSYMB , 0xa6 },
+ { STB0899_SFRH , 0x2f },
+ { STB0899_SFRM , 0x68 },
+ { STB0899_SFRL , 0x40 },
+ { STB0899_SFRUPH , 0x2f },
+ { STB0899_SFRUPM , 0x68 },
+ { STB0899_SFRUPL , 0x40 },
+ { STB0899_EQUAI1 , 0x02 },
+ { STB0899_EQUAQ1 , 0xff },
+ { STB0899_EQUAI2 , 0x04 },
+ { STB0899_EQUAQ2 , 0x05 },
+ { STB0899_EQUAI3 , 0x02 },
+ { STB0899_EQUAQ3 , 0xfd },
+ { STB0899_EQUAI4 , 0x03 },
+ { STB0899_EQUAQ4 , 0x07 },
+ { STB0899_EQUAI5 , 0x08 },
+ { STB0899_EQUAQ5 , 0xf5 },
+ { STB0899_DSTATUS2 , 0x00 },
+ { STB0899_VSTATUS , 0x00 },
+ { STB0899_VERROR , 0x86 },
+ { STB0899_IQSWAP , 0x2a },
+ { STB0899_ECNT1M , 0x00 },
+ { STB0899_ECNT1L , 0x00 },
+ { STB0899_ECNT2M , 0x00 },
+ { STB0899_ECNT2L , 0x00 },
+ { STB0899_ECNT3M , 0x0a },
+ { STB0899_ECNT3L , 0xad },
+ { STB0899_FECAUTO1 , 0x06 },
+ { STB0899_FECM , 0x01 },
+ { STB0899_VTH12 , 0xb0 },
+ { STB0899_VTH23 , 0x7a },
+ { STB0899_VTH34 , 0x58 },
+ { STB0899_VTH56 , 0x38 },
+ { STB0899_VTH67 , 0x34 },
+ { STB0899_VTH78 , 0x24 },
+ { STB0899_PRVIT , 0xff },
+ { STB0899_VITSYNC , 0x19 },
+ { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */
+ { STB0899_TSULC , 0x42 },
+ { STB0899_RSLLC , 0x41 },
+ { STB0899_TSLPL , 0x12 },
+ { STB0899_TSCFGH , 0x0c },
+ { STB0899_TSCFGM , 0x00 },
+ { STB0899_TSCFGL , 0x00 },
+ { STB0899_TSOUT , 0x69 }, /* 0x0d for CAM */
+ { STB0899_RSSYNCDEL , 0x00 },
+ { STB0899_TSINHDELH , 0x02 },
+ { STB0899_TSINHDELM , 0x00 },
+ { STB0899_TSINHDELL , 0x00 },
+ { STB0899_TSLLSTKM , 0x1b },
+ { STB0899_TSLLSTKL , 0xb3 },
+ { STB0899_TSULSTKM , 0x00 },
+ { STB0899_TSULSTKL , 0x00 },
+ { STB0899_PCKLENUL , 0xbc },
+ { STB0899_PCKLENLL , 0xcc },
+ { STB0899_RSPCKLEN , 0xbd },
+ { STB0899_TSSTATUS , 0x90 },
+ { STB0899_ERRCTRL1 , 0xb6 },
+ { STB0899_ERRCTRL2 , 0x95 },
+ { STB0899_ERRCTRL3 , 0x8d },
+ { STB0899_DMONMSK1 , 0x27 },
+ { STB0899_DMONMSK0 , 0x03 },
+ { STB0899_DEMAPVIT , 0x5c },
+ { STB0899_PLPARM , 0x19 },
+ { STB0899_PDELCTRL , 0x48 },
+ { STB0899_PDELCTRL2 , 0x00 },
+ { STB0899_BBHCTRL1 , 0x00 },
+ { STB0899_BBHCTRL2 , 0x00 },
+ { STB0899_HYSTTHRESH , 0x77 },
+ { STB0899_MATCSTM , 0x00 },
+ { STB0899_MATCSTL , 0x00 },
+ { STB0899_UPLCSTM , 0x00 },
+ { STB0899_UPLCSTL , 0x00 },
+ { STB0899_DFLCSTM , 0x00 },
+ { STB0899_DFLCSTL , 0x00 },
+ { STB0899_SYNCCST , 0x00 },
+ { STB0899_SYNCDCSTM , 0x00 },
+ { STB0899_SYNCDCSTL , 0x00 },
+ { STB0899_ISI_ENTRY , 0x00 },
+ { STB0899_ISI_BIT_EN , 0x00 },
+ { STB0899_MATSTRM , 0xf0 },
+ { STB0899_MATSTRL , 0x02 },
+ { STB0899_UPLSTRM , 0x45 },
+ { STB0899_UPLSTRL , 0x60 },
+ { STB0899_DFLSTRM , 0xe3 },
+ { STB0899_DFLSTRL , 0x00 },
+ { STB0899_SYNCSTR , 0x47 },
+ { STB0899_SYNCDSTRM , 0x05 },
+ { STB0899_SYNCDSTRL , 0x18 },
+ { STB0899_CFGPDELSTATUS1 , 0x19 },
+ { STB0899_CFGPDELSTATUS2 , 0x2b },
+ { STB0899_BBFERRORM , 0x00 },
+ { STB0899_BBFERRORL , 0x01 },
+ { STB0899_UPKTERRORM , 0x00 },
+ { STB0899_UPKTERRORL , 0x00 },
+ { 0xffff , 0xff },
+};
+
+struct stb0899_config vp1041_stb0899_config = {
+ .init_dev = vp1041_stb0899_s1_init_1,
+ .init_s2_demod = stb0899_s2_init_2,
+ .init_s1_demod = vp1041_stb0899_s1_init_3,
+ .init_s2_fec = stb0899_s2_init_4,
+ .init_tst = stb0899_s1_init_5,
+
+ .demod_address = 0x68, /* 0xd0 >> 1 */
+
+ .xtal_freq = 27000000,
+ .inversion = IQ_SWAP_ON, /* 1 */
+
+ .lo_clk = 76500000,
+ .hi_clk = 99000000,
+
+ .esno_ave = STB0899_DVBS2_ESNO_AVE,
+ .esno_quant = STB0899_DVBS2_ESNO_QUANT,
+ .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE,
+ .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE,
+ .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD,
+ .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ,
+ .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK,
+ .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF,
+ .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT,
+
+ .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS,
+ .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET,
+ .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS,
+ .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER,
+
+ .tuner_get_frequency = stb6100_get_frequency,
+ .tuner_set_frequency = stb6100_set_frequency,
+ .tuner_set_bandwidth = stb6100_set_bandwidth,
+ .tuner_get_bandwidth = stb6100_get_bandwidth,
+ .tuner_set_rfsiggain = NULL,
+};
+
+struct stb6100_config vp1041_stb6100_config = {
+ .tuner_address = 0x60,
+ .refclock = 27000000,
+};
+
+static int vp1041_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+ mantis->fe = stb0899_attach(&vp1041_stb0899_config, adapter);
+ if (mantis->fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found STB0899 DVB-S/DVB-S2 frontend @0x%02x",
+ vp1041_stb0899_config.demod_address);
+
+ if (stb6100_attach(mantis->fe, &vp1041_stb6100_config, adapter)) {
+ if (!lnbp21_attach(mantis->fe, adapter, 0, 0))
+ dprintk(MANTIS_ERROR, 1, "No LNBP21 found!");
+ }
+ } else {
+ return -EREMOTEIO;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+
+
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp1041_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp1041_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp1041.h b/drivers/media/dvb/mantis/mantis_vp1041.h
new file mode 100644
index 000000000000..1ae5b3de8081
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp1041.h
@@ -0,0 +1,33 @@
+/*
+ Mantis VP-1041 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP1041_H
+#define __MANTIS_VP1041_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_1041_DVB_S2 0x0031
+#define SKYSTAR_HD2_10 0x0001
+#define SKYSTAR_HD2_20 0x0003
+#define CINERGY_S2_PCI_HD 0x1179
+
+extern struct mantis_hwconfig vp1041_config;
+
+#endif /* __MANTIS_VP1041_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp2033.c b/drivers/media/dvb/mantis/mantis_vp2033.c
new file mode 100644
index 000000000000..10ce81790a8c
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2033.c
@@ -0,0 +1,187 @@
+/*
+ Mantis VP-2033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "tda1002x.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp2033.h"
+
+#define MANTIS_MODEL_NAME "VP-2033"
+#define MANTIS_DEV_TYPE "DVB-C"
+
+struct tda1002x_config vp2033_tda1002x_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
+struct tda10023_config vp2033_tda10023_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
+static u8 read_pwm(struct mantis_pci *mantis)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 b = 0xff;
+ u8 pwm;
+ struct i2c_msg msg[] = {
+ {.addr = 0x50, .flags = 0, .buf = &b, .len = 1},
+ {.addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1}
+ };
+
+ if ((i2c_transfer(adapter, msg, 2) != 2)
+ || (pwm == 0xff))
+ pwm = 0x48;
+
+ return pwm;
+}
+
+static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 buf[6];
+ struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)};
+ int i;
+
+#define CU1216_IF 36125000
+#define TUNER_MUL 62500
+
+ u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
+
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = 0xce;
+ buf[3] = (params->frequency < 150000000 ? 0x01 :
+ params->frequency < 445000000 ? 0x02 : 0x04);
+ buf[4] = 0xde;
+ buf[5] = 0x20;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ /* wait for the pll lock */
+ msg.flags = I2C_M_RD;
+ msg.len = 1;
+ for (i = 0; i < 20; i++) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40))
+ break;
+
+ msleep(10);
+ }
+
+ /* switch the charge pump to the lower current */
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[2];
+ buf[2] &= ~0x40;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int vp2033_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for CU1216 (DVB-C)");
+ fe = tda10021_attach(&vp2033_tda1002x_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10021) @ 0x%02x",
+ vp2033_tda1002x_cu1216_config.demod_address);
+ } else {
+ fe = tda10023_attach(&vp2033_tda10023_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10023) @ 0x%02x",
+ vp2033_tda1002x_cu1216_config.demod_address);
+ }
+ }
+
+ if (fe) {
+ fe->ops.tuner_ops.set_params = tda1002x_cu1216_tuner_set;
+ dprintk(MANTIS_ERROR, 1, "Mantis DVB-C Philips CU1216 frontend attach success");
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+
+ mantis->fe = fe;
+ dprintk(MANTIS_DEBUG, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp2033_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp2033_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp2033.h b/drivers/media/dvb/mantis/mantis_vp2033.h
new file mode 100644
index 000000000000..c55242b79d54
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2033.h
@@ -0,0 +1,30 @@
+/*
+ Mantis VP-2033 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP2033_H
+#define __MANTIS_VP2033_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_2033_DVB_C 0x0008
+
+extern struct mantis_hwconfig vp2033_config;
+
+#endif /* __MANTIS_VP2033_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp2040.c b/drivers/media/dvb/mantis/mantis_vp2040.c
new file mode 100644
index 000000000000..a7ca233e800b
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2040.c
@@ -0,0 +1,186 @@
+/*
+ Mantis VP-2040 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "tda1002x.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp2040.h"
+
+#define MANTIS_MODEL_NAME "VP-2040"
+#define MANTIS_DEV_TYPE "DVB-C"
+
+struct tda1002x_config vp2040_tda1002x_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
+struct tda10023_config vp2040_tda10023_cu1216_config = {
+ .demod_address = 0x18 >> 1,
+ .invert = 1,
+};
+
+static int tda1002x_cu1216_tuner_set(struct dvb_frontend *fe, struct dvb_frontend_parameters *params)
+{
+ struct mantis_pci *mantis = fe->dvb->priv;
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 buf[6];
+ struct i2c_msg msg = {.addr = 0x60, .flags = 0, .buf = buf, .len = sizeof(buf)};
+ int i;
+
+#define CU1216_IF 36125000
+#define TUNER_MUL 62500
+
+ u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL;
+
+ buf[0] = (div >> 8) & 0x7f;
+ buf[1] = div & 0xff;
+ buf[2] = 0xce;
+ buf[3] = (params->frequency < 150000000 ? 0x01 :
+ params->frequency < 445000000 ? 0x02 : 0x04);
+ buf[4] = 0xde;
+ buf[5] = 0x20;
+
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ /* wait for the pll lock */
+ msg.flags = I2C_M_RD;
+ msg.len = 1;
+ for (i = 0; i < 20; i++) {
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) == 1 && (buf[0] & 0x40))
+ break;
+
+ msleep(10);
+ }
+
+ /* switch the charge pump to the lower current */
+ msg.flags = 0;
+ msg.len = 2;
+ msg.buf = &buf[2];
+ buf[2] &= ~0x40;
+ if (fe->ops.i2c_gate_ctrl)
+ fe->ops.i2c_gate_ctrl(fe, 1);
+
+ if (i2c_transfer(adapter, &msg, 1) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static u8 read_pwm(struct mantis_pci *mantis)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ u8 b = 0xff;
+ u8 pwm;
+ struct i2c_msg msg[] = {
+ {.addr = 0x50, .flags = 0, .buf = &b, .len = 1},
+ {.addr = 0x50, .flags = I2C_M_RD, .buf = &pwm, .len = 1}
+ };
+
+ if ((i2c_transfer(adapter, msg, 2) != 2)
+ || (pwm == 0xff))
+ pwm = 0x48;
+
+ return pwm;
+}
+
+static int vp2040_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+
+ int err = 0;
+
+ err = mantis_frontend_power(mantis, POWER_ON);
+ if (err == 0) {
+ mantis_frontend_soft_reset(mantis);
+ msleep(250);
+
+ dprintk(MANTIS_ERROR, 1, "Probing for CU1216 (DVB-C)");
+ fe = tda10021_attach(&vp2040_tda1002x_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10021) @ 0x%02x",
+ vp2040_tda1002x_cu1216_config.demod_address);
+ } else {
+ fe = tda10023_attach(&vp2040_tda10023_cu1216_config,
+ adapter,
+ read_pwm(mantis));
+
+ if (fe) {
+ dprintk(MANTIS_ERROR, 1,
+ "found Philips CU1216 DVB-C frontend (TDA10023) @ 0x%02x",
+ vp2040_tda1002x_cu1216_config.demod_address);
+ }
+ }
+
+ if (fe) {
+ fe->ops.tuner_ops.set_params = tda1002x_cu1216_tuner_set;
+ dprintk(MANTIS_ERROR, 1, "Mantis DVB-C Philips CU1216 frontend attach success");
+ } else {
+ return -1;
+ }
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_DEBUG, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp2040_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_204,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp2040_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp2040.h b/drivers/media/dvb/mantis/mantis_vp2040.h
new file mode 100644
index 000000000000..d125e219b685
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp2040.h
@@ -0,0 +1,32 @@
+/*
+ Mantis VP-2040 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP2040_H
+#define __MANTIS_VP2040_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_2040_DVB_C 0x0043
+#define CINERGY_C 0x1178
+#define CABLESTAR_HD2 0x0002
+
+extern struct mantis_hwconfig vp2040_config;
+
+#endif /* __MANTIS_VP2040_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp3028.c b/drivers/media/dvb/mantis/mantis_vp3028.c
new file mode 100644
index 000000000000..4155c838a18a
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3028.c
@@ -0,0 +1,38 @@
+/*
+ Mantis VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include "mantis_common.h"
+#include "mantis_vp3028.h"
+
+struct zl10353_config mantis_vp3028_config = {
+ .demod_address = 0x0f,
+};
+
+#define MANTIS_MODEL_NAME "VP-3028"
+#define MANTIS_DEV_TYPE "DVB-T"
+
+struct mantis_hwconfig vp3028_mantis_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp3028.h b/drivers/media/dvb/mantis/mantis_vp3028.h
new file mode 100644
index 000000000000..b07be6adc522
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3028.h
@@ -0,0 +1,33 @@
+/*
+ Mantis VP-3028 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP3028_H
+#define __MANTIS_VP3028_H
+
+#include "dvb_frontend.h"
+#include "mantis_common.h"
+#include "zl10353.h"
+
+#define MANTIS_VP_3028_DVB_T 0x0028
+
+extern struct zl10353_config mantis_vp3028_config;
+extern struct mantis_hwconfig vp3028_mantis_config;
+
+#endif /* __MANTIS_VP3028_H */
diff --git a/drivers/media/dvb/mantis/mantis_vp3030.c b/drivers/media/dvb/mantis/mantis_vp3030.c
new file mode 100644
index 000000000000..1f4334214953
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3030.c
@@ -0,0 +1,105 @@
+/*
+ Mantis VP-3030 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+
+#include "dmxdev.h"
+#include "dvbdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+
+#include "zl10353.h"
+#include "tda665x.h"
+#include "mantis_common.h"
+#include "mantis_ioc.h"
+#include "mantis_dvb.h"
+#include "mantis_vp3030.h"
+
+struct zl10353_config mantis_vp3030_config = {
+ .demod_address = 0x0f,
+};
+
+struct tda665x_config env57h12d5_config = {
+ .name = "ENV57H12D5 (ET-50DT)",
+ .addr = 0x60,
+ .frequency_min = 47000000,
+ .frequency_max = 862000000,
+ .frequency_offst = 3616667,
+ .ref_multiplier = 6, /* 1/6 MHz */
+ .ref_divider = 100000, /* 1/6 MHz */
+};
+
+#define MANTIS_MODEL_NAME "VP-3030"
+#define MANTIS_DEV_TYPE "DVB-T"
+
+
+static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe)
+{
+ struct i2c_adapter *adapter = &mantis->adapter;
+ struct mantis_hwconfig *config = mantis->hwconfig;
+ int err = 0;
+
+ gpio_set_bits(mantis, config->reset, 0);
+ msleep(100);
+ err = mantis_frontend_power(mantis, POWER_ON);
+ msleep(100);
+ gpio_set_bits(mantis, config->reset, 1);
+
+ if (err == 0) {
+ msleep(250);
+ dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)");
+ fe = zl10353_attach(&mantis_vp3030_config, adapter);
+
+ if (!fe)
+ return -1;
+
+ tda665x_attach(fe, &env57h12d5_config, adapter);
+ } else {
+ dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>",
+ adapter->name,
+ err);
+
+ return -EIO;
+
+ }
+ mantis->fe = fe;
+ dprintk(MANTIS_ERROR, 1, "Done!");
+
+ return 0;
+}
+
+struct mantis_hwconfig vp3030_config = {
+ .model_name = MANTIS_MODEL_NAME,
+ .dev_type = MANTIS_DEV_TYPE,
+ .ts_size = MANTIS_TS_188,
+
+ .baud_rate = MANTIS_BAUD_9600,
+ .parity = MANTIS_PARITY_NONE,
+ .bytes = 0,
+
+ .frontend_init = vp3030_frontend_init,
+ .power = GPIF_A12,
+ .reset = GPIF_A13,
+
+ .i2c_mode = MANTIS_BYTE_MODE
+};
diff --git a/drivers/media/dvb/mantis/mantis_vp3030.h b/drivers/media/dvb/mantis/mantis_vp3030.h
new file mode 100644
index 000000000000..5f12c4266277
--- /dev/null
+++ b/drivers/media/dvb/mantis/mantis_vp3030.h
@@ -0,0 +1,30 @@
+/*
+ Mantis VP-3030 driver
+
+ Copyright (C) Manu Abraham (abraham.manu@gmail.com)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*/
+
+#ifndef __MANTIS_VP3030_H
+#define __MANTIS_VP3030_H
+
+#include "mantis_common.h"
+
+#define MANTIS_VP_3030_DVB_T 0x0024
+
+extern struct mantis_hwconfig vp3030_config;
+
+#endif /* __MANTIS_VP3030_H */
diff --git a/drivers/media/dvb/siano/sms-cards.c b/drivers/media/dvb/siano/sms-cards.c
index 1067b22eb0c6..cff77e2eb557 100644
--- a/drivers/media/dvb/siano/sms-cards.c
+++ b/drivers/media/dvb/siano/sms-cards.c
@@ -62,6 +62,7 @@ static struct sms_board sms_boards[] = {
[SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = {
.name = "Hauppauge WinTV MiniStick",
.type = SMS_NOVA_B0,
+ .fw[DEVICE_MODE_ISDBT_BDA] = "sms1xxx-hcw-55xxx-isdbt-02.fw",
.fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw",
.board_cfg.leds_power = 26,
.board_cfg.led0 = 27,
diff --git a/drivers/media/dvb/siano/smscoreapi.h b/drivers/media/dvb/siano/smscoreapi.h
index eec18aaf5512..8ecadecaa9d0 100644
--- a/drivers/media/dvb/siano/smscoreapi.h
+++ b/drivers/media/dvb/siano/smscoreapi.h
@@ -212,6 +212,8 @@ struct smscore_device_t {
#define MSG_SMS_DAB_CHANNEL 607
#define MSG_SMS_GET_PID_FILTER_LIST_REQ 608
#define MSG_SMS_GET_PID_FILTER_LIST_RES 609
+#define MSG_SMS_GET_STATISTICS_RES 616
+#define MSG_SMS_GET_STATISTICS_REQ 615
#define MSG_SMS_HO_PER_SLICES_IND 630
#define MSG_SMS_SET_ANTENNA_CONFIG_REQ 651
#define MSG_SMS_SET_ANTENNA_CONFIG_RES 652
@@ -339,7 +341,7 @@ struct SmsFirmware_ST {
/* Statistics information returned as response for
* SmsHostApiGetStatistics_Req */
-struct SMSHOSTLIB_STATISTICS_S {
+struct SMSHOSTLIB_STATISTICS_ST {
u32 Reserved; /* Reserved */
/* Common parameters */
@@ -424,6 +426,79 @@ struct SMSHOSTLIB_STATISTICS_S {
u32 ReservedFields[10]; /* Reserved */
};
+struct SmsMsgStatisticsInfo_ST {
+ u32 RequestResult;
+
+ struct SMSHOSTLIB_STATISTICS_ST Stat;
+
+ /* Split the calc of the SNR in DAB */
+ u32 Signal; /* dB */
+ u32 Noise; /* dB */
+
+};
+
+struct SMSHOSTLIB_ISDBT_LAYER_STAT_ST {
+ /* Per-layer information */
+ u32 CodeRate; /* Code Rate from SMSHOSTLIB_CODE_RATE_ET,
+ * 255 means layer does not exist */
+ u32 Constellation; /* Constellation from SMSHOSTLIB_CONSTELLATION_ET,
+ * 255 means layer does not exist */
+ u32 BER; /* Post Viterbi BER [1E-5], 0xFFFFFFFF indicate N/A */
+ u32 BERErrorCount; /* Post Viterbi Error Bits Count */
+ u32 BERBitCount; /* Post Viterbi Total Bits Count */
+ u32 PreBER; /* Pre Viterbi BER [1E-5], 0xFFFFFFFF indicate N/A */
+ u32 TS_PER; /* Transport stream PER [%], 0xFFFFFFFF indicate N/A */
+ u32 ErrorTSPackets; /* Number of erroneous transport-stream packets */
+ u32 TotalTSPackets; /* Total number of transport-stream packets */
+ u32 TILdepthI; /* Time interleaver depth I parameter,
+ * 255 means layer does not exist */
+ u32 NumberOfSegments; /* Number of segments in layer A,
+ * 255 means layer does not exist */
+ u32 TMCCErrors; /* TMCC errors */
+};
+
+struct SMSHOSTLIB_STATISTICS_ISDBT_ST {
+ u32 StatisticsType; /* Enumerator identifying the type of the
+ * structure. Values are the same as
+ * SMSHOSTLIB_DEVICE_MODES_E
+ *
+ * This field MUST always be first in any
+ * statistics structure */
+
+ u32 FullSize; /* Total size of the structure returned by the modem.
+ * If the size requested by the host is smaller than
+ * FullSize, the struct will be truncated */
+
+ /* Common parameters */
+ u32 IsRfLocked; /* 0 - not locked, 1 - locked */
+ u32 IsDemodLocked; /* 0 - not locked, 1 - locked */
+ u32 IsExternalLNAOn; /* 0 - external LNA off, 1 - external LNA on */
+
+ /* Reception quality */
+ s32 SNR; /* dB */
+ s32 RSSI; /* dBm */
+ s32 InBandPwr; /* In band power in dBM */
+ s32 CarrierOffset; /* Carrier Offset in Hz */
+
+ /* Transmission parameters */
+ u32 Frequency; /* Frequency in Hz */
+ u32 Bandwidth; /* Bandwidth in MHz */
+ u32 TransmissionMode; /* ISDB-T transmission mode */
+ u32 ModemState; /* 0 - Acquisition, 1 - Locked */
+ u32 GuardInterval; /* Guard Interval, 1 divided by value */
+ u32 SystemType; /* ISDB-T system type (ISDB-T / ISDB-Tsb) */
+ u32 PartialReception; /* TRUE - partial reception, FALSE otherwise */
+ u32 NumOfLayers; /* Number of ISDB-T layers in the network */
+
+ /* Per-layer information */
+ /* Layers A, B and C */
+ struct SMSHOSTLIB_ISDBT_LAYER_STAT_ST LayerInfo[3];
+ /* Per-layer statistics, see SMSHOSTLIB_ISDBT_LAYER_STAT_ST */
+
+ /* Interface information */
+ u32 SmsToHostTxErrors; /* Total number of transmission errors. */
+};
+
struct PID_STATISTICS_DATA_S {
struct PID_BURST_S {
u32 size;
diff --git a/drivers/media/dvb/siano/smsdvb.c b/drivers/media/dvb/siano/smsdvb.c
index 68bf9fbd8fed..5f3939821ca3 100644
--- a/drivers/media/dvb/siano/smsdvb.c
+++ b/drivers/media/dvb/siano/smsdvb.c
@@ -116,6 +116,118 @@ static void sms_board_dvb3_event(struct smsdvb_client_t *client,
}
}
+
+static void smsdvb_update_dvb_stats(struct RECEPTION_STATISTICS_S *pReceptionData,
+ struct SMSHOSTLIB_STATISTICS_ST *p)
+{
+ if (sms_dbg & 2) {
+ printk(KERN_DEBUG "Reserved = %d", p->Reserved);
+ printk(KERN_DEBUG "IsRfLocked = %d", p->IsRfLocked);
+ printk(KERN_DEBUG "IsDemodLocked = %d", p->IsDemodLocked);
+ printk(KERN_DEBUG "IsExternalLNAOn = %d", p->IsExternalLNAOn);
+ printk(KERN_DEBUG "SNR = %d", p->SNR);
+ printk(KERN_DEBUG "BER = %d", p->BER);
+ printk(KERN_DEBUG "FIB_CRC = %d", p->FIB_CRC);
+ printk(KERN_DEBUG "TS_PER = %d", p->TS_PER);
+ printk(KERN_DEBUG "MFER = %d", p->MFER);
+ printk(KERN_DEBUG "RSSI = %d", p->RSSI);
+ printk(KERN_DEBUG "InBandPwr = %d", p->InBandPwr);
+ printk(KERN_DEBUG "CarrierOffset = %d", p->CarrierOffset);
+ printk(KERN_DEBUG "Frequency = %d", p->Frequency);
+ printk(KERN_DEBUG "Bandwidth = %d", p->Bandwidth);
+ printk(KERN_DEBUG "TransmissionMode = %d", p->TransmissionMode);
+ printk(KERN_DEBUG "ModemState = %d", p->ModemState);
+ printk(KERN_DEBUG "GuardInterval = %d", p->GuardInterval);
+ printk(KERN_DEBUG "CodeRate = %d", p->CodeRate);
+ printk(KERN_DEBUG "LPCodeRate = %d", p->LPCodeRate);
+ printk(KERN_DEBUG "Hierarchy = %d", p->Hierarchy);
+ printk(KERN_DEBUG "Constellation = %d", p->Constellation);
+ printk(KERN_DEBUG "BurstSize = %d", p->BurstSize);
+ printk(KERN_DEBUG "BurstDuration = %d", p->BurstDuration);
+ printk(KERN_DEBUG "BurstCycleTime = %d", p->BurstCycleTime);
+ printk(KERN_DEBUG "CalculatedBurstCycleTime = %d", p->CalculatedBurstCycleTime);
+ printk(KERN_DEBUG "NumOfRows = %d", p->NumOfRows);
+ printk(KERN_DEBUG "NumOfPaddCols = %d", p->NumOfPaddCols);
+ printk(KERN_DEBUG "NumOfPunctCols = %d", p->NumOfPunctCols);
+ printk(KERN_DEBUG "ErrorTSPackets = %d", p->ErrorTSPackets);
+ printk(KERN_DEBUG "TotalTSPackets = %d", p->TotalTSPackets);
+ printk(KERN_DEBUG "NumOfValidMpeTlbs = %d", p->NumOfValidMpeTlbs);
+ printk(KERN_DEBUG "NumOfInvalidMpeTlbs = %d", p->NumOfInvalidMpeTlbs);
+ printk(KERN_DEBUG "NumOfCorrectedMpeTlbs = %d", p->NumOfCorrectedMpeTlbs);
+ printk(KERN_DEBUG "BERErrorCount = %d", p->BERErrorCount);
+ printk(KERN_DEBUG "BERBitCount = %d", p->BERBitCount);
+ printk(KERN_DEBUG "SmsToHostTxErrors = %d", p->SmsToHostTxErrors);
+ printk(KERN_DEBUG "PreBER = %d", p->PreBER);
+ printk(KERN_DEBUG "CellId = %d", p->CellId);
+ printk(KERN_DEBUG "DvbhSrvIndHP = %d", p->DvbhSrvIndHP);
+ printk(KERN_DEBUG "DvbhSrvIndLP = %d", p->DvbhSrvIndLP);
+ printk(KERN_DEBUG "NumMPEReceived = %d", p->NumMPEReceived);
+ }
+
+ pReceptionData->IsDemodLocked = p->IsDemodLocked;
+
+ pReceptionData->SNR = p->SNR;
+ pReceptionData->BER = p->BER;
+ pReceptionData->BERErrorCount = p->BERErrorCount;
+ pReceptionData->InBandPwr = p->InBandPwr;
+ pReceptionData->ErrorTSPackets = p->ErrorTSPackets;
+};
+
+
+static void smsdvb_update_isdbt_stats(struct RECEPTION_STATISTICS_S *pReceptionData,
+ struct SMSHOSTLIB_STATISTICS_ISDBT_ST *p)
+{
+ int i;
+
+ if (sms_dbg & 2) {
+ printk(KERN_DEBUG "IsRfLocked = %d", p->IsRfLocked);
+ printk(KERN_DEBUG "IsDemodLocked = %d", p->IsDemodLocked);
+ printk(KERN_DEBUG "IsExternalLNAOn = %d", p->IsExternalLNAOn);
+ printk(KERN_DEBUG "SNR = %d", p->SNR);
+ printk(KERN_DEBUG "RSSI = %d", p->RSSI);
+ printk(KERN_DEBUG "InBandPwr = %d", p->InBandPwr);
+ printk(KERN_DEBUG "CarrierOffset = %d", p->CarrierOffset);
+ printk(KERN_DEBUG "Frequency = %d", p->Frequency);
+ printk(KERN_DEBUG "Bandwidth = %d", p->Bandwidth);
+ printk(KERN_DEBUG "TransmissionMode = %d", p->TransmissionMode);
+ printk(KERN_DEBUG "ModemState = %d", p->ModemState);
+ printk(KERN_DEBUG "GuardInterval = %d", p->GuardInterval);
+ printk(KERN_DEBUG "SystemType = %d", p->SystemType);
+ printk(KERN_DEBUG "PartialReception = %d", p->PartialReception);
+ printk(KERN_DEBUG "NumOfLayers = %d", p->NumOfLayers);
+ printk(KERN_DEBUG "SmsToHostTxErrors = %d", p->SmsToHostTxErrors);
+
+ for (i = 0; i < 3; i++) {
+ printk(KERN_DEBUG "%d: CodeRate = %d", i, p->LayerInfo[i].CodeRate);
+ printk(KERN_DEBUG "%d: Constellation = %d", i, p->LayerInfo[i].Constellation);
+ printk(KERN_DEBUG "%d: BER = %d", i, p->LayerInfo[i].BER);
+ printk(KERN_DEBUG "%d: BERErrorCount = %d", i, p->LayerInfo[i].BERErrorCount);
+ printk(KERN_DEBUG "%d: BERBitCount = %d", i, p->LayerInfo[i].BERBitCount);
+ printk(KERN_DEBUG "%d: PreBER = %d", i, p->LayerInfo[i].PreBER);
+ printk(KERN_DEBUG "%d: TS_PER = %d", i, p->LayerInfo[i].TS_PER);
+ printk(KERN_DEBUG "%d: ErrorTSPackets = %d", i, p->LayerInfo[i].ErrorTSPackets);
+ printk(KERN_DEBUG "%d: TotalTSPackets = %d", i, p->LayerInfo[i].TotalTSPackets);
+ printk(KERN_DEBUG "%d: TILdepthI = %d", i, p->LayerInfo[i].TILdepthI);
+ printk(KERN_DEBUG "%d: NumberOfSegments = %d", i, p->LayerInfo[i].NumberOfSegments);
+ printk(KERN_DEBUG "%d: TMCCErrors = %d", i, p->LayerInfo[i].TMCCErrors);
+ }
+ }
+
+ pReceptionData->IsDemodLocked = p->IsDemodLocked;
+
+ pReceptionData->SNR = p->SNR;
+ pReceptionData->InBandPwr = p->InBandPwr;
+
+ pReceptionData->ErrorTSPackets = 0;
+ pReceptionData->BER = 0;
+ pReceptionData->BERErrorCount = 0;
+ for (i = 0; i < 3; i++) {
+ pReceptionData->BER += p->LayerInfo[i].BER;
+ pReceptionData->BERErrorCount += p->LayerInfo[i].BERErrorCount;
+ pReceptionData->ErrorTSPackets += p->LayerInfo[i].ErrorTSPackets;
+ }
+}
+
static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
{
struct smsdvb_client_t *client = (struct smsdvb_client_t *) context;
@@ -134,6 +246,7 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
break;
case MSG_SMS_RF_TUNE_RES:
+ case MSG_SMS_ISDBT_TUNE_RES:
complete(&client->tune_done);
break;
@@ -217,6 +330,40 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
is_status_update = true;
break;
}
+ case MSG_SMS_GET_STATISTICS_RES: {
+ union {
+ struct SMSHOSTLIB_STATISTICS_ISDBT_ST isdbt;
+ struct SmsMsgStatisticsInfo_ST dvb;
+ } *p = (void *) (phdr + 1);
+ struct RECEPTION_STATISTICS_S *pReceptionData =
+ &client->sms_stat_dvb.ReceptionData;
+
+ sms_info("MSG_SMS_GET_STATISTICS_RES");
+
+ is_status_update = true;
+
+ switch (smscore_get_device_mode(client->coredev)) {
+ case DEVICE_MODE_ISDBT:
+ case DEVICE_MODE_ISDBT_BDA:
+ smsdvb_update_isdbt_stats(pReceptionData, &p->isdbt);
+ break;
+ default:
+ smsdvb_update_dvb_stats(pReceptionData, &p->dvb.Stat);
+ }
+ if (!pReceptionData->IsDemodLocked) {
+ pReceptionData->SNR = 0;
+ pReceptionData->BER = 0;
+ pReceptionData->BERErrorCount = 0;
+ pReceptionData->InBandPwr = 0;
+ pReceptionData->ErrorTSPackets = 0;
+ }
+
+ complete(&client->tune_done);
+ break;
+ }
+ default:
+ sms_info("Unhandled message %d", phdr->msgType);
+
}
smscore_putbuffer(client->coredev, cb);
@@ -233,10 +380,10 @@ static int smsdvb_onresponse(void *context, struct smscore_buffer_t *cb)
DVB3_EVENT_UNC_ERR);
} else {
- /*client->fe_status =
- (phdr->msgType == MSG_SMS_NO_SIGNAL_IND) ?
- 0 : FE_HAS_SIGNAL;*/
- client->fe_status = 0;
+ if (client->sms_stat_dvb.ReceptionData.IsRfLocked)
+ client->fe_status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
+ else
+ client->fe_status = 0;
sms_board_dvb3_event(client, DVB3_EVENT_FE_UNLOCK);
}
}
@@ -325,6 +472,20 @@ static int smsdvb_sendrequest_and_wait(struct smsdvb_client_t *client,
0 : -ETIME;
}
+static int smsdvb_send_statistics_request(struct smsdvb_client_t *client)
+{
+ int rc;
+ struct SmsMsgHdr_ST Msg = { MSG_SMS_GET_STATISTICS_REQ,
+ DVBT_BDA_CONTROL_MSG_ID,
+ HIF_TASK,
+ sizeof(struct SmsMsgHdr_ST), 0 };
+
+ rc = smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
+ &client->tune_done);
+
+ return rc;
+}
+
static inline int led_feedback(struct smsdvb_client_t *client)
{
if (client->fe_status & FE_HAS_LOCK)
@@ -337,33 +498,43 @@ static inline int led_feedback(struct smsdvb_client_t *client)
static int smsdvb_read_status(struct dvb_frontend *fe, fe_status_t *stat)
{
+ int rc;
struct smsdvb_client_t *client;
client = container_of(fe, struct smsdvb_client_t, frontend);
+ rc = smsdvb_send_statistics_request(client);
+
*stat = client->fe_status;
led_feedback(client);
- return 0;
+ return rc;
}
static int smsdvb_read_ber(struct dvb_frontend *fe, u32 *ber)
{
+ int rc;
struct smsdvb_client_t *client;
client = container_of(fe, struct smsdvb_client_t, frontend);
+ rc = smsdvb_send_statistics_request(client);
+
*ber = client->sms_stat_dvb.ReceptionData.BER;
led_feedback(client);
- return 0;
+ return rc;
}
static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
+ int rc;
+
struct smsdvb_client_t *client;
client = container_of(fe, struct smsdvb_client_t, frontend);
+ rc = smsdvb_send_statistics_request(client);
+
if (client->sms_stat_dvb.ReceptionData.InBandPwr < -95)
*strength = 0;
else if (client->sms_stat_dvb.ReceptionData.InBandPwr > -29)
@@ -375,31 +546,37 @@ static int smsdvb_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
led_feedback(client);
- return 0;
+ return rc;
}
static int smsdvb_read_snr(struct dvb_frontend *fe, u16 *snr)
{
+ int rc;
struct smsdvb_client_t *client;
client = container_of(fe, struct smsdvb_client_t, frontend);
+ rc = smsdvb_send_statistics_request(client);
+
*snr = client->sms_stat_dvb.ReceptionData.SNR;
led_feedback(client);
- return 0;
+ return rc;
}
static int smsdvb_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
+ int rc;
struct smsdvb_client_t *client;
client = container_of(fe, struct smsdvb_client_t, frontend);
+ rc = smsdvb_send_statistics_request(client);
+
*ucblocks = client->sms_stat_dvb.ReceptionData.ErrorTSPackets;
led_feedback(client);
- return 0;
+ return rc;
}
static int smsdvb_get_tune_settings(struct dvb_frontend *fe,
@@ -413,9 +590,10 @@ static int smsdvb_get_tune_settings(struct dvb_frontend *fe,
return 0;
}
-static int smsdvb_set_frontend(struct dvb_frontend *fe,
- struct dvb_frontend_parameters *fep)
+static int smsdvb_dvbt_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
struct smsdvb_client_t *client =
container_of(fe, struct smsdvb_client_t, frontend);
@@ -429,24 +607,33 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
client->fe_status = FE_HAS_SIGNAL;
client->event_fe_state = -1;
client->event_unc_state = -1;
+ fe->dtv_property_cache.delivery_system = SYS_DVBT;
Msg.Msg.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
Msg.Msg.msgDstId = HIF_TASK;
Msg.Msg.msgFlags = 0;
Msg.Msg.msgType = MSG_SMS_RF_TUNE_REQ;
Msg.Msg.msgLength = sizeof(Msg);
- Msg.Data[0] = fep->frequency;
+ Msg.Data[0] = c->frequency;
Msg.Data[2] = 12000000;
- sms_debug("freq %d band %d",
- fep->frequency, fep->u.ofdm.bandwidth);
+ sms_info("%s: freq %d band %d", __func__, c->frequency,
+ c->bandwidth_hz);
- switch (fep->u.ofdm.bandwidth) {
- case BANDWIDTH_8_MHZ: Msg.Data[1] = BW_8_MHZ; break;
- case BANDWIDTH_7_MHZ: Msg.Data[1] = BW_7_MHZ; break;
- case BANDWIDTH_6_MHZ: Msg.Data[1] = BW_6_MHZ; break;
- case BANDWIDTH_AUTO: return -EOPNOTSUPP;
- default: return -EINVAL;
+ switch (c->bandwidth_hz / 1000000) {
+ case 8:
+ Msg.Data[1] = BW_8_MHZ;
+ break;
+ case 7:
+ Msg.Data[1] = BW_7_MHZ;
+ break;
+ case 6:
+ Msg.Data[1] = BW_6_MHZ;
+ break;
+ case 0:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
}
/* Disable LNA, if any. An error is returned if no LNA is present */
ret = sms_board_lna_control(client->coredev, 0);
@@ -470,6 +657,90 @@ static int smsdvb_set_frontend(struct dvb_frontend *fe,
&client->tune_done);
}
+static int smsdvb_isdbt_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct dtv_frontend_properties *c = &fe->dtv_property_cache;
+ struct smsdvb_client_t *client =
+ container_of(fe, struct smsdvb_client_t, frontend);
+
+ struct {
+ struct SmsMsgHdr_ST Msg;
+ u32 Data[4];
+ } Msg;
+
+ fe->dtv_property_cache.delivery_system = SYS_ISDBT;
+
+ Msg.Msg.msgSrcId = DVBT_BDA_CONTROL_MSG_ID;
+ Msg.Msg.msgDstId = HIF_TASK;
+ Msg.Msg.msgFlags = 0;
+ Msg.Msg.msgType = MSG_SMS_ISDBT_TUNE_REQ;
+ Msg.Msg.msgLength = sizeof(Msg);
+
+ if (c->isdbt_sb_segment_idx == -1)
+ c->isdbt_sb_segment_idx = 0;
+
+ switch (c->isdbt_sb_segment_count) {
+ case 3:
+ Msg.Data[1] = BW_ISDBT_3SEG;
+ break;
+ case 1:
+ Msg.Data[1] = BW_ISDBT_1SEG;
+ break;
+ case 0: /* AUTO */
+ switch (c->bandwidth_hz / 1000000) {
+ case 8:
+ case 7:
+ c->isdbt_sb_segment_count = 3;
+ Msg.Data[1] = BW_ISDBT_3SEG;
+ break;
+ case 6:
+ c->isdbt_sb_segment_count = 1;
+ Msg.Data[1] = BW_ISDBT_1SEG;
+ break;
+ default: /* Assumes 6 MHZ bw */
+ c->isdbt_sb_segment_count = 1;
+ c->bandwidth_hz = 6000;
+ Msg.Data[1] = BW_ISDBT_1SEG;
+ break;
+ }
+ break;
+ default:
+ sms_info("Segment count %d not supported", c->isdbt_sb_segment_count);
+ return -EINVAL;
+ }
+
+ Msg.Data[0] = c->frequency;
+ Msg.Data[2] = 12000000;
+ Msg.Data[3] = c->isdbt_sb_segment_idx;
+
+ sms_info("%s: freq %d segwidth %d segindex %d\n", __func__,
+ c->frequency, c->isdbt_sb_segment_count,
+ c->isdbt_sb_segment_idx);
+
+ return smsdvb_sendrequest_and_wait(client, &Msg, sizeof(Msg),
+ &client->tune_done);
+}
+
+static int smsdvb_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *fep)
+{
+ struct smsdvb_client_t *client =
+ container_of(fe, struct smsdvb_client_t, frontend);
+ struct smscore_device_t *coredev = client->coredev;
+
+ switch (smscore_get_device_mode(coredev)) {
+ case DEVICE_MODE_DVBT:
+ case DEVICE_MODE_DVBT_BDA:
+ return smsdvb_dvbt_set_frontend(fe, fep);
+ case DEVICE_MODE_ISDBT:
+ case DEVICE_MODE_ISDBT_BDA:
+ return smsdvb_isdbt_set_frontend(fe, fep);
+ default:
+ return -EINVAL;
+ }
+}
+
static int smsdvb_get_frontend(struct dvb_frontend *fe,
struct dvb_frontend_parameters *fep)
{
@@ -557,13 +828,6 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
/* device removal handled by onremove callback */
if (!arrival)
return 0;
-
- if (smscore_get_device_mode(coredev) != DEVICE_MODE_DVBT_BDA) {
- sms_err("SMS Device mode is not set for "
- "DVB operation.");
- return 0;
- }
-
client = kzalloc(sizeof(struct smsdvb_client_t), GFP_KERNEL);
if (!client) {
sms_err("kmalloc() failed");
diff --git a/drivers/media/dvb/ttpci/budget-ci.c b/drivers/media/dvb/ttpci/budget-ci.c
index 9782e0593733..49c2a817a06f 100644
--- a/drivers/media/dvb/ttpci/budget-ci.c
+++ b/drivers/media/dvb/ttpci/budget-ci.c
@@ -254,7 +254,7 @@ static int msp430_ir_init(struct budget_ci *budget_ci)
budget_ci->ir.timer_keyup.function = msp430_ir_keyup;
budget_ci->ir.timer_keyup.data = (unsigned long) &budget_ci->ir;
budget_ci->ir.last_raw = 0xffff; /* An impossible value */
- error = ir_input_register(input_dev, ir_codes);
+ error = ir_input_register(input_dev, ir_codes, NULL);
if (error) {
printk(KERN_ERR "budget_ci: could not init driver for IR device (code %d)\n", error);
return error;
diff --git a/drivers/media/video/bt8xx/bttv-input.c b/drivers/media/video/bt8xx/bttv-input.c
index 277a092e1214..b320dbd635aa 100644
--- a/drivers/media/video/bt8xx/bttv-input.c
+++ b/drivers/media/video/bt8xx/bttv-input.c
@@ -247,7 +247,7 @@ int bttv_input_init(struct bttv *btv)
struct card_ir *ir;
struct ir_scancode_table *ir_codes = NULL;
struct input_dev *input_dev;
- int ir_type = IR_TYPE_OTHER;
+ u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
if (!btv->has_remote)
@@ -389,7 +389,7 @@ int bttv_input_init(struct bttv *btv)
bttv_ir_start(btv, ir);
/* all done */
- err = ir_input_register(btv->remote->dev, ir_codes);
+ err = ir_input_register(btv->remote->dev, ir_codes, NULL);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/cx231xx/cx231xx-input.c b/drivers/media/video/cx231xx/cx231xx-input.c
index 15826f98b688..c5771db3bfce 100644
--- a/drivers/media/video/cx231xx/cx231xx-input.c
+++ b/drivers/media/video/cx231xx/cx231xx-input.c
@@ -216,7 +216,7 @@ int cx231xx_ir_init(struct cx231xx *dev)
cx231xx_ir_start(ir);
/* all done */
- err = ir_input_register(ir->input, dev->board.ir_codes);
+ err = ir_input_register(ir->input, dev->board.ir_codes, NULL);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/cx23885/cx23885-dvb.c b/drivers/media/video/cx23885/cx23885-dvb.c
index e45d2df08138..ed99e93a5ac6 100644
--- a/drivers/media/video/cx23885/cx23885-dvb.c
+++ b/drivers/media/video/cx23885/cx23885-dvb.c
@@ -542,6 +542,9 @@ static struct atbm8830_config mygica_x8558pro_atbm8830_cfg1 = {
.osc_clk_freq = 30400, /* in kHz */
.if_freq = 0, /* zero IF */
.zif_swap_iq = 1,
+ .agc_min = 0x2E,
+ .agc_max = 0xFF,
+ .agc_hold_loop = 0,
};
static struct max2165_config mygic_x8558pro_max2165_cfg1 = {
@@ -558,6 +561,9 @@ static struct atbm8830_config mygica_x8558pro_atbm8830_cfg2 = {
.osc_clk_freq = 30400, /* in kHz */
.if_freq = 0, /* zero IF */
.zif_swap_iq = 1,
+ .agc_min = 0x2E,
+ .agc_max = 0xFF,
+ .agc_hold_loop = 0,
};
static struct max2165_config mygic_x8558pro_max2165_cfg2 = {
diff --git a/drivers/media/video/cx23885/cx23885-input.c b/drivers/media/video/cx23885/cx23885-input.c
index 768eec92ccf9..9c6620f86dca 100644
--- a/drivers/media/video/cx23885/cx23885-input.c
+++ b/drivers/media/video/cx23885/cx23885-input.c
@@ -397,7 +397,7 @@ int cx23885_input_init(struct cx23885_dev *dev)
dev->ir_input = ir;
cx23885_input_ir_start(dev);
- ret = ir_input_register(ir->dev, ir_codes);
+ ret = ir_input_register(ir->dev, ir_codes, NULL);
if (ret)
goto err_out_stop;
diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
index f9fda18b410c..de180d4d5a21 100644
--- a/drivers/media/video/cx88/cx88-input.c
+++ b/drivers/media/video/cx88/cx88-input.c
@@ -192,7 +192,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
struct cx88_IR *ir;
struct input_dev *input_dev;
struct ir_scancode_table *ir_codes = NULL;
- int ir_type = IR_TYPE_OTHER;
+ u64 ir_type = IR_TYPE_OTHER;
int err = -ENOMEM;
ir = kzalloc(sizeof(*ir), GFP_KERNEL);
@@ -383,7 +383,7 @@ int cx88_ir_init(struct cx88_core *core, struct pci_dev *pci)
cx88_ir_start(core, ir);
/* all done */
- err = ir_input_register(ir->input, ir_codes);
+ err = ir_input_register(ir->input, ir_codes, NULL);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/em28xx/em28xx-input.c b/drivers/media/video/em28xx/em28xx-input.c
index af0d935c29be..69dcf0cc1f1e 100644
--- a/drivers/media/video/em28xx/em28xx-input.c
+++ b/drivers/media/video/em28xx/em28xx-input.c
@@ -75,6 +75,10 @@ struct em28xx_IR {
unsigned int repeat_interval;
int (*get_key)(struct em28xx_IR *, struct em28xx_ir_poll_result *);
+
+ /* IR device properties */
+
+ struct ir_dev_props props;
};
/**********************************************************
@@ -336,35 +340,28 @@ static void em28xx_ir_stop(struct em28xx_IR *ir)
cancel_delayed_work_sync(&ir->work);
}
-int em28xx_ir_init(struct em28xx *dev)
+int em28xx_ir_change_protocol(void *priv, u64 ir_type)
{
- struct em28xx_IR *ir;
- struct input_dev *input_dev;
- u8 ir_config;
- int err = -ENOMEM;
-
- if (dev->board.ir_codes == NULL) {
- /* No remote control support */
- return 0;
- }
-
- ir = kzalloc(sizeof(*ir), GFP_KERNEL);
- input_dev = input_allocate_device();
- if (!ir || !input_dev)
- goto err_out_free;
-
- ir->input = input_dev;
- ir_config = EM2874_IR_RC5;
+ int rc = 0;
+ struct em28xx_IR *ir = priv;
+ struct em28xx *dev = ir->dev;
+ u8 ir_config = EM2874_IR_RC5;
/* Adjust xclk based o IR table for RC5/NEC tables */
- if (dev->board.ir_codes->ir_type == IR_TYPE_RC5) {
+
+ dev->board.ir_codes->ir_type = IR_TYPE_OTHER;
+ if (ir_type == IR_TYPE_RC5) {
dev->board.xclk |= EM28XX_XCLK_IR_RC5_MODE;
ir->full_code = 1;
- } else if (dev->board.ir_codes->ir_type == IR_TYPE_NEC) {
+ } else if (ir_type == IR_TYPE_NEC) {
dev->board.xclk &= ~EM28XX_XCLK_IR_RC5_MODE;
ir_config = EM2874_IR_NEC;
ir->full_code = 1;
- }
+ } else
+ rc = -EINVAL;
+
+ dev->board.ir_codes->ir_type = ir_type;
+
em28xx_write_reg_bits(dev, EM28XX_R0F_XCLK, dev->board.xclk,
EM28XX_XCLK_IR_RC5_MODE);
@@ -380,9 +377,42 @@ int em28xx_ir_init(struct em28xx *dev)
break;
default:
printk("Unrecognized em28xx chip id: IR not supported\n");
- goto err_out_free;
+ rc = -EINVAL;
}
+ return rc;
+}
+
+int em28xx_ir_init(struct em28xx *dev)
+{
+ struct em28xx_IR *ir;
+ struct input_dev *input_dev;
+ int err = -ENOMEM;
+
+ if (dev->board.ir_codes == NULL) {
+ /* No remote control support */
+ return 0;
+ }
+
+ ir = kzalloc(sizeof(*ir), GFP_KERNEL);
+ input_dev = input_allocate_device();
+ if (!ir || !input_dev)
+ goto err_out_free;
+
+ /* record handles to ourself */
+ ir->dev = dev;
+ dev->ir = ir;
+
+ ir->input = input_dev;
+
+ /*
+ * em2874 supports more protocols. For now, let's just announce
+ * the two protocols that were already tested
+ */
+ ir->props.allowed_protos = IR_TYPE_RC5 | IR_TYPE_NEC;
+ ir->props.priv = ir;
+ ir->props.change_protocol = em28xx_ir_change_protocol;
+
/* This is how often we ask the chip for IR information */
ir->polling = 100; /* ms */
@@ -393,6 +423,8 @@ int em28xx_ir_init(struct em28xx *dev)
usb_make_path(dev->udev, ir->phys, sizeof(ir->phys));
strlcat(ir->phys, "/input0", sizeof(ir->phys));
+ /* Set IR protocol */
+ em28xx_ir_change_protocol(ir, dev->board.ir_codes->ir_type);
err = ir_input_init(input_dev, &ir->ir, IR_TYPE_OTHER);
if (err < 0)
goto err_out_free;
@@ -405,14 +437,13 @@ int em28xx_ir_init(struct em28xx *dev)
input_dev->id.product = le16_to_cpu(dev->udev->descriptor.idProduct);
input_dev->dev.parent = &dev->udev->dev;
- /* record handles to ourself */
- ir->dev = dev;
- dev->ir = ir;
+
em28xx_ir_start(ir);
/* all done */
- err = ir_input_register(ir->input, dev->board.ir_codes);
+ err = ir_input_register(ir->input, dev->board.ir_codes,
+ &ir->props);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index e930a67d526b..bd6214d4ab3b 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -1815,6 +1815,8 @@ static int vidioc_qbuf(struct file *file, void *priv,
/* put the buffer in the 'queued' queue */
i = gspca_dev->fr_q;
gspca_dev->fr_queue[i] = index;
+ if (gspca_dev->fr_i == i)
+ gspca_dev->cur_frame = frame;
gspca_dev->fr_q = (i + 1) % gspca_dev->nframes;
PDEBUG(D_FRAM, "qbuf q:%d i:%d o:%d",
gspca_dev->fr_q,
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 4dbb882c83dc..0a6b8f07a69d 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -1533,7 +1533,7 @@ static void setexposure_96(struct gspca_dev *gspca_dev)
static void setsharpness_96(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
- u8 val;
+ s8 val;
val = sd->sharpness;
if (val < 0) { /* auto */
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index 716df6b15fc5..53e85725f36d 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1336,6 +1336,7 @@ static const __devinitdata struct usb_device_id device_table[] = {
{USB_DEVICE(0x04fc, 0x5330), BS(SPCA533, 0)},
{USB_DEVICE(0x04fc, 0x5360), BS(SPCA536, 0)},
{USB_DEVICE(0x04fc, 0xffff), BS(SPCA504B, 0)},
+ {USB_DEVICE(0x052b, 0x1507), BS(SPCA533, MegapixV4)},
{USB_DEVICE(0x052b, 0x1513), BS(SPCA533, MegapixV4)},
{USB_DEVICE(0x052b, 0x1803), BS(SPCA533, MegaImageVI)},
{USB_DEVICE(0x0546, 0x3155), BS(SPCA533, 0)},
diff --git a/drivers/media/video/ir-kbd-i2c.c b/drivers/media/video/ir-kbd-i2c.c
index b86e35386cee..094e21dbb14f 100644
--- a/drivers/media/video/ir-kbd-i2c.c
+++ b/drivers/media/video/ir-kbd-i2c.c
@@ -299,7 +299,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
struct ir_scancode_table *ir_codes = NULL;
const char *name = NULL;
- int ir_type = 0;
+ u64 ir_type = 0;
struct IR_i2c *ir;
struct input_dev *input_dev;
struct i2c_adapter *adap = client->adapter;
@@ -446,7 +446,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
input_dev->name = ir->name;
input_dev->phys = ir->phys;
- err = ir_input_register(ir->input, ir->ir_codes);
+ err = ir_input_register(ir->input, ir->ir_codes, NULL);
if (err)
goto err_out_free;
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index f8e985989ca0..a4eaf1b75d70 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -460,7 +460,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
int polling = 0;
int rc5_gpio = 0;
int nec_gpio = 0;
- int ir_type = IR_TYPE_OTHER;
+ u64 ir_type = IR_TYPE_OTHER;
int err;
if (dev->has_remote != SAA7134_REMOTE_GPIO)
@@ -728,7 +728,7 @@ int saa7134_input_init1(struct saa7134_dev *dev)
dev->remote = ir;
saa7134_ir_start(dev, ir);
- err = ir_input_register(ir->dev, ir_codes);
+ err = ir_input_register(ir->dev, ir_codes, NULL);
if (err)
goto err_out_stop;
diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c
index 0469d7a876a8..ec8ef8c5560a 100644
--- a/drivers/media/video/uvc/uvc_ctrl.c
+++ b/drivers/media/video/uvc/uvc_ctrl.c
@@ -1393,7 +1393,7 @@ uvc_ctrl_prune_entity(struct uvc_device *dev, struct uvc_entity *entity)
size = entity->processing.bControlSize;
for (i = 0; i < ARRAY_SIZE(blacklist); ++i) {
- if (!usb_match_id(dev->intf, &blacklist[i].id))
+ if (!usb_match_one_id(dev->intf, &blacklist[i].id))
continue;
if (blacklist[i].index >= 8 * size ||
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c
index 391cccca7ffc..fc7db17afb29 100644
--- a/drivers/media/video/uvc/uvc_driver.c
+++ b/drivers/media/video/uvc/uvc_driver.c
@@ -43,8 +43,9 @@
#define DRIVER_VERSION "v0.1.0"
#endif
+unsigned int uvc_clock_param = CLOCK_MONOTONIC;
unsigned int uvc_no_drop_param;
-static unsigned int uvc_quirks_param;
+static unsigned int uvc_quirks_param = -1;
unsigned int uvc_trace_param;
unsigned int uvc_timeout_param = UVC_CTRL_STREAMING_TIMEOUT;
@@ -59,6 +60,11 @@ static struct uvc_format_desc uvc_fmts[] = {
.fcc = V4L2_PIX_FMT_YUYV,
},
{
+ .name = "YUV 4:2:2 (YUYV)",
+ .guid = UVC_GUID_FORMAT_YUY2_ISIGHT,
+ .fcc = V4L2_PIX_FMT_YUYV,
+ },
+ {
.name = "YUV 4:2:0 (NV12)",
.guid = UVC_GUID_FORMAT_NV12,
.fcc = V4L2_PIX_FMT_NV12,
@@ -1750,7 +1756,8 @@ static int uvc_probe(struct usb_interface *intf,
dev->udev = usb_get_dev(udev);
dev->intf = usb_get_intf(intf);
dev->intfnum = intf->cur_altsetting->desc.bInterfaceNumber;
- dev->quirks = id->driver_info | uvc_quirks_param;
+ dev->quirks = (uvc_quirks_param == -1)
+ ? id->driver_info : uvc_quirks_param;
if (udev->product != NULL)
strlcpy(dev->name, udev->product, sizeof dev->name);
@@ -1773,9 +1780,9 @@ static int uvc_probe(struct usb_interface *intf,
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
- if (uvc_quirks_param != 0) {
- uvc_printk(KERN_INFO, "Forcing device quirks 0x%x by module "
- "parameter for testing purpose.\n", uvc_quirks_param);
+ if (dev->quirks != id->driver_info) {
+ uvc_printk(KERN_INFO, "Forcing device quirks to 0x%x by module "
+ "parameter for testing purpose.\n", dev->quirks);
uvc_printk(KERN_INFO, "Please report required quirks to the "
"linux-uvc-devel mailing list.\n");
}
@@ -1892,6 +1899,45 @@ static int uvc_reset_resume(struct usb_interface *intf)
}
/* ------------------------------------------------------------------------
+ * Module parameters
+ */
+
+static int uvc_clock_param_get(char *buffer, struct kernel_param *kp)
+{
+ if (uvc_clock_param == CLOCK_MONOTONIC)
+ return sprintf(buffer, "CLOCK_MONOTONIC");
+ else
+ return sprintf(buffer, "CLOCK_REALTIME");
+}
+
+static int uvc_clock_param_set(const char *val, struct kernel_param *kp)
+{
+ if (strncasecmp(val, "clock_", strlen("clock_")) == 0)
+ val += strlen("clock_");
+
+ if (strcasecmp(val, "monotonic") == 0)
+ uvc_clock_param = CLOCK_MONOTONIC;
+ else if (strcasecmp(val, "realtime") == 0)
+ uvc_clock_param = CLOCK_REALTIME;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+module_param_call(clock, uvc_clock_param_set, uvc_clock_param_get,
+ &uvc_clock_param, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(clock, "Video buffers timestamp clock");
+module_param_named(nodrop, uvc_no_drop_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
+module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(quirks, "Forced device quirks");
+module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(trace, "Trace level bitmask");
+module_param_named(timeout, uvc_timeout_param, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
+
+/* ------------------------------------------------------------------------
* Driver initialization and cleanup
*/
@@ -2197,15 +2243,6 @@ static void __exit uvc_cleanup(void)
module_init(uvc_init);
module_exit(uvc_cleanup);
-module_param_named(nodrop, uvc_no_drop_param, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(nodrop, "Don't drop incomplete frames");
-module_param_named(quirks, uvc_quirks_param, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(quirks, "Forced device quirks");
-module_param_named(trace, uvc_trace_param, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(trace, "Trace level bitmask");
-module_param_named(timeout, uvc_timeout_param, uint, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(timeout, "Streaming control requests timeout");
-
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index f854698c4061..4a925a31b0e0 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -59,9 +59,9 @@
* returns immediately.
*
* When the buffer is full, the completion handler removes it from the irq
- * queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
+ * queue, marks it as done (UVC_BUF_STATE_DONE) and wakes its wait queue.
* At that point, any process waiting on the buffer will be woken up. If a
- * process tries to dequeue a buffer after it has been marked ready, the
+ * process tries to dequeue a buffer after it has been marked done, the
* dequeing will succeed immediately.
*
* 2. Buffers are queued, user is waiting on a buffer and the device gets
@@ -201,6 +201,7 @@ static void __uvc_query_buffer(struct uvc_buffer *buf,
break;
case UVC_BUF_STATE_QUEUED:
case UVC_BUF_STATE_ACTIVE:
+ case UVC_BUF_STATE_READY:
v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
break;
case UVC_BUF_STATE_IDLE:
@@ -295,13 +296,15 @@ static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
{
if (nonblocking) {
return (buf->state != UVC_BUF_STATE_QUEUED &&
- buf->state != UVC_BUF_STATE_ACTIVE)
+ buf->state != UVC_BUF_STATE_ACTIVE &&
+ buf->state != UVC_BUF_STATE_READY)
? 0 : -EAGAIN;
}
return wait_event_interruptible(buf->wait,
buf->state != UVC_BUF_STATE_QUEUED &&
- buf->state != UVC_BUF_STATE_ACTIVE);
+ buf->state != UVC_BUF_STATE_ACTIVE &&
+ buf->state != UVC_BUF_STATE_READY);
}
/*
@@ -348,6 +351,7 @@ int uvc_dequeue_buffer(struct uvc_video_queue *queue,
case UVC_BUF_STATE_IDLE:
case UVC_BUF_STATE_QUEUED:
case UVC_BUF_STATE_ACTIVE:
+ case UVC_BUF_STATE_READY:
default:
uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
"(driver bug?).\n", buf->state);
@@ -489,6 +493,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
spin_lock_irqsave(&queue->irqlock, flags);
list_del(&buf->queue);
+ buf->state = UVC_BUF_STATE_DONE;
if (!list_empty(&queue->irqqueue))
nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
queue);
@@ -497,7 +502,6 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
spin_unlock_irqrestore(&queue->irqlock, flags);
buf->buf.sequence = queue->sequence++;
- do_gettimeofday(&buf->buf.timestamp);
wake_up(&buf->wait);
return nextbuf;
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 9a9802830d41..6b0666be370f 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -410,6 +410,8 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
* when the EOF bit is set to force synchronisation on the next packet.
*/
if (buf->state != UVC_BUF_STATE_ACTIVE) {
+ struct timespec ts;
+
if (fid == stream->last_fid) {
uvc_trace(UVC_TRACE_FRAME, "Dropping payload (out of "
"sync).\n");
@@ -419,6 +421,14 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
return -ENODATA;
}
+ if (uvc_clock_param == CLOCK_MONOTONIC)
+ ktime_get_ts(&ts);
+ else
+ ktime_get_real_ts(&ts);
+
+ buf->buf.timestamp.tv_sec = ts.tv_sec;
+ buf->buf.timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+
/* TODO: Handle PTS and SCR. */
buf->state = UVC_BUF_STATE_ACTIVE;
}
@@ -441,7 +451,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
if (fid != stream->last_fid && buf->buf.bytesused != 0) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete (FID bit "
"toggled).\n");
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
return -EAGAIN;
}
@@ -470,7 +480,7 @@ static void uvc_video_decode_data(struct uvc_streaming *stream,
/* Complete the current frame if the buffer size was exceeded. */
if (len > maxlen) {
uvc_trace(UVC_TRACE_FRAME, "Frame complete (overflow).\n");
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
}
}
@@ -482,7 +492,7 @@ static void uvc_video_decode_end(struct uvc_streaming *stream,
uvc_trace(UVC_TRACE_FRAME, "Frame complete (EOF found).\n");
if (data[0] == len)
uvc_trace(UVC_TRACE_FRAME, "EOF in empty payload.\n");
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
if (stream->dev->quirks & UVC_QUIRK_STREAM_NO_FID)
stream->last_fid ^= UVC_STREAM_FID;
}
@@ -568,8 +578,7 @@ static void uvc_video_decode_isoc(struct urb *urb, struct uvc_streaming *stream,
uvc_video_decode_end(stream, buf, mem,
urb->iso_frame_desc[i].actual_length);
- if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR)
+ if (buf->state == UVC_BUF_STATE_READY)
buf = uvc_queue_next_buffer(&stream->queue, buf);
}
}
@@ -627,8 +636,7 @@ static void uvc_video_decode_bulk(struct urb *urb, struct uvc_streaming *stream,
if (!stream->bulk.skip_payload && buf != NULL) {
uvc_video_decode_end(stream, buf, stream->bulk.header,
stream->bulk.payload_size);
- if (buf->state == UVC_BUF_STATE_DONE ||
- buf->state == UVC_BUF_STATE_ERROR)
+ if (buf->state == UVC_BUF_STATE_READY)
buf = uvc_queue_next_buffer(&stream->queue,
buf);
}
@@ -669,7 +677,7 @@ static void uvc_video_encode_bulk(struct urb *urb, struct uvc_streaming *stream,
stream->bulk.payload_size == stream->bulk.max_payload_size) {
if (buf->buf.bytesused == stream->queue.buf_used) {
stream->queue.buf_used = 0;
- buf->state = UVC_BUF_STATE_DONE;
+ buf->state = UVC_BUF_STATE_READY;
uvc_queue_next_buffer(&stream->queue, buf);
stream->last_fid ^= UVC_STREAM_FID;
}
@@ -924,10 +932,8 @@ static int uvc_init_video_bulk(struct uvc_streaming *stream,
static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
{
struct usb_interface *intf = stream->intf;
- struct usb_host_interface *alts;
- struct usb_host_endpoint *ep = NULL;
- int intfnum = stream->intfnum;
- unsigned int bandwidth, psize, i;
+ struct usb_host_endpoint *ep;
+ unsigned int i;
int ret;
stream->last_fid = -1;
@@ -936,6 +942,12 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
stream->bulk.payload_size = 0;
if (intf->num_altsetting > 1) {
+ struct usb_host_endpoint *best_ep = NULL;
+ unsigned int best_psize = 3 * 1024;
+ unsigned int bandwidth;
+ unsigned int uninitialized_var(altsetting);
+ int intfnum = stream->intfnum;
+
/* Isochronous endpoint, select the alternate setting. */
bandwidth = stream->ctrl.dwMaxPayloadTransferSize;
@@ -949,6 +961,9 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
}
for (i = 0; i < intf->num_altsetting; ++i) {
+ struct usb_host_interface *alts;
+ unsigned int psize;
+
alts = &intf->altsetting[i];
ep = uvc_find_endpoint(alts,
stream->header.bEndpointAddress);
@@ -958,21 +973,27 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
/* Check if the bandwidth is high enough. */
psize = le16_to_cpu(ep->desc.wMaxPacketSize);
psize = (psize & 0x07ff) * (1 + ((psize >> 11) & 3));
- if (psize >= bandwidth)
- break;
+ if (psize >= bandwidth && psize <= best_psize) {
+ altsetting = i;
+ best_psize = psize;
+ best_ep = ep;
+ }
}
- if (i >= intf->num_altsetting) {
+ if (best_ep == NULL) {
uvc_trace(UVC_TRACE_VIDEO, "No fast enough alt setting "
"for requested bandwidth.\n");
return -EIO;
}
- ret = usb_set_interface(stream->dev->udev, intfnum, i);
+ uvc_trace(UVC_TRACE_VIDEO, "Selecting alternate setting %u "
+ "(%u B/frame bandwidth).\n", altsetting, best_psize);
+
+ ret = usb_set_interface(stream->dev->udev, intfnum, altsetting);
if (ret < 0)
return ret;
- ret = uvc_init_video_isoc(stream, ep, gfp_flags);
+ ret = uvc_init_video_isoc(stream, best_ep, gfp_flags);
} else {
/* Bulk endpoint, proceed to URB initialization. */
ep = uvc_find_endpoint(&intf->altsetting[0],
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h
index 7ec9a04ced50..6aa9b2c2b685 100644
--- a/drivers/media/video/uvc/uvcvideo.h
+++ b/drivers/media/video/uvc/uvcvideo.h
@@ -113,6 +113,9 @@ struct uvc_xu_control {
#define UVC_GUID_FORMAT_YUY2 \
{ 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
+#define UVC_GUID_FORMAT_YUY2_ISIGHT \
+ { 'Y', 'U', 'Y', '2', 0x00, 0x00, 0x10, 0x00, \
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x38, 0x9b, 0x71}
#define UVC_GUID_FORMAT_NV12 \
{ 'N', 'V', '1', '2', 0x00, 0x00, 0x10, 0x00, \
0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71}
@@ -365,8 +368,9 @@ enum uvc_buffer_state {
UVC_BUF_STATE_IDLE = 0,
UVC_BUF_STATE_QUEUED = 1,
UVC_BUF_STATE_ACTIVE = 2,
- UVC_BUF_STATE_DONE = 3,
- UVC_BUF_STATE_ERROR = 4,
+ UVC_BUF_STATE_READY = 3,
+ UVC_BUF_STATE_DONE = 4,
+ UVC_BUF_STATE_ERROR = 5,
};
struct uvc_buffer {
@@ -532,6 +536,7 @@ struct uvc_driver {
#define UVC_WARN_MINMAX 0
#define UVC_WARN_PROBE_DEF 1
+extern unsigned int uvc_clock_param;
extern unsigned int uvc_no_drop_param;
extern unsigned int uvc_trace_param;
extern unsigned int uvc_timeout_param;
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 7045c45da9b1..949a648f8e2e 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -111,10 +111,7 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
break;
case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
- seq_printf(seq,
- "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X",
- serialno[2], serialno[3],
- serialno[4], serialno[5], serialno[6], serialno[7]);
+ seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
break;
case I2O_SNFORMAT_WAN: /* WAN MAC Address */
@@ -126,10 +123,8 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
/* FIXME: Figure out what a LAN-64 address really looks like?? */
seq_printf(seq,
- "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X",
- serialno[8], serialno[9],
- serialno[2], serialno[3],
- serialno[4], serialno[5], serialno[6], serialno[7]);
+ "LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
+ serialno[8], serialno[9], &serialno[2]);
break;
case I2O_SNFORMAT_DDM: /* I2O DDM */
diff --git a/drivers/mfd/88pm8607.c b/drivers/mfd/88pm8607.c
deleted file mode 100644
index 7e3f65907993..000000000000
--- a/drivers/mfd/88pm8607.c
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Base driver for Marvell 88PM8607
- *
- * Copyright (C) 2009 Marvell International Ltd.
- * Haojian Zhuang <haojian.zhuang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/i2c.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/88pm8607.h>
-
-
-#define PM8607_REG_RESOURCE(_start, _end) \
-{ \
- .start = PM8607_##_start, \
- .end = PM8607_##_end, \
- .flags = IORESOURCE_IO, \
-}
-
-static struct resource pm8607_regulator_resources[] = {
- PM8607_REG_RESOURCE(BUCK1, BUCK1),
- PM8607_REG_RESOURCE(BUCK2, BUCK2),
- PM8607_REG_RESOURCE(BUCK3, BUCK3),
- PM8607_REG_RESOURCE(LDO1, LDO1),
- PM8607_REG_RESOURCE(LDO2, LDO2),
- PM8607_REG_RESOURCE(LDO3, LDO3),
- PM8607_REG_RESOURCE(LDO4, LDO4),
- PM8607_REG_RESOURCE(LDO5, LDO5),
- PM8607_REG_RESOURCE(LDO6, LDO6),
- PM8607_REG_RESOURCE(LDO7, LDO7),
- PM8607_REG_RESOURCE(LDO8, LDO8),
- PM8607_REG_RESOURCE(LDO9, LDO9),
- PM8607_REG_RESOURCE(LDO10, LDO10),
- PM8607_REG_RESOURCE(LDO12, LDO12),
- PM8607_REG_RESOURCE(LDO14, LDO14),
-};
-
-#define PM8607_REG_DEVS(_name, _id) \
-{ \
- .name = "88pm8607-" #_name, \
- .num_resources = 1, \
- .resources = &pm8607_regulator_resources[PM8607_ID_##_id], \
-}
-
-static struct mfd_cell pm8607_devs[] = {
- PM8607_REG_DEVS(buck1, BUCK1),
- PM8607_REG_DEVS(buck2, BUCK2),
- PM8607_REG_DEVS(buck3, BUCK3),
- PM8607_REG_DEVS(ldo1, LDO1),
- PM8607_REG_DEVS(ldo2, LDO2),
- PM8607_REG_DEVS(ldo3, LDO3),
- PM8607_REG_DEVS(ldo4, LDO4),
- PM8607_REG_DEVS(ldo5, LDO5),
- PM8607_REG_DEVS(ldo6, LDO6),
- PM8607_REG_DEVS(ldo7, LDO7),
- PM8607_REG_DEVS(ldo8, LDO8),
- PM8607_REG_DEVS(ldo9, LDO9),
- PM8607_REG_DEVS(ldo10, LDO10),
- PM8607_REG_DEVS(ldo12, LDO12),
- PM8607_REG_DEVS(ldo14, LDO14),
-};
-
-static inline int pm8607_read_device(struct pm8607_chip *chip,
- int reg, int bytes, void *dest)
-{
- struct i2c_client *i2c = chip->client;
- unsigned char data;
- int ret;
-
- data = (unsigned char)reg;
- ret = i2c_master_send(i2c, &data, 1);
- if (ret < 0)
- return ret;
-
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int pm8607_write_device(struct pm8607_chip *chip,
- int reg, int bytes, void *src)
-{
- struct i2c_client *i2c = chip->client;
- unsigned char buf[bytes + 1];
- int ret;
-
- buf[0] = (unsigned char)reg;
- memcpy(&buf[1], src, bytes);
-
- ret = i2c_master_send(i2c, buf, bytes + 1);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-int pm8607_reg_read(struct pm8607_chip *chip, int reg)
-{
- unsigned char data;
- int ret;
-
- mutex_lock(&chip->io_lock);
- ret = chip->read(chip, reg, 1, &data);
- mutex_unlock(&chip->io_lock);
-
- if (ret < 0)
- return ret;
- else
- return (int)data;
-}
-EXPORT_SYMBOL(pm8607_reg_read);
-
-int pm8607_reg_write(struct pm8607_chip *chip, int reg,
- unsigned char data)
-{
- int ret;
-
- mutex_lock(&chip->io_lock);
- ret = chip->write(chip, reg, 1, &data);
- mutex_unlock(&chip->io_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(pm8607_reg_write);
-
-int pm8607_bulk_read(struct pm8607_chip *chip, int reg,
- int count, unsigned char *buf)
-{
- int ret;
-
- mutex_lock(&chip->io_lock);
- ret = chip->read(chip, reg, count, buf);
- mutex_unlock(&chip->io_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(pm8607_bulk_read);
-
-int pm8607_bulk_write(struct pm8607_chip *chip, int reg,
- int count, unsigned char *buf)
-{
- int ret;
-
- mutex_lock(&chip->io_lock);
- ret = chip->write(chip, reg, count, buf);
- mutex_unlock(&chip->io_lock);
-
- return ret;
-}
-EXPORT_SYMBOL(pm8607_bulk_write);
-
-int pm8607_set_bits(struct pm8607_chip *chip, int reg,
- unsigned char mask, unsigned char data)
-{
- unsigned char value;
- int ret;
-
- mutex_lock(&chip->io_lock);
- ret = chip->read(chip, reg, 1, &value);
- if (ret < 0)
- goto out;
- value &= ~mask;
- value |= data;
- ret = chip->write(chip, reg, 1, &value);
-out:
- mutex_unlock(&chip->io_lock);
- return ret;
-}
-EXPORT_SYMBOL(pm8607_set_bits);
-
-
-static const struct i2c_device_id pm8607_id_table[] = {
- { "88PM8607", 0 },
- {}
-};
-MODULE_DEVICE_TABLE(i2c, pm8607_id_table);
-
-
-static int __devinit pm8607_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct pm8607_platform_data *pdata = client->dev.platform_data;
- struct pm8607_chip *chip;
- int i, count;
- int ret;
-
- chip = kzalloc(sizeof(struct pm8607_chip), GFP_KERNEL);
- if (chip == NULL)
- return -ENOMEM;
-
- chip->client = client;
- chip->dev = &client->dev;
- chip->read = pm8607_read_device;
- chip->write = pm8607_write_device;
- i2c_set_clientdata(client, chip);
-
- mutex_init(&chip->io_lock);
- dev_set_drvdata(chip->dev, chip);
-
- ret = pm8607_reg_read(chip, PM8607_CHIP_ID);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret);
- goto out;
- }
- if ((ret & CHIP_ID_MASK) == CHIP_ID)
- dev_info(chip->dev, "Marvell 88PM8607 (ID: %02x) detected\n",
- ret);
- else {
- dev_err(chip->dev, "Failed to detect Marvell 88PM8607. "
- "Chip ID: %02x\n", ret);
- goto out;
- }
- chip->chip_id = ret;
-
- ret = pm8607_reg_read(chip, PM8607_BUCK3);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to read BUCK3 register: %d\n", ret);
- goto out;
- }
- if (ret & PM8607_BUCK3_DOUBLE)
- chip->buck3_double = 1;
-
- ret = pm8607_reg_read(chip, PM8607_MISC1);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to read MISC1 register: %d\n", ret);
- goto out;
- }
- if (pdata->i2c_port == PI2C_PORT)
- ret |= PM8607_MISC1_PI2C;
- else
- ret &= ~PM8607_MISC1_PI2C;
- ret = pm8607_reg_write(chip, PM8607_MISC1, ret);
- if (ret < 0) {
- dev_err(chip->dev, "Failed to write MISC1 register: %d\n", ret);
- goto out;
- }
-
-
- count = ARRAY_SIZE(pm8607_devs);
- for (i = 0; i < count; i++) {
- ret = mfd_add_devices(chip->dev, i, &pm8607_devs[i],
- 1, NULL, 0);
- if (ret != 0) {
- dev_err(chip->dev, "Failed to add subdevs\n");
- goto out;
- }
- }
-
- return 0;
-
-out:
- i2c_set_clientdata(client, NULL);
- kfree(chip);
- return ret;
-}
-
-static int __devexit pm8607_remove(struct i2c_client *client)
-{
- struct pm8607_chip *chip = i2c_get_clientdata(client);
-
- mfd_remove_devices(chip->dev);
- kfree(chip);
- return 0;
-}
-
-static struct i2c_driver pm8607_driver = {
- .driver = {
- .name = "88PM8607",
- .owner = THIS_MODULE,
- },
- .probe = pm8607_probe,
- .remove = __devexit_p(pm8607_remove),
- .id_table = pm8607_id_table,
-};
-
-static int __init pm8607_init(void)
-{
- int ret;
- ret = i2c_add_driver(&pm8607_driver);
- if (ret != 0)
- pr_err("Failed to register 88PM8607 I2C driver: %d\n", ret);
- return ret;
-}
-subsys_initcall(pm8607_init);
-
-static void __exit pm8607_exit(void)
-{
- i2c_del_driver(&pm8607_driver);
-}
-module_exit(pm8607_exit);
-
-MODULE_DESCRIPTION("PMIC Driver for Marvell 88PM8607");
-MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c
new file mode 100644
index 000000000000..16f0dca707a7
--- /dev/null
+++ b/drivers/mfd/88pm860x-core.c
@@ -0,0 +1,560 @@
+/*
+ * Base driver for Marvell 88PM8607
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/88pm860x.h>
+
+char pm860x_backlight_name[][MFD_NAME_SIZE] = {
+ "backlight-0",
+ "backlight-1",
+ "backlight-2",
+};
+EXPORT_SYMBOL(pm860x_backlight_name);
+
+char pm860x_led_name[][MFD_NAME_SIZE] = {
+ "led0-red",
+ "led0-green",
+ "led0-blue",
+ "led1-red",
+ "led1-green",
+ "led1-blue",
+};
+EXPORT_SYMBOL(pm860x_led_name);
+
+#define PM8606_BACKLIGHT_RESOURCE(_i, _x) \
+{ \
+ .name = pm860x_backlight_name[_i], \
+ .start = PM8606_##_x, \
+ .end = PM8606_##_x, \
+ .flags = IORESOURCE_IO, \
+}
+
+static struct resource backlight_resources[] = {
+ PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT1, WLED1A),
+ PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT2, WLED2A),
+ PM8606_BACKLIGHT_RESOURCE(PM8606_BACKLIGHT3, WLED3A),
+};
+
+#define PM8606_BACKLIGHT_DEVS(_i) \
+{ \
+ .name = "88pm860x-backlight", \
+ .num_resources = 1, \
+ .resources = &backlight_resources[_i], \
+ .id = _i, \
+}
+
+static struct mfd_cell backlight_devs[] = {
+ PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT1),
+ PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT2),
+ PM8606_BACKLIGHT_DEVS(PM8606_BACKLIGHT3),
+};
+
+#define PM8606_LED_RESOURCE(_i, _x) \
+{ \
+ .name = pm860x_led_name[_i], \
+ .start = PM8606_##_x, \
+ .end = PM8606_##_x, \
+ .flags = IORESOURCE_IO, \
+}
+
+static struct resource led_resources[] = {
+ PM8606_LED_RESOURCE(PM8606_LED1_RED, RGB2B),
+ PM8606_LED_RESOURCE(PM8606_LED1_GREEN, RGB2C),
+ PM8606_LED_RESOURCE(PM8606_LED1_BLUE, RGB2D),
+ PM8606_LED_RESOURCE(PM8606_LED2_RED, RGB1B),
+ PM8606_LED_RESOURCE(PM8606_LED2_GREEN, RGB1C),
+ PM8606_LED_RESOURCE(PM8606_LED2_BLUE, RGB1D),
+};
+
+#define PM8606_LED_DEVS(_i) \
+{ \
+ .name = "88pm860x-led", \
+ .num_resources = 1, \
+ .resources = &led_resources[_i], \
+ .id = _i, \
+}
+
+static struct mfd_cell led_devs[] = {
+ PM8606_LED_DEVS(PM8606_LED1_RED),
+ PM8606_LED_DEVS(PM8606_LED1_GREEN),
+ PM8606_LED_DEVS(PM8606_LED1_BLUE),
+ PM8606_LED_DEVS(PM8606_LED2_RED),
+ PM8606_LED_DEVS(PM8606_LED2_GREEN),
+ PM8606_LED_DEVS(PM8606_LED2_BLUE),
+};
+
+static struct resource touch_resources[] = {
+ {
+ .start = PM8607_IRQ_PEN,
+ .end = PM8607_IRQ_PEN,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell touch_devs[] = {
+ {
+ .name = "88pm860x-touch",
+ .num_resources = 1,
+ .resources = &touch_resources[0],
+ },
+};
+
+#define PM8607_REG_RESOURCE(_start, _end) \
+{ \
+ .start = PM8607_##_start, \
+ .end = PM8607_##_end, \
+ .flags = IORESOURCE_IO, \
+}
+
+static struct resource regulator_resources[] = {
+ PM8607_REG_RESOURCE(BUCK1, BUCK1),
+ PM8607_REG_RESOURCE(BUCK2, BUCK2),
+ PM8607_REG_RESOURCE(BUCK3, BUCK3),
+ PM8607_REG_RESOURCE(LDO1, LDO1),
+ PM8607_REG_RESOURCE(LDO2, LDO2),
+ PM8607_REG_RESOURCE(LDO3, LDO3),
+ PM8607_REG_RESOURCE(LDO4, LDO4),
+ PM8607_REG_RESOURCE(LDO5, LDO5),
+ PM8607_REG_RESOURCE(LDO6, LDO6),
+ PM8607_REG_RESOURCE(LDO7, LDO7),
+ PM8607_REG_RESOURCE(LDO8, LDO8),
+ PM8607_REG_RESOURCE(LDO9, LDO9),
+ PM8607_REG_RESOURCE(LDO10, LDO10),
+ PM8607_REG_RESOURCE(LDO12, LDO12),
+ PM8607_REG_RESOURCE(LDO14, LDO14),
+};
+
+#define PM8607_REG_DEVS(_name, _id) \
+{ \
+ .name = "88pm8607-" #_name, \
+ .num_resources = 1, \
+ .resources = &regulator_resources[PM8607_ID_##_id], \
+ .id = PM8607_ID_##_id, \
+}
+
+static struct mfd_cell regulator_devs[] = {
+ PM8607_REG_DEVS(buck1, BUCK1),
+ PM8607_REG_DEVS(buck2, BUCK2),
+ PM8607_REG_DEVS(buck3, BUCK3),
+ PM8607_REG_DEVS(ldo1, LDO1),
+ PM8607_REG_DEVS(ldo2, LDO2),
+ PM8607_REG_DEVS(ldo3, LDO3),
+ PM8607_REG_DEVS(ldo4, LDO4),
+ PM8607_REG_DEVS(ldo5, LDO5),
+ PM8607_REG_DEVS(ldo6, LDO6),
+ PM8607_REG_DEVS(ldo7, LDO7),
+ PM8607_REG_DEVS(ldo8, LDO8),
+ PM8607_REG_DEVS(ldo9, LDO9),
+ PM8607_REG_DEVS(ldo10, LDO10),
+ PM8607_REG_DEVS(ldo12, LDO12),
+ PM8607_REG_DEVS(ldo14, LDO14),
+};
+
+#define CHECK_IRQ(irq) \
+do { \
+ if ((irq < 0) || (irq >= PM860X_NUM_IRQ)) \
+ return -EINVAL; \
+} while (0)
+
+/* IRQs only occur on 88PM8607 */
+int pm860x_mask_irq(struct pm860x_chip *chip, int irq)
+{
+ struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
+ : chip->companion;
+ int offset, data, ret;
+
+ CHECK_IRQ(irq);
+
+ offset = (irq >> 3) + PM8607_INT_MASK_1;
+ data = 1 << (irq % 8);
+ ret = pm860x_set_bits(i2c, offset, data, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_mask_irq);
+
+int pm860x_unmask_irq(struct pm860x_chip *chip, int irq)
+{
+ struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
+ : chip->companion;
+ int offset, data, ret;
+
+ CHECK_IRQ(irq);
+
+ offset = (irq >> 3) + PM8607_INT_MASK_1;
+ data = 1 << (irq % 8);
+ ret = pm860x_set_bits(i2c, offset, data, data);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_unmask_irq);
+
+#define INT_STATUS_NUM (3)
+
+static irqreturn_t pm8607_irq_thread(int irq, void *data)
+{
+ DECLARE_BITMAP(irq_status, PM860X_NUM_IRQ);
+ struct pm860x_chip *chip = data;
+ struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
+ : chip->companion;
+ unsigned char status_buf[INT_STATUS_NUM << 1];
+ unsigned long value;
+ int i, ret;
+
+ irq_status[0] = 0;
+
+ /* read out status register */
+ ret = pm860x_bulk_read(i2c, PM8607_INT_STATUS1,
+ INT_STATUS_NUM << 1, status_buf);
+ if (ret < 0)
+ goto out;
+ if (chip->irq_mode) {
+ /* 0, clear by read. 1, clear by write */
+ ret = pm860x_bulk_write(i2c, PM8607_INT_STATUS1,
+ INT_STATUS_NUM, status_buf);
+ if (ret < 0)
+ goto out;
+ }
+
+ /* clear masked interrupt status */
+ for (i = 0, value = 0; i < INT_STATUS_NUM; i++) {
+ status_buf[i] &= status_buf[i + INT_STATUS_NUM];
+ irq_status[0] |= status_buf[i] << (i * 8);
+ }
+
+ while (!bitmap_empty(irq_status, PM860X_NUM_IRQ)) {
+ irq = find_first_bit(irq_status, PM860X_NUM_IRQ);
+ clear_bit(irq, irq_status);
+ dev_dbg(chip->dev, "Servicing IRQ #%d\n", irq);
+
+ mutex_lock(&chip->irq_lock);
+ if (chip->irq[irq].handler)
+ chip->irq[irq].handler(irq, chip->irq[irq].data);
+ else {
+ pm860x_mask_irq(chip, irq);
+ dev_err(chip->dev, "Nobody cares IRQ %d. "
+ "Now mask it.\n", irq);
+ for (i = 0; i < (INT_STATUS_NUM << 1); i++) {
+ dev_err(chip->dev, "status[%d]:%x\n", i,
+ status_buf[i]);
+ }
+ }
+ mutex_unlock(&chip->irq_lock);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+int pm860x_request_irq(struct pm860x_chip *chip, int irq,
+ irq_handler_t handler, void *data)
+{
+ CHECK_IRQ(irq);
+ if (!handler)
+ return -EINVAL;
+
+ mutex_lock(&chip->irq_lock);
+ chip->irq[irq].handler = handler;
+ chip->irq[irq].data = data;
+ mutex_unlock(&chip->irq_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(pm860x_request_irq);
+
+int pm860x_free_irq(struct pm860x_chip *chip, int irq)
+{
+ CHECK_IRQ(irq);
+
+ mutex_lock(&chip->irq_lock);
+ chip->irq[irq].handler = NULL;
+ chip->irq[irq].data = NULL;
+ mutex_unlock(&chip->irq_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(pm860x_free_irq);
+
+static int __devinit device_gpadc_init(struct pm860x_chip *chip,
+ struct pm860x_platform_data *pdata)
+{
+ struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
+ : chip->companion;
+ int use_gpadc = 0, data, ret;
+
+ /* initialize GPADC without activating it */
+
+ if (pdata && pdata->touch) {
+ /* set GPADC MISC1 register */
+ data = 0;
+ data |= (pdata->touch->gpadc_prebias << 1)
+ & PM8607_GPADC_PREBIAS_MASK;
+ data |= (pdata->touch->slot_cycle << 3)
+ & PM8607_GPADC_SLOT_CYCLE_MASK;
+ data |= (pdata->touch->off_scale << 5)
+ & PM8607_GPADC_OFF_SCALE_MASK;
+ data |= (pdata->touch->sw_cal << 7)
+ & PM8607_GPADC_SW_CAL_MASK;
+ if (data) {
+ ret = pm860x_reg_write(i2c, PM8607_GPADC_MISC1, data);
+ if (ret < 0)
+ goto out;
+ }
+ /* set tsi prebias time */
+ if (pdata->touch->tsi_prebias) {
+ data = pdata->touch->tsi_prebias;
+ ret = pm860x_reg_write(i2c, PM8607_TSI_PREBIAS, data);
+ if (ret < 0)
+ goto out;
+ }
+ /* set prebias & prechg time of pen detect */
+ data = 0;
+ data |= pdata->touch->pen_prebias & PM8607_PD_PREBIAS_MASK;
+ data |= (pdata->touch->pen_prechg << 5)
+ & PM8607_PD_PRECHG_MASK;
+ if (data) {
+ ret = pm860x_reg_write(i2c, PM8607_PD_PREBIAS, data);
+ if (ret < 0)
+ goto out;
+ }
+
+ use_gpadc = 1;
+ }
+
+ /* turn on GPADC */
+ if (use_gpadc) {
+ ret = pm860x_set_bits(i2c, PM8607_GPADC_MISC1,
+ PM8607_GPADC_EN, PM8607_GPADC_EN);
+ }
+out:
+ return ret;
+}
+
+static int __devinit device_irq_init(struct pm860x_chip *chip,
+ struct pm860x_platform_data *pdata)
+{
+ struct i2c_client *i2c = (chip->id == CHIP_PM8607) ? chip->client \
+ : chip->companion;
+ unsigned char status_buf[INT_STATUS_NUM];
+ int data, mask, ret = -EINVAL;
+
+ mutex_init(&chip->irq_lock);
+
+ mask = PM8607_B0_MISC1_INV_INT | PM8607_B0_MISC1_INT_CLEAR
+ | PM8607_B0_MISC1_INT_MASK;
+ data = 0;
+ chip->irq_mode = 0;
+ if (pdata && pdata->irq_mode) {
+ /*
+ * irq_mode defines the way of clearing interrupt. If it's 1,
+ * clear IRQ by write. Otherwise, clear it by read.
+ * This control bit is valid from 88PM8607 B0 steping.
+ */
+ data |= PM8607_B0_MISC1_INT_CLEAR;
+ chip->irq_mode = 1;
+ }
+ ret = pm860x_set_bits(i2c, PM8607_B0_MISC1, mask, data);
+ if (ret < 0)
+ goto out;
+
+ /* mask all IRQs */
+ memset(status_buf, 0, INT_STATUS_NUM);
+ ret = pm860x_bulk_write(i2c, PM8607_INT_MASK_1,
+ INT_STATUS_NUM, status_buf);
+ if (ret < 0)
+ goto out;
+
+ if (chip->irq_mode) {
+ /* clear interrupt status by write */
+ memset(status_buf, 0xFF, INT_STATUS_NUM);
+ ret = pm860x_bulk_write(i2c, PM8607_INT_STATUS1,
+ INT_STATUS_NUM, status_buf);
+ } else {
+ /* clear interrupt status by read */
+ ret = pm860x_bulk_read(i2c, PM8607_INT_STATUS1,
+ INT_STATUS_NUM, status_buf);
+ }
+ if (ret < 0)
+ goto out;
+
+ memset(chip->irq, 0, sizeof(struct pm860x_irq) * PM860X_NUM_IRQ);
+
+ ret = request_threaded_irq(i2c->irq, NULL, pm8607_irq_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "88PM8607", chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to request IRQ #%d.\n", i2c->irq);
+ goto out;
+ }
+ chip->chip_irq = i2c->irq;
+ return 0;
+out:
+ return ret;
+}
+
+static void __devexit device_irq_exit(struct pm860x_chip *chip)
+{
+ if (chip->chip_irq >= 0)
+ free_irq(chip->chip_irq, chip);
+}
+
+static void __devinit device_8606_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int ret;
+
+ if (pdata && pdata->backlight) {
+ ret = mfd_add_devices(chip->dev, 0, &backlight_devs[0],
+ ARRAY_SIZE(backlight_devs),
+ &backlight_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add backlight "
+ "subdev\n");
+ goto out_dev;
+ }
+ }
+
+ if (pdata && pdata->led) {
+ ret = mfd_add_devices(chip->dev, 0, &led_devs[0],
+ ARRAY_SIZE(led_devs),
+ &led_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add led "
+ "subdev\n");
+ goto out_dev;
+ }
+ }
+ return;
+out_dev:
+ mfd_remove_devices(chip->dev);
+ device_irq_exit(chip);
+}
+
+static void __devinit device_8607_init(struct pm860x_chip *chip,
+ struct i2c_client *i2c,
+ struct pm860x_platform_data *pdata)
+{
+ int data, ret;
+
+ ret = pm860x_reg_read(i2c, PM8607_CHIP_ID);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read CHIP ID: %d\n", ret);
+ goto out;
+ }
+ if ((ret & PM8607_VERSION_MASK) == PM8607_VERSION)
+ dev_info(chip->dev, "Marvell 88PM8607 (ID: %02x) detected\n",
+ ret);
+ else {
+ dev_err(chip->dev, "Failed to detect Marvell 88PM8607. "
+ "Chip ID: %02x\n", ret);
+ goto out;
+ }
+
+ ret = pm860x_reg_read(i2c, PM8607_BUCK3);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read BUCK3 register: %d\n", ret);
+ goto out;
+ }
+ if (ret & PM8607_BUCK3_DOUBLE)
+ chip->buck3_double = 1;
+
+ ret = pm860x_reg_read(i2c, PM8607_B0_MISC1);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to read MISC1 register: %d\n", ret);
+ goto out;
+ }
+
+ if (pdata && (pdata->i2c_port == PI2C_PORT))
+ data = PM8607_B0_MISC1_PI2C;
+ else
+ data = 0;
+ ret = pm860x_set_bits(i2c, PM8607_B0_MISC1, PM8607_B0_MISC1_PI2C, data);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to access MISC1:%d\n", ret);
+ goto out;
+ }
+
+ ret = device_gpadc_init(chip, pdata);
+ if (ret < 0)
+ goto out;
+
+ ret = device_irq_init(chip, pdata);
+ if (ret < 0)
+ goto out;
+
+ ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
+ ARRAY_SIZE(regulator_devs),
+ &regulator_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add regulator subdev\n");
+ goto out_dev;
+ }
+
+ if (pdata && pdata->touch) {
+ ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
+ ARRAY_SIZE(touch_devs),
+ &touch_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add touch "
+ "subdev\n");
+ goto out_dev;
+ }
+ }
+ return;
+out_dev:
+ mfd_remove_devices(chip->dev);
+ device_irq_exit(chip);
+out:
+ return;
+}
+
+int pm860x_device_init(struct pm860x_chip *chip,
+ struct pm860x_platform_data *pdata)
+{
+ chip->chip_irq = -EINVAL;
+
+ switch (chip->id) {
+ case CHIP_PM8606:
+ device_8606_init(chip, chip->client, pdata);
+ break;
+ case CHIP_PM8607:
+ device_8607_init(chip, chip->client, pdata);
+ break;
+ }
+
+ if (chip->companion) {
+ switch (chip->id) {
+ case CHIP_PM8607:
+ device_8606_init(chip, chip->companion, pdata);
+ break;
+ case CHIP_PM8606:
+ device_8607_init(chip, chip->companion, pdata);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+void pm860x_device_exit(struct pm860x_chip *chip)
+{
+ device_irq_exit(chip);
+ mfd_remove_devices(chip->dev);
+}
+
+MODULE_DESCRIPTION("PMIC Driver for Marvell 88PM860x");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
new file mode 100644
index 000000000000..6d7dba2bce8a
--- /dev/null
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -0,0 +1,268 @@
+/*
+ * I2C driver for Marvell 88PM860x
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/mfd/88pm860x.h>
+
+static inline int pm860x_read_device(struct i2c_client *i2c,
+ int reg, int bytes, void *dest)
+{
+ unsigned char data;
+ int ret;
+
+ data = (unsigned char)reg;
+ ret = i2c_master_send(i2c, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, dest, bytes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int pm860x_write_device(struct i2c_client *i2c,
+ int reg, int bytes, void *src)
+{
+ unsigned char buf[bytes + 1];
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+
+ ret = i2c_master_send(i2c, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+int pm860x_reg_read(struct i2c_client *i2c, int reg)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = pm860x_read_device(i2c, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ if (ret < 0)
+ return ret;
+ else
+ return (int)data;
+}
+EXPORT_SYMBOL(pm860x_reg_read);
+
+int pm860x_reg_write(struct i2c_client *i2c, int reg,
+ unsigned char data)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = pm860x_write_device(i2c, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_reg_write);
+
+int pm860x_bulk_read(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = pm860x_read_device(i2c, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_bulk_read);
+
+int pm860x_bulk_write(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = pm860x_write_device(i2c, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_bulk_write);
+
+int pm860x_set_bits(struct i2c_client *i2c, int reg,
+ unsigned char mask, unsigned char data)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = pm860x_read_device(i2c, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = pm860x_write_device(i2c, reg, 1, &value);
+out:
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(pm860x_set_bits);
+
+
+static const struct i2c_device_id pm860x_id_table[] = {
+ { "88PM860x", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pm860x_id_table);
+
+static int verify_addr(struct i2c_client *i2c)
+{
+ unsigned short addr_8607[] = {0x30, 0x34};
+ unsigned short addr_8606[] = {0x10, 0x11};
+ int size, i;
+
+ if (i2c == NULL)
+ return 0;
+ size = ARRAY_SIZE(addr_8606);
+ for (i = 0; i < size; i++) {
+ if (i2c->addr == *(addr_8606 + i))
+ return CHIP_PM8606;
+ }
+ size = ARRAY_SIZE(addr_8607);
+ for (i = 0; i < size; i++) {
+ if (i2c->addr == *(addr_8607 + i))
+ return CHIP_PM8607;
+ }
+ return 0;
+}
+
+static int __devinit pm860x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pm860x_platform_data *pdata = client->dev.platform_data;
+ static struct pm860x_chip *chip;
+ struct i2c_board_info i2c_info = {
+ .type = "88PM860x",
+ .platform_data = client->dev.platform_data,
+ };
+ int addr_c, found_companion = 0;
+
+ if (pdata == NULL) {
+ pr_info("No platform data in %s!\n", __func__);
+ return -EINVAL;
+ }
+
+ /*
+ * Both client and companion client shares same platform driver.
+ * Driver distinguishes them by pdata->companion_addr.
+ * pdata->companion_addr is only assigned if companion chip exists.
+ * At the same time, the companion_addr shouldn't equal to client
+ * address.
+ */
+ addr_c = pdata->companion_addr;
+ if (addr_c && (addr_c != client->addr)) {
+ i2c_info.addr = addr_c;
+ found_companion = 1;
+ }
+
+ if (found_companion || (addr_c == 0)) {
+ chip = kzalloc(sizeof(struct pm860x_chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+
+ chip->id = verify_addr(client);
+ chip->companion_addr = addr_c;
+ chip->client = client;
+ i2c_set_clientdata(client, chip);
+ chip->dev = &client->dev;
+ mutex_init(&chip->io_lock);
+ dev_set_drvdata(chip->dev, chip);
+
+ if (found_companion) {
+ /*
+ * If this driver is built in, probe function is
+ * recursive.
+ * If this driver is built as module, the next probe
+ * function is called after the first one finished.
+ */
+ chip->companion = i2c_new_device(client->adapter,
+ &i2c_info);
+ }
+ }
+
+ /*
+ * If companion chip existes, it's called by companion probe.
+ * If there's no companion chip, it's called by client probe.
+ */
+ if ((addr_c == 0) || (addr_c == client->addr)) {
+ chip->companion = client;
+ i2c_set_clientdata(chip->companion, chip);
+ pm860x_device_init(chip, pdata);
+ }
+ return 0;
+}
+
+static int __devexit pm860x_remove(struct i2c_client *client)
+{
+ struct pm860x_chip *chip = i2c_get_clientdata(client);
+
+ /*
+ * If companion existes, companion client is removed first.
+ * Because companion client is registered last and removed first.
+ */
+ if (chip->companion_addr == client->addr)
+ return 0;
+ pm860x_device_exit(chip);
+ i2c_unregister_device(chip->companion);
+ i2c_set_clientdata(chip->companion, NULL);
+ i2c_set_clientdata(chip->client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+static struct i2c_driver pm860x_driver = {
+ .driver = {
+ .name = "88PM860x",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm860x_probe,
+ .remove = __devexit_p(pm860x_remove),
+ .id_table = pm860x_id_table,
+};
+
+static int __init pm860x_i2c_init(void)
+{
+ int ret;
+ ret = i2c_add_driver(&pm860x_driver);
+ if (ret != 0)
+ pr_err("Failed to register 88PM860x I2C driver: %d\n", ret);
+ return ret;
+}
+subsys_initcall(pm860x_i2c_init);
+
+static void __exit pm860x_i2c_exit(void)
+{
+ i2c_del_driver(&pm860x_driver);
+}
+module_exit(pm860x_i2c_exit);
+
+MODULE_DESCRIPTION("I2C Driver for Marvell 88PM860x");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 87829789243e..6099d5d8c4a2 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -9,6 +9,16 @@ config MFD_CORE
tristate
default n
+config MFD_88PM860X
+ bool "Support Marvell 88PM8606/88PM8607"
+ depends on I2C=y
+ select MFD_CORE
+ help
+ This supports for Marvell 88PM8606/88PM8607 Power Management IC.
+ This includes the I2C driver and the core APIs _only_, you have to
+ select individual components like voltage regulators, RTC and
+ battery-charger under the corresponding menus.
+
config MFD_SM501
tristate "Support for Silicon Motion SM501"
---help---
@@ -184,6 +194,16 @@ config PMIC_ADP5520
individual components like LCD backlight, LEDs, GPIOs and Kepad
under the corresponding menus.
+config MFD_MAX8925
+ tristate "Maxim Semiconductor MAX8925 PMIC Support"
+ depends on I2C
+ select MFD_CORE
+ help
+ Say yes here to support for Maxim Semiconductor MAX8925. This is
+ a Power Management IC. This driver provies common support for
+ accessing the device, additional drivers must be enabled in order
+ to use the functionality of the device.
+
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -205,7 +225,7 @@ config MFD_WM831X
functionality of the device.
config MFD_WM8350
- tristate
+ bool
config MFD_WM8350_CONFIG_MODE_0
bool
@@ -256,9 +276,9 @@ config MFD_WM8352_CONFIG_MODE_3
depends on MFD_WM8350
config MFD_WM8350_I2C
- tristate "Support Wolfson Microelectronics WM8350 with I2C"
+ bool "Support Wolfson Microelectronics WM8350 with I2C"
select MFD_WM8350
- depends on I2C
+ depends on I2C=y
help
The WM8350 is an integrated audio and power management
subsystem with watchdog and RTC functionality for embedded
@@ -329,16 +349,6 @@ config EZX_PCAP
This enables the PCAP ASIC present on EZX Phones. This is
needed for MMC, TouchScreen, Sound, USB, etc..
-config MFD_88PM8607
- bool "Support Marvell 88PM8607"
- depends on I2C=y
- select MFD_CORE
- help
- This supports for Marvell 88PM8607 Power Management IC. This includes
- the I2C driver and the core APIs _only_, you have to select
- individual components like voltage regulators, RTC and
- battery-charger under the corresponding menus.
-
config AB4500_CORE
tristate "ST-Ericsson's AB4500 Mixed Signal Power management chip"
depends on SPI
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index ca2f2c4ff05e..2b5f7070b4fb 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -2,6 +2,8 @@
# Makefile for multifunction miscellaneous devices
#
+88pm860x-objs := 88pm860x-core.o 88pm860x-i2c.o
+obj-$(CONFIG_MFD_88PM860X) += 88pm860x.o
obj-$(CONFIG_MFD_SM501) += sm501.o
obj-$(CONFIG_MFD_ASIC3) += asic3.o
obj-$(CONFIG_MFD_SH_MOBILE_SDHI) += sh_mobile_sdhi.o
@@ -11,9 +13,9 @@ obj-$(CONFIG_HTC_PASIC3) += htc-pasic3.o
obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
-obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o
-obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o
-obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o
+obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
+obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
+obj-$(CONFIG_MFD_TC6393XB) += tc6393xb.o tmio_core.o
obj-$(CONFIG_MFD_WM8400) += wm8400-core.o
wm831x-objs := wm831x-core.o wm831x-irq.o wm831x-otp.o
@@ -47,6 +49,8 @@ endif
obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
+max8925-objs := max8925-core.o max8925-i2c.o
+obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_PCF50633) += pcf50633-core.o
obj-$(CONFIG_PCF50633_ADC) += pcf50633-adc.o
@@ -54,5 +58,4 @@ obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
obj-$(CONFIG_AB4500_CORE) += ab4500-core.o
-obj-$(CONFIG_MFD_88PM8607) += 88pm8607.o
-obj-$(CONFIG_PMIC_ADP5520) += adp5520.o \ No newline at end of file
+obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index fd42a80e7bf9..aa3824a1b4f2 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -365,10 +365,13 @@ int ab3100_event_registers_startup_state_get(struct ab3100 *ab3100,
}
EXPORT_SYMBOL(ab3100_event_registers_startup_state_get);
-/* Interrupt handling worker */
-static void ab3100_work(struct work_struct *work)
+/*
+ * This is a threaded interrupt handler so we can make some
+ * I2C calls etc.
+ */
+static irqreturn_t ab3100_irq_handler(int irq, void *data)
{
- struct ab3100 *ab3100 = container_of(work, struct ab3100, work);
+ struct ab3100 *ab3100 = data;
u8 event_regs[3];
u32 fatevent;
int err;
@@ -376,7 +379,7 @@ static void ab3100_work(struct work_struct *work)
err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
event_regs, 3);
if (err)
- goto err_event_wq;
+ goto err_event;
fatevent = (event_regs[0] << 16) |
(event_regs[1] << 8) |
@@ -398,29 +401,11 @@ static void ab3100_work(struct work_struct *work)
dev_dbg(ab3100->dev,
"IRQ Event: 0x%08x\n", fatevent);
- /* By now the IRQ should be acked and deasserted so enable it again */
- enable_irq(ab3100->i2c_client->irq);
- return;
+ return IRQ_HANDLED;
- err_event_wq:
+ err_event:
dev_dbg(ab3100->dev,
- "error in event workqueue\n");
- /* Enable the IRQ anyway, what choice do we have? */
- enable_irq(ab3100->i2c_client->irq);
- return;
-}
-
-static irqreturn_t ab3100_irq_handler(int irq, void *data)
-{
- struct ab3100 *ab3100 = data;
- /*
- * Disable the IRQ and dispatch a worker to handle the
- * event. Since the chip resides on I2C this is slow
- * stuff and we will re-enable the interrupts once th
- * worker has finished.
- */
- disable_irq_nosync(irq);
- schedule_work(&ab3100->work);
+ "error reading event status\n");
return IRQ_HANDLED;
}
@@ -904,12 +889,10 @@ static int __init ab3100_probe(struct i2c_client *client,
if (err)
goto exit_no_setup;
- INIT_WORK(&ab3100->work, ab3100_work);
-
/* This real unpredictable IRQ is of course sampled for entropy */
- err = request_irq(client->irq, ab3100_irq_handler,
- IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
- "AB3100 IRQ", ab3100);
+ err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
+ IRQF_ONESHOT,
+ "ab3100-core", ab3100);
if (err)
goto exit_no_irq;
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index e22128c3e9a8..1dc6d05fb902 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -80,6 +80,7 @@ struct asic3 {
u16 irq_bothedge[4];
struct gpio_chip gpio;
struct device *dev;
+ void __iomem *tmio_cnf;
struct asic3_clk clocks[ARRAY_SIZE(asic3_clk_init)];
};
@@ -685,8 +686,24 @@ static struct mfd_cell asic3_cell_ds1wm = {
.resources = ds1wm_resources,
};
+static void asic3_mmc_pwr(struct platform_device *pdev, int state)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ tmio_core_mmc_pwr(asic->tmio_cnf, 1 - asic->bus_shift, state);
+}
+
+static void asic3_mmc_clk_div(struct platform_device *pdev, int state)
+{
+ struct asic3 *asic = dev_get_drvdata(pdev->dev.parent);
+
+ tmio_core_mmc_clk_div(asic->tmio_cnf, 1 - asic->bus_shift, state);
+}
+
static struct tmio_mmc_data asic3_mmc_data = {
- .hclk = 24576000,
+ .hclk = 24576000,
+ .set_pwr = asic3_mmc_pwr,
+ .set_clk_div = asic3_mmc_clk_div,
};
static struct resource asic3_mmc_resources[] = {
@@ -696,11 +713,6 @@ static struct resource asic3_mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = ASIC3_SD_CONFIG_BASE,
- .end = ASIC3_SD_CONFIG_BASE + 0x1ff,
- .flags = IORESOURCE_MEM,
- },
- {
.start = 0,
.end = 0,
.flags = IORESOURCE_IRQ,
@@ -743,6 +755,10 @@ static int asic3_mmc_enable(struct platform_device *pdev)
asic3_set_register(asic, ASIC3_OFFSET(SDHWCTRL, SDCONF),
ASIC3_SDHWCTRL_SDPWR, 1);
+ /* ASIC3_SD_CTRL_BASE assumes 32-bit addressing, TMIO is 16-bit */
+ tmio_core_mmc_enable(asic->tmio_cnf, 1 - asic->bus_shift,
+ ASIC3_SD_CTRL_BASE >> 1);
+
return 0;
}
@@ -766,6 +782,8 @@ static struct mfd_cell asic3_cell_mmc = {
.name = "tmio-mmc",
.enable = asic3_mmc_enable,
.disable = asic3_mmc_disable,
+ .suspend = asic3_mmc_disable,
+ .resume = asic3_mmc_enable,
.driver_data = &asic3_mmc_data,
.num_resources = ARRAY_SIZE(asic3_mmc_resources),
.resources = asic3_mmc_resources,
@@ -797,14 +815,21 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
asic3_cell_ds1wm.data_size = sizeof(asic3_cell_ds1wm);
/* MMC */
+ asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) +
+ mem_sdio->start, 0x400 >> asic->bus_shift);
+ if (!asic->tmio_cnf) {
+ ret = -ENOMEM;
+ dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n");
+ goto out;
+ }
asic3_mmc_resources[0].start >>= asic->bus_shift;
asic3_mmc_resources[0].end >>= asic->bus_shift;
- asic3_mmc_resources[1].start >>= asic->bus_shift;
- asic3_mmc_resources[1].end >>= asic->bus_shift;
asic3_cell_mmc.platform_data = &asic3_cell_mmc;
asic3_cell_mmc.data_size = sizeof(asic3_cell_mmc);
+ tmio_core_set_bus_shift(1 - asic->bus_shift);
+
ret = mfd_add_devices(&pdev->dev, pdev->id,
&asic3_cell_ds1wm, 1, mem, asic->irq_base);
if (ret < 0)
@@ -820,7 +845,10 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
static void asic3_mfd_remove(struct platform_device *pdev)
{
+ struct asic3 *asic = platform_get_drvdata(pdev);
+
mfd_remove_devices(&pdev->dev);
+ iounmap(asic->tmio_cnf);
}
/* Core */
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
new file mode 100644
index 000000000000..f36c494b80f1
--- /dev/null
+++ b/drivers/mfd/max8925-core.c
@@ -0,0 +1,404 @@
+/*
+ * Base driver for Maxim MAX8925
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/max8925.h>
+
+#define IRQ_MODE_STATUS 0
+#define IRQ_MODE_MASK 1
+
+static struct resource backlight_resources[] = {
+ {
+ .name = "max8925-backlight",
+ .start = MAX8925_WLED_MODE_CNTL,
+ .end = MAX8925_WLED_CNTL,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct mfd_cell backlight_devs[] = {
+ {
+ .name = "max8925-backlight",
+ .num_resources = 1,
+ .resources = &backlight_resources[0],
+ .id = -1,
+ },
+};
+
+static struct resource touch_resources[] = {
+ {
+ .name = "max8925-tsc",
+ .start = MAX8925_TSC_IRQ,
+ .end = MAX8925_ADC_RES_END,
+ .flags = IORESOURCE_IO,
+ },
+};
+
+static struct mfd_cell touch_devs[] = {
+ {
+ .name = "max8925-touch",
+ .num_resources = 1,
+ .resources = &touch_resources[0],
+ .id = -1,
+ },
+};
+
+#define MAX8925_REG_RESOURCE(_start, _end) \
+{ \
+ .start = MAX8925_##_start, \
+ .end = MAX8925_##_end, \
+ .flags = IORESOURCE_IO, \
+}
+
+static struct resource regulator_resources[] = {
+ MAX8925_REG_RESOURCE(SDCTL1, SDCTL1),
+ MAX8925_REG_RESOURCE(SDCTL2, SDCTL2),
+ MAX8925_REG_RESOURCE(SDCTL3, SDCTL3),
+ MAX8925_REG_RESOURCE(LDOCTL1, LDOCTL1),
+ MAX8925_REG_RESOURCE(LDOCTL2, LDOCTL2),
+ MAX8925_REG_RESOURCE(LDOCTL3, LDOCTL3),
+ MAX8925_REG_RESOURCE(LDOCTL4, LDOCTL4),
+ MAX8925_REG_RESOURCE(LDOCTL5, LDOCTL5),
+ MAX8925_REG_RESOURCE(LDOCTL6, LDOCTL6),
+ MAX8925_REG_RESOURCE(LDOCTL7, LDOCTL7),
+ MAX8925_REG_RESOURCE(LDOCTL8, LDOCTL8),
+ MAX8925_REG_RESOURCE(LDOCTL9, LDOCTL9),
+ MAX8925_REG_RESOURCE(LDOCTL10, LDOCTL10),
+ MAX8925_REG_RESOURCE(LDOCTL11, LDOCTL11),
+ MAX8925_REG_RESOURCE(LDOCTL12, LDOCTL12),
+ MAX8925_REG_RESOURCE(LDOCTL13, LDOCTL13),
+ MAX8925_REG_RESOURCE(LDOCTL14, LDOCTL14),
+ MAX8925_REG_RESOURCE(LDOCTL15, LDOCTL15),
+ MAX8925_REG_RESOURCE(LDOCTL16, LDOCTL16),
+ MAX8925_REG_RESOURCE(LDOCTL17, LDOCTL17),
+ MAX8925_REG_RESOURCE(LDOCTL18, LDOCTL18),
+ MAX8925_REG_RESOURCE(LDOCTL19, LDOCTL19),
+ MAX8925_REG_RESOURCE(LDOCTL20, LDOCTL20),
+};
+
+#define MAX8925_REG_DEVS(_id) \
+{ \
+ .name = "max8925-regulator", \
+ .num_resources = 1, \
+ .resources = &regulator_resources[MAX8925_ID_##_id], \
+ .id = MAX8925_ID_##_id, \
+}
+
+static struct mfd_cell regulator_devs[] = {
+ MAX8925_REG_DEVS(SD1),
+ MAX8925_REG_DEVS(SD2),
+ MAX8925_REG_DEVS(SD3),
+ MAX8925_REG_DEVS(LDO1),
+ MAX8925_REG_DEVS(LDO2),
+ MAX8925_REG_DEVS(LDO3),
+ MAX8925_REG_DEVS(LDO4),
+ MAX8925_REG_DEVS(LDO5),
+ MAX8925_REG_DEVS(LDO6),
+ MAX8925_REG_DEVS(LDO7),
+ MAX8925_REG_DEVS(LDO8),
+ MAX8925_REG_DEVS(LDO9),
+ MAX8925_REG_DEVS(LDO10),
+ MAX8925_REG_DEVS(LDO11),
+ MAX8925_REG_DEVS(LDO12),
+ MAX8925_REG_DEVS(LDO13),
+ MAX8925_REG_DEVS(LDO14),
+ MAX8925_REG_DEVS(LDO15),
+ MAX8925_REG_DEVS(LDO16),
+ MAX8925_REG_DEVS(LDO17),
+ MAX8925_REG_DEVS(LDO18),
+ MAX8925_REG_DEVS(LDO19),
+ MAX8925_REG_DEVS(LDO20),
+};
+
+static int __get_irq_offset(struct max8925_chip *chip, int irq, int mode,
+ int *offset, int *bit)
+{
+ if (!offset || !bit)
+ return -EINVAL;
+
+ switch (chip->chip_id) {
+ case MAX8925_GPM:
+ *bit = irq % BITS_PER_BYTE;
+ if (irq < (BITS_PER_BYTE << 1)) { /* irq = [0,15] */
+ *offset = (mode) ? MAX8925_CHG_IRQ1_MASK
+ : MAX8925_CHG_IRQ1;
+ if (irq >= BITS_PER_BYTE)
+ (*offset)++;
+ } else { /* irq = [16,31] */
+ *offset = (mode) ? MAX8925_ON_OFF_IRQ1_MASK
+ : MAX8925_ON_OFF_IRQ1;
+ if (irq >= (BITS_PER_BYTE * 3))
+ (*offset)++;
+ }
+ break;
+ case MAX8925_ADC:
+ *bit = irq % BITS_PER_BYTE;
+ *offset = (mode) ? MAX8925_TSC_IRQ_MASK : MAX8925_TSC_IRQ;
+ break;
+ default:
+ goto out;
+ }
+ return 0;
+out:
+ dev_err(chip->dev, "Wrong irq #%d is assigned\n", irq);
+ return -EINVAL;
+}
+
+static int __check_irq(int irq)
+{
+ if ((irq < 0) || (irq >= MAX8925_NUM_IRQ))
+ return -EINVAL;
+ return 0;
+}
+
+int max8925_mask_irq(struct max8925_chip *chip, int irq)
+{
+ int offset, bit, ret;
+
+ ret = __get_irq_offset(chip, irq, IRQ_MODE_MASK, &offset, &bit);
+ if (ret < 0)
+ return ret;
+ ret = max8925_set_bits(chip->i2c, offset, 1 << bit, 1 << bit);
+ return ret;
+}
+
+int max8925_unmask_irq(struct max8925_chip *chip, int irq)
+{
+ int offset, bit, ret;
+
+ ret = __get_irq_offset(chip, irq, IRQ_MODE_MASK, &offset, &bit);
+ if (ret < 0)
+ return ret;
+ ret = max8925_set_bits(chip->i2c, offset, 1 << bit, 0);
+ return ret;
+}
+
+#define INT_STATUS_NUM (MAX8925_NUM_IRQ / BITS_PER_BYTE)
+
+static irqreturn_t max8925_irq_thread(int irq, void *data)
+{
+ struct max8925_chip *chip = data;
+ unsigned long irq_status[INT_STATUS_NUM];
+ unsigned char status_buf[INT_STATUS_NUM << 1];
+ int i, ret;
+
+ memset(irq_status, 0, sizeof(unsigned long) * INT_STATUS_NUM);
+
+ /* all these interrupt status registers are read-only */
+ switch (chip->chip_id) {
+ case MAX8925_GPM:
+ ret = max8925_bulk_read(chip->i2c, MAX8925_CHG_IRQ1,
+ 4, status_buf);
+ if (ret < 0)
+ goto out;
+ ret = max8925_bulk_read(chip->i2c, MAX8925_ON_OFF_IRQ1,
+ 2, &status_buf[4]);
+ if (ret < 0)
+ goto out;
+ ret = max8925_bulk_read(chip->i2c, MAX8925_ON_OFF_IRQ2,
+ 2, &status_buf[6]);
+ if (ret < 0)
+ goto out;
+ /* clear masked interrupt status */
+ status_buf[0] &= (~status_buf[2] & CHG_IRQ1_MASK);
+ irq_status[0] |= status_buf[0];
+ status_buf[1] &= (~status_buf[3] & CHG_IRQ2_MASK);
+ irq_status[0] |= (status_buf[1] << BITS_PER_BYTE);
+ status_buf[4] &= (~status_buf[5] & ON_OFF_IRQ1_MASK);
+ irq_status[0] |= (status_buf[4] << (BITS_PER_BYTE * 2));
+ status_buf[6] &= (~status_buf[7] & ON_OFF_IRQ2_MASK);
+ irq_status[0] |= (status_buf[6] << (BITS_PER_BYTE * 3));
+ break;
+ case MAX8925_ADC:
+ ret = max8925_bulk_read(chip->i2c, MAX8925_TSC_IRQ,
+ 2, status_buf);
+ if (ret < 0)
+ goto out;
+ /* clear masked interrupt status */
+ status_buf[0] &= (~status_buf[1] & TSC_IRQ_MASK);
+ irq_status[0] |= status_buf[0];
+ break;
+ default:
+ goto out;
+ }
+
+ for_each_bit(i, &irq_status[0], MAX8925_NUM_IRQ) {
+ clear_bit(i, irq_status);
+ dev_dbg(chip->dev, "Servicing IRQ #%d in %s\n", i, chip->name);
+
+ mutex_lock(&chip->irq_lock);
+ if (chip->irq[i].handler)
+ chip->irq[i].handler(i, chip->irq[i].data);
+ else {
+ max8925_mask_irq(chip, i);
+ dev_err(chip->dev, "Noboday cares IRQ #%d in %s. "
+ "Now mask it.\n", i, chip->name);
+ }
+ mutex_unlock(&chip->irq_lock);
+ }
+out:
+ return IRQ_HANDLED;
+}
+
+int max8925_request_irq(struct max8925_chip *chip, int irq,
+ irq_handler_t handler, void *data)
+{
+ if ((__check_irq(irq) < 0) || !handler)
+ return -EINVAL;
+
+ mutex_lock(&chip->irq_lock);
+ chip->irq[irq].handler = handler;
+ chip->irq[irq].data = data;
+ mutex_unlock(&chip->irq_lock);
+ return 0;
+}
+EXPORT_SYMBOL(max8925_request_irq);
+
+int max8925_free_irq(struct max8925_chip *chip, int irq)
+{
+ if (__check_irq(irq) < 0)
+ return -EINVAL;
+
+ mutex_lock(&chip->irq_lock);
+ chip->irq[irq].handler = NULL;
+ chip->irq[irq].data = NULL;
+ mutex_unlock(&chip->irq_lock);
+ return 0;
+}
+EXPORT_SYMBOL(max8925_free_irq);
+
+static int __devinit device_gpm_init(struct max8925_chip *chip,
+ struct i2c_client *i2c,
+ struct max8925_platform_data *pdata)
+{
+ int ret;
+
+ /* mask all IRQs */
+ ret = max8925_set_bits(i2c, MAX8925_CHG_IRQ1_MASK, 0x7, 0x7);
+ if (ret < 0)
+ goto out;
+ ret = max8925_set_bits(i2c, MAX8925_CHG_IRQ2_MASK, 0xff, 0xff);
+ if (ret < 0)
+ goto out;
+ ret = max8925_set_bits(i2c, MAX8925_ON_OFF_IRQ1_MASK, 0xff, 0xff);
+ if (ret < 0)
+ goto out;
+ ret = max8925_set_bits(i2c, MAX8925_ON_OFF_IRQ2_MASK, 0x3, 0x3);
+ if (ret < 0)
+ goto out;
+
+ chip->name = "GPM";
+ memset(chip->irq, 0, sizeof(struct max8925_irq) * MAX8925_NUM_IRQ);
+ ret = request_threaded_irq(i2c->irq, NULL, max8925_irq_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "max8925-gpm", chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to request IRQ #%d.\n", i2c->irq);
+ goto out;
+ }
+ chip->chip_irq = i2c->irq;
+
+ /* enable hard-reset for ONKEY power-off */
+ max8925_set_bits(i2c, MAX8925_SYSENSEL, 0x80, 0x80);
+
+ ret = mfd_add_devices(chip->dev, 0, &regulator_devs[0],
+ ARRAY_SIZE(regulator_devs),
+ &regulator_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add regulator subdev\n");
+ goto out_irq;
+ }
+
+ if (pdata && pdata->backlight) {
+ ret = mfd_add_devices(chip->dev, 0, &backlight_devs[0],
+ ARRAY_SIZE(backlight_devs),
+ &backlight_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add backlight subdev\n");
+ goto out_dev;
+ }
+ }
+ return 0;
+out_dev:
+ mfd_remove_devices(chip->dev);
+out_irq:
+ if (chip->chip_irq)
+ free_irq(chip->chip_irq, chip);
+out:
+ return ret;
+}
+
+static int __devinit device_adc_init(struct max8925_chip *chip,
+ struct i2c_client *i2c,
+ struct max8925_platform_data *pdata)
+{
+ int ret;
+
+ /* mask all IRQs */
+ ret = max8925_set_bits(i2c, MAX8925_TSC_IRQ_MASK, 3, 3);
+
+ chip->name = "ADC";
+ memset(chip->irq, 0, sizeof(struct max8925_irq) * MAX8925_NUM_IRQ);
+ ret = request_threaded_irq(i2c->irq, NULL, max8925_irq_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ "max8925-adc", chip);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to request IRQ #%d.\n", i2c->irq);
+ goto out;
+ }
+ chip->chip_irq = i2c->irq;
+
+ if (pdata && pdata->touch) {
+ ret = mfd_add_devices(chip->dev, 0, &touch_devs[0],
+ ARRAY_SIZE(touch_devs),
+ &touch_resources[0], 0);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to add touch subdev\n");
+ goto out_irq;
+ }
+ }
+ return 0;
+out_irq:
+ if (chip->chip_irq)
+ free_irq(chip->chip_irq, chip);
+out:
+ return ret;
+}
+
+int __devinit max8925_device_init(struct max8925_chip *chip,
+ struct max8925_platform_data *pdata)
+{
+ switch (chip->chip_id) {
+ case MAX8925_GPM:
+ device_gpm_init(chip, chip->i2c, pdata);
+ break;
+ case MAX8925_ADC:
+ device_adc_init(chip, chip->i2c, pdata);
+ break;
+ }
+ return 0;
+}
+
+void max8925_device_exit(struct max8925_chip *chip)
+{
+ if (chip->chip_irq >= 0)
+ free_irq(chip->chip_irq, chip);
+ mfd_remove_devices(chip->dev);
+}
+
+MODULE_DESCRIPTION("PMIC Driver for Maxim MAX8925");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
new file mode 100644
index 000000000000..942068e730f9
--- /dev/null
+++ b/drivers/mfd/max8925-i2c.c
@@ -0,0 +1,210 @@
+/*
+ * I2C driver for Maxim MAX8925
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/mfd/max8925.h>
+
+static inline int max8925_read_device(struct i2c_client *i2c,
+ int reg, int bytes, void *dest)
+{
+ unsigned char data;
+ unsigned char *buf;
+ int ret;
+
+ buf = kzalloc(bytes + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ data = (unsigned char)reg;
+ ret = i2c_master_send(i2c, &data, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_master_recv(i2c, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ memcpy(dest, buf, bytes);
+ return 0;
+}
+
+static inline int max8925_write_device(struct i2c_client *i2c,
+ int reg, int bytes, void *src)
+{
+ unsigned char buf[bytes + 1];
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+
+ ret = i2c_master_send(i2c, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+int max8925_reg_read(struct i2c_client *i2c, int reg)
+{
+ struct max8925_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = max8925_read_device(i2c, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ if (ret < 0)
+ return ret;
+ else
+ return (int)data;
+}
+EXPORT_SYMBOL(max8925_reg_read);
+
+int max8925_reg_write(struct i2c_client *i2c, int reg,
+ unsigned char data)
+{
+ struct max8925_chip *chip = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = max8925_write_device(i2c, reg, 1, &data);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(max8925_reg_write);
+
+int max8925_bulk_read(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct max8925_chip *chip = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = max8925_read_device(i2c, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(max8925_bulk_read);
+
+int max8925_bulk_write(struct i2c_client *i2c, int reg,
+ int count, unsigned char *buf)
+{
+ struct max8925_chip *chip = i2c_get_clientdata(i2c);
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = max8925_write_device(i2c, reg, count, buf);
+ mutex_unlock(&chip->io_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(max8925_bulk_write);
+
+int max8925_set_bits(struct i2c_client *i2c, int reg,
+ unsigned char mask, unsigned char data)
+{
+ struct max8925_chip *chip = i2c_get_clientdata(i2c);
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&chip->io_lock);
+ ret = max8925_read_device(i2c, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = max8925_write_device(i2c, reg, 1, &value);
+out:
+ mutex_unlock(&chip->io_lock);
+ return ret;
+}
+EXPORT_SYMBOL(max8925_set_bits);
+
+
+static const struct i2c_device_id max8925_id_table[] = {
+ { "max8925", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, max8925_id_table);
+
+static int __devinit max8925_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max8925_platform_data *pdata = client->dev.platform_data;
+ struct max8925_chip *chip;
+
+ if (!pdata) {
+ pr_info("%s: platform data is missing\n", __func__);
+ return -EINVAL;
+ }
+ if ((pdata->chip_id <= MAX8925_INVALID)
+ || (pdata->chip_id >= MAX8925_MAX)) {
+ pr_info("#%s: wrong chip identification\n", __func__);
+ return -EINVAL;
+ }
+
+ chip = kzalloc(sizeof(struct max8925_chip), GFP_KERNEL);
+ if (chip == NULL)
+ return -ENOMEM;
+ chip->i2c = client;
+ chip->chip_id = pdata->chip_id;
+ i2c_set_clientdata(client, chip);
+ chip->dev = &client->dev;
+ mutex_init(&chip->io_lock);
+ dev_set_drvdata(chip->dev, chip);
+ max8925_device_init(chip, pdata);
+
+ return 0;
+}
+
+static int __devexit max8925_remove(struct i2c_client *client)
+{
+ struct max8925_chip *chip = i2c_get_clientdata(client);
+
+ max8925_device_exit(chip);
+ i2c_set_clientdata(client, NULL);
+ kfree(chip);
+ return 0;
+}
+
+static struct i2c_driver max8925_driver = {
+ .driver = {
+ .name = "max8925",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8925_probe,
+ .remove = __devexit_p(max8925_remove),
+ .id_table = max8925_id_table,
+};
+
+static int __init max8925_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&max8925_driver);
+ if (ret != 0)
+ pr_err("Failed to register MAX8925 I2C driver: %d\n", ret);
+ return ret;
+}
+subsys_initcall(max8925_i2c_init);
+
+static void __exit max8925_i2c_exit(void)
+{
+ i2c_del_driver(&max8925_driver);
+}
+module_exit(max8925_i2c_exit);
+
+MODULE_DESCRIPTION("I2C Driver for Maxim 8925");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index a1ade2324ea9..735c8a4d164f 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -619,6 +619,8 @@ err_revision:
}
/* This should go away (END) */
+ mc13783_unlock(mc13783);
+
if (pdata->flags & MC13783_USE_ADC)
mc13783_add_subdevice(mc13783, "mc13783-adc");
@@ -641,8 +643,6 @@ err_revision:
if (pdata->flags & MC13783_USE_TOUCHSCREEN)
mc13783_add_subdevice(mc13783, "mc13783-ts");
- mc13783_unlock(mc13783);
-
return 0;
}
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index 0cc5eeff5ee8..dc9ea95c0561 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1430,7 +1430,7 @@ static int __devinit sm501_plat_probe(struct platform_device *dev)
}
sm->regs_claim = request_mem_region(sm->io_res->start,
- 0x100, "sm501");
+ resource_size(sm->io_res), "sm501");
if (sm->regs_claim == NULL) {
dev_err(&dev->dev, "cannot claim registers\n");
@@ -1440,8 +1440,7 @@ static int __devinit sm501_plat_probe(struct platform_device *dev)
platform_set_drvdata(dev, sm);
- sm->regs = ioremap(sm->io_res->start,
- (sm->io_res->end - sm->io_res->start) - 1);
+ sm->regs = ioremap(sm->io_res->start, resource_size(sm->io_res));
if (sm->regs == NULL) {
dev_err(&dev->dev, "cannot remap registers\n");
@@ -1645,7 +1644,7 @@ static int __devinit sm501_pci_probe(struct pci_dev *dev,
sm->mem_res = &dev->resource[0];
sm->regs_claim = request_mem_region(sm->io_res->start,
- 0x100, "sm501");
+ resource_size(sm->io_res), "sm501");
if (sm->regs_claim == NULL) {
dev_err(&dev->dev, "cannot claim registers\n");
err= -EBUSY;
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 0a255c1f1ce7..26d9176fca91 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -38,6 +38,19 @@ enum {
T7L66XB_CELL_MMC,
};
+static const struct resource t7l66xb_mmc_resources[] = {
+ {
+ .start = 0x800,
+ .end = 0x9ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IRQ_T7L66XB_MMC,
+ .end = IRQ_T7L66XB_MMC,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
#define SCR_REVID 0x08 /* b Revision ID */
#define SCR_IMR 0x42 /* b Interrupt Mask */
#define SCR_DEV_CTL 0xe0 /* b Device control */
@@ -83,6 +96,9 @@ static int t7l66xb_mmc_enable(struct platform_device *mmc)
spin_unlock_irqrestore(&t7l66xb->lock, flags);
+ tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
+ t7l66xb_mmc_resources[0].start & 0xfffe);
+
return 0;
}
@@ -106,28 +122,28 @@ static int t7l66xb_mmc_disable(struct platform_device *mmc)
return 0;
}
+static void t7l66xb_mmc_pwr(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_pwr(t7l66xb->scr + 0x200, 0, state);
+}
+
+static void t7l66xb_mmc_clk_div(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct t7l66xb *t7l66xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_clk_div(t7l66xb->scr + 0x200, 0, state);
+}
+
/*--------------------------------------------------------------------------*/
static struct tmio_mmc_data t7166xb_mmc_data = {
.hclk = 24000000,
-};
-
-static const struct resource t7l66xb_mmc_resources[] = {
- {
- .start = 0x800,
- .end = 0x9ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x200,
- .end = 0x2ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = IRQ_T7L66XB_MMC,
- .end = IRQ_T7L66XB_MMC,
- .flags = IORESOURCE_IRQ,
- },
+ .set_pwr = t7l66xb_mmc_pwr,
+ .set_clk_div = t7l66xb_mmc_clk_div,
};
static const struct resource t7l66xb_nand_resources[] = {
@@ -282,6 +298,9 @@ static int t7l66xb_resume(struct platform_device *dev)
if (pdata && pdata->resume)
pdata->resume(dev);
+ tmio_core_mmc_enable(t7l66xb->scr + 0x200, 0,
+ t7l66xb_mmc_resources[0].start & 0xfffe);
+
return 0;
}
#else
@@ -341,7 +360,7 @@ static int t7l66xb_probe(struct platform_device *dev)
if (ret)
goto err_request_scr;
- t7l66xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
+ t7l66xb->scr = ioremap(rscr->start, resource_size(rscr));
if (!t7l66xb->scr) {
ret = -ENOMEM;
goto err_ioremap;
@@ -384,12 +403,12 @@ static int t7l66xb_probe(struct platform_device *dev)
err_ioremap:
release_resource(&t7l66xb->rscr);
err_request_scr:
- kfree(t7l66xb);
clk_put(t7l66xb->clk48m);
err_clk48m_get:
clk_put(t7l66xb->clk32k);
err_clk32k_get:
err_noirq:
+ kfree(t7l66xb);
return ret;
}
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 3280ab33f88a..5c7f04343d5c 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -22,28 +22,52 @@ enum {
TC6387XB_CELL_MMC,
};
+struct tc6387xb {
+ void __iomem *scr;
+ struct clk *clk32k;
+ struct resource rscr;
+};
+
+static struct resource tc6387xb_mmc_resources[] = {
+ {
+ .start = 0x800,
+ .end = 0x9ff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/*--------------------------------------------------------------------------*/
+
#ifdef CONFIG_PM
static int tc6387xb_suspend(struct platform_device *dev, pm_message_t state)
{
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
if (pdata && pdata->suspend)
pdata->suspend(dev);
- clk_disable(clk32k);
+ clk_disable(tc6387xb->clk32k);
return 0;
}
static int tc6387xb_resume(struct platform_device *dev)
{
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
- clk_enable(clk32k);
+ clk_enable(tc6387xb->clk32k);
if (pdata && pdata->resume)
pdata->resume(dev);
+ tmio_core_mmc_resume(tc6387xb->scr + 0x200, 0,
+ tc6387xb_mmc_resources[0].start & 0xfffe);
+
return 0;
}
#else
@@ -53,12 +77,32 @@ static int tc6387xb_resume(struct platform_device *dev)
/*--------------------------------------------------------------------------*/
+static void tc6387xb_mmc_pwr(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_pwr(tc6387xb->scr + 0x200, 0, state);
+}
+
+static void tc6387xb_mmc_clk_div(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_clk_div(tc6387xb->scr + 0x200, 0, state);
+}
+
+
static int tc6387xb_mmc_enable(struct platform_device *mmc)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- clk_enable(clk32k);
+ clk_enable(tc6387xb->clk32k);
+
+ tmio_core_mmc_enable(tc6387xb->scr + 0x200, 0,
+ tc6387xb_mmc_resources[0].start & 0xfffe);
return 0;
}
@@ -66,36 +110,20 @@ static int tc6387xb_mmc_enable(struct platform_device *mmc)
static int tc6387xb_mmc_disable(struct platform_device *mmc)
{
struct platform_device *dev = to_platform_device(mmc->dev.parent);
- struct clk *clk32k = platform_get_drvdata(dev);
+ struct tc6387xb *tc6387xb = platform_get_drvdata(dev);
- clk_disable(clk32k);
+ clk_disable(tc6387xb->clk32k);
return 0;
}
-/*--------------------------------------------------------------------------*/
-
static struct tmio_mmc_data tc6387xb_mmc_data = {
.hclk = 24000000,
+ .set_pwr = tc6387xb_mmc_pwr,
+ .set_clk_div = tc6387xb_mmc_clk_div,
};
-static struct resource tc6387xb_mmc_resources[] = {
- {
- .start = 0x800,
- .end = 0x9ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0x200,
- .end = 0x2ff,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IRQ,
- },
-};
+/*--------------------------------------------------------------------------*/
static struct mfd_cell tc6387xb_cells[] = {
[TC6387XB_CELL_MMC] = {
@@ -111,8 +139,9 @@ static struct mfd_cell tc6387xb_cells[] = {
static int tc6387xb_probe(struct platform_device *dev)
{
struct tc6387xb_platform_data *pdata = dev->dev.platform_data;
- struct resource *iomem;
+ struct resource *iomem, *rscr;
struct clk *clk32k;
+ struct tc6387xb *tc6387xb;
int irq, ret;
iomem = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -120,18 +149,40 @@ static int tc6387xb_probe(struct platform_device *dev)
return -EINVAL;
}
+ tc6387xb = kzalloc(sizeof *tc6387xb, GFP_KERNEL);
+ if (!tc6387xb)
+ return -ENOMEM;
+
ret = platform_get_irq(dev, 0);
if (ret >= 0)
irq = ret;
else
- goto err_resource;
+ goto err_no_irq;
clk32k = clk_get(&dev->dev, "CLK_CK32K");
if (IS_ERR(clk32k)) {
ret = PTR_ERR(clk32k);
+ goto err_no_clk;
+ }
+
+ rscr = &tc6387xb->rscr;
+ rscr->name = "tc6387xb-core";
+ rscr->start = iomem->start;
+ rscr->end = iomem->start + 0xff;
+ rscr->flags = IORESOURCE_MEM;
+
+ ret = request_resource(iomem, rscr);
+ if (ret)
goto err_resource;
+
+ tc6387xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
+ if (!tc6387xb->scr) {
+ ret = -ENOMEM;
+ goto err_ioremap;
}
- platform_set_drvdata(dev, clk32k);
+
+ tc6387xb->clk32k = clk32k;
+ platform_set_drvdata(dev, tc6387xb);
if (pdata && pdata->enable)
pdata->enable(dev);
@@ -149,8 +200,13 @@ static int tc6387xb_probe(struct platform_device *dev)
if (!ret)
return 0;
- clk_put(clk32k);
+err_ioremap:
+ release_resource(&tc6387xb->rscr);
err_resource:
+ clk_put(clk32k);
+err_no_clk:
+err_no_irq:
+ kfree(tc6387xb);
return ret;
}
@@ -195,3 +251,4 @@ MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton");
MODULE_ALIAS("platform:tc6387xb");
+
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 1429a7341a9a..c59e5c5737d0 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -136,10 +136,6 @@ static int tc6393xb_nand_enable(struct platform_device *nand)
return 0;
}
-static struct tmio_mmc_data tc6393xb_mmc_data = {
- .hclk = 24000000,
-};
-
static struct resource __devinitdata tc6393xb_nand_resources[] = {
{
.start = 0x1000,
@@ -165,11 +161,6 @@ static struct resource __devinitdata tc6393xb_mmc_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = 0x200,
- .end = 0x2ff,
- .flags = IORESOURCE_MEM,
- },
- {
.start = IRQ_TC6393_MMC,
.end = IRQ_TC6393_MMC,
.flags = IORESOURCE_IRQ,
@@ -346,6 +337,50 @@ int tc6393xb_lcd_mode(struct platform_device *fb,
}
EXPORT_SYMBOL(tc6393xb_lcd_mode);
+static int tc6393xb_mmc_enable(struct platform_device *mmc)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_enable(tc6393xb->scr + 0x200, 0,
+ tc6393xb_mmc_resources[0].start & 0xfffe);
+
+ return 0;
+}
+
+static int tc6393xb_mmc_resume(struct platform_device *mmc)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_resume(tc6393xb->scr + 0x200, 0,
+ tc6393xb_mmc_resources[0].start & 0xfffe);
+
+ return 0;
+}
+
+static void tc6393xb_mmc_pwr(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_pwr(tc6393xb->scr + 0x200, 0, state);
+}
+
+static void tc6393xb_mmc_clk_div(struct platform_device *mmc, int state)
+{
+ struct platform_device *dev = to_platform_device(mmc->dev.parent);
+ struct tc6393xb *tc6393xb = platform_get_drvdata(dev);
+
+ tmio_core_mmc_clk_div(tc6393xb->scr + 0x200, 0, state);
+}
+
+static struct tmio_mmc_data tc6393xb_mmc_data = {
+ .hclk = 24000000,
+ .set_pwr = tc6393xb_mmc_pwr,
+ .set_clk_div = tc6393xb_mmc_clk_div,
+};
+
static struct mfd_cell __devinitdata tc6393xb_cells[] = {
[TC6393XB_CELL_NAND] = {
.name = "tmio-nand",
@@ -355,6 +390,8 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = {
},
[TC6393XB_CELL_MMC] = {
.name = "tmio-mmc",
+ .enable = tc6393xb_mmc_enable,
+ .resume = tc6393xb_mmc_resume,
.driver_data = &tc6393xb_mmc_data,
.num_resources = ARRAY_SIZE(tc6393xb_mmc_resources),
.resources = tc6393xb_mmc_resources,
@@ -610,7 +647,7 @@ static int __devinit tc6393xb_probe(struct platform_device *dev)
if (ret)
goto err_request_scr;
- tc6393xb->scr = ioremap(rscr->start, rscr->end - rscr->start + 1);
+ tc6393xb->scr = ioremap(rscr->start, resource_size(rscr));
if (!tc6393xb->scr) {
ret = -ENOMEM;
goto err_ioremap;
@@ -836,3 +873,4 @@ MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dmitry Baryshkov and Dirk Opfer");
MODULE_DESCRIPTION("tc6393xb Toshiba Mobile IO Controller");
MODULE_ALIAS("platform:tc6393xb");
+
diff --git a/drivers/mfd/tmio_core.c b/drivers/mfd/tmio_core.c
new file mode 100644
index 000000000000..eddc19ae464b
--- /dev/null
+++ b/drivers/mfd/tmio_core.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright(c) 2009 Ian Molton <spyro@f2s.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/mfd/tmio.h>
+
+int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base)
+{
+ /* Enable the MMC/SD Control registers */
+ sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
+ sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
+
+ /* Disable SD power during suspend */
+ sd_config_write8(cnf, shift, CNF_PWR_CTL_3, 0x01);
+
+ /* The below is required but why? FIXME */
+ sd_config_write8(cnf, shift, CNF_STOP_CLK_CTL, 0x1f);
+
+ /* Power down SD bus */
+ sd_config_write8(cnf, shift, CNF_PWR_CTL_2, 0x00);
+
+ return 0;
+}
+EXPORT_SYMBOL(tmio_core_mmc_enable);
+
+int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base)
+{
+
+ /* Enable the MMC/SD Control registers */
+ sd_config_write16(cnf, shift, CNF_CMD, SDCREN);
+ sd_config_write32(cnf, shift, CNF_CTL_BASE, base & 0xfffe);
+
+ return 0;
+}
+EXPORT_SYMBOL(tmio_core_mmc_resume);
+
+void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state)
+{
+ sd_config_write8(cnf, shift, CNF_PWR_CTL_2, state ? 0x02 : 0x00);
+}
+EXPORT_SYMBOL(tmio_core_mmc_pwr);
+
+void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state)
+{
+ sd_config_write8(cnf, shift, CNF_SD_CLK_MODE, state ? 1 : 0);
+}
+EXPORT_SYMBOL(tmio_core_mmc_clk_div);
+
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index 8485a7018060..9a970bd68775 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -134,8 +134,7 @@ static inline int is_reg_locked(struct wm8350 *wm8350, u8 reg)
wm8350->reg_cache[WM8350_SECURITY] == WM8350_UNLOCK_KEY)
return 0;
- if ((reg == WM8350_GPIO_CONFIGURATION_I_O) ||
- (reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
+ if ((reg >= WM8350_GPIO_FUNCTION_SELECT_1 &&
reg <= WM8350_GPIO_FUNCTION_SELECT_4) ||
(reg >= WM8350_BATTERY_CHARGER_CONTROL_1 &&
reg <= WM8350_BATTERY_CHARGER_CONTROL_3))
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index c8df547c4747..f56c9adf9493 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -18,7 +18,7 @@
#include <linux/bug.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/workqueue.h>
+#include <linux/irq.h>
#include <linux/mfd/wm8350/core.h>
#include <linux/mfd/wm8350/audio.h>
@@ -29,8 +29,6 @@
#include <linux/mfd/wm8350/supply.h>
#include <linux/mfd/wm8350/wdt.h>
-#define WM8350_NUM_IRQ_REGS 7
-
#define WM8350_INT_OFFSET_1 0
#define WM8350_INT_OFFSET_2 1
#define WM8350_POWER_UP_INT_OFFSET 2
@@ -366,19 +364,10 @@ static struct wm8350_irq_data wm8350_irqs[] = {
},
};
-static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq)
+static inline struct wm8350_irq_data *irq_to_wm8350_irq(struct wm8350 *wm8350,
+ int irq)
{
- mutex_lock(&wm8350->irq_mutex);
-
- if (wm8350->irq[irq].handler)
- wm8350->irq[irq].handler(irq, wm8350->irq[irq].data);
- else {
- dev_err(wm8350->dev, "irq %d nobody cared. now masked.\n",
- irq);
- wm8350_mask_irq(wm8350, irq);
- }
-
- mutex_unlock(&wm8350->irq_mutex);
+ return &wm8350_irqs[irq - wm8350->irq_base];
}
/*
@@ -386,7 +375,9 @@ static void wm8350_irq_call_handler(struct wm8350 *wm8350, int irq)
* interrupts are clear on read the IRQ line will be reasserted and
* the physical IRQ will be handled again if another interrupt is
* asserted while we run - in the normal course of events this is a
- * rare occurrence so we save I2C/SPI reads.
+ * rare occurrence so we save I2C/SPI reads. We're also assuming that
+ * it's rare to get lots of interrupts firing simultaneously so try to
+ * minimise I/O.
*/
static irqreturn_t wm8350_irq(int irq, void *irq_data)
{
@@ -397,7 +388,6 @@ static irqreturn_t wm8350_irq(int irq, void *irq_data)
struct wm8350_irq_data *data;
int i;
- /* TODO: Use block reads to improve performance? */
level_one = wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS)
& ~wm8350_reg_read(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK);
@@ -416,93 +406,101 @@ static irqreturn_t wm8350_irq(int irq, void *irq_data)
sub_reg[data->reg] =
wm8350_reg_read(wm8350, WM8350_INT_STATUS_1 +
data->reg);
- sub_reg[data->reg] &=
- ~wm8350_reg_read(wm8350,
- WM8350_INT_STATUS_1_MASK +
- data->reg);
+ sub_reg[data->reg] &= ~wm8350->irq_masks[data->reg];
read_done[data->reg] = 1;
}
if (sub_reg[data->reg] & data->mask)
- wm8350_irq_call_handler(wm8350, i);
+ handle_nested_irq(wm8350->irq_base + i);
}
return IRQ_HANDLED;
}
-int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- irq_handler_t handler, unsigned long flags,
- const char *name, void *data)
+static void wm8350_irq_lock(unsigned int irq)
{
- if (irq < 0 || irq > WM8350_NUM_IRQ || !handler)
- return -EINVAL;
-
- if (wm8350->irq[irq].handler)
- return -EBUSY;
-
- mutex_lock(&wm8350->irq_mutex);
- wm8350->irq[irq].handler = handler;
- wm8350->irq[irq].data = data;
- mutex_unlock(&wm8350->irq_mutex);
-
- wm8350_unmask_irq(wm8350, irq);
+ struct wm8350 *wm8350 = get_irq_chip_data(irq);
- return 0;
+ mutex_lock(&wm8350->irq_lock);
}
-EXPORT_SYMBOL_GPL(wm8350_register_irq);
-int wm8350_free_irq(struct wm8350 *wm8350, int irq)
+static void wm8350_irq_sync_unlock(unsigned int irq)
{
- if (irq < 0 || irq > WM8350_NUM_IRQ)
- return -EINVAL;
+ struct wm8350 *wm8350 = get_irq_chip_data(irq);
+ int i;
- wm8350_mask_irq(wm8350, irq);
+ for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) {
+ /* If there's been a change in the mask write it back
+ * to the hardware. */
+ if (wm8350->irq_masks[i] !=
+ wm8350->reg_cache[WM8350_INT_STATUS_1_MASK + i])
+ WARN_ON(wm8350_reg_write(wm8350,
+ WM8350_INT_STATUS_1_MASK + i,
+ wm8350->irq_masks[i]));
+ }
- mutex_lock(&wm8350->irq_mutex);
- wm8350->irq[irq].handler = NULL;
- mutex_unlock(&wm8350->irq_mutex);
- return 0;
+ mutex_unlock(&wm8350->irq_lock);
}
-EXPORT_SYMBOL_GPL(wm8350_free_irq);
-int wm8350_mask_irq(struct wm8350 *wm8350, int irq)
+static void wm8350_irq_enable(unsigned int irq)
{
- return wm8350_set_bits(wm8350, WM8350_INT_STATUS_1_MASK +
- wm8350_irqs[irq].reg,
- wm8350_irqs[irq].mask);
+ struct wm8350 *wm8350 = get_irq_chip_data(irq);
+ struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+
+ wm8350->irq_masks[irq_data->reg] &= ~irq_data->mask;
}
-EXPORT_SYMBOL_GPL(wm8350_mask_irq);
-int wm8350_unmask_irq(struct wm8350 *wm8350, int irq)
+static void wm8350_irq_disable(unsigned int irq)
{
- return wm8350_clear_bits(wm8350, WM8350_INT_STATUS_1_MASK +
- wm8350_irqs[irq].reg,
- wm8350_irqs[irq].mask);
+ struct wm8350 *wm8350 = get_irq_chip_data(irq);
+ struct wm8350_irq_data *irq_data = irq_to_wm8350_irq(wm8350, irq);
+
+ wm8350->irq_masks[irq_data->reg] |= irq_data->mask;
}
-EXPORT_SYMBOL_GPL(wm8350_unmask_irq);
+
+static struct irq_chip wm8350_irq_chip = {
+ .name = "wm8350",
+ .bus_lock = wm8350_irq_lock,
+ .bus_sync_unlock = wm8350_irq_sync_unlock,
+ .disable = wm8350_irq_disable,
+ .enable = wm8350_irq_enable,
+};
int wm8350_irq_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata)
{
- int ret;
+ int ret, cur_irq, i;
int flags = IRQF_ONESHOT;
if (!irq) {
- dev_err(wm8350->dev, "No IRQ configured\n");
- return -EINVAL;
+ dev_warn(wm8350->dev, "No interrupt support, no core IRQ\n");
+ return 0;
+ }
+
+ if (!pdata || !pdata->irq_base) {
+ dev_warn(wm8350->dev, "No interrupt support, no IRQ base\n");
+ return 0;
}
+ /* Mask top level interrupts */
wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF);
- wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF);
- mutex_init(&wm8350->irq_mutex);
+ /* Mask all individual interrupts by default and cache the
+ * masks. We read the masks back since there are unwritable
+ * bits in the mask registers. */
+ for (i = 0; i < ARRAY_SIZE(wm8350->irq_masks); i++) {
+ wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK + i,
+ 0xFFFF);
+ wm8350->irq_masks[i] =
+ wm8350_reg_read(wm8350,
+ WM8350_INT_STATUS_1_MASK + i);
+ }
+
+ mutex_init(&wm8350->irq_lock);
wm8350->chip_irq = irq;
+ wm8350->irq_base = pdata->irq_base;
- if (pdata && pdata->irq_high) {
+ if (pdata->irq_high) {
flags |= IRQF_TRIGGER_HIGH;
wm8350_set_bits(wm8350, WM8350_SYSTEM_CONTROL_1,
@@ -514,11 +512,32 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
WM8350_IRQ_POL);
}
+ /* Register with genirq */
+ for (cur_irq = wm8350->irq_base;
+ cur_irq < ARRAY_SIZE(wm8350_irqs) + wm8350->irq_base;
+ cur_irq++) {
+ set_irq_chip_data(cur_irq, wm8350);
+ set_irq_chip_and_handler(cur_irq, &wm8350_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set them noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ set_irq_noprobe(cur_irq);
+#endif
+ }
+
ret = request_threaded_irq(irq, NULL, wm8350_irq, flags,
"wm8350", wm8350);
if (ret != 0)
dev_err(wm8350->dev, "Failed to request IRQ: %d\n", ret);
+ /* Allow interrupts to fire */
+ wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0);
+
return ret;
}
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e3551d20464f..9eaa6477cec2 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -90,6 +90,35 @@ config IBM_ASM
information on the specific driver level and support statement
for your IBM server.
+config HWLAT_DETECTOR
+ tristate "Testing module to detect hardware-induced latencies"
+ depends on DEBUG_FS
+ depends on RING_BUFFER
+ default m
+ ---help---
+ A simple hardware latency detector. Use this module to detect
+ large latencies introduced by the behavior of the underlying
+ system firmware external to Linux. We do this using periodic
+ use of stop_machine to grab all available CPUs and measure
+ for unexplainable gaps in the CPU timestamp counter(s). By
+ default, the module is not enabled until the "enable" file
+ within the "hwlat_detector" debugfs directory is toggled.
+
+ This module is often used to detect SMI (System Management
+ Interrupts) on x86 systems, though is not x86 specific. To
+ this end, we default to using a sample window of 1 second,
+ during which we will sample for 0.5 seconds. If an SMI or
+ similar event occurs during that time, it is recorded
+ into an 8K samples global ring buffer until retreived.
+
+ WARNING: This software should never be enabled (it can be built
+ but should not be turned on after it is loaded) in a production
+ environment where high latencies are a concern since the
+ sampling mechanism actually introduces latencies for
+ regular tasks while the CPU(s) are being held.
+
+ If unsure, say N
+
config PHANTOM
tristate "Sensable PHANToM (PCI)"
depends on PCI
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 049ff2482f30..374408da8d09 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -26,5 +26,6 @@ obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o
obj-y += eeprom/
obj-y += cb710/
diff --git a/drivers/misc/hwlat_detector.c b/drivers/misc/hwlat_detector.c
new file mode 100644
index 000000000000..e02d8e17b484
--- /dev/null
+++ b/drivers/misc/hwlat_detector.c
@@ -0,0 +1,1208 @@
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
+ *
+ * Use this module to detect large system latencies induced by the behavior of
+ * certain underlying system hardware or firmware, independent of Linux itself.
+ * The code was developed originally to detect the presence of SMIs on Intel
+ * and AMD systems, although there is no dependency upon x86 herein.
+ *
+ * The classical example usage of this module is in detecting the presence of
+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a
+ * somewhat special form of hardware interrupt spawned from earlier CPU debug
+ * modes in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
+ * LPC (or other device) to generate a special interrupt under certain
+ * circumstances, for example, upon expiration of a special SMI timer device,
+ * due to certain external thermal readings, on certain I/O address accesses,
+ * and other situations. An SMI hits a special CPU pin, triggers a special
+ * SMI mode (complete with special memory map), and the OS is unaware.
+ *
+ * Although certain hardware-inducing latencies are necessary (for example,
+ * a modern system often requires an SMI handler for correct thermal control
+ * and remote management) they can wreak havoc upon any OS-level performance
+ * guarantees toward low-latency, especially when the OS is not even made
+ * aware of the presence of these interrupts. For this reason, we need a
+ * somewhat brute force mechanism to detect these interrupts. In this case,
+ * we do it by hogging all of the CPU(s) for configurable timer intervals,
+ * sampling the built-in CPU timer, looking for discontiguous readings.
+ *
+ * WARNING: This implementation necessarily introduces latencies. Therefore,
+ * you should NEVER use this module in a production environment
+ * requiring any kind of low-latency performance guarantee(s).
+ *
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ *
+ * Includes useful feedback from Clark Williams <clark@redhat.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ring_buffer.h>
+#include <linux/stop_machine.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+
+#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
+#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
+#define U64STR_SIZE 22 /* 20 digits max */
+
+#define VERSION "1.0.0"
+#define BANNER "hwlat_detector: "
+#define DRVNAME "hwlat_detector"
+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
+
+/* Module metadata */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
+MODULE_DESCRIPTION("A simple hardware latency detector");
+MODULE_VERSION(VERSION);
+
+/* Module parameters */
+
+static int debug;
+static int enabled;
+static int threshold;
+
+module_param(debug, int, 0); /* enable debug */
+module_param(enabled, int, 0); /* enable detector */
+module_param(threshold, int, 0); /* latency threshold */
+
+/* Buffering and sampling */
+
+static struct ring_buffer *ring_buffer; /* sample buffer */
+static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
+static unsigned long buf_size = BUF_SIZE_DEFAULT;
+static struct task_struct *kthread; /* sampling thread */
+
+/* DebugFS filesystem entries */
+
+static struct dentry *debug_dir; /* debugfs directory */
+static struct dentry *debug_max; /* maximum TSC delta */
+static struct dentry *debug_count; /* total detect count */
+static struct dentry *debug_sample_width; /* sample width us */
+static struct dentry *debug_sample_window; /* sample window us */
+static struct dentry *debug_sample; /* raw samples us */
+static struct dentry *debug_threshold; /* threshold us */
+static struct dentry *debug_enable; /* enable/disable */
+
+/* Individual samples and global state */
+
+struct sample; /* latency sample */
+struct data; /* Global state */
+
+/* Sampling functions */
+static int __buffer_add_sample(struct sample *sample);
+static struct sample *buffer_get_sample(struct sample *sample);
+static int get_sample(void *unused);
+
+/* Threading and state */
+static int kthread_fn(void *unused);
+static int start_kthread(void);
+static int stop_kthread(void);
+static void __reset_stats(void);
+static int init_stats(void);
+
+/* Debugfs interface */
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos, const u64 *entry);
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, u64 *entry);
+static int debug_sample_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+static int debug_sample_release(struct inode *inode, struct file *filp);
+static int debug_enable_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos);
+static ssize_t debug_enable_fwrite(struct file *file,
+ const char __user *user_buffer,
+ size_t user_size, loff_t *offset);
+
+/* Initialization functions */
+static int init_debugfs(void);
+static void free_debugfs(void);
+static int detector_init(void);
+static void detector_exit(void);
+
+/* Individual latency samples are stored here when detected and packed into
+ * the ring_buffer circular buffer, where they are overwritten when
+ * more than buf_size/sizeof(sample) samples are received. */
+struct sample {
+ u64 seqnum; /* unique sequence */
+ u64 duration; /* ktime delta */
+ struct timespec timestamp; /* wall time */
+};
+
+/* keep the global state somewhere. Mostly used under stop_machine. */
+static struct data {
+
+ struct mutex lock; /* protect changes */
+
+ u64 count; /* total since reset */
+ u64 max_sample; /* max hardware latency */
+ u64 threshold; /* sample threshold level */
+
+ u64 sample_window; /* total sampling window (on+off) */
+ u64 sample_width; /* active sampling portion of window */
+
+ atomic_t sample_open; /* whether the sample file is open */
+
+ wait_queue_head_t wq; /* waitqeue for new sample values */
+
+} data;
+
+/**
+ * __buffer_add_sample - add a new latency sample recording to the ring buffer
+ * @sample: The new latency sample value
+ *
+ * This receives a new latency sample and records it in a global ring buffer.
+ * No additional locking is used in this case - suited for stop_machine use.
+ */
+static int __buffer_add_sample(struct sample *sample)
+{
+ return ring_buffer_write(ring_buffer,
+ sizeof(struct sample), sample);
+}
+
+/**
+ * buffer_get_sample - remove a hardware latency sample from the ring buffer
+ * @sample: Pre-allocated storage for the sample
+ *
+ * This retrieves a hardware latency sample from the global circular buffer
+ */
+static struct sample *buffer_get_sample(struct sample *sample)
+{
+ struct ring_buffer_event *e = NULL;
+ struct sample *s = NULL;
+ unsigned int cpu = 0;
+
+ if (!sample)
+ return NULL;
+
+ /* ring_buffers are per-cpu but we just want any value */
+ /* so we'll start with this cpu and try others if not */
+ /* Steven is planning to add a generic mechanism */
+ mutex_lock(&ring_buffer_mutex);
+ e = ring_buffer_consume(ring_buffer, smp_processor_id(), NULL);
+ if (!e) {
+ for_each_online_cpu(cpu) {
+ e = ring_buffer_consume(ring_buffer, cpu, NULL);
+ if (e)
+ break;
+ }
+ }
+
+ if (e) {
+ s = ring_buffer_event_data(e);
+ memcpy(sample, s, sizeof(struct sample));
+ } else
+ sample = NULL;
+ mutex_unlock(&ring_buffer_mutex);
+
+ return sample;
+}
+
+/**
+ * get_sample - sample the CPU TSC and look for likely hardware latencies
+ * @unused: This is not used but is a part of the stop_machine API
+ *
+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
+ * hardware-induced latency. Called under stop_machine, with data.lock held.
+ */
+static int get_sample(void *unused)
+{
+ ktime_t start, t1, t2;
+ s64 diff, total = 0;
+ u64 sample = 0;
+ int ret = 1;
+
+ start = ktime_get(); /* start timestamp */
+
+ do {
+
+ t1 = ktime_get(); /* we'll look for a discontinuity */
+ t2 = ktime_get();
+
+ total = ktime_to_us(ktime_sub(t2, start)); /* sample width */
+ diff = ktime_to_us(ktime_sub(t2, t1)); /* current diff */
+
+ /* This shouldn't happen */
+ if (diff < 0) {
+ printk(KERN_ERR BANNER "time running backwards\n");
+ goto out;
+ }
+
+ if (diff > sample)
+ sample = diff; /* only want highest value */
+
+ } while (total <= data.sample_width);
+
+ /* If we exceed the threshold value, we have found a hardware latency */
+ if (sample > data.threshold) {
+ struct sample s;
+
+ data.count++;
+ s.seqnum = data.count;
+ s.duration = sample;
+ s.timestamp = CURRENT_TIME;
+ __buffer_add_sample(&s);
+
+ /* Keep a running maximum ever recorded hardware latency */
+ if (sample > data.max_sample)
+ data.max_sample = sample;
+
+ wake_up(&data.wq); /* wake up reader(s) */
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+/*
+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
+ * @unused: A required part of the kthread API.
+ *
+ * Used to periodically sample the CPU TSC via a call to get_sample. We
+ * use stop_machine, whith does (intentionally) introduce latency since we
+ * need to ensure nothing else might be running (and thus pre-empting).
+ * Obviously this should never be used in production environments.
+ *
+ * stop_machine will schedule us typically only on CPU0 which is fine for
+ * almost every real-world hardware latency situation - but we might later
+ * generalize this if we find there are any actualy systems with alternate
+ * SMI delivery or other non CPU0 hardware latencies.
+ */
+static int kthread_fn(void *unused)
+{
+ int err = 0;
+ u64 interval = 0;
+
+ while (!kthread_should_stop()) {
+
+ mutex_lock(&data.lock);
+
+ err = stop_machine(get_sample, unused, 0);
+ if (err) {
+ /* Houston, we have a problem */
+ mutex_unlock(&data.lock);
+ goto err_out;
+ }
+
+ interval = data.sample_window - data.sample_width;
+ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
+
+ mutex_unlock(&data.lock);
+
+ if (msleep_interruptible(interval))
+ goto out;
+ }
+ goto out;
+err_out:
+ printk(KERN_ERR BANNER "could not call stop_machine, disabling\n");
+ enabled = 0;
+out:
+ return err;
+
+}
+
+/**
+ * start_kthread - Kick off the hardware latency sampling/detector kthread
+ *
+ * This starts a kernel thread that will sit and sample the CPU timestamp
+ * counter (TSC or similar) and look for potential hardware latencies.
+ */
+static int start_kthread(void)
+{
+ kthread = kthread_run(kthread_fn, NULL,
+ DRVNAME);
+ if (IS_ERR(kthread)) {
+ printk(KERN_ERR BANNER "could not start sampling thread\n");
+ enabled = 0;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * stop_kthread - Inform the hardware latency samping/detector kthread to stop
+ *
+ * This kicks the running hardware latency sampling/detector kernel thread and
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
+ */
+static int stop_kthread(void)
+{
+ int ret;
+
+ ret = kthread_stop(kthread);
+
+ return ret;
+}
+
+/**
+ * __reset_stats - Reset statistics for the hardware latency detector
+ *
+ * We use data to store various statistics and global state. We call this
+ * function in order to reset those when "enable" is toggled on or off, and
+ * also at initialization. Should be called with data.lock held.
+ */
+static void __reset_stats(void)
+{
+ data.count = 0;
+ data.max_sample = 0;
+ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
+}
+
+/**
+ * init_stats - Setup global state statistics for the hardware latency detector
+ *
+ * We use data to store various statistics and global state. We also use
+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
+ * induced system latencies. This function initializes these structures and
+ * allocates the global ring buffer also.
+ */
+static int init_stats(void)
+{
+ int ret = -ENOMEM;
+
+ mutex_init(&data.lock);
+ init_waitqueue_head(&data.wq);
+ atomic_set(&data.sample_open, 0);
+
+ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
+
+ if (WARN(!ring_buffer, KERN_ERR BANNER
+ "failed to allocate ring buffer!\n"))
+ goto out;
+
+ __reset_stats();
+ data.threshold = DEFAULT_LAT_THRESHOLD; /* threshold us */
+ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
+ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */
+
+ ret = 0;
+
+out:
+ return ret;
+
+}
+
+/*
+ * simple_data_read - Wrapper read function for global state debugfs entries
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ * @entry: The entry to read from
+ *
+ * This function provides a generic read implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_read directly, but we need to make sure that the data.lock
+ * spinlock is held during the actual read (even though we likely won't ever
+ * actually race here as the updater runs under a stop_machine context).
+ */
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos, const u64 *entry)
+{
+ char buf[U64STR_SIZE];
+ u64 val = 0;
+ int len = 0;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (!entry)
+ return -EFAULT;
+
+ mutex_lock(&data.lock);
+ val = *entry;
+ mutex_unlock(&data.lock);
+
+ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+
+}
+
+/*
+ * simple_data_write - Wrapper write function for global state debugfs entries
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to write value from
+ * @cnt: The maximum number of bytes to write
+ * @ppos: The current "file" position
+ * @entry: The entry to write to
+ *
+ * This function provides a generic write implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_write directly, but we need to make sure that the data.lock
+ * spinlock is held during the actual write (even though we likely won't ever
+ * actually race here as the updater runs under a stop_machine context).
+ */
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos, u64 *entry)
+{
+ char buf[U64STR_SIZE];
+ int csize = min(cnt, sizeof(buf));
+ u64 val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
+ err = strict_strtoull(buf, 10, &val);
+ if (err)
+ return -EINVAL;
+
+ mutex_lock(&data.lock);
+ *entry = val;
+ mutex_unlock(&data.lock);
+
+ return csize;
+}
+
+/**
+ * debug_count_fopen - Open function for "count" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "count" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_count_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_count_fread - Read function for "count" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "count" debugfs
+ * interface to the hardware latency detector. Can be used to read the
+ * number of latency readings exceeding the configured threshold since
+ * the detector was last reset (e.g. by writing a zero into "count").
+ */
+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
+}
+
+/**
+ * debug_count_fwrite - Write function for "count" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "count" debugfs
+ * interface to the hardware latency detector. Can be used to write a
+ * desired value, especially to zero the total count.
+ */
+static ssize_t debug_count_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
+}
+
+/**
+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "enable" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_enable_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_enable_fread - Read function for "enable" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "enable" debugfs
+ * interface to the hardware latency detector. Can be used to determine
+ * whether the detector is currently enabled ("0\n" or "1\n" returned).
+ */
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[4];
+
+ if ((cnt < sizeof(buf)) || (*ppos))
+ return 0;
+
+ buf[0] = enabled ? '1' : '0';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ if (copy_to_user(ubuf, buf, strlen(buf)))
+ return -EFAULT;
+ return *ppos = strlen(buf);
+}
+
+/**
+ * debug_enable_fwrite - Write function for "enable" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "enable" debugfs
+ * interface to the hardware latency detector. Can be used to enable or
+ * disable the detector, which will have the side-effect of possibly
+ * also resetting the global stats and kicking off the measuring
+ * kthread (on an enable) or the converse (upon a disable).
+ */
+static ssize_t debug_enable_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ char buf[4];
+ int csize = min(cnt, sizeof(buf));
+ long val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[sizeof(buf)-1] = '\0'; /* just in case */
+ err = strict_strtoul(buf, 10, &val);
+ if (0 != err)
+ return -EINVAL;
+
+ if (val) {
+ if (enabled)
+ goto unlock;
+ enabled = 1;
+ __reset_stats();
+ if (start_kthread())
+ return -EFAULT;
+ } else {
+ if (!enabled)
+ goto unlock;
+ enabled = 0;
+ stop_kthread();
+ wake_up(&data.wq); /* reader(s) should return */
+ }
+unlock:
+ return csize;
+}
+
+/**
+ * debug_max_fopen - Open function for "max" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "max" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_max_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_max_fread - Read function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to determine
+ * the maximum latency value observed since it was last reset.
+ */
+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+/**
+ * debug_max_fwrite - Write function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to reset the
+ * maximum or set it to some other desired value - if, then, subsequent
+ * measurements exceed this value, the maximum will be updated.
+ */
+static ssize_t debug_max_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+
+/**
+ * debug_sample_fopen - An open function for "sample" debugfs interface
+ * @inode: The in-kernel inode representation of this debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function handles opening the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. Can be opened blocking or non-blocking,
+ * affecting whether it behaves as a buffer read pipe, or does not.
+ * Implements simple locking to prevent multiple simultaneous use.
+ */
+static int debug_sample_fopen(struct inode *inode, struct file *filp)
+{
+ if (!atomic_add_unless(&data.sample_open, 1, 1))
+ return -EBUSY;
+ else
+ return 0;
+}
+
+/**
+ * debug_sample_fread - A read function for "sample" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that will contain the samples read
+ * @cnt: The maximum bytes to read from the debugfs "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function handles reading from the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. By default this will block pending a new
+ * value written into the sample buffer, unless there are already a
+ * number of value(s) waiting in the buffer, or the sample file was
+ * previously opened in a non-blocking mode of operation.
+ */
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ int len = 0;
+ char buf[64];
+ struct sample *sample = NULL;
+
+ if (!enabled)
+ return 0;
+
+ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
+ if (!sample)
+ return -ENOMEM;
+
+ while (!buffer_get_sample(sample)) {
+
+ DEFINE_WAIT(wait);
+
+ if (filp->f_flags & O_NONBLOCK) {
+ len = -EAGAIN;
+ goto out;
+ }
+
+ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&data.wq, &wait);
+
+ if (signal_pending(current)) {
+ len = -EINTR;
+ goto out;
+ }
+
+ if (!enabled) { /* enable was toggled */
+ len = 0;
+ goto out;
+ }
+ }
+
+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\n",
+ sample->timestamp.tv_sec,
+ sample->timestamp.tv_nsec,
+ sample->duration);
+
+
+ /* handling partial reads is more trouble than it's worth */
+ if (len > cnt)
+ goto out;
+
+ if (copy_to_user(ubuf, buf, len))
+ len = -EFAULT;
+
+out:
+ kfree(sample);
+ return len;
+}
+
+/**
+ * debug_sample_release - Release function for "sample" debugfs interface
+ * @inode: The in-kernel inode represenation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function completes the close of the debugfs interface "sample" file.
+ * Frees the sample_open "lock" so that other users may open the interface.
+ */
+static int debug_sample_release(struct inode *inode, struct file *filp)
+{
+ atomic_dec(&data.sample_open);
+
+ return 0;
+}
+
+/**
+ * debug_threshold_fopen - Open function for "threshold" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "threshold" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_threshold_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_threshold_fread - Read function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * the current threshold level at which a latency will be recorded in the
+ * global ring buffer, typically on the order of 10us.
+ */
+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
+}
+
+/**
+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * the threshold level at which any subsequently detected latencies will
+ * be recorded into the global ring buffer.
+ */
+static ssize_t debug_threshold_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ int ret;
+
+ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
+
+ if (enabled)
+ wake_up_process(kthread);
+
+ return ret;
+}
+
+/**
+ * debug_width_fopen - Open function for "width" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "width" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_width_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_width_fread - Read function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * for how many us of the total window us we will actively sample for any
+ * hardware-induced latecy periods. Obviously, it is not possible to
+ * sample constantly and have the system respond to a sample reader, or,
+ * worse, without having the system appear to have gone out to lunch.
+ */
+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
+}
+
+/**
+ * debug_width_fwrite - Write function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * for how many us of the total window us we will actively sample for any
+ * hardware-induced latency periods. Obviously, it is not possible to
+ * sample constantly and have the system respond to a sample reader, or,
+ * worse, without having the system appear to have gone out to lunch. It
+ * is enforced that width is less that the total window size.
+ */
+static ssize_t debug_width_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ char buf[U64STR_SIZE];
+ int csize = min(cnt, sizeof(buf));
+ u64 val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
+ err = strict_strtoull(buf, 10, &val);
+ if (0 != err)
+ return -EINVAL;
+
+ mutex_lock(&data.lock);
+ if (val < data.sample_window)
+ data.sample_width = val;
+ else {
+ mutex_unlock(&data.lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&data.lock);
+
+ if (enabled)
+ wake_up_process(kthread);
+
+ return csize;
+}
+
+/**
+ * debug_window_fopen - Open function for "window" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs.
+ */
+static int debug_window_fopen(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+/**
+ * debug_window_fread - Read function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to read the total window size.
+ */
+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
+}
+
+/**
+ * debug_window_fwrite - Write function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "window" debufds
+ * interface to the hardware latency detetector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to write a new total window size. It
+ * is enfoced that any value written must be greater than the sample width
+ * size, or an error results.
+ */
+static ssize_t debug_window_fwrite(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ char buf[U64STR_SIZE];
+ int csize = min(cnt, sizeof(buf));
+ u64 val = 0;
+ int err = 0;
+
+ memset(buf, '\0', sizeof(buf));
+ if (copy_from_user(buf, ubuf, csize))
+ return -EFAULT;
+
+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
+ err = strict_strtoull(buf, 10, &val);
+ if (0 != err)
+ return -EINVAL;
+
+ mutex_lock(&data.lock);
+ if (data.sample_width < val)
+ data.sample_window = val;
+ else {
+ mutex_unlock(&data.lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&data.lock);
+
+ return csize;
+}
+
+/*
+ * Function pointers for the "count" debugfs file operations
+ */
+static const struct file_operations count_fops = {
+ .open = debug_count_fopen,
+ .read = debug_count_fread,
+ .write = debug_count_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "enable" debugfs file operations
+ */
+static const struct file_operations enable_fops = {
+ .open = debug_enable_fopen,
+ .read = debug_enable_fread,
+ .write = debug_enable_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "max" debugfs file operations
+ */
+static const struct file_operations max_fops = {
+ .open = debug_max_fopen,
+ .read = debug_max_fread,
+ .write = debug_max_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "sample" debugfs file operations
+ */
+static const struct file_operations sample_fops = {
+ .open = debug_sample_fopen,
+ .read = debug_sample_fread,
+ .release = debug_sample_release,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "threshold" debugfs file operations
+ */
+static const struct file_operations threshold_fops = {
+ .open = debug_threshold_fopen,
+ .read = debug_threshold_fread,
+ .write = debug_threshold_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "width" debugfs file operations
+ */
+static const struct file_operations width_fops = {
+ .open = debug_width_fopen,
+ .read = debug_width_fread,
+ .write = debug_width_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "window" debugfs file operations
+ */
+static const struct file_operations window_fops = {
+ .open = debug_window_fopen,
+ .read = debug_window_fread,
+ .write = debug_window_fwrite,
+ .owner = THIS_MODULE,
+};
+
+/**
+ * init_debugfs - A function to initialize the debugfs interface files
+ *
+ * This function creates entries in debugfs for "hwlat_detector", including
+ * files to read values from the detector, current samples, and the
+ * maximum sample that has been captured since the hardware latency
+ * dectector was started.
+ */
+static int init_debugfs(void)
+{
+ int ret = -ENOMEM;
+
+ debug_dir = debugfs_create_dir(DRVNAME, NULL);
+ if (!debug_dir)
+ goto err_debug_dir;
+
+ debug_sample = debugfs_create_file("sample", 0444,
+ debug_dir, NULL,
+ &sample_fops);
+ if (!debug_sample)
+ goto err_sample;
+
+ debug_count = debugfs_create_file("count", 0444,
+ debug_dir, NULL,
+ &count_fops);
+ if (!debug_count)
+ goto err_count;
+
+ debug_max = debugfs_create_file("max", 0444,
+ debug_dir, NULL,
+ &max_fops);
+ if (!debug_max)
+ goto err_max;
+
+ debug_sample_window = debugfs_create_file("window", 0644,
+ debug_dir, NULL,
+ &window_fops);
+ if (!debug_sample_window)
+ goto err_window;
+
+ debug_sample_width = debugfs_create_file("width", 0644,
+ debug_dir, NULL,
+ &width_fops);
+ if (!debug_sample_width)
+ goto err_width;
+
+ debug_threshold = debugfs_create_file("threshold", 0644,
+ debug_dir, NULL,
+ &threshold_fops);
+ if (!debug_threshold)
+ goto err_threshold;
+
+ debug_enable = debugfs_create_file("enable", 0644,
+ debug_dir, &enabled,
+ &enable_fops);
+ if (!debug_enable)
+ goto err_enable;
+
+ else {
+ ret = 0;
+ goto out;
+ }
+
+err_enable:
+ debugfs_remove(debug_threshold);
+err_threshold:
+ debugfs_remove(debug_sample_width);
+err_width:
+ debugfs_remove(debug_sample_window);
+err_window:
+ debugfs_remove(debug_max);
+err_max:
+ debugfs_remove(debug_count);
+err_count:
+ debugfs_remove(debug_sample);
+err_sample:
+ debugfs_remove(debug_dir);
+err_debug_dir:
+out:
+ return ret;
+}
+
+/**
+ * free_debugfs - A function to cleanup the debugfs file interface
+ */
+static void free_debugfs(void)
+{
+ /* could also use a debugfs_remove_recursive */
+ debugfs_remove(debug_enable);
+ debugfs_remove(debug_threshold);
+ debugfs_remove(debug_sample_width);
+ debugfs_remove(debug_sample_window);
+ debugfs_remove(debug_max);
+ debugfs_remove(debug_count);
+ debugfs_remove(debug_sample);
+ debugfs_remove(debug_dir);
+}
+
+/**
+ * detector_init - Standard module initialization code
+ */
+static int detector_init(void)
+{
+ int ret = -ENOMEM;
+
+ printk(KERN_INFO BANNER "version %s\n", VERSION);
+
+ ret = init_stats();
+ if (0 != ret)
+ goto out;
+
+ ret = init_debugfs();
+ if (0 != ret)
+ goto err_stats;
+
+ if (enabled)
+ ret = start_kthread();
+
+ goto out;
+
+err_stats:
+ ring_buffer_free(ring_buffer);
+out:
+ return ret;
+
+}
+
+/**
+ * detector_exit - Standard module cleanup code
+ */
+static void detector_exit(void)
+{
+ if (enabled) {
+ enabled = 0;
+ stop_kthread();
+ }
+
+ free_debugfs();
+ ring_buffer_free(ring_buffer); /* free up the ring buffer */
+
+}
+
+module_init(detector_init);
+module_exit(detector_exit);
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
index 50d431e469f5..9dbaeb574e63 100644
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -43,15 +43,14 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
struct iwmct_parser *parser = &priv->parser;
struct iwmct_fw_hdr *fw_hdr = &parser->versions;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
parser->file = file;
parser->file_size = file_size;
parser->cur_pos = 0;
- parser->buf = NULL;
-
+ parser->entry_point = 0;
parser->buf = kzalloc(block_size, GFP_KERNEL);
if (!parser->buf) {
LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
@@ -70,7 +69,7 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
parser->cur_pos += sizeof(struct iwmct_fw_hdr);
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
@@ -113,7 +112,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
struct iwmct_dbg *dbg = &priv->dbg;
struct iwmct_fw_sec_hdr *sec_hdr;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
<= parser->file_size) {
@@ -152,7 +151,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
"finished with section cur_pos=%zd\n", parser->cur_pos);
}
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, INIT, "<--\n");
return 0;
}
@@ -167,7 +166,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
int ret = 0;
u32 cmd = 0;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
addr, sec_size);
@@ -229,7 +228,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
hdr->cmd = cpu_to_le32(cmd);
/* send it down */
/* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, 0, parser->buf, trans_size);
+ ret = iwmct_tx(priv, parser->buf, trans_size);
if (ret != 0) {
LOG_INFO(priv, FW_DOWNLOAD,
"iwmct_tx returned %d\n", ret);
@@ -251,7 +250,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
if (sent < sec_size)
ret = -EINVAL;
exit:
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return ret;
}
@@ -262,7 +261,7 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
int ret;
u32 cmd;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
memset(parser->buf, 0, parser->buf_size);
cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
@@ -281,11 +280,11 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
/* send it down */
/* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
+ ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
if (ret)
LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
@@ -298,8 +297,16 @@ int iwmct_fw_load(struct iwmct_priv *priv)
__le32 addr;
int ret;
- /* clear parser struct */
- memset(&priv->parser, 0, sizeof(struct iwmct_parser));
+
+ LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
+ priv->barker);
+ LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
+
/* get the firmware */
ret = request_firmware(&raw, fw_name, &priv->func->dev);
@@ -317,6 +324,7 @@ int iwmct_fw_load(struct iwmct_priv *priv)
LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
+ /* clear parser struct */
ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
if (ret < 0) {
LOG_ERROR(priv, FW_DOWNLOAD,
@@ -324,7 +332,6 @@ int iwmct_fw_load(struct iwmct_priv *priv)
goto exit;
}
- /* checksum */
if (!iwmct_checksum(priv)) {
LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
ret = -EINVAL;
@@ -333,23 +340,18 @@ int iwmct_fw_load(struct iwmct_priv *priv)
/* download firmware to device */
while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
- if (iwmct_download_section(priv, pdata, len, addr)) {
+ ret = iwmct_download_section(priv, pdata, len, addr);
+ if (ret) {
LOG_ERROR(priv, FW_DOWNLOAD,
"%s download section failed\n", fw_name);
- ret = -EIO;
goto exit;
}
}
- iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
+ ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
exit:
kfree(priv->parser.buf);
-
- if (raw)
- release_firmware(raw);
-
- raw = NULL;
-
+ release_firmware(raw);
return ret;
}
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 43bd510e1872..740ff0738ea8 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -196,9 +196,7 @@ struct iwmct_priv {
struct list_head read_req_list;
};
-extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
- void *src, int count);
-
+extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
extern int iwmct_fw_load(struct iwmct_priv *priv);
extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
index aba8121f978c..4434bb16cea7 100644
--- a/drivers/misc/iwmc3200top/log.h
+++ b/drivers/misc/iwmc3200top/log.h
@@ -37,13 +37,26 @@
#define LOG_SEV_INFO 3
#define LOG_SEV_INFOEX 4
-#define LOG_SEV_FILTER_ALL \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING) | \
- BIT(LOG_SEV_INFO) | \
+/* Log levels not defined for FW */
+#define LOG_SEV_TRACE 5
+#define LOG_SEV_DUMP 6
+
+#define LOG_SEV_FW_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
BIT(LOG_SEV_INFOEX))
+#define LOG_SEV_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
+ BIT(LOG_SEV_INFOEX) | \
+ BIT(LOG_SEV_TRACE) | \
+ BIT(LOG_SEV_DUMP))
+
/* log source */
#define LOG_SRC_INIT 0
#define LOG_SRC_DEBUGFS 1
@@ -104,16 +117,16 @@ do { \
__func__, __LINE__, ##args); \
} while (0)
-#define LOG_INFOEX(priv, src, fmt, args...) \
+#define LOG_TRACE(priv, src, fmt, args...) \
do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
dev_dbg(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_HEXDUMP(src, ptr, len) \
do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
16, 1, ptr, len, false); \
} while (0)
@@ -142,7 +155,7 @@ ssize_t store_iwmct_log_level_fw(struct device *d,
#define LOG_ERROR(priv, src, fmt, args...)
#define LOG_WARNING(priv, src, fmt, args...)
#define LOG_INFO(priv, src, fmt, args...)
-#define LOG_INFOEX(priv, src, fmt, args...)
+#define LOG_TRACE(priv, src, fmt, args...)
#define LOG_HEXDUMP(src, ptr, len)
static inline void iwmct_log_top_message(struct iwmct_priv *priv,
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index fafcaa481d74..dd0a3913bf6d 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -49,6 +49,20 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_COPYRIGHT);
MODULE_FIRMWARE(FW_NAME(FW_API_VER));
+
+static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+ return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
+
+}
+int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+ int ret;
+ sdio_claim_host(priv->func);
+ ret = __iwmct_tx(priv, src, count);
+ sdio_release_host(priv->func);
+ return ret;
+}
/*
* This workers main task is to wait for OP_OPR_ALIVE
* from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
@@ -66,7 +80,7 @@ static void iwmct_rescan_worker(struct work_struct *ws)
ret = bus_rescan_devices(priv->func->dev.bus);
if (ret < 0)
- LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
+ LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
}
static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
@@ -137,7 +151,7 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
int ret;
u8 *buf;
- LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
+ LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
/* add padding to 256 for IWMC */
((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
@@ -158,27 +172,12 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
}
memcpy(buf, cmd, len);
-
- sdio_claim_host(priv->func);
- ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
- FW_HCMD_BLOCK_SIZE);
- sdio_release_host(priv->func);
+ ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
kfree(buf);
return ret;
}
-int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
- void *src, int count)
-{
- int ret;
-
- sdio_claim_host(priv->func);
- ret = sdio_memcpy_toio(priv->func, addr, src, count);
- sdio_release_host(priv->func);
-
- return ret;
-}
static void iwmct_irq_read_worker(struct work_struct *ws)
{
@@ -192,7 +191,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
priv = container_of(ws, struct iwmct_priv, isr_worker);
- LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
/* --------------------- Handshake with device -------------------- */
sdio_claim_host(priv->func);
@@ -273,8 +272,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
if (barker & BARKER_DNLOAD_SYNC_MSK) {
/* Send the same barker back */
- ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
- buf, iosize);
+ ret = __iwmct_tx(priv, buf, iosize);
if (ret) {
LOG_ERROR(priv, IRQ,
"error %d echoing barker\n", ret);
@@ -292,15 +290,6 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
sdio_release_host(priv->func);
-
- LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
- LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
- LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
- LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
-
if (priv->dbg.fw_download)
iwmct_fw_load(priv);
else
@@ -312,7 +301,7 @@ exit_release:
sdio_release_host(priv->func);
exit:
kfree(buf);
- LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
static void iwmct_irq(struct sdio_func *func)
@@ -325,12 +314,12 @@ static void iwmct_irq(struct sdio_func *func)
priv = sdio_get_drvdata(func);
- LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
/* read the function's status register */
val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
- LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
+ LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
if (!val) {
LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
@@ -372,7 +361,7 @@ static void iwmct_irq(struct sdio_func *func)
queue_work(priv->wq, &priv->isr_worker);
- LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
return;
@@ -660,7 +649,7 @@ static int __init iwmct_init(void)
/* Default log filter settings */
iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
- iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
+ iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
rc = sdio_register_driver(&iwmct_driver);
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index fcb6ec1af173..72450237a0f4 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -295,6 +295,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
/* On x86 a breakpoint stop requires it to be decremented */
if (addr + 1 == kgdbts_regs.ip)
offset = -1;
+#elif defined(CONFIG_SUPERH)
+ /* On SUPERH a breakpoint stop requires it to be decremented */
+ if (addr + 2 == kgdbts_regs.pc)
+ offset = -2;
#endif
if (strcmp(arg, "silent") &&
instruction_pointer(&kgdbts_regs) + offset != addr) {
@@ -305,6 +309,8 @@ static int check_and_rewind_pc(char *put_str, char *arg)
#ifdef CONFIG_X86
/* On x86 adjust the instruction pointer if needed */
kgdbts_regs.ip += offset;
+#elif defined(CONFIG_SUPERH)
+ kgdbts_regs.pc += offset;
#endif
return 0;
}
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 85f0e8cd875b..1f552c6e7579 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -85,7 +85,14 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
+ int devmaj = MAJOR(disk_devt(md->disk));
int devidx = MINOR(disk_devt(md->disk)) >> MMC_SHIFT;
+
+ if (!devmaj)
+ devidx = md->disk->first_minor >> MMC_SHIFT;
+
+ blk_cleanup_queue(md->queue.queue);
+
__clear_bit(devidx, dev_use);
put_disk(md->disk);
@@ -613,6 +620,7 @@ static int mmc_blk_probe(struct mmc_card *card)
return 0;
out:
+ mmc_cleanup_queue(&md->queue);
mmc_blk_put(md);
return err;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 49e582356c65..c5a7a855f4b1 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -90,9 +90,10 @@ static void mmc_request(struct request_queue *q)
struct request *req;
if (!mq) {
- printk(KERN_ERR "MMC: killing requests for dead queue\n");
- while ((req = blk_fetch_request(q)) != NULL)
+ while ((req = blk_fetch_request(q)) != NULL) {
+ req->cmd_flags |= REQ_QUIET;
__blk_end_request_all(req, -EIO);
+ }
return;
}
@@ -223,17 +224,18 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
struct request_queue *q = mq->queue;
unsigned long flags;
- /* Mark that we should start throwing out stragglers */
- spin_lock_irqsave(q->queue_lock, flags);
- q->queuedata = NULL;
- spin_unlock_irqrestore(q->queue_lock, flags);
-
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
/* Then terminate our worker thread */
kthread_stop(mq->thread);
+ /* Empty the queue */
+ spin_lock_irqsave(q->queue_lock, flags);
+ q->queuedata = NULL;
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
if (mq->bounce_sg)
kfree(mq->bounce_sg);
mq->bounce_sg = NULL;
@@ -245,8 +247,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
kfree(mq->bounce_buf);
mq->bounce_buf = NULL;
- blk_cleanup_queue(mq->queue);
-
mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index f53755533e7e..a4e37758be41 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -581,7 +581,7 @@ static int uart_carrier_raised(struct tty_port *tport)
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
unsigned int ret = sdio_uart_claim_func(port);
- if (ret) /* Missing hardware shoudn't block for carrier */
+ if (ret) /* Missing hardware shouldn't block for carrier */
return 1;
ret = sdio_uart_get_mctrl(port);
sdio_uart_release_func(port);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 30acd5265821..010c96403754 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -907,12 +907,12 @@ static void mmc_power_up(struct mmc_host *host)
*/
mmc_delay(10);
- if (host->f_min > 400000) {
+ if (host->f_min > MMC_INIT_FREQ) {
pr_warning("%s: Minimum clock frequency too high for "
"identification mode\n", mmc_hostname(host));
host->ios.clock = host->f_min;
} else
- host->ios.clock = 400000;
+ host->ios.clock = MMC_INIT_FREQ;
host->ios.power_mode = MMC_POWER_ON;
mmc_set_ios(host);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index a811c52a1659..d20b7bc0b2da 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -15,6 +15,16 @@
#define MMC_CMD_RETRIES 3
+/* Spec says initialisation must happen at or below 400kHz.
+ * Some MMC cards fail to initialise at 400kHz (even as low as 200kHz) and
+ * Some host controllers (eg. tmio_mmc) cannot garantee to set the clock below
+ * 400kHz (it rounds to the closest clock freq available).
+ *
+ * 64 kHz seems like a good speed/reliability compromise.
+ */
+
+#define MMC_INIT_FREQ 64000
+
struct mmc_bus_ops {
int (*awake)(struct mmc_host *);
int (*sleep)(struct mmc_host *);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c11189446a1f..0eac6c814904 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -207,7 +207,7 @@ static int mmc_read_ext_csd(struct mmc_card *card)
}
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
- if (card->ext_csd.rev > 3) {
+ if (card->ext_csd.rev > 5) {
printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
card->ext_csd.rev);
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index d3f55615c099..c8649dfb2d0c 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -650,11 +650,11 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,
flags = DDMA_FLAGS_IE;
if (host->flags & HOST_F_XMIT) {
- ret = au1xxx_dbdma_put_source_flags(channel,
- (void *)sg_virt(sg), len, flags);
+ ret = au1xxx_dbdma_put_source(channel,
+ sg_phys(sg), len, flags);
} else {
- ret = au1xxx_dbdma_put_dest_flags(channel,
- (void *)sg_virt(sg), len, flags);
+ ret = au1xxx_dbdma_put_dest(channel,
+ sg_phys(sg), len, flags);
}
if (!ret)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 7cccc8523747..053fc65f7dbb 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -43,10 +43,21 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock)
for (clock = host->mmc->f_min, clk = 0x80000080;
new_clock >= (clock<<1); clk >>= 1)
clock <<= 1;
+
+ /* Round the clock to the closest available. This is required
+ * for some fussy cards that dont like to initialise below 400kHz
+ */
+ if (new_clock - clock >= (clock << 1) - new_clock) {
+ clk >>= 1; clock <<= 1;
+ }
+
+ /* Clock enable */
clk |= 0x100;
}
- sd_config_write8(host, CNF_SD_CLK_MODE, clk >> 22);
+ if (host->set_clk_div)
+ host->set_clk_div(host->pdev, (clk>>22) & 1);
+
sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
}
@@ -424,17 +435,18 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (ios->clock)
tmio_mmc_set_clock(host, ios->clock);
- /* Power sequence - OFF -> ON -> UP */
+ /* Power sequence - OFF -> UP -> ON */
switch (ios->power_mode) {
case MMC_POWER_OFF: /* power down SD bus */
- sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 0);
tmio_mmc_clk_stop(host);
break;
- case MMC_POWER_ON: /* power up SD bus */
-
- sd_config_write8(host, CNF_PWR_CTL_2, 0x02);
+ case MMC_POWER_UP: /* power up SD bus */
+ if (host->set_pwr)
+ host->set_pwr(host->pdev, 1);
break;
- case MMC_POWER_UP: /* start bus clock */
+ case MMC_POWER_ON: /* enable bus clock */
tmio_mmc_clk_start(host);
break;
}
@@ -475,8 +487,8 @@ static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state)
ret = mmc_suspend_host(mmc, state);
/* Tell MFD core it can disable us now.*/
- if (!ret && cell->disable)
- cell->disable(dev);
+ if (!ret && cell->suspend)
+ cell->suspend(dev);
return ret;
}
@@ -485,21 +497,15 @@ static int tmio_mmc_resume(struct platform_device *dev)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct mmc_host *mmc = platform_get_drvdata(dev);
- struct tmio_mmc_host *host = mmc_priv(mmc);
int ret = 0;
/* Tell the MFD core we are ready to be enabled */
- if (cell->enable) {
- ret = cell->enable(dev);
+ if (cell->resume) {
+ ret = cell->resume(dev);
if (ret)
goto out;
}
- /* Enable the MMC/SD Control registers */
- sd_config_write16(host, CNF_CMD, SDCREN);
- sd_config_write32(host, CNF_CTL_BASE,
- (dev->resource[0].start >> host->bus_shift) & 0xfffe);
-
mmc_resume_host(mmc);
out:
@@ -514,17 +520,16 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
{
struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
struct tmio_mmc_data *pdata;
- struct resource *res_ctl, *res_cnf;
+ struct resource *res_ctl;
struct tmio_mmc_host *host;
struct mmc_host *mmc;
int ret = -EINVAL;
- if (dev->num_resources != 3)
+ if (dev->num_resources != 2)
goto out;
res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0);
- res_cnf = platform_get_resource(dev, IORESOURCE_MEM, 1);
- if (!res_ctl || !res_cnf)
+ if (!res_ctl)
goto out;
pdata = cell->driver_data;
@@ -539,8 +544,12 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
host = mmc_priv(mmc);
host->mmc = mmc;
+ host->pdev = dev;
platform_set_drvdata(dev, mmc);
+ host->set_pwr = pdata->set_pwr;
+ host->set_clk_div = pdata->set_clk_div;
+
/* SD control register space size is 0x200, 0x400 for bus_shift=1 */
host->bus_shift = resource_size(res_ctl) >> 10;
@@ -548,10 +557,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (!host->ctl)
goto host_free;
- host->cnf = ioremap(res_cnf->start, resource_size(res_cnf));
- if (!host->cnf)
- goto unmap_ctl;
-
mmc->ops = &tmio_mmc_ops;
mmc->caps = MMC_CAP_4_BIT_DATA;
mmc->f_max = pdata->hclk;
@@ -562,23 +567,9 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (cell->enable) {
ret = cell->enable(dev);
if (ret)
- goto unmap_cnf;
+ goto unmap_ctl;
}
- /* Enable the MMC/SD Control registers */
- sd_config_write16(host, CNF_CMD, SDCREN);
- sd_config_write32(host, CNF_CTL_BASE,
- (dev->resource[0].start >> host->bus_shift) & 0xfffe);
-
- /* Disable SD power during suspend */
- sd_config_write8(host, CNF_PWR_CTL_3, 0x01);
-
- /* The below is required but why? FIXME */
- sd_config_write8(host, CNF_STOP_CLK_CTL, 0x1f);
-
- /* Power down SD bus*/
- sd_config_write8(host, CNF_PWR_CTL_2, 0x00);
-
tmio_mmc_clk_stop(host);
reset(host);
@@ -586,14 +577,14 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
if (ret >= 0)
host->irq = ret;
else
- goto unmap_cnf;
+ goto unmap_ctl;
disable_mmc_irqs(host, TMIO_MASK_ALL);
ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED |
IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host);
if (ret)
- goto unmap_cnf;
+ goto unmap_ctl;
mmc_add_host(mmc);
@@ -605,8 +596,6 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
return 0;
-unmap_cnf:
- iounmap(host->cnf);
unmap_ctl:
iounmap(host->ctl);
host_free:
@@ -626,7 +615,6 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
mmc_remove_host(mmc);
free_irq(host->irq, host);
iounmap(host->ctl);
- iounmap(host->cnf);
mmc_free_host(mmc);
}
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 9fa998594974..692dc23363b9 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -11,26 +11,6 @@
#include <linux/highmem.h>
-#define CNF_CMD 0x04
-#define CNF_CTL_BASE 0x10
-#define CNF_INT_PIN 0x3d
-#define CNF_STOP_CLK_CTL 0x40
-#define CNF_GCLK_CTL 0x41
-#define CNF_SD_CLK_MODE 0x42
-#define CNF_PIN_STATUS 0x44
-#define CNF_PWR_CTL_1 0x48
-#define CNF_PWR_CTL_2 0x49
-#define CNF_PWR_CTL_3 0x4a
-#define CNF_CARD_DETECT_MODE 0x4c
-#define CNF_SD_SLOT 0x50
-#define CNF_EXT_GCLK_CTL_1 0xf0
-#define CNF_EXT_GCLK_CTL_2 0xf1
-#define CNF_EXT_GCLK_CTL_3 0xf9
-#define CNF_SD_LED_EN_1 0xfa
-#define CNF_SD_LED_EN_2 0xfe
-
-#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
-
#define CTL_SD_CMD 0x00
#define CTL_ARG_REG 0x04
#define CTL_STOP_INTERNAL_ACTION 0x08
@@ -110,7 +90,6 @@
struct tmio_mmc_host {
- void __iomem *cnf;
void __iomem *ctl;
unsigned long bus_shift;
struct mmc_command *cmd;
@@ -119,10 +98,16 @@ struct tmio_mmc_host {
struct mmc_host *mmc;
int irq;
+ /* Callbacks for clock / power control */
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
+
/* pio related stuff */
struct scatterlist *sg_ptr;
unsigned int sg_len;
unsigned int sg_off;
+
+ struct platform_device *pdev;
};
#include <linux/io.h>
@@ -163,25 +148,6 @@ static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr,
writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
-static inline void sd_config_write8(struct tmio_mmc_host *host, int addr,
- u8 val)
-{
- writeb(val, host->cnf + (addr << host->bus_shift));
-}
-
-static inline void sd_config_write16(struct tmio_mmc_host *host, int addr,
- u16 val)
-{
- writew(val, host->cnf + (addr << host->bus_shift));
-}
-
-static inline void sd_config_write32(struct tmio_mmc_host *host, int addr,
- u32 val)
-{
- writew(val, host->cnf + (addr << host->bus_shift));
- writew(val >> 16, host->cnf + ((addr + 2) << host->bus_shift));
-}
-
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c
index ca584d0380b4..ca584d0380b4 100755..100644
--- a/drivers/mtd/chips/cfi_util.c
+++ b/drivers/mtd/chips/cfi_util.c
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index 8aca5523a337..8aca5523a337 100755..100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 4c364d44ad59..dcf1e946ed11 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -251,12 +251,6 @@ config MTD_NETtel
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
-config MTD_ALCHEMY
- tristate "AMD Alchemy Pb1xxx/Db1xxx/RDK MTD support"
- depends on SOC_AU1X00 && MTD_PARTITIONS && MTD_CFI
- help
- Flash memory access on AMD Alchemy Pb/Db/RDK Reference Boards
-
config MTD_DILNETPC
tristate "CFI Flash device mapped on DIL/Net PC"
depends on X86 && MTD_CONCAT && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
@@ -428,15 +422,6 @@ config MTD_H720X
This enables access to the flash chips on the Hynix evaluation boards.
If you have such a board, say 'Y'.
-config MTD_OMAP_NOR
- tristate "TI OMAP board mappings"
- depends on MTD_CFI && ARCH_OMAP
- help
- This enables access to the NOR flash chips on TI OMAP-based
- boards defining flash platform devices and flash platform data.
- These boards include the Innovator, H2, H3, OSK, Perseus2, and
- more. If you have such a board, say 'Y'.
-
# This needs CFI or JEDEC, depending on the cards found.
config MTD_PCI
tristate "PCI MTD driver"
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index ce315214ff2b..bb035cd54c72 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o
obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o
obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o
obj-$(CONFIG_MTD_PCI) += pci.o
-obj-$(CONFIG_MTD_ALCHEMY) += alchemy-flash.o
obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o
obj-$(CONFIG_MTD_EDB7312) += edb7312.o
obj-$(CONFIG_MTD_IMPA7) += impa7.o
@@ -55,7 +54,6 @@ obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
obj-$(CONFIG_MTD_DMV182) += dmv182.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
-obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
diff --git a/drivers/mtd/maps/alchemy-flash.c b/drivers/mtd/maps/alchemy-flash.c
deleted file mode 100644
index 845ad4f2a542..000000000000
--- a/drivers/mtd/maps/alchemy-flash.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Flash memory access on AMD Alchemy evaluation boards
- *
- * (C) 2003, 2004 Pete Popov <ppopov@embeddedalley.com>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-
-#ifdef CONFIG_MIPS_PB1000
-#define BOARD_MAP_NAME "Pb1000 Flash"
-#define BOARD_FLASH_SIZE 0x00800000 /* 8MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_PB1500
-#define BOARD_MAP_NAME "Pb1500 Flash"
-#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_PB1100
-#define BOARD_MAP_NAME "Pb1100 Flash"
-#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_PB1550
-#define BOARD_MAP_NAME "Pb1550 Flash"
-#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_PB1200
-#define BOARD_MAP_NAME "Pb1200 Flash"
-#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
-#define BOARD_FLASH_WIDTH 2 /* 16-bits */
-#endif
-
-#ifdef CONFIG_MIPS_DB1000
-#define BOARD_MAP_NAME "Db1000 Flash"
-#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_DB1500
-#define BOARD_MAP_NAME "Db1500 Flash"
-#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_DB1100
-#define BOARD_MAP_NAME "Db1100 Flash"
-#define BOARD_FLASH_SIZE 0x02000000 /* 32MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_DB1550
-#define BOARD_MAP_NAME "Db1550 Flash"
-#define BOARD_FLASH_SIZE 0x08000000 /* 128MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#endif
-
-#ifdef CONFIG_MIPS_DB1200
-#define BOARD_MAP_NAME "Db1200 Flash"
-#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
-#define BOARD_FLASH_WIDTH 2 /* 16-bits */
-#endif
-
-#ifdef CONFIG_MIPS_BOSPORUS
-#define BOARD_MAP_NAME "Bosporus Flash"
-#define BOARD_FLASH_SIZE 0x01000000 /* 16MB */
-#define BOARD_FLASH_WIDTH 2 /* 16-bits */
-#endif
-
-#ifdef CONFIG_MIPS_MIRAGE
-#define BOARD_MAP_NAME "Mirage Flash"
-#define BOARD_FLASH_SIZE 0x04000000 /* 64MB */
-#define BOARD_FLASH_WIDTH 4 /* 32-bits */
-#define USE_LOCAL_ACCESSORS /* why? */
-#endif
-
-static struct map_info alchemy_map = {
- .name = BOARD_MAP_NAME,
-};
-
-static struct mtd_partition alchemy_partitions[] = {
- {
- .name = "User FS",
- .size = BOARD_FLASH_SIZE - 0x00400000,
- .offset = 0x0000000
- },{
- .name = "YAMON",
- .size = 0x0100000,
- .offset = MTDPART_OFS_APPEND,
- .mask_flags = MTD_WRITEABLE
- },{
- .name = "raw kernel",
- .size = (0x300000 - 0x40000), /* last 256KB is yamon env */
- .offset = MTDPART_OFS_APPEND,
- }
-};
-
-static struct mtd_info *mymtd;
-
-static int __init alchemy_mtd_init(void)
-{
- struct mtd_partition *parts;
- int nb_parts = 0;
- unsigned long window_addr;
- unsigned long window_size;
-
- /* Default flash buswidth */
- alchemy_map.bankwidth = BOARD_FLASH_WIDTH;
-
- window_addr = 0x20000000 - BOARD_FLASH_SIZE;
- window_size = BOARD_FLASH_SIZE;
-
- /*
- * Static partition definition selection
- */
- parts = alchemy_partitions;
- nb_parts = ARRAY_SIZE(alchemy_partitions);
- alchemy_map.size = window_size;
-
- /*
- * Now let's probe for the actual flash. Do it here since
- * specific machine settings might have been set above.
- */
- printk(KERN_NOTICE BOARD_MAP_NAME ": probing %d-bit flash bus\n",
- alchemy_map.bankwidth*8);
- alchemy_map.virt = ioremap(window_addr, window_size);
- mymtd = do_map_probe("cfi_probe", &alchemy_map);
- if (!mymtd) {
- iounmap(alchemy_map.virt);
- return -ENXIO;
- }
- mymtd->owner = THIS_MODULE;
-
- add_mtd_partitions(mymtd, parts, nb_parts);
- return 0;
-}
-
-static void __exit alchemy_mtd_cleanup(void)
-{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- iounmap(alchemy_map.virt);
- }
-}
-
-module_init(alchemy_mtd_init);
-module_exit(alchemy_mtd_cleanup);
-
-MODULE_AUTHOR("Embedded Alley Solutions, Inc");
-MODULE_DESCRIPTION(BOARD_MAP_NAME " MTD driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index ead0b2fab670..e69de29bb2d1 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -1,188 +0,0 @@
-/*
- * Flash memory support for various TI OMAP boards
- *
- * Copyright (C) 2001-2002 MontaVista Software Inc.
- * Copyright (C) 2003-2004 Texas Instruments
- * Copyright (C) 2004 Nokia Corporation
- *
- * Assembled using driver code copyright the companies above
- * and written by David Brownell, Jian Zhang <jzhang@ti.com>,
- * Tony Lindgren <tony@atomide.com> and others.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach/flash.h>
-#include <plat/tc.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { /* "RedBoot", */ "cmdlinepart", NULL };
-#endif
-
-struct omapflash_info {
- struct mtd_partition *parts;
- struct mtd_info *mtd;
- struct map_info map;
-};
-
-static void omap_set_vpp(struct map_info *map, int enable)
-{
- static int count;
- u32 l;
-
- if (cpu_class_is_omap1()) {
- if (enable) {
- if (count++ == 0) {
- l = omap_readl(EMIFS_CONFIG);
- l |= OMAP_EMIFS_CONFIG_WP;
- omap_writel(l, EMIFS_CONFIG);
- }
- } else {
- if (count && (--count == 0)) {
- l = omap_readl(EMIFS_CONFIG);
- l &= ~OMAP_EMIFS_CONFIG_WP;
- omap_writel(l, EMIFS_CONFIG);
- }
- }
- }
-}
-
-static int __init omapflash_probe(struct platform_device *pdev)
-{
- int err;
- struct omapflash_info *info;
- struct flash_platform_data *pdata = pdev->dev.platform_data;
- struct resource *res = pdev->resource;
- unsigned long size = res->end - res->start + 1;
-
- info = kzalloc(sizeof(struct omapflash_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- if (!request_mem_region(res->start, size, "flash")) {
- err = -EBUSY;
- goto out_free_info;
- }
-
- info->map.virt = ioremap(res->start, size);
- if (!info->map.virt) {
- err = -ENOMEM;
- goto out_release_mem_region;
- }
- info->map.name = dev_name(&pdev->dev);
- info->map.phys = res->start;
- info->map.size = size;
- info->map.bankwidth = pdata->width;
- info->map.set_vpp = omap_set_vpp;
-
- simple_map_init(&info->map);
- info->mtd = do_map_probe(pdata->map_name, &info->map);
- if (!info->mtd) {
- err = -EIO;
- goto out_iounmap;
- }
- info->mtd->owner = THIS_MODULE;
-
- info->mtd->dev.parent = &pdev->dev;
-
-#ifdef CONFIG_MTD_PARTITIONS
- err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
- if (err > 0)
- add_mtd_partitions(info->mtd, info->parts, err);
- else if (err <= 0 && pdata->parts)
- add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
- else
-#endif
- add_mtd_device(info->mtd);
-
- platform_set_drvdata(pdev, info);
-
- return 0;
-
-out_iounmap:
- iounmap(info->map.virt);
-out_release_mem_region:
- release_mem_region(res->start, size);
-out_free_info:
- kfree(info);
-
- return err;
-}
-
-static int __exit omapflash_remove(struct platform_device *pdev)
-{
- struct omapflash_info *info = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
-
- if (info) {
- if (info->parts) {
- del_mtd_partitions(info->mtd);
- kfree(info->parts);
- } else
- del_mtd_device(info->mtd);
- map_destroy(info->mtd);
- release_mem_region(info->map.phys, info->map.size);
- iounmap((void __iomem *) info->map.virt);
- kfree(info);
- }
-
- return 0;
-}
-
-static struct platform_driver omapflash_driver = {
- .remove = __exit_p(omapflash_remove),
- .driver = {
- .name = "omapflash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init omapflash_init(void)
-{
- return platform_driver_probe(&omapflash_driver, omapflash_probe);
-}
-
-static void __exit omapflash_exit(void)
-{
- platform_driver_unregister(&omapflash_driver);
-}
-
-module_init(omapflash_init);
-module_exit(omapflash_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MTD NOR map driver for TI OMAP boards");
-MODULE_ALIAS("platform:omapflash");
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 61e4eb48bb2d..1d91333010b1 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -217,7 +217,7 @@ static int __devinit of_flash_probe(struct of_device *dev,
dev_set_drvdata(&dev->dev, info);
- mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL);
+ mtd_list = kzalloc(sizeof(*mtd_list) * count, GFP_KERNEL);
if (!mtd_list)
goto err_flash_remove;
diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c
index dafb91944e70..76a76be5a7bd 100644
--- a/drivers/mtd/maps/plat-ram.c
+++ b/drivers/mtd/maps/plat-ram.c
@@ -4,7 +4,7 @@
* http://www.simtec.co.uk/products/SWLINUX/
* Ben Dooks <ben@simtec.co.uk>
*
- * Generic platfrom device based RAM map
+ * Generic platform device based RAM map
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index a714ec482761..92e12df0917f 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -322,7 +322,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
/* Panics must be written immediately */
- if (reason == KMSG_DUMP_PANIC) {
+ if (reason != KMSG_DUMP_OOPS) {
if (!cxt->mtd->panic_write)
printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
else
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 677cd53f18c3..22cee485be94 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -95,12 +95,6 @@ config MTD_NAND_OMAP_PREFETCH_DMA
or in DMA interrupt mode.
Say y for DMA mode or MPU mode will be used
-config MTD_NAND_TS7250
- tristate "NAND Flash device on TS-7250 board"
- depends on MACH_TS72XX
- help
- Support for NAND flash on Technologic Systems TS-7250 platform.
-
config MTD_NAND_IDS
tristate
@@ -481,11 +475,11 @@ config MTD_NAND_SOCRATES
help
Enables support for NAND Flash chips wired onto Socrates board.
-config MTD_NAND_W90P910
- tristate "Support for NAND on w90p910 evaluation board."
+config MTD_NAND_NUC900
+ tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
depends on ARCH_W90X900 && MTD_PARTITIONS
help
This enables the driver for the NAND Flash on evaluation board based
- on w90p910.
+ on w90p910 / NUC9xx.
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 1407bd144015..39b4af2496a2 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o
obj-$(CONFIG_MTD_NAND_H1900) += h1910.o
obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o
obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o
-obj-$(CONFIG_MTD_NAND_TS7250) += ts7250.o
obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o
obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o
obj-$(CONFIG_MTD_NAND_NDFC) += ndfc.o
@@ -39,7 +38,7 @@ obj-$(CONFIG_MTD_NAND_SH_FLCTL) += sh_flctl.o
obj-$(CONFIG_MTD_NAND_MXC) += mxc_nand.o
obj-$(CONFIG_MTD_NAND_SOCRATES) += socrates_nand.o
obj-$(CONFIG_MTD_NAND_TXX9NDFMC) += txx9ndfmc.o
-obj-$(CONFIG_MTD_NAND_W90P910) += w90p910_nand.o
+obj-$(CONFIG_MTD_NAND_NUC900) += nuc900_nand.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_MTD_NAND_BCM_UMI) += bcm_umi_nand.o nand_bcm_umi.o
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 92c334ff4508..3ffe05db4923 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -19,6 +19,7 @@
#include <asm/io.h>
#include <asm/mach-au1x00/au1xxx.h>
+#include <asm/mach-db1x00/bcsr.h>
/*
* MTD structure for NAND controller
@@ -450,7 +451,7 @@ static int __init au1xxx_nand_init(void)
u32 nand_phys;
/* Allocate memory for MTD device structure and private data */
- au1550_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
+ au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
if (!au1550_mtd) {
printk("Unable to allocate NAND MTD dev structure.\n");
return -ENOMEM;
@@ -459,10 +460,6 @@ static int __init au1xxx_nand_init(void)
/* Get pointer to private data */
this = (struct nand_chip *)(&au1550_mtd[1]);
- /* Initialize structures */
- memset(au1550_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
/* Link the private data with the MTD structure */
au1550_mtd->priv = this;
au1550_mtd->owner = THIS_MODULE;
@@ -475,7 +472,8 @@ static int __init au1xxx_nand_init(void)
/* set gpio206 high */
au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR);
- boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr->status >> 6) & 0x1);
+ boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
+
switch (boot_swapboot) {
case 0:
case 2:
@@ -542,7 +540,7 @@ static int __init au1xxx_nand_init(void)
}
nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = (void __iomem *)ioremap(nand_phys, 0x1000);
+ p_nand = ioremap(nand_phys, 0x1000);
/* make controller and MTD agree */
if (NAND_CS == 0)
@@ -587,7 +585,7 @@ static int __init au1xxx_nand_init(void)
return 0;
outio:
- iounmap((void *)p_nand);
+ iounmap(p_nand);
outmem:
kfree(au1550_mtd);
@@ -608,7 +606,7 @@ static void __exit au1550_cleanup(void)
kfree(au1550_mtd);
/* Unmap */
- iounmap((void *)p_nand);
+ iounmap(p_nand);
}
module_exit(au1550_cleanup);
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index fe3eba87de40..e2eeaf1e51a3 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -566,8 +566,8 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, res1->end - res1->start);
- base = ioremap(res2->start, res2->end - res2->start);
+ vaddr = ioremap(res1->start, resource_size(res1));
+ base = ioremap(res2->start, resource_size(res2));
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -EINVAL;
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index ae30fb6eed97..1b8328fbb9dc 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -874,7 +874,7 @@ static int __devinit fsl_elbc_chip_probe(struct fsl_elbc_ctrl *ctrl,
priv->ctrl = ctrl;
priv->dev = ctrl->dev;
- priv->vbase = ioremap(res.start, res.end - res.start + 1);
+ priv->vbase = ioremap(res.start, resource_size(&res));
if (!priv->vbase) {
dev_err(ctrl->dev, "failed to map chip region\n");
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index 071a60cb4204..ab06a5b514a9 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -302,7 +302,7 @@ static int __devinit fun_probe(struct of_device *ofdev,
FSL_UPM_WAIT_WRITE_BYTE;
fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
- io_res.end - io_res.start + 1);
+ resource_size(&io_res));
if (!fun->io_base) {
ret = -ENOMEM;
goto err2;
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 8f902e75aa85..0cde618bcc1e 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -181,11 +181,11 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
res = platform_get_resource(dev, IORESOURCE_MEM, 1);
iounmap(gpiomtd->io_sync);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
@@ -208,14 +208,14 @@ static void __iomem *request_and_remap(struct resource *res, size_t size,
{
void __iomem *ptr;
- if (!request_mem_region(res->start, res->end - res->start + 1, name)) {
+ if (!request_mem_region(res->start, resource_size(res), name)) {
*err = -EBUSY;
return NULL;
}
ptr = ioremap(res->start, size);
if (!ptr) {
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
*err = -ENOMEM;
}
return ptr;
@@ -338,10 +338,10 @@ err_nwp:
err_nce:
iounmap(gpiomtd->io_sync);
if (res1)
- release_mem_region(res1->start, res1->end - res1->start + 1);
+ release_mem_region(res1->start, resource_size(res1));
err_sync:
iounmap(gpiomtd->nand_chip.IO_ADDR_R);
- release_mem_region(res0->start, res0->end - res0->start + 1);
+ release_mem_region(res0->start, resource_size(res0));
err_map:
kfree(gpiomtd);
return ret;
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 66123419f65d..59cbf66607c7 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -104,21 +104,21 @@ static int nomadik_nand_probe(struct platform_device *pdev)
ret = -EIO;
goto err_unmap;
}
- host->addr_va = ioremap(res->start, res->end - res->start + 1);
+ host->addr_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->data_va = ioremap(res->start, res->end - res->start + 1);
+ host->data_va = ioremap(res->start, resource_size(res));
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
if (!res) {
ret = -EIO;
goto err_unmap;
}
- host->cmd_va = ioremap(res->start, res->end - res->start + 1);
+ host->cmd_va = ioremap(res->start, resource_size(res));
if (!host->addr_va || !host->data_va || !host->cmd_va) {
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/w90p910_nand.c b/drivers/mtd/nand/nuc900_nand.c
index 7680e731348a..6eddf7361ed7 100644
--- a/drivers/mtd/nand/w90p910_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009 Nuvoton technology corporation.
+ * Copyright © 2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
@@ -55,7 +55,7 @@
#define write_addr_reg(dev, val) \
__raw_writel((val), (dev)->reg + REG_SMADDR)
-struct w90p910_nand {
+struct nuc900_nand {
struct mtd_info mtd;
struct nand_chip chip;
void __iomem *reg;
@@ -76,49 +76,49 @@ static const struct mtd_partition partitions[] = {
}
};
-static unsigned char w90p910_nand_read_byte(struct mtd_info *mtd)
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
{
unsigned char ret;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
ret = (unsigned char)read_data_reg(nand);
return ret;
}
-static void w90p910_nand_read_buf(struct mtd_info *mtd,
- unsigned char *buf, int len)
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+ unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
buf[i] = (unsigned char)read_data_reg(nand);
}
-static void w90p910_nand_write_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++)
write_data_reg(nand, buf[i]);
}
-static int w90p910_verify_buf(struct mtd_info *mtd,
- const unsigned char *buf, int len)
+static int nuc900_verify_buf(struct mtd_info *mtd,
+ const unsigned char *buf, int len)
{
int i;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
for (i = 0; i < len; i++) {
if (buf[i] != (unsigned char)read_data_reg(nand))
@@ -128,7 +128,7 @@ static int w90p910_verify_buf(struct mtd_info *mtd,
return 0;
}
-static int w90p910_check_rb(struct w90p910_nand *nand)
+static int nuc900_check_rb(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -139,24 +139,24 @@ static int w90p910_check_rb(struct w90p910_nand *nand)
return val;
}
-static int w90p910_nand_devready(struct mtd_info *mtd)
+static int nuc900_nand_devready(struct mtd_info *mtd)
{
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
int ready;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
- ready = (w90p910_check_rb(nand)) ? 1 : 0;
+ ready = (nuc900_check_rb(nand)) ? 1 : 0;
return ready;
}
-static void w90p910_nand_command_lp(struct mtd_info *mtd,
- unsigned int command, int column, int page_addr)
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+ int column, int page_addr)
{
register struct nand_chip *chip = mtd->priv;
- struct w90p910_nand *nand;
+ struct nuc900_nand *nand;
- nand = container_of(mtd, struct w90p910_nand, mtd);
+ nand = container_of(mtd, struct nuc900_nand, mtd);
if (command == NAND_CMD_READOOB) {
column += mtd->writesize;
@@ -212,7 +212,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
write_cmd_reg(nand, NAND_CMD_STATUS);
write_cmd_reg(nand, command);
- while (!w90p910_check_rb(nand))
+ while (!nuc900_check_rb(nand))
;
return;
@@ -241,7 +241,7 @@ static void w90p910_nand_command_lp(struct mtd_info *mtd,
}
-static void w90p910_nand_enable(struct w90p910_nand *nand)
+static void nuc900_nand_enable(struct nuc900_nand *nand)
{
unsigned int val;
spin_lock(&nand->lock);
@@ -262,37 +262,37 @@ static void w90p910_nand_enable(struct w90p910_nand *nand)
spin_unlock(&nand->lock);
}
-static int __devinit w90p910_nand_probe(struct platform_device *pdev)
+static int __devinit nuc900_nand_probe(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand;
+ struct nuc900_nand *nuc900_nand;
struct nand_chip *chip;
int retval;
struct resource *res;
retval = 0;
- w90p910_nand = kzalloc(sizeof(struct w90p910_nand), GFP_KERNEL);
- if (!w90p910_nand)
+ nuc900_nand = kzalloc(sizeof(struct nuc900_nand), GFP_KERNEL);
+ if (!nuc900_nand)
return -ENOMEM;
- chip = &(w90p910_nand->chip);
+ chip = &(nuc900_nand->chip);
- w90p910_nand->mtd.priv = chip;
- w90p910_nand->mtd.owner = THIS_MODULE;
- spin_lock_init(&w90p910_nand->lock);
+ nuc900_nand->mtd.priv = chip;
+ nuc900_nand->mtd.owner = THIS_MODULE;
+ spin_lock_init(&nuc900_nand->lock);
- w90p910_nand->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(w90p910_nand->clk)) {
+ nuc900_nand->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(nuc900_nand->clk)) {
retval = -ENOENT;
goto fail1;
}
- clk_enable(w90p910_nand->clk);
-
- chip->cmdfunc = w90p910_nand_command_lp;
- chip->dev_ready = w90p910_nand_devready;
- chip->read_byte = w90p910_nand_read_byte;
- chip->write_buf = w90p910_nand_write_buf;
- chip->read_buf = w90p910_nand_read_buf;
- chip->verify_buf = w90p910_verify_buf;
+ clk_enable(nuc900_nand->clk);
+
+ chip->cmdfunc = nuc900_nand_command_lp;
+ chip->dev_ready = nuc900_nand_devready;
+ chip->read_byte = nuc900_nand_read_byte;
+ chip->write_buf = nuc900_nand_write_buf;
+ chip->read_buf = nuc900_nand_read_buf;
+ chip->verify_buf = nuc900_verify_buf;
chip->chip_delay = 50;
chip->options = 0;
chip->ecc.mode = NAND_ECC_SOFT;
@@ -308,75 +308,75 @@ static int __devinit w90p910_nand_probe(struct platform_device *pdev)
goto fail1;
}
- w90p910_nand->reg = ioremap(res->start, resource_size(res));
- if (!w90p910_nand->reg) {
+ nuc900_nand->reg = ioremap(res->start, resource_size(res));
+ if (!nuc900_nand->reg) {
retval = -ENOMEM;
goto fail2;
}
- w90p910_nand_enable(w90p910_nand);
+ nuc900_nand_enable(nuc900_nand);
- if (nand_scan(&(w90p910_nand->mtd), 1)) {
+ if (nand_scan(&(nuc900_nand->mtd), 1)) {
retval = -ENXIO;
goto fail3;
}
- add_mtd_partitions(&(w90p910_nand->mtd), partitions,
+ add_mtd_partitions(&(nuc900_nand->mtd), partitions,
ARRAY_SIZE(partitions));
- platform_set_drvdata(pdev, w90p910_nand);
+ platform_set_drvdata(pdev, nuc900_nand);
return retval;
-fail3: iounmap(w90p910_nand->reg);
+fail3: iounmap(nuc900_nand->reg);
fail2: release_mem_region(res->start, resource_size(res));
-fail1: kfree(w90p910_nand);
+fail1: kfree(nuc900_nand);
return retval;
}
-static int __devexit w90p910_nand_remove(struct platform_device *pdev)
+static int __devexit nuc900_nand_remove(struct platform_device *pdev)
{
- struct w90p910_nand *w90p910_nand = platform_get_drvdata(pdev);
+ struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
struct resource *res;
- iounmap(w90p910_nand->reg);
+ iounmap(nuc900_nand->reg);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- clk_disable(w90p910_nand->clk);
- clk_put(w90p910_nand->clk);
+ clk_disable(nuc900_nand->clk);
+ clk_put(nuc900_nand->clk);
- kfree(w90p910_nand);
+ kfree(nuc900_nand);
platform_set_drvdata(pdev, NULL);
return 0;
}
-static struct platform_driver w90p910_nand_driver = {
- .probe = w90p910_nand_probe,
- .remove = __devexit_p(w90p910_nand_remove),
+static struct platform_driver nuc900_nand_driver = {
+ .probe = nuc900_nand_probe,
+ .remove = __devexit_p(nuc900_nand_remove),
.driver = {
- .name = "w90p910-fmi",
+ .name = "nuc900-fmi",
.owner = THIS_MODULE,
},
};
-static int __init w90p910_nand_init(void)
+static int __init nuc900_nand_init(void)
{
- return platform_driver_register(&w90p910_nand_driver);
+ return platform_driver_register(&nuc900_nand_driver);
}
-static void __exit w90p910_nand_exit(void)
+static void __exit nuc900_nand_exit(void)
{
- platform_driver_unregister(&w90p910_nand_driver);
+ platform_driver_unregister(&nuc900_nand_driver);
}
-module_init(w90p910_nand_init);
-module_exit(w90p910_nand_exit);
+module_init(nuc900_nand_init);
+module_exit(nuc900_nand_exit);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910 nand driver!");
+MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-fmi");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index f59c07427af3..990346036d37 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -74,6 +74,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
struct mtd_info *mtd;
struct nand_chip *nc;
struct orion_nand_data *board;
+ struct resource *res;
void __iomem *io_base;
int ret = 0;
#ifdef CONFIG_MTD_PARTITIONS
@@ -89,8 +90,13 @@ static int __init orion_nand_probe(struct platform_device *pdev)
}
mtd = (struct mtd_info *)(nc + 1);
- io_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -ENODEV;
+ goto no_res;
+ }
+
+ io_base = ioremap(res->start, resource_size(res));
if (!io_base) {
printk(KERN_ERR "orion_nand: ioremap failed\n");
ret = -EIO;
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index fa6e9c7fe511..c41ad2285c63 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -957,7 +957,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev)
/* currently we assume we have the one resource */
res = pdev->resource;
- size = res->end - res->start + 1;
+ size = resource_size(res);
info->area = request_mem_region(res->start, size, pdev->name);
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index 02bef21f2e4b..4260ab78f95c 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -797,7 +797,7 @@ static int __init flctl_probe(struct platform_device *pdev)
goto err;
}
- flctl->reg = ioremap(res->start, res->end - res->start + 1);
+ flctl->reg = ioremap(res->start, resource_size(res));
if (flctl->reg == NULL) {
printk(KERN_ERR "%s: ioremap error.\n", __func__);
ret = -ENOMEM;
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index 92c73344a669..65fa46957dbb 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -318,7 +318,7 @@ static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
int ret;
if (cell->enable) {
@@ -362,7 +362,7 @@ static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
if (cell->disable)
@@ -371,7 +371,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
static int tmio_probe(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
struct tmio_nand_data *data = cell->driver_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
@@ -404,14 +404,14 @@ static int tmio_probe(struct platform_device *dev)
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
- tmio->ccr = ioremap(ccr->start, ccr->end - ccr->start + 1);
+ tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
- tmio->fcr = ioremap(fcr->start, fcr->end - fcr->start + 1);
+ tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
@@ -515,7 +515,7 @@ static int tmio_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
if (cell->suspend)
cell->suspend(dev);
@@ -526,7 +526,7 @@ static int tmio_suspend(struct platform_device *dev, pm_message_t state)
static int tmio_resume(struct platform_device *dev)
{
- struct mfd_cell *cell = (struct mfd_cell *)dev->dev.platform_data;
+ struct mfd_cell *cell = dev_get_platdata(&dev->dev);
/* FIXME - is this required or merely another attack of the broken
* SHARP platform? Looks suspicious.
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
deleted file mode 100644
index 0f5562aeedc1..000000000000
--- a/drivers/mtd/nand/ts7250.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * drivers/mtd/nand/ts7250.c
- *
- * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
- *
- * Derived from drivers/mtd/nand/edb7312.c
- * Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
- *
- * Derived from drivers/mtd/nand/autcpu12.c
- * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Overview:
- * This is a device driver for the NAND flash device found on the
- * TS-7250 board which utilizes a Samsung 32 Mbyte part.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/ts72xx.h>
-
-#include <asm/sizes.h>
-#include <asm/mach-types.h>
-
-/*
- * MTD structure for TS7250 board
- */
-static struct mtd_info *ts7250_mtd = NULL;
-
-#ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { "cmdlinepart", NULL };
-
-#define NUM_PARTITIONS 3
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info32[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x01d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x01d04000,
- .size = 0x002fc000,
- },
-};
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info128[] = {
- {
- .name = "TS-BOOTROM",
- .offset = 0x00000000,
- .size = 0x00004000,
- }, {
- .name = "Linux",
- .offset = 0x00004000,
- .size = 0x07d00000,
- }, {
- .name = "RedBoot",
- .offset = 0x07d04000,
- .size = 0x002fc000,
- },
-};
-#endif
-
-
-/*
- * hardware specific access to control-lines
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 1
- * NAND_ALE: bit 2 -> bit 0
- */
-static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
- struct nand_chip *chip = mtd->priv;
-
- if (ctrl & NAND_CTRL_CHANGE) {
- unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
- unsigned char bits;
-
- bits = (ctrl & NAND_NCE) << 2;
- bits |= ctrl & NAND_CLE;
- bits |= (ctrl & NAND_ALE) >> 2;
-
- __raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
- }
-
- if (cmd != NAND_CMD_NONE)
- writeb(cmd, chip->IO_ADDR_W);
-}
-
-/*
- * read device ready pin
- */
-static int ts7250_device_ready(struct mtd_info *mtd)
-{
- return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
-}
-
-/*
- * Main initialization routine
- */
-static int __init ts7250_init(void)
-{
- struct nand_chip *this;
- const char *part_type = 0;
- int mtd_parts_nb = 0;
- struct mtd_partition *mtd_parts = 0;
-
- if (!machine_is_ts72xx() || board_is_ts7200())
- return -ENXIO;
-
- /* Allocate memory for MTD device structure and private data */
- ts7250_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!ts7250_mtd) {
- printk("Unable to allocate TS7250 NAND MTD device structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&ts7250_mtd[1]);
-
- /* Initialize structures */
- memset(ts7250_mtd, 0, sizeof(struct mtd_info));
- memset(this, 0, sizeof(struct nand_chip));
-
- /* Link the private data with the MTD structure */
- ts7250_mtd->priv = this;
- ts7250_mtd->owner = THIS_MODULE;
-
- /* insert callbacks */
- this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
- this->cmd_ctrl = ts7250_hwcontrol;
- this->dev_ready = ts7250_device_ready;
- this->chip_delay = 15;
- this->ecc.mode = NAND_ECC_SOFT;
-
- printk("Searching for NAND flash...\n");
- /* Scan to find existence of the device */
- if (nand_scan(ts7250_mtd, 1)) {
- kfree(ts7250_mtd);
- return -ENXIO;
- }
-#ifdef CONFIG_MTD_PARTITIONS
- ts7250_mtd->name = "ts7250-nand";
- mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
- if (mtd_parts_nb > 0)
- part_type = "command line";
- else
- mtd_parts_nb = 0;
-#endif
- if (mtd_parts_nb == 0) {
- mtd_parts = partition_info32;
- if (ts7250_mtd->size >= (128 * 0x100000))
- mtd_parts = partition_info128;
- mtd_parts_nb = NUM_PARTITIONS;
- part_type = "static";
- }
-
- /* Register the partitions */
- printk(KERN_NOTICE "Using %s partition definition\n", part_type);
- add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);
-
- /* Return happy */
- return 0;
-}
-
-module_init(ts7250_init);
-
-/*
- * Clean up routine
- */
-static void __exit ts7250_cleanup(void)
-{
- /* Unregister the device */
- del_mtd_device(ts7250_mtd);
-
- /* Free the MTD device structure */
- kfree(ts7250_mtd);
-}
-
-module_exit(ts7250_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
-MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 277786ebaa2c..1361574e2b00 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -291,8 +291,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
*/
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
- int error, ubi_num, vol_id;
- struct ubi_volume_desc *ret;
+ int error, ubi_num, vol_id, mod;
struct inode *inode;
struct path path;
@@ -306,16 +305,16 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
return ERR_PTR(error);
inode = path.dentry->d_inode;
+ mod = inode->i_mode;
ubi_num = ubi_major2num(imajor(inode));
vol_id = iminor(inode) - 1;
+ path_put(&path);
+ if (!S_ISCHR(mod))
+ return ERR_PTR(-EINVAL);
if (vol_id >= 0 && ubi_num >= 0)
- ret = ubi_open_volume(ubi_num, vol_id, mode);
- else
- ret = ERR_PTR(-ENODEV);
-
- path_put(&path);
- return ret;
+ return ubi_open_volume(ubi_num, vol_id, mode);
+ return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 1afc61e7455d..40044028d682 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -566,6 +566,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
vol->alignment = be32_to_cpu(vtbl[i].alignment);
vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
+ vol->upd_marker = vtbl[i].upd_marker;
vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
vol->name_len = be16_to_cpu(vtbl[i].name_len);
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815d..5df46c230b07 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
};
-static struct pci_device_id vortex_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9e..9d59654748b1 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -394,7 +394,7 @@ static int cp_get_eeprom(struct net_device *dev,
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
-static struct pci_device_id cp_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daabd..321e73aabb2b 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -231,7 +231,7 @@ static const struct {
};
-static struct pci_device_id rtl8139_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dd9a09c72dff..16be30f733f9 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -920,7 +920,7 @@ config NET_NETX
config TI_DAVINCI_EMAC
tristate "TI DaVinci EMAC Support"
- depends on ARM && ARCH_DAVINCI
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
select PHYLIB
help
This driver supports TI's DaVinci Ethernet .
@@ -2618,6 +2618,28 @@ config IXGBE_DCB
If unsure, say N.
+config IXGBEVF
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
@@ -3246,4 +3268,18 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+config VBUS_ENET
+ tristate "VBUS Ethernet Driver"
+ default n
+ depends on VBUS_PROXY
+ help
+ A virtualized 802.x network device based on the VBUS
+ "virtual-ethernet" interface. It can be used with any
+ hypervisor/kernel that supports the vbus+venet protocol.
+
+config VBUS_ENET_DEBUG
+ bool "Enable Debugging"
+ depends on VBUS_ENET
+ default n
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da9..ec7ab957039f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_IP1000) += ipg.o
obj-$(CONFIG_CHELSIO_T1) += chelsio/
@@ -282,6 +283,7 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
obj-$(CONFIG_NETXEN_NIC) += netxen/
obj-$(CONFIG_NIU) += niu.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_VBUS_ENET) += vbus-enet.o
obj-$(CONFIG_SFC) += sfc/
obj-$(CONFIG_WIMAX) += wimax/
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a994753..ec624ab03e88 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -134,7 +134,7 @@
#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
#endif
-static struct pci_device_id acenic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc75..545c791f477e 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
-static struct pci_device_id amd8111e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754d..b68e1eb405ff 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -144,7 +144,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
free_netdev(dev);
}
-static struct pci_device_id com20020pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index c37ee9e6b67b..39e1c0d39476 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -68,6 +68,7 @@ config W90P910_ETH
tristate "Nuvoton w90p910 Ethernet support"
depends on ARM && ARCH_W90X900
select PHYLIB
+ select MII
help
Say Y here if you want to use built-in Ethernet ports
on w90p910 processor.
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895c..bf72d57a0afd 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
* (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -20,9 +22,9 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <mach/ep93xx-regs.h>
-#include <mach/platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +187,47 @@ struct ep93xx_priv
#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg);
+static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int data;
+ int i;
+
+ wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_info("mdio read timed out\n");
+ data = 0xffff;
+ } else {
+ data = rdl(ep, REG_MIIDATA);
+ }
+
+ return data;
+}
+
+static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int i;
+
+ wrl(ep, REG_MIIDATA, data);
+ wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10)
+ pr_info("mdio write timed out\n");
+}
static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
{
@@ -217,14 +259,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
rstat->rstat1 = 0;
if (!(rstat0 & RSTAT0_EOF))
- printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_EOB))
- printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
- printk(KERN_CRIT "ep93xx_rx: entry mismatch "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
ep->stats.rx_errors++;
@@ -241,8 +280,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
length = rstat1 & RSTAT1_FRAME_LENGTH;
if (length > MAX_PKT_SIZE) {
- printk(KERN_NOTICE "ep93xx_rx: invalid length "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
goto err;
}
@@ -371,11 +409,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
tstat->tstat0 = 0;
if (tstat0 & TSTAT0_FA)
- printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
- " %.8x\n", tstat0);
+ pr_crit("frame aborted %.8x\n", tstat0);
if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
- printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch "
- " %.8x\n", tstat0);
+ pr_crit("entry mismatch %.8x\n", tstat0);
if (tstat0 & TSTAT0_TXWE) {
int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +572,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
return 1;
}
@@ -581,7 +617,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n");
+ pr_crit("hw failed to start\n");
return 1;
}
@@ -617,7 +653,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
}
if (i == 10)
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
}
static int ep93xx_open(struct net_device *dev)
@@ -681,48 +717,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int data;
- int i;
-
- wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10) {
- printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
- data = 0xffff;
- } else {
- data = rdl(ep, REG_MIIDATA);
- }
-
- return data;
-}
-
-static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int i;
-
- wrl(ep, REG_MIIDATA, data);
- wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10)
- printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
-}
-
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +819,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
+ int irq;
int err;
if (pdev == NULL)
return -ENODEV;
data = pdev->dev.platform_data;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq < 0)
+ return -ENXIO;
+
dev = ep93xx_dev_alloc(data);
if (dev == NULL) {
err = -ENOMEM;
@@ -842,23 +843,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- ep->res = request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
- dev_name(&pdev->dev));
+ ep->res = request_mem_region(mem->start, resource_size(mem),
+ dev_name(&pdev->dev));
if (ep->res == NULL) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
- ep->base_addr = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start);
+ ep->base_addr = ioremap(mem->start, resource_size(mem));
if (ep->base_addr == NULL) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
err = -EIO;
goto err_out;
}
- ep->irq = pdev->resource[1].start;
+ ep->irq = irq;
ep->mii.phy_id = data->phy_id;
ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +876,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, "
- "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
- ep->irq, data->dev_addr[0], data->dev_addr[1],
- data->dev_addr[2], data->dev_addr[3],
- data->dev_addr[4], data->dev_addr[5]);
+ printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
+ dev->name, ep->irq, dev->dev_addr);
return 0;
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index c5721cb38265..cc9ed8643910 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -663,7 +663,7 @@ static int lance_open( struct net_device *dev )
while (--i > 0)
if (DREG & CSR0_IDON)
break;
- if (i < 0 || (DREG & CSR0_ERR)) {
+ if (i <= 0 || (DREG & CSR0_ERR)) {
DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
dev->name, i, DREG ));
DREG = CSR0_STOP;
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0b..d98095df05be 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -35,7 +35,7 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1c_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
/* required last entry */
@@ -2596,11 +2596,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
- dev_dbg(&pdev->dev,
- "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ dev_dbg(&pdev->dev, "mac address : %pM\n",
+ adapter->hw.mac_addr);
atl1c_hw_set_mac_addr(&adapter->hw);
INIT_WORK(&adapter->common_task, atl1c_common_task);
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9b..d59f8e89c65d 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
/* required last entry */
@@ -2378,10 +2378,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ dev_dbg(&pdev->dev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127c..9ba547069db3 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
/*
* atl1_pci_tbl - PCI Device ID Table
*/
-static const struct pci_device_id atl1_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
/* required last entry */
{0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index c0451d75cdcf..40cf9e5cb9e2 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -63,7 +63,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
*/
-static struct pci_device_id atl2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
/* required last entry */
{0,}
@@ -1959,12 +1959,15 @@ static int atl2_get_eeprom(struct net_device *netdev,
return -ENOMEM;
for (i = first_dword; i < last_dword; i++) {
- if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword])))
- return -EIO;
+ if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) {
+ ret_val = -EIO;
+ goto free;
+ }
}
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3),
eeprom->len);
+free:
kfree(eeprom_buff);
return ret_val;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6bac04603a88..1acf2c1d1389 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -55,6 +55,7 @@
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/phy.h>
+#include <linux/platform_device.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
@@ -63,6 +64,7 @@
#include <asm/processor.h>
#include <au1000.h>
+#include <au1xxx_eth.h>
#include <prom.h>
#include "au1000_eth.h"
@@ -112,15 +114,15 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
*
* PHY detection algorithm
*
- * If AU1XXX_PHY_STATIC_CONFIG is undefined, the PHY setup is
+ * If phy_static_config is undefined, the PHY setup is
* autodetected:
*
* mii_probe() first searches the current MAC's MII bus for a PHY,
- * selecting the first (or last, if AU1XXX_PHY_SEARCH_HIGHEST_ADDR is
+ * selecting the first (or last, if phy_search_highest_addr is
* defined) PHY address not already claimed by another netdev.
*
* If nothing was found that way when searching for the 2nd ethernet
- * controller's PHY and AU1XXX_PHY1_SEARCH_ON_MAC0 is defined, then
+ * controller's PHY and phy1_search_mac0 is defined, then
* the first MII bus is searched as well for an unclaimed PHY; this is
* needed in case of a dual-PHY accessible only through the MAC0's MII
* bus.
@@ -129,9 +131,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* controller is not registered to the network subsystem.
*/
-/* autodetection defaults */
-#undef AU1XXX_PHY_SEARCH_HIGHEST_ADDR
-#define AU1XXX_PHY1_SEARCH_ON_MAC0
+/* autodetection defaults: phy1_search_mac0 */
/* static PHY setup
*
@@ -148,29 +148,6 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
* specific irq-map
*/
-#if defined(CONFIG_MIPS_BOSPORUS)
-/*
- * Micrel/Kendin 5 port switch attached to MAC0,
- * MAC0 is associated with PHY address 5 (== WAN port)
- * MAC1 is not associated with any PHY, since it's connected directly
- * to the switch.
- * no interrupts are used
- */
-# define AU1XXX_PHY_STATIC_CONFIG
-
-# define AU1XXX_PHY0_ADDR 5
-# define AU1XXX_PHY0_BUSID 0
-# undef AU1XXX_PHY0_IRQ
-
-# undef AU1XXX_PHY1_ADDR
-# undef AU1XXX_PHY1_BUSID
-# undef AU1XXX_PHY1_IRQ
-#endif
-
-#if defined(AU1XXX_PHY0_BUSID) && (AU1XXX_PHY0_BUSID > 0)
-# error MAC0-associated PHY attached 2nd MACs MII bus not supported yet
-#endif
-
static void enable_mac(struct net_device *dev, int force_reset)
{
unsigned long flags;
@@ -390,67 +367,54 @@ static int mii_probe (struct net_device *dev)
struct au1000_private *const aup = netdev_priv(dev);
struct phy_device *phydev = NULL;
-#if defined(AU1XXX_PHY_STATIC_CONFIG)
- BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
+ if (aup->phy_static_config) {
+ BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);
- if(aup->mac_id == 0) { /* get PHY0 */
-# if defined(AU1XXX_PHY0_ADDR)
- phydev = au_macs[AU1XXX_PHY0_BUSID]->mii_bus->phy_map[AU1XXX_PHY0_ADDR];
-# else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
- return 0;
-# endif /* defined(AU1XXX_PHY0_ADDR) */
- } else if (aup->mac_id == 1) { /* get PHY1 */
-# if defined(AU1XXX_PHY1_ADDR)
- phydev = au_macs[AU1XXX_PHY1_BUSID]->mii_bus->phy_map[AU1XXX_PHY1_ADDR];
-# else
- printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
- dev->name);
+ if (aup->phy_addr)
+ phydev = aup->mii_bus->phy_map[aup->phy_addr];
+ else
+ printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
+ dev->name);
return 0;
-# endif /* defined(AU1XXX_PHY1_ADDR) */
- }
-
-#else /* defined(AU1XXX_PHY_STATIC_CONFIG) */
- int phy_addr;
-
- /* find the first (lowest address) PHY on the current MAC's MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (aup->mii_bus->phy_map[phy_addr]) {
- phydev = aup->mii_bus->phy_map[phy_addr];
-# if !defined(AU1XXX_PHY_SEARCH_HIGHEST_ADDR)
- break; /* break out with first one found */
-# endif
- }
-
-# if defined(AU1XXX_PHY1_SEARCH_ON_MAC0)
- /* try harder to find a PHY */
- if (!phydev && (aup->mac_id == 1)) {
- /* no PHY found, maybe we have a dual PHY? */
- printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
- "let's see if it's attached to MAC0...\n");
-
- BUG_ON(!au_macs[0]);
-
- /* find the first (lowest address) non-attached PHY on
- * the MAC0 MII bus */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- struct phy_device *const tmp_phydev =
- au_macs[0]->mii_bus->phy_map[phy_addr];
-
- if (!tmp_phydev)
- continue; /* no PHY here... */
-
- if (tmp_phydev->attached_dev)
- continue; /* already claimed by MAC0 */
+ } else {
+ int phy_addr;
+
+ /* find the first (lowest address) PHY on the current MAC's MII bus */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
+ if (aup->mii_bus->phy_map[phy_addr]) {
+ phydev = aup->mii_bus->phy_map[phy_addr];
+ if (!aup->phy_search_highest_addr)
+ break; /* break out with first one found */
+ }
- phydev = tmp_phydev;
- break; /* found it */
+ if (aup->phy1_search_mac0) {
+ /* try harder to find a PHY */
+ if (!phydev && (aup->mac_id == 1)) {
+ /* no PHY found, maybe we have a dual PHY? */
+ printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+ "let's see if it's attached to MAC0...\n");
+
+ /* find the first (lowest address) non-attached PHY on
+ * the MAC0 MII bus */
+ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+ if (aup->mac_id == 1)
+ break;
+ struct phy_device *const tmp_phydev =
+ aup->mii_bus->phy_map[phy_addr];
+
+ if (!tmp_phydev)
+ continue; /* no PHY here... */
+
+ if (tmp_phydev->attached_dev)
+ continue; /* already claimed by MAC0 */
+
+ phydev = tmp_phydev;
+ break; /* found it */
+ }
+ }
}
}
-# endif /* defined(AU1XXX_PHY1_SEARCH_OTHER_BUS) */
-#endif /* defined(AU1XXX_PHY_STATIC_CONFIG) */
if (!phydev) {
printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
return -1;
@@ -578,31 +542,6 @@ setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
}
}
-static struct {
- u32 base_addr;
- u32 macen_addr;
- int irq;
- struct net_device *dev;
-} iflist[2] = {
-#ifdef CONFIG_SOC_AU1000
- {AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
- {AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
-#endif
-#ifdef CONFIG_SOC_AU1100
- {AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
-#endif
-#ifdef CONFIG_SOC_AU1500
- {AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
- {AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
-#endif
-#ifdef CONFIG_SOC_AU1550
- {AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
- {AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
-#endif
-};
-
-static int num_ifs;
-
/*
* ethtool operations
*/
@@ -1058,53 +997,59 @@ static const struct net_device_ops au1000_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static struct net_device * au1000_probe(int port_num)
+static int __devinit au1000_probe(struct platform_device *pdev)
{
static unsigned version_printed = 0;
struct au1000_private *aup = NULL;
+ struct au1000_eth_platform_data *pd;
struct net_device *dev = NULL;
db_dest_t *pDB, *pDBfree;
- char ethaddr[6];
- int irq, i, err;
- u32 base, macen;
+ int irq, i, err = 0;
+ struct resource *base, *macen;
+ DECLARE_MAC_BUF(ethaddr);
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!base) {
+ printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
+ err = -ENODEV;
+ goto out;
+ }
- if (port_num >= NUM_ETH_INTERFACES)
- return NULL;
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!macen) {
+ printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
+ err = -ENODEV;
+ goto out;
+ }
- base = CPHYSADDR(iflist[port_num].base_addr );
- macen = CPHYSADDR(iflist[port_num].macen_addr);
- irq = iflist[port_num].irq;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
+ err = -ENODEV;
+ goto out;
+ }
- if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
- !request_mem_region(macen, 4, "Au1x00 ENET"))
- return NULL;
+ if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
+ printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
+ err = -ENXIO;
+ goto out;
+ }
- if (version_printed++ == 0)
- printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+ if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
+ printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
+ err = -ENXIO;
+ goto err_request;
+ }
dev = alloc_etherdev(sizeof(struct au1000_private));
if (!dev) {
printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
- return NULL;
- }
-
- dev->base_addr = base;
- dev->irq = irq;
- dev->netdev_ops = &au1000_netdev_ops;
- SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
- dev->watchdog_timeo = ETH_TX_TIMEOUT;
-
- err = register_netdev(dev);
- if (err != 0) {
- printk(KERN_ERR "%s: Cannot register net device, error %d\n",
- DRV_NAME, err);
- free_netdev(dev);
- return NULL;
+ err = -ENOMEM;
+ goto err_alloc;
}
- printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
- dev->name, base, irq);
-
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
aup = netdev_priv(dev);
spin_lock_init(&aup->lock);
@@ -1115,21 +1060,29 @@ static struct net_device * au1000_probe(int port_num)
(NUM_TX_BUFFS + NUM_RX_BUFFS),
&aup->dma_addr, 0);
if (!aup->vaddr) {
- free_netdev(dev);
- release_mem_region( base, MAC_IOSIZE);
- release_mem_region(macen, 4);
- return NULL;
+ printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
+ err = -ENOMEM;
+ goto err_vaddr;
}
/* aup->mac is the base address of the MAC's registers */
- aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
+ aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
+ if (!aup->mac) {
+ printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
+ err = -ENXIO;
+ goto err_remap1;
+ }
- /* Setup some variables for quick register address access */
- aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
- aup->mac_id = port_num;
- au_macs[port_num] = aup;
+ /* Setup some variables for quick register address access */
+ aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
+ if (!aup->enable) {
+ printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
+ err = -ENXIO;
+ goto err_remap2;
+ }
+ aup->mac_id = pdev->id;
- if (port_num == 0) {
+ if (pdev->id == 0) {
if (prom_get_ethernet_addr(ethaddr) == 0)
memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
else {
@@ -1139,7 +1092,7 @@ static struct net_device * au1000_probe(int port_num)
}
setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
- } else if (port_num == 1)
+ } else if (pdev->id == 1)
setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
/*
@@ -1147,14 +1100,37 @@ static struct net_device * au1000_probe(int port_num)
* to match those that are printed on their stickers
*/
memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
- dev->dev_addr[5] += port_num;
+ dev->dev_addr[5] += pdev->id;
*aup->enable = 0;
aup->mac_enabled = 0;
+ pd = pdev->dev.platform_data;
+ if (!pd) {
+ printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
+ aup->phy1_search_mac0 = 1;
+ } else {
+ aup->phy_static_config = pd->phy_static_config;
+ aup->phy_search_highest_addr = pd->phy_search_highest_addr;
+ aup->phy1_search_mac0 = pd->phy1_search_mac0;
+ aup->phy_addr = pd->phy_addr;
+ aup->phy_busid = pd->phy_busid;
+ aup->phy_irq = pd->phy_irq;
+ }
+
+ if (aup->phy_busid && aup->phy_busid > 0) {
+ printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
+ "bus not supported yet\n");
+ err = -ENODEV;
+ goto err_mdiobus_alloc;
+ }
+
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL)
- goto err_out;
+ if (aup->mii_bus == NULL) {
+ printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
+ err = -ENOMEM;
+ goto err_mdiobus_alloc;
+ }
aup->mii_bus->priv = dev;
aup->mii_bus->read = au1000_mdiobus_read;
@@ -1168,23 +1144,19 @@ static struct net_device * au1000_probe(int port_num)
for(i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus->irq[i] = PHY_POLL;
-
/* if known, set corresponding PHY IRQs */
-#if defined(AU1XXX_PHY_STATIC_CONFIG)
-# if defined(AU1XXX_PHY0_IRQ)
- if (AU1XXX_PHY0_BUSID == aup->mac_id)
- aup->mii_bus->irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
-# endif
-# if defined(AU1XXX_PHY1_IRQ)
- if (AU1XXX_PHY1_BUSID == aup->mac_id)
- aup->mii_bus->irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
-# endif
-#endif
- mdiobus_register(aup->mii_bus);
+ if (aup->phy_static_config)
+ if (aup->phy_irq && aup->phy_busid == aup->mac_id)
+ aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;
+
+ err = mdiobus_register(aup->mii_bus);
+ if (err) {
+ printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
+ goto err_mdiobus_reg;
+ }
- if (mii_probe(dev) != 0) {
+ if (mii_probe(dev) != 0)
goto err_out;
- }
pDBfree = NULL;
/* setup the data buffer descriptors and attach a buffer to each one */
@@ -1216,19 +1188,35 @@ static struct net_device * au1000_probe(int port_num)
aup->tx_db_inuse[i] = pDB;
}
+ dev->base_addr = base->start;
+ dev->irq = irq;
+ dev->netdev_ops = &au1000_netdev_ops;
+ SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
/*
* The boot code uses the ethernet controller, so reset it to start
* fresh. au1000_init() expects that the device is in reset state.
*/
reset_mac(dev);
- return dev;
+ err = register_netdev(dev);
+ if (err) {
+ printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
+ dev->name);
+ goto err_out;
+ }
+
+ printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
+ dev->name, base->start, irq);
+ if (version_printed++ == 0)
+ printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
+
+ return 0;
err_out:
- if (aup->mii_bus != NULL) {
+ if (aup->mii_bus != NULL)
mdiobus_unregister(aup->mii_bus);
- mdiobus_free(aup->mii_bus);
- }
/* here we should have a valid dev plus aup-> register addresses
* so we can reset the mac properly.*/
@@ -1242,67 +1230,84 @@ err_out:
if (aup->tx_db_inuse[i])
ReleaseDB(aup, aup->tx_db_inuse[i]);
}
+err_mdiobus_reg:
+ mdiobus_free(aup->mii_bus);
+err_mdiobus_alloc:
+ iounmap(aup->enable);
+err_remap2:
+ iounmap(aup->mac);
+err_remap1:
dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
(void *)aup->vaddr, aup->dma_addr);
- unregister_netdev(dev);
+err_vaddr:
free_netdev(dev);
- release_mem_region( base, MAC_IOSIZE);
- release_mem_region(macen, 4);
- return NULL;
+err_alloc:
+ release_mem_region(macen->start, resource_size(macen));
+err_request:
+ release_mem_region(base->start, resource_size(base));
+out:
+ return err;
}
-/*
- * Setup the base address and interrupt of the Au1xxx ethernet macs
- * based on cpu type and whether the interface is enabled in sys_pinfunc
- * register. The last interface is enabled if SYS_PF_NI2 (bit 4) is 0.
- */
-static int __init au1000_init_module(void)
+static int __devexit au1000_remove(struct platform_device *pdev)
{
- int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
- struct net_device *dev;
- int i, found_one = 0;
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1000_private *aup = netdev_priv(dev);
+ int i;
+ struct resource *base, *macen;
- num_ifs = NUM_ETH_INTERFACES - ni;
+ platform_set_drvdata(pdev, NULL);
+
+ unregister_netdev(dev);
+ mdiobus_unregister(aup->mii_bus);
+ mdiobus_free(aup->mii_bus);
+
+ for (i = 0; i < NUM_RX_DMA; i++)
+ if (aup->rx_db_inuse[i])
+ ReleaseDB(aup, aup->rx_db_inuse[i]);
+
+ for (i = 0; i < NUM_TX_DMA; i++)
+ if (aup->tx_db_inuse[i])
+ ReleaseDB(aup, aup->tx_db_inuse[i]);
+
+ dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+ (NUM_TX_BUFFS + NUM_RX_BUFFS),
+ (void *)aup->vaddr, aup->dma_addr);
+
+ iounmap(aup->mac);
+ iounmap(aup->enable);
+
+ base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(base->start, resource_size(base));
+
+ macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ release_mem_region(macen->start, resource_size(macen));
+
+ free_netdev(dev);
- for(i = 0; i < num_ifs; i++) {
- dev = au1000_probe(i);
- iflist[i].dev = dev;
- if (dev)
- found_one++;
- }
- if (!found_one)
- return -ENODEV;
return 0;
}
-static void __exit au1000_cleanup_module(void)
+static struct platform_driver au1000_eth_driver = {
+ .probe = au1000_probe,
+ .remove = __devexit_p(au1000_remove),
+ .driver = {
+ .name = "au1000-eth",
+ .owner = THIS_MODULE,
+ },
+};
+MODULE_ALIAS("platform:au1000-eth");
+
+
+static int __init au1000_init_module(void)
{
- int i, j;
- struct net_device *dev;
- struct au1000_private *aup;
-
- for (i = 0; i < num_ifs; i++) {
- dev = iflist[i].dev;
- if (dev) {
- aup = netdev_priv(dev);
- unregister_netdev(dev);
- mdiobus_unregister(aup->mii_bus);
- mdiobus_free(aup->mii_bus);
- for (j = 0; j < NUM_RX_DMA; j++)
- if (aup->rx_db_inuse[j])
- ReleaseDB(aup, aup->rx_db_inuse[j]);
- for (j = 0; j < NUM_TX_DMA; j++)
- if (aup->tx_db_inuse[j])
- ReleaseDB(aup, aup->tx_db_inuse[j]);
- dma_free_noncoherent(NULL, MAX_BUF_SIZE *
- (NUM_TX_BUFFS + NUM_RX_BUFFS),
- (void *)aup->vaddr, aup->dma_addr);
- release_mem_region(dev->base_addr, MAC_IOSIZE);
- release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
- free_netdev(dev);
- }
- }
+ return platform_driver_register(&au1000_eth_driver);
+}
+
+static void __exit au1000_exit_module(void)
+{
+ platform_driver_unregister(&au1000_eth_driver);
}
module_init(au1000_init_module);
-module_exit(au1000_cleanup_module);
+module_exit(au1000_exit_module);
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index 824ecd5ff3a8..f9d29a29b8fd 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -108,6 +108,15 @@ struct au1000_private {
struct phy_device *phy_dev;
struct mii_bus *mii_bus;
+ /* PHY configuration */
+ int phy_static_config;
+ int phy_search_highest_addr;
+ int phy1_search_mac0;
+
+ int phy_addr;
+ int phy_busid;
+ int phy_irq;
+
/* These variables are just for quick access to certain regs addresses. */
volatile mac_reg_t *mac; /* mac registers */
volatile u32 *enable; /* address of MAC Enable Register */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb69586..44b66be38134 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -102,7 +102,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
-static const struct pci_device_id b44_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 102ade134165..7f64d562ccaa 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1607,3 +1607,33 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_SEEPROM_READ);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index c002b8391b4d..cb3188f8708d 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SEEPROM_READ 30
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -854,6 +855,19 @@ struct be_cmd_resp_ddrdma_test {
u8 rcv_buff[4096];
};
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -926,5 +940,8 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
+
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 5d001c4deac1..f18c02f3a5e4 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -567,12 +567,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
return be_load_fw(adapter, file_name);
}
+static int
+be_get_eeprom_len(struct net_device *netdev)
+{
+ return BE_READ_SEEPROM_LEN;
+}
+
+static int
+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
+ &eeprom_cmd.dma);
+
+ if (!eeprom_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure. Could not read eeprom\n");
+ return -ENOMEM;
+ }
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data, eeprom->len);
+ }
+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return status;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
.get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
.get_coalesce = be_get_coalesce,
.set_coalesce = be_set_coalesce,
.get_ringparam = be_get_ringparam,
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e4..b1c20e5f7de8 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -6145,6 +6145,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+ /* Need to flush the previous three writes to ensure MSI-X
+ * is setup properly */
+ REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 306c2b8165e2..ca4ed634d55e 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -140,7 +140,7 @@ static struct {
};
-static const struct pci_device_id bnx2x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 3f0071cfe56b..6a42a1453afa 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2615,6 +2615,17 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
unsigned char *arp_ptr;
__be32 sip, tip;
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ /*
+ * When using VLANS and bonding, dev and oriv_dev may be
+ * incorrect if the physical interface supports VLAN
+ * acceleration. With this change ARP validation now
+ * works for hosts only reachable on the VLAN interface.
+ */
+ dev = vlan_dev_real_dev(dev);
+ orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif);
+ }
+
if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
goto out;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c0..f7287497ba6e 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
mb = get_tx_next_mb(priv);
prio = get_tx_next_prio(priv);
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523cc..7e1926e79e98 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -318,6 +318,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 val;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
/* fill id */
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 9c5a1537939c..afa2fa45fed9 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -494,12 +494,8 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- if (skb->len != sizeof(struct can_frame)) {
- dev_err(&spi->dev, "dropping packet - bad length\n");
- dev_kfree_skb(skb);
- net->stats.tx_dropped++;
+ if (can_dropped_invalid_skb(net, skb))
return NETDEV_TX_OK;
- }
netif_stop_queue(net);
priv->tx_skb = skb;
@@ -990,7 +986,7 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
goto error_tx_buf;
}
priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
- if (!priv->spi_tx_buf) {
+ if (!priv->spi_rx_buf) {
ret = -ENOMEM;
goto error_rx_buf;
}
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375d..27d1d398e25e 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
config CAN_MPC5XXX
tristate "Freescale MPC5xxx onboard CAN controller"
- depends on PPC_MPC52xx
+ depends on (PPC_MPC52xx || PPC_MPC512x)
---help---
If you say yes here you get support for Freescale's MPC5xxx
- onboard CAN controller.
+ onboard CAN controller. Currently, the MPC5200, MPC5200B and
+ MPC5121 (Rev. 2 and later) are supported.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called mscan-mpc5xxx.ko.
endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b16..f73487f723b8 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mpc52xx.h>
@@ -36,22 +37,21 @@
#define DRV_NAME "mpc5xxx_can"
-static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
+struct mpc5xxx_can_data {
+ unsigned int type;
+ u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+ int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC5200
+static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
{ .compatible = "fsl,mpc5200-cdm", },
{}
};
-/*
- * Get frequency of the MSCAN clock source
- *
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
- * can be selected. According to the MPC5200 user's manual, the oscillator
- * clock is the better choice as it has less jitter but due to a hardware
- * bug, it can not be selected for the old MPC5200 Rev. A chips.
- */
-
-static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
- int clock_src)
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
{
unsigned int pvr;
struct mpc52xx_cdm __iomem *cdm;
@@ -61,11 +61,24 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
pvr = mfspr(SPRN_PVR);
- freq = mpc5xxx_get_bus_frequency(of->node);
+ /*
+ * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+ * (IP_CLK) can be selected as MSCAN clock source. According to
+ * the MPC5200 user's manual, the oscillator clock is the better
+ * choice as it has less jitter. For this reason, it is selected
+ * by default. Unfortunately, it can not be selected for the old
+ * MPC5200 Rev. A chips due to a hardware bug (check errata).
+ */
+ if (clock_name && strcmp(clock_name, "ip") == 0)
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+ else
+ *mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
if (!freq)
return 0;
- if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+ if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
return freq;
/* Determine SYS_XTAL_IN frequency from the clock domain settings */
@@ -75,7 +88,6 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
return 0;
}
cdm = of_iomap(np_cdm, 0);
- of_node_put(np_cdm);
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
freq *= (val & (1 << 5)) ? 8 : 4;
freq /= (val & (1 << 6)) ? 12 : 16;
+ of_node_put(np_cdm);
iounmap(cdm);
return freq;
}
+#else /* !CONFIG_PPC_MPC5200 */
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC5200 */
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+ u32 spmr; /* System PLL Mode Reg */
+ u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
+ u32 scfr1; /* System Clk Freq Reg 1 */
+ u32 scfr2; /* System Clk Freq Reg 2 */
+ u32 reserved;
+ u32 bcr; /* Bread Crumb Reg */
+ u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
+ u32 spccr; /* SPDIF Clk Ctrl Reg */
+ u32 cccr; /* CFM Clk Ctrl Reg */
+ u32 dccr; /* DIU Clk Cnfg Reg */
+ u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
+};
+
+static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
+ { .compatible = "fsl,mpc5121-clock", },
+ {}
+};
+
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ struct mpc512x_clockctl __iomem *clockctl;
+ struct device_node *np_clock;
+ struct clk *sys_clk, *ref_clk;
+ int plen, clockidx, clocksrc = -1;
+ u32 sys_freq, val, clockdiv = 1, freq = 0;
+ const u32 *pval;
+
+ np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+ if (!np_clock) {
+ dev_err(&ofdev->dev, "couldn't find clock node\n");
+ return -ENODEV;
+ }
+ clockctl = of_iomap(np_clock, 0);
+ if (!clockctl) {
+ dev_err(&ofdev->dev, "couldn't map clock registers\n");
+ return 0;
+ }
+
+ /* Determine the MSCAN device index from the physical address */
+ pval = of_get_property(ofdev->node, "reg", &plen);
+ BUG_ON(!pval || plen < sizeof(*pval));
+ clockidx = (*pval & 0x80) ? 1 : 0;
+ if (*pval & 0x2000)
+ clockidx += 2;
+
+ /*
+ * Clock source and divider selection: 3 different clock sources
+ * can be selected: "ip", "ref" or "sys". For the latter two, a
+ * clock divider can be defined as well. If the clock source is
+ * not specified by the device tree, we first try to find an
+ * optimal CAN source clock based on the system clock. If that
+ * is not posslible, the reference clock will be used.
+ */
+ if (clock_name && !strcmp(clock_name, "ip")) {
+ *mscan_clksrc = MSCAN_CLKSRC_IPS;
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
+ } else {
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+
+ pval = of_get_property(ofdev->node,
+ "fsl,mscan-clock-divider", &plen);
+ if (pval && plen == sizeof(*pval))
+ clockdiv = *pval;
+ if (!clockdiv)
+ clockdiv = 1;
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+ if (!sys_clk) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+ /* Get and round up/down sys clock rate */
+ sys_freq = 1000000 *
+ ((clk_get_rate(sys_clk) + 499999) / 1000000);
+
+ if (!clock_name) {
+ /* A multiple of 16 MHz would be optimal */
+ if ((sys_freq % 16000000) == 0) {
+ clocksrc = 0;
+ clockdiv = sys_freq / 16000000;
+ freq = sys_freq / clockdiv;
+ }
+ } else {
+ clocksrc = 0;
+ freq = sys_freq / clockdiv;
+ }
+ }
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+ if (!ref_clk) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+ clocksrc = 1;
+ freq = clk_get_rate(ref_clk) / clockdiv;
+ }
+ }
+
+ /* Disable clock */
+ out_be32(&clockctl->mccr[clockidx], 0x0);
+ if (clocksrc >= 0) {
+ /* Set source and divider */
+ val = (clocksrc << 14) | ((clockdiv - 1) << 17);
+ out_be32(&clockctl->mccr[clockidx], val);
+ /* Enable clock */
+ out_be32(&clockctl->mccr[clockidx], val | 0x10000);
+ }
+
+ /* Enable MSCAN clock domain */
+ val = in_be32(&clockctl->sccr[1]);
+ if (!(val & (1 << 25)))
+ out_be32(&clockctl->sccr[1], val | (1 << 25));
+
+ dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
+ *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
+ clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+
+exit_unmap:
+ of_node_put(np_clock);
+ iounmap(clockctl);
+
+ return freq;
+}
+#else /* !CONFIG_PPC_MPC512x */
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
+ struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
struct device_node *np = ofdev->node;
struct net_device *dev;
struct mscan_priv *priv;
void __iomem *base;
- const char *clk_src;
- int err, irq, clock_src;
+ const char *clock_name = NULL;
+ int irq, mscan_clksrc = 0;
+ int err = -ENOMEM;
- base = of_iomap(ofdev->node, 0);
+ base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "couldn't ioremap\n");
- err = -ENOMEM;
- goto exit_release_mem;
+ return err;
}
irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
}
dev = alloc_mscandev();
- if (!dev) {
- err = -ENOMEM;
+ if (!dev)
goto exit_dispose_irq;
- }
priv = netdev_priv(dev);
priv->reg_base = base;
dev->irq = irq;
- /*
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
- * (IP_CLK) can be selected as MSCAN clock source. According to
- * the MPC5200 user's manual, the oscillator clock is the better
- * choice as it has less jitter. For this reason, it is selected
- * by default.
- */
- clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
- if (clk_src && strcmp(clk_src, "ip") == 0)
- clock_src = MSCAN_CLKSRC_BUS;
- else
- clock_src = MSCAN_CLKSRC_XTAL;
- priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
+ clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
+
+ BUG_ON(!data);
+ priv->type = data->type;
+ priv->can.clock.freq = data->get_clock(ofdev, clock_name,
+ &mscan_clksrc);
if (!priv->can.clock.freq) {
- dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
- err = -ENODEV;
+ dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
goto exit_free_mscan;
}
SET_NETDEV_DEV(dev, &ofdev->dev);
- err = register_mscandev(dev, clock_src);
+ err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
irq_dispose_mapping(irq);
exit_unmap_mem:
iounmap(base);
-exit_release_mem:
+
return err;
}
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
}
#endif
+static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
+ .type = MSCAN_TYPE_MPC5200,
+ .get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
+ .type = MSCAN_TYPE_MPC5121,
+ .get_clock = mpc512x_can_get_clock,
+};
+
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
- {.compatible = "fsl,mpc5200-mscan"},
+ { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+ /* Note that only MPC5121 Rev. 2 (and later) is supported */
+ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
{},
};
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
module_exit(mpc5xxx_can_exit);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
+MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca6..40827c128b65 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
* Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
+ * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
priv->shadow_canrier = 0;
priv->flags = 0;
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ /* Clear pending bus-off condition */
+ if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ }
+
err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
if (err)
return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
out_8(&regs->cantier, 0);
/* Enable receive interrupts. */
- out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
- MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+
+ return 0;
+}
+
+static int mscan_restart(struct net_device *dev)
+{
+ struct mscan_priv *priv = netdev_priv(dev);
+
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
+ "bus-off state expected");
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ /* Re-enable receive interrupts. */
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+ } else {
+ if (priv->can.state <= CAN_STATE_BUS_OFF)
+ mscan_set_mode(dev, MSCAN_INIT_MODE);
+ return mscan_start(dev);
+ }
return 0;
}
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (frame->can_dlc > 8)
- return -EINVAL;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
* automatically. To avoid that we stop the chip doing
* a light-weight stop (we are in irq-context).
*/
- out_8(&regs->cantier, 0);
- out_8(&regs->canrier, 0);
- setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
+ if (priv->type != MSCAN_TYPE_MPC5121) {
+ out_8(&regs->cantier, 0);
+ out_8(&regs->canrier, 0);
+ setbits8(&regs->canctl0,
+ MSCAN_SLPRQ | MSCAN_INITRQ);
+ }
can_bus_off(dev);
break;
default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
- if (priv->can.state <= CAN_STATE_BUS_OFF)
- mscan_set_mode(dev, MSCAN_INIT_MODE);
- ret = mscan_start(dev);
+ ret = mscan_restart(dev);
if (ret)
break;
if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_start_xmit = mscan_start_xmit,
};
-int register_mscandev(struct net_device *dev, int clock_src)
+int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
u8 ctl1;
ctl1 = in_8(&regs->canctl1);
- if (clock_src)
+ if (mscan_clksrc)
ctl1 |= MSCAN_CLKSRC;
else
ctl1 &= ~MSCAN_CLKSRC;
+ if (priv->type == MSCAN_TYPE_MPC5121)
+ ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
+
ctl1 |= MSCAN_CANE;
out_8(&regs->canctl1, ctl1);
udelay(100);
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed8..4ff966473bc9 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
#define MSCAN_CLKSRC 0x40
#define MSCAN_LOOPB 0x20
#define MSCAN_LISTEN 0x10
+#define MSCAN_BORM 0x08
#define MSCAN_WUPM 0x04
#define MSCAN_SLPAK 0x02
#define MSCAN_INITAK 0x01
-/* Use the MPC5200 MSCAN variant? */
+/* Use the MPC5XXX MSCAN variant? */
#ifdef CONFIG_PPC
-#define MSCAN_FOR_MPC5200
+#define MSCAN_FOR_MPC5XXX
#endif
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define MSCAN_CLKSRC_BUS 0
#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
+#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
#else
#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
#define MSCAN_EFF_RTR_SHIFT 0
#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
#define _MSCAN_RESERVED_DSR_SIZE 2
#else
@@ -165,67 +167,66 @@ struct mscan_regs {
u8 cantbsel; /* + 0x14 0x0a */
u8 canidac; /* + 0x15 0x0b */
u8 reserved; /* + 0x16 0x0c */
- _MSCAN_RESERVED_(6, 5); /* + 0x17 */
-#ifndef MSCAN_FOR_MPC5200
- u8 canmisc; /* 0x0d */
-#endif
+ _MSCAN_RESERVED_(6, 2); /* + 0x17 */
+ u8 canmisc; /* + 0x19 0x0d */
+ _MSCAN_RESERVED_(7, 2); /* + 0x1a */
u8 canrxerr; /* + 0x1c 0x0e */
u8 cantxerr; /* + 0x1d 0x0f */
- _MSCAN_RESERVED_(7, 2); /* + 0x1e */
+ _MSCAN_RESERVED_(8, 2); /* + 0x1e */
u16 canidar1_0; /* + 0x20 0x10 */
- _MSCAN_RESERVED_(8, 2); /* + 0x22 */
+ _MSCAN_RESERVED_(9, 2); /* + 0x22 */
u16 canidar3_2; /* + 0x24 0x12 */
- _MSCAN_RESERVED_(9, 2); /* + 0x26 */
+ _MSCAN_RESERVED_(10, 2); /* + 0x26 */
u16 canidmr1_0; /* + 0x28 0x14 */
- _MSCAN_RESERVED_(10, 2); /* + 0x2a */
+ _MSCAN_RESERVED_(11, 2); /* + 0x2a */
u16 canidmr3_2; /* + 0x2c 0x16 */
- _MSCAN_RESERVED_(11, 2); /* + 0x2e */
+ _MSCAN_RESERVED_(12, 2); /* + 0x2e */
u16 canidar5_4; /* + 0x30 0x18 */
- _MSCAN_RESERVED_(12, 2); /* + 0x32 */
+ _MSCAN_RESERVED_(13, 2); /* + 0x32 */
u16 canidar7_6; /* + 0x34 0x1a */
- _MSCAN_RESERVED_(13, 2); /* + 0x36 */
+ _MSCAN_RESERVED_(14, 2); /* + 0x36 */
u16 canidmr5_4; /* + 0x38 0x1c */
- _MSCAN_RESERVED_(14, 2); /* + 0x3a */
+ _MSCAN_RESERVED_(15, 2); /* + 0x3a */
u16 canidmr7_6; /* + 0x3c 0x1e */
- _MSCAN_RESERVED_(15, 2); /* + 0x3e */
+ _MSCAN_RESERVED_(16, 2); /* + 0x3e */
struct {
u16 idr1_0; /* + 0x40 0x20 */
- _MSCAN_RESERVED_(16, 2); /* + 0x42 */
+ _MSCAN_RESERVED_(17, 2); /* + 0x42 */
u16 idr3_2; /* + 0x44 0x22 */
- _MSCAN_RESERVED_(17, 2); /* + 0x46 */
+ _MSCAN_RESERVED_(18, 2); /* + 0x46 */
u16 dsr1_0; /* + 0x48 0x24 */
- _MSCAN_RESERVED_(18, 2); /* + 0x4a */
+ _MSCAN_RESERVED_(19, 2); /* + 0x4a */
u16 dsr3_2; /* + 0x4c 0x26 */
- _MSCAN_RESERVED_(19, 2); /* + 0x4e */
+ _MSCAN_RESERVED_(20, 2); /* + 0x4e */
u16 dsr5_4; /* + 0x50 0x28 */
- _MSCAN_RESERVED_(20, 2); /* + 0x52 */
+ _MSCAN_RESERVED_(21, 2); /* + 0x52 */
u16 dsr7_6; /* + 0x54 0x2a */
- _MSCAN_RESERVED_(21, 2); /* + 0x56 */
+ _MSCAN_RESERVED_(22, 2); /* + 0x56 */
u8 dlr; /* + 0x58 0x2c */
- u8:8; /* + 0x59 0x2d */
- _MSCAN_RESERVED_(22, 2); /* + 0x5a */
+ u8 reserved; /* + 0x59 0x2d */
+ _MSCAN_RESERVED_(23, 2); /* + 0x5a */
u16 time; /* + 0x5c 0x2e */
} rx;
- _MSCAN_RESERVED_(23, 2); /* + 0x5e */
+ _MSCAN_RESERVED_(24, 2); /* + 0x5e */
struct {
u16 idr1_0; /* + 0x60 0x30 */
- _MSCAN_RESERVED_(24, 2); /* + 0x62 */
+ _MSCAN_RESERVED_(25, 2); /* + 0x62 */
u16 idr3_2; /* + 0x64 0x32 */
- _MSCAN_RESERVED_(25, 2); /* + 0x66 */
+ _MSCAN_RESERVED_(26, 2); /* + 0x66 */
u16 dsr1_0; /* + 0x68 0x34 */
- _MSCAN_RESERVED_(26, 2); /* + 0x6a */
+ _MSCAN_RESERVED_(27, 2); /* + 0x6a */
u16 dsr3_2; /* + 0x6c 0x36 */
- _MSCAN_RESERVED_(27, 2); /* + 0x6e */
+ _MSCAN_RESERVED_(28, 2); /* + 0x6e */
u16 dsr5_4; /* + 0x70 0x38 */
- _MSCAN_RESERVED_(28, 2); /* + 0x72 */
+ _MSCAN_RESERVED_(29, 2); /* + 0x72 */
u16 dsr7_6; /* + 0x74 0x3a */
- _MSCAN_RESERVED_(29, 2); /* + 0x76 */
+ _MSCAN_RESERVED_(30, 2); /* + 0x76 */
u8 dlr; /* + 0x78 0x3c */
u8 tbpr; /* + 0x79 0x3d */
- _MSCAN_RESERVED_(30, 2); /* + 0x7a */
+ _MSCAN_RESERVED_(31, 2); /* + 0x7a */
u16 time; /* + 0x7c 0x3e */
} tx;
- _MSCAN_RESERVED_(31, 2); /* + 0x7e */
+ _MSCAN_RESERVED_(32, 2); /* + 0x7e */
} __attribute__ ((packed));
#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
#define MSCAN_SET_MODE_RETRIES 255
#define MSCAN_ECHO_SKB_MAX 3
+#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
+ MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
+ MSCAN_TSTATE1 | MSCAN_TSTATE0)
+
+/* MSCAN type variants */
+enum {
+ MSCAN_TYPE_MPC5200,
+ MSCAN_TYPE_MPC5121
+};
#define BTR0_BRP_MASK 0x3f
#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
struct mscan_priv {
struct can_priv can; /* must be the first member */
+ unsigned int type; /* MSCAN type variants */
long open_time;
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
};
extern struct net_device *alloc_mscandev(void);
-/*
- * clock_src:
- * 1 = The MSCAN clock source is the onchip Bus Clock.
- * 0 = The MSCAN clock source is the chip Oscillator Clock.
- */
-extern int register_mscandev(struct net_device *dev, int clock_src);
+extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
extern void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d3370..87300606abb9 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -102,7 +102,7 @@ struct ems_pci_card {
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
-static struct pci_device_id ems_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
/* CPC-PCI v2 */
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b9713..441e776a7f59 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
#define KVASER_PCI_DEVICE_ID2 0x0008
-static struct pci_device_id kvaser_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
{KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
{KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
{ 0,}
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b4..345304d779b9 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -249,6 +249,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
uint8_t dreg;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
fi = dlc = cf->can_dlc;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da528..7d370e32a7a8 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -477,6 +477,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
mbxno = get_tx_head_mb(priv);
mbx_mask = BIT(mbxno);
spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +494,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
/* Prepare mailbox for transmission */
- data = min_t(u8, cf->can_dlc, 8);
if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
data |= HECC_CANMCF_RTR;
data |= get_tx_head_prio(priv) << 8;
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf4..ddb17e256656 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac56313981..d124d837ae58 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,7 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
+#include <linux/can/dev.h>
#include <net/rtnetlink.h>
static __initdata const char banner[] =
@@ -70,10 +71,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +87,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += cf->can_dlc;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +109,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
}
kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e488..b3a038c23af1 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe09..f6462b54f823 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -334,7 +334,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
return adapter->params.is_asic;
}
-extern struct pci_device_id t1_pci_tbl[];
+extern const struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
int version, int revision)
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bfe..2402d372c886 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
};
-struct pci_device_id t1_pci_tbl[] = {
+DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
CH_DEVICE(8, 0, CH_BRD_T110_1CU),
CH_DEVICE(8, 1, CH_BRD_T110_1CU),
CH_DEVICE(7, 0, CH_BRD_N110_1F),
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index af9321617ce4..9b5bbc6ea2fa 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -580,7 +580,7 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
}
#ifdef CONFIG_SH_HICOSH4
- /* truely reset the chip */
+ /* truly reset the chip */
writeword(ioaddr, ADD_PORT, 0x0114);
writeword(ioaddr, DATA_PORT, 0x0040);
#endif
@@ -1325,8 +1325,7 @@ net_open(struct net_device *dev)
write_irq(dev, lp->chip_type, dev->irq);
ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev);
if (ret) {
- if (net_debug)
- printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq);
+ printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq);
goto bad_out;
}
}
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c141..73622f5312cb 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -80,7 +80,7 @@ enum {
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static const struct pci_device_id cxgb3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
CH_DEVICE(0x20, 0), /* PE9000 */
CH_DEVICE(0x21, 1), /* T302E */
CH_DEVICE(0x22, 2), /* T310E */
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 75064eea1d87..9498361119d6 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1252,7 +1252,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
struct mtutab mtutab;
unsigned int l2t_capacity;
- t = kcalloc(1, sizeof(*t), GFP_KERNEL);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index bdbd14727e4b..5dbc125822b9 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -196,13 +196,13 @@ static inline void refill_rspq(struct adapter *adapter,
/**
* need_skb_unmap - does the platform need unmapping of sk_buffs?
*
- * Returns true if the platfrom needs sk_buff unmapping. The compiler
+ * Returns true if the platform needs sk_buff unmapping. The compiler
* optimizes away unecessary code if this returns true.
*/
static inline int need_skb_unmap(void)
{
/*
- * This structure is used to tell if the platfrom needs buffer
+ * This structure is used to tell if the platform needs buffer
* unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
*/
struct dummy {
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 34e03104c3c1..50d9ca0ff780 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -62,12 +62,11 @@
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/davinci_emac.h>
#include <asm/irq.h>
#include <asm/page.h>
-#include <mach/emac.h>
-
static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
@@ -465,6 +464,7 @@ struct emac_priv {
void __iomem *ctrl_base;
void __iomem *emac_ctrl_ram;
u32 ctrl_ram_size;
+ u32 hw_ram_addr;
struct emac_txch *txch[EMAC_DEF_MAX_TX_CH];
struct emac_rxch *rxch[EMAC_DEF_MAX_RX_CH];
u32 link; /* 1=link on, 0=link off */
@@ -488,6 +488,9 @@ struct emac_priv {
struct mii_bus *mii_bus;
struct phy_device *phydev;
spinlock_t lock;
+ /*platform specific members*/
+ void (*int_enable) (void);
+ void (*int_disable) (void);
};
/* clock frequency for EMAC */
@@ -495,11 +498,9 @@ static struct clk *emac_clk;
static unsigned long emac_bus_frequency;
static unsigned long mdio_max_freq;
-/* EMAC internal utility function */
-static inline u32 emac_virt_to_phys(void __iomem *addr)
-{
- return (u32 __force) io_v2p(addr);
-}
+#define emac_virt_to_phys(addr, priv) \
+ (((u32 __force)(addr) - (u32 __force)(priv->emac_ctrl_ram)) \
+ + priv->hw_ram_addr)
/* Cache macros - Packet buffers would be from skb pool which is cached */
#define EMAC_VIRT_NOCACHE(addr) (addr)
@@ -1002,6 +1003,8 @@ static void emac_int_disable(struct emac_priv *priv)
emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
/* NOTE: Rx Threshold and Misc interrupts are not disabled */
+ if (priv->int_disable)
+ priv->int_disable();
local_irq_restore(flags);
@@ -1021,6 +1024,9 @@ static void emac_int_disable(struct emac_priv *priv)
static void emac_int_enable(struct emac_priv *priv)
{
if (priv->version == EMAC_VERSION_2) {
+ if (priv->int_enable)
+ priv->int_enable();
+
emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
@@ -1302,7 +1308,7 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
curr_bd = txch->active_queue_head;
if (NULL == curr_bd) {
emac_write(EMAC_TXCP(ch),
- emac_virt_to_phys(txch->last_hw_bdprocessed));
+ emac_virt_to_phys(txch->last_hw_bdprocessed, priv));
txch->no_active_pkts++;
spin_unlock_irqrestore(&priv->tx_lock, flags);
return 0;
@@ -1312,7 +1318,7 @@ static int emac_tx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
while ((curr_bd) &&
((frame_status & EMAC_CPPI_OWNERSHIP_BIT) == 0) &&
(pkts_processed < budget)) {
- emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd));
+ emac_write(EMAC_TXCP(ch), emac_virt_to_phys(curr_bd, priv));
txch->active_queue_head = curr_bd->next;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
if (curr_bd->next) { /* misqueued packet */
@@ -1399,7 +1405,7 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
txch->active_queue_tail = curr_bd;
if (1 != txch->queue_active) {
emac_write(EMAC_TXHDP(ch),
- emac_virt_to_phys(curr_bd));
+ emac_virt_to_phys(curr_bd, priv));
txch->queue_active = 1;
}
++txch->queue_reinit;
@@ -1411,10 +1417,11 @@ static int emac_send(struct emac_priv *priv, struct emac_netpktobj *pkt, u32 ch)
tail_bd->next = curr_bd;
txch->active_queue_tail = curr_bd;
tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = (int)emac_virt_to_phys(curr_bd);
+ tail_bd->h_next = (int)emac_virt_to_phys(curr_bd, priv);
frame_status = tail_bd->mode;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
- emac_write(EMAC_TXHDP(ch), emac_virt_to_phys(curr_bd));
+ emac_write(EMAC_TXHDP(ch),
+ emac_virt_to_phys(curr_bd, priv));
frame_status &= ~(EMAC_CPPI_EOQ_BIT);
tail_bd->mode = frame_status;
++txch->end_of_queue_add;
@@ -1604,7 +1611,8 @@ static int emac_init_rxch(struct emac_priv *priv, u32 ch, char *param)
}
/* populate the hardware descriptor */
- curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head);
+ curr_bd->h_next = emac_virt_to_phys(rxch->active_queue_head,
+ priv);
/* FIXME buff_ptr = dma_map_single(... data_ptr ...) */
curr_bd->buff_ptr = virt_to_phys(curr_bd->data_ptr);
curr_bd->off_b_len = rxch->buf_size;
@@ -1879,7 +1887,7 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
rxch->active_queue_tail = curr_bd;
if (0 != rxch->queue_active) {
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head));
+ emac_virt_to_phys(rxch->active_queue_head, priv));
rxch->queue_active = 1;
}
} else {
@@ -1890,11 +1898,11 @@ static void emac_addbd_to_rx_queue(struct emac_priv *priv, u32 ch,
rxch->active_queue_tail = curr_bd;
tail_bd->next = curr_bd;
tail_bd = EMAC_VIRT_NOCACHE(tail_bd);
- tail_bd->h_next = emac_virt_to_phys(curr_bd);
+ tail_bd->h_next = emac_virt_to_phys(curr_bd, priv);
frame_status = tail_bd->mode;
if (frame_status & EMAC_CPPI_EOQ_BIT) {
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd));
+ emac_virt_to_phys(curr_bd, priv));
frame_status &= ~(EMAC_CPPI_EOQ_BIT);
tail_bd->mode = frame_status;
++rxch->end_of_queue_add;
@@ -1987,7 +1995,7 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
curr_pkt->num_bufs = 1;
curr_pkt->pkt_length =
(frame_status & EMAC_RX_BD_PKT_LENGTH_MASK);
- emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd));
+ emac_write(EMAC_RXCP(ch), emac_virt_to_phys(curr_bd, priv));
++rxch->processed_bd;
last_bd = curr_bd;
curr_bd = last_bd->next;
@@ -1998,7 +2006,7 @@ static int emac_rx_bdproc(struct emac_priv *priv, u32 ch, u32 budget)
if (curr_bd) {
++rxch->mis_queued_packets;
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(curr_bd));
+ emac_virt_to_phys(curr_bd, priv));
} else {
++rxch->end_of_queue;
rxch->queue_active = 0;
@@ -2099,7 +2107,7 @@ static int emac_hw_enable(struct emac_priv *priv)
emac_write(EMAC_RXINTMASKSET, BIT(ch));
rxch->queue_active = 1;
emac_write(EMAC_RXHDP(ch),
- emac_virt_to_phys(rxch->active_queue_head));
+ emac_virt_to_phys(rxch->active_queue_head, priv));
}
/* Enable MII */
@@ -2651,7 +2659,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
- printk(KERN_ERR "DaVinci EMAC: No platfrom data\n");
+ printk(KERN_ERR "DaVinci EMAC: No platform data\n");
return -ENODEV;
}
@@ -2660,6 +2668,9 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->phy_mask = pdata->phy_mask;
priv->rmii_en = pdata->rmii_en;
priv->version = pdata->version;
+ priv->int_enable = pdata->interrupt_enable;
+ priv->int_disable = pdata->interrupt_disable;
+
emac_dev = &ndev->dev;
/* Get EMAC platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2692,6 +2703,12 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->ctrl_ram_size = pdata->ctrl_ram_size;
priv->emac_ctrl_ram = priv->remap_addr + pdata->ctrl_ram_offset;
+ if (pdata->hw_ram_addr)
+ priv->hw_ram_addr = pdata->hw_ram_addr;
+ else
+ priv->hw_ram_addr = (u32 __force)res->start +
+ pdata->ctrl_ram_offset;
+
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n");
@@ -2711,6 +2728,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
SET_ETHTOOL_OPS(ndev, &ethtool_ops);
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ clk_enable(emac_clk);
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
@@ -2720,7 +2739,6 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
goto netdev_reg_err;
}
- clk_enable(emac_clk);
/* MII/Phy intialisation, mdio bus registration */
emac_mii = mdiobus_alloc();
@@ -2760,6 +2778,7 @@ mdiobus_quit:
netdev_reg_err:
mdio_alloc_err:
+ clk_disable(emac_clk);
no_irq_res:
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, res->end - res->start + 1);
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a3..98da085445e6 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
- pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
- "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
+ pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
print_name, board_name, dfx_use_mmio ? "" : "I/O ",
- (long long)bar_start, dev->irq,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ (long long)bar_start, dev->irq, dev->dev_addr);
/*
* Get memory for descriptor block, consumer block, and other buffers
@@ -3631,7 +3628,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);
-static struct pci_device_id dfx_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
{ }
};
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca8..7caab3d26a9e 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
driver_data Data private to the driver.
*/
-static const struct pci_device_id rio_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
{ }
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d3..5c7a155e849a 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
-static struct pci_device_id e100_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 7e855f9bbd97..9ec7480be1d8 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
* Macro expands to...
* {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x1000),
INTEL_E1000_ETHERNET_DEVICE(0x1001),
INTEL_E1000_ETHERNET_DEVICE(0x1004),
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index b979464091bb..02d67d047d96 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -237,6 +237,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* check for link */
switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 3028f23da891..e2aa3b788564 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -224,6 +224,8 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
? true : false;
+ /* Adaptive IFS not supported */
+ mac->adaptive_ifs = false;
/* check for link */
switch (hw->phy.media_type) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 2784cf44a6f3..eccf29b75c41 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -818,6 +818,7 @@ struct e1000_mac_info {
u8 forced_speed_duplex;
+ bool adaptive_ifs;
bool arc_subsystem_valid;
bool autoneg;
bool autoneg_failed;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 9b09246af064..ad08cf3f40c0 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -454,6 +454,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->rar_entry_count--;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid = true;
+ /* Adaptive IFS supported */
+ mac->adaptive_ifs = true;
/* LED operations */
switch (mac->type) {
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a86c17548c1e..2fa9b36a2c5a 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -125,6 +125,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
u32 i;
+ u8 mac_addr[ETH_ALEN] = {0};
/* Setup the receive address */
e_dbg("Programming MAC Address into RAR[0]\n");
@@ -133,12 +134,8 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
/* Zero out the other (rar_entry_count - 1) receive addresses */
e_dbg("Clearing RAR[1-%u]\n", rar_count-1);
- for (i = 1; i < rar_count; i++) {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
- e1e_flush();
- }
+ for (i = 1; i < rar_count; i++)
+ e1000e_rar_set(hw, mac_addr, i);
}
/**
@@ -164,10 +161,19 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
- rar_high |= E1000_RAH_AV;
+ /* If MAC address zero, no need to set the AV bit */
+ if (rar_low || rar_high)
+ rar_high |= E1000_RAH_AV;
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+ /*
+ * Some bridges will combine consecutive 32-bit writes into
+ * a single burst write, which will malfunction on some parts.
+ * The flushes avoid this.
+ */
+ ew32(RAL(index), rar_low);
+ e1e_flush();
+ ew32(RAH(index), rar_high);
+ e1e_flush();
}
/**
@@ -1609,6 +1615,11 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
mac->current_ifs_val = 0;
mac->ifs_min_val = IFS_MIN;
mac->ifs_max_val = IFS_MAX;
@@ -1617,6 +1628,8 @@ void e1000e_reset_adaptive(struct e1000_hw *hw)
mac->in_ifs_mode = false;
ew32(AIT, 0);
+out:
+ return;
}
/**
@@ -1630,6 +1643,11 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
+ if (!mac->adaptive_ifs) {
+ e_dbg("Not in Adaptive IFS mode!\n");
+ goto out;
+ }
+
if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
if (mac->tx_packet_delta > MIN_NUM_XMITS) {
mac->in_ifs_mode = true;
@@ -1650,6 +1668,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
ew32(AIT, 0);
}
}
+out:
+ return;
}
/**
@@ -2287,10 +2307,12 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
s32 ret_val, hdr_csum, csum;
u8 i, len;
+ hw->mac.tx_pkt_filtering = true;
+
/* No manageability, no filtering */
if (!e1000e_check_mng_mode(hw)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
/*
@@ -2298,9 +2320,9 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
- if (ret_val != 0) {
+ if (ret_val) {
hw->mac.tx_pkt_filtering = false;
- return ret_val;
+ goto out;
}
/* Read in the header. Length and offset are in dwords. */
@@ -2319,17 +2341,17 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
*/
if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
hw->mac.tx_pkt_filtering = true;
- return 1;
+ goto out;
}
/* Cookie area is valid, make the final check for filtering. */
if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
hw->mac.tx_pkt_filtering = false;
- return 0;
+ goto out;
}
- hw->mac.tx_pkt_filtering = true;
- return 1;
+out:
+ return hw->mac.tx_pkt_filtering;
}
/**
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 762b697ce731..c3745c9d21aa 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -3315,24 +3315,24 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
- adapter->stats.scc += phy_data;
+ if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data))
+ adapter->stats.scc += phy_data;
e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
- adapter->stats.ecol += phy_data;
+ if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data))
+ adapter->stats.ecol += phy_data;
e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
- e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
- adapter->stats.mcc += phy_data;
+ if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data))
+ adapter->stats.mcc += phy_data;
e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
- e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
- adapter->stats.latecol += phy_data;
+ if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data))
+ adapter->stats.latecol += phy_data;
e1e_rphy(hw, HV_DC_UPPER, &phy_data);
- e1e_rphy(hw, HV_DC_LOWER, &phy_data);
- adapter->stats.dc += phy_data;
+ if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data))
+ adapter->stats.dc += phy_data;
} else {
adapter->stats.scc += er32(SCC);
adapter->stats.ecol += er32(ECOL);
@@ -3360,8 +3360,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
- e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
- hw->mac.collision_delta = phy_data;
+ if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data))
+ hw->mac.collision_delta = phy_data;
} else {
hw->mac.collision_delta = er32(COLC);
}
@@ -3372,8 +3372,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
if ((hw->phy.type == e1000_phy_82578) ||
(hw->phy.type == e1000_phy_82577)) {
e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
- e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
- adapter->stats.tncrs += phy_data;
+ if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data))
+ adapter->stats.tncrs += phy_data;
} else {
if ((hw->mac.type != e1000_82574) &&
(hw->mac.type != e1000_82583))
@@ -4674,6 +4674,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
e1000e_disable_l1aspm(pdev);
err = pci_enable_device_mem(pdev);
@@ -4825,6 +4826,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -5325,7 +5327,7 @@ static struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1c2076228ba..ee01f5a6d0d4 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -34,7 +34,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.100"
+#define DRV_VERSION "1.1.0.241a"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
@@ -89,9 +89,12 @@ struct enic {
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int flags;
unsigned int mc_count;
int csum_rx_enabled;
u32 port_mtu;
+ u32 rx_coalesce_usecs;
+ u32 tx_coalesce_usecs;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f875751af15e..c81bc4b1816f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
/* Supported devices */
-static struct pci_device_id enic_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
{ 0, } /* end of table */
};
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
enic->msg_enable = value;
}
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+
+ tx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->tx_coalesce_usecs);
+ rx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->rx_coalesce_usecs);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
+ INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = enic_set_tso,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
};
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
u32 mtu = vnic_dev_mtu(enic->vdev);
if (mtu && mtu != enic->port_mtu) {
+ enic->port_mtu = mtu;
if (mtu < enic->netdev->mtu)
printk(KERN_WARNING PFX
"%s: interface MTU (%d) set higher "
"than switch port MTU (%d)\n",
enic->netdev->name, enic->netdev->mtu, mtu);
- enic->port_mtu = mtu;
}
}
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+ struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_wq *wq = &enic->wq[0];
@@ -771,6 +829,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
(netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+ unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int mc_count = netdev->mc_count;
unsigned int i, j;
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
spin_lock(&enic->devcmd_lock);
- vnic_dev_packet_filter(enic->vdev, directed,
- multicast, broadcast, promisc, allmulti);
+ if (enic->flags != flags) {
+ enic->flags = flags;
+ vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ }
/* Is there an easier way? Trying to minimize to
* calls to add/del multicast addrs. We keep the
@@ -1084,34 +1146,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
-static void enic_rq_drop_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb = buf->os_buf;
-
- if (skipped)
- return;
-
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
-
- dev_kfree_skb_any(skb);
-}
-
-static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_drop_buf, opaque);
-
- return 0;
-}
-
static int enic_poll(struct napi_struct *napi, int budget)
{
struct enic *enic = container_of(napi, struct enic, napi);
@@ -1119,6 +1153,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int work_done, rq_work_done, wq_work_done;
+ int err;
/* Service RQ (first) and WQ
*/
@@ -1142,16 +1177,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- if (rq_work_done > 0) {
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
- /* Replenish RQ
- */
+ /* Buffer allocation failed. Stay in polling
+ * mode so we can try to fill the ring again.
+ */
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+ if (err)
+ rq_work_done = rq_work_to_do;
- } else {
+ if (rq_work_done < rq_work_to_do) {
- /* If no work done, flush all LROs and exit polling
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
@@ -1170,6 +1208,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
struct net_device *netdev = enic->netdev;
unsigned int work_to_do = budget;
unsigned int work_done;
+ int err;
/* Service RQ
*/
@@ -1177,25 +1216,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
work_to_do, enic_rq_service, NULL);
- if (work_done > 0) {
-
- /* Replenish RQ
- */
-
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
-
- /* Return intr event credits for this polling
- * cycle. An intr event is the completion of a
- * RQ packet.
- */
+ /* Return intr event credits for this polling
+ * cycle. An intr event is the completion of a
+ * RQ packet.
+ */
+ if (work_done > 0)
vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
work_done,
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- } else {
- /* If no work done, flush all LROs and exit polling
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+
+ /* Buffer allocation failed. Stay in polling mode
+ * so we can try to fill the ring again.
+ */
+
+ if (err)
+ work_done = work_to_do;
+
+ if (work_done < work_to_do) {
+
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
@@ -1304,6 +1348,24 @@ static int enic_request_intr(struct enic *enic)
return err;
}
+static void enic_synchronize_irqs(struct enic *enic)
+{
+ unsigned int i;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ synchronize_irq(enic->pdev->irq);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->intr_count; i++)
+ synchronize_irq(enic->msix_entry[i].vector);
+ break;
+ default:
+ break;
+ }
+}
+
static int enic_notify_set(struct enic *enic)
{
int err;
@@ -1360,11 +1422,13 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
- if (err) {
+ vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
printk(KERN_ERR PFX
"%s: Unable to alloc receive buffers.\n",
netdev->name);
+ err = -ENOMEM;
goto err_out_notify_unset;
}
}
@@ -1409,16 +1473,19 @@ static int enic_stop(struct net_device *netdev)
unsigned int i;
int err;
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_mask(&enic->intr[i]);
+
+ enic_synchronize_irqs(enic);
+
del_timer_sync(&enic->notify_timer);
spin_lock(&enic->devcmd_lock);
vnic_dev_disable(enic->vdev);
spin_unlock(&enic->devcmd_lock);
napi_disable(&enic->napi);
- netif_stop_queue(netdev);
-
- for (i = 0; i < enic->intr_count; i++)
- vnic_intr_mask(&enic->intr[i]);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
@@ -1436,11 +1503,6 @@ static int enic_stop(struct net_device *netdev)
spin_unlock(&enic->devcmd_lock);
enic_free_intr(enic);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
- -1, enic_rq_service_drop, NULL);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
- -1, enic_wq_service, NULL);
-
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
for (i = 0; i < enic->rq_count; i++)
@@ -1762,7 +1824,8 @@ int enic_dev_init(struct enic *enic)
err = enic_set_intr_mode(enic);
if (err) {
printk(KERN_ERR PFX
- "Failed to set intr mode, aborting.\n");
+ "Failed to set intr mode based on resource "
+ "counts and system capabilities, aborting.\n");
return err;
}
@@ -1986,6 +2049,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_dev_deinit;
}
+ enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
+
netdev->netdev_ops = &enic_netdev_ops;
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 32111144efc9..02839bf0fe8b 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -66,21 +66,21 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(wq_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(mtu);
- GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(intr_mode);
+ GET_CONFIG(intr_timer_usec);
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
c->wq_desc_count));
- c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
min_t(u32, ENIC_MAX_RQ_DESCS,
max_t(u32, ENIC_MIN_RQ_DESCS,
c->rq_desc_count));
- c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
@@ -88,15 +88,17 @@ int enic_get_vnic_config(struct enic *enic)
max_t(u16, ENIC_MIN_MTU,
c->mtu));
- c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+ c->intr_timer_usec = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ c->intr_timer_usec);
printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
- "intr timer %d\n",
+ "intr timer %d usec\n",
c->mtu, ENIC_SETTING(enic, TXCSUM),
ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
- ENIC_SETTING(enic, LRO), c->intr_timer);
+ ENIC_SETTING(enic, LRO), c->intr_timer_usec);
return 0;
}
@@ -303,7 +305,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
- enic->config.intr_timer,
+ INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
enic->config.intr_timer_type,
mask_on_assertion);
}
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 29a48e8b59d3..69b9b70c7da0 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -36,7 +36,6 @@ struct vnic_res {
};
#define VNIC_DEV_CAP_INIT 0x0001
-#define VNIC_DEV_CAP_PERBI 0x0002
struct vnic_dev {
void *priv;
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 6332ac9391b8..8eeb6758491b 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,6 +20,10 @@
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
+
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
@@ -30,6 +34,7 @@ struct vnic_enet_config {
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
+ u32 intr_timer_usec;
};
#define VENETF_TSO 0x1 /* TSO enabled */
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 1f8786d7195e..3934309a9498 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -50,12 +50,18 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
- iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ vnic_intr_coalescing_timer_set(intr, coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer)
+{
+ iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+}
+
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 9a53604edce6..2fe6c6339e3c 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -61,6 +61,7 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
+ (void)ioread32(&intr->ctrl->mask);
}
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
@@ -101,6 +102,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index eeaf329945d8..cf80ab46d582 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -41,12 +41,12 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
-#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 0)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 1)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 2)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 3)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 4)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec8..1f8b11449fad 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -167,7 +167,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
};
-static struct pci_device_id epic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589e..e6a98129d787 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1941,7 +1941,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-static struct pci_device_id fealnx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804a..3eb713b014f9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -6198,7 +6198,7 @@ static void nv_shutdown(struct pci_dev *pdev)
#define nv_resume NULL
#endif /* CONFIG_PM */
-static struct pci_device_id pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 25fabb3eedc5..d5160edf2fcf 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -46,6 +46,11 @@
#include "gianfar.h"
#include "fsl_pq_mdio.h"
+struct fsl_pq_mdio_priv {
+ void __iomem *map;
+ struct fsl_pq_mdio __iomem *regs;
+};
+
/*
* Write value to the PHY at mii_id at register regnum,
* on the bus attached to the local interface, which may be different from the
@@ -105,7 +110,9 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
{
- return (void __iomem __force *)bus->priv;
+ struct fsl_pq_mdio_priv *priv = bus->priv;
+
+ return priv->regs;
}
/*
@@ -266,6 +273,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
{
struct device_node *np = ofdev->node;
struct device_node *tbi;
+ struct fsl_pq_mdio_priv *priv;
struct fsl_pq_mdio __iomem *regs = NULL;
void __iomem *map;
u32 __iomem *tbipa;
@@ -274,14 +282,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
u64 addr = 0, size = 0;
int err = 0;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
new_bus = mdiobus_alloc();
if (NULL == new_bus)
- return -ENOMEM;
+ goto err_free_priv;
new_bus->name = "Freescale PowerQUICC MII Bus",
new_bus->read = &fsl_pq_mdio_read,
new_bus->write = &fsl_pq_mdio_write,
new_bus->reset = &fsl_pq_mdio_reset,
+ new_bus->priv = priv;
fsl_pq_mdio_bus_name(new_bus->id, np);
/* Set the PHY base address */
@@ -291,6 +304,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
err = -ENOMEM;
goto err_free_bus;
}
+ priv->map = map;
if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
of_device_is_compatible(np, "fsl,gianfar-tbi") ||
@@ -298,8 +312,7 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
of_device_is_compatible(np, "ucc_geth_phy"))
map -= offsetof(struct fsl_pq_mdio, miimcfg);
regs = map;
-
- new_bus->priv = (void __force *)regs;
+ priv->regs = regs;
new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
@@ -392,10 +405,11 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
err_free_irqs:
kfree(new_bus->irq);
err_unmap_regs:
- iounmap(regs);
+ iounmap(priv->map);
err_free_bus:
kfree(new_bus);
-
+err_free_priv:
+ kfree(priv);
return err;
}
@@ -404,14 +418,16 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
{
struct device *device = &ofdev->dev;
struct mii_bus *bus = dev_get_drvdata(device);
+ struct fsl_pq_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
dev_set_drvdata(device, NULL);
- iounmap(fsl_pq_mdio_get_regs(bus));
+ iounmap(priv->map);
bus->priv = NULL;
mdiobus_free(bus);
+ kfree(priv);
return 0;
}
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a2..dd72c5025e6a 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1990,7 +1990,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
}
}
-static struct pci_device_id hamachi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index ae5f11c8fc13..bdadf3e23c94 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -248,6 +248,7 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned char *ptr;
struct bpqdev *bpq;
+ struct net_device *orig_dev;
int size;
/*
@@ -282,8 +283,9 @@ static netdev_tx_t bpq_xmit(struct sk_buff *skb, struct net_device *dev)
bpq = netdev_priv(dev);
+ orig_dev = dev;
if ((dev = bpq_get_ether_dev(dev)) == NULL) {
- dev->stats.tx_dropped++;
+ orig_dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e1..0c2f2e8b1c47 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -210,7 +210,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
#endif
#ifdef CONFIG_PCI
-static struct pci_device_id hp100_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 933c64ff2465..d9679493c635 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -60,7 +60,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static struct pci_device_id igb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -4105,6 +4105,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
u32 icr = rd32(E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4728,6 +4731,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
igb_write_itr(q_vector);
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4767,6 +4773,9 @@ static irqreturn_t igb_intr(int irq, void *data)
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 0dbd0320023a..a6c39209f66f 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2608,11 +2608,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
- dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
- /* MAC address */
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}
@@ -2778,11 +2774,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->dev_addr);
err = -EIO;
goto err_hw_init;
}
@@ -2884,7 +2877,7 @@ static struct pci_error_handlers igbvf_err_handler = {
.resume = igbvf_io_resume,
};
-static struct pci_device_id igbvf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
{ } /* terminate list */
};
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c2..81a4c5d30733 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1383,7 +1383,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
*/
}
-static struct pci_device_id ioc3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a0..49f35e2ed19f 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -92,7 +92,7 @@ static const char *ipg_brand_name[] = {
"D-Link NIC IP1000A"
};
-static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
{ PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 9b2eebdbb25b..b5cbd39d0685 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -36,6 +36,7 @@
#include <asm/pb1000.h>
#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
#include <asm/db1x00.h>
+#include <asm/mach-db1x00/bcsr.h>
#else
#error au1k_ir: unsupported board
#endif
@@ -66,10 +67,6 @@ static char version[] __devinitdata =
#define RUN_AT(x) (jiffies + (x))
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
-static BCSR * const bcsr = (BCSR *)0xAE000000;
-#endif
-
static DEFINE_SPINLOCK(ir_lock);
/*
@@ -282,9 +279,8 @@ static int au1k_irda_net_init(struct net_device *dev)
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
/* power on */
- bcsr->resets &= ~BCSR_RESETS_IRDA_MODE_MASK;
- bcsr->resets |= BCSR_RESETS_IRDA_MODE_FULL;
- au_sync();
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
+ BCSR_RESETS_IRDA_MODE_FULL);
#endif
return 0;
@@ -720,14 +716,14 @@ au1k_irda_set_speed(struct net_device *dev, int speed)
if (speed == 4000000) {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr->resets |= BCSR_RESETS_FIR_SEL;
+ bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
#else /* Pb1000 and Pb1100 */
writel(1<<13, CPLD_AUX1);
#endif
}
else {
#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr->resets &= ~BCSR_RESETS_FIR_SEL;
+ bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
#else /* Pb1000 and Pb1100 */
writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
#endif
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d5572..b7e6625ca75e 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
-static struct pci_device_id toshoboe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index e8e33bb9d876..e591b36a97ea 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1124,11 +1124,12 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
* The actual image starts after the "STMP" keyword
* so forward to the firmware header tag
*/
- for (i = 0; (fw->data[i] != STIR421X_PATCH_END_OF_HDR_TAG) &&
- (i < fw->size); i++) ;
+ for (i = 0; i < fw->size && fw->data[i] !=
+ STIR421X_PATCH_END_OF_HDR_TAG; i++)
+ ;
/* here we check for the out of buffer case */
- if ((STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) &&
- (i < STIR421X_PATCH_CODE_OFFSET)) {
+ if (i < STIR421X_PATCH_CODE_OFFSET && i < fw->size &&
+ STIR421X_PATCH_END_OF_HDR_TAG == fw->data[i]) {
if (!memcmp(fw->data + i + 1, STIR421X_PATCH_STMP_TAG,
sizeof(STIR421X_PATCH_STMP_TAG) - 1)) {
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd5453..6533c010cf5c 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
}
}
-static struct pci_device_id via_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76a..209d4bcfaced 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
static /* const */ char drivername[] = DRIVER_NAME;
-static struct pci_device_id vlsi_irda_table [] = {
+static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
{
.class = PCI_CLASS_WIRELESS_IRDA << 8,
.class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index bcd0f01d5feb..6c2d9366fe5e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 21b41f42b61c..1dd867df2967 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,7 +33,8 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8da8eb535084..ed735857695c 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -98,6 +98,23 @@
#define IXGBE_MAX_RSC_INT_RATE 162760
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS 64
+#define IXGBE_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
+ bool clear_to_send;
+ int rar;
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -171,7 +188,7 @@ struct ixgbe_ring {
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
RING_F_DCB,
- RING_F_VMDQ,
+ RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
#ifdef IXGBE_FCOE
@@ -183,7 +200,7 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
@@ -288,6 +305,8 @@ struct ixgbe_adapter {
/* RX */
struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 non_eop_descs;
@@ -330,6 +349,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 30)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 31)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +400,11 @@ struct ixgbe_adapter {
u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
+
+ /* SR-IOV */
+ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
};
enum ixbge_state_t {
@@ -440,6 +466,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 538340527aa6..9ec296cf4c40 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -31,6 +31,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -951,8 +952,6 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
msleep(50);
-
-
/*
* Store the original AUTOC/AUTOC2 values if they have not been
* stored off yet. Otherwise restore the stored original
@@ -1095,9 +1094,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
u32 regindex;
+ u32 vlvf_index;
u32 bitindex;
u32 bits;
u32 first_empty_slot;
+ u32 vt_ctl;
if (vlan > 4095)
return IXGBE_ERR_PARAM;
@@ -1124,76 +1125,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Part 2
- * If the vind is set
+ * If VT mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- if (vind) {
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !first_empty_slot)
- first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
+ vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
+ goto out;
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
+ /* find the vlanid or the first empty slot */
+ first_empty_slot = 0;
+
+ for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
+ if (!bits && !first_empty_slot)
+ first_empty_slot = vlvf_index;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ vlvf_index = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ goto out;
}
+ }
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- }
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
} else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- }
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
}
+ }
- if (bits)
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
- (IXGBE_VLVF_VIEN | vlan));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ /* if bits is non-zero then some pools/VFs are still
+ * using this VLAN ID. Force the VFTA entry to on */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ bits |= (1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
}
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
out:
return 0;
@@ -2655,4 +2664,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
+ .mbx_ops = &mbx_ops_82599,
};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 688b8ca5da32..1cedb9af63dc 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1278,19 +1278,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
/* Get the MAC address from the RAR0 for later reference */
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
- hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
} else {
/* Setup the receive address. */
hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
- hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 0bd49d3b9f65..a0107b5a28e7 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1867,11 +1867,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (ixgbe_intr_test(adapter, &data[2]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* If SRIOV or VMDq is enabled then skip MAC
+ * loopback diagnostic. */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+ "mode\n");
+ data[3] = 0;
+ goto skip_loopback;
+ }
+
ixgbe_reset(adapter);
DPRINTK(HW, INFO, "loopback testing starting\n");
if (ixgbe_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+skip_loopback:
ixgbe_reset(adapter);
clear_bit(__IXGBE_TESTING, &adapter->state);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 1a2ea621e371..81971ed607eb 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,6 +45,7 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
};
#endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+ "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+ if (adapter->vfinfo)
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -262,10 +305,12 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
int reg_idx = tx_ring->reg_idx;
int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82598EB:
tc = reg_idx >> 2;
txoff = IXGBE_TFCS_TXOFF0;
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ break;
+ case ixgbe_mac_82599EB:
tc = 0;
txoff = IXGBE_TFCS_TXOFF;
if (dcb_i == 8) {
@@ -284,6 +329,9 @@ static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
tc += (reg_idx - 96) >> 4;
}
}
+ break;
+ default:
+ tc = 0;
}
txoff <<= tc;
}
@@ -1020,7 +1068,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ if (adapter->num_vfs)
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+ else
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -1249,6 +1302,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
if (hw->mac.type == ixgbe_mac_82598EB)
ixgbe_check_fan_failure(adapter, eicr);
@@ -1763,6 +1819,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ if (adapter->num_vfs)
+ mask |= IXGBE_EIMS_MAILBOX;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1771,6 +1829,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
+
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
}
/**
@@ -1900,6 +1963,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ if (adapter->num_vfs > 32)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1984,18 +2049,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB) {
u32 rttdcs;
+ u32 mask;
/* disable the arbiter while setting MTQC */
rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
rttdcs |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- /* We enable 8 traffic classes, DCB only */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
- IXGBE_MTQC_8TC_8TQ));
- else
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
/* re-eable the arbiter */
rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2054,12 +2133,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
| IXGBE_FLAG_DCB_ENABLED
#endif
+ | IXGBE_FLAG_SRIOV_ENABLED
);
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
mrqc = IXGBE_MRQC_RSSEN;
break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
#ifdef CONFIG_IXGBE_DCB
case (IXGBE_FLAG_DCB_ENABLED):
mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2140,7 +2223,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
int rx_buf_len;
/* Decide whether to use packet split mode or not */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (!adapter->num_vfs)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2152,7 +2237,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PSRTYPE(adapter->num_vfs),
+ psrtype);
}
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2238,6 +2325,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+ | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs <<
+ IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = adapter->num_vfs / 32;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ ixgbe_set_vmolr(hw, adapter->num_vfs);
+ }
+
/* Program MRQC for the distribution of queues */
mrqc = ixgbe_setup_mrqc(adapter);
@@ -2269,6 +2380,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if (adapter->num_vfs) {
+ u32 reg;
+
+ /* Map PF MAC address in RAR Entry 0 to first pool
+ * following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /* Set up VF register offsets for selected VT Mode, i.e.
+ * 64 VFs for SR-IOV */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ reg |= IXGBE_GCR_EXT_SRIOV;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+ }
+
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2409,7 +2534,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
* responsible for configuring the hardware for proper unicast, multicast and
* promiscuous mode.
**/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -2449,6 +2574,8 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
+ if (adapter->num_vfs)
+ ixgbe_restore_vf_multicasts(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2709,6 +2836,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* MSI only */
gpie = 0;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2783,6 +2914,18 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ int wait_loop = 10;
+ /* poll for Tx Enable ready */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+ } while (--wait_loop &&
+ !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ DPRINTK(DRV, ERR, "Could not enable "
+ "Tx Queue %d\n", j);
+ }
}
for (i = 0; i < num_rx_rings; i++) {
@@ -2918,7 +3061,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
}
/**
@@ -3286,6 +3430,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
}
#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
/*
* ixgbe_set_num_queues: Allocate queues for device, feature dependant
* @adapter: board private structure to initialize
@@ -3299,6 +3456,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
+
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
@@ -3570,6 +3736,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+ adapter->rx_ring[0].reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0].reg_idx = adapter->num_vfs * 2;
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
+/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
@@ -3586,6 +3770,9 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
adapter->rx_ring[0].reg_idx = 0;
adapter->tx_ring[0].reg_idx = 0;
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
+
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
return;
@@ -3695,6 +3882,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_set_num_queues(adapter);
err = pci_enable_msi(adapter->pdev);
@@ -5479,7 +5669,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
return 0;
}
@@ -5612,6 +5803,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#endif /* IXGBE_FCOE */
};
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* Oh oh */
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for VF "
+ "Data Storage - SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -5786,6 +6032,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
+ ixgbe_probe_vf(adapter, ii);
+
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
@@ -5806,6 +6054,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
@@ -5932,6 +6183,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_setup_dca(adapter);
}
#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+ adapter->num_vfs);
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -5944,6 +6202,8 @@ err_register:
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->sfp_task);
@@ -6012,6 +6272,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 000000000000..d75f9148eb1f
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfuly read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setup the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* reserve mailbox for vf use */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer*/
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+struct ixgbe_mbx_operations mbx_ops_82599 = {
+ .read = ixgbe_read_mbx_pf,
+ .write = ixgbe_write_mbx_pf,
+ .read_posted = ixgbe_read_posted_mbx,
+ .write_posted = ixgbe_write_posted_mbx,
+ .check_for_msg = ixgbe_check_for_msg_pf,
+ .check_for_ack = ixgbe_check_for_ack_pf,
+ .check_for_rst = ixgbe_check_for_rst_pf,
+};
+
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 000000000000..be7ab3309ab7
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for exra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+extern struct ixgbe_mbx_operations mbx_ops_82599;
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 000000000000..74bca74d57c1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,336 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+
+#include "ixgbe_sriov.h"
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ int i;
+
+ /* only so many hash values supported */
+ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multi cast addresses assigned
+ * to this VF for later use to restore when the PF multi cast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];;
+ }
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo;
+ int i, j;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ vfinfo = &adapter->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ hw->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+ }
+}
+
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
+{
+ u32 ctrl;
+
+ /* Check if global VLAN already set, if not set it */
+ ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+ if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ }
+
+ return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr |= (IXGBE_VMOLR_AUPE |
+ IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE |
+ IXGBE_VMOLR_BAM);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* reset offloads to defaults */
+ ixgbe_set_vmolr(hw, vf);
+
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ if (adapter->vfinfo[vf].rar > 0) {
+ adapter->hw.mac.ops.clear_rar(&adapter->hw,
+ adapter->vfinfo[vf].rar);
+ adapter->vfinfo[vf].rar = -1;
+ }
+}
+
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
+ vf, IXGBE_RAH_AV);
+ if (adapter->vfinfo[vf].rar < 0) {
+ DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
+ return -1;
+ }
+
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+ return 0;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+ unsigned char vf_mac_addr[6];
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ unsigned int vfn = (event_mask & 0x3f);
+
+ bool enable = ((event_mask & 0x10000000U) != 0);
+
+ if (enable) {
+ random_ether_addr(vf_mac_addr);
+ DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
+ "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vfn,
+ vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
+ vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+ /*
+ * Store away the VF "permananet" MAC address, it will ask
+ * for it later.
+ */
+ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+ }
+
+ return 0;
+}
+
+inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (reg | (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ ixgbe_vf_reset_event(adapter, vf);
+}
+
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+ u32 msgbuf[mbx_size];
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 retval;
+ int entries;
+ u16 *hash_list;
+ int add, vid;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ printk(KERN_ERR "Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_vf_reset_msg(adapter, vf);
+ adapter->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ {
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac))
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ else
+ retval = -1;
+ }
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ hash_list = (u16 *)&msgbuf[1];
+ retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ break;
+ default:
+ DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 000000000000..664b237eacb9
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,45 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf);
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 84650c6ebe03..ec8ad182e2f5 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -277,6 +277,7 @@
#define IXGBE_DTXCTL 0x07E00
#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFDTXGSWC 0x08220
#define IXGBE_DTXMXSZRQ 0x08100
#define IXGBE_DTXTCPFLGL 0x04A88
#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTPCS 0x0CD00
#define IXGBE_RTRUP2TC 0x03020
#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1295,6 +1308,7 @@
/* VLAN pool filtering masks */
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1843,6 +1857,12 @@
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+
/* Little Endian defines */
#ifndef __le32
#define __le32 u32
@@ -2463,6 +2483,37 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
};
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
struct ixgbe_hw {
u8 __iomem *hw_addr;
void *back;
@@ -2472,6 +2523,7 @@ struct ixgbe_hw {
struct ixgbe_phy_info phy;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -2486,6 +2538,7 @@ struct ixgbe_info {
struct ixgbe_mac_operations *mac_ops;
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
+ struct ixgbe_mbx_operations *mbx_ops;
};
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 000000000000..dd4e0d27e8cc
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2009 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+ mbx.o \
+ ethtool.o \
+ ixgbevf_main.o
+
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 000000000000..c44fdb05447a
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 1
+#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_MAILBOX | \
+ IXGBE_EIMS_OTHER)
+
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR -1
+#define IXGBE_ERR_RESET_FAILED -2
+
+#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 000000000000..399be0c34c36
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,716 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+};
+
+#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \
+ offsetof(struct ixgbevf_adapter, m), \
+ offsetof(struct ixgbevf_adapter, b)
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
+ {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
+ {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
+ {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
+ {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
+ {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
+ {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
+ {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
+ {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = -1;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
+}
+
+static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ if (data)
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ } else {
+ ixgbevf_reset(adapter);
+ }
+
+ return 0;
+}
+
+static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
+{
+ if (data) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ netif_tx_start_all_queues(netdev);
+ }
+ return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+ "IXGBE_VFCTRL",
+ "IXGBE_VFSTATUS",
+ "IXGBE_VFLINKS",
+ "IXGBE_VFRXMEMWRAP",
+ "IXGBE_VFRTIMER",
+ "IXGBE_VTEICR",
+ "IXGBE_VTEICS",
+ "IXGBE_VTEIMS",
+ "IXGBE_VTEIMC",
+ "IXGBE_VTEIAC",
+ "IXGBE_VTEIAM",
+ "IXGBE_VTEITR",
+ "IXGBE_VTIVAR",
+ "IXGBE_VTIVAR_MISC",
+ "IXGBE_VFRDBAL0",
+ "IXGBE_VFRDBAL1",
+ "IXGBE_VFRDBAH0",
+ "IXGBE_VFRDBAH1",
+ "IXGBE_VFRDLEN0",
+ "IXGBE_VFRDLEN1",
+ "IXGBE_VFRDH0",
+ "IXGBE_VFRDH1",
+ "IXGBE_VFRDT0",
+ "IXGBE_VFRDT1",
+ "IXGBE_VFRXDCTL0",
+ "IXGBE_VFRXDCTL1",
+ "IXGBE_VFSRRCTL0",
+ "IXGBE_VFSRRCTL1",
+ "IXGBE_VFPSRTYPE",
+ "IXGBE_VFTDBAL0",
+ "IXGBE_VFTDBAL1",
+ "IXGBE_VFTDBAH0",
+ "IXGBE_VFTDBAH1",
+ "IXGBE_VFTDLEN0",
+ "IXGBE_VFTDLEN1",
+ "IXGBE_VFTDH0",
+ "IXGBE_VFTDH1",
+ "IXGBE_VFTDT0",
+ "IXGBE_VFTDT1",
+ "IXGBE_VFTXDCTL0",
+ "IXGBE_VFTXDCTL1",
+ "IXGBE_VFTDWBAL0",
+ "IXGBE_VFTDWBAL1",
+ "IXGBE_VFTDWBAH0",
+ "IXGBE_VFTDWBAH1"
+};
+
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+ return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u32 regs_len = ixgbevf_get_regs_len(netdev);
+ u8 i;
+
+ memset(p, 0, regs_len);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+
+ /* Interrupt */
+ /* don't read EICR because it can clear interrupt causes, instead
+ * read EICS which is a shadow but doesn't clear EICR */
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+ /* Receive DMA */
+ for (i = 0; i < 2; i++)
+ regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+ /* Receive */
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+ /* Transmit */
+ for (i = 0; i < 2; i++)
+ regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+ for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+ hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+ strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IXGBEVF_MAX_RXD;
+ ring->tx_max_pending = IXGBEVF_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+ int i, err;
+ u32 new_rx_count, new_tx_count;
+ bool need_tx_update = false;
+ bool need_rx_update = false;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto err_setup;
+ }
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter,
+ &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ kfree(tx_ring);
+ goto err_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+ need_tx_update = true;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if ((!rx_ring) && (need_tx_update)) {
+ err = -ENOMEM;
+ goto err_rx_setup;
+ }
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter,
+ &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ goto err_rx_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
+ need_rx_update = true;
+ }
+
+err_rx_setup:
+ /* if rings need to be updated, here's the place to do it in one shot */
+ if (need_tx_update || need_rx_update) {
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+ }
+
+ /* tx */
+ if (need_tx_update) {
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ tx_ring = NULL;
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ /* rx */
+ if (need_rx_update) {
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ rx_ring = NULL;
+ adapter->rx_ring_count = new_rx_count;
+ }
+
+ /* success! */
+ err = 0;
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+err_setup:
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ ixgbevf_update_stats(adapter);
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ ixgbe_gstrings_stats[i].base_stat_offset;
+ data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
+ }
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default VF register test */
+static struct ixgbevf_reg_test reg_test_vf[] = {
+ { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { 0, 0, 0, 0 }
+};
+
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ hw_dbg(&adapter->hw, \
+ "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbevf_reg_test *test;
+ u32 i;
+
+ test = reg_test_vf;
+
+ /*
+ * Perform the register test, looping through the test table
+ * until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ hw_dbg(&adapter->hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbevf_reset(adapter);
+
+ hw_dbg(&adapter->hw, "register testing starting\n");
+ if (ixgbevf_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbevf_reset(adapter);
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ hw_dbg(&adapter->hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_rx_csum = ixgbevf_get_rx_csum,
+ .set_rx_csum = ixgbevf_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgbevf_set_tso,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 000000000000..f7015efbff05
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+ struct ixgbevf_adapter *adapter; /* backlink */
+ void *desc; /* descriptor ring memory */
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+ unsigned int count; /* amount of descriptors */
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+
+ int queue_index; /* needed for multiqueue queue management */
+ union {
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+ };
+
+ u16 head;
+ u16 tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
+ u16 reg_idx; /* holds the special value that gets the hardware register
+ * offset associated with this ring, which is different
+ * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* cpu for tx queue */
+ int cpu;
+#endif
+
+ u64 v_idx; /* maps directly to the index for this ring in the hardware
+ * vector array, can also be used for finding the bit in EICR
+ * and friends that represents the vector for this ring */
+
+ u16 work_limit; /* max work per interrupt */
+ u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+ int indices;
+ int mask;
+};
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD 1024
+#define IXGBEVF_DEFAULT_RXD 512
+#define IXGBEVF_MAX_TXD 4096
+#define IXGBEVF_MIN_TXD 64
+#define IXGBEVF_MAX_RXD 4096
+#define IXGBEVF_MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+ struct ixgbevf_adapter *adapter;
+ struct napi_struct napi;
+ DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+ u8 rxr_count; /* Rx ring count assigned to this vector */
+ u8 txr_count; /* Tx ring count assigned to this vector */
+ u8 tx_itr;
+ u8 rx_itr;
+ u32 eitr;
+ int v_idx; /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
+
+#define IXGBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
+
+#define IXGBE_RX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+ struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
+ struct vlan_group *vlgrp;
+#endif
+ u16 bd_number;
+ struct work_struct reset_task;
+ struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct ixgbevf_ring *tx_ring; /* One per active queue */
+ int num_tx_queues;
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ bool detect_tx_hung;
+
+ /* RX */
+ struct ixgbevf_ring *rx_ring; /* One per active queue */
+ int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in ixgbe_vf.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbevf_hw_stats stats;
+ u64 zero_base;
+ /* Interrupt Throttle Rate */
+ u32 eitr_param;
+
+ unsigned long state;
+ u32 *config_space;
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+};
+
+enum ixbgevf_state_t {
+ __IXGBEVF_TESTING,
+ __IXGBEVF_RESETTING,
+ __IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+ board_82599_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+ printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 000000000000..39544afdc57f
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3571 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbevf.h"
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+ "Intel(R) 82599 Virtual Function";
+
+#define DRV_VERSION "1.0.0-k0"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ [board_82599_vf] = &ixgbevf_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbevf_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+ board_82599_vf},
+
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg);
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+ struct ixgbevf_ring *rx_ring,
+ u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/*
+ * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ *
+ */
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+ struct ixgbe_hw *hw = &adapter->hw;
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+ }
+}
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_tx_buffer
+ *tx_buffer_info)
+{
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
+ if (tx_buffer_info->skb) {
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ }
+ tx_buffer_info->time_stamp = 0;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ unsigned int eop)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 head, tail;
+
+ /* Detect a transmit hang in hardware, this serializes the
+ * check with the clearing of time_stamp and movement of eop */
+ head = readl(hw->hw_addr + tx_ring->head);
+ tail = readl(hw->hw_addr + tx_ring->tail);
+ adapter->detect_tx_hung = false;
+ if ((head != tail) &&
+ tx_ring->tx_buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
+ /* detected Tx unit hang */
+ union ixgbe_adv_tx_desc *tx_desc;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ printk(KERN_ERR "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ head, tail,
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ return true;
+ }
+
+ return false;
+}
+
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int i, eop, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ (count < tx_ring->work_limit)) {
+ bool cleaned = false;
+ for ( ; !cleaned; count++) {
+ struct sk_buff *skb;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ cleaned = (i == eop);
+ skb = tx_buffer_info->skb;
+
+ if (cleaned && skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ ixgbevf_unmap_and_free_tx_resource(adapter,
+ tx_buffer_info);
+
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+#ifdef HAVE_TX_MQ
+ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ }
+#else
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+#endif
+ }
+
+ if (adapter->detect_tx_hung) {
+ if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
+ /* schedule immediate reset if we believe we hung */
+ printk(KERN_INFO
+ "tx hang %d detected, resetting adapter\n",
+ adapter->tx_timeout_count + 1);
+ ixgbevf_tx_timeout(adapter->netdev);
+ }
+ }
+
+ /* re-arm the interrupt */
+ if ((count >= tx_ring->work_limit) &&
+ (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+ }
+
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+
+ return (count < tx_ring->work_limit);
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+ int ret;
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+ if (adapter->vlgrp && is_vlan)
+ vlan_gro_receive(&q_vector->napi,
+ adapter->vlgrp,
+ tag, skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+ } else {
+ if (adapter->vlgrp && is_vlan)
+ ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ else
+ ret = netif_rx(skb);
+ }
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbevf_rx_buffer *bi;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ bi = &rx_ring->rx_buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+ if (!bi->page_dma &&
+ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(adapter->netdev);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ bi->page_offset = 0;
+ } else {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= (PAGE_SIZE / 2);
+ }
+
+ bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_offset,
+ (PAGE_SIZE / 2),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ skb = bi->skb;
+ if (!skb) {
+ skb = netdev_alloc_skb(adapter->netdev,
+ bufsz);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ bi->skb = skb;
+ }
+ if (!bi->dma) {
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ bi = &rx_ring->rx_buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ if (i-- == 0)
+ i = (rx_ring->count - 1);
+
+ ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+ }
+}
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len, staterr;
+ u16 hdr_info;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hdr_info & IXGBE_RXDADV_SPH)
+ adapter->rx_hdr_split++;
+ if (len > IXGBEVF_RX_HDR_SIZE)
+ len = IXGBEVF_RX_HDR_SIZE;
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+ cleaned = true;
+ skb = rx_buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ rx_buffer_info->skb = NULL;
+
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ skb_put(skb, len);
+ }
+
+ if (upper_len) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+ (page_count(rx_buffer_info->page) != 1))
+ rx_buffer_info->page = NULL;
+ else
+ get_page(rx_buffer_info->page);
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ next_buffer = &rx_ring->rx_buffer_info[i];
+
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ adapter->non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ ixgbevf_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /*
+ * Work around issue of some types of VM to VM loop back
+ * packets not getting split correctly
+ */
+ if (staterr & IXGBE_RXD_STAT_LB) {
+ u32 header_fixup_len = skb->len - skb->data_len;
+ if (header_fixup_len < 14)
+ skb_push(skb, header_fixup_len);
+ }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ adapter->netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+ if (cleaned_count)
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+ rx_ring->total_packets += total_rx_packets;
+ rx_ring->total_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+
+ return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our devices info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0, i;
+ long r_idx;
+ u64 enable_mask = 0;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ budget /= (q_vector->rxr_count ?: 1);
+ budget = max(budget, 1);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ enable_mask |= rx_ring->v_idx;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+ if (!netif_running(adapter->netdev))
+ work_done = 0;
+
+#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, enable_mask);
+ }
+
+ return work_done;
+}
+
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_q_vector *q_vector;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j, q_vectors, v_idx, r_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ /* XXX for_each_bit(...) */
+ r_idx = find_first_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues);
+
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ j = adapter->rx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 0, j, v_idx);
+ r_idx = find_next_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues,
+ r_idx + 1);
+ }
+ r_idx = find_first_bit(q_vector->txr_idx,
+ adapter->num_tx_queues);
+
+ for (i = 0; i < q_vector->txr_count; i++) {
+ j = adapter->tx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 1, j, v_idx);
+ r_idx = find_next_bit(q_vector->txr_idx,
+ adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ /* if this is a tx only vector halve the interrupt rate */
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else if (q_vector->rxr_count)
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
+
+ ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ }
+
+ ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~IXGBE_EIMS_OTHER;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current throttle rate in ints/second
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ u32 timepassed_us;
+ u64 bytes_perint;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+
+ /* simple throttlerate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+ timepassed_us = 1000000/eitr;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ retval = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ retval = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ retval = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ retval = low_latency;
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new value to be written in *register* format, not ints/s
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ u32 new_itr;
+ u8 current_itr, ret_itr;
+ int i, r_idx, v_idx = q_vector->v_idx;
+ struct ixgbevf_ring *rx_ring, *tx_ring;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+ q_vector->tx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+ q_vector->rx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 100000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ default:
+ new_itr = 8000;
+ break;
+ }
+
+ if (new_itr != q_vector->eitr) {
+ u32 itr_reg;
+
+ /* save the algorithm value here, not the smoothed one */
+ q_vector->eitr = new_itr;
+ /* do an exponential smoothing */
+ new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
+ ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ }
+
+ return;
+}
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *tx_ring;
+ int i, r_idx;
+
+ if (!q_vector->txr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring->total_bytes = 0;
+ tx_ring->total_packets = 0;
+ ixgbevf_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ int r_idx;
+ int i;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring->total_bytes = 0;
+ rx_ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* disable interrupts on this vector only */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+ napi_schedule(&q_vector->napi);
+
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
+{
+ ixgbevf_msix_clean_rx(irq, data);
+ ixgbevf_msix_clean_tx(irq, data);
+
+ return IRQ_HANDLED;
+}
+
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
+ a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
+ a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /*
+ * If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ */
+ /* Re-adjusting *qpv takes care of the remainder. */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ irqreturn_t (*handler)(int, void *);
+ int i, vector, q_vectors, err;
+ int ri = 0, ti = 0;
+
+ /* Decrement for Other and TCP Timer vectors */
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbevf_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
+ NULL)
+ for (vector = 0; vector < q_vectors; vector++) {
+ handler = SET_HANDLER(adapter->q_vector[vector]);
+
+ if (handler == &ixgbevf_msix_clean_rx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "rx", ri++);
+ } else if (handler == &ixgbevf_msix_clean_tx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "tx", ti++);
+ } else if (handler == &ixgbevf_msix_clean_many) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "TxRx", vector);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(adapter->msix_entries[vector].vector,
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq failed for MSIX interrupt "
+ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ }
+
+ sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq for msix_mbx failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ for (i = vector - 1; i >= 0; i--)
+ free_irq(adapter->msix_entries[--vector].vector,
+ &(adapter->q_vector[i]));
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+ bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+ q_vector->rxr_count = 0;
+ q_vector->txr_count = 0;
+ q_vector->eitr = adapter->eitr_param;
+ }
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+
+ err = ixgbevf_request_msix_irqs(adapter);
+
+ if (err)
+ hw_dbg(&adapter->hw,
+ "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+
+ i = q_vectors - 1;
+
+ free_irq(adapter->msix_entries[i].vector, netdev);
+ i--;
+
+ for (; i >= 0; i--) {
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+
+ ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+ bool queues, bool flush)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
+ u64 qmask;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ qmask = ~0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ if (queues)
+ ixgbevf_irq_enable_queues(adapter, qmask);
+
+ if (flush)
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+ u64 tdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j, tdlen, txctrl;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+ j = ring->reg_idx;
+ tdba = ring->dma;
+ tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+ adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+ adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+ /* Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+ }
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+ struct ixgbevf_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 srrctl;
+
+ rx_ring = &adapter->rx_ring[index];
+
+ srrctl = IXGBE_SRRCTL_DROP_EN;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ u16 bufsz = IXGBEVF_RXBUFFER_2048;
+ /* grow the amount we can receive on large page machines */
+ if (bufsz < (PAGE_SIZE / 2))
+ bufsz = (PAGE_SIZE / 2);
+ /* cap the bufsz at our largest descriptor size */
+ bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+ srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ } else {
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+ u64 rdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, j;
+ u32 rdlen;
+ int rx_buf_len;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+ rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rdba = adapter->rx_ring[i].dma;
+ j = adapter->rx_ring[i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+ (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+ adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+ adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+ ixgbevf_configure_srrctl(adapter, j);
+ }
+}
+
+static void ixgbevf_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j;
+ u32 ctrl;
+
+ adapter->vlgrp = grp;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
+ }
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *v_netdev;
+
+ /* add VID to filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, true);
+ /*
+ * Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_disable(adapter);
+
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable(adapter, true, true);
+
+ /* remove VID from filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, false);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+ ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+ ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq)
+{
+ struct dev_mc_list *mc_ptr;
+ u8 *addr = *mc_addr_ptr;
+ *vmdq = 0;
+
+ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+ if (mc_ptr->next)
+ *mc_addr_ptr = mc_ptr->next->dmi_addr;
+ else
+ *mc_addr_ptr = NULL;
+
+ return addr;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast mode.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *addr_list = NULL;
+ int addr_count = 0;
+
+ /* reprogram multicast list */
+ addr_count = netdev->mc_count;
+ if (addr_count)
+ addr_list = netdev->mc_list->dmi_addr;
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+ ixgbevf_addr_list_itr);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi = &q_vector->napi;
+ if (q_vector->rxr_count > 1)
+ napi->poll = &ixgbevf_clean_rxonly_many;
+
+ napi_enable(napi);
+ }
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi_disable(&q_vector->napi);
+ }
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ ixgbevf_set_rx_mode(netdev);
+
+ ixgbevf_restore_vlan(adapter);
+
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+ ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ }
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j = adapter->rx_ring[rxr].reg_idx;
+ int k;
+
+ for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+ break;
+ else
+ msleep(1);
+ }
+ if (k >= IXGBE_MAX_RX_DESC_POLL) {
+ hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+ "not set within the polling period\n", rxr);
+ }
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j = 0;
+ int num_rx_rings = adapter->num_rx_queues;
+ u32 txdctl, rxdctl;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < num_rx_rings; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ ixgbevf_rx_desc_queue_enable(adapter, i);
+ }
+
+ ixgbevf_configure_msix(adapter);
+
+ if (hw->mac.ops.set_rar) {
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+ }
+
+ clear_bit(__IXGBEVF_DOWN, &adapter->state);
+ ixgbevf_napi_enable_all(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(netdev);
+
+ /* bring the link up in the watchdog, this could race with our first
+ * link up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+ int err;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbevf_configure(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ if (rx_ring->head)
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ if (rx_ring->tail)
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (tx_ring->head)
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ if (tx_ring->tail)
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txdctl;
+ int i, j;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+ /* disable receives */
+
+ netif_tx_disable(netdev);
+
+ msleep(10);
+
+ netif_tx_stop_all_queues(netdev);
+
+ ixgbevf_irq_disable(adapter);
+
+ ixgbevf_napi_disable_all(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ /* can't call flush scheduled work here because it can deadlock
+ * if linkwatch_event tries to acquire the rtnl_lock which we are
+ * holding */
+ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+ msleep(1);
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ }
+
+ netif_carrier_off(netdev);
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbevf_reset(adapter);
+
+ ixgbevf_clean_all_tx_rings(adapter);
+ ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ ixgbevf_down(adapter);
+ ixgbevf_up(adapter);
+
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ if (hw->mac.ops.reset_hw(hw))
+ hw_dbg(hw, "PF still resetting\n");
+ else
+ hw->mac.ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ hw_dbg(&adapter->hw,
+ "Unable to allocate MSI-X interrupts\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ /*
+ * Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+}
+
+/*
+ * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time. The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err_tx_ring_allocation;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err_rx_ring_allocation;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i].count = adapter->tx_ring_count;
+ adapter->tx_ring[i].queue_index = i;
+ adapter->tx_ring[i].reg_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i].count = adapter->rx_ring_count;
+ adapter->rx_ring[i].queue_index = i;
+ adapter->rx_ring[i].reg_idx = i;
+ }
+
+ return 0;
+
+err_rx_ring_allocation:
+ kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPU's. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPU's.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbevf_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbevf_clean_rxonly;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = q_idx;
+ q_vector->eitr = adapter->eitr_param;
+ if (q_idx < napi_vectors)
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+
+ return;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ ixgbevf_set_num_queues(adapter);
+
+ err = ixgbevf_set_interrupt_capability(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbevf_alloc_q_vectors(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbevf_alloc_queues(adapter);
+ if (err) {
+ printk(KERN_ERR "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+ "Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" :
+ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ return 0;
+err_alloc_queues:
+ ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ hw->mbx.ops.init_params(hw);
+ hw->mac.max_tx_queues = MAX_TX_QUEUES;
+ hw->mac.max_rx_queues = MAX_RX_QUEUES;
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address\n");
+ random_ether_addr(hw->mac.addr);
+ } else {
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
+ printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ goto out;
+ }
+ }
+
+ /* Enable dynamic interrupt throttling rates */
+ adapter->eitr_param = 20000;
+ adapter->itr_setting = 1;
+
+ /* set defaults for eitr in MegaBytes */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+ return err;
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+ adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+ adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+ adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+ adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+ adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+ adapter->stats.vfgprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+ adapter->stats.vfgptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.last_vfgorc,
+ adapter->stats.vfgorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.last_vfgotc,
+ adapter->stats.vfgotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+ adapter->stats.vfmprc);
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.vfmprc -
+ adapter->stats.base_vfmprc;
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+ struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /*
+ * Do the watchdog outside of interrupt context due to the lovely
+ * delays that some of the newer hardware requires
+ */
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ goto watchdog_short_circuit;
+
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= (1 << i);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ adapter->tx_timeout_count++;
+
+ ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter = container_of(work,
+ struct ixgbevf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+
+ adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+ /*
+ * Always check the link on the watchdog because we have
+ * no LSC interrupt
+ */
+ if (hw->mac.ops.check_link) {
+ if ((hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false)) != 0) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
+ }
+ } else {
+ /* always assume link is up, if no check link
+ * function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+
+ if (link_up) {
+ if (!netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
+ ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ "10 Gbps" : "1 Gbps"));
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else {
+ /* Force detection of hung controller */
+ adapter->detect_tx_hung = true;
+ }
+ } else {
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+ if (netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ }
+
+pf_has_reset:
+ ixgbevf_update_stats(adapter);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* Reset the timer */
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+
+ adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+
+ ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i].desc)
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vmalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->work_limit = tx_ring->count;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+ "descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vmalloc(size);
+ if (!rx_ring->rx_buffer_info) {
+ hw_dbg(&adapter->hw,
+ "Unable to vmalloc buffer memory for "
+ "the receive descriptor ring\n");
+ goto alloc_failed;
+ }
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma);
+
+ if (!rx_ring->desc) {
+ hw_dbg(&adapter->hw,
+ "Unable to allocate memory for "
+ "the receive descriptor ring\n");
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ goto alloc_failed;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+alloc_failed:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * callers duty to clean those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i].desc)
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ if (hw->adapter_stopped) {
+ ixgbevf_reset(adapter);
+ /* if adapter is still stopped then PF isn't up and
+ * the vf can't start. */
+ if (hw->adapter_stopped) {
+ err = IXGBE_ERR_MBX;
+ printk(KERN_ERR "Unable to start - perhaps the PF"
+ "Driver isn't up yet\n");
+ goto err_setup_reset;
+ }
+ }
+
+ /* allocate transmit descriptors */
+ err = ixgbevf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbevf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbevf_configure(adapter);
+
+ /*
+ * Map the Tx/Rx rings to the vectors we were allotted.
+ * if request_irq will be called in this function map_rings
+ * must be called *before* up_complete
+ */
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+ if (err)
+ goto err_up;
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ err = ixgbevf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return 0;
+
+err_req_irq:
+ ixgbevf_down(adapter);
+err_up:
+ ixgbevf_free_irq(adapter);
+err_setup_rx:
+ ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+ return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+ u32 mss_l4len_idx, l4len;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ adapter->hw_tso_ctxt++;
+ } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ adapter->hw_tso6_ctxt++;
+ }
+
+ i = tx_ring->next_to_use;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= ((skb_network_offset(skb)) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ vlan_macip_lens |=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len +=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx =
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+ /* use index 1 for TSO */
+ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |= (tx_flags &
+ IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vlan_macip_lens |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ printk(KERN_WARNING
+ "partial checksum but "
+ "proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+ /* use index zero for tx checksum offload */
+ context_desc->mss_l4len_idx = 0;
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ adapter->hw_csum_tx_good++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int len;
+ unsigned int total = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+
+ i = tx_ring->next_to_use;
+
+ len = min(skb_headlen(skb), total);
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = min((unsigned int)frag->size, total);
+ offset = frag->page_offset;
+
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ if (total == 0)
+ break;
+ }
+
+ if (i == 0)
+ i = tx_ring->count - 1;
+ else
+ i = i - 1;
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
+}
+
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring, int tx_flags,
+ int count, u32 paylen, u8 hdr_len)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 olinfo_status = 0, cmd_type_len = 0;
+ unsigned int i;
+
+ u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+ cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ /* use index 1 context for tso */
+ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ i = tx_ring->next_to_use;
+ while (count--) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
+
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ u8 hdr_len = 0;
+ int r_idx = 0, tso;
+ int count = 0;
+
+ unsigned int f;
+
+ tx_ring = &adapter->tx_ring[r_idx];
+
+ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
+ }
+
+ /* four things can cause us to need a context descriptor */
+ if (skb_is_gso(skb) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ count++;
+
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ first = tx_ring->next_to_use;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
+ ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ skb->len, hdr_len);
+
+ netdev->trans_start = jiffies;
+
+ ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+
+ hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
+ return 0;
+}
+
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+ }
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
+ pci_disable_device(pdev);
+}
+
+#ifdef HAVE_NET_DEVICE_OPS
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = &ixgbevf_open,
+ .ndo_stop = &ixgbevf_close,
+ .ndo_start_xmit = &ixgbevf_xmit_frame,
+ .ndo_get_stats = &ixgbevf_get_stats,
+ .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
+ .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = &ixgbevf_set_mac,
+ .ndo_change_mtu = &ixgbevf_change_mtu,
+ .ndo_tx_timeout = &ixgbevf_tx_timeout,
+ .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
+};
+#endif /* HAVE_NET_DEVICE_OPS */
+
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = netdev_priv(dev);
+#ifdef HAVE_NET_DEVICE_OPS
+ dev->netdev_ops = &ixgbe_netdev_ops;
+#else /* HAVE_NET_DEVICE_OPS */
+ dev->open = &ixgbevf_open;
+ dev->stop = &ixgbevf_close;
+
+ dev->hard_start_xmit = &ixgbevf_xmit_frame;
+
+ dev->get_stats = &ixgbevf_get_stats;
+ dev->set_multicast_list = &ixgbevf_set_rx_mode;
+ dev->set_mac_address = &ixgbevf_set_mac;
+ dev->change_mtu = &ixgbevf_change_mtu;
+ dev->tx_timeout = &ixgbevf_tx_timeout;
+ dev->vlan_rx_register = &ixgbevf_vlan_rx_register;
+ dev->vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid;
+#endif /* HAVE_NET_DEVICE_OPS */
+ ixgbevf_set_ethtool_ops(dev);
+ dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbevf_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_regions(pdev, ixgbevf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+ MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ /*
+ * call save state here in standalone driver because it relies on
+ * adapter struct to exist, and needs to call netdev_priv
+ */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ ixgbevf_assign_netdev_ops(netdev);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
+ sizeof(struct ixgbe_mac_operations));
+
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+ /* setup the private structure */
+ err = ixgbevf_sw_init(adapter);
+
+ ixgbevf_init_last_counter_stats(adapter);
+
+#ifdef MAX_SKB_FRAGS
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+#endif /* MAX_SKB_FRAGS */
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ printk(KERN_ERR "invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &ixgbevf_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+
+ err = ixgbevf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* pick up the PCI bus settings for reporting later */
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ /* print the MAC address */
+ hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5]);
+
+ hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+ hw_dbg(hw, "LRO is disabled \n");
+
+ hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+ ixgbevf_reset_interrupt_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. The could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->watchdog_task);
+
+ flush_scheduled_work();
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+
+ ixgbevf_reset_interrupt_capability(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ hw_dbg(&adapter->hw, "Remove complete\n");
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+ .name = ixgbevf_driver_name,
+ .id_table = ixgbevf_pci_tbl,
+ .probe = ixgbevf_probe,
+ .remove = __devexit_p(ixgbevf_remove),
+ .shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbe_init_module - Driver Registration Routine
+ *
+ * ixgbe_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
+
+ printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+ ret = pci_register_driver(&ixgbevf_driver);
+ return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbe_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbe_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+ pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbe_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+ return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 000000000000..b8143501e6fc
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * ixgbevf_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns true if the PF has set the reset done bit or else false
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfuly read message from buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 000000000000..1b0e0bf4c0f5
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "vf.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for exra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 000000000000..12f75960aec1
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_REGS_H_
+#define _IXGBEVF_REGS_H_
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+
+#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 000000000000..4b5dec0ec140
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by filling the bus info structure and media type, clears
+ * all on chip counters, initializes receive address registers, multicast
+ * table, VLAN filter table, calls routine to set up link and flow control
+ * settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbevf_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by reseting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ /* also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return 0;
+}
+
+/**
+ * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit by stopped each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts the 12 bits, from a multicast address, to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ * incoming rx multicast addresses, to determine the bit-vector to check in
+ * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr next)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multi-cast
+ * addresses except for in large enterprise network environments.
+ */
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ vector_list[i] = vector;
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings. Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true is link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+
+ if (!(hw->mbx.ops.check_for_rst(hw))) {
+ *link_up = false;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return 0;
+}
+
+struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 000000000000..799600e92700
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82599_vf,
+ ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum ixgbe_mac_type type;
+
+ s32 mc_filter_type;
+
+ bool get_link_status;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *);
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct ixgbe_mac_info mac;
+ struct ixgbe_mbx_info mbx;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+ bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+};
+
+struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+ struct ixgbe_mac_operations *mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc3574..26eed49d3208 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2994,7 +2994,7 @@ jme_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id jme_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
{ }
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b117f7f8b194..b60efd4bd017 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1094,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
return i;
};
- DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
- dev->name, dev->base_addr));
- for (i = 0; i < 6; i++)
- DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
- DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
+ DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
+ dev->name, dev->base_addr, dev->dev_addr,
+ dev->irq));
DEB(DEB_INIT, printk(KERN_INFO
"%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
dev->name, dma, (int)sizeof(struct i596_dma),
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 336e7c7a9275..a8522bd73ae7 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -134,7 +134,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual addres and a physical address. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f64..a8768672dc5a 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
/* 2003-12-26: Make sure Asante cards always work. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -34,31 +36,36 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <asm/hwtest.h>
#include <asm/macints.h>
static char version[] =
- "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+ "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#define ei_inb(port) in_8(port)
-#define ei_outb(val,port) out_8(port,val)
-#define ei_inb_p(port) in_8(port)
-#define ei_outb_p(val,port) out_8(port,val)
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
#include "lib8390.c"
#define WD_START_PG 0x00 /* First page of TX buffer */
#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */
+#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
+ /* First page of TX buffer */
-/* Unfortunately it seems we have to hardcode these for the moment */
-/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */
+/*
+ * Unfortunately it seems we have to hardcode these for the moment
+ * Shouldn't the card know about this?
+ * Does anyone know where to read it off the card?
+ * Do we trust the data provided by the card?
+ */
#define DAYNA_8390_BASE 0x80000
#define DAYNA_8390_MEM 0x00000
@@ -80,7 +87,7 @@ enum mac8390_type {
MAC8390_KINETICS,
};
-static const char * cardname[] = {
+static const char *cardname[] = {
"apple",
"asante",
"farallon",
@@ -90,7 +97,7 @@ static const char * cardname[] = {
"kinetics",
};
-static int word16[] = {
+static const int word16[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -101,7 +108,7 @@ static int word16[] = {
};
/* on which cards do we use NuBus resources? */
-static int useresources[] = {
+static const int useresources[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -117,22 +124,22 @@ enum mac8390_access {
ACCESS_16,
};
-extern int mac8390_memtest(struct net_device * dev);
-static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
+extern int mac8390_memtest(struct net_device *dev);
+static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
enum mac8390_type type);
-static int mac8390_open(struct net_device * dev);
-static int mac8390_close(struct net_device * dev);
+static int mac8390_open(struct net_device *dev);
+static int mac8390_close(struct net_device *dev);
static void mac8390_no_reset(struct net_device *dev);
static void interlan_reset(struct net_device *dev);
/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
-static void sane_block_input(struct net_device * dev, int count,
- struct sk_buff * skb, int ring_offset);
-static void sane_block_output(struct net_device * dev, int count,
- const unsigned char * buf, const int start_page);
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
/* dayna_memcpy to and from card */
static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +155,8 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +171,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
switch (dev->dr_sw) {
- case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_APPLE_SONIC_NB:
- case NUBUS_DRHW_APPLE_SONIC_LC:
- case NUBUS_DRHW_SONNET:
- return MAC8390_NONE;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ case NUBUS_DRSW_3COM:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_APPLE_SONIC_NB:
+ case NUBUS_DRHW_APPLE_SONIC_LC:
+ case NUBUS_DRHW_SONNET:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_ASANTE_LC:
- return MAC8390_NONE;
- break;
- case NUBUS_DRHW_CABLETRON:
- return MAC8390_CABLETRON;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_ASANTE:
- return MAC8390_ASANTE;
+ case NUBUS_DRSW_APPLE:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_ASANTE_LC:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_TECHWORKS:
- case NUBUS_DRSW_DAYNA2:
- case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
- return MAC8390_CABLETRON;
- else
- return MAC8390_APPLE;
+ case NUBUS_DRHW_CABLETRON:
+ return MAC8390_CABLETRON;
break;
-
- case NUBUS_DRSW_FARALLON:
- return MAC8390_FARALLON;
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_INTERLAN:
- return MAC8390_INTERLAN;
- break;
- default:
- return MAC8390_KINETICS;
- break;
- }
- break;
+ case NUBUS_DRSW_ASANTE:
+ return MAC8390_ASANTE;
+ break;
- case NUBUS_DRSW_DAYNA:
- // These correspond to Dayna Sonic cards
- // which use the macsonic driver
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN )
- return MAC8390_NONE;
- else
- return MAC8390_DAYNA;
+ case NUBUS_DRSW_TECHWORKS:
+ case NUBUS_DRSW_DAYNA2:
+ case NUBUS_DRSW_DAYNA_LC:
+ if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ return MAC8390_CABLETRON;
+ else
+ return MAC8390_APPLE;
+ break;
+
+ case NUBUS_DRSW_FARALLON:
+ return MAC8390_FARALLON;
+ break;
+
+ case NUBUS_DRSW_KINETICS:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_INTERLAN:
+ return MAC8390_INTERLAN;
+ break;
+ default:
+ return MAC8390_KINETICS;
break;
+ }
+ break;
+
+ case NUBUS_DRSW_DAYNA:
+ /*
+ * These correspond to Dayna Sonic cards
+ * which use the macsonic driver
+ */
+ if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
+ dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ return MAC8390_NONE;
+ else
+ return MAC8390_DAYNA;
+ break;
}
return MAC8390_NONE;
}
@@ -237,14 +246,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy((char *)membase, (char *)&outdata, 4);
+ memcpy(membase, &outdata, 4);
/* Now compare them */
if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
return ACCESS_32;
/* Write 16 bit output */
- word_memcpy_tocard((char *)membase, (char *)&outdata, 4);
+ word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
- word_memcpy_fromcard((char *)&indata, (char *)membase, 4);
+ word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
return ACCESS_UNKNOWN;
@@ -258,7 +267,7 @@ static int __init mac8390_memsize(unsigned long membase)
local_irq_save(flags);
/* Check up to 32K in 4K increments */
for (i = 0; i < 8; i++) {
- volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000));
+ volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
/* Unwriteable - we have a fully decoded card and the
RAM end located */
@@ -273,28 +282,127 @@ static int __init mac8390_memsize(unsigned long membase)
/* check for partial decode and wrap */
for (j = 0; j < i; j++) {
- volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000));
+ volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
if (*p != (0xA5A0 | j))
break;
- }
- }
+ }
+ }
local_irq_restore(flags);
- /* in any case, we stopped once we tried one block too many,
- or once we reached 32K */
- return i * 0x1000;
+ /*
+ * in any case, we stopped once we tried one block too many,
+ * or once we reached 32K
+ */
+ return i * 0x1000;
+}
+
+static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+ enum mac8390_type cardtype)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+ int offset;
+ volatile unsigned short *i;
+
+ printk_once(KERN_INFO pr_fmt("%s"), version);
+
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+ /* This is getting to be a habit */
+ dev->base_addr = (ndev->board->slot_addr |
+ ((ndev->board->slot & 0xf) << 20));
+
+ /*
+ * Get some Nubus info - we will trust the card's idea
+ * of where its memory and registers are.
+ */
+
+ if (nubus_get_func_dir(ndev, &dir) == -1) {
+ pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+
+ /* Get the MAC address */
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
+ pr_info("%s: Couldn't get MAC address!\n", dev->name);
+ return false;
+ }
+
+ nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+
+ if (useresources[cardtype] == 1) {
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
+ &ent) == -1) {
+ pr_err("%s: Memory offset resource for slot %X not found!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ dev->mem_start = dev->base_addr + offset;
+ /* yes, this is how the Apple driver does it */
+ dev->base_addr = dev->mem_start + 0x10000;
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
+ &ent) == -1) {
+ pr_info("%s: Memory length resource for slot %X not found, probing\n",
+ dev->name, ndev->board->slot);
+ offset = mac8390_memsize(dev->mem_start);
+ } else {
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ }
+ dev->mem_end = dev->mem_start + offset;
+ } else {
+ switch (cardtype) {
+ case MAC8390_KINETICS:
+ case MAC8390_DAYNA: /* it's the same */
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ DAYNA_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ DAYNA_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_INTERLAN:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_CABLETRON:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_MEM);
+ /* The base address is unreadable if 0x00
+ * has been written to the command register
+ * Reset the chip by writing E8390_NODMA +
+ * E8390_PAGE0 + E8390_STOP just to be
+ * sure
+ */
+ i = (void *)dev->base_addr;
+ *i = 0x21;
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+
+ default:
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
+ return false;
+ }
+ }
+
+ return true;
}
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- volatile unsigned short *i;
- int version_disp = 0;
- struct nubus_dev * ndev = NULL;
+ struct nubus_dev *ndev = NULL;
int err = -ENODEV;
- struct nubus_dir dir;
- struct nubus_dirent ent;
- int offset;
static unsigned int slots;
enum mac8390_type cardtype;
@@ -311,118 +419,19 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) {
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
+ ndev))) {
/* Have we seen it already? */
- if (slots & (1<<ndev->board->slot))
+ if (slots & (1 << ndev->board->slot))
continue;
- slots |= 1<<ndev->board->slot;
+ slots |= 1 << ndev->board->slot;
- if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE)
+ cardtype = mac8390_ident(ndev);
+ if (cardtype == MAC8390_NONE)
continue;
- if (version_disp == 0) {
- version_disp = 1;
- printk(version);
- }
-
- dev->irq = SLOT2IRQ(ndev->board->slot);
- /* This is getting to be a habit */
- dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
-
- /* Get some Nubus info - we will trust the card's idea
- of where its memory and registers are. */
-
- if (nubus_get_func_dir(ndev, &dir) == -1) {
- printk(KERN_ERR "%s: Unable to get Nubus functional"
- " directory for slot %X!\n",
- dev->name, ndev->board->slot);
+ if (!mac8390_init(dev, ndev, cardtype))
continue;
- }
-
- /* Get the MAC address */
- if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
- printk(KERN_INFO "%s: Couldn't get MAC address!\n",
- dev->name);
- continue;
- } else {
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
- }
-
- if (useresources[cardtype] == 1) {
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
- printk(KERN_ERR "%s: Memory offset resource"
- " for slot %X not found!\n",
- dev->name, ndev->board->slot);
- continue;
- }
- nubus_get_rsrc_mem(&offset, &ent, 4);
- dev->mem_start = dev->base_addr + offset;
- /* yes, this is how the Apple driver does it */
- dev->base_addr = dev->mem_start + 0x10000;
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
- printk(KERN_INFO "%s: Memory length resource"
- " for slot %X not found"
- ", probing\n",
- dev->name, ndev->board->slot);
- offset = mac8390_memsize(dev->mem_start);
- } else {
- nubus_get_rsrc_mem(&offset, &ent, 4);
- }
- dev->mem_end = dev->mem_start + offset;
- } else {
- switch (cardtype) {
- case MAC8390_KINETICS:
- case MAC8390_DAYNA: /* it's the same */
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_INTERLAN:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_CABLETRON:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_MEM);
- /* The base address is unreadable if 0x00
- * has been written to the command register
- * Reset the chip by writing E8390_NODMA +
- * E8390_PAGE0 + E8390_STOP just to be
- * sure
- */
- i = (void *)dev->base_addr;
- *i = 0x21;
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
-
- default:
- printk(KERN_ERR "Card type %s is"
- " unsupported, sorry\n",
- ndev->board->name);
- continue;
- }
- }
/* Do the nasty 8390 stuff */
if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +467,7 @@ int init_module(void)
dev_mac890[i] = dev;
}
if (!i) {
- printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n");
+ pr_notice("No useable cards found, driver NOT installed.\n");
return -ENODEV;
}
return 0;
@@ -493,22 +502,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
#endif
};
-static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
- enum mac8390_type type)
+static int __init mac8390_initdev(struct net_device *dev,
+ struct nubus_dev *ndev,
+ enum mac8390_type type)
{
- static u32 fwrd4_offsets[16]={
+ static u32 fwrd4_offsets[16] = {
0, 4, 8, 12,
16, 20, 24, 28,
32, 36, 40, 44,
48, 52, 56, 60
};
- static u32 back4_offsets[16]={
+ static u32 back4_offsets[16] = {
60, 56, 52, 48,
44, 40, 36, 32,
28, 24, 20, 16,
12, 8, 4, 0
};
- static u32 fwrd2_offsets[16]={
+ static u32 fwrd2_offsets[16] = {
0, 2, 4, 6,
8, 10, 12, 14,
16, 18, 20, 22,
@@ -526,47 +536,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
/* Cabletron's TX/RX buffers are backwards */
if (type == MAC8390_CABLETRON) {
- ei_status.tx_start_page = CABLETRON_TX_START_PG;
- ei_status.rx_start_page = CABLETRON_RX_START_PG;
- ei_status.stop_page = CABLETRON_RX_STOP_PG;
- ei_status.rmem_start = dev->mem_start;
- ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+ ei_status.tx_start_page = CABLETRON_TX_START_PG;
+ ei_status.rx_start_page = CABLETRON_RX_START_PG;
+ ei_status.stop_page = CABLETRON_RX_STOP_PG;
+ ei_status.rmem_start = dev->mem_start;
+ ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
} else {
- ei_status.tx_start_page = WD_START_PG;
- ei_status.rx_start_page = WD_START_PG + TX_PAGES;
- ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- ei_status.rmem_end = dev->mem_end;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ ei_status.rmem_end = dev->mem_end;
}
/* Fill in model-specific information and functions */
- switch(type) {
+ switch (type) {
case MAC8390_FARALLON:
case MAC8390_APPLE:
- switch(mac8390_testio(dev->mem_start)) {
- case ACCESS_UNKNOWN:
- printk("Don't know how to access card memory!\n");
- return -ENODEV;
- break;
+ switch (mac8390_testio(dev->mem_start)) {
+ case ACCESS_UNKNOWN:
+ pr_info("Don't know how to access card memory!\n");
+ return -ENODEV;
+ break;
- case ACCESS_16:
- /* 16 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- break;
+ case ACCESS_16:
+ /* 16 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &slow_sane_block_input;
+ ei_status.block_output = &slow_sane_block_output;
+ ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ break;
- case ACCESS_32:
- /* 32 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &sane_block_input;
- ei_status.block_output = &sane_block_output;
- ei_status.get_8390_hdr = &sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- access_bitmode = 1;
- break;
+ case ACCESS_32:
+ /* 32 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &sane_block_input;
+ ei_status.block_output = &sane_block_output;
+ ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 1;
+ break;
}
break;
@@ -608,24 +618,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
ei_status.block_input = &slow_sane_block_input;
ei_status.block_output = &slow_sane_block_output;
ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = fwrd4_offsets;
- break;
+ ei_status.reg_offset = fwrd4_offsets;
+ break;
default:
- printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name);
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
return -ENODEV;
}
__NS8390_init(dev, 0);
/* Good, done, now spit out some messages */
- printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
- dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
- printk(KERN_INFO
- "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
- dev->dev_addr, dev->irq,
- (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
- dev->mem_start, access_bitmode ? 32 : 16);
+ pr_info("%s: %s in slot %X (type %s)\n",
+ dev->name, ndev->board->name, ndev->board->slot,
+ cardname[type]);
+ pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
return 0;
}
@@ -633,7 +644,7 @@ static int mac8390_open(struct net_device *dev)
{
__ei_open(dev);
if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
- printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
return -EAGAIN;
}
return 0;
@@ -650,72 +661,71 @@ static void mac8390_no_reset(struct net_device *dev)
{
ei_status.txing = 0;
if (ei_debug > 1)
- printk("reset not supported\n");
+ pr_info("reset not supported\n");
return;
}
static void interlan_reset(struct net_device *dev)
{
- unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq));
+ unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
if (ei_debug > 1)
- printk("Need to reset the NS8390 t=%lu...", jiffies);
+ pr_info("Need to reset the NS8390 t=%lu...", jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
if (ei_debug > 1)
- printk("reset complete\n");
+ pr_cont("reset complete\n");
return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
/* directly from daynaport.c by Alan Cox */
-static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
+ int count)
{
volatile unsigned char *ptr;
- unsigned char *target=to;
- from<<=1; /* word, skip overhead */
- ptr=(unsigned char *)(dev->mem_start+from);
+ unsigned char *target = to;
+ from <<= 1; /* word, skip overhead */
+ ptr = (unsigned char *)(dev->mem_start+from);
/* Leading byte? */
- if (from&2) {
+ if (from & 2) {
*target++ = ptr[-1];
ptr += 2;
count--;
}
- while(count>=2)
- {
+ while (count >= 2) {
*(unsigned short *)target = *(unsigned short volatile *)ptr;
ptr += 4; /* skip cruft */
target += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
+ if (count)
*target = *ptr;
}
-static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count)
{
volatile unsigned short *ptr;
- const unsigned char *src=from;
- to<<=1; /* word, skip overhead */
- ptr=(unsigned short *)(dev->mem_start+to);
+ const unsigned char *src = from;
+ to <<= 1; /* word, skip overhead */
+ ptr = (unsigned short *)(dev->mem_start+to);
/* Leading byte? */
- if (to&2) { /* avoid a byte write (stomps on other data) */
+ if (to & 2) { /* avoid a byte write (stomps on other data) */
ptr[-1] = (ptr[-1]&0xFF00)|*src++;
ptr++;
count--;
}
- while(count>=2)
- {
- *ptr++=*(unsigned short *)src; /* Copy and */
+ while (count >= 2) {
+ *ptr++ = *(unsigned short *)src; /* Copy and */
ptr++; /* skip cruft */
src += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
- {
+ if (count) {
/* card doesn't like byte writes */
- *ptr=(*ptr&0x00FF)|(*src << 8);
+ *ptr = (*ptr & 0x00FF) | (*src << 8);
}
}
@@ -738,11 +748,14 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
- memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count);
+ memcpy_toio(skb->data + semi_count,
+ (char *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -755,16 +768,18 @@ static void sane_block_output(struct net_device *dev, int count,
}
/* dayna block input/output */
-static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4);
+ dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
/* Fix endianness */
- hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8);
+ hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
}
-static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +787,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
/* Note the offset math is done in card memory space which is word
per long onto our space. */
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +795,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
dayna_memcpy_fromcard(dev, skb->data + semi_count,
ei_status.rmem_start - dev->mem_start,
count);
- }
- else
- {
+ } else {
dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
}
}
-static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -797,40 +810,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
}
/* Cabletron block I/O */
-static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page)
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4);
+ word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
/* Register endianism - fix here rather than 8390.c */
hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
}
-static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, semi_count);
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
word_memcpy_fromcard(skb->data + semi_count,
(char *)ei_status.rmem_start, count);
- }
- else
- {
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, count);
+ } else {
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base, count);
}
}
-static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -843,10 +855,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
const unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +867,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
const volatile unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 291a505fd4fc..8f6e816a7395 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1174,7 +1174,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_port:
- for (port = 1; port <= dev->caps.num_ports; port++)
+ for (--port; port >= 1; --port)
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_mcg_table(dev);
@@ -1271,7 +1271,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
return __mlx4_init_one(pdev, NULL);
}
-static struct pci_device_id mlx4_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 1405a170bb43..af67af55efe7 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -656,6 +656,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
struct sk_buff *skb;
int rx;
struct rx_desc *rx_desc;
+ int size;
skb = __skb_dequeue(&mp->rx_recycle);
if (skb == NULL)
@@ -678,10 +679,11 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
rx_desc = rxq->rx_desc_area + rx;
+ size = skb->end - skb->data;
rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent,
- skb->data, mp->skb_size,
+ skb->data, size,
DMA_FROM_DEVICE);
- rx_desc->buf_size = mp->skb_size;
+ rx_desc->buf_size = size;
rxq->rx_skb[rx] = skb;
wmb();
rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0d..c0884a9cba3c 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -4085,7 +4085,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
-static struct pci_device_id myri10ge_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
{PCI_DEVICE
(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b3513ad3b703..8b4313085359 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -716,10 +716,10 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
pad[0] = MYRI_PAD_LEN;
pad[1] = 0xab;
- /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length
- * in here instead. It is up to the 802.2 layer to carry protocol information.
+ /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
+ * length in here instead.
*/
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
else
eth->h_proto = htons(len);
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce27..2d7b3bbfed01 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
{ "NatSemi DP8381[56]", 0, 24 },
};
-static struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ } /* terminate list */
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151c..85aec4f10131 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
};
-static struct pci_device_id ne2k_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
{ 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
{ 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
{ 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 76cd1f3e9fc8..8264ae0cbf1d 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 65
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.65"
+#define _NETXEN_NIC_LINUX_SUBVERSION 72
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.72"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
@@ -420,7 +420,7 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0x3eb000
+#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index ddd704ae0188..542f408333ff 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -66,7 +66,7 @@ static const char netxen_nic_gstrings_test[][ETH_GSTRING_LEN] = {
#define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test)
-#define NETXEN_NIC_REGS_COUNT 42
+#define NETXEN_NIC_REGS_COUNT 30
#define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32))
#define NETXEN_MAX_EEPROM_LEN 1024
@@ -312,150 +312,91 @@ static int netxen_nic_get_regs_len(struct net_device *dev)
return NETXEN_NIC_REGS_LEN;
}
-struct netxen_niu_regs {
- __u32 reg[NETXEN_NIC_REGS_COUNT];
-};
-
-static struct netxen_niu_regs niu_registers[] = {
- {
- /* GB Mode */
- {
- NETXEN_NIU_GB_SERDES_RESET,
- NETXEN_NIU_GB0_MII_MODE,
- NETXEN_NIU_GB1_MII_MODE,
- NETXEN_NIU_GB2_MII_MODE,
- NETXEN_NIU_GB3_MII_MODE,
- NETXEN_NIU_GB0_GMII_MODE,
- NETXEN_NIU_GB1_GMII_MODE,
- NETXEN_NIU_GB2_GMII_MODE,
- NETXEN_NIU_GB3_GMII_MODE,
- NETXEN_NIU_REMOTE_LOOPBACK,
- NETXEN_NIU_GB0_HALF_DUPLEX,
- NETXEN_NIU_GB1_HALF_DUPLEX,
- NETXEN_NIU_RESET_SYS_FIFOS,
- NETXEN_NIU_GB_CRC_DROP,
- NETXEN_NIU_GB_DROP_WRONGADDR,
- NETXEN_NIU_TEST_MUX_CTL,
-
- NETXEN_NIU_GB_MAC_CONFIG_0(0),
- NETXEN_NIU_GB_MAC_CONFIG_1(0),
- NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0),
- NETXEN_NIU_GB_MAX_FRAME_SIZE(0),
- NETXEN_NIU_GB_TEST_REG(0),
- NETXEN_NIU_GB_MII_MGMT_CONFIG(0),
- NETXEN_NIU_GB_MII_MGMT_COMMAND(0),
- NETXEN_NIU_GB_MII_MGMT_ADDR(0),
- NETXEN_NIU_GB_MII_MGMT_CTRL(0),
- NETXEN_NIU_GB_MII_MGMT_STATUS(0),
- NETXEN_NIU_GB_MII_MGMT_INDICATE(0),
- NETXEN_NIU_GB_INTERFACE_CTRL(0),
- NETXEN_NIU_GB_INTERFACE_STATUS(0),
- NETXEN_NIU_GB_STATION_ADDR_0(0),
- NETXEN_NIU_GB_STATION_ADDR_1(0),
- -1,
- }
- },
- {
- /* XG Mode */
- {
- NETXEN_NIU_XG_SINGLE_TERM,
- NETXEN_NIU_XG_DRIVE_HI,
- NETXEN_NIU_XG_DRIVE_LO,
- NETXEN_NIU_XG_DTX,
- NETXEN_NIU_XG_DEQ,
- NETXEN_NIU_XG_WORD_ALIGN,
- NETXEN_NIU_XG_RESET,
- NETXEN_NIU_XG_POWER_DOWN,
- NETXEN_NIU_XG_RESET_PLL,
- NETXEN_NIU_XG_SERDES_LOOPBACK,
- NETXEN_NIU_XG_DO_BYTE_ALIGN,
- NETXEN_NIU_XG_TX_ENABLE,
- NETXEN_NIU_XG_RX_ENABLE,
- NETXEN_NIU_XG_STATUS,
- NETXEN_NIU_XG_PAUSE_THRESHOLD,
- NETXEN_NIU_XGE_CONFIG_0,
- NETXEN_NIU_XGE_CONFIG_1,
- NETXEN_NIU_XGE_IPG,
- NETXEN_NIU_XGE_STATION_ADDR_0_HI,
- NETXEN_NIU_XGE_STATION_ADDR_0_1,
- NETXEN_NIU_XGE_STATION_ADDR_1_LO,
- NETXEN_NIU_XGE_STATUS,
- NETXEN_NIU_XGE_MAX_FRAME_SIZE,
- NETXEN_NIU_XGE_PAUSE_FRAME_VALUE,
- NETXEN_NIU_XGE_TX_BYTE_CNT,
- NETXEN_NIU_XGE_TX_FRAME_CNT,
- NETXEN_NIU_XGE_RX_BYTE_CNT,
- NETXEN_NIU_XGE_RX_FRAME_CNT,
- NETXEN_NIU_XGE_AGGR_ERROR_CNT,
- NETXEN_NIU_XGE_MULTICAST_FRAME_CNT,
- NETXEN_NIU_XGE_UNICAST_FRAME_CNT,
- NETXEN_NIU_XGE_CRC_ERROR_CNT,
- NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR,
- NETXEN_NIU_XGE_LOCAL_ERROR_CNT,
- NETXEN_NIU_XGE_REMOTE_ERROR_CNT,
- NETXEN_NIU_XGE_CONTROL_CHAR_CNT,
- NETXEN_NIU_XGE_PAUSE_FRAME_CNT,
- -1,
- }
- }
-};
-
static void
netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 mode, *regs_buff = p;
- int i, window;
+ struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct nx_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+ int port = adapter->physical_port;
memset(p, 0, NETXEN_NIC_REGS_LEN);
+
regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
(adapter->pdev)->device;
- /* which mode */
- regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE);
- mode = regs_buff[0];
-
- /* Common registers to all the modes */
- regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER);
- /* GB/XGB Mode */
- mode = (mode / 2) - 1;
- window = 0;
- if (mode <= 1) {
- for (i = 3; niu_registers[mode].reg[i - 3] != -1; i++) {
- /* GB: port specific registers */
- if (mode == 0 && i >= 19)
- window = adapter->physical_port *
- NETXEN_NIC_PORT_WINDOW;
-
- regs_buff[i] = NXRD32(adapter,
- niu_registers[mode].reg[i - 3] + window);
- }
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE);
+ regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
+ regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+ regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2);
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c);
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c);
+
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+
+ regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c);
+ i += 2;
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3);
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+
+ } else {
+ i++;
+
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_0+(0x10000*port));
+ regs_buff[i++] = NXRD32(adapter,
+ NETXEN_NIU_XGE_CONFIG_1+(0x10000*port));
+
+ regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE);
+ regs_buff[i++] = NXRDIO(adapter,
+ adapter->tx_ring->crb_cmd_consumer);
+ }
+
+ regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = NXRDIO(adapter,
+ recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = NXRDIO(adapter,
+ sds_ring->crb_sts_consumer);
}
}
static u32 netxen_nic_test_link(struct net_device *dev)
{
struct netxen_adapter *adapter = netdev_priv(dev);
- __u32 status;
- int val;
+ u32 val, port;
- /* read which mode */
- if (adapter->ahw.port_type == NETXEN_NIC_GBE) {
- if (adapter->phy_read &&
- adapter->phy_read(adapter,
- NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status) != 0)
- return -EIO;
- else {
- val = netxen_get_phy_link(status);
- return !val;
- }
- } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
+ port = adapter->physical_port;
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ val = NXRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+ } else {
val = NXRD32(adapter, CRB_XG_STATE);
+ val = (val >> port*8) & 0xff;
return (val == XG_LINK_UP) ? 0 : 1;
}
- return -EIO;
}
static int
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d138fc22927a..638369024908 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -969,7 +969,8 @@ enum {
#define NX_DEV_READY 3
#define NX_DEV_NEED_RESET 4
#define NX_DEV_NEED_QUISCENT 5
-#define NX_DEV_FAILED 6
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
#define NX_RCODE_DRIVER_INFO 0x20000000
#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 2e364fee3cbb..85e28e60ecf1 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -345,8 +345,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
void
netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem)
{
- int val;
- val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+ NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
}
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
@@ -691,6 +690,9 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
struct list_head *head;
nx_mac_list_t *cur;
+ if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
+ return;
+
list_splice_tail_init(&adapter->mac_list, &del_list);
nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02f8d4b4db63..333bd325f58e 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -184,6 +184,8 @@ skip_rds:
tx_ring = adapter->tx_ring;
vfree(tx_ring->cmd_buf_arr);
+ kfree(tx_ring);
+ adapter->tx_ring = NULL;
}
int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
@@ -778,11 +780,14 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
+ if (adapter->need_fw_reset)
+ return 1;
+
/* last attempt had failed */
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
- old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
+ old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER);
for (i = 0; i < 10; i++) {
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 6cae26a5bd67..def4a07357d6 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
+#include <linux/aer.h>
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -84,6 +85,7 @@ static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -98,7 +100,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -340,7 +342,7 @@ netxen_check_hw_init(struct netxen_adapter *adapter, int first_boot)
if (!(first_boot & 0x4)) {
first_boot |= 0x4;
NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot);
- first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4));
+ NXRD32(adapter, NETXEN_PCIE_REG(0x4));
}
/* This is the first boot after power up */
@@ -1262,6 +1264,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
goto err_out_disable_pdev;
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_enable_pcie_error_reporting(pdev);
+
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1409,17 +1414,19 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_release_firmware(adapter);
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
-static int __netxen_nic_shutdown(struct pci_dev *pdev)
+
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
- struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
@@ -1438,53 +1445,22 @@ static int __netxen_nic_shutdown(struct pci_dev *pdev)
nx_decr_dev_ref_cnt(adapter);
clear_bit(__NX_RESETTING, &adapter->state);
-
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- if (netxen_nic_wol_supported(adapter)) {
- pci_enable_wake(pdev, PCI_D3cold, 1);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- }
-
- pci_disable_device(pdev);
-
- return 0;
}
-static void netxen_nic_shutdown(struct pci_dev *pdev)
-{
- if (__netxen_nic_shutdown(pdev))
- return;
-}
-#ifdef CONFIG_PM
-static int
-netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- int retval;
-
- retval = __netxen_nic_shutdown(pdev);
- if (retval)
- return retval;
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int
-netxen_nic_resume(struct pci_dev *pdev)
+static int netxen_nic_attach_func(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
err = pci_enable_device(pdev);
if (err)
return err;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
adapter->ahw.crb_win = -1;
adapter->ahw.ocm_win = -1;
@@ -1503,11 +1479,10 @@ netxen_nic_resume(struct pci_dev *pdev)
if (err)
goto err_out_detach;
- netif_device_attach(netdev);
-
netxen_config_indev_addr(netdev, NETDEV_UP);
}
+ netif_device_attach(netdev);
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
return 0;
@@ -1517,6 +1492,85 @@ err_out:
nx_decr_dev_ref_cnt(adapter);
return err;
}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (nx_dev_request_aer(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ netxen_nic_detach_func(adapter);
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ err = netxen_nic_attach_func(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_io_resume(struct pci_dev *pdev)
+{
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ netxen_nic_detach_func(adapter);
+
+ if (pci_save_state(pdev))
+ return;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ int retval;
+
+ netxen_nic_detach_func(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int
+netxen_nic_resume(struct pci_dev *pdev)
+{
+ return netxen_nic_attach_func(pdev);
+}
#endif
static int netxen_nic_open(struct net_device *netdev)
@@ -1898,12 +1952,8 @@ static void netxen_nic_handle_phy_intr(struct netxen_adapter *adapter)
linkup = (val == XG_LINK_UP_P3);
} else {
val = NXRD32(adapter, CRB_XG_STATE);
- if (adapter->ahw.port_type == NETXEN_NIC_GBE)
- linkup = (val >> port) & 1;
- else {
- val = (val >> port*8) & 0xff;
- linkup = (val == XG_LINK_UP);
- }
+ val = (val >> port*8) & 0xff;
+ linkup = (val == XG_LINK_UP);
}
netxen_advert_link_change(adapter, linkup);
@@ -2108,20 +2158,49 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
return count;
}
-static void
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_AER)
+ ret = 0;
+ else if (state == NX_DEV_READY) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+ return ret;
+}
+
+static int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
u32 state;
+ int ret = -EINVAL;
if (netxen_api_lock(adapter))
- return;
+ return ret;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state != NX_DEV_INITALIZING)
+ if (state == NX_DEV_NEED_RESET)
+ ret = 0;
+ else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ ret = 0;
+ }
netxen_api_unlock(adapter);
+
+ return ret;
}
static int
@@ -2273,17 +2352,29 @@ netxen_check_health(struct netxen_adapter *adapter)
u32 state, heartbit;
struct net_device *netdev = adapter->netdev;
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (state == NX_DEV_NEED_AER)
+ return 0;
+
if (netxen_nic_check_temp(adapter))
goto detach;
if (adapter->need_fw_reset) {
- nx_dev_request_reset(adapter);
+ if (nx_dev_request_reset(adapter))
+ return 0;
goto detach;
}
- state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state == NX_DEV_NEED_RESET)
- goto detach;
+ /* NX_DEV_NEED_RESET, this state can be marked in two cases
+ * 1. Tx timeout 2. Fw hang
+ * Send request to destroy context in case of tx timeout only
+ * and doesn't required in case of Fw hang
+ */
+ if (state == NX_DEV_NEED_RESET) {
+ adapter->need_fw_reset = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ goto detach;
+ }
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
@@ -2292,12 +2383,17 @@ netxen_check_health(struct netxen_adapter *adapter)
if (heartbit != adapter->heartbit) {
adapter->heartbit = heartbit;
adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
return 0;
}
if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
return 0;
+ if (nx_dev_request_reset(adapter))
+ return 0;
+
clear_bit(__NX_FW_ATTACHED, &adapter->state);
dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2727,6 +2823,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
+static struct pci_error_handlers netxen_err_handler = {
+ .error_detected = netxen_io_error_detected,
+ .slot_reset = netxen_io_slot_reset,
+ .resume = netxen_io_resume,
+};
+
static struct pci_driver netxen_driver = {
.name = netxen_nic_driver_name,
.id_table = netxen_pci_tbl,
@@ -2736,7 +2838,8 @@ static struct pci_driver netxen_driver = {
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume,
#endif
- .shutdown = netxen_nic_shutdown
+ .shutdown = netxen_nic_shutdown,
+ .err_handler = &netxen_err_handler
};
static int __init netxen_init_module(void)
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 8ce58c4c7dd3..0e260cfbff7b 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -58,7 +58,7 @@ static void writeq(u64 val, void __iomem *reg)
}
#endif
-static struct pci_device_id niu_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
{}
};
@@ -2844,7 +2844,7 @@ static int tcam_wait_bit(struct niu *np, u64 bit)
break;
udelay(1);
}
- if (limit < 0)
+ if (limit <= 0)
return -ENODEV;
return 0;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d41536..a3b6aa0f375d 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -2292,7 +2292,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
-static struct pci_device_id ns83820_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
{ 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
{ 0, },
};
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 050538bf155a..6fd8789ef487 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -1119,11 +1119,8 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
if (p->port >= octeon_bootinfo->mac_addr_count)
dev_err(&pdev->dev,
- "Error %s: Using MAC outside of the assigned range: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ "Error %s: Using MAC outside of the assigned range: %pM\n",
+ netdev->name, netdev->dev_addr);
if (register_netdev(netdev))
goto err;
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1e..d44d4a208bbf 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1875,7 +1875,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-static struct pci_device_id pasemi_mac_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
{ },
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402affd..20273832bfce 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -211,7 +211,7 @@ static struct {
};
-static struct pci_device_id netdrv_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d431b59e7d11..2ee57bd52a01 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1065,14 +1065,11 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&ei_local->page_lock, flags);
outb_p(0x00, e8390_base + EN0_IMR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
/*
* Slow phase with lock held.
*/
- spin_lock_irqsave(&ei_local->page_lock, flags);
-
ei_local->irqlock = 1;
send_length = max(length, ETH_ZLEN);
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 8a5ae3b182ed..12e3233868e9 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1402,7 +1402,6 @@ static void BuildLAF(int *ladrf, int *adr)
for (i = 0; i < 8; i++)
printk(KERN_CONT " %02X", ladrf[i]);
printk(KERN_CONT "\n");
- }
#endif
} /* BuildLAF */
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 92ed3fbf89a5..776cad2f5715 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1741,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
- PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"),
@@ -1754,7 +1754,7 @@ MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
MODULE_FIRMWARE("cis/PCMLM28.cis");
MODULE_FIRMWARE("cis/DP83903.cis");
MODULE_FIRMWARE("cis/LA-PCM.cis");
-MODULE_FIRMWARE("PE520.cis");
+MODULE_FIRMWARE("cis/PE520.cis");
MODULE_FIRMWARE("cis/NE2K.cis");
MODULE_FIRMWARE("cis/PE-200.cis");
MODULE_FIRMWARE("cis/tamarack.cis");
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 6dd486d2977b..aa57cfd1e3fb 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -453,8 +453,7 @@ static int mhz_mfc_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.IOAddrLines = 16;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
@@ -652,8 +651,7 @@ static int osi_config(struct pcmcia_device *link)
link->conf.Attributes |= CONF_ENABLE_SPKR;
link->conf.Status = CCSR_AUDIO_ENA;
- link->irq.Attributes =
- IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
+ link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
link->io.NumPorts1 = 64;
link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
link->io.NumPorts2 = 8;
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index e154677ff706..0dc7ff896eeb 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -59,7 +59,7 @@ static const char *const version =
/*
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
-static struct pci_device_id pcnet32_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index c13cf64095b6..33c4b12a63ba 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -331,8 +331,8 @@ static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
bool clk125en = true;
/* Abort if we are using an untested phy. */
- if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 ||
- BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 ||
+ if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 &&
+ BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 &&
BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
return;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index bd4e8d72dc08..e17b70291bbc 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -264,6 +264,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
(phydev->phy_id & phydrv->phy_id_mask));
}
+#ifdef CONFIG_PM
+
static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
{
struct device_driver *drv = phydev->dev.driver;
@@ -295,34 +297,88 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
return true;
}
-/* Suspend and resume. Copied from platform_suspend and
- * platform_resume
- */
-static int mdio_bus_suspend(struct device * dev, pm_message_t state)
+static int mdio_bus_suspend(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ /*
+ * We must stop the state machine manually, otherwise it stops out of
+ * control, possibly with the phydev->lock held. Upon resume, netdev
+ * may call phy routines that try to grab the same lock, and that may
+ * lead to a deadlock.
+ */
+ if (phydev->attached_dev)
+ phy_stop_machine(phydev);
+
if (!mdio_bus_phy_may_suspend(phydev))
return 0;
+
return phydrv->suspend(phydev);
}
-static int mdio_bus_resume(struct device * dev)
+static int mdio_bus_resume(struct device *dev)
{
struct phy_driver *phydrv = to_phy_driver(dev->driver);
struct phy_device *phydev = to_phy_device(dev);
+ int ret;
if (!mdio_bus_phy_may_suspend(phydev))
+ goto no_resume;
+
+ ret = phydrv->resume(phydev);
+ if (ret < 0)
+ return ret;
+
+no_resume:
+ if (phydev->attached_dev)
+ phy_start_machine(phydev, NULL);
+
+ return 0;
+}
+
+static int mdio_bus_restore(struct device *dev)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+ struct net_device *netdev = phydev->attached_dev;
+ int ret;
+
+ if (!netdev)
return 0;
- return phydrv->resume(phydev);
+
+ ret = phy_init_hw(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* The PHY needs to renegotiate. */
+ phydev->link = 0;
+ phydev->state = PHY_UP;
+
+ phy_start_machine(phydev, NULL);
+
+ return 0;
}
+static struct dev_pm_ops mdio_bus_pm_ops = {
+ .suspend = mdio_bus_suspend,
+ .resume = mdio_bus_resume,
+ .freeze = mdio_bus_suspend,
+ .thaw = mdio_bus_resume,
+ .restore = mdio_bus_restore,
+};
+
+#define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops)
+
+#else
+
+#define MDIO_BUS_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
struct bus_type mdio_bus_type = {
.name = "mdio_bus",
.match = mdio_bus_match,
- .suspend = mdio_bus_suspend,
- .resume = mdio_bus_resume,
+ .pm = MDIO_BUS_PM_OPS,
};
EXPORT_SYMBOL(mdio_bus_type);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b10fedd82143..8212b2b93422 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -378,6 +378,20 @@ void phy_disconnect(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_disconnect);
+int phy_init_hw(struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->drv || !phydev->drv->config_init)
+ return 0;
+
+ ret = phy_scan_fixups(phydev);
+ if (ret < 0)
+ return ret;
+
+ return phydev->drv->config_init(phydev);
+}
+
/**
* phy_attach_direct - attach a network device to a given PHY device pointer
* @dev: network device to attach
@@ -425,21 +439,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
/* Do initial configuration here, now that
* we have certain key parameters
* (dev_flags and interface) */
- if (phydev->drv->config_init) {
- int err;
-
- err = phy_scan_fixups(phydev);
-
- if (err < 0)
- return err;
-
- err = phydev->drv->config_init(phydev);
-
- if (err < 0)
- return err;
- }
-
- return 0;
+ return phy_init_hw(phydev);
}
EXPORT_SYMBOL(phy_attach_direct);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd7..ed2644a57500 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
#define MII_LAN83C185_ISF_INT_ALL (0x0e)
#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+ MII_LAN83C185_ISF_INT7)
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
static int smsc_phy_config_intr(struct phy_device *phydev)
{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ /* Enable energy detect mode for this SMSC Transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
return smsc_phy_ack_interrupt (phydev);
}
+static int lan911x_config_init(struct phy_device *phydev)
+{
+ return smsc_phy_ack_interrupt(phydev);
+}
static struct phy_driver lan83c185_driver = {
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan911x_config_init,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8d..f922294fd349 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
-static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
/* required last entry */
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf3860..ee0e2bd4842f 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -54,12 +54,8 @@
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
-#define MAX_SPLIT_SIZE 1023
-#define QLGE_SB_PAD 32
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -737,6 +733,21 @@ enum {
PRB_MX_DATA = 0xfc, /* Use semaphore */
};
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+ min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
+
/*
* CAM output format.
*/
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 707b391afa02..167a3dab2f18 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,7 @@ static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
@@ -452,9 +452,7 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
if (set) {
addr = &qdev->ndev->dev_addr[0];
QPRINTK(qdev, IFUP, DEBUG,
- "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5]);
+ "Set Mac addr %pM\n", addr);
} else {
memset(zero_mac_addr, 0, ETH_ALEN);
addr = &zero_mac_addr[0];
@@ -1433,6 +1431,254 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct sk_buff *skb;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi->dev = qdev->ndev;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, exiting.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ prefetch(lbq_desc->p.pg_chunk.va);
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ rx_frag += nr_frags;
+ rx_frag->page = lbq_desc->p.pg_chunk.page;
+ rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
+ rx_frag->size = length;
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ skb_shinfo(skb)->nr_frags++;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
+ else
+ napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ void *addr;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ skb = netdev_alloc_skb(ndev, length);
+ if (!skb) {
+ QPRINTK(qdev, DRV, ERR, "Couldn't get an skb, "
+ "need to unwind!.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+
+ addr = lbq_desc->p.pg_chunk.va;
+ prefetch(addr);
+
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+ ib_mac_rsp->flags2);
+ rx_ring->rx_errors++;
+ goto err_out;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ QPRINTK(qdev, DRV, ERR, "Segment too small, dropping.\n");
+ rx_ring->rx_dropped++;
+ goto err_out;
+ }
+ memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "%d bytes of headers and data in large. Chain "
+ "page to new skb and pull tail.\n", length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset+ETH_HLEN,
+ length-ETH_HLEN);
+ skb->len += length-ETH_HLEN;
+ skb->data_len += length-ETH_HLEN;
+ skb->truesize += length-ETH_HLEN;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
+ else
+ napi_gro_receive(napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+ return;
+err_out:
+ dev_kfree_skb_any(skb);
+ put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *new_skb = NULL;
+ struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+ skb = sbq_desc->p.skb;
+ /* Allocate new_skb and copy */
+ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ QPRINTK(qdev, PROBE, ERR,
+ "No skb available, drop the packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(skb_put(new_skb, length), skb->data, length);
+ skb = new_skb;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
+ ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_errors++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ prefetch(skb->data);
+ skb->dev = ndev;
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+ QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ QPRINTK(qdev, RX_STATUS, DEBUG,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+ vlan_id, skb);
+ else
+ napi_gro_receive(&rx_ring->napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+}
+
static void ql_realign_skb(struct sk_buff *skb, int len)
{
void *temp_addr = skb->data;
@@ -1646,14 +1892,13 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
- u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
@@ -1753,6 +1998,65 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
}
}
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
+ u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ /* The data and headers are split into
+ * separate buffers.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /* The data fit in a single small buffer.
+ * Allocate a new skb, copy the data and
+ * return the buffer to the free pool.
+ */
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ /* TCP packet in a page chunk that's been checksummed.
+ * Tack it on to our GRO skb and let it go.
+ */
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /* Non-TCP packet in a page chunk. Allocate an
+ * skb, tack it on frags, and send it up.
+ */
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else {
+ struct bq_desc *lbq_desc;
+
+ /* Free small buffer that holds the IAL */
+ lbq_desc = ql_get_curr_sbuf(rx_ring);
+ QPRINTK(qdev, RX_ERR, ERR, "Dropping frame, len %d > mtu %d\n",
+ length, qdev->ndev->mtu);
+
+ /* Unwind the large buffers for this frame. */
+ while (length > 0) {
+ lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ length -= (length < rx_ring->lbq_buf_size) ?
+ length : rx_ring->lbq_buf_size;
+ put_page(lbq_desc->p.pg_chunk.page);
+ }
+ }
+
+ return (unsigned long)length;
+}
+
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct ob_mac_iocb_rsp *mac_rsp)
@@ -3332,15 +3636,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Enable the function, set pagesize, enable error checking. */
value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
+ FSC_EC | FSC_VM_PAGE_4K;
+ value |= SPLT_SETTING;
/* Set/clear header splitting. */
mask = FSC_VM_PAGESIZE_MASK |
FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
ql_write32(qdev, FSC, mask | value);
- ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
- min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
+ ql_write32(qdev, SPLT_HDR, SPLT_LEN);
/* Set RX packet routing to use port/pci function on which the
* packet arrived on in addition to usual frame routing.
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a8..d68ba7a58631 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -1222,7 +1222,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id r6040_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
{ 0 }
};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 60f96c468a24..c1bb24cf0793 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
static void rtl_hw_start_8168(struct net_device *);
static void rtl_hw_start_8101(struct net_device *);
-static struct pci_device_id rtl8169_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
@@ -3188,15 +3188,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (netif_msg_probe(tp)) {
u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
- printk(KERN_INFO "%s: %s at 0x%lx, "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
- "XID %08x IRQ %d\n",
+ printk(KERN_INFO "%s: %s at 0x%lx, %pM, XID %08x IRQ %d\n",
dev->name,
rtl_chip_info[tp->chipset].name,
- dev->base_addr,
- dev->dev_addr[0], dev->dev_addr[1],
- dev->dev_addr[2], dev->dev_addr[3],
- dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
+ dev->base_addr, dev->dev_addr, xid, dev->irq);
}
rtl8169_init_phy(dev, tp);
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 20a71749154a..266baf534964 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1293,7 +1293,7 @@ static void rr_dump(struct net_device *dev)
printk("Error code 0x%x\n", readl(&regs->Fail1));
- index = (((readl(&regs->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES;
+ index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
cons = rrpriv->dirty_tx;
printk("TX ring index %i, TX consumer %i\n",
index, cons);
@@ -1688,7 +1688,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
-static struct pci_device_id rr_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index cc4218667cba..ac6189005c79 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -523,7 +523,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
* S2IO device table.
* This table lists all the devices that this driver supports.
*/
-static struct pci_device_id s2io_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f97..fd8cb506a2bb 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1589,7 +1589,7 @@ out:
return 0;
}
-static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
{ PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 103e8b0e2a0d..62d5cd51a9dd 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1940,7 +1940,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
**************************************************************************/
/* PCI device ID table */
-static struct pci_device_id efx_pci_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 2a85360a46f0..f61e1dedb8b3 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -853,7 +853,7 @@
* Poll for BIST completion
*
* Returns a single status code, and a binary blob of phy-specific
- * bist output. If the driver can't succesfully parse the BIST output,
+ * bist output. If the driver can't successfully parse the BIST output,
* it should still respect the Pass/Fail in OUT.RESULT.
*
* Locks required: PHY_LOCK if doing a PHY BIST
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index ca6285016dfd..42a35f086a9f 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -110,7 +110,7 @@ static void sh_eth_reset(struct net_device *ndev)
mdelay(1);
cnt--;
}
- if (cnt < 0)
+ if (cnt == 0)
printk(KERN_ERR "Device reset fail\n");
/* Table Init */
@@ -1473,13 +1473,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
- /* pritnt device infomation */
- pr_info("Base address at 0x%x, ",
- (u32)ndev->base_addr);
-
- for (i = 0; i < 5; i++)
- printk("%02X:", ndev->dev_addr[i]);
- printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
+ /* print device infomation */
+ pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a0..626de766443a 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -334,7 +334,7 @@ static const struct {
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75e..20c5ce474891 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
"SiS 900 PCI Fast Ethernet",
"SiS 7016 PCI Fast Ethernet"
};
-static struct pci_device_id sis900_pci_tbl [] = {
+static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a728503..6b955a4f19b2 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
-static struct pci_device_id skfddi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
goto fail;
}
read_address(smc, NULL);
- pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
- smc->hw.fddi_canon_addr.a[0],
- smc->hw.fddi_canon_addr.a[1],
- smc->hw.fddi_canon_addr.a[2],
- smc->hw.fddi_canon_addr.a[3],
- smc->hw.fddi_canon_addr.a[4],
- smc->hw.fddi_canon_addr.a[5]);
+ pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
@@ -890,15 +884,8 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
(struct fddi_addr *)dmi->dmi_addr,
1);
- pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
- pr_debug(" %02x %02x %02x ",
- dmi->dmi_addr[0],
- dmi->dmi_addr[1],
- dmi->dmi_addr[2]);
- pr_debug("%02x %02x %02x\n",
- dmi->dmi_addr[3],
- dmi->dmi_addr[4],
- dmi->dmi_addr[5]);
+ pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
+ dmi->dmi_addr);
dmi = dmi->next;
} // for
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc00163..5ff46eb18d0c 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -78,7 +78,7 @@ static int debug = -1; /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static const struct pci_device_id skge_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 1c01b96c9611..f8f50f70bcd2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1844,7 +1844,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ /* Wake unless it's detached, and called e.g. from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
netif_wake_queue(dev);
}
@@ -3176,7 +3177,9 @@ static void sky2_reset(struct sky2_hw *hw)
static void sky2_detach(struct net_device *dev)
{
if (netif_running(dev)) {
+ netif_tx_lock(dev);
netif_device_detach(dev); /* stop txq */
+ netif_tx_unlock(dev);
sky2_down(dev);
}
}
@@ -3837,6 +3840,50 @@ static int sky2_get_regs_len(struct net_device *dev)
return 0x4000;
}
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+ /* This complicated switch statement is to make sure and
+ * only access regions that are unreserved.
+ * Some blocks are only valid on dual port cards.
+ */
+ switch (b) {
+ /* second port */
+ case 5: /* Tx Arbiter 2 */
+ case 9: /* RX2 */
+ case 14 ... 15: /* TX2 */
+ case 17: case 19: /* Ram Buffer 2 */
+ case 22 ... 23: /* Tx Ram Buffer 2 */
+ case 25: /* Rx MAC Fifo 1 */
+ case 27: /* Tx MAC Fifo 2 */
+ case 31: /* GPHY 2 */
+ case 40 ... 47: /* Pattern Ram 2 */
+ case 52: case 54: /* TCP Segmentation 2 */
+ case 112 ... 116: /* GMAC 2 */
+ return hw->ports > 1;
+
+ case 0: /* Control */
+ case 2: /* Mac address */
+ case 4: /* Tx Arbiter 1 */
+ case 7: /* PCI express reg */
+ case 8: /* RX1 */
+ case 12 ... 13: /* TX1 */
+ case 16: case 18:/* Rx Ram Buffer 1 */
+ case 20 ... 21: /* Tx Ram Buffer 1 */
+ case 24: /* Rx MAC Fifo 1 */
+ case 26: /* Tx MAC Fifo 1 */
+ case 28 ... 29: /* Descriptor and status unit */
+ case 30: /* GPHY 1*/
+ case 32 ... 39: /* Pattern Ram 1 */
+ case 48: case 50: /* TCP Segmentation 1 */
+ case 56 ... 60: /* PCI space */
+ case 80 ... 84: /* GMAC 1 */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
/*
* Returns copy of control register region
* Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3851,55 +3898,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (b = 0; b < 128; b++) {
- /* This complicated switch statement is to make sure and
- * only access regions that are unreserved.
- * Some blocks are only valid on dual port cards.
- * and block 3 has some special diagnostic registers that
- * are poison.
- */
- switch (b) {
- case 3:
- /* skip diagnostic ram region */
+ /* skip poisonous diagnostic ram region in block 3 */
+ if (b == 3)
memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
- break;
-
- /* dual port cards only */
- case 5: /* Tx Arbiter 2 */
- case 9: /* RX2 */
- case 14 ... 15: /* TX2 */
- case 17: case 19: /* Ram Buffer 2 */
- case 22 ... 23: /* Tx Ram Buffer 2 */
- case 25: /* Rx MAC Fifo 1 */
- case 27: /* Tx MAC Fifo 2 */
- case 31: /* GPHY 2 */
- case 40 ... 47: /* Pattern Ram 2 */
- case 52: case 54: /* TCP Segmentation 2 */
- case 112 ... 116: /* GMAC 2 */
- if (sky2->hw->ports == 1)
- goto reserved;
- /* fall through */
- case 0: /* Control */
- case 2: /* Mac address */
- case 4: /* Tx Arbiter 1 */
- case 7: /* PCI express reg */
- case 8: /* RX1 */
- case 12 ... 13: /* TX1 */
- case 16: case 18:/* Rx Ram Buffer 1 */
- case 20 ... 21: /* Tx Ram Buffer 1 */
- case 24: /* Rx MAC Fifo 1 */
- case 26: /* Tx MAC Fifo 1 */
- case 28 ... 29: /* Descriptor and status unit */
- case 30: /* GPHY 1*/
- case 32 ... 39: /* Pattern Ram 1 */
- case 48: case 50: /* TCP Segmentation 1 */
- case 56 ... 60: /* PCI space */
- case 80 ... 84: /* GMAC 1 */
+ else if (sky2_reg_access_ok(sky2->hw, b))
memcpy_fromio(p, io, 128);
- break;
- default:
-reserved:
+ else
memset(p, 0, 128);
- }
p += 128;
io += 128;
@@ -4684,6 +4689,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
+ pdev->d3_delay = 150;
return 0;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 44ebbaa7457b..3c5a4f52345c 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -2017,10 +2017,8 @@ static int __devinit smc911x_probe(struct net_device *dev)
"set using ifconfig\n", dev->name);
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: ", dev->name);
- for (i = 0; i < 5; i++)
- printk("%2.2x:", dev->dev_addr[i]);
- printk("%2.2x\n", dev->dev_addr[5]);
+ printk("%s: Ethernet addr: %pM\n",
+ dev->name, dev->dev_addr);
}
if (lp->phy_type == 0) {
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3c..1495a5dd4b46 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -80,7 +80,7 @@ struct smsc9420_pdata {
int last_carrier;
};
-static const struct pci_device_id smsc9420_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
{ PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bfc..16191998ac67 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
char spider_net_driver_name[] = "spidernet";
-static struct pci_device_id spider_net_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 95db60adde41..c81252d9a57c 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
CH_6915 = 0,
};
-static struct pci_device_id starfire_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
{ 0, }
};
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7f..fb287649a305 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -4,8 +4,9 @@ config STMMAC_ETH
select PHYLIB
depends on NETDEVICES && CPU_SUBTYPE_ST40
help
- This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
- controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+ This is the driver for the Ethernet IPs are built around a
+ Synopsys IP Core and fully tested on the STMicroelectronics
+ platforms.
if STMMAC_ETH
@@ -32,7 +33,8 @@ config STMMAC_TIMER
default n
help
Use an external timer for mitigating the number of network
- interrupts.
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
choice
prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564dfa..c776af15fe1a 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- mac100.o gmac.o $(stmmac-y)
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e887..7267bcd43d06 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
*******************************************************************************/
#include "descs.h"
-#include <linux/io.h>
-
-/* *********************************************
- DMA CRS Control and Status Register Mapping
- * *********************************************/
-#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
-#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
-#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
-#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
-#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
-#define DMA_STATUS 0x00001014 /* Status Register */
-#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
-#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
-#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
-#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
-#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
-
-/* ********************************
- DMA Control register defines
- * ********************************/
-#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
-#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
-
-/* **************************************
- DMA Interrupt Enable register defines
- * **************************************/
-/**** NORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
-#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
-#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
-#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
-
-#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TIE)
-
-/**** ABNORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
-#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
-#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
-#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
-#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
-#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
-#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
-#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
-#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
-
-#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
- DMA_INTR_ENA_UNE)
-
-/* DMA default interrupt mask */
-#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
-
-/* ****************************
- * DMA Status register defines
- * ****************************/
-#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
-#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
-#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
-#define DMA_STATUS_GMI 0x08000000
-#define DMA_STATUS_GLI 0x04000000
-#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
-#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
-#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
-#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
-#define DMA_STATUS_TS_SHIFT 20
-#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
-#define DMA_STATUS_RS_SHIFT 17
-#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
-#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
-#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
-#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
-#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
-#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
-#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
-#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
-#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
-#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-
-/* Other defines */
-#define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
-
-/* Flow Control defines */
-#define FLOW_OFF 0
-#define FLOW_RX 1
-#define FLOW_TX 2
-#define FLOW_AUTO (FLOW_TX | FLOW_RX)
-
-/* DMA STORE-AND-FORWARD Operation Mode */
-#define SF_DMA_MODE 1
-
-#define HW_CSUM 1
-#define NO_HW_CSUM 0
-
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
-#define BUF_SIZE_4KiB 4096
-#define BUF_SIZE_2KiB 2048
-
-/* Power Down and WOL */
-#define PMT_NOT_SUPPORTED 0
-#define PMT_SUPPORTED 1
-
-/* Common MAC defines */
-#define MAC_CTRL_REG 0x00000000 /* MAC Control */
-#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
-
-/* MAC Management Counters register */
-#define MMC_CONTROL 0x00000100 /* MMC Control */
-#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
-#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
-#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
-#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
-
-#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
-#define MMC_CONTROL_MAX_FRM_SHIFT 3
-#define MMC_CONTROL_MAX_FRAME 0x7FF
+#include <linux/netdevice.h>
struct stmmac_extra_stats {
/* Transmit errors */
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
unsigned long normal_irq_n;
};
-/* GMAC core can compute the checksums in HW. */
-enum rx_frame_status {
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
csum_none = 2,
};
-static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
- unsigned int high, unsigned int low)
-{
- unsigned long data;
-
- data = (addr[5] << 8) | addr[4];
- writel(data, ioaddr + high);
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + low);
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
- return;
-}
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
-static inline void stmmac_get_mac_addr(unsigned long ioaddr,
- unsigned char *addr, unsigned int high,
- unsigned int low)
-{
- unsigned int hi_addr, lo_addr;
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
- /* Read the MAC address from the hardware */
- hi_addr = readl(ioaddr + high);
- lo_addr = readl(ioaddr + low);
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
- /* Extract the MAC address from the high and low words */
- addr[0] = lo_addr & 0xff;
- addr[1] = (lo_addr >> 8) & 0xff;
- addr[2] = (lo_addr >> 16) & 0xff;
- addr[3] = (lo_addr >> 24) & 0xff;
- addr[4] = hi_addr & 0xff;
- addr[5] = (hi_addr >> 8) & 0xff;
+/* MAC Management Counters register */
+#define MMC_CONTROL 0x00000100 /* MMC Control */
+#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
+#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
- return;
-}
+#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT 3
+#define MMC_CONTROL_MAX_FRAME 0x7FF
-struct stmmac_ops {
- /* MAC core initialization */
- void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
- /* DMA core initialization */
- int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
- /* Dump MAC registers */
- void (*dump_mac_regs) (unsigned long ioaddr);
- /* Dump DMA registers */
- void (*dump_dma_regs) (unsigned long ioaddr);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
- /* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr);
- /* RX descriptor ring initialization */
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
- /* TX descriptor ring initialization */
+ int disable_rx_ic);
+ /* DMA TX descriptor ring initialization */
void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
/* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (unsigned long ioaddr);
int (*get_rx_owner) (struct dma_desc *p);
void (*set_rx_owner) (struct dma_desc *p);
/* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
/* Return the reception status looking at the RDES1 */
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump DMA registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr);
+ void (*enable_dma_transmission) (unsigned long ioaddr);
+ void (*enable_dma_irq) (unsigned long ioaddr);
+ void (*disable_dma_irq) (unsigned long ioaddr);
+ void (*start_tx) (unsigned long ioaddr);
+ void (*stop_tx) (unsigned long ioaddr);
+ void (*start_rx) (unsigned long ioaddr);
+ void (*stop_rx) (unsigned long ioaddr);
+ int (*dma_interrupt) (unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ /* Dump MAC registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (unsigned long ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev);
/* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
void (*pmt) (unsigned long ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
};
struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
-struct hw_cap {
- unsigned int version; /* Core Version register (GMAC) */
- unsigned int pmt; /* Power-Down mode (GMAC) */
+struct mac_device_info {
+ struct stmmac_ops *mac;
+ struct stmmac_desc_ops *desc;
+ struct stmmac_dma_ops *dma;
+ unsigned int pmt; /* support Power-Down */
+ struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- struct mii_regs mii;
};
-struct mac_device_info {
- struct hw_cap hw;
- struct stmmac_ops *ops;
-};
+struct mac_device_info *dwmac1000_setup(unsigned long addr);
+struct mac_device_info *dwmac100_setup(unsigned long addr);
-struct mac_device_info *gmac_setup(unsigned long addr);
-struct mac_device_info *mac100_setup(unsigned long addr);
+extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e57..63a03e264694 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
- Header File to describe the DMA descriptors
- Use enhanced descriptors in case of GMAC Cores.
+ Header File to describe the DMA descriptors.
+ Enhanced descriptors have been in case of DWMAC1000 Cores.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062b..82dde774d4c5 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,23 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include "common.h"
-#include "mac100.h"
+#include "dwmac100.h"
+#include "dwmac_dma.h"
-#undef MAC100_DEBUG
-/*#define MAC100_DEBUG*/
-#ifdef MAC100_DEBUG
+#undef DWMAC100_DEBUG
+/*#define DWMAC100_DEBUG*/
+#ifdef DWMAC100_DEBUG
#define DBG(fmt, args...) printk(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif
-static void mac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(unsigned long ioaddr)
{
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -54,43 +54,43 @@ static void mac100_core_init(unsigned long ioaddr)
return;
}
-static void mac100_dump_mac_regs(unsigned long ioaddr)
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
{
pr_info("\t----------------------------------------------\n"
- "\t MAC100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
+ readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
+ readl(ioaddr + MAC_ADDR_HIGH));
pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
+ readl(ioaddr + MAC_ADDR_LOW));
pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
pr_info("\tflow control (offset 0x%x): 0x%08x\n",
MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
+ readl(ioaddr + MAC_VLAN1));
pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
+ readl(ioaddr + MAC_VLAN2));
pr_info("\n\tMAC management counter registers\n");
pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
return;
}
-static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +117,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
/* Store and Forward capability is not used at all..
* The transmit threshold can be programmed by
* setting the TTC bits in the DMA control register.*/
-static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +134,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
return;
}
-static void mac100_dump_dma_regs(unsigned long ioaddr)
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
{
int i;
- DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+ DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
for (i = 0; i < 9; i++)
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
(DMA_BUS_MODE + i * 4),
@@ -151,8 +151,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
}
/* DMA controller has two counters to track the number of
- the receive missed frames. */
-static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x,
unsigned long ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +182,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
return;
}
-static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
@@ -217,7 +219,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int mac100_get_tx_len(struct dma_desc *p)
+static int dwmac100_get_tx_len(struct dma_desc *p)
{
return p->des01.tx.buffer1_size;
}
@@ -226,14 +228,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns csum_none becasue the device
* is not able to compute the csum in HW. */
-static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = csum_none;
struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("mac100 Error: Oversized Ethernet "
+ pr_warning("dwmac100 Error: Oversized Ethernet "
"frame spanned multiple buffers\n");
stats->rx_length_errors++;
return discard_frame;
@@ -276,24 +279,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void mac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(unsigned long ioaddr)
{
return;
}
-static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_set_filter(struct net_device *dev)
+static void dwmac100_set_filter(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -319,8 +322,8 @@ static void mac100_set_filter(struct net_device *dev)
/* Perfect filter mode for physical address and Hash
filter for multicast */
value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
- | MAC_CONTROL_HO);
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
memset(mc_filter, 0, sizeof(mc_filter));
for (i = 0, mclist = dev->mc_list;
@@ -347,7 +350,7 @@ static void mac100_set_filter(struct net_device *dev)
return;
}
-static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +362,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
return;
}
-/* No PMT module supported in our SoC for the Ethernet Controller. */
-static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
{
return;
}
-static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -381,7 +386,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
for (i = 0; i < ring_size; i++) {
@@ -393,32 +398,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int mac100_get_tx_owner(struct dma_desc *p)
+static int dwmac100_get_tx_owner(struct dma_desc *p)
{
return p->des01.tx.own;
}
-static int mac100_get_rx_owner(struct dma_desc *p)
+static int dwmac100_get_rx_owner(struct dma_desc *p)
{
return p->des01.rx.own;
}
-static void mac100_set_tx_owner(struct dma_desc *p)
+static void dwmac100_set_tx_owner(struct dma_desc *p)
{
p->des01.tx.own = 1;
}
-static void mac100_set_rx_owner(struct dma_desc *p)
+static void dwmac100_set_rx_owner(struct dma_desc *p)
{
p->des01.rx.own = 1;
}
-static int mac100_get_tx_ls(struct dma_desc *p)
+static int dwmac100_get_tx_ls(struct dma_desc *p)
{
return p->des01.tx.last_segment;
}
-static void mac100_release_tx_desc(struct dma_desc *p)
+static void dwmac100_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.tx.end_ring;
@@ -444,74 +449,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
return;
}
-static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.tx.first_segment = is_fs;
p->des01.tx.buffer1_size = len;
}
-static void mac100_clear_tx_ic(struct dma_desc *p)
+static void dwmac100_clear_tx_ic(struct dma_desc *p)
{
p->des01.tx.interrupt = 0;
}
-static void mac100_close_tx_desc(struct dma_desc *p)
+static void dwmac100_close_tx_desc(struct dma_desc *p)
{
p->des01.tx.last_segment = 1;
p->des01.tx.interrupt = 1;
}
-static int mac100_get_rx_frame_len(struct dma_desc *p)
+static int dwmac100_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.rx.frame_length;
}
-struct stmmac_ops mac100_driver = {
- .core_init = mac100_core_init,
- .dump_mac_regs = mac100_dump_mac_regs,
- .dma_init = mac100_dma_init,
- .dump_dma_regs = mac100_dump_dma_regs,
- .dma_mode = mac100_dma_operation_mode,
- .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
- .tx_status = mac100_get_tx_frame_status,
- .rx_status = mac100_get_rx_frame_status,
- .get_tx_len = mac100_get_tx_len,
- .set_filter = mac100_set_filter,
- .flow_ctrl = mac100_flow_ctrl,
- .pmt = mac100_pmt,
- .init_rx_desc = mac100_init_rx_desc,
- .init_tx_desc = mac100_init_tx_desc,
- .get_tx_owner = mac100_get_tx_owner,
- .get_rx_owner = mac100_get_rx_owner,
- .release_tx_desc = mac100_release_tx_desc,
- .prepare_tx_desc = mac100_prepare_tx_desc,
- .clear_tx_ic = mac100_clear_tx_ic,
- .close_tx_desc = mac100_close_tx_desc,
- .get_tx_ls = mac100_get_tx_ls,
- .set_tx_owner = mac100_set_tx_owner,
- .set_rx_owner = mac100_set_rx_owner,
- .get_rx_frame_len = mac100_get_rx_frame_len,
- .host_irq_status = mac100_irq_status,
- .set_umac_addr = mac100_set_umac_addr,
- .get_umac_addr = mac100_get_umac_addr,
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *mac100_setup(unsigned long ioaddr)
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
+
+struct stmmac_desc_ops dwmac100_desc_ops = {
+ .tx_status = dwmac100_get_tx_frame_status,
+ .rx_status = dwmac100_get_rx_frame_status,
+ .get_tx_len = dwmac100_get_tx_len,
+ .init_rx_desc = dwmac100_init_rx_desc,
+ .init_tx_desc = dwmac100_init_tx_desc,
+ .get_tx_owner = dwmac100_get_tx_owner,
+ .get_rx_owner = dwmac100_get_rx_owner,
+ .release_tx_desc = dwmac100_release_tx_desc,
+ .prepare_tx_desc = dwmac100_prepare_tx_desc,
+ .clear_tx_ic = dwmac100_clear_tx_ic,
+ .close_tx_desc = dwmac100_close_tx_desc,
+ .get_tx_ls = dwmac100_get_tx_ls,
+ .set_tx_owner = dwmac100_set_tx_owner,
+ .set_rx_owner = dwmac100_set_rx_owner,
+ .get_rx_frame_len = dwmac100_get_rx_frame_len,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
{
struct mac_device_info *mac;
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- pr_info("\tMAC 10/100\n");
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->desc = &dwmac100_desc_ops;
+ mac->dma = &dwmac100_dma_ops;
- mac->ops = &mac100_driver;
- mac->hw.pmt = PMT_NOT_SUPPORTED;
- mac->hw.link.port = MAC_CONTROL_PS;
- mac->hw.link.duplex = MAC_CONTROL_F;
- mac->hw.link.speed = 0;
- mac->hw.mii.addr = MAC_MII_ADDR;
- mac->hw.mii.data = MAC_MII_DATA;
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
return mac;
}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..0f8f110d004a 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a148..62dca0e384e7 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
#define GMAC_CONTROL 0x00000000 /* Configuration */
#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum gmac_irq_status {
+enum dwmac1000_irq_status {
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
#define GMAC_MMC_RX_INTR 0x104
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+#undef DWMAC1000_DEBUG
+/* #define DWMAC1000__DEBUG */
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+#ifdef DWMAC1000__DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+extern struct stmmac_dma_ops dwmac1000_dma_ops;
+extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 000000000000..928eac05b912
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,245 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* STBus Bridge Configuration */
+ /*writel(0xc5608, ioaddr + 0x00007000);*/
+
+ /* Freeze MMC counters */
+ writel(0x8, ioaddr + GMAC_MMC_CTRL);
+ /* Mask GMAC interrupts */
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+ return;
+}
+
+static void dwmac1000_dump_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+ return;
+}
+
+static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int value = 0;
+
+ DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, dev->mc_count, dev->uc.count);
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((dev->mc_count > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (dev->mc_count > 0) {
+ int i;
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ for (i = 0, mclist = dev->mc_list;
+ mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ /* The upper 6 bits of the calculated CRC are used to
+ index the contens of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (dev->uc.count > GMAC_MAX_UNICAST_ADDRESSES)
+ /* Switch to promiscuous mode is more than 16 addrs
+ are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ list_for_each_entry(ha, &dev->uc.list, list) {
+ dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+ return;
+}
+
+static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+ return;
+}
+
+static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode == WAKE_MAGIC) {
+ DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ } else if (mode == WAKE_UCAST) {
+ DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+ return;
+}
+
+
+static void dwmac1000_irq_status(unsigned long ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+
+ return;
+}
+
+struct stmmac_ops dwmac1000_ops = {
+ .core_init = dwmac1000_core_init,
+ .dump_regs = dwmac1000_dump_regs,
+ .host_irq_status = dwmac1000_irq_status,
+ .set_filter = dwmac1000_set_filter,
+ .flow_ctrl = dwmac1000_flow_ctrl,
+ .pmt = dwmac1000_pmt,
+ .set_umac_addr = dwmac1000_set_umac_addr,
+ .get_umac_addr = dwmac1000_get_umac_addr,
+};
+
+struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 uid = readl(ioaddr + GMAC_VERSION);
+
+ pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+
+ mac->mac = &dwmac1000_ops;
+ mac->desc = &dwmac1000_desc_ops;
+ mac->dma = &dwmac1000_dma_ops;
+
+ mac->pmt = PMT_SUPPORTED;
+ mac->link.port = GMAC_CONTROL_PS;
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed = GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+
+ return mac;
+}
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee68953..68245508e2de 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
+ This contains the functions to handle the dma and descriptors.
+
Copyright (C) 2007-2009 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-
-#include "stmmac.h"
-#include "gmac.h"
-
-#undef GMAC_DEBUG
-/*#define GMAC_DEBUG*/
-#undef FRAME_FILTER_DEBUG
-/*#define FRAME_FILTER_DEBUG*/
-#ifdef GMAC_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
-static void gmac_dump_regs(unsigned long ioaddr)
-{
- int i;
- pr_info("\t----------------------------------------------\n"
- "\t GMAC registers (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
-
- for (i = 0; i < 55; i++) {
- int offset = i * 4;
- pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
- offset, readl(ioaddr + offset));
- }
- return;
-}
-
-static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
}
/* Transmit FIFO flush operation */
-static void gmac_flush_tx_fifo(unsigned long ioaddr)
+static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
}
/* Not yet implemented --- no RMON module */
-static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr)
+static void dwmac1000_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x, unsigned long ioaddr)
{
return;
}
-static void gmac_dump_dma_regs(unsigned long ioaddr)
+static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
{
int i;
pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
return;
}
-static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+static int dwmac1000_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.frame_flushed)) {
DBG(KERN_ERR "\tframe_flushed error\n");
x->tx_frame_flushed++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.underflow_error)) {
DBG(KERN_ERR "\tunderflow error\n");
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
x->tx_underflow++;
}
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.payload_error)) {
DBG(KERN_ERR "\tAddr/Payload csum error\n");
x->tx_payload_error++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
ret = -1;
@@ -245,12 +218,12 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int gmac_get_tx_len(struct dma_desc *p)
+static int dwmac1000_get_tx_len(struct dma_desc *p)
{
return p->des01.etx.buffer1_size;
}
-static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
{
int ret = good_frame;
u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p)
+static int dwmac1000_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x, struct dma_desc *p)
{
int ret = good_frame;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+ ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
if (unlikely(p->des01.erx.dribbling)) {
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void gmac_irq_status(unsigned long ioaddr)
-{
- u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
-
- /* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
- if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
- /* clear the PMT bits 5 and 6 by reading the PMT
- * status register. */
- readl(ioaddr + GMAC_PMT);
- }
-
- return;
-}
-
-static void gmac_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CORE_INIT;
- writel(value, ioaddr + GMAC_CONTROL);
-
- /* STBus Bridge Configuration */
- /*writel(0xc5608, ioaddr + 0x00007000);*/
-
- /* Freeze MMC counters */
- writel(0x8, ioaddr + GMAC_MMC_CTRL);
- /* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
-
-#ifdef STMMAC_VLAN_TAG_USED
- /* Tag detection without filtering */
- writel(0x0, ioaddr + GMAC_VLAN_TAG);
-#endif
- return;
-}
-
-static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- unsigned int value = 0;
-
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, dev->mc_count, dev->uc_count);
-
- if (dev->flags & IFF_PROMISC)
- value = GMAC_FRAME_FILTER_PR;
- else if ((dev->mc_count > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value = GMAC_FRAME_FILTER_PM; /* pass all multi */
- writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
- } else if (dev->mc_count > 0) {
- int i;
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Hash filter for multicast */
- value = GMAC_FRAME_FILTER_HMC;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
- /* The upper 6 bits of the calculated CRC are used to
- index the contens of the hash table */
- int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
- }
-
- /* Handle multiple unicast addresses (perfect filtering)*/
- if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
- /* Switch to promiscuous mode is more than 16 addrs
- are required */
- value |= GMAC_FRAME_FILTER_PR;
- else {
- int i;
- struct dev_addr_list *uc_ptr = dev->uc_list;
-
- for (i = 0; i < dev->uc_count; i++) {
- gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
- i + 1);
-
- DBG(KERN_INFO "\t%d "
- "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
- "%02x\n", i + 1,
- uc_ptr->da_addr[0], uc_ptr->da_addr[1],
- uc_ptr->da_addr[2], uc_ptr->da_addr[3],
- uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
- uc_ptr = uc_ptr->next;
- }
- }
-
-#ifdef FRAME_FILTER_DEBUG
- /* Enable Receive all mode (to debug filtering_fail errors) */
- value |= GMAC_FRAME_FILTER_RA;
-#endif
- writel(value, ioaddr + GMAC_FRAME_FILTER);
-
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
- "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
- readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
-}
-
-static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = 0;
-
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
- if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_RFE;
- }
- if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_TFE;
- }
-
- if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
- flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
- }
-
- writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
-}
-
-static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
-{
- unsigned int pmt = 0;
-
- if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
- pmt |= power_down | magic_pkt_en;
- } else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
- }
-
- writel(pmt, ioaddr + GMAC_PMT);
- return;
-}
-
-static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int gmac_get_tx_owner(struct dma_desc *p)
+static int dwmac1000_get_tx_owner(struct dma_desc *p)
{
return p->des01.etx.own;
}
-static int gmac_get_rx_owner(struct dma_desc *p)
+static int dwmac1000_get_rx_owner(struct dma_desc *p)
{
return p->des01.erx.own;
}
-static void gmac_set_tx_owner(struct dma_desc *p)
+static void dwmac1000_set_tx_owner(struct dma_desc *p)
{
p->des01.etx.own = 1;
}
-static void gmac_set_rx_owner(struct dma_desc *p)
+static void dwmac1000_set_rx_owner(struct dma_desc *p)
{
p->des01.erx.own = 1;
}
-static int gmac_get_tx_ls(struct dma_desc *p)
+static int dwmac1000_get_tx_ls(struct dma_desc *p)
{
return p->des01.etx.last_segment;
}
-static void gmac_release_tx_desc(struct dma_desc *p)
+static void dwmac1000_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.etx.end_ring;
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
return;
}
-static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des01.etx.checksum_insertion = cic_full;
}
-static void gmac_clear_tx_ic(struct dma_desc *p)
+static void dwmac1000_clear_tx_ic(struct dma_desc *p)
{
p->des01.etx.interrupt = 0;
}
-static void gmac_close_tx_desc(struct dma_desc *p)
+static void dwmac1000_close_tx_desc(struct dma_desc *p)
{
p->des01.etx.last_segment = 1;
p->des01.etx.interrupt = 1;
}
-static int gmac_get_rx_frame_len(struct dma_desc *p)
+static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.erx.frame_length;
}
-struct stmmac_ops gmac_driver = {
- .core_init = gmac_core_init,
- .dump_mac_regs = gmac_dump_regs,
- .dma_init = gmac_dma_init,
- .dump_dma_regs = gmac_dump_dma_regs,
- .dma_mode = gmac_dma_operation_mode,
- .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
- .tx_status = gmac_get_tx_frame_status,
- .rx_status = gmac_get_rx_frame_status,
- .get_tx_len = gmac_get_tx_len,
- .set_filter = gmac_set_filter,
- .flow_ctrl = gmac_flow_ctrl,
- .pmt = gmac_pmt,
- .init_rx_desc = gmac_init_rx_desc,
- .init_tx_desc = gmac_init_tx_desc,
- .get_tx_owner = gmac_get_tx_owner,
- .get_rx_owner = gmac_get_rx_owner,
- .release_tx_desc = gmac_release_tx_desc,
- .prepare_tx_desc = gmac_prepare_tx_desc,
- .clear_tx_ic = gmac_clear_tx_ic,
- .close_tx_desc = gmac_close_tx_desc,
- .get_tx_ls = gmac_get_tx_ls,
- .set_tx_owner = gmac_set_tx_owner,
- .set_rx_owner = gmac_set_rx_owner,
- .get_rx_frame_len = gmac_get_rx_frame_len,
- .host_irq_status = gmac_irq_status,
- .set_umac_addr = gmac_set_umac_addr,
- .get_umac_addr = gmac_get_umac_addr,
+struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .init = dwmac1000_dma_init,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
};
-struct mac_device_info *gmac_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
- u32 uid = readl(ioaddr + GMAC_VERSION);
-
- pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
- ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- mac->ops = &gmac_driver;
- mac->hw.pmt = PMT_SUPPORTED;
- mac->hw.link.port = GMAC_CONTROL_PS;
- mac->hw.link.duplex = GMAC_CONTROL_DM;
- mac->hw.link.speed = GMAC_CONTROL_FES;
- mac->hw.mii.addr = GMAC_MII_ADDR;
- mac->hw.mii.data = GMAC_MII_DATA;
-
- return mac;
-}
+struct stmmac_desc_ops dwmac1000_desc_ops = {
+ .tx_status = dwmac1000_get_tx_frame_status,
+ .rx_status = dwmac1000_get_rx_frame_status,
+ .get_tx_len = dwmac1000_get_tx_len,
+ .init_rx_desc = dwmac1000_init_rx_desc,
+ .init_tx_desc = dwmac1000_init_tx_desc,
+ .get_tx_owner = dwmac1000_get_tx_owner,
+ .get_rx_owner = dwmac1000_get_rx_owner,
+ .release_tx_desc = dwmac1000_release_tx_desc,
+ .prepare_tx_desc = dwmac1000_prepare_tx_desc,
+ .clear_tx_ic = dwmac1000_clear_tx_ic,
+ .close_tx_desc = dwmac1000_close_tx_desc,
+ .get_tx_ls = dwmac1000_get_tx_ls,
+ .set_tx_owner = dwmac1000_set_tx_owner,
+ .set_rx_owner = dwmac1000_set_rx_owner,
+ .get_rx_frame_len = dwmac1000_get_rx_frame_len,
+};
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 000000000000..de848d9f6060
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_GMI 0x08000000
+#define DMA_STATUS_GLI 0x04000000
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
+extern void dwmac_enable_dma_irq(unsigned long ioaddr);
+extern void dwmac_disable_dma_irq(unsigned long ioaddr);
+extern void dwmac_dma_start_tx(unsigned long ioaddr);
+extern void dwmac_dma_stop_tx(unsigned long ioaddr);
+extern void dwmac_dma_start_rx(unsigned long ioaddr);
+extern void dwmac_dma_stop_rx(unsigned long ioaddr);
+extern int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 000000000000..d4adb1eaa447
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(unsigned long ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(unsigned long ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(unsigned long ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_dma_start_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_stop_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_start_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+void dwmac_dma_stop_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+#endif
+
+int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DBG(INFO, "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DBG(INFO, "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DBG(INFO, "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DBG(INFO, "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DBG(INFO, "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DBG(INFO, "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DBG(INFO, "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DBG(INFO, "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DBG(INFO, "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ x->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DBG(INFO, "\n\n");
+ return ret;
+}
+
+
+void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+
+ return;
+}
+
+void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+
+ return;
+}
+
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e5..ba35e6943cf4 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_09"
+#define DRV_MODULE_VERSION "Jan_2010"
+#include <linux/stmmac.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
int rx_csum;
unsigned int dma_buf_sz;
struct device *device;
- struct mac_device_info *mac_type;
+ struct mac_device_info *hw;
struct stmmac_extra_stats xstats;
struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
int phy_mask;
int (*phy_reset) (void *priv);
void (*fix_mac_speed) (void *priv, unsigned int speed);
+ void (*bus_setup)(unsigned long ioaddr);
void *bsp_priv;
int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
#endif
};
+#ifdef CONFIG_STM_DRIVERS
+#include <linux/stm/pad.h>
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
+
+ /* Pad routing setup */
+ if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
+ dev_name(&pdev->dev)))) {
+ printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+#else
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a0758..0abeff6193a1 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
#define REG_SPACE_SIZE 0x1054
#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
} else {
unsigned long ioaddr = netdev->base_addr;
- priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
- priv->flow_ctrl, priv->pause);
+ priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
}
spin_unlock(&priv->lock);
return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i;
/* Update HW stats if supported */
- priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
- ioaddr);
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
+ ioaddr);
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07f..a6733612d64a 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
@@ -45,7 +44,6 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
-#include <linux/stm/soc.h>
#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +224,38 @@ static void stmmac_adjust_link(struct net_device *dev)
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
- ctrl &= ~priv->mac_type->hw.link.duplex;
+ ctrl &= ~priv->hw->link.duplex;
else
- ctrl |= priv->mac_type->hw.link.duplex;
+ ctrl |= priv->hw->link.duplex;
priv->oldduplex = phydev->duplex;
}
/* Flow Control operation */
if (phydev->pause)
- priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
- fc, pause_time);
+ priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+ fc, pause_time);
if (phydev->speed != priv->speed) {
new_state = 1;
switch (phydev->speed) {
case 1000:
if (likely(priv->is_gmac))
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
break;
case 100:
case 10:
if (priv->is_gmac) {
- ctrl |= priv->mac_type->hw.link.port;
+ ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
- ctrl |=
- priv->mac_type->hw.link.
- speed;
+ ctrl |= priv->hw->link.speed;
} else {
- ctrl &=
- ~(priv->mac_type->hw.
- link.speed);
+ ctrl &= ~(priv->hw->link.speed);
}
} else {
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
}
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ if (likely(priv->fix_mac_speed))
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
break;
default:
if (netif_msg_link(priv))
@@ -305,8 +300,8 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[BUS_ID_SIZE]; /* PHY to connect */
- char bus_id[BUS_ID_SIZE];
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
priv->oldlink = 0;
priv->speed = 0;
@@ -318,7 +313,8 @@ static int stmmac_init_phy(struct net_device *dev)
}
snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
- snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +504,8 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->cur_tx = 0;
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
if (netif_msg_hw(priv)) {
pr_info("RX descriptor ring:\n");
@@ -544,8 +540,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + i;
if (p->des2)
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
- DMA_TO_DEVICE);
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
@@ -575,50 +571,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
}
/**
- * stmmac_dma_start_tx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA tx process.
- */
-static void stmmac_dma_start_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-static void stmmac_dma_stop_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-/**
- * stmmac_dma_start_rx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA rx process.
- */
-static void stmmac_dma_start_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void stmmac_dma_stop_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv : pointer to the private device structure.
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +581,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
if (!priv->is_gmac) {
/* MAC 10/100 */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
priv->tx_coe = NO_HW_CSUM;
} else {
if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
- priv->mac_type->ops->dma_mode(priv->dev->base_addr,
- SF_DMA_MODE, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr,
+ SF_DMA_MODE, SF_DMA_MODE);
tc = SF_DMA_MODE;
priv->tx_coe = HW_CSUM;
} else {
/* Checksum computation is performed in software. */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
- SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
+ SF_DMA_MODE);
priv->tx_coe = NO_HW_CSUM;
}
}
@@ -649,88 +601,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
return;
}
-#ifdef STMMAC_DEBUG
-/**
- * show_tx_process_state
- * @status: tx descriptor status field
- * Description: it shows the Transmit Process State for CSR5[22:20]
- */
-static void show_tx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- TX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- TX (Running):Fetching the Tx desc\n");
- break;
- case 2:
- pr_info("- TX (Running): Waiting for end of tx\n");
- break;
- case 3:
- pr_info("- TX (Running): Reading the data "
- "and queuing the data into the Tx buf\n");
- break;
- case 6:
- pr_info("- TX (Suspended): Tx Buff Underflow "
- "or an unavailable Transmit descriptor\n");
- break;
- case 7:
- pr_info("- TX (Running): Closing Tx descriptor\n");
- break;
- default:
- break;
- }
- return;
-}
-
-/**
- * show_rx_process_state
- * @status: rx descriptor status field
- * Description: it shows the Receive Process State for CSR5[19:17]
- */
-static void show_rx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- RX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- RX (Running): Fetching the Rx desc\n");
- break;
- case 2:
- pr_info("- RX (Running):Checking for end of pkt\n");
- break;
- case 3:
- pr_info("- RX (Running): Waiting for Rx pkt\n");
- break;
- case 4:
- pr_info("- RX (Suspended): Unavailable Rx buf\n");
- break;
- case 5:
- pr_info("- RX (Running): Closing Rx descriptor\n");
- break;
- case 6:
- pr_info("- RX(Running): Flushing the current frame"
- " from the Rx buf\n");
- break;
- case 7:
- pr_info("- RX (Running): Queuing the Rx frame"
- " from the Rx buf into memory\n");
- break;
- default:
- break;
- }
- return;
-}
-#endif
-
/**
* stmmac_tx:
* @priv: private driver structure
@@ -748,16 +618,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
- if (priv->mac_type->ops->get_tx_owner(p))
+ if (priv->hw->desc->get_tx_owner(p))
break;
/* Verify tx error by looking at the last segment */
- last = priv->mac_type->ops->get_tx_ls(p);
+ last = priv->hw->desc->get_tx_ls(p);
if (likely(last)) {
int tx_error =
- priv->mac_type->ops->tx_status(&priv->dev->stats,
- &priv->xstats,
- p, ioaddr);
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -769,7 +639,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
if (likely(p->des2))
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
+ priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
if (unlikely(p->des3))
p->des3 = 0;
@@ -790,7 +660,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->tx_skbuff[entry] = NULL;
}
- priv->mac_type->ops->release_tx_desc(p);
+ priv->hw->desc->release_tx_desc(p);
entry = (++priv->dirty_tx) % txsize;
}
@@ -814,7 +684,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
priv->tm->timer_start(tmrate);
else
#endif
- writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
}
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +694,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
priv->tm->timer_stop();
else
#endif
- writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
}
static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +702,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
unsigned int has_work = 0;
int rxret, tx_work = 0;
- rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+ rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
(priv->cur_rx % priv->dma_rx_size));
if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +753,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
{
netif_stop_queue(priv->dev);
- stmmac_dma_stop_tx(priv->dev->base_addr);
+ priv->hw->dma->stop_tx(priv->dev->base_addr);
dma_free_tx_skbufs(priv);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
priv->dirty_tx = 0;
priv->cur_tx = 0;
- stmmac_dma_start_tx(priv->dev->base_addr);
+ priv->hw->dma->start_tx(priv->dev->base_addr);
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
@@ -896,95 +766,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
return;
}
-/**
- * stmmac_dma_interrupt - Interrupt handler for the driver
- * @dev: net device structure
- * Description: Interrupt handler for the driver (DMA).
- */
-static void stmmac_dma_interrupt(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- struct stmmac_priv *priv = netdev_priv(dev);
- /* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
-#ifdef STMMAC_DEBUG
- /* It displays the DMA transmit process state (CSR5 register) */
- if (netif_msg_tx_done(priv))
- show_tx_process_state(intr_status);
- if (netif_msg_rx_status(priv))
- show_rx_process_state(intr_status);
-#endif
- /* ABNORMAL interrupts */
- if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
- if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(intr, INFO, "transmit underflow\n");
- if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
- /* Try to bump up the threshold */
- tc += 64;
- priv->mac_type->ops->dma_mode(ioaddr, tc,
- SF_DMA_MODE);
- priv->xstats.threshold = tc;
- }
- stmmac_tx_err(priv);
- priv->xstats.tx_undeflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(intr, INFO, "transmit jabber\n");
- priv->xstats.tx_jabber_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(intr, INFO, "recv overflow\n");
- priv->xstats.rx_overflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(intr, INFO, "receive buffer unavailable\n");
- priv->xstats.rx_buf_unav_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(intr, INFO, "receive process stopped\n");
- priv->xstats.rx_process_stopped_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(intr, INFO, "receive watchdog\n");
- priv->xstats.rx_watchdog_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(intr, INFO, "transmit early interrupt\n");
- priv->xstats.tx_early_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(intr, INFO, "transmit process stopped\n");
- priv->xstats.tx_process_stopped_irq++;
- stmmac_tx_err(priv);
- }
- if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(intr, INFO, "fatal bus error\n");
- priv->xstats.fatal_bus_error_irq++;
- stmmac_tx_err(priv);
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ unsigned long ioaddr = priv->dev->base_addr;
+ int status;
+
+ status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
+ &priv->xstats);
+ if (likely(status == handle_tx_rx))
+ _stmmac_schedule(priv);
+
+ else if (unlikely(status == tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ tc += 64;
+ priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+ priv->xstats.threshold = tc;
}
- }
-
- /* TX/RX NORMAL interrupts */
- if (intr_status & DMA_STATUS_NIS) {
- priv->xstats.normal_irq_n++;
- if (likely((intr_status & DMA_STATUS_RI) ||
- (intr_status & (DMA_STATUS_TI))))
- _stmmac_schedule(priv);
- }
-
- /* Optional hardware blocks, interrupts should be disabled */
- if (unlikely(intr_status &
- (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
- pr_info("%s: unexpected status %08x\n", __func__, intr_status);
-
- /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
- writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "\n\n");
+ stmmac_tx_err(priv);
+ } else if (unlikely(status == tx_hard_error))
+ stmmac_tx_err(priv);
return;
}
@@ -1058,17 +860,20 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
- priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+ if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+ priv->dma_rx_phy) < 0)) {
pr_err("%s: DMA initialization failed\n", __func__);
return -1;
}
/* Copy the MAC addr into the HW */
- priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ /* If required, perform hw setup of the bus. */
+ if (priv->bus_setup)
+ priv->bus_setup(ioaddr);
/* Initialize the MAC Core */
- priv->mac_type->ops->core_init(ioaddr);
+ priv->hw->mac->core_init(ioaddr);
priv->shutdown = 0;
@@ -1089,16 +894,16 @@ static int stmmac_open(struct net_device *dev)
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
- priv->mac_type->ops->dump_mac_regs(ioaddr);
- priv->mac_type->ops->dump_dma_regs(ioaddr);
+ priv->hw->mac->dump_regs(ioaddr);
+ priv->hw->dma->dump_regs(ioaddr);
}
if (priv->phydev)
@@ -1142,8 +947,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
@@ -1214,8 +1019,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
desc->des2 = dma_map_single(priv->device, skb->data,
BUF_SIZE_8KiB, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -1224,16 +1029,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
skb->data + BUF_SIZE_8KiB,
buf2_size, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 0,
- buf2_size, csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
+ csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
return entry;
}
@@ -1301,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1122,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
frag->page_offset,
len, DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
- priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
- csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
}
/* Interrupt on completition only for the latest segment */
- priv->mac_type->ops->close_tx_desc(desc);
+ priv->hw->desc->close_tx_desc(desc);
#ifdef CONFIG_STMMAC_TIMER
/* Clean IC while using timer */
if (likely(priv->tm->enable))
- priv->mac_type->ops->clear_tx_ic(desc);
+ priv->hw->desc->clear_tx_ic(desc);
#endif
/* To avoid raise condition */
- priv->mac_type->ops->set_tx_owner(first);
+ priv->hw->desc->set_tx_owner(first);
priv->cur_tx++;
@@ -1353,8 +1157,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- /* CSR1 enables the transmit DMA to check for new descriptor */
- writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+ priv->hw->dma->enable_dma_transmission(dev->base_addr);
return NETDEV_TX_OK;
}
@@ -1391,7 +1194,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
- priv->mac_type->ops->set_rx_owner(p + entry);
+ priv->hw->desc->set_rx_owner(p + entry);
}
return;
}
@@ -1412,7 +1215,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
#endif
count = 0;
- while (!priv->mac_type->ops->get_rx_owner(p)) {
+ while (!priv->hw->desc->get_rx_owner(p)) {
int status;
if (count >= limit)
@@ -1425,15 +1228,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
prefetch(p_next);
/* read the status of the incoming frame */
- status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
- &priv->xstats, p));
+ status = (priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
if (unlikely(status == discard_frame))
priv->dev->stats.rx_errors++;
else {
struct sk_buff *skb;
/* Length should omit the CRC */
- int frame_len =
- priv->mac_type->ops->get_rx_frame_len(p) - 4;
+ int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
#ifdef STMMAC_RX_DEBUG
if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1371,7 @@ static void stmmac_multicast_list(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
spin_lock(&priv->lock);
- priv->mac_type->ops->set_filter(dev);
+ priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
return;
}
@@ -1623,9 +1425,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->is_gmac) {
unsigned long ioaddr = dev->base_addr;
/* To handle GMAC own interrupts */
- priv->mac_type->ops->host_irq_status(ioaddr);
+ priv->hw->mac->host_irq_status(ioaddr);
}
- stmmac_dma_interrupt(dev);
+
+ stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
}
@@ -1744,7 +1547,7 @@ static int stmmac_probe(struct net_device *dev)
netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
/* Get the MAC address */
- priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+ priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
if (!is_valid_ether_addr(dev->dev_addr))
pr_warning("\tno valid MAC address;"
@@ -1779,16 +1582,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
if (priv->is_gmac)
- device = gmac_setup(ioaddr);
+ device = dwmac1000_setup(ioaddr);
else
- device = mac100_setup(ioaddr);
+ device = dwmac100_setup(ioaddr);
if (!device)
return -ENOMEM;
- priv->mac_type = device;
+ priv->hw = device;
- priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
+ priv->wolenabled = priv->hw->pmt; /* PMT supported */
if (priv->wolenabled == PMT_SUPPORTED)
priv->wolopts = WAKE_MAGIC; /* Magic Frame */
@@ -1797,8 +1600,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
static int stmmacphy_dvr_probe(struct platform_device *pdev)
{
- struct plat_stmmacphy_data *plat_dat;
- plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+ struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
plat_dat->bus_id);
@@ -1830,9 +1632,7 @@ static struct platform_driver stmmacphy_driver = {
static int stmmac_associate_phy(struct device *dev, void *data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
- struct plat_stmmacphy_data *plat_dat;
-
- plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+ struct plat_stmmacphy_data *plat_dat = dev->platform_data;
DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
plat_dat->bus_id);
@@ -1922,7 +1722,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->device = &(pdev->dev);
priv->dev = ndev;
- plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+ plat_dat = pdev->dev.platform_data;
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1732,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
/* Set the I/O base addr */
ndev->base_addr = (unsigned long)addr;
+ /* Verify embedded resource for the platform */
+ ret = stmmac_claim_resource(pdev);
+ if (ret < 0)
+ goto out;
+
/* MAC HW revice detection */
ret = stmmac_mac_device_setup(ndev);
if (ret < 0)
@@ -1952,6 +1757,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
}
priv->fix_mac_speed = plat_dat->fix_mac_speed;
+ priv->bus_setup = plat_dat->bus_setup;
priv->bsp_priv = plat_dat->bsp_priv;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1986,12 +1792,13 @@ out:
static int stmmac_dvr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
- stmmac_dma_stop_rx(ndev->base_addr);
- stmmac_dma_stop_tx(ndev->base_addr);
+ priv->hw->dma->stop_rx(ndev->base_addr);
+ priv->hw->dma->stop_tx(ndev->base_addr);
stmmac_mac_disable_rx(ndev->base_addr);
stmmac_mac_disable_tx(ndev->base_addr);
@@ -2038,21 +1845,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx,
- priv->dma_rx_size, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx,
- priv->dma_tx_size);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
stmmac_mac_disable_tx(dev->base_addr);
if (device_may_wakeup(&(pdev->dev))) {
/* Enable Power down mode by programming the PMT regs */
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr,
- priv->wolopts);
+ priv->hw->mac->pmt(dev->base_addr,
+ priv->wolopts);
} else {
stmmac_mac_disable_rx(dev->base_addr);
}
@@ -2093,15 +1899,15 @@ static int stmmac_resume(struct platform_device *pdev)
* from another devices (e.g. serial console). */
if (device_may_wakeup(&(pdev->dev)))
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr, 0);
+ priv->hw->mac->pmt(dev->base_addr, 0);
netif_device_attach(dev);
/* Enable the MAC and DMA */
stmmac_mac_enable_rx(ioaddr);
stmmac_mac_enable_tx(ioaddr);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22fc..fffe1d037fe6 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,7 +24,6 @@
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -48,8 +47,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +79,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
u16 value =
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +111,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_address = priv->hw->mii.addr;
if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca60..0c972e560cf3 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -206,7 +206,7 @@ IVc. Errata
#define USE_IO_OPS 1
#endif
-static const struct pci_device_id sundance_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab9..b55ceb88d93f 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "
-static struct pci_device_id gem_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8a..76ccd31cbf50 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -3211,7 +3211,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
dev_set_drvdata(&pdev->dev, NULL);
}
-static struct pci_device_id happymeal_pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
};
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index bc74db0d12f3..d65764ea1d83 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -1062,10 +1062,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
goto err_out_free_dev;
}
- printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+ printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
list_add(&vp->list, &vnet_list);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 75a669d48e5e..033408f589fb 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
{ "TOSHIBA TC35815/TX4939" },
};
-static const struct pci_device_id tc35815_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b938..b907bee31fd5 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -64,7 +64,7 @@
#include "tehuti.h"
-static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
{0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 3a74d2168598..b0630cd093a3 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -174,7 +174,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
-static struct pci_device_id tg3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb3155..613943eb6e75 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
-static struct pci_device_id tlan_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d9629..b0d7db9d8bb4 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
* will be stuck with 1555 lines of hex #'s in the code.
*/
-static struct pci_device_id xl_pci_tbl[] =
+static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
{
{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* terminate list */
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a3..515f122777ab 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
#define ABYSS_IO_EXTENT 64
-static struct pci_device_id abyss_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
{ } /* Terminating entry */
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d07..3f9d5a25562e 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -146,7 +146,7 @@
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
" v0.5.3 11/13/02 - Kent Yoder";
-static struct pci_device_id streamer_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
{} /* terminating entry */
};
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c5132..f010a4dc5f19 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
-static struct pci_device_id olympic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
{ } /* Terminating Entry */
};
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdcae..d4c7c0c0a3d6 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
{ {0x03, 0x01}, "3Com Token Link Velocity"},
};
-static struct pci_device_id tmspci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 1cc8cf4425d1..516713fa0a05 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -101,6 +101,10 @@ config TULIP_NAPI_HW_MITIGATION
If in doubt, say Y.
+config TULIP_DM910X
+ def_bool y
+ depends on TULIP && SPARC
+
config DE4X5
tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
depends on PCI || EISA
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb75..87ea39e2037d 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -337,7 +337,7 @@ static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
-static struct pci_device_id de_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index ad63621913c3..2d9f09c6189e 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -92,6 +92,10 @@
#include <asm/uaccess.h>
#include <asm/irq.h>
+#ifdef CONFIG_TULIP_DM910X
+#include <linux/of.h>
+#endif
+
/* Board/System/Debug information/definition ---------------- */
#define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */
@@ -377,6 +381,23 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (!printed_version++)
printk(version);
+ /*
+ * SPARC on-board DM910x chips should be handled by the main
+ * tulip driver, except for early DM9100s.
+ */
+#ifdef CONFIG_TULIP_DM910X
+ if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) ||
+ ent->driver_data == PCI_DM9102_ID) {
+ struct device_node *dp = pci_device_to_OF_node(pdev);
+
+ if (dp && of_get_property(dp, "local-mac-address", NULL)) {
+ printk(KERN_INFO DRV_NAME
+ ": skipping on-board DM910x (use tulip)\n");
+ return -ENODEV;
+ }
+ }
+#endif
+
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
if (dev == NULL)
@@ -2068,7 +2089,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
-static struct pci_device_id dmfe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 0fa3140d65bf..cbfdd9fbe005 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -196,9 +196,13 @@ struct tulip_chip_table tulip_tbl[] = {
| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
/* DM910X */
+#ifdef CONFIG_TULIP_DM910X
{ "Davicom DM9102/DM9102A", 128, 0x0001ebef,
HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
tulip_timer, tulip_media_task },
+#else
+ { NULL },
+#endif
/* RS7112 */
{ "Conexant LANfinity", 256, 0x0001ebef,
@@ -207,7 +211,7 @@ struct tulip_chip_table tulip_tbl[] = {
};
-static struct pci_device_id tulip_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
@@ -228,8 +232,10 @@ static struct pci_device_id tulip_pci_tbl[] = {
{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+#ifdef CONFIG_TULIP_DM910X
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+#endif
{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
@@ -1299,18 +1305,30 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
/*
- * Early DM9100's need software CRC and the DMFE driver
+ * DM910x chips should be handled by the dmfe driver, except
+ * on-board chips on SPARC systems. Also, early DM9100s need
+ * software CRC which only the dmfe driver supports.
*/
- if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
- {
- /* Read Chip revision */
- if (pdev->revision < 0x30)
- {
- printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+#ifdef CONFIG_TULIP_DM910X
+ if (chip_idx == DM910X) {
+ struct device_node *dp;
+
+ if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
+ pdev->revision < 0x30) {
+ printk(KERN_INFO PFX
+ "skipping early DM9100 with Crc bug (use dmfe)\n");
+ return -ENODEV;
+ }
+
+ dp = pci_device_to_OF_node(pdev);
+ if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
+ printk(KERN_INFO PFX
+ "skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
+#endif
/*
* Looks for early PCI chipsets where people report hangs
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc355..d549042a01df 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1783,7 +1783,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
}
-static struct pci_device_id uli526x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
{ 0, }
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f9..23395e1ff238 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -218,7 +218,7 @@ enum chip_capability_flags {
CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};
-static const struct pci_device_id w840_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d6..c84123fd635c 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -144,7 +144,7 @@ static int link_status(struct xircom_private *card);
-static struct pci_device_id xircom_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be6..507338afc96c 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -215,7 +215,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
* bit 8 indicates if this is a (0) copper or (1) fiber card
* bits 12-16 indicate card type: (0) client and (1) server
*/
-static struct pci_device_id typhoon_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
@@ -2113,7 +2113,7 @@ typhoon_tx_timeout(struct net_device *dev)
if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
printk(KERN_WARNING "%s: could not reset in tx timeout\n",
dev->name);
- goto truely_dead;
+ goto truly_dead;
}
/* If we ever start using the Hi ring, it will need cleaning too */
@@ -2123,13 +2123,13 @@ typhoon_tx_timeout(struct net_device *dev)
if(typhoon_start_runtime(tp) < 0) {
printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
dev->name);
- goto truely_dead;
+ goto truly_dead;
}
netif_wake_queue(dev);
return;
-truely_dead:
+truly_dead:
/* Reset the hardware, and turn off carrier to avoid more timeouts */
typhoon_reset(tp->ioaddr, NoWait);
netif_carrier_off(dev);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 41ad2f3697c7..96bdc0b43889 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3607,6 +3607,7 @@ static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state)
if (!netif_running(ndev))
return 0;
+ netif_device_detach(ndev);
napi_disable(&ugeth->napi);
/*
@@ -3665,7 +3666,7 @@ static int ucc_geth_resume(struct of_device *ofdev)
phy_start(ugeth->phydev);
napi_enable(&ugeth->napi);
- netif_start_queue(ndev);
+ netif_device_attach(ndev);
return 0;
}
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index a007e2acf651..ef1fbeb11c6e 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -838,13 +838,13 @@ struct ucc_geth_hardware_statistics {
using the maximum is
easier */
#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
-#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
+#define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64
#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
-#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
+#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This
is a
guess
*/
@@ -899,16 +899,17 @@ struct ucc_geth_hardware_statistics {
#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
*/
#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT 128
+#define UCC_GETH_UTFTT_INIT 512
/* Gigabit Ethernet (1000 Mbps) */
#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
FIFO size */
#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
-#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
+#define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual
+ FIFO size */
+#define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */
+#define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual
FIFO size */
-#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
-#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
#define UCC_GETH_REMODER_INIT 0 /* bits that must be
set */
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22b87e64a810..7d3fa06980c1 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -897,11 +897,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
f5u011_rxmode(catc, catc->rxmode);
}
dbg("Init done.");
- printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ",
+ printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
- usbdev->bus->bus_name, usbdev->devpath);
- for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
- printk("%2.2x.\n", netdev->dev_addr[i]);
+ usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index f78f0903b073..6895f1531238 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -286,6 +286,7 @@ struct hso_device {
u8 usb_gone;
struct work_struct async_get_intf;
struct work_struct async_put_intf;
+ struct work_struct reset_device;
struct usb_device *usb;
struct usb_interface *interface;
@@ -332,7 +333,8 @@ static void hso_kick_transmit(struct hso_serial *serial);
/* Helper functions */
static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int,
struct usb_device *usb, gfp_t gfp);
-static void log_usb_status(int status, const char *function);
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev);
static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf,
int type, int dir);
static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports);
@@ -350,6 +352,7 @@ static void async_put_intf(struct work_struct *data);
static int hso_put_activity(struct hso_device *hso_dev);
static int hso_get_activity(struct hso_device *hso_dev);
static void tiocmget_intr_callback(struct urb *urb);
+static void reset_device(struct work_struct *data);
/*****************************************************************************/
/* Helping functions */
/*****************************************************************************/
@@ -461,10 +464,17 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */
{USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */
{USB_DEVICE(0x0af0, 0x7701)},
+ {USB_DEVICE(0x0af0, 0x7706)},
{USB_DEVICE(0x0af0, 0x7801)},
{USB_DEVICE(0x0af0, 0x7901)},
+ {USB_DEVICE(0x0af0, 0x7A01)},
+ {USB_DEVICE(0x0af0, 0x7A05)},
{USB_DEVICE(0x0af0, 0x8200)},
{USB_DEVICE(0x0af0, 0x8201)},
+ {USB_DEVICE(0x0af0, 0x8300)},
+ {USB_DEVICE(0x0af0, 0x8302)},
+ {USB_DEVICE(0x0af0, 0x8304)},
+ {USB_DEVICE(0x0af0, 0x8400)},
{USB_DEVICE(0x0af0, 0xd035)},
{USB_DEVICE(0x0af0, 0xd055)},
{USB_DEVICE(0x0af0, 0xd155)},
@@ -473,6 +483,8 @@ static const struct usb_device_id hso_ids[] = {
{USB_DEVICE(0x0af0, 0xd157)},
{USB_DEVICE(0x0af0, 0xd257)},
{USB_DEVICE(0x0af0, 0xd357)},
+ {USB_DEVICE(0x0af0, 0xd058)},
+ {USB_DEVICE(0x0af0, 0xc100)},
{}
};
MODULE_DEVICE_TABLE(usb, hso_ids);
@@ -655,8 +667,8 @@ static void set_serial_by_index(unsigned index, struct hso_serial *serial)
spin_unlock_irqrestore(&serial_table_lock, flags);
}
-/* log a meaningful explanation of an USB status */
-static void log_usb_status(int status, const char *function)
+static void handle_usb_error(int status, const char *function,
+ struct hso_device *hso_dev)
{
char *explanation;
@@ -685,10 +697,20 @@ static void log_usb_status(int status, const char *function)
case -EMSGSIZE:
explanation = "internal error";
break;
+ case -EILSEQ:
+ case -EPROTO:
+ case -ETIME:
+ case -ETIMEDOUT:
+ explanation = "protocol error";
+ if (hso_dev)
+ schedule_work(&hso_dev->reset_device);
+ break;
default:
explanation = "unknown status";
break;
}
+
+ /* log a meaningful explanation of an USB status */
D1("%s: received USB status - %s (%d)", function, explanation, status);
}
@@ -762,7 +784,7 @@ static void write_bulk_callback(struct urb *urb)
/* log status, but don't act on it, we don't need to resubmit anything
* anyhow */
if (status)
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
hso_put_activity(odev->parent);
@@ -806,7 +828,7 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC);
if (result) {
dev_warn(&odev->parent->interface->dev,
- "failed mux_bulk_tx_urb %d", result);
+ "failed mux_bulk_tx_urb %d\n", result);
net->stats.tx_errors++;
netif_start_queue(net);
} else {
@@ -998,7 +1020,7 @@ static void read_bulk_callback(struct urb *urb)
/* is al ok? (Filip: Who's Al ?) */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, odev->parent);
return;
}
@@ -1019,7 +1041,8 @@ static void read_bulk_callback(struct urb *urb)
if (odev->parent->port_spec & HSO_INFO_CRC_BUG) {
u32 rest;
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
- rest = urb->actual_length % odev->in_endp->wMaxPacketSize;
+ rest = urb->actual_length %
+ le16_to_cpu(odev->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1053,7 +1076,7 @@ static void read_bulk_callback(struct urb *urb)
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_warn(&odev->parent->interface->dev,
- "%s failed submit mux_bulk_rx_urb %d", __func__,
+ "%s failed submit mux_bulk_rx_urb %d\n", __func__,
result);
}
@@ -1207,7 +1230,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
D1("serial == NULL");
return;
} else if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
@@ -1225,7 +1248,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
rest =
urb->actual_length %
- serial->in_endp->wMaxPacketSize;
+ le16_to_cpu(serial->in_endp->wMaxPacketSize);
if (((rest == 5) || (rest == 6)) &&
!memcmp(((u8 *) urb->transfer_buffer) +
urb->actual_length - 4, crc_check, 4)) {
@@ -1513,7 +1536,7 @@ static void tiocmget_intr_callback(struct urb *urb)
if (!serial)
return;
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
return;
}
tiocmget = serial->tiocmget;
@@ -1700,6 +1723,10 @@ static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file,
D1("no tty structures");
return -EINVAL;
}
+
+ if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM)
+ return -EINVAL;
+
if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -1838,7 +1865,7 @@ static int mux_device_request(struct hso_serial *serial, u8 type, u16 port,
result = usb_submit_urb(ctrl_urb, GFP_ATOMIC);
if (result) {
dev_err(&ctrl_urb->dev->dev,
- "%s failed submit ctrl_urb %d type %d", __func__,
+ "%s failed submit ctrl_urb %d type %d\n", __func__,
result, type);
return result;
}
@@ -1888,7 +1915,7 @@ static void intr_callback(struct urb *urb)
/* status check */
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, NULL);
return;
}
D4("\n--- Got intr callback 0x%02X ---", status);
@@ -1905,18 +1932,18 @@ static void intr_callback(struct urb *urb)
if (serial != NULL) {
D1("Pending read interrupt on port %d\n", i);
spin_lock(&serial->serial_lock);
- if (serial->rx_state == RX_IDLE) {
+ if (serial->rx_state == RX_IDLE &&
+ serial->open_count > 0) {
/* Setup and send a ctrl req read on
* port i */
- if (!serial->rx_urb_filled[0]) {
+ if (!serial->rx_urb_filled[0]) {
serial->rx_state = RX_SENT;
hso_mux_serial_read(serial);
} else
serial->rx_state = RX_PENDING;
-
} else {
- D1("Already pending a read on "
- "port %d\n", i);
+ D1("Already a read pending on "
+ "port %d or port not open\n", i);
}
spin_unlock(&serial->serial_lock);
}
@@ -1958,7 +1985,7 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2014,7 +2041,7 @@ static void ctrl_callback(struct urb *urb)
tty = tty_kref_get(serial->tty);
spin_unlock(&serial->serial_lock);
if (status) {
- log_usb_status(status, __func__);
+ handle_usb_error(status, __func__, serial->parent);
tty_kref_put(tty);
return;
}
@@ -2358,12 +2385,12 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
serial->tx_data_length = tx_size;
serial->tx_data = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_data) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL);
if (!serial->tx_buffer) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
goto exit;
}
@@ -2391,6 +2418,7 @@ static struct hso_device *hso_create_device(struct usb_interface *intf,
INIT_WORK(&hso_dev->async_get_intf, async_get_intf);
INIT_WORK(&hso_dev->async_put_intf, async_put_intf);
+ INIT_WORK(&hso_dev->reset_device, reset_device);
return hso_dev;
}
@@ -2831,13 +2859,14 @@ struct hso_shared_int *hso_create_shared_int(struct usb_interface *interface)
mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!mux->shared_intr_urb) {
- dev_err(&interface->dev, "Could not allocate intr urb?");
+ dev_err(&interface->dev, "Could not allocate intr urb?\n");
goto exit;
}
- mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize,
- GFP_KERNEL);
+ mux->shared_intr_buf =
+ kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize),
+ GFP_KERNEL);
if (!mux->shared_intr_buf) {
- dev_err(&interface->dev, "Could not allocate intr buf?");
+ dev_err(&interface->dev, "Could not allocate intr buf?\n");
goto exit;
}
@@ -3132,6 +3161,26 @@ out:
return result;
}
+static void reset_device(struct work_struct *data)
+{
+ struct hso_device *hso_dev =
+ container_of(data, struct hso_device, reset_device);
+ struct usb_device *usb = hso_dev->usb;
+ int result;
+
+ if (hso_dev->usb_gone) {
+ D1("No reset during disconnect\n");
+ } else {
+ result = usb_lock_device_for_reset(usb, hso_dev->interface);
+ if (result < 0)
+ D1("unable to lock device for reset: %d\n", result);
+ else {
+ usb_reset_device(usb);
+ usb_unlock_device(usb);
+ }
+ }
+}
+
static void hso_serial_ref_free(struct kref *ref)
{
struct hso_device *hso_dev = container_of(ref, struct hso_device, ref);
@@ -3232,13 +3281,13 @@ static int hso_mux_submit_intr_urb(struct hso_shared_int *shared_int,
usb_rcvintpipe(usb,
shared_int->intr_endp->bEndpointAddress & 0x7F),
shared_int->shared_intr_buf,
- shared_int->intr_endp->wMaxPacketSize,
+ 1,
intr_callback, shared_int,
shared_int->intr_endp->bInterval);
result = usb_submit_urb(shared_int->shared_intr_urb, gfp);
if (result)
- dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__,
+ dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__,
result);
return result;
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index f14d225404da..21ac103fbb71 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -270,7 +270,7 @@ static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT) {
+ if (i <= MII_TIMEOUT) {
get_registers(dev, PHYDAT, 2, data);
*reg = data[0] | (data[1] << 8);
return 0;
@@ -295,7 +295,7 @@ static int write_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 reg)
get_registers(dev, PHYCNT, 1, data);
} while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT));
- if (i < MII_TIMEOUT)
+ if (i <= MII_TIMEOUT)
return 0;
else
return 1;
@@ -313,20 +313,17 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
rtl8150_t *dev = netdev_priv(netdev);
- int i;
if (netif_running(netdev))
return -EBUSY;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dbg("%s: Setting MAC address to ", netdev->name);
- for (i = 0; i < 5; i++)
- dbg("%02X:", netdev->dev_addr[i]);
- dbg("%02X\n", netdev->dev_addr[i]);
+ dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
#ifdef EEPROM_WRITE
{
+ int i;
u8 cr;
/* Get the CR contents. */
get_registers(dev, CR, 1, &cr);
diff --git a/drivers/net/vbus-enet.c b/drivers/net/vbus-enet.c
new file mode 100644
index 000000000000..94b86d482cee
--- /dev/null
+++ b/drivers/net/vbus-enet.c
@@ -0,0 +1,1560 @@
+/*
+ * vbus_enet - A virtualized 802.x network device based on the VBUS interface
+ *
+ * Copyright (C) 2009 Novell, Gregory Haskins <ghaskins@novell.com>
+ *
+ * Derived from the SNULL example from the book "Linux Device Drivers" by
+ * Alessandro Rubini, Jonathan Corbet, and Greg Kroah-Hartman, published
+ * by O'Reilly & Associates.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+
+#include <linux/in.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ioq.h>
+#include <linux/vbus_driver.h>
+
+#include <linux/in6.h>
+#include <asm/checksum.h>
+
+#include <linux/venet.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("virtual-ethernet");
+MODULE_VERSION("1");
+
+static int rx_ringlen = 256;
+module_param(rx_ringlen, int, 0444);
+static int tx_ringlen = 256;
+module_param(tx_ringlen, int, 0444);
+static int sg_enabled = 1;
+module_param(sg_enabled, int, 0444);
+
+#define PDEBUG(_dev, fmt, args...) dev_dbg(&(_dev)->dev, fmt, ## args)
+
+#define SG_DESC_SIZE VSG_DESC_SIZE(MAX_SKB_FRAGS)
+
+struct vbus_enet_queue {
+ struct ioq *queue;
+ struct ioq_notifier notifier;
+ unsigned long count;
+};
+
+struct vbus_enet_priv {
+ spinlock_t lock;
+ struct net_device *dev;
+ struct vbus_device_proxy *vdev;
+ struct napi_struct napi;
+ struct vbus_enet_queue rxq;
+ struct {
+ struct vbus_enet_queue veq;
+ struct tasklet_struct task;
+ struct sk_buff_head outstanding;
+ } tx;
+ bool sg;
+ struct {
+ bool enabled;
+ char *pool;
+ } pmtd; /* pre-mapped transmit descriptors */
+ struct {
+ bool enabled;
+ bool linkstate;
+ bool txc;
+ unsigned long evsize;
+ struct vbus_enet_queue veq;
+ struct tasklet_struct task;
+ char *pool;
+ } evq;
+ struct {
+ bool available;
+ char *pool;
+ struct vbus_enet_queue pageq;
+ } l4ro;
+
+ struct sk_buff *(*import)(struct vbus_enet_priv *priv,
+ struct ioq_ring_desc *desc);
+};
+
+static void vbus_enet_tx_reap(struct vbus_enet_priv *priv);
+
+static struct vbus_enet_priv *
+napi_to_priv(struct napi_struct *napi)
+{
+ return container_of(napi, struct vbus_enet_priv, napi);
+}
+
+static int
+queue_init(struct vbus_enet_priv *priv,
+ struct vbus_enet_queue *q,
+ const char *name,
+ int qid,
+ size_t ringsize,
+ void (*func)(struct ioq_notifier *))
+{
+ struct vbus_device_proxy *dev = priv->vdev;
+ int ret;
+ char _name[64];
+
+ if (name)
+ snprintf(_name, sizeof(_name), "%s-%s", priv->dev->name, name);
+
+ ret = vbus_driver_ioq_alloc(dev, name ? _name : NULL, qid, 0,
+ ringsize, &q->queue);
+ if (ret < 0)
+ panic("ioq_alloc failed: %d\n", ret);
+
+ if (func) {
+ q->notifier.signal = func;
+ q->queue->notifier = &q->notifier;
+ }
+
+ q->count = ringsize;
+
+ return 0;
+}
+
+static int
+devcall(struct vbus_enet_priv *priv, u32 func, void *data, size_t len)
+{
+ struct vbus_device_proxy *dev = priv->vdev;
+
+ return dev->ops->call(dev, func, data, len, 0);
+}
+
+/*
+ * ---------------
+ * rx descriptors
+ * ---------------
+ */
+
+static void
+rxdesc_alloc(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc, size_t len)
+{
+ struct net_device *dev = priv->dev;
+ struct sk_buff *skb;
+
+ len += ETH_HLEN;
+
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
+ BUG_ON(!skb);
+
+ skb_reserve(skb, NET_IP_ALIGN); /* align IP on 16B boundary */
+
+ if (priv->l4ro.available) {
+ /*
+ * We will populate an SG descriptor initially with one
+ * IOV filled with an MTU SKB. If the packet needs to be
+ * larger than MTU, the host will grab pages out of the
+ * page-queue and populate additional IOVs
+ */
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)desc->cookie;
+ struct venet_iov *iov = &vsg->iov[0];
+
+ memset(vsg, 0, SG_DESC_SIZE);
+
+ vsg->cookie = (u64)(unsigned long)skb;
+ vsg->count = 1;
+
+ iov->ptr = (u64)__pa(skb->data);
+ iov->len = len;
+ } else {
+ desc->cookie = (u64)(unsigned long)skb;
+ desc->ptr = cpu_to_le64(__pa(skb->data));
+ desc->len = cpu_to_le64(len); /* total length */
+ }
+
+ desc->valid = 1;
+}
+
+static void
+rx_pageq_refill(struct vbus_enet_priv *priv, gfp_t gfp_mask)
+{
+ struct ioq *ioq = priv->l4ro.pageq.queue;
+ struct ioq_iterator iter;
+ int ret, added = 0;
+
+ if (ioq_full(ioq, ioq_idxtype_inuse))
+ /* nothing to do if the pageq is already fully populated */
+ return;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0); /* will never fail unless seriously broken */
+
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty page
+ */
+ while (!iter.desc->sown) {
+ struct page *page = NULL;
+
+ page = alloc_page(gfp_mask);
+
+ if (!page)
+ break;
+
+ added = 1;
+ iter.desc->cookie = (u64)(unsigned long)page;
+ iter.desc->ptr = cpu_to_le64(__pa(page_address(page)));
+ iter.desc->len = cpu_to_le64(PAGE_SIZE);
+
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ if (added)
+ ioq_signal(ioq, 0);
+}
+
+static void
+rx_setup(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->rxq.queue;
+ struct ioq_iterator iter;
+ int ret;
+ int i = 0;
+
+ /*
+ * We want to iterate on the "valid" index. By default the iterator
+ * will not "autoupdate" which means it will not hypercall the host
+ * with our changes. This is good, because we are really just
+ * initializing stuff here anyway. Note that you can always manually
+ * signal the host with ioq_signal() if the autoupdate feature is not
+ * used.
+ */
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0); /* will never fail unless seriously broken */
+
+ /*
+ * Seek to the tail of the valid index (which should be our first
+ * item, since the queue is brand-new)
+ */
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty buffer and mark it valid
+ */
+ while (!iter.desc->valid) {
+ if (priv->l4ro.available) {
+ size_t offset = (i * SG_DESC_SIZE);
+ void *addr = &priv->l4ro.pool[offset];
+
+ iter.desc->ptr = cpu_to_le64(offset);
+ iter.desc->cookie = (u64)(unsigned long)addr;
+ iter.desc->len = cpu_to_le64(SG_DESC_SIZE);
+ }
+
+ rxdesc_alloc(priv, iter.desc, priv->dev->mtu);
+
+ /*
+ * This push operation will simultaneously advance the
+ * valid-head index and increment our position in the queue
+ * by one.
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+
+ i++;
+ }
+
+ if (priv->l4ro.available)
+ rx_pageq_refill(priv, GFP_KERNEL);
+}
+
+static void
+rx_rxq_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->rxq.queue;
+ struct ioq_iterator iter;
+ int ret;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->valid) {
+ struct sk_buff *skb;
+
+ if (priv->l4ro.available) {
+ struct venet_sg *vsg;
+ int i;
+
+ vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+
+ /* skip i=0, since that is the skb->data IOV */
+ for (i = 1; i < vsg->count; i++) {
+ struct venet_iov *iov = &vsg->iov[i];
+ struct page *page = (struct page *)(unsigned long)iov->ptr;
+
+ put_page(page);
+ }
+
+ skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ } else
+ skb = (struct sk_buff *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ dev_kfree_skb(skb);
+ }
+}
+
+static void
+rx_l4ro_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->l4ro.pageq.queue;
+ struct ioq_iterator iter;
+ int ret;
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->sown) {
+ struct page *page = (struct page *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ put_page(page);
+ }
+
+ ioq_put(ioq);
+ kfree(priv->l4ro.pool);
+}
+
+static void
+rx_teardown(struct vbus_enet_priv *priv)
+{
+ rx_rxq_teardown(priv);
+
+ if (priv->l4ro.available)
+ rx_l4ro_teardown(priv);
+}
+
+static int
+tx_setup(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->tx.veq.queue;
+ struct ioq_iterator iter;
+ int i;
+ int ret;
+
+ if (!priv->sg)
+ /*
+ * There is nothing to do for a ring that is not using
+ * scatter-gather
+ */
+ return 0;
+
+ /* pre-allocate our descriptor pool if pmtd is enabled */
+ if (priv->pmtd.enabled) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ size_t poollen = SG_DESC_SIZE * priv->tx.veq.count;
+ char *pool;
+ int shmid;
+
+ /* pmtdquery will return the shm-id to use for the pool */
+ ret = devcall(priv, VENET_FUNC_PMTDQUERY, NULL, 0);
+ BUG_ON(ret < 0);
+
+ shmid = ret;
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ priv->pmtd.pool = pool;
+
+ ret = dev->ops->shm(dev, NULL, shmid, 0, pool, poollen,
+ NULL, NULL, 0);
+ BUG_ON(ret < 0);
+ }
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty SG descriptor
+ */
+ for (i = 0; i < priv->tx.veq.count; i++) {
+ struct venet_sg *vsg;
+
+ if (priv->pmtd.enabled) {
+ size_t offset = (i * SG_DESC_SIZE);
+
+ vsg = (struct venet_sg *)&priv->pmtd.pool[offset];
+ iter.desc->ptr = cpu_to_le64(offset);
+ } else {
+ vsg = kzalloc(SG_DESC_SIZE, GFP_KERNEL);
+ if (!vsg)
+ return -ENOMEM;
+
+ iter.desc->ptr = cpu_to_le64(__pa(vsg));
+ }
+
+ iter.desc->cookie = (u64)(unsigned long)vsg;
+ iter.desc->len = cpu_to_le64(SG_DESC_SIZE);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_next, 0, 0);
+ BUG_ON(ret < 0);
+ }
+
+ return 0;
+}
+
+static void
+tx_teardown(struct vbus_enet_priv *priv)
+{
+ struct ioq *ioq = priv->tx.veq.queue;
+ struct ioq_iterator iter;
+ struct sk_buff *skb;
+ int ret;
+
+ /* forcefully free all outstanding transmissions */
+ while ((skb = __skb_dequeue(&priv->tx.outstanding)))
+ dev_kfree_skb(skb);
+
+ if (!priv->sg)
+ /*
+ * There is nothing else to do for a ring that is not using
+ * scatter-gather
+ */
+ return;
+
+ if (priv->pmtd.enabled) {
+ /*
+ * PMTD mode means we only need to free the pool
+ */
+ kfree(priv->pmtd.pool);
+ return;
+ }
+
+ ret = ioq_iter_init(ioq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ /* seek to position 0 */
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * free each valid descriptor
+ */
+ while (iter.desc->cookie) {
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+
+ iter.desc->valid = 0;
+ wmb();
+
+ iter.desc->ptr = 0;
+ iter.desc->cookie = 0;
+
+ ret = ioq_iter_seek(&iter, ioq_seek_next, 0, 0);
+ BUG_ON(ret < 0);
+
+ kfree(vsg);
+ }
+}
+
+static void
+evq_teardown(struct vbus_enet_priv *priv)
+{
+ if (!priv->evq.enabled)
+ return;
+
+ ioq_put(priv->evq.veq.queue);
+ kfree(priv->evq.pool);
+}
+
+/*
+ * Open and close
+ */
+
+static int
+vbus_enet_open(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = devcall(priv, VENET_FUNC_LINKUP, NULL, 0);
+ BUG_ON(ret < 0);
+
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static int
+vbus_enet_stop(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ napi_disable(&priv->napi);
+
+ ret = devcall(priv, VENET_FUNC_LINKDOWN, NULL, 0);
+ BUG_ON(ret < 0);
+
+ return 0;
+}
+
+/*
+ * Configuration changes (passed on by ifconfig)
+ */
+static int
+vbus_enet_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP) /* can't act on a running interface */
+ return -EBUSY;
+
+ /* Don't allow changing the I/O address */
+ if (map->base_addr != dev->base_addr) {
+ dev_warn(&dev->dev, "Can't change I/O address\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* ignore other fields */
+ return 0;
+}
+
+static void
+vbus_enet_schedule_rx(struct vbus_enet_priv *priv)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable further interrupts */
+ ioq_notify_disable(priv->rxq.queue, 0);
+ __napi_schedule(&priv->napi);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int
+vbus_enet_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ dev->mtu = new_mtu;
+
+ /*
+ * FLUSHRX will cause the device to flush any outstanding
+ * RX buffers. They will appear to come in as 0 length
+ * packets which we can simply discard and replace with new_mtu
+ * buffers for the future.
+ */
+ ret = devcall(priv, VENET_FUNC_FLUSHRX, NULL, 0);
+ BUG_ON(ret < 0);
+
+ vbus_enet_schedule_rx(priv);
+
+ return 0;
+}
+
+static struct sk_buff *
+vbus_enet_l4ro_import(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc)
+{
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)desc->cookie;
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ int i;
+
+ rx_pageq_refill(priv, GFP_ATOMIC);
+
+ if (!vsg->len)
+ /*
+ * the device may send a zero-length packet when its
+ * flushing references on the ring. We can just drop
+ * these on the floor
+ */
+ goto fail;
+
+ /* advance only by the linear portion in IOV[0] */
+ skb_put(skb, vsg->iov[0].len);
+
+ /* skip i=0, since that is the skb->data IOV */
+ for (i = 1; i < vsg->count; i++) {
+ struct venet_iov *iov = &vsg->iov[i];
+ struct page *page = (struct page *)(unsigned long)iov->ptr;
+ skb_frag_t *f = &sinfo->frags[i-1];
+
+ f->page = page;
+ f->page_offset = 0;
+ f->size = iov->len;
+
+ PDEBUG(priv->dev, "SG: Importing %d byte page[%i]\n",
+ f->size, i);
+
+ skb->data_len += f->size;
+ skb->len += f->size;
+ skb->truesize += f->size;
+ sinfo->nr_frags++;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_NEEDS_CSUM
+ && !skb_partial_csum_set(skb, vsg->csum.start,
+ vsg->csum.offset)) {
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_GSO) {
+ PDEBUG(priv->dev, "L4RO packet detected\n");
+
+ switch (vsg->gso.type) {
+ case VENET_GSO_TYPE_TCPV4:
+ sinfo->gso_type = SKB_GSO_TCPV4;
+ break;
+ case VENET_GSO_TYPE_TCPV6:
+ sinfo->gso_type = SKB_GSO_TCPV6;
+ break;
+ case VENET_GSO_TYPE_UDP:
+ sinfo->gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ PDEBUG(priv->dev, "Illegal L4RO type: %d\n",
+ vsg->gso.type);
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ if (vsg->flags & VENET_SG_FLAG_ECN)
+ sinfo->gso_type |= SKB_GSO_TCP_ECN;
+
+ sinfo->gso_size = vsg->gso.size;
+ if (sinfo->gso_size == 0) {
+ PDEBUG(priv->dev, "Illegal L4RO size: %d\n",
+ vsg->gso.size);
+ priv->dev->stats.rx_frame_errors++;
+ goto fail;
+ }
+
+ /*
+ * Header must be checked, and gso_segs
+ * computed.
+ */
+ sinfo->gso_type |= SKB_GSO_DODGY;
+ sinfo->gso_segs = 0;
+ }
+
+ return skb;
+
+fail:
+ dev_kfree_skb(skb);
+
+ return NULL;
+}
+
+static struct sk_buff *
+vbus_enet_flat_import(struct vbus_enet_priv *priv, struct ioq_ring_desc *desc)
+{
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->cookie;
+
+ if (!desc->len) {
+ /*
+ * the device may send a zero-length packet when its
+ * flushing references on the ring. We can just drop
+ * these on the floor
+ */
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ skb_put(skb, le64_to_cpu(desc->len));
+
+ return skb;
+}
+
+/*
+ * The poll implementation.
+ */
+static int
+vbus_enet_poll(struct napi_struct *napi, int budget)
+{
+ struct vbus_enet_priv *priv = napi_to_priv(napi);
+ int npackets = 0;
+ struct ioq_iterator iter;
+ int ret;
+
+ PDEBUG(priv->dev, "polling...\n");
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(priv->rxq.queue, &iter, ioq_idxtype_inuse,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * We stop if we have met the quota or there are no more packets.
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side
+ */
+ while ((npackets < budget) && (!iter.desc->sown)) {
+ struct sk_buff *skb;
+
+ skb = priv->import(priv, iter.desc);
+ if (skb) {
+ /* Maintain stats */
+ npackets++;
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += skb->len;
+
+ /* Pass the buffer up to the stack */
+ skb->dev = priv->dev;
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ netif_receive_skb(skb);
+
+ mb();
+ }
+
+ /* Grab a new buffer to put in the ring */
+ rxdesc_alloc(priv, iter.desc, priv->dev->mtu);
+
+ /* Advance the in-use tail */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ PDEBUG(priv->dev, "%d packets received\n", npackets);
+
+ /*
+ * If we processed all packets, we're done; tell the kernel and
+ * reenable ints
+ */
+ if (ioq_empty(priv->rxq.queue, ioq_idxtype_inuse)) {
+ napi_complete(napi);
+ ioq_notify_enable(priv->rxq.queue, 0);
+ ret = 0;
+ } else
+ /* We couldn't process everything. */
+ ret = 1;
+
+ return ret;
+}
+
+/*
+ * Transmit a packet (called by the kernel)
+ */
+static int
+vbus_enet_tx_start(struct sk_buff *skb, struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+ struct ioq_iterator iter;
+ int ret;
+ unsigned long flags;
+
+ PDEBUG(priv->dev, "sending %d bytes\n", skb->len);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ /*
+ * We must flow-control the kernel by disabling the
+ * queue
+ */
+ spin_unlock_irqrestore(&priv->lock, flags);
+ netif_stop_queue(dev);
+ dev_err(&priv->dev->dev, "tx on full queue bug\n");
+ return 1;
+ }
+
+ /*
+ * We want to iterate on the tail of both the "inuse" and "valid" index
+ * so we specify the "both" index
+ */
+ ret = ioq_iter_init(priv->tx.veq.queue, &iter, ioq_idxtype_both,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+ BUG_ON(iter.desc->sown);
+
+ if (priv->sg) {
+ struct venet_sg *vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+ struct scatterlist sgl[MAX_SKB_FRAGS+1];
+ struct scatterlist *sg;
+ int count, maxcount = ARRAY_SIZE(sgl);
+
+ sg_init_table(sgl, maxcount);
+
+ memset(vsg, 0, sizeof(*vsg));
+
+ vsg->cookie = (u64)(unsigned long)skb;
+ vsg->len = skb->len;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vsg->flags |= VENET_SG_FLAG_NEEDS_CSUM;
+ vsg->csum.start = skb->csum_start - skb_headroom(skb);
+ vsg->csum.offset = skb->csum_offset;
+ }
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ vsg->flags |= VENET_SG_FLAG_GSO;
+
+ vsg->gso.hdrlen = skb_headlen(skb);
+ vsg->gso.size = sinfo->gso_size;
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ vsg->gso.type = VENET_GSO_TYPE_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vsg->gso.type = VENET_GSO_TYPE_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vsg->gso.type = VENET_GSO_TYPE_UDP;
+ else
+ panic("Virtual-Ethernet: unknown GSO type " \
+ "0x%x\n", sinfo->gso_type);
+
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ vsg->flags |= VENET_SG_FLAG_ECN;
+ }
+
+ count = skb_to_sgvec(skb, sgl, 0, skb->len);
+
+ BUG_ON(count > maxcount);
+
+ for (sg = &sgl[0]; sg; sg = sg_next(sg)) {
+ struct venet_iov *iov = &vsg->iov[vsg->count++];
+
+ iov->len = sg->length;
+ iov->ptr = (u64)sg_phys(sg);
+ }
+
+ iter.desc->len = cpu_to_le64(VSG_DESC_SIZE(vsg->count));
+
+ } else {
+ /*
+ * non scatter-gather mode: simply put the skb right onto the
+ * ring.
+ */
+ iter.desc->cookie = (u64)(unsigned long)skb;
+ iter.desc->len = cpu_to_le64(skb->len);
+ iter.desc->ptr = cpu_to_le64(__pa(skb->data));
+ }
+
+ iter.desc->valid = 1;
+
+ priv->dev->stats.tx_packets++;
+ priv->dev->stats.tx_bytes += skb->len;
+
+ skb_queue_tail(&priv->tx.outstanding, skb);
+
+ /*
+ * This advances both indexes together implicitly, and then
+ * signals the south side to consume the packet
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+
+ dev->trans_start = jiffies; /* save the timestamp */
+
+ if (ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ /*
+ * If the queue is congested, we must flow-control the kernel
+ */
+ PDEBUG(priv->dev, "backpressure tx queue\n");
+ netif_stop_queue(dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return 0;
+}
+
+/* assumes priv->lock held */
+static void
+vbus_enet_skb_complete(struct vbus_enet_priv *priv, struct sk_buff *skb)
+{
+ PDEBUG(priv->dev, "completed sending %d bytes\n",
+ skb->len);
+
+ skb_unlink(skb, &priv->tx.outstanding);
+ dev_kfree_skb(skb);
+}
+
+/*
+ * reclaim any outstanding completed tx packets
+ *
+ * assumes priv->lock held
+ */
+static struct sk_buff *
+vbus_enet_tx_reap_one(struct vbus_enet_priv *priv)
+{
+ struct sk_buff *skb = NULL;
+ struct ioq_iterator iter;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /*
+ * We want to iterate on the head of the valid index, but we
+ * do not want the iter_pop (below) to flip the ownership, so
+ * we set the NOFLIPOWNER option
+ */
+ ret = ioq_iter_init(priv->tx.veq.queue, &iter, ioq_idxtype_valid,
+ IOQ_ITER_NOFLIPOWNER);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ if (iter.desc->valid && !iter.desc->sown) {
+
+ if (priv->sg) {
+ struct venet_sg *vsg;
+
+ vsg = (struct venet_sg *)(unsigned long)iter.desc->cookie;
+ skb = (struct sk_buff *)(unsigned long)vsg->cookie;
+ } else
+ skb = (struct sk_buff *)(unsigned long)iter.desc->cookie;
+
+ /* Reset the descriptor */
+ iter.desc->valid = 0;
+
+ /* Advance the valid-index head */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ /*
+ * If we were previously stopped due to flow control, restart the
+ * processing
+ */
+ if (netif_queue_stopped(priv->dev)
+ && !ioq_full(priv->tx.veq.queue, ioq_idxtype_valid)) {
+ PDEBUG(priv->dev, "re-enabling tx queue\n");
+ netif_wake_queue(priv->dev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return skb;
+}
+
+static void
+vbus_enet_tx_reap(struct vbus_enet_priv *priv)
+{
+ struct sk_buff *skb;
+
+ while ((skb = vbus_enet_tx_reap_one(priv))) {
+ if (!priv->evq.txc)
+ /*
+ * We are responsible for freeing the packet upon
+ * reap if TXC is not enabled
+ */
+ vbus_enet_skb_complete(priv, skb);
+ }
+}
+
+static void
+vbus_enet_timeout(struct net_device *dev)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+
+ dev_dbg(&dev->dev, "Transmit timeout\n");
+
+ vbus_enet_tx_reap(priv);
+}
+
+static void
+rx_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+ struct net_device *dev;
+
+ priv = container_of(notifier, struct vbus_enet_priv, rxq.notifier);
+ dev = priv->dev;
+
+ if (!ioq_empty(priv->rxq.queue, ioq_idxtype_inuse))
+ vbus_enet_schedule_rx(priv);
+}
+
+static void
+deferred_tx_isr(unsigned long data)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)data;
+
+ PDEBUG(priv->dev, "deferred_tx_isr\n");
+
+ vbus_enet_tx_reap(priv);
+
+ ioq_notify_enable(priv->tx.veq.queue, 0);
+}
+
+static void
+tx_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+
+ priv = container_of(notifier, struct vbus_enet_priv, tx.veq.notifier);
+
+ PDEBUG(priv->dev, "tx_isr\n");
+
+ ioq_notify_disable(priv->tx.veq.queue, 0);
+ tasklet_schedule(&priv->tx.task);
+}
+
+static void
+evq_linkstate_event(struct vbus_enet_priv *priv,
+ struct venet_event_header *header)
+{
+ struct venet_event_linkstate *event =
+ (struct venet_event_linkstate *)header;
+
+ switch (event->state) {
+ case 0:
+ netif_carrier_off(priv->dev);
+ break;
+ case 1:
+ netif_carrier_on(priv->dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+evq_txc_event(struct vbus_enet_priv *priv,
+ struct venet_event_header *header)
+{
+ struct venet_event_txc *event =
+ (struct venet_event_txc *)header;
+
+ vbus_enet_tx_reap(priv);
+
+ vbus_enet_skb_complete(priv, (struct sk_buff *)(unsigned long)event->cookie);
+}
+
+static void
+deferred_evq_isr(unsigned long data)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)data;
+ int nevents = 0;
+ struct ioq_iterator iter;
+ int ret;
+
+ PDEBUG(priv->dev, "evq: polling...\n");
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(priv->evq.veq.queue, &iter, ioq_idxtype_inuse,
+ IOQ_ITER_AUTOUPDATE);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side
+ */
+ while (!iter.desc->sown) {
+ struct venet_event_header *header;
+
+ header = (struct venet_event_header *)(unsigned long)iter.desc->cookie;
+
+ switch (header->id) {
+ case VENET_EVENT_LINKSTATE:
+ evq_linkstate_event(priv, header);
+ break;
+ case VENET_EVENT_TXC:
+ evq_txc_event(priv, header);
+ break;
+ default:
+ panic("venet: unexpected event id:%d of size %d\n",
+ header->id, header->size);
+ break;
+ }
+
+ memset((void *)(unsigned long)iter.desc->cookie, 0, priv->evq.evsize);
+
+ /* Advance the in-use tail */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ nevents++;
+ }
+
+ PDEBUG(priv->dev, "%d events received\n", nevents);
+
+ ioq_notify_enable(priv->evq.veq.queue, 0);
+}
+
+static void
+evq_isr(struct ioq_notifier *notifier)
+{
+ struct vbus_enet_priv *priv;
+
+ priv = container_of(notifier, struct vbus_enet_priv, evq.veq.notifier);
+
+ PDEBUG(priv->dev, "evq_isr\n");
+
+ ioq_notify_disable(priv->evq.veq.queue, 0);
+ tasklet_schedule(&priv->evq.task);
+}
+
+static int
+vbus_enet_sg_negcap(struct vbus_enet_priv *priv)
+{
+ struct net_device *dev = priv->dev;
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ if (sg_enabled) {
+ caps.gid = VENET_CAP_GROUP_SG;
+ caps.bits |= (VENET_CAP_SG|VENET_CAP_TSO4|VENET_CAP_TSO6
+ |VENET_CAP_ECN|VENET_CAP_PMTD);
+ /* note: exclude UFO for now due to stack bug */
+ }
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0)
+ return ret;
+
+ if (caps.bits & VENET_CAP_SG) {
+ priv->sg = true;
+
+ dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM|NETIF_F_FRAGLIST;
+
+ if (caps.bits & VENET_CAP_TSO4)
+ dev->features |= NETIF_F_TSO;
+ if (caps.bits & VENET_CAP_UFO)
+ dev->features |= NETIF_F_UFO;
+ if (caps.bits & VENET_CAP_TSO6)
+ dev->features |= NETIF_F_TSO6;
+ if (caps.bits & VENET_CAP_ECN)
+ dev->features |= NETIF_F_TSO_ECN;
+
+ if (caps.bits & VENET_CAP_PMTD)
+ priv->pmtd.enabled = true;
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_evq_negcap(struct vbus_enet_priv *priv, unsigned long count)
+{
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ caps.gid = VENET_CAP_GROUP_EVENTQ;
+ caps.bits |= VENET_CAP_EVQ_LINKSTATE;
+ caps.bits |= VENET_CAP_EVQ_TXC;
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0)
+ return ret;
+
+ if (caps.bits) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ struct venet_eventq_query query;
+ size_t poollen;
+ struct ioq_iterator iter;
+ char *pool;
+ int i;
+
+ priv->evq.enabled = true;
+
+ if (caps.bits & VENET_CAP_EVQ_LINKSTATE) {
+ /*
+ * We will assume there is no carrier until we get
+ * an event telling us otherwise
+ */
+ netif_carrier_off(priv->dev);
+ priv->evq.linkstate = true;
+ }
+
+ if (caps.bits & VENET_CAP_EVQ_TXC)
+ priv->evq.txc = true;
+
+ memset(&query, 0, sizeof(query));
+
+ ret = devcall(priv, VENET_FUNC_EVQQUERY, &query, sizeof(query));
+ if (ret < 0)
+ return ret;
+
+ priv->evq.evsize = query.evsize;
+ poollen = query.evsize * count;
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ priv->evq.pool = pool;
+
+ ret = dev->ops->shm(dev, NULL, query.dpid, 0,
+ pool, poollen, NULL, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ queue_init(priv, &priv->evq.veq, "evq",
+ query.qid, count, evq_isr);
+
+ ret = ioq_iter_init(priv->evq.veq.queue,
+ &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_set, 0, 0);
+ BUG_ON(ret < 0);
+
+ /* Now populate each descriptor with an empty event */
+ for (i = 0; i < count; i++) {
+ size_t offset = (i * query.evsize);
+ void *addr = &priv->evq.pool[offset];
+
+ iter.desc->ptr = cpu_to_le64(offset);
+ iter.desc->cookie = (u64)(unsigned long)addr;
+ iter.desc->len = cpu_to_le64(query.evsize);
+
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ /* Finally, enable interrupts */
+ tasklet_init(&priv->evq.task, deferred_evq_isr,
+ (unsigned long)priv);
+ ioq_notify_enable(priv->evq.veq.queue, 0);
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_l4ro_negcap(struct vbus_enet_priv *priv, unsigned long count)
+{
+ struct venet_capabilities caps;
+ int ret;
+
+ memset(&caps, 0, sizeof(caps));
+
+ caps.gid = VENET_CAP_GROUP_L4RO;
+ caps.bits |= (VENET_CAP_SG|VENET_CAP_TSO4|VENET_CAP_TSO6
+ |VENET_CAP_ECN);
+
+ ret = devcall(priv, VENET_FUNC_NEGCAP, &caps, sizeof(caps));
+ if (ret < 0) {
+ printk(KERN_ERR "Error negotiating L4RO: %d\n", ret);
+ return ret;
+ }
+
+ if (caps.bits & VENET_CAP_SG) {
+ struct vbus_device_proxy *dev = priv->vdev;
+ size_t poollen = SG_DESC_SIZE * count;
+ struct venet_l4ro_query query;
+ char *pool;
+
+ memset(&query, 0, sizeof(query));
+
+ ret = devcall(priv, VENET_FUNC_L4ROQUERY, &query, sizeof(query));
+ if (ret < 0) {
+ printk(KERN_ERR "Error querying L4RO: %d\n", ret);
+ return ret;
+ }
+
+ pool = kzalloc(poollen, GFP_KERNEL | GFP_DMA);
+ if (!pool)
+ return -ENOMEM;
+
+ /*
+ * pre-mapped descriptor pool
+ */
+ ret = dev->ops->shm(dev, NULL, query.dpid, 0,
+ pool, poollen, NULL, NULL, 0);
+ if (ret < 0) {
+ printk(KERN_ERR "Error registering L4RO pool: %d\n",
+ ret);
+ kfree(pool);
+ return ret;
+ }
+
+ /*
+ * page-queue: contains a ring of arbitrary pages for
+ * consumption by the host for when the SG::IOV count exceeds
+ * one MTU frame. All we need to do is keep it populated
+ * with free pages.
+ */
+ queue_init(priv, &priv->l4ro.pageq, "pageq", query.pqid,
+ count, NULL);
+
+ priv->l4ro.pool = pool;
+ priv->l4ro.available = true;
+ }
+
+ return 0;
+}
+
+static int
+vbus_enet_negcap(struct vbus_enet_priv *priv)
+{
+ int ret;
+
+ ret = vbus_enet_sg_negcap(priv);
+ if (ret < 0)
+ return ret;
+
+ ret = vbus_enet_evq_negcap(priv, tx_ringlen);
+ if (ret < 0)
+ return ret;
+
+ ret = vbus_enet_l4ro_negcap(priv, rx_ringlen);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int vbus_enet_set_tx_csum(struct net_device *dev, u32 data)
+{
+ struct vbus_enet_priv *priv = netdev_priv(dev);
+
+ if (data && !priv->sg)
+ return -ENOSYS;
+
+ return ethtool_op_set_tx_hw_csum(dev, data);
+}
+
+static struct ethtool_ops vbus_enet_ethtool_ops = {
+ .set_tx_csum = vbus_enet_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .set_tso = ethtool_op_set_tso,
+ .get_link = ethtool_op_get_link,
+};
+
+static const struct net_device_ops vbus_enet_netdev_ops = {
+ .ndo_open = vbus_enet_open,
+ .ndo_stop = vbus_enet_stop,
+ .ndo_set_config = vbus_enet_config,
+ .ndo_start_xmit = vbus_enet_tx_start,
+ .ndo_change_mtu = vbus_enet_change_mtu,
+ .ndo_tx_timeout = vbus_enet_timeout,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+/*
+ * This is called whenever a new vbus_device_proxy is added to the vbus
+ * with the matching VENET_ID
+ */
+static int
+vbus_enet_probe(struct vbus_device_proxy *vdev)
+{
+ struct net_device *dev;
+ struct vbus_enet_priv *priv;
+ int ret;
+
+ printk(KERN_INFO "VENET: Found new device at %lld\n", vdev->id);
+
+ ret = vdev->ops->open(vdev, VENET_VERSION, 0);
+ if (ret < 0)
+ return ret;
+
+ dev = alloc_etherdev(sizeof(struct vbus_enet_priv));
+ if (!dev)
+ return -ENOMEM;
+
+ /*
+ * establish our device-name early so we can incorporate it into
+ * the signal-path names, etc
+ */
+ rtnl_lock();
+
+ ret = dev_alloc_name(dev, dev->name);
+ if (ret < 0)
+ goto out_free;
+
+ priv = netdev_priv(dev);
+
+ spin_lock_init(&priv->lock);
+ priv->dev = dev;
+ priv->vdev = vdev;
+
+ ret = vbus_enet_negcap(priv);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: Error negotiating capabilities for " \
+ "%lld\n",
+ priv->vdev->id);
+ goto out_free;
+ }
+
+ if (priv->l4ro.available)
+ priv->import = &vbus_enet_l4ro_import;
+ else
+ priv->import = &vbus_enet_flat_import;
+
+ skb_queue_head_init(&priv->tx.outstanding);
+
+ queue_init(priv, &priv->rxq, "rx", VENET_QUEUE_RX, rx_ringlen,
+ rx_isr);
+ queue_init(priv, &priv->tx.veq, "tx", VENET_QUEUE_TX, tx_ringlen,
+ tx_isr);
+
+ rx_setup(priv);
+ tx_setup(priv);
+
+ ioq_notify_enable(priv->rxq.queue, 0); /* enable rx interrupts */
+
+ if (!priv->evq.txc) {
+ /*
+ * If the TXC feature is present, we will recieve our
+ * tx-complete notification via the event-channel. Therefore,
+ * we only enable txq interrupts if the TXC feature is not
+ * present.
+ */
+ tasklet_init(&priv->tx.task, deferred_tx_isr,
+ (unsigned long)priv);
+ ioq_notify_enable(priv->tx.veq.queue, 0);
+ }
+
+ dev->netdev_ops = &vbus_enet_netdev_ops;
+ dev->watchdog_timeo = 5 * HZ;
+ SET_ETHTOOL_OPS(dev, &vbus_enet_ethtool_ops);
+ SET_NETDEV_DEV(dev, &vdev->dev);
+
+ netif_napi_add(dev, &priv->napi, vbus_enet_poll, 128);
+
+ ret = devcall(priv, VENET_FUNC_MACQUERY, priv->dev->dev_addr, ETH_ALEN);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: Error obtaining MAC address for " \
+ "%lld\n",
+ priv->vdev->id);
+ goto out_free;
+ }
+
+ dev->features |= NETIF_F_HIGHDMA;
+
+ ret = register_netdevice(dev);
+ if (ret < 0) {
+ printk(KERN_INFO "VENET: error %i registering device \"%s\"\n",
+ ret, dev->name);
+ goto out_free;
+ }
+
+ rtnl_unlock();
+
+ vdev->priv = priv;
+
+ return 0;
+
+ out_free:
+ rtnl_unlock();
+
+ free_netdev(dev);
+
+ return ret;
+}
+
+static int
+vbus_enet_remove(struct vbus_device_proxy *vdev)
+{
+ struct vbus_enet_priv *priv = (struct vbus_enet_priv *)vdev->priv;
+ struct vbus_device_proxy *dev = priv->vdev;
+
+ unregister_netdev(priv->dev);
+ napi_disable(&priv->napi);
+
+ rx_teardown(priv);
+ ioq_put(priv->rxq.queue);
+
+ tx_teardown(priv);
+ ioq_put(priv->tx.veq.queue);
+
+ if (priv->evq.enabled)
+ evq_teardown(priv);
+
+ dev->ops->close(dev, 0);
+
+ free_netdev(priv->dev);
+
+ return 0;
+}
+
+/*
+ * Finally, the module stuff
+ */
+
+static struct vbus_driver_ops vbus_enet_driver_ops = {
+ .probe = vbus_enet_probe,
+ .remove = vbus_enet_remove,
+};
+
+static struct vbus_driver vbus_enet_driver = {
+ .type = VENET_TYPE,
+ .owner = THIS_MODULE,
+ .ops = &vbus_enet_driver_ops,
+};
+
+static __init int
+vbus_enet_init_module(void)
+{
+ printk(KERN_INFO "Virtual Ethernet: Copyright (C) 2009 Novell, Gregory Haskins\n");
+ printk(KERN_DEBUG "VENET: Using %d/%d queue depth\n",
+ rx_ringlen, tx_ringlen);
+ return vbus_driver_register(&vbus_enet_driver);
+}
+
+static __exit void
+vbus_enet_cleanup(void)
+{
+ vbus_driver_unregister(&vbus_enet_driver);
+}
+
+module_init(vbus_enet_init_module);
+module_exit(vbus_enet_cleanup);
+
+VBUS_DRIVER_AUTOPROBE(VENET_TYPE);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 611b80435955..a7e0c84426ea 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -267,7 +267,7 @@ enum rhine_quirks {
/* Beware of PCI posted writes */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
-static const struct pci_device_id rhine_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 4ceb441f2687..f15485efe40e 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
* Describe the PCI device identifiers that we support in this
* device driver. Used for hotplug autoloading.
*/
-static const struct pci_device_id velocity_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
{ }
};
@@ -2237,8 +2237,6 @@ static int velocity_open(struct net_device *dev)
/* Ensure chip is running */
pci_set_power_state(vptr->pdev, PCI_D0);
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED,
@@ -2250,6 +2248,8 @@ static int velocity_open(struct net_device *dev)
goto out;
}
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
napi_enable(&vptr->napi);
@@ -2339,10 +2339,10 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
dev->mtu = new_mtu;
- velocity_give_many_rx_descs(vptr);
-
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
+ velocity_give_many_rx_descs(vptr);
+
mac_enable_int(vptr->mac_regs);
netif_start_queue(dev);
@@ -2702,10 +2702,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
struct net_device *dev = vptr->dev;
printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
- printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- dev->name,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_INFO "%s: Ethernet Address: %pM\n",
+ dev->name, dev->dev_addr);
}
static u32 velocity_get_link(struct net_device *dev)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d77..b896f9386110 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
* PCI Device ID Table
* Last entry must be all 0s
*/
-static const struct pci_device_id vmxnet3_pciid_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 0fdfd58a35a1..a6606b8948e9 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
"Virtualized Server Adapter");
-static struct pci_device_id vxge_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -310,7 +310,7 @@ static int vxge_rx_map(void *dtrh, struct vxge_ring *ring)
dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data,
rx_priv->data_size, PCI_DMA_FROMDEVICE);
- if (dma_addr == 0) {
+ if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) {
ring->stats.pci_map_fail++;
return -EIO;
}
@@ -4297,10 +4297,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
vdev->ndev->name, ll_config.device_hw_info.product_desc);
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
- vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
- macaddr[3], macaddr[4], macaddr[5]);
+ vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
+ vdev->ndev->name, macaddr);
vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index b36bf96eb502..f0bd70fb650c 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -811,7 +811,7 @@ static ssize_t cosa_read(struct file *file,
cosa_enable_rx(chan);
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->rxwaitq, &wait);
- while(!chan->rx_status) {
+ while (!chan->rx_status) {
current->state = TASK_INTERRUPTIBLE;
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
@@ -896,7 +896,7 @@ static ssize_t cosa_write(struct file *file,
spin_lock_irqsave(&cosa->lock, flags);
add_wait_queue(&chan->txwaitq, &wait);
- while(!chan->tx_status) {
+ while (!chan->tx_status) {
current->state = TASK_INTERRUPTIBLE;
spin_unlock_irqrestore(&cosa->lock, flags);
schedule();
@@ -1153,7 +1153,7 @@ static int cosa_ioctl_common(struct cosa_data *cosa,
struct channel_data *channel, unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
- switch(cmd) {
+ switch (cmd) {
case COSAIORSET: /* Reset the device */
if (!capable(CAP_NET_ADMIN))
return -EACCES;
@@ -1704,7 +1704,7 @@ static inline void tx_interrupt(struct cosa_data *cosa, int status)
spin_unlock_irqrestore(&cosa->lock, flags);
return;
}
- while(1) {
+ while (1) {
cosa->txchan++;
i++;
if (cosa->txchan >= cosa->nchannels)
@@ -2010,7 +2010,7 @@ again:
static void debug_status_in(struct cosa_data *cosa, int status)
{
char *s;
- switch(status & SR_CMD_FROM_SRP_MASK) {
+ switch (status & SR_CMD_FROM_SRP_MASK) {
case SR_UP_REQUEST:
s = "RX_REQ";
break;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca4..f88c07c13197 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2050,7 +2050,7 @@ static int __init dscc4_setup(char *str)
__setup("dscc4.setup=", dscc4_setup);
#endif
-static struct pci_device_id dscc4_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e3649157..40d724a8e020 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -528,7 +528,7 @@ static int fst_debug_mask = { FST_DEBUG };
/*
* PCI ID lookup table
*/
-static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index f1bff98acd1f..1ceccf1ca6c7 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -141,7 +141,7 @@ static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
data->address != CISCO_UNICAST)
return cpu_to_be16(ETH_P_HDLC);
- switch(data->protocol) {
+ switch (data->protocol) {
case cpu_to_be16(ETH_P_IP):
case cpu_to_be16(ETH_P_IPX):
case cpu_to_be16(ETH_P_IPV6):
@@ -190,7 +190,7 @@ static int cisco_rx(struct sk_buff *skb)
cisco_data = (struct cisco_packet*)(skb->data + sizeof
(struct hdlc_header));
- switch(ntohl (cisco_data->type)) {
+ switch (ntohl (cisco_data->type)) {
case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
in_dev = dev->ip_ptr;
addr = 0;
@@ -245,8 +245,8 @@ static int cisco_rx(struct sk_buff *skb)
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
- } /* switch(keepalive type) */
- } /* switch(protocol) */
+ } /* switch (keepalive type) */
+ } /* switch (protocol) */
printk(KERN_INFO "%s: Unsupported protocol %x\n", dev->name,
ntohs(data->protocol));
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index aa9248f8eb1a..6e1ca256effd 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -202,10 +202,10 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return 0; /* return protocol only, no settable parameters */
case IF_PROTO_X25:
- if(!capable(CAP_NET_ADMIN))
+ if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if(dev->flags & IFF_UP)
+ if (dev->flags & IFF_UP)
return -EBUSY;
result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c820..b27850377121 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
static int LMC_PKT_BUF_SZ = 1542;
-static struct pci_device_id lmc_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
PCI_VENDOR_ID_LMC, PCI_ANY_ID },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d3955420..f4f1c00d0d23 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -251,7 +251,7 @@ static char rcsid[] =
#undef PC300_DEBUG_RX
#undef PC300_DEBUG_OTHER
-static struct pci_device_id cpc_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
/* PC300/RSV or PC300/X21, 2 chan */
{0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
/* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd94..c7ab3becd261 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf022..e2cff64a446a 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624ee..541c700dceef 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id wanxl_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09de..6cead321bc15 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -301,24 +301,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
/* Extract MAC addresss */
ddi = (void *) skb->data;
BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
- d_printf(2, dev, "GET DEVICE INFO: mac addr "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
+ d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
+ ddi->mac_address);
if (!memcmp(net_dev->perm_addr, ddi->mac_address,
sizeof(ddi->mac_address)))
goto ok;
dev_warn(dev, "warning: device reports a different MAC address "
"to that of boot mode's\n");
- dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
- dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
- net_dev->perm_addr[0], net_dev->perm_addr[1],
- net_dev->perm_addr[2], net_dev->perm_addr[3],
- net_dev->perm_addr[4], net_dev->perm_addr[5]);
+ dev_warn(dev, "device reports %pM\n", ddi->mac_address);
+ dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
dev_err(dev, "device reports an invalid MAC address, "
"not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299ca..4716c4e4a684 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1041,21 +1041,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
dev_err(dev, "BM: read mac addr failed: %d\n", result);
goto error_read_mac;
}
- d_printf(2, dev,
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
if (i2400m->bus_bm_mac_addr_impaired == 1) {
ack_buf.ack_pl[0] = 0x00;
ack_buf.ack_pl[1] = 0x16;
ack_buf.ack_pl[2] = 0xd3;
get_random_bytes(&ack_buf.ack_pl[3], 3);
dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ "mac addr is %pM\n", ack_buf.ack_pl);
result = 0;
}
net_dev->addr_len = ETH_ALEN;
@@ -1595,7 +1588,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
i2400m->fw_name = fw_name;
ret = i2400m_fw_bootstrap(i2400m, fw, flags);
release_firmware(fw);
- if (ret >= 0) /* firmware loaded succesfully */
+ if (ret >= 0) /* firmware loaded successfully */
break;
i2400m->fw_name = NULL;
}
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 39410016b4ff..e6ca3eb4c0d3 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -39,7 +39,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
module_param(tx_ring_size, uint, 0);
module_param(rx_ring_size, uint, 0);
-static struct pci_device_id adm8211_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
/* ADMtek ADM8211 */
{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -1400,15 +1400,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
}
static int adm8211_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
if (priv->mode != NL80211_IFTYPE_MONITOR)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
return -EOPNOTSUPP;
@@ -1416,8 +1416,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
ADM8211_IDLE();
- ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr));
- ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
+ ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
adm8211_update_mode(dev);
@@ -1427,7 +1427,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
}
static void adm8211_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
priv->mode = NL80211_IFTYPE_MONITOR;
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc6..37e4ab737f2a 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -57,7 +57,7 @@
#define DRV_NAME "airo"
#ifdef CONFIG_PCI
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
{ 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 2517364d3ebe..0fb419936dff 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1789,7 +1789,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
}
static int at76_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct at76_priv *priv = hw->priv;
int ret = 0;
@@ -1798,7 +1798,7 @@ static int at76_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mtx);
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
priv->iw_mode = IW_MODE_INFRA;
break;
@@ -1814,7 +1814,7 @@ exit:
}
static void at76_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
at76_dbg(DBG_MAC80211, "%s()", __func__);
}
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 9f9459860d82..b99a8c2053d8 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,7 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_MAX_BA_RETRY 5
#define AR9170_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
@@ -143,7 +142,6 @@ struct ar9170_sta_tid {
u16 tid;
enum ar9170_tid_state state;
bool active;
- u8 retry;
};
#define AR9170_QUEUE_TIMEOUT 64
@@ -154,6 +152,8 @@ struct ar9170_sta_tid {
#define AR9170_NUM_TX_STATUS 128
#define AR9170_NUM_TX_AGG_MAX 30
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
@@ -248,13 +248,8 @@ struct ar9170_sta_info {
unsigned int ampdu_max_len;
};
-#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
-#define AR9170_TX_FLAG_NO_ACK BIT(1)
-#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
-
struct ar9170_tx_info {
unsigned long timeout;
- unsigned int flags;
};
#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 701ddb7d8400..0a1d4c28e68a 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -276,6 +276,7 @@ struct ar9170_tx_control {
#define AR9170_TX_MAC_RATE_PROBE 0x8000
/* either-or */
+#define AR9170_TX_PHY_MOD_MASK 0x00000003
#define AR9170_TX_PHY_MOD_CCK 0x00000000
#define AR9170_TX_PHY_MOD_OFDM 0x00000001
#define AR9170_TX_PHY_MOD_HT 0x00000002
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index ddc8c09dc79e..857e86104295 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -117,7 +117,7 @@ int ar9170_set_qos(struct ar9170 *ar)
ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
ar->edcf[0].txop | ar->edcf[1].txop << 16);
ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
- ar->edcf[1].txop | ar->edcf[3].txop << 16);
+ ar->edcf[2].txop | ar->edcf[3].txop << 16);
ar9170_regwrite_finish();
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f9d6db8d013e..4d27f7f67c76 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -194,12 +194,15 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
+static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
+{
+ return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
static inline u16 ar9170_get_tid(struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *) txc->frame_data;
-
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+ return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
}
#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
@@ -213,10 +216,10 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
struct ieee80211_hdr *hdr = (void *) txc->frame_data;
- printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
+ printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
"mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
- ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr),
+ ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
jiffies_to_msecs(arinfo->timeout - jiffies));
}
@@ -430,7 +433,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
spin_lock_irqsave(&ar->tx_stats_lock, flags);
ar->tx_stats[queue].len--;
- if (skb_queue_empty(&ar->tx_pending[queue])) {
+ if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), queue);
@@ -440,22 +443,17 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
}
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status[queue], skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: unsupported frame flags!\n",
- wiphy_name(ar->hw->wiphy));
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- dev_kfree_skb_any(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ar9170_tx_ampdu_callback(ar, skb);
+ } else {
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+ skb_queue_tail(&ar->tx_status[queue], skb);
+ }
}
if (!ar->tx_stats[queue].len &&
@@ -1407,17 +1405,6 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
(is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
-
- goto out;
- }
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
/*
* WARNING:
* Putting the QoS queue bits into an unexplored territory is
@@ -1431,12 +1418,17 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
- } else {
- arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if (unlikely(!info->control.sta))
+ goto err_out;
+
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+ } else {
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+ }
}
-out:
return 0;
err_out:
@@ -1671,8 +1663,7 @@ static bool ar9170_tx_ampdu(struct ar9170 *ar)
* tell the FW/HW that this is the last frame,
* that way it will wait for the immediate block ack.
*/
- if (likely(skb_peek_tail(&agg)))
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
+ ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
#ifdef AR9170_TXAGG_DEBUG
printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@ -1716,6 +1707,21 @@ static void ar9170_tx(struct ar9170 *ar)
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+ skb_queue_len(&ar->tx_pending[i]));
+
+ if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+ "remaining slots:%d, needed:%d\n",
+ wiphy_name(ar->hw->wiphy), i, remaining_space,
+ frames);
+#endif /* AR9170_QUEUE_DEBUG */
+ frames = remaining_space;
+ }
+
+ ar->tx_stats[i].len += frames;
+ ar->tx_stats[i].count += frames;
if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: queue %d full\n",
@@ -1733,25 +1739,8 @@ static void ar9170_tx(struct ar9170 *ar)
__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
ieee80211_stop_queue(ar->hw, i);
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- continue;
}
- frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
- skb_queue_len(&ar->tx_pending[i]));
-
- if (remaining_space < frames) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
- "remaining slots:%d, needed:%d\n",
- wiphy_name(ar->hw->wiphy), i, remaining_space,
- frames);
-#endif /* AR9170_QUEUE_DEBUG */
- frames = remaining_space;
- }
-
- ar->tx_stats[i].len += frames;
- ar->tx_stats[i].count += frames;
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
if (!frames)
@@ -1773,7 +1762,7 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_inc(&ar->tx_ampdu_pending);
#ifdef AR9170_QUEUE_DEBUG
@@ -1784,7 +1773,7 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_pending);
frames_failed++;
@@ -1950,7 +1939,7 @@ err_free:
}
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
@@ -1963,8 +1952,8 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
goto unlock;
}
- ar->vif = conf->vif;
- memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
+ ar->vif = vif;
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
ar->rx_software_decryption = true;
@@ -1984,7 +1973,7 @@ unlock:
}
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
@@ -2366,7 +2355,6 @@ static void ar9170_sta_notify(struct ieee80211_hw *hw,
sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
sta_info->agg[i].active = false;
sta_info->agg[i].ssn = 0;
- sta_info->agg[i].retry = 0;
sta_info->agg[i].tid = i;
INIT_LIST_HEAD(&sta_info->agg[i].list);
skb_queue_head_init(&sta_info->agg[i].queue);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d924057..0f361186b78f 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -84,6 +84,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cde, 0x0023) },
/* Z-Com UB82 ABG */
{ USB_DEVICE(0x0cde, 0x0026) },
+ /* Sphairon Homelink 1202 */
+ { USB_DEVICE(0x0cde, 0x0027) },
/* Arcadyan WN7512 */
{ USB_DEVICE(0x083a, 0xf522) },
/* Planex GWUS300 */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a96761111..66bcb506a112 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1063,6 +1063,7 @@ struct ath5k_hw {
u32 ah_cw_min;
u32 ah_cw_max;
u32 ah_limit_tx_retries;
+ u8 ah_coverage_class;
/* Antenna Control */
u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1201,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
/* Protocol Control Unit Functions */
extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
+extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1233,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
+/* Clock rate related functions */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1316,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
* Functions used internaly
*/
-/*
- * Translate usec to hw clock units
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
-{
- return turbo ? (usec * 80) : (usec * 40);
-}
-
-/*
- * Translate hw clock units to usec
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
-{
- return turbo ? (clock / 80) : (clock / 40);
-}
-
static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
return &ah->common;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index e63b7c40d0ee..5577bcc80eac 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -83,7 +83,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
/* Known PCI ids */
-static const struct pci_device_id ath5k_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -225,9 +225,9 @@ static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct ieee80211_hw *hw);
static void ath5k_stop(struct ieee80211_hw *hw);
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static void ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mc_list);
@@ -254,6 +254,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
u32 changes);
static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class);
static const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
@@ -274,6 +276,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.bss_info_changed = ath5k_bss_info_changed,
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
+ .set_coverage_class = ath5k_set_coverage_class,
};
/*
@@ -2773,7 +2776,7 @@ static void ath5k_stop(struct ieee80211_hw *hw)
}
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
int ret;
@@ -2784,22 +2787,22 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
- sc->vif = conf->vif;
+ sc->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_MONITOR:
- sc->opmode = conf->type;
+ sc->opmode = vif->type;
break;
default:
ret = -EOPNOTSUPP;
goto end;
}
- ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
+ ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
ret = 0;
@@ -2810,13 +2813,13 @@ end:
static void
ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
u8 mac[ETH_ALEN] = {};
mutex_lock(&sc->lock);
- if (sc->vif != conf->vif)
+ if (sc->vif != vif)
goto end;
ath5k_hw_set_lladdr(sc->ah, mac);
@@ -3262,3 +3265,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
+
+/**
+ * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @hw: struct ieee80211_hw pointer
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
+ * coverage class. The values are persistent, they are restored after device
+ * reset.
+ */
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ mutex_lock(&sc->lock);
+ ath5k_hw_set_coverage_class(sc->ah, coverage_class);
+ mutex_unlock(&sc->lock);
+}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 5d1c8677f180..6a3f4da7fb48 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -97,7 +97,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int ret;
u16 val;
- u32 cksum, offset;
+ u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
/*
* Read values from EEPROM and store them in the capability structure
@@ -116,12 +116,38 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
* Validate the checksum of the EEPROM date. There are some
* devices with invalid EEPROMs.
*/
- for (cksum = 0, offset = 0; offset < AR5K_EEPROM_INFO_MAX; offset++) {
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
+ if (val) {
+ eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
+ AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
+ AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
+ eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
+
+ /*
+ * Fail safe check to prevent stupid loops due
+ * to busted EEPROMs. XXX: This value is likely too
+ * big still, waiting on a better value.
+ */
+ if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
+ ATH5K_ERR(ah->ah_sc, "Invalid max custom EEPROM size: "
+ "%d (0x%04x) max expected: %d (0x%04x)\n",
+ eep_max, eep_max,
+ 3 * AR5K_EEPROM_INFO_MAX,
+ 3 * AR5K_EEPROM_INFO_MAX);
+ return -EIO;
+ }
+ }
+
+ for (cksum = 0, offset = 0; offset < eep_max; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
- ATH5K_ERR(ah->ah_sc, "Invalid EEPROM checksum 0x%04x\n", cksum);
+ ATH5K_ERR(ah->ah_sc, "Invalid EEPROM "
+ "checksum: 0x%04x eep_max: 0x%04x (%s)\n",
+ cksum, eep_max,
+ eep_max == AR5K_EEPROM_INFO_MAX ?
+ "default size" : "custom size");
return -EIO;
}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 0123f3521a0b..473a483bb9c3 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -37,6 +37,14 @@
#define AR5K_EEPROM_RFKILL_POLARITY_S 1
#define AR5K_EEPROM_REG_DOMAIN 0x00bf /* EEPROM regdom */
+
+/* FLASH(EEPROM) Defines for AR531X chips */
+#define AR5K_EEPROM_SIZE_LOWER 0x1b /* size info -- lower */
+#define AR5K_EEPROM_SIZE_UPPER 0x1c /* size info -- upper */
+#define AR5K_EEPROM_SIZE_UPPER_MASK 0xfff0
+#define AR5K_EEPROM_SIZE_UPPER_SHIFT 4
+#define AR5K_EEPROM_SIZE_ENDLOC_SHIFT 12
+
#define AR5K_EEPROM_CHECKSUM 0x00c0 /* EEPROM checksum */
#define AR5K_EEPROM_INFO_BASE 0x00c0 /* EEPROM header */
#define AR5K_EEPROM_INFO_MAX (0x400 - AR5K_EEPROM_INFO_BASE)
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d9..aefe84f9c04b 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
}
/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
}
/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
/**
+ * ath5k_hw_htoclock - Translate usec to hw clock units
+ *
+ * @ah: The &struct ath5k_hw
+ * @usec: value in microseconds
+ */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+{
+ return usec * ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * @clock: value in hw clock units
+ */
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+{
+ return clock / ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_get_clockrate - Get the clock rate for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ int clock;
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ clock = 40; /* 802.11a */
+ else if (channel->hw_value & CHANNEL_CCK)
+ clock = 22; /* 802.11b */
+ else
+ clock = 44; /* 802.11g */
+
+ /* Clock rate in turbo modes is twice the normal rate */
+ if (channel->hw_value & CHANNEL_TURBO)
+ clock *= 2;
+
+ return clock;
+}
+
+/**
+ * ath5k_hw_get_default_slottime - Get the default slot time for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 6; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_CCK)
+ return 20; /* 802.11b */
+
+ return 9; /* 802.11 a/g */
+}
+
+/**
+ * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 8; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ return 16; /* 802.11a */
+
+ return 10; /* 802.11 b/g */
+}
+
+/**
* ath5k_hw_set_lladdr - Set station id
*
* @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
return 0;
}
+/**
+ * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @ah: The &struct ath5k_hw
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Sets slot time, ACK timeout and CTS timeout for given coverage class.
+ */
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+{
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
+ int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
+ int cts_timeout = ack_timeout;
+
+ ath5k_hw_set_slot_time(ah, slot_time);
+ ath5k_hw_set_ack_timeout(ah, ack_timeout);
+ ath5k_hw_set_cts_timeout(ah, cts_timeout);
+
+ ah->ah_coverage_class = coverage_class;
+}
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef206..abe36c0d139c 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -520,12 +520,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
*/
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
+ unsigned int slot_time_clock;
+
ATH5K_TRACE(ah->ah_sc);
+
if (ah->ah_version == AR5K_AR5210)
- return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
- AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
else
- return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
+
+ return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
}
/*
@@ -533,15 +537,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
+ u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+
ATH5K_TRACE(ah->ah_sc);
- if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
+
+ if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
- ah->ah_turbo), AR5K_SLOT_TIME);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
else
- ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc77869..6690923fd78c 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
!(channel->hw_value & CHANNEL_OFDM));
/* Get coefficient
- * ALGO: coef = (5 * clock * carrier_freq) / 2)
+ * ALGO: coef = (5 * clock / carrier_freq) / 2
* we scale coef by shifting clock value by 24 for
* better precision since we use integers */
/* TODO: Half/quarter rate */
- clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO);
-
+ clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
/* Get exponent
@@ -1317,6 +1316,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Restore antenna mode */
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+ /* Restore slot time and ACK timeouts */
+ if (ah->ah_coverage_class > 0)
+ ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
+
/*
* Configure QCUs/DCUs
*/
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 03a1106ad725..5774cea23a3b 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -25,7 +25,7 @@ config ATH9K
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K
+ depends on ATH9K && DEBUG_FS
---help---
Say Y, if you need access to ath9k's statistics for
interrupts, rate control, etc.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a9..6b50d5eb9ec3 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
ath9k-y += beacon.o \
+ gpio.o \
+ init.o \
main.o \
recv.o \
xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137ab..f24b1f4c3e29 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -121,16 +121,16 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
- ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
+ ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
+ dev_err(&pdev->dev, "request_irq failed\n");
goto err_free_hw;
}
- ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- goto err_detach;
+ dev_err(&pdev->dev, "failed to initialize device\n");
+ goto err_irq;
}
ah = sc->sc_ah;
@@ -143,8 +143,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
return 0;
- err_detach:
- ath_detach(sc);
+ err_irq:
+ free_irq(irq, sc);
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
@@ -161,8 +161,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (hw) {
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_cleanup(sc);
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+ ath_bus_cleanup(common);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index e2cef2ff5d8f..bf3d4c4bfa52 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -33,11 +33,11 @@ struct ath_node;
/* Macro to expand scalars to 64-bit objects */
-#define ito64(x) (sizeof(x) == 8) ? \
+#define ito64(x) (sizeof(x) == 1) ? \
(((unsigned long long int)(x)) & (0xff)) : \
- (sizeof(x) == 16) ? \
+ (sizeof(x) == 2) ? \
(((unsigned long long int)(x)) & 0xffff) : \
- ((sizeof(x) == 32) ? \
+ ((sizeof(x) == 4) ? \
(((unsigned long long int)(x)) & 0xffffffff) : \
(unsigned long long int)(x))
@@ -341,6 +341,12 @@ int ath_beaconq_config(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+void ath_ani_calibrate(unsigned long data);
+
+/**********/
+/* BTCOEX */
+/**********/
+
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
ATH_BTCOEX_NO_STOMP,
@@ -361,6 +367,10 @@ struct ath_btcoex {
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
};
+int ath_init_btcoex_timer(struct ath_softc *sc);
+void ath9k_btcoex_timer_resume(struct ath_softc *sc);
+void ath9k_btcoex_timer_pause(struct ath_softc *sc);
+
/********************/
/* LED Control */
/********************/
@@ -385,6 +395,9 @@ struct ath_led {
bool registered;
};
+void ath_init_leds(struct ath_softc *sc);
+void ath_deinit_leds(struct ath_softc *sc);
+
/********************/
/* Main driver core */
/********************/
@@ -403,26 +416,28 @@ struct ath_led {
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0)
-#define SC_OP_BEACONS BIT(1)
-#define SC_OP_RXAGGR BIT(2)
-#define SC_OP_TXAGGR BIT(3)
-#define SC_OP_FULL_RESET BIT(4)
-#define SC_OP_PREAMBLE_SHORT BIT(5)
-#define SC_OP_PROTECT_ENABLE BIT(6)
-#define SC_OP_RXFLUSH BIT(7)
-#define SC_OP_LED_ASSOCIATED BIT(8)
-#define SC_OP_WAIT_FOR_BEACON BIT(12)
-#define SC_OP_LED_ON BIT(13)
-#define SC_OP_SCANNING BIT(14)
-#define SC_OP_TSF_RESET BIT(15)
-#define SC_OP_WAIT_FOR_CAB BIT(16)
-#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
-#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
-#define SC_OP_BEACON_SYNC BIT(19)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(21)
-#define SC_OP_NULLFUNC_COMPLETED BIT(22)
-#define SC_OP_PS_ENABLED BIT(23)
+#define SC_OP_INVALID BIT(0)
+#define SC_OP_BEACONS BIT(1)
+#define SC_OP_RXAGGR BIT(2)
+#define SC_OP_TXAGGR BIT(3)
+#define SC_OP_FULL_RESET BIT(4)
+#define SC_OP_PREAMBLE_SHORT BIT(5)
+#define SC_OP_PROTECT_ENABLE BIT(6)
+#define SC_OP_RXFLUSH BIT(7)
+#define SC_OP_LED_ASSOCIATED BIT(8)
+#define SC_OP_LED_ON BIT(9)
+#define SC_OP_SCANNING BIT(10)
+#define SC_OP_TSF_RESET BIT(11)
+#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
+
+/* Powersave flags */
+#define PS_WAIT_FOR_BEACON BIT(0)
+#define PS_WAIT_FOR_CAB BIT(1)
+#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
+#define PS_WAIT_FOR_TX_ACK BIT(3)
+#define PS_BEACON_SYNC BIT(4)
+#define PS_NULLFUNC_COMPLETED BIT(5)
+#define PS_ENABLED BIT(6)
struct ath_wiphy;
struct ath_rate_table;
@@ -453,12 +468,12 @@ struct ath_softc {
int irq;
spinlock_t sc_resetlock;
spinlock_t sc_serial_rw;
- spinlock_t ani_lock;
spinlock_t sc_pm_lock;
struct mutex mutex;
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
+ u16 ps_flags; /* PS_* */
u16 curtxpow;
u8 nbcnvifs;
u16 nvifs;
@@ -509,6 +524,7 @@ struct ath_wiphy {
int chan_is_ht;
};
+void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -525,15 +541,15 @@ static inline void ath_bus_cleanup(struct ath_common *common)
}
extern struct ieee80211_ops ath9k_ops;
+extern int modparam_nohwcrypt;
irqreturn_t ath_isr(int irq, void *dev);
-void ath_cleanup(struct ath_softc *sc);
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
-void ath_detach(struct ath_softc *sc);
+void ath9k_deinit_device(struct ath_softc *sc);
const char *ath_mac_bb_name(u32 mac_bb_version);
const char *ath_rf_name(u16 rf_version);
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -542,6 +558,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -583,4 +600,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+
+void ath_start_rfkill_poll(struct ath_softc *sc);
+extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b66f72dbf7b9..9489b6b25b5f 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -289,23 +289,49 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
if (sc->cur_rate_table == NULL)
return 0;
- max = 80 + sc->cur_rate_table->rate_cnt * 64;
+ max = 80 + sc->cur_rate_table->rate_cnt * 1024;
buf = kmalloc(max + 1, GFP_KERNEL);
if (buf == NULL)
return 0;
buf[max] = 0;
- len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success",
- "Retries", "XRetries", "PER");
+ len += sprintf(buf, "%6s %6s %6s "
+ "%10s %10s %10s %10s\n",
+ "HT", "MCS", "Rate",
+ "Success", "Retries", "XRetries", "PER");
for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
+ char mcs[5];
+ char htmode[5];
+ int used_mcs = 0, used_htmode = 0;
+
+ if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
+ used_mcs = snprintf(mcs, 5, "%d",
+ sc->cur_rate_table->info[i].ratecode);
+
+ if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT40");
+ else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT20");
+ else
+ used_htmode = snprintf(htmode, 5, "????");
+ }
+
+ mcs[used_mcs] = '\0';
+ htmode[used_htmode] = '\0';
len += snprintf(buf + len, max - len,
- "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000,
- (ratekbps % 1000) / 100, stats->success,
- stats->retries, stats->xretries,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
stats->per);
}
@@ -554,6 +580,116 @@ static const struct file_operations fops_xmit = {
.owner = THIS_MODULE
};
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
+
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1152;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "CRC ERR",
+ sc->debug.stats.rxstats.crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT CRC ERR",
+ sc->debug.stats.rxstats.decrypt_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PHY ERR",
+ sc->debug.stats.rxstats.phy_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "MIC ERR",
+ sc->debug.stats.rxstats.mic_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PRE-DELIM CRC ERR",
+ sc->debug.stats.rxstats.pre_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "POST-DELIM CRC ERR",
+ sc->debug.stats.rxstats.post_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT BUSY ERR",
+ sc->debug.stats.rxstats.decrypt_busy_err);
+
+ PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+{
+#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
+#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
+
+ struct ath_desc *ds = bf->bf_desc;
+ u32 phyerr;
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ RX_STAT_INC(crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ RX_STAT_INC(decrypt_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ RX_STAT_INC(mic_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_STAT_INC(pre_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_STAT_INC(post_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_STAT_INC(decrypt_busy_err);
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ RX_STAT_INC(phy_err);
+ phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ RX_PHY_ERR_INC(phyerr);
+ }
+
+#undef RX_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -606,6 +742,13 @@ int ath9k_init_debug(struct ath_hw *ah)
if (!sc->debug.debugfs_xmit)
goto err;
+ sc->debug.debugfs_recv = debugfs_create_file("recv",
+ S_IRUSR,
+ sc->debug.debugfs_phy,
+ sc, &fops_recv);
+ if (!sc->debug.debugfs_recv)
+ goto err;
+
return 0;
err:
ath9k_exit_debug(ah);
@@ -617,6 +760,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
+ debugfs_remove(sc->debug.debugfs_recv);
debugfs_remove(sc->debug.debugfs_xmit);
debugfs_remove(sc->debug.debugfs_wiphy);
debugfs_remove(sc->debug.debugfs_rcstat);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee11..86780e68b31e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
u32 delim_underrun;
};
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ encountered an error
+ * @mic_err: No. of frames with incorrect TKIP MIC verification failure
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ */
+struct ath_rx_stats {
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
+ struct ath_rx_stats rxstats;
};
struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
struct dentry *debugfs_rcstat;
struct dentry *debugfs_wiphy;
struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
struct ath_stats stats;
};
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
{
}
+static inline void ath_debug_stat_rx(struct ath_softc *sc,
+ struct ath_buf *bf)
+{
+}
+
static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per)
{
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 000000000000..e204bd25ff65
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/********************************/
+/* LED functions */
+/********************************/
+
+static void ath_led_blink_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ ath_led_blink_work.work);
+
+ if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
+ return;
+
+ if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work,
+ (sc->sc_flags & SC_OP_LED_ON) ?
+ msecs_to_jiffies(sc->led_off_duration) :
+ msecs_to_jiffies(sc->led_on_duration));
+
+ sc->led_on_duration = sc->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ sc->led_off_duration = sc->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ sc->led_on_cnt = sc->led_off_cnt = 0;
+ if (sc->sc_flags & SC_OP_LED_ON)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ else
+ sc->sc_flags |= SC_OP_LED_ON;
+}
+
+static void ath_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath_softc *sc = led->sc;
+
+ switch (brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ } else {
+ sc->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ sc->sc_flags |= SC_OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ sc->sc_flags |= SC_OP_LED_ON;
+ } else {
+ sc->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->sc = sc;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+ return ret;
+}
+
+static void ath_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath_deinit_leds(struct ath_softc *sc)
+{
+ ath_unregister_led(&sc->assoc_led);
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ ath_unregister_led(&sc->tx_led);
+ ath_unregister_led(&sc->rx_led);
+ ath_unregister_led(&sc->radio_led);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+}
+
+void ath_init_leds(struct ath_softc *sc)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+ else
+ sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure gpio 1 for output */
+ ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(sc->hw);
+ snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->radio_led, trigger);
+ sc->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(sc->hw);
+ snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->assoc_led, trigger);
+ sc->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(sc->hw);
+ snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->tx_led, trigger);
+ sc->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(sc->hw);
+ snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->rx_led, trigger);
+ sc->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&sc->ath_led_blink_work);
+ ath_deinit_leds(sc);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
+ ah->rfkill_polarity;
+}
+
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ bool blocked = !!ath_is_rfkill_set(sc);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath_start_rfkill_poll(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(sc->hw->wiphy);
+}
+
+/******************/
+/* BTCOEX */
+/******************/
+
+/*
+ * Detects if there is any priority bt traffic
+ */
+static void ath_detect_bt_priority(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
+ btcoex->bt_priority_cnt++;
+
+ if (time_after(jiffies, btcoex->bt_priority_time +
+ msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT priority traffic detected");
+ sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+ } else {
+ sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
+ }
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ }
+}
+
+/*
+ * Configures appropriate weight based on stomp type.
+ */
+static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
+ enum ath_stomp_type stomp_type)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ switch (stomp_type) {
+ case ATH_BTCOEX_STOMP_ALL:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_ALL_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_LOW:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_NONE:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_NONE_WLAN_WGHT);
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Invalid Stomptype\n");
+ break;
+ }
+
+ ath9k_hw_btcoex_enable(ah);
+}
+
+static void ath9k_gen_timer_start(struct ath_hw *ah,
+ struct ath_gen_timer *timer,
+ u32 timer_next,
+ u32 timer_period)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
+
+ if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ ath9k_hw_gen_timer_stop(ah, timer);
+
+ /* if no timer is enabled, turn off interrupt mask */
+ if (timer_table->timer_mask.val == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+/*
+ * This is the master bt coex timer which runs for every
+ * 45ms, bt traffic will be given priority during 55% of this
+ * period while wlan gets remaining 45%
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ ath_detect_bt_priority(sc);
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ ath9k_gen_timer_start(ah,
+ btcoex->no_stomp_timer,
+ (ath9k_hw_gettsf32(ah) +
+ btcoex->btcoex_no_stomp),
+ btcoex->btcoex_no_stomp * 10);
+ btcoex->hw_timer_enabled = true;
+ }
+
+ mod_timer(&btcoex->period_timer, jiffies +
+ msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+}
+
+/*
+ * Generic tsf based hw timer which configures weight
+ * registers to time slice between wlan and bt traffic
+ */
+static void ath_btcoex_no_stomp_timer(void *arg)
+{
+ struct ath_softc *sc = (struct ath_softc *)arg;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "no stomp timer running \n");
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+}
+
+int ath_init_btcoex_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+
+ setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
+ (unsigned long) sc);
+
+ spin_lock_init(&btcoex->btcoex_lock);
+
+ btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
+ ath_btcoex_no_stomp_timer,
+ ath_btcoex_no_stomp_timer,
+ (void *) sc, AR_FIRST_NDP_TIMER);
+
+ if (!btcoex->no_stomp_timer)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * (Re)start btcoex timers
+ */
+void ath9k_btcoex_timer_resume(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Starting btcoex timers");
+
+ /* make sure duty cycle timer is also stopped when resuming */
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
+
+ mod_timer(&btcoex->period_timer, jiffies);
+}
+
+
+/*
+ * Pause btcoex timer and bt duty cycle timer
+ */
+void ath9k_btcoex_timer_pause(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ del_timer_sync(&btcoex->period_timer);
+
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ btcoex->hw_timer_enabled = false;
+}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 2ec61f08cfdb..2311fe7a0bf2 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -343,30 +343,6 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
return true;
}
-static const char *ath9k_hw_devname(u16 devid)
-{
- switch (devid) {
- case AR5416_DEVID_PCI:
- return "Atheros 5416";
- case AR5416_DEVID_PCIE:
- return "Atheros 5418";
- case AR9160_DEVID_PCI:
- return "Atheros 9160";
- case AR5416_AR9100_DEVID:
- return "Atheros 9100";
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- return "Atheros 9280";
- case AR9285_DEVID_PCIE:
- return "Atheros 9285";
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- return "Atheros 9287";
- }
-
- return NULL;
-}
-
static void ath9k_hw_init_config(struct ath_hw *ah)
{
int i;
@@ -392,7 +368,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
- ah->config.intr_mitigation = true;
+ ah->config.rx_intr_mitigation = true;
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -1184,7 +1160,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
ah->mask_reg |= AR_IMR_RXOK;
@@ -1266,13 +1242,7 @@ static void ath9k_hw_init_user_settings(struct ath_hw *ah)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
}
-const char *ath9k_hw_probe(u16 vendorid, u16 devid)
-{
- return vendorid == ATHEROS_VENDOR_ID ?
- ath9k_hw_devname(devid) : NULL;
-}
-
-void ath9k_hw_detach(struct ath_hw *ah)
+void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1290,7 +1260,7 @@ free_hw:
kfree(ah);
ah = NULL;
}
-EXPORT_SYMBOL(ath9k_hw_detach);
+EXPORT_SYMBOL(ath9k_hw_deinit);
/*******/
/* INI */
@@ -2121,7 +2091,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_OBS, 8);
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
@@ -2781,7 +2751,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
*masked = isr & ATH9K_INT_COMMON;
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
*masked |= ATH9K_INT_RX;
}
@@ -2914,7 +2884,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
}
if (ints & ATH9K_INT_RX) {
mask |= AR_IMR_RXERR;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
else
mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e2b0c73a616f..3f0f055ea39b 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -212,7 +212,7 @@ struct ath9k_ops_config {
u32 cck_trig_low;
u32 enable_ani;
int serialize_regmode;
- bool intr_mitigation;
+ bool rx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -616,7 +616,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
-void ath9k_hw_detach(struct ath_hw *ah);
+void ath9k_hw_deinit(struct ath_hw *ah);
int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 000000000000..16d1efb4b8b2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static char *dev_info = "ath9k";
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+/* We use the hw_value as an index into our private channel structure */
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable on 2312-2732
+ * on 5 MHz steps, we support the channels which we know
+ * we have calibration data for all cards though to make
+ * this static */
+static struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable on XXXX-YYYY
+ * on 5 MHz steps, we support the channels which we know
+ * we have calibration data for all cards though to make
+ * this static */
+static struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short premble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
+
+static void ath9k_deinit_softc(struct ath_softc *sc);
+
+/*
+ * Read and write, they both share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only. This is required
+ * as the FIFO on these devices can only accept sanely 2 requests.
+ */
+
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ iowrite32(val, sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, sc->mem + reg_offset);
+}
+
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u32 val;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ val = ioread32(sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(sc->mem + reg_offset);
+ return val;
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_ioread32,
+ .write = ath9k_iowrite32,
+};
+
+/**************************/
+/* Initialization */
+/**************************/
+
+static void setup_ht_cap(struct ath_softc *sc,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u8 tx_streams, rx_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
+ 1 : 2;
+ rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
+ 1 : 2;
+
+ if (tx_streams != rx_streams) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ ht_info->mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ ht_info->mcs.rx_mask[1] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+
+ return ath_reg_notifier_apply(wiphy, request, reg);
+}
+
+/*
+ * This function will allocate both the DMA descriptor structure, and the
+ * buffers it contains. These are used to contain the descriptors used
+ * by the system.
+*/
+int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+ struct list_head *head, const char *name,
+ int nbuf, int ndesc)
+{
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_desc *ds;
+ struct ath_buf *bf;
+ int i, bsize, error;
+
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ name, nbuf, ndesc);
+
+ INIT_LIST_HEAD(head);
+ /* ath_desc must be a multiple of DWORDs */
+ if ((sizeof(struct ath_desc) % 4) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "ath_desc not DWORD aligned\n");
+ BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ u32 ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u32 dma_len;
+
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+ };
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (dd->dd_desc == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ ds = dd->dd_desc;
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+
+ /* allocate buffers */
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = kzalloc(bsize, GFP_KERNEL);
+ if (bf == NULL) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+ dd->dd_bufptr = bf;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += ndesc;
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ return 0;
+fail2:
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+fail:
+ memset(dd, 0, sizeof(*dd));
+ return error;
+#undef ATH_DESC_4KB_BOUND_CHECK
+#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
+#undef DS2PHYS
+}
+
+static void ath9k_init_crypto(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = sc->sc_ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(sc->sc_ah, (u16) i);
+
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+
+}
+
+static int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ int r, qnum;
+
+ switch (sc->sc_ah->btcoex_hw.scheme) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_init_2wire(sc->sc_ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_init_3wire(sc->sc_ah);
+ r = ath_init_btcoex_timer(sc);
+ if (r)
+ return -1;
+ qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+static int ath9k_init_queues(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
+ sc->tx.hwq_map[i] = -1;
+
+ sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
+ if (sc->beacon.beaconq == -1) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup a beacon xmit queue\n");
+ goto err;
+ }
+
+ sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+ if (sc->beacon.cabq == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup CAB xmit queue\n");
+ goto err;
+ }
+
+ sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
+ ath_cabq_update(sc);
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ return -EIO;
+}
+
+static void ath9k_init_channels_rates(struct ath_softc *sc)
+{
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
+ sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
+ sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+}
+
+static void ath9k_init_misc(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
+ setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+
+ sc->config.txpowlimit = ATH_TXPOWER_MAX;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ sc->sc_flags |= SC_OP_TXAGGR;
+ sc->sc_flags |= SC_OP_RXAGGR;
+ }
+
+ common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
+ common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
+
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ sc->beacon.slottime = ATH9K_SLOT_TIME_9;
+
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ sc->beacon.bslot[i] = NULL;
+ sc->beacon.bslot_aphy[i] = NULL;
+ }
+}
+
+static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, i;
+ int csz = 0;
+
+ sc->sc_flags |= SC_OP_INVALID;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = subsysid;
+ sc->sc_ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = bus_ops;
+ common->ah = ah;
+ common->hw = sc->hw;
+ common->priv = sc;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&sc->wiphy_lock);
+ spin_lock_init(&sc->sc_resetlock);
+ spin_lock_init(&sc->sc_serial_rw);
+ spin_lock_init(&sc->sc_pm_lock);
+ mutex_init(&sc->mutex);
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
+ (unsigned long)sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(sc);
+ if (ret)
+ goto err_queues;
+
+ ret = ath9k_init_btcoex(sc);
+ if (ret)
+ goto err_btcoex;
+
+ ath9k_init_crypto(sc);
+ ath9k_init_channels_rates(sc);
+ ath9k_init_misc(sc);
+
+ return 0;
+
+err_btcoex:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+err_queues:
+ ath9k_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(ah);
+ sc->sc_ah = NULL;
+
+ return ret;
+}
+
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT;
+
+ if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->max_rates = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ /* Hardware supports 10 but we use 4 */
+ hw->max_rate_tries = 4;
+ hw->sta_data_size = sizeof(struct ath_node);
+ hw->vif_data_size = sizeof(struct ath_vif);
+
+ hw->rate_control_algorithm = "ath9k_rate_control";
+
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &sc->sbands[IEEE80211_BAND_5GHZ];
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
+ if (error != 0)
+ goto error_init;
+
+ ah = sc->sc_ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(sc, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto error_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX DMA */
+ error = ath_tx_init(sc, ATH_TXBUF);
+ if (error != 0)
+ goto error_tx;
+
+ /* Setup RX DMA */
+ error = ath_rx_init(sc, ATH_RXBUF);
+ if (error != 0)
+ goto error_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto error_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto error_world;
+ }
+
+ INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
+ INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
+ sc->wiphy_scheduler_int = msecs_to_jiffies(500);
+
+ ath_init_leds(sc);
+ ath_start_rfkill_poll(sc);
+
+ return 0;
+
+error_world:
+ ieee80211_unregister_hw(hw);
+error_register:
+ ath_rx_cleanup(sc);
+error_rx:
+ ath_tx_cleanup(sc);
+error_tx:
+ /* Nothing */
+error_regd:
+ ath9k_deinit_softc(sc);
+error_init:
+ return error;
+}
+
+/*****************************/
+/* De-Initialization */
+/*****************************/
+
+static void ath9k_deinit_softc(struct ath_softc *sc)
+{
+ int i = 0;
+
+ if ((sc->btcoex.no_stomp_timer) &&
+ sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ ath9k_exit_debug(sc->sc_ah);
+ ath9k_hw_deinit(sc->sc_ah);
+
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+}
+
+void ath9k_deinit_device(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ int i = 0;
+
+ ath9k_ps_wakeup(sc);
+
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
+ ath_deinit_leds(sc);
+
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ if (aphy == NULL)
+ continue;
+ sc->sec_wiphy[i] = NULL;
+ ieee80211_unregister_hw(aphy->hw);
+ ieee80211_free_hw(aphy->hw);
+ }
+ kfree(sc->sec_wiphy);
+
+ ieee80211_unregister_hw(hw);
+ ath_rx_cleanup(sc);
+ ath_tx_cleanup(sc);
+ ath9k_deinit_softc(sc);
+}
+
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head)
+{
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+
+ INIT_LIST_HEAD(head);
+ kfree(dd->dd_bufptr);
+ memset(dd, 0, sizeof(*dd));
+}
+
+/************************/
+/* Module Hooks */
+/************************/
+
+static int __init ath9k_init(void)
+{
+ int error;
+
+ /* Register rate control algorithm */
+ error = ath_rate_control_register();
+ if (error != 0) {
+ printk(KERN_ERR
+ "ath9k: Unable to register rate control "
+ "algorithm: %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = ath9k_debug_create_root();
+ if (error) {
+ printk(KERN_ERR
+ "ath9k: Unable to create debugfs root: %d\n",
+ error);
+ goto err_rate_unregister;
+ }
+
+ error = ath_pci_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k: No PCI devices found, driver not installed.\n");
+ error = -ENODEV;
+ goto err_remove_root;
+ }
+
+ error = ath_ahb_init();
+ if (error < 0) {
+ error = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ return 0;
+
+ err_pci_exit:
+ ath_pci_exit();
+
+ err_remove_root:
+ ath9k_debug_remove_root();
+ err_rate_unregister:
+ ath_rate_control_unregister();
+ err_out:
+ return error;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+ ath_ahb_exit();
+ ath_pci_exit();
+ ath9k_debug_remove_root();
+ ath_rate_control_unregister();
+ printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e185479e295e..29851e6376a9 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -167,6 +167,40 @@ struct ath_rx_status {
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+enum ath9k_phyerr {
+ ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
+ ATH9K_PHYERR_TIMING = 1, /* Timing error */
+ ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
+ ATH9K_PHYERR_RATE = 3, /* Illegal rate */
+ ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
+ ATH9K_PHYERR_RADAR = 5, /* Radar detect */
+ ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
+ ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
+
+ ATH9K_PHYERR_OFDM_TIMING = 17,
+ ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
+ ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
+ ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
+ ATH9K_PHYERR_OFDM_POWER_DROP = 21,
+ ATH9K_PHYERR_OFDM_SERVICE = 22,
+ ATH9K_PHYERR_OFDM_RESTART = 23,
+ ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
+
+ ATH9K_PHYERR_CCK_TIMING = 25,
+ ATH9K_PHYERR_CCK_HEADER_CRC = 26,
+ ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
+ ATH9K_PHYERR_CCK_SERVICE = 30,
+ ATH9K_PHYERR_CCK_RESTART = 31,
+ ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
+ ATH9K_PHYERR_CCK_POWER_DROP = 33,
+
+ ATH9K_PHYERR_HT_CRC_ERROR = 34,
+ ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
+ ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
+
+ ATH9K_PHYERR_MAX = 37,
+};
+
struct ath_desc {
u32 ds_link;
u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 996eb90263cc..b39c7bc41143 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
#include "ath9k.h"
#include "btcoex.h"
-static char *dev_info = "ath9k";
-
-MODULE_AUTHOR("Atheros Communications");
-MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
-MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
-
-static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-module_param_named(debug, ath9k_debug, uint, 0);
-MODULE_PARM_DESC(debug, "Debugging mask");
-
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_2ghz_chantable[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_5ghz_chantable[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
-};
-
static void ath_cache_conf_rate(struct ath_softc *sc,
struct ieee80211_conf *conf)
{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
return channel;
}
-static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
unsigned long flags;
bool ret;
@@ -256,10 +144,10 @@ void ath9k_ps_restore(struct ath_softc *sc)
goto unlock;
if (sc->ps_enabled &&
- !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK)))
+ !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK)))
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
unlock:
@@ -349,7 +237,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
* When the task is complete, it reschedules itself depending on the
* appropriate interval that was calculated.
*/
-static void ath_ani_calibrate(unsigned long data)
+void ath_ani_calibrate(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -363,14 +251,6 @@ static void ath_ani_calibrate(unsigned long data)
short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
- /*
- * don't calibrate when we're scanning.
- * we are most likely not on our home channel.
- */
- spin_lock(&sc->ani_lock);
- if (sc->sc_flags & SC_OP_SCANNING)
- goto set_timer;
-
/* Only calibrate if awake */
if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
goto set_timer;
@@ -437,7 +317,6 @@ static void ath_ani_calibrate(unsigned long data)
ath9k_ps_restore(sc);
set_timer:
- spin_unlock(&sc->ani_lock);
/*
* Set timer interval based on previous results.
* The interval must be the shortest necessary to satisfy ANI,
@@ -513,7 +392,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-static void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -545,7 +424,7 @@ static void ath9k_tasklet(unsigned long data)
*/
ath_print(common, ATH_DBG_PS,
"TSFOOR - Sync with next Beacon\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -646,7 +525,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON;
}
chip_reset:
@@ -933,44 +812,6 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
}
}
-static void setup_ht_cap(struct ath_softc *sc,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 tx_streams, rx_streams;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- /* set up supported mcs set */
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
-
- if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
@@ -992,7 +833,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
* on the receipt of the first Beacon frame (i.e.,
* after time sync with the AP).
*/
- sc->sc_flags |= SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_BEACON_SYNC;
/* Configure the beacon */
ath_beacon_config(sc, vif);
@@ -1009,174 +850,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
}
}
-/********************************/
-/* LED functions */
-/********************************/
-
-static void ath_led_blink_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- ath_led_blink_work.work);
-
- if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
- return;
-
- if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work,
- (sc->sc_flags & SC_OP_LED_ON) ?
- msecs_to_jiffies(sc->led_off_duration) :
- msecs_to_jiffies(sc->led_on_duration));
-
- sc->led_on_duration = sc->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- sc->led_off_duration = sc->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- sc->led_on_cnt = sc->led_off_cnt = 0;
- if (sc->sc_flags & SC_OP_LED_ON)
- sc->sc_flags &= ~SC_OP_LED_ON;
- else
- sc->sc_flags |= SC_OP_LED_ON;
-}
-
-static void ath_led_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath_softc *sc = led->sc;
-
- switch (brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- sc->sc_flags &= ~SC_OP_LED_ON;
- } else {
- sc->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- sc->sc_flags |= SC_OP_LED_ON;
- } else {
- sc->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->sc = sc;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
- return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
-}
-
-static void ath_deinit_leds(struct ath_softc *sc)
-{
- ath_unregister_led(&sc->assoc_led);
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- ath_unregister_led(&sc->tx_led);
- ath_unregister_led(&sc->rx_led);
- ath_unregister_led(&sc->radio_led);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-}
-
-static void ath_init_leds(struct ath_softc *sc)
-{
- char *trigger;
- int ret;
-
- if (AR_SREV_9287(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9287;
- else
- sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(sc->hw);
- snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
- "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->radio_led, trigger);
- sc->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(sc->hw);
- snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->assoc_led, trigger);
- sc->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(sc->hw);
- snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
- "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->tx_led, trigger);
- sc->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(sc->hw);
- snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
- "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->rx_led, trigger);
- sc->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- return;
-
-fail:
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
- ath_deinit_leds(sc);
-}
-
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1261,711 +934,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
-/*******************/
-/* Rfkill */
-/*******************/
-
-static bool ath_is_rfkill_set(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
- ah->rfkill_polarity;
-}
-
-static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- bool blocked = !!ath_is_rfkill_set(sc);
-
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-}
-
-static void ath_start_rfkill_poll(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- wiphy_rfkill_start_polling(sc->hw->wiphy);
-}
-
-static void ath9k_uninit_hw(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- BUG_ON(!ah);
-
- ath9k_exit_debug(ah);
- ath9k_hw_detach(ah);
- sc->sc_ah = NULL;
-}
-
-static void ath_clean_core(struct ath_softc *sc)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- int i = 0;
-
- ath9k_ps_wakeup(sc);
-
- dev_dbg(sc->dev, "Detach ATH hw\n");
-
- ath_deinit_leds(sc);
- wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
- ieee80211_unregister_hw(hw);
- ath_rx_cleanup(sc);
- ath_tx_cleanup(sc);
-
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
-
- if (!(sc->sc_flags & SC_OP_INVALID))
- ath9k_setpower(sc, ATH9K_PM_AWAKE);
-
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- if ((sc->btcoex.no_stomp_timer) &&
- ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
-}
-
-void ath_detach(struct ath_softc *sc)
-{
- ath_clean_core(sc);
- ath9k_uninit_hw(sc);
-}
-
-void ath_cleanup(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_clean_core(sc);
- free_irq(sc->irq, sc);
- ath_bus_cleanup(common);
- kfree(sc->sec_wiphy);
- ieee80211_free_hw(sc->hw);
-
- ath9k_uninit_hw(sc);
-}
-
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
-
- return ath_reg_notifier_apply(wiphy, request, reg);
-}
-
-/*
- * Detects if there is any priority bt traffic
- */
-static void ath_detect_bt_priority(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
- btcoex->bt_priority_cnt++;
-
- if (time_after(jiffies, btcoex->bt_priority_time +
- msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
- "BT priority traffic detected");
- sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
- } else {
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
- }
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- }
-}
-
-/*
- * Configures appropriate weight based on stomp type.
- */
-static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
- enum ath_stomp_type stomp_type)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_ALL_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_NONE_WLAN_WGHT);
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
- }
-
- ath9k_hw_btcoex_enable(ah);
-}
-
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 timer_next,
- u32 timer_period)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
-
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
- ath9k_hw_gen_timer_stop(ah, timer);
-
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-/*
- * This is the master bt coex timer which runs for every
- * 45ms, bt traffic will be given priority during 55% of this
- * period while wlan gets remaining 45%
- */
-static void ath_btcoex_period_timer(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *) data;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_detect_bt_priority(sc);
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- ath9k_gen_timer_start(ah,
- btcoex->no_stomp_timer,
- (ath9k_hw_gettsf32(ah) +
- btcoex->btcoex_no_stomp),
- btcoex->btcoex_no_stomp * 10);
- btcoex->hw_timer_enabled = true;
- }
-
- mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
-}
-
-/*
- * Generic tsf based hw timer which configures weight
- * registers to time slice between wlan and bt traffic
- */
-static void ath_btcoex_no_stomp_timer(void *arg)
-{
- struct ath_softc *sc = (struct ath_softc *)arg;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
- else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-}
-
-static int ath_init_btcoex_timer(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
- btcoex->btcoex_period / 100;
-
- setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
- (unsigned long) sc);
-
- spin_lock_init(&btcoex->btcoex_lock);
-
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
-
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
-
- return 0;
-}
-
-/*
- * Read and write, they both share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only accept sanely 2 requests. After
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- iowrite32(val, sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- iowrite32(val, sc->mem + reg_offset);
-}
-
-static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- u32 val;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- val = ioread32(sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- val = ioread32(sc->mem + reg_offset);
- return val;
-}
-
-static const struct ath_ops ath9k_common_ops = {
- .read = ath9k_ioread32,
- .write = ath9k_iowrite32,
-};
-
-/*
- * Initialize and fill ath_softc, ath_sofct is the
- * "Software Carrier" struct. Historically it has existed
- * to allow the separation between hardware specific
- * variables (now in ath_hw) and driver specific variables.
- */
-static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ath_hw *ah = NULL;
- struct ath_common *common;
- int r = 0, i;
- int csz = 0;
- int qnum;
-
- /* XXX: hardware will not be ready until ath_open() being called */
- sc->sc_flags |= SC_OP_INVALID;
-
- spin_lock_init(&sc->wiphy_lock);
- spin_lock_init(&sc->sc_resetlock);
- spin_lock_init(&sc->sc_serial_rw);
- spin_lock_init(&sc->ani_lock);
- spin_lock_init(&sc->sc_pm_lock);
- mutex_init(&sc->mutex);
- tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
- (unsigned long)sc);
-
- ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
- if (!ah)
- return -ENOMEM;
-
- ah->hw_version.devid = devid;
- ah->hw_version.subsysid = subsysid;
- sc->sc_ah = ah;
-
- common = ath9k_hw_common(ah);
- common->ops = &ath9k_common_ops;
- common->bus_ops = bus_ops;
- common->ah = ah;
- common->hw = sc->hw;
- common->priv = sc;
- common->debug_mask = ath9k_debug;
-
- /*
- * Cache line size is used to size and align various
- * structures used to communicate with the hardware.
- */
- ath_read_cachesize(common, &csz);
- /* XXX assert csz is non-zero */
- common->cachelsz = csz << 2; /* convert to bytes */
-
- r = ath9k_hw_init(ah);
- if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", r);
- goto bad_free_hw;
- }
-
- if (ath9k_init_debug(ah) < 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to create debugfs files\n");
- goto bad_free_hw;
- }
-
- /* Get the hardware key cache size. */
- common->keymax = ah->caps.keycache_size;
- if (common->keymax > ATH_KEYMAX) {
- ath_print(common, ATH_DBG_ANY,
- "Warning, using only %u entries in %u key cache\n",
- ATH_KEYMAX, common->keymax);
- common->keymax = ATH_KEYMAX;
- }
-
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < common->keymax; i++)
- ath9k_hw_keyreset(ah, (u16) i);
-
- /* default to MONITOR mode */
- sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
-
- /*
- * Allocate hardware transmit queues: one queue for
- * beacon frames and one data queue for each QoS
- * priority. Note that the hal handles reseting
- * these queues at the needed time.
- */
- sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
- if (sc->beacon.beaconq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup a beacon xmit queue\n");
- r = -EIO;
- goto bad2;
- }
- sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- if (sc->beacon.cabq == NULL) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup CAB xmit queue\n");
- r = -EIO;
- goto bad2;
- }
-
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
- ath_cabq_update(sc);
-
- for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
- sc->tx.hwq_map[i] = -1;
-
- /* Setup data queues */
- /* NB: ensure BK queue is the lowest priority h/w queue */
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BK traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BE traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VI traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VO traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- /* Initializes the noise floor to a reasonable default value.
- * Later on this will be updated during ANI processing. */
-
- common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
- setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
-
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
- 0, 1, NULL);
- }
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
- common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
- 1, NULL);
-
- sc->config.txpowlimit = ATH_TXPOWER_MAX;
-
- /* 11n Capabilities */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- sc->sc_flags |= SC_OP_TXAGGR;
- sc->sc_flags |= SC_OP_RXAGGR;
- }
-
- common->tx_chainmask = ah->caps.tx_chainmask;
- common->rx_chainmask = ah->caps.rx_chainmask;
-
- ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
- sc->rx.defant = ath9k_hw_getdefantenna(ah);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
- memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
-
- sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
-
- /* initialize beacon slots */
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
-
- /* setup channels and rates */
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
- sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_chantable);
- sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
- sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_chantable);
- sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
-
- switch (ah->btcoex_hw.scheme) {
- case ATH_BTCOEX_CFG_NONE:
- break;
- case ATH_BTCOEX_CFG_2WIRE:
- ath9k_hw_btcoex_init_2wire(ah);
- break;
- case ATH_BTCOEX_CFG_3WIRE:
- ath9k_hw_btcoex_init_3wire(ah);
- r = ath_init_btcoex_timer(sc);
- if (r)
- goto bad2;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- ath9k_hw_init_btcoex_hw(ah, qnum);
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- return 0;
-bad2:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-bad_free_hw:
- ath9k_uninit_hw(sc);
- return r;
-}
-
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->queues = 4;
- hw->max_rates = 4;
- hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
- /* Hardware supports 10 but we use 4 */
- hw->max_rate_tries = 4;
- hw->sta_data_size = sizeof(struct ath_node);
- hw->vif_data_size = sizeof(struct ath_vif);
-
- hw->rate_control_algorithm = "ath9k_rate_control";
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &sc->sbands[IEEE80211_BAND_2GHZ];
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &sc->sbands[IEEE80211_BAND_5GHZ];
-}
-
-/* Device driver core initialization */
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_common *common;
- struct ath_hw *ah;
- int error = 0, i;
- struct ath_regulatory *reg;
-
- dev_dbg(sc->dev, "Attach ATH hw\n");
-
- error = ath_init_softc(devid, sc, subsysid, bus_ops);
- if (error != 0)
- return error;
-
- ah = sc->sc_ah;
- common = ath9k_hw_common(ah);
-
- /* get mac address from hardware and set in mac80211 */
-
- SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
-
- ath_set_hw_capab(sc, hw);
-
- error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
- ath9k_reg_notifier);
- if (error)
- return error;
-
- reg = &common->regulatory;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
-
- /* initialize tx/rx engine */
- error = ath_tx_init(sc, ATH_TXBUF);
- if (error != 0)
- goto error_attach;
-
- error = ath_rx_init(sc, ATH_RXBUF);
- if (error != 0)
- goto error_attach;
-
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-
- error = ieee80211_register_hw(hw);
-
- if (!ath_is_world_regd(reg)) {
- error = regulatory_hint(hw->wiphy, reg->alpha2);
- if (error)
- goto error_attach;
- }
-
- /* Initialize LED control */
- ath_init_leds(sc);
-
- ath_start_rfkill_poll(sc);
-
- return 0;
-
-error_attach:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- ath9k_uninit_hw(sc);
-
- return error;
-}
-
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1976,6 +944,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
/* Stop ANI */
del_timer_sync(&common->ani.timer);
+ ieee80211_stop_queues(hw);
+
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@@ -2017,131 +987,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
+ ieee80211_wake_queues(hw);
+
/* Start ANI */
ath_start_ani(common);
return r;
}
-/*
- * This function will allocate both the DMA descriptor structure, and the
- * buffers it contains. These are used to contain the descriptors used
- * by the system.
-*/
-int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
- struct list_head *head, const char *name,
- int nbuf, int ndesc)
-{
-#define DS2PHYS(_dd, _ds) \
- ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
-#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
-#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
- struct ath_buf *bf;
- int i, bsize, error;
-
- ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
- name, nbuf, ndesc);
-
- INIT_LIST_HEAD(head);
- /* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
- error = -ENOMEM;
- goto fail;
- }
-
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
-
- /*
- * Need additional DMA memory because we can't use
- * descriptors that cross the 4K page boundary. Assume
- * one skipped descriptor per 4K page.
- */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- u32 ndesc_skipped =
- ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
- u32 dma_len;
-
- while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
- dd->dd_desc_len += dma_len;
-
- ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
- }
-
- /* allocate descriptors */
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
- if (dd->dd_desc == NULL) {
- error = -ENOMEM;
- goto fail;
- }
- ds = dd->dd_desc;
- ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
- name, ds, (u32) dd->dd_desc_len,
- ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
-
- /* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = kzalloc(bsize, GFP_KERNEL);
- if (bf == NULL) {
- error = -ENOMEM;
- goto fail2;
- }
- dd->dd_bufptr = bf;
-
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += ndesc;
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
- }
- }
- list_add_tail(&bf->list, head);
- }
- return 0;
-fail2:
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-fail:
- memset(dd, 0, sizeof(*dd));
- return error;
-#undef ATH_DESC_4KB_BOUND_CHECK
-#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
-#undef DS2PHYS
-}
-
-void ath_descdma_cleanup(struct ath_softc *sc,
- struct ath_descdma *dd,
- struct list_head *head)
-{
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-
- INIT_LIST_HEAD(head);
- kfree(dd->dd_bufptr);
- memset(dd, 0, sizeof(*dd));
-}
-
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
int qnum;
@@ -2220,28 +1073,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
/* mac80211 callbacks */
/**********************/
-/*
- * (Re)start btcoex timers
- */
-static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers");
-
- /* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
-
- mod_timer(&btcoex->period_timer, jiffies);
-}
-
static int ath9k_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2411,11 +1242,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
if (ieee80211_is_pspoll(hdr->frame_control)) {
ath_print(common, ATH_DBG_PS,
"Sending PS-Poll to pick a buffered frame\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
ath_print(common, ATH_DBG_PS,
"Wake up to complete TX\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
+ sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
* The actual restore operation will happen only after
@@ -2468,22 +1299,6 @@ exit:
return 0;
}
-/*
- * Pause btcoex timer and bt duty cycle timer
- */
-static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- del_timer_sync(&btcoex->period_timer);
-
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- btcoex->hw_timer_enabled = false;
-}
-
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2550,12 +1365,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
static int ath9k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
@@ -2567,7 +1382,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
goto out;
}
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
ic_opmode = NL80211_IFTYPE_STATION;
break;
@@ -2578,11 +1393,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ret = -ENOBUFS;
goto out;
}
- ic_opmode = conf->type;
+ ic_opmode = vif->type;
break;
default:
ath_print(common, ATH_DBG_FATAL,
- "Interface type %d not yet supported\n", conf->type);
+ "Interface type %d not yet supported\n", vif->type);
ret = -EOPNOTSUPP;
goto out;
}
@@ -2614,18 +1429,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
* Enable MIB interrupts when there are hardware phy counters.
* Note we only do this (at the moment) for station mode.
*/
- if ((conf->type == NL80211_IFTYPE_STATION) ||
- (conf->type == NL80211_IFTYPE_ADHOC) ||
- (conf->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
sc->imask |= ATH9K_INT_MIB;
sc->imask |= ATH9K_INT_TSFOOR;
}
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
- if (conf->type == NL80211_IFTYPE_AP ||
- conf->type == NL80211_IFTYPE_ADHOC ||
- conf->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MONITOR)
ath_start_ani(common);
out:
@@ -2634,12 +1449,12 @@ out:
}
static void ath9k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
int i;
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -2662,7 +1477,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
sc->sc_flags &= ~SC_OP_BEACONS;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- if (sc->beacon.bslot[i] == conf->vif) {
+ if (sc->beacon.bslot[i] == vif) {
printk(KERN_DEBUG "%s: vif had allocated beacon "
"slot\n", __func__);
sc->beacon.bslot[i] = NULL;
@@ -2727,7 +1542,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
- sc->sc_flags |= SC_OP_PS_ENABLED;
+ sc->ps_flags |= PS_ENABLED;
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
@@ -2740,23 +1555,23 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
* At this point we know hardware has received an ACK
* of a previously sent null data frame.
*/
- if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+ if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
sc->ps_enabled = true;
ath9k_hw_setrxabort(sc->sc_ah, 1);
}
} else {
sc->ps_enabled = false;
- sc->sc_flags &= ~(SC_OP_PS_ENABLED |
- SC_OP_NULLFUNC_COMPLETED);
+ sc->ps_flags &= ~(PS_ENABLED |
+ PS_NULLFUNC_COMPLETED);
ath9k_setpower(sc, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK);
+ sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK);
if (sc->imask & ATH9K_INT_TIM_TIMER) {
sc->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2766,6 +1581,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -3133,6 +1956,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
if (ath9k_wiphy_scanning(sc)) {
@@ -3148,10 +1972,9 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
aphy->state = ATH_WIPHY_SCAN;
ath9k_wiphy_pause_all_forced(sc, aphy);
-
- spin_lock_bh(&sc->ani_lock);
sc->sc_flags |= SC_OP_SCANNING;
- spin_unlock_bh(&sc->ani_lock);
+ del_timer_sync(&common->ani.timer);
+ cancel_delayed_work_sync(&sc->tx_complete_work);
mutex_unlock(&sc->mutex);
}
@@ -3159,13 +1982,14 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
- spin_lock_bh(&sc->ani_lock);
aphy->state = ATH_WIPHY_ACTIVE;
sc->sc_flags &= ~SC_OP_SCANNING;
sc->sc_flags |= SC_OP_FULL_RESET;
- spin_unlock_bh(&sc->ani_lock);
+ ath_start_ani(common);
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
ath_beacon_config(sc, NULL);
mutex_unlock(&sc->mutex);
}
@@ -3190,63 +2014,3 @@ struct ieee80211_ops ath9k_ops = {
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
};
-
-static int __init ath9k_init(void)
-{
- int error;
-
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- printk(KERN_ERR
- "ath9k: Unable to register rate control "
- "algorithm: %d\n",
- error);
- goto err_out;
- }
-
- error = ath9k_debug_create_root();
- if (error) {
- printk(KERN_ERR
- "ath9k: Unable to create debugfs root: %d\n",
- error);
- goto err_rate_unregister;
- }
-
- error = ath_pci_init();
- if (error < 0) {
- printk(KERN_ERR
- "ath9k: No PCI devices found, driver not installed.\n");
- error = -ENODEV;
- goto err_remove_root;
- }
-
- error = ath_ahb_init();
- if (error < 0) {
- error = -ENODEV;
- goto err_pci_exit;
- }
-
- return 0;
-
- err_pci_exit:
- ath_pci_exit();
-
- err_remove_root:
- ath9k_debug_remove_root();
- err_rate_unregister:
- ath_rate_control_unregister();
- err_out:
- return error;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
- ath_ahb_exit();
- ath_pci_exit();
- ath9k_debug_remove_root();
- ath_rate_control_unregister();
- printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
-}
-module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f7af5ea54753..edda92b1eb7a 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,7 +18,7 @@
#include <linux/pci.h>
#include "ath9k.h"
-static struct pci_device_id ath_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
@@ -113,25 +113,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u16 subsysid;
u32 val;
int ret = 0;
- struct ath_hw *ah;
char hw_name[64];
if (pci_enable_device(pdev))
return -EIO;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
- goto bad;
+ goto err_dma;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA consistent "
"DMA enable failed\n");
- goto bad;
+ goto err_dma;
}
/*
@@ -171,22 +168,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
ret = -ENODEV;
- goto bad;
+ goto err_region;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
printk(KERN_ERR "PCI memory map error\n") ;
ret = -EIO;
- goto bad1;
+ goto err_iomap;
}
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
- dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
- goto bad2;
+ goto err_alloc_hw;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +198,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->dev = &pdev->dev;
sc->mem = mem;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
- ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
- goto bad3;
- }
-
- /* setup interrupt service routine */
-
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
- goto bad4;
+ goto err_irq;
}
sc->irq = pdev->irq;
- ah = sc->sc_ah;
- ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
+ ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device\n");
+ goto err_init;
+ }
+
+ ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
printk(KERN_INFO
"%s: %s mem=0x%lx, irq=%d\n",
wiphy_name(hw->wiphy),
@@ -227,15 +221,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(unsigned long)mem, pdev->irq);
return 0;
-bad4:
- ath_detach(sc);
-bad3:
+
+err_init:
+ free_irq(sc->irq, sc);
+err_irq:
ieee80211_free_hw(hw);
-bad2:
+err_alloc_hw:
pci_iounmap(pdev, mem);
-bad1:
+err_iomap:
pci_release_region(pdev, 0);
-bad:
+err_region:
+ /* Nothing */
+err_dma:
pci_disable_device(pdev);
return ret;
}
@@ -245,8 +242,12 @@ static void ath_pci_remove(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_cleanup(sc);
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+ ath_bus_cleanup(common);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 9eb96f506998..4f6d6fd442f4 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -57,6 +57,10 @@ enum {
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS) \
+ || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI))
#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae69..40b5d05edcce 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
return; /* not from our current AP */
- sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
- if (sc->sc_flags & SC_OP_BEACON_SYNC) {
- sc->sc_flags &= ~SC_OP_BEACON_SYNC;
+ if (sc->ps_flags & PS_BEACON_SYNC) {
+ sc->ps_flags &= ~PS_BEACON_SYNC;
ath_print(common, ATH_DBG_PS,
"Reconfigure Beacon timers based on "
"timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
*/
ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
+ if (sc->ps_flags & PS_WAIT_FOR_CAB) {
/*
* This can happen if a broadcast frame is dropped or the AP
* fails to send a frame indicating that all CAB frames have
* been delivered.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"PS wait for CAB frames timed out\n");
}
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
/* Process Beacon and CAB receive in PS state */
- if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
+ if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
ieee80211_is_beacon(hdr->frame_control))
ath_rx_ps_beacon(sc, skb);
- else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
+ else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
(ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_action(hdr->frame_control)) &&
is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
* No more broadcast/multicast frames to be received at this
* point.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"All PS CAB frames received, back to sleep\n");
- } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
+ } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having received "
"PS-Poll data (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
}
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
hw = ath_get_virt_hw(sc, hdr);
rx_stats = &ds->ds_rxstat;
+ ath_debug_stat_rx(sc, bf);
+
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
sc->rx.rxotherant = 0;
}
- if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA)))
+ if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA)))
ath_rx_ps(sc, skb);
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e7..a43fbf84dab9 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -152,7 +152,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
SET_IEEE80211_PERM_ADDR(hw, addr);
- ath_set_hw_capab(sc, hw);
+ ath9k_set_hw_capab(sc, hw);
error = ieee80211_register_hw(hw);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fa12b9060b0b..a821bb687b3b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1648,7 +1648,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
/* tag if this is a nullfunc frame to enable PS when AP acks it */
if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
bf->bf_isnullfunc = true;
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
} else
bf->bf_isnullfunc = false;
@@ -1858,15 +1858,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
+ if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having "
"received TX status (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2053,11 +2053,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
+ if ((sc->ps_flags & PS_ENABLED)) {
sc->ps_enabled = true;
ath9k_hw_setrxabort(sc->sc_ah, 1);
} else
- sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
+ sc->ps_flags |= PS_NULLFUNC_COMPLETED;
}
/*
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750f..9ab1192004c0 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced3..073be566d05e 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -3,6 +3,7 @@ config B43
depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
select SSB
select FW_LOADER
+ select SSB_BLOCKIO
---help---
b43 is a driver for the Broadcom 43xx series wireless devices.
@@ -78,14 +79,6 @@ config B43_SDIO
If unsure, say N.
-# Data transfers to the device via PIO
-# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
-config B43_PIO
- bool
- depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
- select SSB_BLOCKIO
- default y
-
config B43_NPHY
bool "Pre IEEE 802.11n support (BROKEN)"
depends on B43 && EXPERIMENTAL && BROKEN
@@ -137,12 +130,4 @@ config B43_DEBUG
for production use.
Only say Y, if you are debugging a problem in the b43 driver sourcecode.
-config B43_FORCE_PIO
- bool "Force usage of PIO instead of DMA"
- depends on B43 && B43_DEBUG
- ---help---
- This will disable DMA and always enable PIO instead.
- Say N!
- This is only for debugging the PIO engine code. You do
- _NOT_ want to enable this.
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542dc..5e83b6f0a3a0 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
b43-y += lo.o
b43-y += wa.o
b43-y += dma.o
-b43-$(CONFIG_B43_PIO) += pio.o
+b43-y += pio.o
b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index fe3bf9491997..2f12a750bc98 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -821,11 +821,9 @@ struct b43_wl {
/* The device LEDs. */
struct b43_leds leds;
-#ifdef CONFIG_B43_PIO
/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
u8 pio_tailspace[4] __attribute__((__aligned__(8)));
-#endif /* CONFIG_B43_PIO */
};
static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -876,20 +874,9 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
{
-#ifdef CONFIG_B43_PIO
return dev->__using_pio_transfers;
-#else
- return 0;
-#endif
}
-#ifdef CONFIG_B43_FORCE_PIO
-# define B43_FORCE_PIO 1
-#else
-# define B43_FORCE_PIO 0
-#endif
-
-
/* Message printing */
void b43info(struct b43_wl *wl, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 88d1fd02d40a..615af22c49fd 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1653,7 +1653,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
-#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
u16 mmio_base, bool enable)
{
@@ -1687,4 +1686,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
}
-#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 4c41cfe44f26..60290c06e950 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -102,6 +102,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
+static int modparam_pio;
+module_param_named(pio, modparam_pio, int, 0444);
+MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode");
static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -1786,8 +1789,8 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
dma_reason[4], dma_reason[5]);
b43err(dev->wl, "This device does not support DMA "
"on your system. Please use PIO instead.\n");
- b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
- "your kernel configuration.\n");
+ b43err(dev->wl, "Unload the b43 module and reload "
+ "with 'pio=1'\n");
return;
}
if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -4353,7 +4356,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
(dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
- B43_FORCE_PIO) {
+ modparam_pio) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4388,7 +4391,7 @@ err_busdown:
}
static int b43_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -4396,24 +4399,24 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_MESH_POINT &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_MESH_POINT &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43dbg(wl, "Adding Interface type %d\n", conf->type);
+ b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
b43_adjust_opmode(dev);
b43_set_pretbtt(dev);
@@ -4428,17 +4431,17 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
}
static void b43_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
- b43dbg(wl, "Removing Interface type %d\n", conf->type);
+ b43dbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43_WARN_ON(!wl->operating);
- B43_WARN_ON(wl->vif != conf->vif);
+ B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff86..eb4fb4581edb 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -80,6 +80,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
dev->phy.lp = NULL;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +102,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
maxpwr = bus->sprom.maxpwr_bg;
lpphy->max_tx_pwr_med_band = maxpwr;
cckpo = bus->sprom.cck2gpo;
+ /*
+ * We don't read SPROM's opo as specs say. On rev8 SPROMs
+ * opo == ofdm2gpo and we don't know any SSB with LP-PHY
+ * and SPROM rev below 8.
+ */
+ B43_WARN_ON(bus->sprom.revision < 8);
ofdmpo = bus->sprom.ofdm2gpo;
if (cckpo) {
for (i = 0; i < 4; i++) {
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9ddad..7b3c42f93a16 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
#define B43_PIO_MAX_NR_TXPACKETS 32
-#ifdef CONFIG_B43_PIO
-
struct b43_pio_txpacket {
/* Pointer to the TX queue we belong to. */
struct b43_pio_txqueue *queue;
@@ -169,42 +167,4 @@ void b43_pio_rx(struct b43_pio_rxqueue *q);
void b43_pio_tx_suspend(struct b43_wldev *dev);
void b43_pio_tx_resume(struct b43_wldev *dev);
-
-#else /* CONFIG_B43_PIO */
-
-
-static inline int b43_pio_init(struct b43_wldev *dev)
-{
- return 0;
-}
-static inline void b43_pio_free(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_stop(struct b43_wldev *dev)
-{
-}
-static inline int b43_pio_tx(struct b43_wldev *dev,
- struct sk_buff *skb)
-{
- return 0;
-}
-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
- const struct b43_txstatus *status)
-{
-}
-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
-{
-}
-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
-{
-}
-
-#endif /* CONFIG_B43_PIO */
#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4a905b6a886b..fbae264095cc 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3361,7 +3361,7 @@ err_kfree_lo_control:
}
static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
@@ -3370,23 +3370,23 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43legacydbg(wl, "Adding Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3403,18 +3403,18 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
}
static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
unsigned long flags;
- b43legacydbg(wl, "Removing Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43legacy_WARN_ON(!wl->operating);
- B43legacy_WARN_ON(wl->vif != conf->vif);
+ B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c882184..d70732819423 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2618,6 +2618,15 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
int events = 0;
u16 ev;
+ /* Detect early interrupt before driver is fully configued */
+ if (!dev->base_addr) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
+ dev->name);
+ }
+ return IRQ_HANDLED;
+ }
+
iface = netdev_priv(dev);
local = iface->local;
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f2..4d97ae37499b 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -39,7 +39,7 @@ struct hostap_pci_priv {
/* FIX: do we need mb/wmb/rmb with memory operations? */
-static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a44..fc04ccdc5bef 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -60,7 +60,7 @@ struct hostap_plx_priv {
#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
-static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
PLXDEV(0x126c, 0x8030, "Nortel emobility"),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f81..9b72c45a7748 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
-static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bedc..63c2a7ade5fb 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11524,7 +11524,7 @@ out:
}
/* PCI driver stuff */
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8414178bcff4..0db1fda94a65 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -105,6 +105,7 @@ static struct iwl_lib_ops iwl1000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -140,7 +141,7 @@ static struct iwl_lib_ops iwl1000_lib = {
},
};
-static struct iwl_ops iwl1000_ops = {
+static const struct iwl_ops iwl1000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl1000_lib,
.hcmd = &iwl5000_hcmd,
@@ -173,7 +174,6 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
struct iwl_cfg iwl1000_bg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 234891d8cc10..6cde661ce0bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -2804,7 +2804,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
};
-static struct iwl_ops iwl3945_ops = {
+static const struct iwl_ops iwl3945_ops = {
.ucode = &iwl3945_ucode,
.lib = &iwl3945_lib,
.hcmd = &iwl3945_hcmd,
@@ -2849,7 +2849,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.broken_powersave = true,
};
-struct pci_device_id iwl3945_hw_card_ids[] = {
+DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 531fa125f5a6..bc532ff4f883 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -37,7 +37,7 @@
#include <net/ieee80211_radiotap.h>
/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern struct pci_device_id iwl3945_hw_card_ids[];
+extern const struct pci_device_id iwl3945_hw_card_ids[];
#include "iwl-csr.h"
#include "iwl-prph.h"
@@ -226,7 +226,8 @@ extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,int left);
-extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 484c5fdf7c2a..6a004abb5973 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1961,7 +1961,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
struct ieee80211_tx_info *info;
struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
u32 status = le32_to_cpu(tx_resp->u.status);
- int tid = MAX_TID_COUNT;
+ int uninitialized_var(tid);
int sta_id;
int freed;
u8 *qc = NULL;
@@ -2208,7 +2208,7 @@ static struct iwl_lib_ops iwl4965_lib = {
},
};
-static struct iwl_ops iwl4965_ops = {
+static const struct iwl_ops iwl4965_ops = {
.ucode = &iwl4965_ucode,
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
@@ -2239,7 +2239,6 @@ struct iwl_cfg iwl4965_agn_cfg = {
.broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 33a5866538e7..c3f8ec0a38b1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -781,7 +781,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
@@ -800,12 +800,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
if (txq_id != IWL_CMD_QUEUE_NUM)
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1466,6 +1466,7 @@ struct iwl_lib_ops iwl5000_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1518,6 +1519,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1555,7 +1557,7 @@ static struct iwl_lib_ops iwl5150_lib = {
},
};
-static struct iwl_ops iwl5000_ops = {
+static const struct iwl_ops iwl5000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5000_lib,
.hcmd = &iwl5000_hcmd,
@@ -1563,7 +1565,7 @@ static struct iwl_ops iwl5000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_ops iwl5150_ops = {
+static const struct iwl_ops iwl5150_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5150_lib,
.hcmd = &iwl5000_hcmd,
@@ -1599,7 +1601,6 @@ struct iwl_cfg iwl5300_agn_cfg = {
.ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1668,7 +1669,6 @@ struct iwl_cfg iwl5100_agn_cfg = {
.ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
struct iwl_cfg iwl5350_agn_cfg = {
@@ -1692,7 +1692,6 @@ struct iwl_cfg iwl5350_agn_cfg = {
.ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
struct iwl_cfg iwl5150_agn_cfg = {
@@ -1716,7 +1715,6 @@ struct iwl_cfg iwl5150_agn_cfg = {
.ht_greenfield_support = true,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
struct iwl_cfg iwl5150_abg_cfg = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 74e571049273..a5a0ed4817a4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -215,6 +215,7 @@ static struct iwl_lib_ops iwl6000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -252,7 +253,7 @@ static struct iwl_lib_ops iwl6000_lib = {
},
};
-static struct iwl_ops iwl6000_ops = {
+static const struct iwl_ops iwl6000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
.hcmd = &iwl5000_hcmd,
@@ -267,7 +268,7 @@ static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
.calc_rssi = iwl5000_calc_rssi,
};
-static struct iwl_ops iwl6050_ops = {
+static const struct iwl_ops iwl6050_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
.hcmd = &iwl5000_hcmd,
@@ -306,7 +307,6 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -395,7 +395,6 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@@ -455,7 +454,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 1c9866daf815..344e99de4cab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -657,6 +657,131 @@ static void iwl_bg_statistics_periodic(unsigned long data)
iwl_send_statistics_request(priv, CMD_ASYNC, false);
}
+
+static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+ u32 start_idx, u32 num_events,
+ u32 mode)
+{
+ u32 i;
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (mode == 0)
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+ else
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(priv)) {
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return;
+ }
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /*
+ * "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing.
+ */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ 0, time, ev);
+ } else {
+ data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ time, data, ev);
+ }
+ }
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+void iwl_continuous_event_trace(struct iwl_priv *priv)
+{
+ u32 capacity; /* event log capacity in # entries */
+ u32 base; /* SRAM byte address of event log header */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ capacity = iwl_read_targ_mem(priv, base);
+ num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ } else
+ return;
+
+ if (num_wraps == priv->event_log.num_wraps) {
+ iwl_print_cont_event_trace(priv,
+ base, priv->event_log.next_entry,
+ next_entry - priv->event_log.next_entry,
+ mode);
+ priv->event_log.non_wraps_count++;
+ } else {
+ if ((num_wraps - priv->event_log.num_wraps) > 1)
+ priv->event_log.wraps_more_count++;
+ else
+ priv->event_log.wraps_once_count++;
+ trace_iwlwifi_dev_ucode_wrap_event(priv,
+ num_wraps - priv->event_log.num_wraps,
+ next_entry, priv->event_log.next_entry);
+ if (next_entry < priv->event_log.next_entry) {
+ iwl_print_cont_event_trace(priv, base,
+ priv->event_log.next_entry,
+ capacity - priv->event_log.next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ } else {
+ iwl_print_cont_event_trace(priv, base,
+ next_entry, capacity - next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ }
+ }
+ priv->event_log.num_wraps = num_wraps;
+ priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually set to execute every
+ * UCODE_TRACE_PERIOD milliseconds after the last timer expired
+ * this function is to perform continuous uCode event logging operation
+ * if enabled
+ */
+static void iwl_bg_ucode_trace(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->event_log.ucode_trace) {
+ iwl_continuous_event_trace(priv);
+ /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ }
+}
+
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -689,12 +814,14 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- RF_CARD_DISABLED)) {
+ CT_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
@@ -708,10 +835,10 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
- if (flags & RF_CARD_DISABLED)
+ if (flags & CT_CARD_DISABLED)
iwl_tt_enter_ct_kill(priv);
}
- if (!(flags & RF_CARD_DISABLED))
+ if (!(flags & CT_CARD_DISABLED))
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
@@ -1705,8 +1832,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
* iwl_print_event_log - Dump error event log to syslog
*
*/
-static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1716,7 +1844,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
else
@@ -1744,27 +1872,44 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
/**
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1772,21 +1917,26 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
} else {
- if (next_entry < size)
- iwl_print_event_log(priv, 0, next_entry, mode);
- else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ if (next_entry < size) {
+ pos = iwl_print_event_log(priv, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1794,7 +1944,8 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1802,6 +1953,8 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
@@ -1812,7 +1965,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
IWL_ERR(priv,
"Invalid event log pointer 0x%08X for %s uCode\n",
base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
+ return pos;
}
/* event log header */
@@ -1838,7 +1991,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1853,6 +2006,15 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return pos;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/*
* if uCode has wrapped back to top of log,
@@ -1860,17 +2022,22 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode, pos, buf, bufsz);
} else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
+ return pos;
}
/**
@@ -2456,6 +2623,10 @@ static int iwl_setup_mac(struct iwl_priv *priv)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ if (priv->cfg->sku & IWL_SKU_N)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
hw->sta_data_size = sizeof(struct iwl_station_priv);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
@@ -2784,6 +2955,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
else
return ret;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* do nothing */
+ return -EOPNOTSUPP;
default:
IWL_DEBUG_HT(priv, "unknown\n");
return -EINVAL;
@@ -3126,6 +3300,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->statistics_periodic.data = (unsigned long)priv;
priv->statistics_periodic.function = iwl_bg_statistics_periodic;
+ init_timer(&priv->ucode_trace);
+ priv->ucode_trace.data = (unsigned long)priv;
+ priv->ucode_trace.function = iwl_bg_ucode_trace;
+
if (!priv->cfg->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl_irq_tasklet, (unsigned long)priv);
@@ -3144,6 +3322,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
+ del_timer_sync(&priv->ucode_trace);
}
static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3188,6 +3367,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3589,7 +3769,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
*****************************************************************************/
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static struct pci_device_id iwl_hw_card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#ifdef CONFIG_IWL4965
{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 95a57b36a7ea..dc61906290e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -414,7 +414,6 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
static int iwl_sensitivity_write(struct iwl_priv *priv)
{
- int ret = 0;
struct iwl_sensitivity_cmd cmd ;
struct iwl_sensitivity_data *data = NULL;
struct iwl_host_cmd cmd_out = {
@@ -477,11 +476,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- ret = iwl_send_cmd(priv, &cmd_out);
- if (ret)
- IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
-
- return ret;
+ return iwl_send_cmd(priv, &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e91507531923..3320cce3d57b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -120,7 +120,6 @@ enum {
CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
/* 802.11h related */
- RADAR_NOTIFICATION = 0x70, /* not used */
REPLY_QUIET_CMD = 0x71, /* not used */
REPLY_CHANNEL_SWITCH = 0x72,
CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2510,7 +2509,7 @@ struct iwl_card_state_notif {
#define HW_CARD_DISABLED 0x01
#define SW_CARD_DISABLED 0x02
-#define RF_CARD_DISABLED 0x04
+#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
struct iwl_ct_kill_config {
@@ -2984,7 +2983,7 @@ struct statistics_rx_ht_phy {
__le32 agg_crc32_good;
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
- __le32 reserved2;
+ __le32 unsupport_mcs;
} __attribute__ ((packed));
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3086,8 @@ struct statistics_div {
} __attribute__ ((packed));
struct statistics_general {
- __le32 temperature;
- __le32 temperature_m;
+ __le32 temperature; /* radio temperature */
+ __le32 temperature_m; /* for 5000 and up, this is radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 574d36658702..5b56307a3812 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -450,8 +450,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
if (priv->cfg->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
- (priv->cfg->sm_ps_mode << 2));
max_bit_rate = MAX_BIT_RATE_20_MHZ;
if (priv->hw_params.ht40_channel & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -636,7 +634,7 @@ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
static bool is_single_rx_stream(struct iwl_priv *priv)
{
- return !priv->current_ht_config.is_ht ||
+ return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
priv->current_ht_config.single_chain_sufficient;
}
@@ -1003,28 +1001,18 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
*/
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
- int idle_cnt = active_cnt;
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-
- /* # Rx chains when idling and maybe trying to save power */
- switch (priv->cfg->sm_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
- IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- case WLAN_HT_CAP_SM_PS_INVALID:
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (priv->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
default:
- IWL_ERR(priv, "invalid sm_ps mode %u\n",
- priv->cfg->sm_ps_mode);
- WARN_ON(1);
- break;
+ WARN(1, "invalid SMPS mode %d",
+ priv->current_ht_config.smps);
+ return active_cnt;
}
- return idle_cnt;
}
/* up to 4 chains */
@@ -1363,7 +1351,9 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
priv->cfg->ops->lib->dump_nic_error_log(priv);
- priv->cfg->ops->lib->dump_nic_event_log(priv, false);
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
iwl_print_rx_config_cmd(priv);
@@ -2344,6 +2334,21 @@ static void iwl_ht_conf(struct iwl_priv *priv,
IWL_DEBUG_MAC80211(priv, "leave\n");
}
+static inline void iwl_set_no_assoc(struct iwl_priv *priv)
+{
+ priv->assoc_id = 0;
+ iwl_led_disassociate(priv);
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ priv->staging_rxon.filter_flags &=
+ ~RXON_FILTER_ASSOC_MSK;
+ priv->staging_rxon.assoc_id = 0;
+ iwlcore_commit_rxon(priv);
+}
+
#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
void iwl_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -2475,20 +2480,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
if (!iwl_is_rfkill(priv))
priv->cfg->ops->lib->post_associate(priv);
- } else {
- priv->assoc_id = 0;
- iwl_led_disassociate(priv);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * send
- */
- priv->staging_rxon.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- priv->staging_rxon.assoc_id = 0;
- iwlcore_commit_rxon(priv);
- }
+ } else
+ iwl_set_no_assoc(priv);
}
if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2503,12 +2496,14 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if ((changes & BSS_CHANGED_BEACON_ENABLED) &&
- vif->bss_conf.enable_beacon) {
- memcpy(priv->staging_rxon.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- iwlcore_config_ap(priv);
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(priv->staging_rxon.bssid_addr,
+ bss_conf->bssid, ETH_ALEN);
+ memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
+ iwlcore_config_ap(priv);
+ } else
+ iwl_set_no_assoc(priv);
}
mutex_unlock(&priv->mutex);
@@ -2594,12 +2589,12 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
EXPORT_SYMBOL(iwl_set_mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
unsigned long flags;
- IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
+ IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
@@ -2607,19 +2602,19 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
}
spin_lock_irqsave(&priv->lock, flags);
- priv->vif = conf->vif;
- priv->iw_mode = conf->type;
+ priv->vif = vif;
+ priv->iw_mode = vif->type;
spin_unlock_irqrestore(&priv->lock, flags);
mutex_lock(&priv->mutex);
- if (conf->mac_addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (vif->addr) {
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
}
- if (iwl_set_mode(priv, conf->type) == -EAGAIN)
+ if (iwl_set_mode(priv, vif->type) == -EAGAIN)
/* we are not ready, will run again when ready */
set_bit(STATUS_MODE_PENDING, &priv->status);
@@ -2631,7 +2626,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -2644,7 +2639,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
}
- if (priv->vif == conf->vif) {
+ if (priv->vif == vif) {
priv->vif = NULL;
memset(priv->bssid, 0, ETH_ALEN);
}
@@ -2684,6 +2679,21 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
}
+ if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+ }
/* during scanning mac80211 will delay channel setting until
* scan finish with changed = 0
@@ -2780,10 +2790,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
- /* call to ensure that 4965 rx_chain is set properly in monitor mode */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv);
-
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@@ -3191,6 +3197,77 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
EXPORT_SYMBOL(iwl_update_stats);
#endif
+const static char *get_csr_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void iwl_dump_csr(struct iwl_priv *priv)
+{
+ int i;
+ u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(priv, "CSR values:\n");
+ IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(priv, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(priv, csr_tbl[i]));
+ }
+}
+EXPORT_SYMBOL(iwl_dump_csr);
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 27ca859e7453..8deb83bfe182 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -63,8 +63,6 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
-#include <generated/utsrelease.h>
-
/************************
* forward declarations *
************************/
@@ -72,7 +70,7 @@ struct iwl_host_cmd;
struct iwl_cmd;
-#define IWLWIFI_VERSION UTS_RELEASE "-k"
+#define IWLWIFI_VERSION "in-tree:"
#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
@@ -169,8 +167,10 @@ struct iwl_lib_ops {
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
- void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log);
+ int (*dump_nic_event_log)(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
void (*dump_nic_error_log)(struct iwl_priv *priv);
+ void (*dump_csr)(struct iwl_priv *priv);
int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
/* power management */
struct iwl_apm_ops apm_ops;
@@ -230,7 +230,6 @@ struct iwl_mod_params {
* @chain_noise_num_beacons: number of beacons used to compute chain noise
* @adv_thermal_throttle: support advance thermal throttle
* @support_ct_kill_exit: support ct kill exit condition
- * @sm_ps_mode: spatial multiplexing power save mode
* @support_wimax_coexist: support wimax/wifi co-exist
*
* We enable the driver to be backward compatible wrt API version. The
@@ -287,7 +286,6 @@ struct iwl_cfg {
const bool supports_idle;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
- u8 sm_ps_mode;
const bool support_wimax_coexist;
};
@@ -332,9 +330,9 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl_commit_rxon(struct iwl_priv *priv);
int iwl_set_mode(struct iwl_priv *priv, int mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwl_config_ap(struct iwl_priv *priv);
int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
@@ -581,7 +579,9 @@ int iwl_pci_resume(struct pci_dev *pdev);
* Error Handling Debugging
******************************************************/
void iwl_dump_nic_error_log(struct iwl_priv *priv);
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+int iwl_dump_nic_event_log(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+void iwl_dump_csr(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv);
#else
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d61293ab67c9..58e0462cafa3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -109,6 +109,8 @@ struct iwl_debugfs {
struct dentry *file_power_save_status;
struct dentry *file_clear_ucode_statistics;
struct dentry *file_clear_traffic_statistics;
+ struct dentry *file_csr;
+ struct dentry *file_ucode_tracing;
} dbgfs_debug_files;
u32 sram_offset;
u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 21e0f6699daf..4a2ac9311ba8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -125,7 +125,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
@@ -184,7 +184,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
int cnt;
@@ -232,7 +232,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
ssize_t ret;
int i;
int pos = 0;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
size_t bufsz;
/* default is to dump the entire data segment */
@@ -306,7 +306,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_station_entry *station;
int max_sta = priv->hw_params.max_stations;
char *buf;
@@ -376,7 +376,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
loff_t *ppos)
{
ssize_t ret;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, ofs = 0, buf_size = 0;
const u8 *ptr;
char *buf;
@@ -420,6 +420,23 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return ret;
}
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -ENOMEM;
+
+ pos = priv->cfg->ops->lib->dump_nic_event_log(priv, true, &buf, true);
+ if (pos && buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
+
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -436,7 +453,8 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
if (sscanf(buf, "%d", &event_log_flag) != 1)
return -EFAULT;
if (event_log_flag == 1)
- priv->cfg->ops->lib->dump_nic_event_log(priv, true);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+ NULL, false);
return count;
}
@@ -446,7 +464,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct ieee80211_channel *channels = NULL;
const struct ieee80211_supported_band *supp_band = NULL;
int pos = 0, i, bufsz = PAGE_SIZE;
@@ -519,7 +537,7 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[512];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -567,7 +585,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -654,7 +672,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -677,7 +695,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -703,7 +721,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
struct iwl_tt_restriction *restriction;
char buf[100];
@@ -763,7 +781,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -820,7 +838,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[10];
int pos, value;
const size_t bufsz = sizeof(buf);
@@ -838,7 +856,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[200];
int pos = 0, i;
const size_t bufsz = sizeof(buf);
@@ -859,7 +877,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
}
DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
@@ -976,7 +994,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
char *buf;
@@ -1022,7 +1040,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_rx_queue *rxq = &priv->rxq;
char buf[256];
int pos = 0;
@@ -1068,7 +1086,7 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct statistics_rx_phy) * 20 +
@@ -1369,6 +1387,9 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
accum_ht->agg_mpdu_cnt);
pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, "unsupport_mcs:\t\t%u\t\t\t%u\n",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1379,7 +1400,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
@@ -1521,7 +1542,7 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
int bufsz = sizeof(struct statistics_general) * 4 + 250;
@@ -1612,7 +1633,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1693,7 +1714,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1751,7 +1772,7 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[128];
int pos = 0;
ssize_t ret;
@@ -1802,7 +1823,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[60];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -1845,6 +1866,80 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int pos = 0;
+ char buf[128];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+ priv->event_log.ucode_trace ? "On" : "Off");
+ pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+ priv->event_log.non_wraps_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+ priv->event_log.wraps_once_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+ priv->event_log.wraps_more_count);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int trace;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &trace) != 1)
+ return -EFAULT;
+
+ if (trace) {
+ priv->event_log.ucode_trace = true;
+ /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ } else {
+ priv->event_log.ucode_trace = false;
+ del_timer_sync(&priv->ucode_trace);
+ }
+
+ return count;
+}
+
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1859,6 +1954,8 @@ DEBUGFS_READ_FILE_OPS(tx_power);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
/*
* Create the debugfs files and directories
@@ -1889,7 +1986,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, data, S_IWUSR);
+ DEBUGFS_ADD_FILE(log_event, data, S_IWUSR | S_IRUSR);
DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
DEBUGFS_ADD_FILE(status, data, S_IRUSR);
@@ -1909,12 +2006,14 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(csr, debug, S_IWUSR);
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tracing, debug, S_IWUSR | S_IRUSR);
}
DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
@@ -1966,6 +2065,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
file_clear_ucode_statistics);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
file_clear_traffic_statistics);
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_csr);
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
file_ucode_rx_stats);
@@ -1977,6 +2077,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
file_sensitivity);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
file_chain_noise);
+ DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
+ file_ucode_tracing);
}
DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 165d1f6e2dd9..70f0e79c8e4a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -512,6 +512,7 @@ struct iwl_ht_config {
bool is_ht;
bool is_40mhz;
bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
/* BSS related data */
u8 extension_chan_offset;
u8 ht_protection;
@@ -711,7 +712,7 @@ extern void iwl_txq_ctx_stop(struct iwl_priv *priv);
extern int iwl_queue_space(const struct iwl_queue *q);
static inline int iwl_queue_used(const struct iwl_queue *q, int i)
{
- return q->write_ptr > q->read_ptr ?
+ return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
@@ -984,6 +985,32 @@ struct iwl_switch_rxon {
__le16 channel;
};
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry: the entry just before the next one that uCode would fill
+ * @non_wraps_count: counter for no wrap detected when dump ucode events
+ * @wraps_once_count: counter for wrap once detected when dump ucode events
+ * @wraps_more_count: counter for wrap more than once detected
+ * when dump ucode events
+ */
+struct iwl_event_log {
+ bool ucode_trace;
+ u32 num_wraps;
+ u32 next_entry;
+ int non_wraps_count;
+ int wraps_once_count;
+ int wraps_more_count;
+};
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1261,6 +1288,7 @@ struct iwl_priv {
u32 disable_tx_power_cal;
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
+ struct timer_list ucode_trace;
bool hw_ready;
/*For 3945*/
#define IWL_DEFAULT_TX_POWER 0x0F
@@ -1268,6 +1296,8 @@ struct iwl_priv {
struct iwl3945_notif_statistics statistics_39;
u32 sta_supp_rates;
+
+ struct iwl_event_log event_log;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index e7d88d1da15d..bf46308b17fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -11,4 +11,6 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 21361968ab7e..0819f990be6c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -65,6 +65,50 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_ucode
+
+TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
+ TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, wraps)
+ __field(u32, n_entry)
+ __field(u32, p_entry)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->wraps = wraps;
+ __entry->n_entry = n_entry;
+ __entry->p_entry = p_entry;
+ ),
+ TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+ __entry->priv, __entry->wraps, __entry->n_entry,
+ __entry->p_entry)
+);
+
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 30e9ea6d54ec..87d684efe110 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(COEX_PRIORITY_TABLE_CMD);
IWL_CMD(COEX_MEDIUM_NOTIFICATION);
IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(RADAR_NOTIFICATION);
IWL_CMD(REPLY_QUIET_CMD);
IWL_CMD(REPLY_CHANNEL_SWITCH);
IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f8e4e4b18d02..10b0aa8024c4 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1518,8 +1518,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
* iwl3945_print_event_log - Dump error event log to syslog
*
*/
-static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1529,7 +1530,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
@@ -1555,26 +1556,43 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "0x%08x:%04u\n",
+ time, ev);
+ } else {
+ IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
+ time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
/**
* iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1582,21 +1600,28 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl3945_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl3945_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl3945_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
} else {
if (next_entry < size)
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1604,7 +1629,8 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1612,11 +1638,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
if (!iwl3945_hw_valid_rtc_data_addr(base)) {
IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
+ return pos;
}
/* event log header */
@@ -1642,7 +1670,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1658,25 +1686,38 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return pos;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/* if uCode has wrapped back to top of log,
* start at the oldest entry,
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl3945_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl3945_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
-
+ return pos;
}
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 6d6ed7485175..d32adeab68a3 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -868,36 +868,35 @@ static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
struct iwm_umac_notif_mgt_frame *mgt_frame =
(struct iwm_umac_notif_mgt_frame *)buf;
struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
- u8 *ie;
IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
le16_to_cpu(mgt_frame->len));
if (ieee80211_is_assoc_req(mgt->frame_control)) {
- ie = mgt->u.assoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
- ie = mgt->u.reassoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
- ie = mgt->u.assoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
- ie = mgt->u.reassoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
@@ -1534,6 +1533,33 @@ static void classify8023(struct sk_buff *skb)
}
}
+static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
+{
+ struct wireless_dev *wdev = iwm_to_wdev(iwm);
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ struct sk_buff_head list;
+ struct sk_buff *frame;
+
+ IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
+
+ __skb_queue_head_init(&list);
+ ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);
+
+ while ((frame = __skb_dequeue(&list))) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += frame->len;
+
+ frame->protocol = eth_type_trans(frame, ndev);
+ frame->ip_summed = CHECKSUM_NONE;
+ memset(frame->cb, 0, sizeof(frame->cb));
+
+ if (netif_rx_ni(frame) == NET_RX_DROP) {
+ IWM_ERR(iwm, "Packet dropped\n");
+ ndev->stats.rx_dropped++;
+ }
+ }
+}
+
static void iwm_rx_process_packet(struct iwm_priv *iwm,
struct iwm_rx_packet *packet,
struct iwm_rx_ticket_node *ticket_node)
@@ -1548,25 +1574,34 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
switch (le16_to_cpu(ticket_node->ticket->action)) {
case IWM_RX_TICKET_RELEASE:
IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
- classify8023(skb);
+
iwm_rx_adjust_packet(iwm, packet, ticket_node);
+ skb->dev = iwm_to_ndev(iwm);
+ classify8023(skb);
+
+ if (le16_to_cpu(ticket_node->ticket->flags) &
+ IWM_RX_TICKET_AMSDU_MSK) {
+ iwm_rx_process_amsdu(iwm, skb);
+ break;
+ }
+
ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
if (ret < 0) {
IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
"%d\n", ret);
+ kfree_skb(packet->skb);
break;
}
IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
- skb->dev = iwm_to_ndev(iwm);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
memset(skb->cb, 0, sizeof(skb->cb));
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
if (netif_rx_ni(skb) == NET_RX_DROP) {
IWM_ERR(iwm, "Packet dropped\n");
ndev->stats.rx_dropped++;
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
index 30aa9d48d67e..0485c9957575 100644
--- a/drivers/net/wireless/libertas/Kconfig
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -37,3 +37,9 @@ config LIBERTAS_DEBUG
depends on LIBERTAS
---help---
Debugging support.
+
+config LIBERTAS_MESH
+ bool "Enable mesh support"
+ depends on LIBERTAS
+ help
+ This enables Libertas' MESH support, used by e.g. the OLPC people.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index b188cd97a053..45e870e33117 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -5,11 +5,11 @@ libertas-y += cmdresp.o
libertas-y += debugfs.o
libertas-y += ethtool.o
libertas-y += main.o
-libertas-y += mesh.o
libertas-y += rx.o
libertas-y += scan.o
libertas-y += tx.o
libertas-y += wext.o
+libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
usb8xxx-objs += if_usb.o
libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 751067369ba8..5e650f358415 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -390,10 +390,8 @@ int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
- if (!ret && cmd_action == CMD_ACT_GET) {
- priv->ratebitmap = le16_to_cpu(cmd.bitmap);
+ if (!ret && cmd_action == CMD_ACT_GET)
priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
- }
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
@@ -807,8 +805,7 @@ static int lbs_try_associate(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
+ if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
preamble = RADIO_PREAMBLE_SHORT;
ret = lbs_set_radio(priv, preamble, 1);
@@ -939,8 +936,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
+ if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
lbs_deb_join("AdhocJoin: Short preamble\n");
preamble = RADIO_PREAMBLE_SHORT;
}
@@ -1049,7 +1045,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
struct assoc_request *assoc_req)
{
struct cmd_ds_802_11_ad_hoc_start cmd;
- u8 preamble = RADIO_PREAMBLE_LONG;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
size_t ratesize = 0;
u16 tmpcap = 0;
int ret = 0;
@@ -1057,11 +1053,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
lbs_deb_enter(LBS_DEB_ASSOC);
- if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- lbs_deb_join("ADHOC_START: Will use short preamble\n");
- preamble = RADIO_PREAMBLE_SHORT;
- }
-
ret = lbs_set_radio(priv, preamble, 1);
if (ret)
goto out;
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 42611bea76a3..82371ef39524 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -143,19 +143,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
cmd.hwifversion, cmd.version);
- /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
- /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
- /* 5.110.22 have mesh command with 0xa3 command id */
- /* 10.0.0.p0 FW brings in mesh config command with different id */
- /* Check FW version MSB and initialize mesh_fw_ver */
- if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
- priv->mesh_fw_ver = MESH_FW_OLD;
- else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
- (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
- priv->mesh_fw_ver = MESH_FW_NEW;
- else
- priv->mesh_fw_ver = MESH_NONE;
-
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some firmware
* returns non-zero high 8 bits here.
@@ -855,9 +842,6 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
if (priv->fwrelease < 0x09000000) {
switch (preamble) {
case RADIO_PREAMBLE_SHORT:
- if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
- goto out;
- /* Fall through */
case RADIO_PREAMBLE_AUTO:
case RADIO_PREAMBLE_LONG:
cmd.control = cpu_to_le16(preamble);
@@ -1011,6 +995,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = 0;
break;
+#ifdef CONFIG_LIBERTAS_MESH
+
case CMD_BT_ACCESS:
ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
break;
@@ -1019,6 +1005,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
break;
+#endif
+
case CMD_802_11_BEACON_CTRL:
ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
break;
@@ -1317,7 +1305,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
if ((priv->psmode != LBS802_11POWERMODECAM) &&
(priv->psstate == PS_STATE_FULL_POWER) &&
((priv->connect_status == LBS_CONNECTED) ||
- (priv->mesh_connect_status == LBS_CONNECTED))) {
+ lbs_mesh_connected(priv))) {
if (priv->secinfo.WPAenabled ||
priv->secinfo.WPA2enabled) {
/* check for valid WPA group keys */
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 2862748aef70..cb4138a55fdf 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -110,18 +110,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
-/* Mesh related */
-
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
- struct cmd_ds_mesh_access *cmd);
-
-int lbs_mesh_config_send(struct lbs_private *priv,
- struct cmd_ds_mesh_config *cmd,
- uint16_t action, uint16_t type);
-
-int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
-
-
/* Commands only used in wext.c, assoc. and scan.c */
int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 21d57690c20a..0334a58820ee 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -485,20 +485,8 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
break;
case MACREG_INT_CODE_MESH_AUTO_STARTED:
- /* Ignore spurious autostart events if autostart is disabled */
- if (!priv->mesh_autostart_enabled) {
- lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
- break;
- }
- lbs_pr_info("EVENT: MESH_AUTO_STARTED\n");
- priv->mesh_connect_status = LBS_CONNECTED;
- if (priv->mesh_open) {
- netif_carrier_on(priv->mesh_dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
- }
- priv->mode = IW_MODE_ADHOC;
- schedule_work(&priv->sync_channel);
+ /* Ignore spurious autostart events */
+ lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
break;
default:
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 6b6ea9f7bf5b..ea3f10ef4e00 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -397,13 +397,6 @@ enum KEY_INFO_WPA {
KEY_INFO_WPA_ENABLED = 0x04
};
-/** mesh_fw_ver */
-enum _mesh_fw_ver {
- MESH_NONE = 0, /* MESH is not supported */
- MESH_FW_OLD, /* MESH is supported in FW V5 */
- MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
-};
-
/* Default values for fwt commands. */
#define FWT_DEFAULT_METRIC 0
#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 05bb298dfae9..c348aff8f309 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -39,15 +39,14 @@ struct lbs_private {
/* Mesh */
struct net_device *mesh_dev; /* Virtual device */
+#ifdef CONFIG_LIBERTAS_MESH
u32 mesh_connect_status;
struct lbs_mesh_stats mstats;
int mesh_open;
- int mesh_fw_ver;
- int mesh_autostart_enabled;
uint16_t mesh_tlv;
u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 mesh_ssid_len;
- struct work_struct sync_channel;
+#endif
/* Monitor mode */
struct net_device *rtap_net_dev;
@@ -176,9 +175,7 @@ struct lbs_private {
struct bss_descriptor *networks;
struct assoc_request * pending_assoc_req;
struct assoc_request * in_progress_assoc_req;
- u16 capability;
uint16_t enablehwauto;
- uint16_t ratebitmap;
/* ADHOC */
u16 beacon_period;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 63d020374c2b..3804a58d7f4e 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -114,9 +114,11 @@ const struct ethtool_ops lbs_ethtool_ops = {
.get_drvinfo = lbs_ethtool_get_drvinfo,
.get_eeprom = lbs_ethtool_get_eeprom,
.get_eeprom_len = lbs_ethtool_get_eeprom_len,
+#ifdef CONFIG_LIBERTAS_MESH
.get_sset_count = lbs_mesh_ethtool_get_sset_count,
.get_ethtool_stats = lbs_mesh_ethtool_get_stats,
.get_strings = lbs_mesh_ethtool_get_strings,
+#endif
.get_wol = lbs_ethtool_get_wol,
.set_wol = lbs_ethtool_set_wol,
};
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index c2975c8e2f21..60bde1233a30 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -123,7 +123,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
if (priv->monitormode == monitor_mode)
return strlen(buf);
if (!priv->monitormode) {
- if (priv->infra_open || priv->mesh_open)
+ if (priv->infra_open || lbs_mesh_open(priv))
return -EBUSY;
if (priv->mode == IW_MODE_INFRA)
lbs_cmd_80211_deauthenticate(priv,
@@ -622,7 +622,7 @@ static int lbs_thread(void *data)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
if (priv->mesh_dev &&
- priv->mesh_connect_status == LBS_CONNECTED)
+ lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
}
@@ -809,18 +809,6 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
return 0;
}
-static void lbs_sync_channel_worker(struct work_struct *work)
-{
- struct lbs_private *priv = container_of(work, struct lbs_private,
- sync_channel);
-
- lbs_deb_enter(LBS_DEB_MAIN);
- if (lbs_update_channel(priv))
- lbs_pr_info("Channel synchronization failed.");
- lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-
static int lbs_init_adapter(struct lbs_private *priv)
{
size_t bufsize;
@@ -848,14 +836,12 @@ static int lbs_init_adapter(struct lbs_private *priv)
memset(priv->current_addr, 0xff, ETH_ALEN);
priv->connect_status = LBS_DISCONNECTED;
- priv->mesh_connect_status = LBS_DISCONNECTED;
priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
priv->mode = IW_MODE_INFRA;
priv->channel = DEFAULT_AD_HOC_CHANNEL;
priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
priv->radio_on = 1;
priv->enablehwauto = 1;
- priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
priv->psmode = LBS802_11POWERMODECAM;
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
@@ -998,11 +984,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
- INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
-
- priv->mesh_open = 0;
- sprintf(priv->mesh_ssid, "mesh");
- priv->mesh_ssid_len = 4;
priv->wol_criteria = 0xffffffff;
priv->wol_gpio = 0xff;
@@ -1076,6 +1057,17 @@ void lbs_remove_card(struct lbs_private *priv)
EXPORT_SYMBOL_GPL(lbs_remove_card);
+static int lbs_rtap_supported(struct lbs_private *priv)
+{
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
+ return 1;
+
+ /* newer firmware use a capability mask */
+ return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK));
+}
+
+
int lbs_start_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
@@ -1095,12 +1087,14 @@ int lbs_start_card(struct lbs_private *priv)
lbs_update_channel(priv);
+ lbs_init_mesh(priv);
+
/*
* While rtap isn't related to mesh, only mesh-enabled
* firmware implements the rtap functionality via
* CMD_802_11_MONITOR_MODE.
*/
- if (lbs_init_mesh(priv)) {
+ if (lbs_rtap_supported(priv)) {
if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
lbs_pr_err("cannot register lbs_rtap attribute\n");
}
@@ -1134,7 +1128,9 @@ void lbs_stop_card(struct lbs_private *priv)
netif_carrier_off(dev);
lbs_debugfs_remove_one(priv);
- if (lbs_deinit_mesh(priv))
+ lbs_deinit_mesh(priv);
+
+ if (lbs_rtap_supported(priv))
device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
/* Delete the timeout of the currently processing command */
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 92b7a357a5e4..e385af1f4583 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,4 +1,3 @@
-#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
@@ -197,7 +196,14 @@ int lbs_init_mesh(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MESH);
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ priv->mesh_connect_status = LBS_DISCONNECTED;
+
+ /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
+ /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
+ /* 5.110.22 have mesh command with 0xa3 command id */
+ /* 10.0.0.p0 FW brings in mesh config command with different id */
+ /* Check FW version MSB and initialize mesh_fw_ver */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
/* Enable mesh, if supported, and work out which TLV it uses.
0x100 + 291 is an unofficial value used in 5.110.20.pXX
0x100 + 37 is the official value used in 5.110.21.pXX
@@ -219,7 +225,9 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else
+ if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
/* 10.0.0.pXX new firmwares should succeed with TLV
* 0x100+37; Do not invoke command with old TLV.
*/
@@ -228,7 +236,12 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
+
+
if (priv->mesh_tlv) {
+ sprintf(priv->mesh_ssid, "mesh");
+ priv->mesh_ssid_len = 4;
+
lbs_add_mesh(priv);
if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
@@ -416,10 +429,10 @@ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
struct net_device *dev, struct rxpd *rxpd)
{
if (priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
if (rxpd->rx_control & RxPD_MESH_FRAME)
dev = priv->mesh_dev;
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
dev = priv->mesh_dev;
}
@@ -432,9 +445,9 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
struct net_device *dev, struct txpd *txpd)
{
if (dev == priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD)
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
- else if (priv->mesh_fw_ver == MESH_FW_NEW)
+ else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
txpd->u.bss.bss_num = MESH_IFACE_ID;
}
}
@@ -538,7 +551,7 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
* Command id is 0xac for v10 FW along with mesh interface
* id in bits 14-13-12.
*/
- if (priv->mesh_fw_ver == MESH_FW_NEW)
+ if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
command = CMD_MESH_CONFIG |
(MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index fea9b5d005fc..e2573303a328 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,6 +9,8 @@
#include <net/lib80211.h>
+#ifdef CONFIG_LIBERTAS_MESH
+
/* Mesh statistics */
struct lbs_mesh_stats {
u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
@@ -46,11 +48,20 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
/* Command handling */
struct cmd_ds_command;
+struct cmd_ds_mesh_access;
+struct cmd_ds_mesh_config;
int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
+int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+ struct cmd_ds_mesh_access *cmd);
+int lbs_mesh_config_send(struct lbs_private *priv,
+ struct cmd_ds_mesh_config *cmd,
+ uint16_t action, uint16_t type);
+int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+
/* Persistent configuration */
@@ -75,4 +86,25 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *s);
+/* Accessors */
+
+#define lbs_mesh_open(priv) (priv->mesh_open)
+#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
+
+#else
+
+#define lbs_init_mesh(priv)
+#define lbs_deinit_mesh(priv)
+#define lbs_add_mesh(priv)
+#define lbs_remove_mesh(priv)
+#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
+#define lbs_mesh_set_txpd(priv, dev, txpd)
+#define lbs_mesh_config(priv, enable, chan)
+#define lbs_mesh_open(priv) (0)
+#define lbs_mesh_connected(priv) (0)
+
+#endif
+
+
+
#endif
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index b0b1c7841500..220361e69cd3 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -635,7 +635,7 @@ out:
if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
+ if (priv->mesh_dev && lbs_mesh_connected(priv) &&
!priv->tx_pending_len)
netif_wake_queue(priv->mesh_dev);
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 315d1ce286ca..52d244ea3d97 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -198,7 +198,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED))
+ if (priv->mesh_dev && lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 4b1aab593a84..71f88a08e090 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -192,7 +192,7 @@ static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
lbs_deb_enter(LBS_DEB_WEXT);
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
memcpy(rates, lbs_bg_rates, MAX_RATES);
else
memcpy(rates, priv->curbssparams.rates, MAX_RATES);
@@ -298,6 +298,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
@@ -307,7 +308,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
/* Use nickname to indicate that mesh is on */
- if (priv->mesh_connect_status == LBS_CONNECTED) {
+ if (lbs_mesh_connected(priv)) {
strncpy(extra, "Mesh", 12);
extra[12] = '\0';
dwrq->length = strlen(extra);
@@ -321,6 +322,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -422,6 +424,7 @@ static int lbs_get_mode(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_wlan_get_mode(struct net_device *dev,
struct iw_request_info *info, u32 * uwrq,
char *extra)
@@ -433,6 +436,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_get_txpow(struct net_device *dev,
struct iw_request_info *info,
@@ -863,7 +867,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
/* If we're not associated, all quality values are meaningless */
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
goto out;
/* Quality by RSSI */
@@ -1010,6 +1014,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_set_freq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
@@ -1061,6 +1066,7 @@ out:
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -2108,6 +2114,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_get_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
@@ -2161,6 +2168,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
/**
* @brief Connect to the AP or Ad-hoc Network with specific bssid
@@ -2267,7 +2275,13 @@ static const iw_handler lbs_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
+struct iw_handler_def lbs_handler_def = {
+ .num_standard = ARRAY_SIZE(lbs_handler),
+ .standard = (iw_handler *) lbs_handler,
+ .get_wireless_stats = lbs_get_wireless_stats,
+};
+#ifdef CONFIG_LIBERTAS_MESH
static const iw_handler mesh_wlan_handler[] = {
(iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) lbs_get_name, /* SIOCGIWNAME */
@@ -2325,14 +2339,10 @@ static const iw_handler mesh_wlan_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
-struct iw_handler_def lbs_handler_def = {
- .num_standard = ARRAY_SIZE(lbs_handler),
- .standard = (iw_handler *) lbs_handler,
- .get_wireless_stats = lbs_get_wireless_stats,
-};
struct iw_handler_def mesh_handler_def = {
.num_standard = ARRAY_SIZE(mesh_wlan_handler),
.standard = (iw_handler *) mesh_wlan_handler,
.get_wireless_stats = lbs_get_wireless_stats,
};
+#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 26a1abd5bb03..ba3eb0101d55 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -318,14 +318,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
}
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
if (priv->vif != NULL)
return -EOPNOTSUPP;
- priv->vif = conf->vif;
- switch (conf->type) {
+ priv->vif = vif;
+ switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
lbtf_set_mode(priv, LBTF_AP_MODE);
@@ -337,12 +337,12 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
priv->vif = NULL;
return -EOPNOTSUPP;
}
- lbtf_set_mac_address(priv, (u8 *) conf->mac_addr);
+ lbtf_set_mac_address(priv, (u8 *) vif->addr);
return 0;
}
static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 88e41176e7fd..84df3fcf37b3 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -436,6 +436,38 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
}
+struct mac80211_hwsim_addr_match_data {
+ bool ret;
+ const u8 *addr;
+};
+
+static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_addr_match_data *md = data;
+ if (memcmp(mac, md->addr, ETH_ALEN) == 0)
+ md->ret = true;
+}
+
+
+static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
+ const u8 *addr)
+{
+ struct mac80211_hwsim_addr_match_data md;
+
+ if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
+ return true;
+
+ md.ret = false;
+ md.addr = addr;
+ ieee80211_iterate_active_interfaces_atomic(data->hw,
+ mac80211_hwsim_addr_iter,
+ &md);
+
+ return md.ret;
+}
+
+
static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -488,8 +520,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
if (nskb == NULL)
continue;
- if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr,
- ETH_ALEN) == 0)
+ if (mac80211_hwsim_addr_match(data2, hdr->addr1))
ack = true;
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -553,24 +584,24 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_set_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_set_magic(vif);
return 0;
}
static void mac80211_hwsim_remove_interface(
- struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf)
+ struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_check_magic(conf->vif);
- hwsim_clear_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_check_magic(vif);
+ hwsim_clear_magic(vif);
}
@@ -618,12 +649,26 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
-
- printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n",
+ static const char *chantypes[4] = {
+ [NL80211_CHAN_NO_HT] = "noht",
+ [NL80211_CHAN_HT20] = "ht20",
+ [NL80211_CHAN_HT40MINUS] = "ht40-",
+ [NL80211_CHAN_HT40PLUS] = "ht40+",
+ };
+ static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
+ [IEEE80211_SMPS_AUTOMATIC] = "auto",
+ [IEEE80211_SMPS_OFF] = "off",
+ [IEEE80211_SMPS_STATIC] = "static",
+ [IEEE80211_SMPS_DYNAMIC] = "dynamic",
+ };
+
+ printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
wiphy_name(hw->wiphy), __func__,
conf->channel->center_freq,
+ chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
- !!(conf->flags & IEEE80211_CONF_PS));
+ !!(conf->flags & IEEE80211_CONF_PS),
+ smps_modes[conf->smps_mode]);
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
@@ -827,6 +872,41 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
+static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
+{
+ /*
+ * In this special case, there's nothing we need to
+ * do because hwsim does transmission synchronously.
+ * In the future, when it does transmissions via
+ * userspace, we may need to do something.
+ */
+}
+
+
static const struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
@@ -841,6 +921,8 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
.set_tim = mac80211_hwsim_set_tim,
.conf_tx = mac80211_hwsim_conf_tx,
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
+ .ampdu_action = mac80211_hwsim_ampdu_action,
+ .flush = mac80211_hwsim_flush,
};
@@ -1082,7 +1164,9 @@ static int __init init_mac80211_hwsim(void)
BIT(NL80211_IFTYPE_MESH_POINT);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM;
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59d49159cf2a..68546ca0ba37 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
* drivers/net/wireless/mwl8k.c
* Driver for Marvell TOPDOG 802.11 Wireless cards
*
- * Copyright (C) 2008-2009 Marvell Semiconductor Inc.
+ * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -26,7 +26,7 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.10"
+#define MWL8K_VERSION "0.12"
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -92,8 +92,7 @@ struct mwl8k_device_info {
char *part_name;
char *helper_image;
char *fw_image;
- struct rxd_ops *rxd_ops;
- u16 modes;
+ struct rxd_ops *ap_rxd_ops;
};
struct mwl8k_rx_queue {
@@ -126,28 +125,30 @@ struct mwl8k_tx_queue {
struct sk_buff **skb;
};
-/* Pointers to the firmware data and meta information about it. */
-struct mwl8k_firmware {
- /* Boot helper code */
- struct firmware *helper;
+struct mwl8k_priv {
+ struct ieee80211_hw *hw;
+ struct pci_dev *pdev;
- /* Microcode */
- struct firmware *ucode;
-};
+ struct mwl8k_device_info *device_info;
-struct mwl8k_priv {
void __iomem *sram;
void __iomem *regs;
- struct ieee80211_hw *hw;
- struct pci_dev *pdev;
+ /* firmware */
+ struct firmware *fw_helper;
+ struct firmware *fw_ucode;
- struct mwl8k_device_info *device_info;
+ /* hardware/firmware parameters */
bool ap_fw;
struct rxd_ops *rxd_ops;
-
- /* firmware files and meta data */
- struct mwl8k_firmware fw;
+ struct ieee80211_supported_band band_24;
+ struct ieee80211_channel channels_24[14];
+ struct ieee80211_rate rates_24[14];
+ struct ieee80211_supported_band band_50;
+ struct ieee80211_channel channels_50[4];
+ struct ieee80211_rate rates_50[9];
+ u32 ap_macids_supported;
+ u32 sta_macids_supported;
/* firmware access */
struct mutex fw_mutex;
@@ -161,9 +162,9 @@ struct mwl8k_priv {
/* TX quiesce completion, protected by fw_mutex and tx_lock */
struct completion *tx_wait;
- struct ieee80211_vif *vif;
-
- struct ieee80211_channel *current_channel;
+ /* List of interfaces. */
+ u32 macids_used;
+ struct list_head vif_list;
/* power management status cookie from firmware */
u32 *cookie;
@@ -182,16 +183,15 @@ struct mwl8k_priv {
struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
- /* PHY parameters */
- struct ieee80211_supported_band band;
- struct ieee80211_channel channels[14];
- struct ieee80211_rate rates[14];
-
bool radio_on;
bool radio_short_preamble;
bool sniffer_enabled;
bool wmm_enabled;
+ struct work_struct sta_notify_worker;
+ spinlock_t sta_notify_list_lock;
+ struct list_head sta_notify_list;
+
/* XXX need to convert this to handle multiple interfaces */
bool capture_beacon;
u8 capture_bssid[ETH_ALEN];
@@ -205,32 +205,33 @@ struct mwl8k_priv {
*/
struct work_struct finalize_join_worker;
- /* Tasklet to reclaim TX descriptors and buffers after tx */
- struct tasklet_struct tx_reclaim_task;
+ /* Tasklet to perform TX reclaim. */
+ struct tasklet_struct poll_tx_task;
+
+ /* Tasklet to perform RX. */
+ struct tasklet_struct poll_rx_task;
};
/* Per interface specific private data */
struct mwl8k_vif {
- /* backpointer to parent config block */
- struct mwl8k_priv *priv;
-
- /* BSS config of AP or IBSS from mac80211*/
- struct ieee80211_bss_conf bss_info;
-
- /* BSSID of AP or IBSS */
- u8 bssid[ETH_ALEN];
- u8 mac_addr[ETH_ALEN];
+ struct list_head list;
+ struct ieee80211_vif *vif;
- /* Index into station database.Returned by update_sta_db call */
- u8 peer_id;
+ /* Firmware macid for this vif. */
+ int macid;
- /* Non AMPDU sequence number assigned by driver */
- u16 seqno;
+ /* Non AMPDU sequence number assigned by driver. */
+ u16 seqno;
};
-
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
-static const struct ieee80211_channel mwl8k_channels[] = {
+struct mwl8k_sta {
+ /* Index into station database. Returned by UPDATE_STADB. */
+ u8 peer_id;
+};
+#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
+
+static const struct ieee80211_channel mwl8k_channels_24[] = {
{ .center_freq = 2412, .hw_value = 1, },
{ .center_freq = 2417, .hw_value = 2, },
{ .center_freq = 2422, .hw_value = 3, },
@@ -242,9 +243,12 @@ static const struct ieee80211_channel mwl8k_channels[] = {
{ .center_freq = 2452, .hw_value = 9, },
{ .center_freq = 2457, .hw_value = 10, },
{ .center_freq = 2462, .hw_value = 11, },
+ { .center_freq = 2467, .hw_value = 12, },
+ { .center_freq = 2472, .hw_value = 13, },
+ { .center_freq = 2484, .hw_value = 14, },
};
-static const struct ieee80211_rate mwl8k_rates[] = {
+static const struct ieee80211_rate mwl8k_rates_24[] = {
{ .bitrate = 10, .hw_value = 2, },
{ .bitrate = 20, .hw_value = 4, },
{ .bitrate = 55, .hw_value = 11, },
@@ -261,8 +265,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
{ .bitrate = 720, .hw_value = 144, },
};
-static const u8 mwl8k_rateids[12] = {
- 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108,
+static const struct ieee80211_channel mwl8k_channels_50[] = {
+ { .center_freq = 5180, .hw_value = 36, },
+ { .center_freq = 5200, .hw_value = 40, },
+ { .center_freq = 5220, .hw_value = 44, },
+ { .center_freq = 5240, .hw_value = 48, },
+};
+
+static const struct ieee80211_rate mwl8k_rates_50[] = {
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+ { .bitrate = 720, .hw_value = 144, },
};
/* Set or get info from Firmware */
@@ -278,6 +297,7 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_RF_ANTENNA 0x0020
+#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -291,8 +311,10 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
-#define MWL8K_CMD_SET_MAC_ADDR 0x0202
+#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
+#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
+#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -310,6 +332,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(RADIO_CONTROL);
MWL8K_CMDNAME(RF_TX_POWER);
MWL8K_CMDNAME(RF_ANTENNA);
+ MWL8K_CMDNAME(SET_BEACON);
MWL8K_CMDNAME(SET_PRE_SCAN);
MWL8K_CMDNAME(SET_POST_SCAN);
MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -325,6 +348,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(ENABLE_SNIFFER);
MWL8K_CMDNAME(SET_MAC_ADDR);
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
+ MWL8K_CMDNAME(BSS_START);
+ MWL8K_CMDNAME(SET_NEW_STN);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -355,8 +380,8 @@ static void mwl8k_release_fw(struct firmware **fw)
static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
- mwl8k_release_fw(&priv->fw.ucode);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_ucode);
+ mwl8k_release_fw(&priv->fw_helper);
}
/* Request fw image */
@@ -377,7 +402,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
int rc;
if (di->helper_image != NULL) {
- rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper);
+ rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
if (rc) {
printk(KERN_ERR "%s: Error requesting helper "
"firmware file %s\n", pci_name(priv->pdev),
@@ -386,24 +411,22 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
}
}
- rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode);
+ rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
if (rc) {
printk(KERN_ERR "%s: Error requesting firmware file %s\n",
pci_name(priv->pdev), di->fw_image);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_helper);
return rc;
}
return 0;
}
-MODULE_FIRMWARE("mwl8k/helper_8687.fw");
-MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
-
struct mwl8k_cmd_pkt {
__le16 code;
__le16 length;
- __le16 seq_num;
+ __u8 seq_num;
+ __u8 macid;
__le16 result;
char payload[0];
} __attribute__((packed));
@@ -461,6 +484,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
cmd->seq_num = 0;
+ cmd->macid = 0;
cmd->result = 0;
done = 0;
@@ -551,13 +575,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
static int mwl8k_load_firmware(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
- struct firmware *fw = priv->fw.ucode;
- struct mwl8k_device_info *di = priv->device_info;
+ struct firmware *fw = priv->fw_ucode;
int rc;
int loops;
if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
- struct firmware *helper = priv->fw.helper;
+ struct firmware *helper = priv->fw_helper;
if (helper == NULL) {
printk(KERN_ERR "%s: helper image needed but none "
@@ -584,10 +607,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
return rc;
}
- if (di->modes & BIT(NL80211_IFTYPE_AP))
- iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
- else
- iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
+ iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
loops = 500000;
do {
@@ -610,91 +630,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
}
-/*
- * Defines shared between transmission and reception.
- */
-/* HT control fields for firmware */
-struct ewc_ht_info {
- __le16 control1;
- __le16 control2;
- __le16 control3;
-} __attribute__((packed));
-
-/* Firmware Station database operations */
-#define MWL8K_STA_DB_ADD_ENTRY 0
-#define MWL8K_STA_DB_MODIFY_ENTRY 1
-#define MWL8K_STA_DB_DEL_ENTRY 2
-#define MWL8K_STA_DB_FLUSH 3
-
-/* Peer Entry flags - used to define the type of the peer node */
-#define MWL8K_PEER_TYPE_ACCESSPOINT 2
-
-struct peer_capability_info {
- /* Peer type - AP vs. STA. */
- __u8 peer_type;
-
- /* Basic 802.11 capabilities from assoc resp. */
- __le16 basic_caps;
-
- /* Set if peer supports 802.11n high throughput (HT). */
- __u8 ht_support;
-
- /* Valid if HT is supported. */
- __le16 ht_caps;
- __u8 extended_ht_caps;
- struct ewc_ht_info ewc_info;
-
- /* Legacy rate table. Intersection of our rates and peer rates. */
- __u8 legacy_rates[12];
-
- /* HT rate table. Intersection of our rates and peer rates. */
- __u8 ht_rates[16];
- __u8 pad[16];
-
- /* If set, interoperability mode, no proprietary extensions. */
- __u8 interop;
- __u8 pad2;
- __u8 station_id;
- __le16 amsdu_enabled;
-} __attribute__((packed));
-
-/* Inline functions to manipulate QoS field in data descriptor. */
-static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
-{
- u16 val_mask = 1 << 4;
-
- /* End of Service Period Bit 4 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
-{
- u16 val_mask = 0x3;
- u8 shift = 5;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Ack Policy Bit 5-6 */
- return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
-}
-
-static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
-{
- u16 val_mask = 1 << 7;
-
- /* AMSDU present Bit 7 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
-{
- u16 val_mask = 0xff;
- u8 shift = 8;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Queue Length Bits 8-15 */
- return (qos & qos_mask) | ((len & val_mask) << shift);
-}
-
/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
__le16 fwlen;
@@ -761,9 +696,9 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
/*
- * Packet reception for 88w8366.
+ * Packet reception for 88w8366 AP firmware.
*/
-struct mwl8k_rxd_8366 {
+struct mwl8k_rxd_8366_ap {
__le16 pkt_len;
__u8 sq2;
__u8 rate;
@@ -781,23 +716,23 @@ struct mwl8k_rxd_8366 {
__u8 rx_ctrl;
} __attribute__((packed));
-#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80
-#define MWL8K_8366_RATE_INFO_40MHZ 0x40
-#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f)
+#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
+#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
+#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
-#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80
+#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
-static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -806,12 +741,12 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
- __le16 *qos)
+mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
+ __le16 *qos)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
- if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -820,23 +755,29 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_floor;
- if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) {
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
- if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ)
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate);
+ status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
- for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
- if (mwl8k_rates[i].hw_value == rxd->rate) {
+ for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
+ if (mwl8k_rates_24[i].hw_value == rxd->rate) {
status->rate_idx = i;
break;
}
}
}
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -844,17 +785,17 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8366_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8366),
- .rxd_init = mwl8k_rxd_8366_init,
- .rxd_refill = mwl8k_rxd_8366_refill,
- .rxd_process = mwl8k_rxd_8366_process,
+static struct rxd_ops rxd_8366_ap_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
+ .rxd_init = mwl8k_rxd_8366_ap_init,
+ .rxd_refill = mwl8k_rxd_8366_ap_refill,
+ .rxd_process = mwl8k_rxd_8366_ap_process,
};
/*
- * Packet reception for 88w8687.
+ * Packet reception for STA firmware.
*/
-struct mwl8k_rxd_8687 {
+struct mwl8k_rxd_sta {
__le16 pkt_len;
__u8 link_quality;
__u8 noise_level;
@@ -871,26 +812,26 @@ struct mwl8k_rxd_8687 {
__u8 pad2[2];
} __attribute__((packed));
-#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000
-#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
-#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
-#define MWL8K_8687_RATE_INFO_40MHZ 0x0004
-#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002
-#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001
+#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
+#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
+#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
+#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
+#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
+#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
-#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02
+#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
-static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -899,13 +840,13 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
+mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
__le16 *qos)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
u16 rate_info;
- if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -915,19 +856,25 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_level;
- status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
- status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
+ status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
+ status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
status->flag |= RX_FLAG_SHORTPRE;
- if (rate_info & MWL8K_8687_RATE_INFO_40MHZ)
+ if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
status->flag |= RX_FLAG_SHORT_GI;
- if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT)
+ if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
status->flag |= RX_FLAG_HT;
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -935,11 +882,11 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8687_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8687),
- .rxd_init = mwl8k_rxd_8687_init,
- .rxd_refill = mwl8k_rxd_8687_refill,
- .rxd_process = mwl8k_rxd_8687_process,
+static struct rxd_ops rxd_sta_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_sta),
+ .rxd_init = mwl8k_rxd_sta_init,
+ .rxd_refill = mwl8k_rxd_sta_refill,
+ .rxd_process = mwl8k_rxd_sta_process,
};
@@ -1153,16 +1100,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
* Packet transmission.
*/
-/* Transmit packet ACK policy */
-#define MWL8K_TXD_ACK_POLICY_NORMAL 0
-#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
-
#define MWL8K_TXD_STATUS_OK 0x00000001
#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
+#define MWL8K_QOS_QLEN_UNSPEC 0xff00
+#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
+#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
+#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
+#define MWL8K_QOS_EOSP 0x0010
+
struct mwl8k_tx_desc {
__le32 status;
__u8 data_rate;
@@ -1272,7 +1221,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
/*
* Must be called with priv->fw_mutex held and tx queues stopped.
*/
-#define MWL8K_TX_WAIT_TIMEOUT_MS 1000
+#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
{
@@ -1316,8 +1265,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
}
if (priv->pending_tx_pkts < oldcount) {
- printk(KERN_NOTICE "%s: timeout waiting for tx "
- "rings to drain (%d -> %d pkts), retrying\n",
+ printk(KERN_NOTICE "%s: waiting for tx rings "
+ "to drain (%d -> %d pkts)\n",
wiphy_name(hw->wiphy), oldcount,
priv->pending_tx_pkts);
retry = 1;
@@ -1342,13 +1291,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
MWL8K_TXD_STATUS_OK_RETRY | \
MWL8K_TXD_STATUS_OK_MORE_RETRY))
-static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
+static int
+mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- int wake = 0;
+ int processed;
- while (txq->stats.len > 0) {
+ processed = 0;
+ while (txq->stats.len > 0 && limit--) {
int tx;
struct mwl8k_tx_desc *tx_desc;
unsigned long addr;
@@ -1395,11 +1346,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
ieee80211_tx_status_irqsafe(hw, skb);
- wake = 1;
+ processed++;
}
- if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
+ if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
ieee80211_wake_queue(hw, index);
+
+ return processed;
}
/* must be called only when the card's transmit is completely halted */
@@ -1408,7 +1361,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- mwl8k_txq_reclaim(hw, index, 1);
+ mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
kfree(txq->skb);
txq->skb = NULL;
@@ -1446,11 +1399,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- u16 seqno = mwl8k_vif->seqno;
-
wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- wh->seq_ctrl |= cpu_to_le16(seqno << 4);
- mwl8k_vif->seqno = seqno++ % 4096;
+ wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
+ mwl8k_vif->seqno += 0x10;
}
/* Setup firmware control bit fields for each frame type. */
@@ -1459,24 +1410,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
if (ieee80211_is_mgmt(wh->frame_control) ||
ieee80211_is_ctl(wh->frame_control)) {
txdatarate = 0;
- qos = mwl8k_qos_setbit_eosp(qos);
- /* Set Queue size to unspecified */
- qos = mwl8k_qos_setbit_qlen(qos, 0xff);
+ qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
} else if (ieee80211_is_data(wh->frame_control)) {
txdatarate = 1;
if (is_multicast_ether_addr(wh->addr1))
txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
- /* Send pkt in an aggregate if AMPDU frame. */
+ qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_BLOCKACK);
+ qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
else
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_NORMAL);
-
- if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
- qos = mwl8k_qos_setbit_amsdu(qos);
+ qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
}
dma = pci_map_single(priv->pdev, skb->data,
@@ -1503,7 +1447,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
tx->pkt_phys_addr = cpu_to_le32(dma);
tx->pkt_len = cpu_to_le16(skb->len);
tx->rate_info = 0;
- tx->peer_id = mwl8k_vif->peer_id;
+ if (!priv->ap_fw && tx_info->control.sta != NULL)
+ tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+ else
+ tx->peer_id = 0;
wmb();
tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
@@ -1656,6 +1603,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
return rc;
}
+static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct mwl8k_cmd_pkt *cmd)
+{
+ if (vif != NULL)
+ cmd->macid = MWL8K_VIF(vif)->macid;
+ return mwl8k_post_cmd(hw, cmd);
+}
+
+/*
+ * Setup code shared between STA and AP firmware images.
+ */
+static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
+ memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
+
+ BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
+ memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
+
+ priv->band_24.band = IEEE80211_BAND_2GHZ;
+ priv->band_24.channels = priv->channels_24;
+ priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
+ priv->band_24.bitrates = priv->rates_24;
+ priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+}
+
+static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
+ memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
+
+ BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
+ memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
+
+ priv->band_50.band = IEEE80211_BAND_5GHZ;
+ priv->band_50.channels = priv->channels_50;
+ priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
+ priv->band_50.bitrates = priv->rates_50;
+ priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
+
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+}
+
/*
* CMD_GET_HW_SPEC (STA version).
*/
@@ -1678,6 +1675,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
__le32 total_rxd;
} __attribute__((packed));
+#define MWL8K_CAP_MAX_AMSDU 0x20000000
+#define MWL8K_CAP_GREENFIELD 0x08000000
+#define MWL8K_CAP_AMPDU 0x04000000
+#define MWL8K_CAP_RX_STBC 0x01000000
+#define MWL8K_CAP_TX_STBC 0x00800000
+#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
+#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
+#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
+#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
+#define MWL8K_CAP_DELAY_BA 0x00003000
+#define MWL8K_CAP_MIMO 0x00000200
+#define MWL8K_CAP_40MHZ 0x00000100
+#define MWL8K_CAP_BAND_MASK 0x00000007
+#define MWL8K_CAP_5GHZ 0x00000004
+#define MWL8K_CAP_2GHZ4 0x00000001
+
+static void
+mwl8k_set_ht_caps(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *band, u32 cap)
+{
+ int rx_streams;
+ int tx_streams;
+
+ band->ht_cap.ht_supported = 1;
+
+ if (cap & MWL8K_CAP_MAX_AMSDU)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+ if (cap & MWL8K_CAP_GREENFIELD)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
+ if (cap & MWL8K_CAP_AMPDU) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ }
+ if (cap & MWL8K_CAP_RX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
+ if (cap & MWL8K_CAP_TX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+ if (cap & MWL8K_CAP_SHORTGI_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ if (cap & MWL8K_CAP_SHORTGI_20MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ if (cap & MWL8K_CAP_DELAY_BA)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
+ if (cap & MWL8K_CAP_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
+ tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
+
+ band->ht_cap.mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ band->ht_cap.mcs.rx_mask[1] = 0xff;
+ if (rx_streams >= 3)
+ band->ht_cap.mcs.rx_mask[2] = 0xff;
+ band->ht_cap.mcs.rx_mask[4] = 0x01;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ if (rx_streams != tx_streams) {
+ band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ }
+}
+
+static void
+mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
+ mwl8k_setup_2ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_24, caps);
+ }
+
+ if (caps & MWL8K_CAP_5GHZ) {
+ mwl8k_setup_5ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_50, caps);
+ }
+}
+
static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
@@ -1708,6 +1788,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
+ priv->ap_macids_supported = 0x00000000;
+ priv->sta_macids_supported = 0x00000001;
}
kfree(cmd);
@@ -1761,6 +1844,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_setup_2ghz_band(hw);
+ priv->ap_macids_supported = 0x000000ff;
+ priv->sta_macids_supported = 0x00000000;
off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1806,7 +1892,9 @@ struct mwl8k_cmd_set_hw_spec {
__le32 total_rxd;
} __attribute__((packed));
-#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
{
@@ -1827,7 +1915,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
- cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
+ cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
@@ -1897,9 +1987,9 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
}
/*
- * CMD_802_11_GET_STAT.
+ * CMD_GET_STAT.
*/
-struct mwl8k_cmd_802_11_get_stat {
+struct mwl8k_cmd_get_stat {
struct mwl8k_cmd_pkt header;
__le32 stats[64];
} __attribute__((packed));
@@ -1909,10 +1999,10 @@ struct mwl8k_cmd_802_11_get_stat {
#define MWL8K_STAT_FCS_ERROR 24
#define MWL8K_STAT_RTS_SUCCESS 11
-static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
+static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- struct mwl8k_cmd_802_11_get_stat *cmd;
+ struct mwl8k_cmd_get_stat *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1939,9 +2029,9 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
}
/*
- * CMD_802_11_RADIO_CONTROL.
+ * CMD_RADIO_CONTROL.
*/
-struct mwl8k_cmd_802_11_radio_control {
+struct mwl8k_cmd_radio_control {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 control;
@@ -1949,10 +2039,10 @@ struct mwl8k_cmd_802_11_radio_control {
} __attribute__((packed));
static int
-mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
+mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_802_11_radio_control *cmd;
+ struct mwl8k_cmd_radio_control *cmd;
int rc;
if (enable == priv->radio_on && !force)
@@ -1977,36 +2067,32 @@ mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
return rc;
}
-static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 0, 0);
+ return mwl8k_cmd_radio_control(hw, 0, 0);
}
-static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 1, 0);
+ return mwl8k_cmd_radio_control(hw, 1, 0);
}
static int
mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
{
- struct mwl8k_priv *priv;
-
- if (hw == NULL || hw->priv == NULL)
- return -EINVAL;
- priv = hw->priv;
+ struct mwl8k_priv *priv = hw->priv;
priv->radio_short_preamble = short_preamble;
- return mwl8k_cmd_802_11_radio_control(hw, 1, 1);
+ return mwl8k_cmd_radio_control(hw, 1, 1);
}
/*
- * CMD_802_11_RF_TX_POWER.
+ * CMD_RF_TX_POWER.
*/
#define MWL8K_TX_POWER_LEVEL_TOTAL 8
-struct mwl8k_cmd_802_11_rf_tx_power {
+struct mwl8k_cmd_rf_tx_power {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 support_level;
@@ -2015,9 +2101,9 @@ struct mwl8k_cmd_802_11_rf_tx_power {
__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
} __attribute__((packed));
-static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
+static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
{
- struct mwl8k_cmd_802_11_rf_tx_power *cmd;
+ struct mwl8k_cmd_rf_tx_power *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2069,6 +2155,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
}
/*
+ * CMD_SET_BEACON.
+ */
+struct mwl8k_cmd_set_beacon {
+ struct mwl8k_cmd_pkt header;
+ __le16 beacon_len;
+ __u8 beacon[0];
+};
+
+static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *beacon, int len)
+{
+ struct mwl8k_cmd_set_beacon *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
+ cmd->beacon_len = cpu_to_le16(len);
+ memcpy(cmd->beacon, beacon, len);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
@@ -2103,7 +2219,7 @@ struct mwl8k_cmd_set_post_scan {
} __attribute__((packed));
static int
-mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac)
+mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
{
struct mwl8k_cmd_set_post_scan *cmd;
int rc;
@@ -2134,8 +2250,9 @@ struct mwl8k_cmd_set_rf_channel {
} __attribute__((packed));
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel)
+ struct ieee80211_conf *conf)
{
+ struct ieee80211_channel *channel = conf->channel;
struct mwl8k_cmd_set_rf_channel *cmd;
int rc;
@@ -2147,10 +2264,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
+
if (channel->band == IEEE80211_BAND_2GHZ)
- cmd->channel_flags = cpu_to_le32(0x00000081);
- else
- cmd->channel_flags = cpu_to_le32(0x00000000);
+ cmd->channel_flags |= cpu_to_le32(0x00000001);
+ else if (channel->band == IEEE80211_BAND_5GHZ)
+ cmd->channel_flags |= cpu_to_le32(0x00000004);
+
+ if (conf->channel_type == NL80211_CHAN_NO_HT ||
+ conf->channel_type == NL80211_CHAN_HT20)
+ cmd->channel_flags |= cpu_to_le32(0x00000080);
+ else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->channel_flags |= cpu_to_le32(0x000001900);
+ else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->channel_flags |= cpu_to_le32(0x000000900);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2159,85 +2285,75 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_SLOT.
+ * CMD_SET_AID.
*/
-struct mwl8k_cmd_set_slot {
- struct mwl8k_cmd_pkt header;
- __le16 action;
- __u8 short_slot;
-} __attribute__((packed));
-
-static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
-{
- struct mwl8k_cmd_set_slot *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->short_slot = short_slot_time;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+#define MWL8K_FRAME_PROT_DISABLED 0x00
+#define MWL8K_FRAME_PROT_11G 0x07
+#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
+#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
- return rc;
-}
+struct mwl8k_cmd_update_set_aid {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
-/*
- * CMD_MIMO_CONFIG.
- */
-struct mwl8k_cmd_mimo_config {
- struct mwl8k_cmd_pkt header;
- __le32 action;
- __u8 rx_antenna_map;
- __u8 tx_antenna_map;
+ /* AP's MAC address (BSSID) */
+ __u8 bssid[ETH_ALEN];
+ __le16 protection_mode;
+ __u8 supp_rates[14];
} __attribute__((packed));
-static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
{
- struct mwl8k_cmd_mimo_config *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
- cmd->rx_antenna_map = rx;
- cmd->tx_antenna_map = tx;
+ int i;
+ int j;
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+ /*
+ * Clear nonstandard rates 4 and 13.
+ */
+ mask &= 0x1fef;
- return rc;
+ for (i = 0, j = 0; i < 14; i++) {
+ if (mask & (1 << i))
+ rates[j++] = mwl8k_rates_24[i].hw_value;
+ }
}
-/*
- * CMD_ENABLE_SNIFFER.
- */
-struct mwl8k_cmd_enable_sniffer {
- struct mwl8k_cmd_pkt header;
- __le32 action;
-} __attribute__((packed));
-
-static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 legacy_rate_mask)
{
- struct mwl8k_cmd_enable_sniffer *cmd;
+ struct mwl8k_cmd_update_set_aid *cmd;
+ u16 prot_mode;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32(!!enable);
+ cmd->aid = cpu_to_le16(vif->bss_conf.aid);
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ if (vif->bss_conf.use_cts_prot) {
+ prot_mode = MWL8K_FRAME_PROT_11G;
+ } else {
+ switch (vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
+ break;
+ default:
+ prot_mode = MWL8K_FRAME_PROT_DISABLED;
+ break;
+ }
+ }
+ cmd->protection_mode = cpu_to_le16(prot_mode);
+
+ legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2246,37 +2362,32 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
}
/*
- * CMD_SET_MAC_ADDR.
+ * CMD_SET_RATE.
*/
-struct mwl8k_cmd_set_mac_addr {
- struct mwl8k_cmd_pkt header;
- union {
- struct {
- __le16 mac_type;
- __u8 mac_addr[ETH_ALEN];
- } mbss;
- __u8 mac_addr[ETH_ALEN];
- };
+struct mwl8k_cmd_set_rate {
+ struct mwl8k_cmd_pkt header;
+ __u8 legacy_rates[14];
+
+ /* Bitmap for supported MCS codes. */
+ __u8 mcs_set[16];
+ __u8 reserved[16];
} __attribute__((packed));
-static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
+static int
+mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 legacy_rate_mask, u8 *mcs_rates)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_mac_addr *cmd;
+ struct mwl8k_cmd_set_rate *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- if (priv->ap_fw) {
- cmd->mbss.mac_type = 0;
- memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
- } else {
- memcpy(cmd->mac_addr, mac, ETH_ALEN);
- }
+ legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
+ memcpy(cmd->mcs_set, mcs_rates, 16);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2284,29 +2395,40 @@ static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
return rc;
}
-
/*
- * CMD_SET_RATEADAPT_MODE.
+ * CMD_FINALIZE_JOIN.
*/
-struct mwl8k_cmd_set_rate_adapt_mode {
+#define MWL8K_FJ_BEACON_MAXLEN 128
+
+struct mwl8k_cmd_finalize_join {
struct mwl8k_cmd_pkt header;
- __le16 action;
- __le16 mode;
+ __le32 sleep_interval; /* Number of beacon periods to sleep */
+ __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __attribute__((packed));
-static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
+static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
+ int framelen, int dtim)
{
- struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ struct mwl8k_cmd_finalize_join *cmd;
+ struct ieee80211_mgmt *payload = frame;
+ int payload_len;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->mode = cpu_to_le16(mode);
+ cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
+
+ payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
+ if (payload_len < 0)
+ payload_len = 0;
+ else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
+ payload_len = MWL8K_FJ_BEACON_MAXLEN;
+
+ memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2315,59 +2437,57 @@ static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
}
/*
- * CMD_SET_WMM_MODE.
+ * CMD_SET_RTS_THRESHOLD.
*/
-struct mwl8k_cmd_set_wmm {
+struct mwl8k_cmd_set_rts_threshold {
struct mwl8k_cmd_pkt header;
__le16 action;
+ __le16 threshold;
} __attribute__((packed));
-static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_wmm *cmd;
+ struct mwl8k_cmd_set_rts_threshold *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(!!enable);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->threshold = cpu_to_le16(rts_thresh);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
- if (!rc)
- priv->wmm_enabled = enable;
-
return rc;
}
/*
- * CMD_SET_RTS_THRESHOLD.
+ * CMD_SET_SLOT.
*/
-struct mwl8k_cmd_rts_threshold {
+struct mwl8k_cmd_set_slot {
struct mwl8k_cmd_pkt header;
__le16 action;
- __le16 threshold;
+ __u8 short_slot;
} __attribute__((packed));
-static int mwl8k_rts_threshold(struct ieee80211_hw *hw,
- u16 action, u16 threshold)
+static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
- struct mwl8k_cmd_rts_threshold *cmd;
+ struct mwl8k_cmd_set_slot *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(action);
- cmd->threshold = cpu_to_le16(threshold);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->short_slot = short_slot_time;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2426,9 +2546,9 @@ struct mwl8k_cmd_set_edca_params {
MWL8K_SET_EDCA_AIFS)
static int
-mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
- __u16 cw_min, __u16 cw_max,
- __u8 aifs, __u16 txop)
+mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
+ __u16 cw_min, __u16 cw_max,
+ __u8 aifs, __u16 txop)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_set_edca_params *cmd;
@@ -2438,12 +2558,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
if (cmd == NULL)
return -ENOMEM;
- /*
- * Queues 0 (BE) and 1 (BK) are swapped in hardware for
- * this call.
- */
- qnum ^= !(qnum >> 1);
-
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
@@ -2467,170 +2581,259 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
}
/*
- * CMD_FINALIZE_JOIN.
+ * CMD_SET_WMM_MODE.
*/
-#define MWL8K_FJ_BEACON_MAXLEN 128
-
-struct mwl8k_cmd_finalize_join {
+struct mwl8k_cmd_set_wmm_mode {
struct mwl8k_cmd_pkt header;
- __le32 sleep_interval; /* Number of beacon periods to sleep */
- __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
+ __le16 action;
} __attribute__((packed));
-static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
- int framelen, int dtim)
+static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_cmd_finalize_join *cmd;
- struct ieee80211_mgmt *payload = frame;
- int payload_len;
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_cmd_set_wmm_mode *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
-
- payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
- if (payload_len < 0)
- payload_len = 0;
- else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
- payload_len = MWL8K_FJ_BEACON_MAXLEN;
-
- memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
+ cmd->action = cpu_to_le16(!!enable);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
+ if (!rc)
+ priv->wmm_enabled = enable;
+
return rc;
}
/*
- * CMD_UPDATE_STADB.
+ * CMD_MIMO_CONFIG.
*/
-struct mwl8k_cmd_update_sta_db {
+struct mwl8k_cmd_mimo_config {
struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __u8 rx_antenna_map;
+ __u8 tx_antenna_map;
+} __attribute__((packed));
- /* See STADB_ACTION_TYPE */
- __le32 action;
+static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+{
+ struct mwl8k_cmd_mimo_config *cmd;
+ int rc;
- /* Peer MAC address */
- __u8 peer_addr[ETH_ALEN];
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- __le32 reserved;
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
+ cmd->rx_antenna_map = rx;
+ cmd->tx_antenna_map = tx;
- /* Peer info - valid during add/update. */
- struct peer_capability_info peer_info;
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_USE_FIXED_RATE (STA version).
+ */
+struct mwl8k_cmd_use_fixed_rate_sta {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[8];
+ __le32 rate_type;
+ __le32 reserved1;
+ __le32 reserved2;
} __attribute__((packed));
-static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, __u32 action)
+#define MWL8K_USE_AUTO_RATE 0x0002
+#define MWL8K_UCAST_RATE 0
+
+static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_sta_db *cmd;
- struct peer_capability_info *peer_info;
+ struct mwl8k_cmd_use_fixed_rate_sta *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
- cmd->action = cpu_to_le32(action);
- peer_info = &cmd->peer_info;
- memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- switch (action) {
- case MWL8K_STA_DB_ADD_ENTRY:
- case MWL8K_STA_DB_MODIFY_ENTRY:
- /* Build peer_info block */
- peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
- peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
- memcpy(peer_info->legacy_rates, mwl8k_rateids,
- sizeof(mwl8k_rateids));
- peer_info->interop = 1;
- peer_info->amsdu_enabled = 0;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = peer_info->station_id;
+ return rc;
+}
- break;
+/*
+ * CMD_USE_FIXED_RATE (AP version).
+ */
+struct mwl8k_cmd_use_fixed_rate_ap {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct mwl8k_rate_entry_ap {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[4];
+ u8 multicast_rate;
+ u8 multicast_rate_type;
+ u8 management_rate;
+} __attribute__((packed));
- case MWL8K_STA_DB_DEL_ENTRY:
- case MWL8K_STA_DB_FLUSH:
- default:
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = 0;
- break;
- }
+static int
+mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
+{
+ struct mwl8k_cmd_use_fixed_rate_ap *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->multicast_rate = mcast;
+ cmd->management_rate = mgmt;
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_SET_AID.
+ * CMD_ENABLE_SNIFFER.
*/
-#define MWL8K_FRAME_PROT_DISABLED 0x00
-#define MWL8K_FRAME_PROT_11G 0x07
-#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
-#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
-
-struct mwl8k_cmd_update_set_aid {
- struct mwl8k_cmd_pkt header;
- __le16 aid;
-
- /* AP's MAC address (BSSID) */
- __u8 bssid[ETH_ALEN];
- __le16 protection_mode;
- __u8 supp_rates[14];
+struct mwl8k_cmd_enable_sniffer {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
} __attribute__((packed));
-static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_set_aid *cmd;
- u16 prot_mode;
+ struct mwl8k_cmd_enable_sniffer *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->aid = cpu_to_le16(info->aid);
+ cmd->action = cpu_to_le32(!!enable);
- memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (info->use_cts_prot) {
- prot_mode = MWL8K_FRAME_PROT_11G;
+ return rc;
+}
+
+/*
+ * CMD_SET_MAC_ADDR.
+ */
+struct mwl8k_cmd_set_mac_addr {
+ struct mwl8k_cmd_pkt header;
+ union {
+ struct {
+ __le16 mac_type;
+ __u8 mac_addr[ETH_ALEN];
+ } mbss;
+ __u8 mac_addr[ETH_ALEN];
+ };
+} __attribute__((packed));
+
+#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
+#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
+#define MWL8K_MAC_TYPE_PRIMARY_AP 2
+#define MWL8K_MAC_TYPE_SECONDARY_AP 3
+
+static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_cmd_set_mac_addr *cmd;
+ int mac_type;
+ int rc;
+
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+ } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ if (priv->ap_fw) {
+ cmd->mbss.mac_type = cpu_to_le16(mac_type);
+ memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
} else {
- switch (info->ht_operation_mode &
- IEEE80211_HT_OP_MODE_PROTECTION) {
- case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
- break;
- case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
- break;
- default:
- prot_mode = MWL8K_FRAME_PROT_DISABLED;
- break;
- }
+ memcpy(cmd->mac_addr, mac, ETH_ALEN);
}
- cmd->protection_mode = cpu_to_le16(prot_mode);
- memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_SET_RATEADAPT_MODE.
+ */
+struct mwl8k_cmd_set_rate_adapt_mode {
+ struct mwl8k_cmd_pkt header;
+ __le16 action;
+ __le16 mode;
+} __attribute__((packed));
+
+static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
+{
+ struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->mode = cpu_to_le16(mode);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2639,115 +2842,255 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_RATE.
+ * CMD_BSS_START.
*/
-struct mwl8k_cmd_update_rateset {
- struct mwl8k_cmd_pkt header;
- __u8 legacy_rates[14];
-
- /* Bitmap for supported MCS codes. */
- __u8 mcs_set[16];
- __u8 reserved[16];
+struct mwl8k_cmd_bss_start {
+ struct mwl8k_cmd_pkt header;
+ __le32 enable;
} __attribute__((packed));
-static int mwl8k_update_rateset(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int enable)
{
- struct mwl8k_cmd_update_rateset *cmd;
+ struct mwl8k_cmd_bss_start *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ cmd->enable = cpu_to_le32(enable);
- rc = mwl8k_post_cmd(hw, &cmd->header);
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_USE_FIXED_RATE.
+ * CMD_SET_NEW_STN.
*/
-#define MWL8K_RATE_TABLE_SIZE 8
-#define MWL8K_UCAST_RATE 0
-#define MWL8K_USE_AUTO_RATE 0x0002
+struct mwl8k_cmd_set_new_stn {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
+ __u8 mac_addr[6];
+ __le16 stn_id;
+ __le16 action;
+ __le16 rsvd;
+ __le32 legacy_rates;
+ __u8 ht_rates[4];
+ __le16 cap_info;
+ __le16 ht_capabilities_info;
+ __u8 mac_ht_param_info;
+ __u8 rev;
+ __u8 control_channel;
+ __u8 add_channel;
+ __le16 op_mode;
+ __le16 stbc;
+ __u8 add_qos_info;
+ __u8 is_qos_sta;
+ __le32 fw_sta_ptr;
+} __attribute__((packed));
-struct mwl8k_rate_entry {
- /* Set to 1 if HT rate, 0 if legacy. */
- __le32 is_ht_rate;
+#define MWL8K_STA_ACTION_ADD 0
+#define MWL8K_STA_ACTION_REMOVE 2
- /* Set to 1 to use retry_count field. */
- __le32 enable_retry;
+static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ u32 rates;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- /* Specified legacy rate or MCS. */
- __le32 rate;
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->aid = cpu_to_le16(sta->aid);
+ memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
+ cmd->stn_id = cpu_to_le16(sta->aid);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ cmd->legacy_rates = cpu_to_le32(rates);
+ if (sta->ht_cap.ht_supported) {
+ cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
+ cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
+ cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
+ cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
+ cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
+ cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ cmd->is_qos_sta = 1;
+ }
- /* Number of allowed retries. */
- __le32 retry_count;
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_UPDATE_STADB.
+ */
+struct ewc_ht_info {
+ __le16 control1;
+ __le16 control2;
+ __le16 control3;
} __attribute__((packed));
-struct mwl8k_rate_table {
- /* 1 to allow specified rate and below */
- __le32 allow_rate_drop;
- __le32 num_rates;
- struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
+struct peer_capability_info {
+ /* Peer type - AP vs. STA. */
+ __u8 peer_type;
+
+ /* Basic 802.11 capabilities from assoc resp. */
+ __le16 basic_caps;
+
+ /* Set if peer supports 802.11n high throughput (HT). */
+ __u8 ht_support;
+
+ /* Valid if HT is supported. */
+ __le16 ht_caps;
+ __u8 extended_ht_caps;
+ struct ewc_ht_info ewc_info;
+
+ /* Legacy rate table. Intersection of our rates and peer rates. */
+ __u8 legacy_rates[12];
+
+ /* HT rate table. Intersection of our rates and peer rates. */
+ __u8 ht_rates[16];
+ __u8 pad[16];
+
+ /* If set, interoperability mode, no proprietary extensions. */
+ __u8 interop;
+ __u8 pad2;
+ __u8 station_id;
+ __le16 amsdu_enabled;
} __attribute__((packed));
-struct mwl8k_cmd_use_fixed_rate {
- struct mwl8k_cmd_pkt header;
+struct mwl8k_cmd_update_stadb {
+ struct mwl8k_cmd_pkt header;
+
+ /* See STADB_ACTION_TYPE */
__le32 action;
- struct mwl8k_rate_table rate_table;
- /* Unicast, Broadcast or Multicast */
- __le32 rate_type;
- __le32 reserved1;
- __le32 reserved2;
+ /* Peer MAC address */
+ __u8 peer_addr[ETH_ALEN];
+
+ __le32 reserved;
+
+ /* Peer info - valid during add/update. */
+ struct peer_capability_info peer_info;
} __attribute__((packed));
-static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
- u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
+#define MWL8K_STA_DB_MODIFY_ENTRY 1
+#define MWL8K_STA_DB_DEL_ENTRY 2
+
+/* Peer Entry flags - used to define the type of the peer node */
+#define MWL8K_PEER_TYPE_ACCESSPOINT 2
+
+static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
- struct mwl8k_cmd_use_fixed_rate *cmd;
- int count;
+ struct mwl8k_cmd_update_stadb *cmd;
+ struct peer_capability_info *p;
+ u32 rates;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
+ memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
+
+ p = &cmd->peer_info;
+ p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
+ p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
+ p->ht_support = sta->ht_cap.ht_supported;
+ p->ht_caps = sta->ht_cap.cap;
+ p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ legacy_rate_mask_to_array(p->legacy_rates, rates);
+ memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+ p->interop = 1;
+ p->amsdu_enabled = 0;
- cmd->action = cpu_to_le32(action);
- cmd->rate_type = cpu_to_le32(rate_type);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (rate_table != NULL) {
- /*
- * Copy over each field manually so that endian
- * conversion can be done.
- */
- cmd->rate_table.allow_rate_drop =
- cpu_to_le32(rate_table->allow_rate_drop);
- cmd->rate_table.num_rates =
- cpu_to_le32(rate_table->num_rates);
-
- for (count = 0; count < rate_table->num_rates; count++) {
- struct mwl8k_rate_entry *dst =
- &cmd->rate_table.rate_entry[count];
- struct mwl8k_rate_entry *src =
- &rate_table->rate_entry[count];
-
- dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
- dst->enable_retry = cpu_to_le32(src->enable_retry);
- dst->rate = cpu_to_le32(src->rate);
- dst->retry_count = cpu_to_le32(src->retry_count);
- }
- }
+ return rc ? rc : p->station_id;
+}
+
+static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_update_stadb *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
+ memcpy(cmd->peer_addr, addr, ETH_ALEN);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2766,19 +3109,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
u32 status;
status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
- iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
-
if (!status)
return IRQ_NONE;
- if (status & MWL8K_A2H_INT_TX_DONE)
- tasklet_schedule(&priv->tx_reclaim_task);
+ if (status & MWL8K_A2H_INT_TX_DONE) {
+ status &= ~MWL8K_A2H_INT_TX_DONE;
+ tasklet_schedule(&priv->poll_tx_task);
+ }
if (status & MWL8K_A2H_INT_RX_READY) {
- while (rxq_process(hw, 0, 1))
- rxq_refill(hw, 0, 1);
+ status &= ~MWL8K_A2H_INT_RX_READY;
+ tasklet_schedule(&priv->poll_rx_task);
}
+ if (status)
+ iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+
if (status & MWL8K_A2H_INT_OPC_DONE) {
if (priv->hostcmd_wait != NULL)
complete(priv->hostcmd_wait);
@@ -2793,6 +3139,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mwl8k_tx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+ int i;
+
+ limit = 32;
+
+ spin_lock_bh(&priv->tx_lock);
+
+ for (i = 0; i < MWL8K_TX_QUEUES; i++)
+ limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
+
+ if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
+ complete(priv->tx_wait);
+ priv->tx_wait = NULL;
+ }
+
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_TX_DONE,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_tx_task);
+ }
+}
+
+static void mwl8k_rx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+
+ limit = 32;
+ limit -= rxq_process(hw, 0, limit);
+ limit -= rxq_refill(hw, 0, limit);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_rx_task);
+ }
+}
+
/*
* Core driver operations.
@@ -2803,7 +3196,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int index = skb_get_queue_mapping(skb);
int rc;
- if (priv->current_channel == NULL) {
+ if (!priv->radio_on) {
printk(KERN_DEBUG "%s: dropped TX frame since radio "
"disabled\n", wiphy_name(hw->wiphy));
dev_kfree_skb(skb);
@@ -2828,19 +3221,20 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return -EIO;
}
- /* Enable tx reclaim tasklet */
- tasklet_enable(&priv->tx_reclaim_task);
+ /* Enable TX reclaim and RX tasklets. */
+ tasklet_enable(&priv->poll_tx_task);
+ tasklet_enable(&priv->poll_rx_task);
/* Enable interrupts */
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
rc = mwl8k_fw_lock(hw);
if (!rc) {
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (!priv->ap_fw) {
if (!rc)
- rc = mwl8k_enable_sniffer(hw, 0);
+ rc = mwl8k_cmd_enable_sniffer(hw, 0);
if (!rc)
rc = mwl8k_cmd_set_pre_scan(hw);
@@ -2851,10 +3245,10 @@ static int mwl8k_start(struct ieee80211_hw *hw)
}
if (!rc)
- rc = mwl8k_cmd_setrateadaptmode(hw, 0);
+ rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
if (!rc)
- rc = mwl8k_set_wmm(hw, 0);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 0);
mwl8k_fw_unlock(hw);
}
@@ -2862,7 +3256,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
}
return rc;
@@ -2873,7 +3268,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
int i;
- mwl8k_cmd_802_11_radio_disable(hw);
+ mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
@@ -2886,36 +3281,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
if (priv->beacon_skb != NULL)
dev_kfree_skb(priv->beacon_skb);
- /* Stop tx reclaim tasklet */
- tasklet_disable(&priv->tx_reclaim_task);
+ /* Stop TX reclaim and RX tasklets. */
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
}
static int mwl8k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif;
-
- /*
- * We only support one active interface at a time.
- */
- if (priv->vif != NULL)
- return -EBUSY;
-
- /*
- * We only support managed interfaces for now.
- */
- if (conf->type != NL80211_IFTYPE_STATION)
- return -EINVAL;
+ u32 macids_supported;
+ int macid;
/*
* Reject interface creation if sniffer mode is active, as
* STA operation is mutually exclusive with hardware sniffer
- * mode.
+ * mode. (Sniffer mode is only used on STA firmware.)
*/
if (priv->sniffer_enabled) {
printk(KERN_INFO "%s: unable to create STA "
@@ -2924,37 +3310,54 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return -EINVAL;
}
- /* Clean out driver private area */
- mwl8k_vif = MWL8K_VIF(conf->vif);
- memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
- /* Set and save the mac address */
- mwl8k_set_mac_addr(hw, conf->mac_addr);
- memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ macids_supported = priv->ap_macids_supported;
+ break;
+ case NL80211_IFTYPE_STATION:
+ macids_supported = priv->sta_macids_supported;
+ break;
+ default:
+ return -EINVAL;
+ }
- /* Back pointer to parent config block */
- mwl8k_vif->priv = priv;
+ macid = ffs(macids_supported & ~priv->macids_used);
+ if (!macid--)
+ return -EBUSY;
- /* Set Initial sequence number to zero */
+ /* Setup driver private area. */
+ mwl8k_vif = MWL8K_VIF(vif);
+ memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
+ mwl8k_vif->vif = vif;
+ mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
- priv->vif = conf->vif;
- priv->current_channel = NULL;
+ /* Set the mac address. */
+ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
+
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_add_self(hw, vif);
+
+ priv->macids_used |= 1 << mwl8k_vif->macid;
+ list_add_tail(&mwl8k_vif->list, &priv->vif_list);
return 0;
}
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- if (priv->vif == NULL)
- return;
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
- mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->vif = NULL;
+ priv->macids_used &= ~(1 << mwl8k_vif->macid);
+ list_del(&mwl8k_vif->list);
}
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2964,8 +3367,7 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
int rc;
if (conf->flags & IEEE80211_CONF_IDLE) {
- mwl8k_cmd_802_11_radio_disable(hw);
- priv->current_channel = NULL;
+ mwl8k_cmd_radio_disable(hw);
return 0;
}
@@ -2973,19 +3375,17 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
return rc;
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (rc)
goto out;
- rc = mwl8k_cmd_set_rf_channel(hw, conf->channel);
+ rc = mwl8k_cmd_set_rf_channel(hw, conf);
if (rc)
goto out;
- priv->current_channel = conf->channel;
-
if (conf->power_level > 18)
conf->power_level = 18;
- rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level);
+ rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
if (rc)
goto out;
@@ -3003,79 +3403,160 @@ out:
return rc;
}
-static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+static void
+mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ u32 ap_legacy_rates;
+ u8 ap_mcs_rates[16];
int rc;
- if ((changed & BSS_CHANGED_ASSOC) == 0)
+ if (mwl8k_fw_lock(hw))
return;
- priv->capture_beacon = false;
-
- rc = mwl8k_fw_lock(hw);
- if (rc)
- return;
+ /*
+ * No need to capture a beacon if we're no longer associated.
+ */
+ if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
+ priv->capture_beacon = false;
- if (info->assoc) {
- memcpy(&mwl8k_vif->bss_info, info,
- sizeof(struct ieee80211_bss_conf));
+ /*
+ * Get the AP's legacy and MCS rates.
+ */
+ if (vif->bss_conf.assoc) {
+ struct ieee80211_sta *ap;
- memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN);
+ rcu_read_lock();
- /* Install rates */
- rc = mwl8k_update_rateset(hw, vif);
- if (rc)
+ ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (ap == NULL) {
+ rcu_read_unlock();
goto out;
+ }
- /* Turn on rate adaptation */
- rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
- MWL8K_UCAST_RATE, NULL);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
+ ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+ } else {
+ ap_legacy_rates =
+ ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ }
+ memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
+
+ rcu_read_unlock();
+ }
+
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+ rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
- /* Set radio preamble */
- rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
+ rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
+ }
- /* Set slot time */
- rc = mwl8k_cmd_set_slot(hw, info->use_short_slot);
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
if (rc)
goto out;
+ }
- /* Update peer rate info */
- rc = mwl8k_cmd_update_sta_db(hw, vif,
- MWL8K_STA_DB_MODIFY_ENTRY);
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
+ }
- /* Set AID */
- rc = mwl8k_cmd_set_aid(hw, vif);
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT))) {
+ rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
if (rc)
goto out;
+ }
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
* next beacon from our BSSID.
*/
- memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
+ memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
priv->capture_beacon = true;
- } else {
- rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
- memset(&mwl8k_vif->bss_info, 0,
- sizeof(struct ieee80211_bss_conf));
- memset(mwl8k_vif->bssid, 0, ETH_ALEN);
}
out:
mwl8k_fw_unlock(hw);
}
+static void
+mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ int rc;
+
+ if (mwl8k_fw_lock(hw))
+ return;
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
+ if (rc)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ int idx;
+ int rate;
+
+ /*
+ * Use lowest supported basic rate for multicasts
+ * and management frames (such as probe responses --
+ * beacons will always go out at 1 Mb/s).
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
+
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get(hw, vif);
+ if (skb != NULL) {
+ mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
+ kfree_skb(skb);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
+
+out:
+ mwl8k_fw_unlock(hw);
+}
+
+static void
+mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if (!priv->ap_fw)
+ mwl8k_bss_info_changed_sta(hw, vif, info, changed);
+ else
+ mwl8k_bss_info_changed_ap(hw, vif, info, changed);
+}
+
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mclist)
{
@@ -3105,7 +3586,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
* operation, so refuse to enable sniffer mode if a STA
* interface is active.
*/
- if (priv->vif != NULL) {
+ if (!list_empty(&priv->vif_list)) {
if (net_ratelimit())
printk(KERN_INFO "%s: not enabling sniffer "
"mode because STA interface is active\n",
@@ -3114,7 +3595,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
}
if (!priv->sniffer_enabled) {
- if (mwl8k_enable_sniffer(hw, 1))
+ if (mwl8k_cmd_enable_sniffer(hw, 1))
return 0;
priv->sniffer_enabled = true;
}
@@ -3126,6 +3607,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
return 1;
}
+static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
+{
+ if (!list_empty(&priv->vif_list))
+ return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
+
+ return NULL;
+}
+
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -3157,11 +3646,13 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
/* Clear unsupported feature flags */
*total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
- if (mwl8k_fw_lock(hw))
+ if (mwl8k_fw_lock(hw)) {
+ kfree(cmd);
return;
+ }
if (priv->sniffer_enabled) {
- mwl8k_enable_sniffer(hw, 0);
+ mwl8k_cmd_enable_sniffer(hw, 0);
priv->sniffer_enabled = false;
}
@@ -3172,7 +3663,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
mwl8k_cmd_set_pre_scan(hw);
} else {
- u8 *bssid;
+ struct mwl8k_vif *mwl8k_vif;
+ const u8 *bssid;
/*
* Enable the BSS filter.
@@ -3182,9 +3674,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
* (where the OUI part needs to be nonzero for
* the BSSID to be accepted by POST_SCAN).
*/
- bssid = "\x01\x00\x00\x00\x00\x00";
- if (priv->vif != NULL)
- bssid = MWL8K_VIF(priv->vif)->bssid;
+ mwl8k_vif = mwl8k_first_vif(priv);
+ if (mwl8k_vif != NULL)
+ bssid = mwl8k_vif->vif->bss_conf.bssid;
+ else
+ bssid = "\x01\x00\x00\x00\x00\x00";
mwl8k_cmd_set_post_scan(hw, bssid);
}
@@ -3211,7 +3705,93 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value);
+ return mwl8k_cmd_set_rts_threshold(hw, value);
+}
+
+struct mwl8k_sta_notify_item
+{
+ struct list_head list;
+ struct ieee80211_vif *vif;
+ enum sta_notify_cmd cmd;
+ struct ieee80211_sta sta;
+};
+
+static void
+mwl8k_do_sta_notify(struct ieee80211_hw *hw, struct mwl8k_sta_notify_item *s)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ /*
+ * STA firmware uses UPDATE_STADB, AP firmware uses SET_NEW_STN.
+ */
+ if (!priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
+ int rc;
+
+ rc = mwl8k_cmd_update_stadb_add(hw, s->vif, &s->sta);
+ if (rc >= 0) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(s->vif, s->sta.addr);
+ if (sta != NULL)
+ MWL8K_STA(sta)->peer_id = rc;
+ rcu_read_unlock();
+ }
+ } else if (!priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
+ mwl8k_cmd_update_stadb_del(hw, s->vif, s->sta.addr);
+ } else if (priv->ap_fw && s->cmd == STA_NOTIFY_ADD) {
+ mwl8k_cmd_set_new_stn_add(hw, s->vif, &s->sta);
+ } else if (priv->ap_fw && s->cmd == STA_NOTIFY_REMOVE) {
+ mwl8k_cmd_set_new_stn_del(hw, s->vif, s->sta.addr);
+ }
+}
+
+static void mwl8k_sta_notify_worker(struct work_struct *work)
+{
+ struct mwl8k_priv *priv =
+ container_of(work, struct mwl8k_priv, sta_notify_worker);
+ struct ieee80211_hw *hw = priv->hw;
+
+ spin_lock_bh(&priv->sta_notify_list_lock);
+ while (!list_empty(&priv->sta_notify_list)) {
+ struct mwl8k_sta_notify_item *s;
+
+ s = list_entry(priv->sta_notify_list.next,
+ struct mwl8k_sta_notify_item, list);
+ list_del(&s->list);
+
+ spin_unlock_bh(&priv->sta_notify_list_lock);
+
+ mwl8k_do_sta_notify(hw, s);
+ kfree(s);
+
+ spin_lock_bh(&priv->sta_notify_list_lock);
+ }
+ spin_unlock_bh(&priv->sta_notify_list_lock);
+}
+
+static void
+mwl8k_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd, struct ieee80211_sta *sta)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_sta_notify_item *s;
+
+ if (cmd != STA_NOTIFY_ADD && cmd != STA_NOTIFY_REMOVE)
+ return;
+
+ s = kmalloc(sizeof(*s), GFP_ATOMIC);
+ if (s != NULL) {
+ s->vif = vif;
+ s->cmd = cmd;
+ s->sta = *sta;
+
+ spin_lock(&priv->sta_notify_list_lock);
+ list_add_tail(&s->list, &priv->sta_notify_list);
+ spin_unlock(&priv->sta_notify_list_lock);
+
+ ieee80211_queue_work(hw, &priv->sta_notify_worker);
+ }
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3223,14 +3803,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
rc = mwl8k_fw_lock(hw);
if (!rc) {
if (!priv->wmm_enabled)
- rc = mwl8k_set_wmm(hw, 1);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 1);
if (!rc)
- rc = mwl8k_set_edca_params(hw, queue,
- params->cw_min,
- params->cw_max,
- params->aifs,
- params->txop);
+ rc = mwl8k_cmd_set_edca_params(hw, queue,
+ params->cw_min,
+ params->cw_max,
+ params->aifs,
+ params->txop);
mwl8k_fw_unlock(hw);
}
@@ -3259,7 +3839,23 @@ static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
static int mwl8k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
- return mwl8k_cmd_802_11_get_stat(hw, stats);
+ return mwl8k_cmd_get_stat(hw, stats);
+}
+
+static int
+mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
}
static const struct ieee80211_ops mwl8k_ops = {
@@ -3273,67 +3869,68 @@ static const struct ieee80211_ops mwl8k_ops = {
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
.set_rts_threshold = mwl8k_set_rts_threshold,
+ .sta_notify = mwl8k_sta_notify,
.conf_tx = mwl8k_conf_tx,
.get_tx_stats = mwl8k_get_tx_stats,
.get_stats = mwl8k_get_stats,
+ .ampdu_action = mwl8k_ampdu_action,
};
-static void mwl8k_tx_reclaim_handler(unsigned long data)
-{
- int i;
- struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
- struct mwl8k_priv *priv = hw->priv;
-
- spin_lock_bh(&priv->tx_lock);
- for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 0);
-
- if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
- complete(priv->tx_wait);
- priv->tx_wait = NULL;
- }
- spin_unlock_bh(&priv->tx_lock);
-}
-
static void mwl8k_finalize_join_worker(struct work_struct *work)
{
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, finalize_join_worker);
struct sk_buff *skb = priv->beacon_skb;
- u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period;
+ struct mwl8k_vif *mwl8k_vif;
- mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim);
- dev_kfree_skb(skb);
+ mwl8k_vif = mwl8k_first_vif(priv);
+ if (mwl8k_vif != NULL)
+ mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len,
+ mwl8k_vif->vif->bss_conf.dtim_period);
+ dev_kfree_skb(skb);
priv->beacon_skb = NULL;
}
enum {
- MWL8687 = 0,
+ MWL8363 = 0,
+ MWL8687,
MWL8366,
};
static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
- {
+ [MWL8363] = {
+ .part_name = "88w8363",
+ .helper_image = "mwl8k/helper_8363.fw",
+ .fw_image = "mwl8k/fmimage_8363.fw",
+ },
+ [MWL8687] = {
.part_name = "88w8687",
.helper_image = "mwl8k/helper_8687.fw",
.fw_image = "mwl8k/fmimage_8687.fw",
- .rxd_ops = &rxd_8687_ops,
- .modes = BIT(NL80211_IFTYPE_STATION),
},
- {
+ [MWL8366] = {
.part_name = "88w8366",
.helper_image = "mwl8k/helper_8366.fw",
.fw_image = "mwl8k/fmimage_8366.fw",
- .rxd_ops = &rxd_8366_ops,
- .modes = 0,
+ .ap_rxd_ops = &rxd_8366_ap_ops,
},
};
+MODULE_FIRMWARE("mwl8k/helper_8363.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
+MODULE_FIRMWARE("mwl8k/helper_8687.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
+MODULE_FIRMWARE("mwl8k/helper_8366.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
+
static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+ { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
+ { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3352,6 +3949,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
printed_version = 1;
}
+
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR "%s: Cannot enable new PCI device\n",
@@ -3368,6 +3966,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+
hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
if (hw == NULL) {
printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
@@ -3375,17 +3974,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_reg;
}
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
priv = hw->priv;
priv->hw = hw;
priv->pdev = pdev;
priv->device_info = &mwl8k_info_tbl[id->driver_data];
- priv->rxd_ops = priv->device_info->rxd_ops;
- priv->sniffer_enabled = false;
- priv->wmm_enabled = false;
- priv->pending_tx_pkts = 0;
- SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
@@ -3408,16 +4004,46 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
}
}
- memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
- priv->band.band = IEEE80211_BAND_2GHZ;
- priv->band.channels = priv->channels;
- priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
- priv->band.bitrates = priv->rates;
- priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates));
- memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates));
+ /* Reset firmware and hardware */
+ mwl8k_hw_reset(priv);
+
+ /* Ask userland hotplug daemon for the device firmware */
+ rc = mwl8k_request_firmware(priv);
+ if (rc) {
+ printk(KERN_ERR "%s: Firmware files not found\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Load firmware into hardware */
+ rc = mwl8k_load_firmware(hw);
+ if (rc) {
+ printk(KERN_ERR "%s: Cannot start firmware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Reclaim memory once firmware is successfully loaded */
+ mwl8k_release_firmware(priv);
+
+
+ if (priv->ap_fw) {
+ priv->rxd_ops = priv->device_info->ap_rxd_ops;
+ if (priv->rxd_ops == NULL) {
+ printk(KERN_ERR "%s: Driver does not have AP "
+ "firmware image support for this hardware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+ } else {
+ priv->rxd_ops = &rxd_sta_ops;
+ }
+
+ priv->sniffer_enabled = false;
+ priv->wmm_enabled = false;
+ priv->pending_tx_pkts = 0;
+
/*
* Extra headroom is the size of the required DMA header
@@ -3430,33 +4056,40 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
hw->queues = MWL8K_TX_QUEUES;
- hw->wiphy->interface_modes = priv->device_info->modes;
-
/* Set rssi and noise values to dBm */
hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
- priv->vif = NULL;
+ hw->sta_data_size = sizeof(struct mwl8k_sta);
+
+ priv->macids_used = 0;
+ INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
priv->radio_on = 0;
priv->radio_short_preamble = 0;
+ /* Station database handling */
+ INIT_WORK(&priv->sta_notify_worker, mwl8k_sta_notify_worker);
+ spin_lock_init(&priv->sta_notify_list_lock);
+ INIT_LIST_HEAD(&priv->sta_notify_list);
+
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
- /* TX reclaim tasklet */
- tasklet_init(&priv->tx_reclaim_task,
- mwl8k_tx_reclaim_handler, (unsigned long)hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ /* TX reclaim and RX tasklets. */
+ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
if (priv->cookie == NULL)
- goto err_iounmap;
+ goto err_stop_firmware;
rc = mwl8k_rxq_init(hw, 0);
if (rc)
- goto err_iounmap;
+ goto err_free_cookie;
rxq_refill(hw, 0, INT_MAX);
mutex_init(&priv->fw_mutex);
@@ -3476,7 +4109,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
+ iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3487,31 +4121,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_queues;
}
- /* Reset firmware and hardware */
- mwl8k_hw_reset(priv);
-
- /* Ask userland hotplug daemon for the device firmware */
- rc = mwl8k_request_firmware(priv);
- if (rc) {
- printk(KERN_ERR "%s: Firmware files not found\n",
- wiphy_name(hw->wiphy));
- goto err_free_irq;
- }
-
- /* Load firmware into hardware */
- rc = mwl8k_load_firmware(hw);
- if (rc) {
- printk(KERN_ERR "%s: Cannot start firmware\n",
- wiphy_name(hw->wiphy));
- goto err_stop_firmware;
- }
-
- /* Reclaim memory once firmware is successfully loaded */
- mwl8k_release_firmware(priv);
-
/*
* Temporarily enable interrupts. Initial firmware host
- * commands use interrupts and avoids polling. Disable
+ * commands use interrupts and avoid polling. Disable
* interrupts when done.
*/
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3527,22 +4139,29 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot initialise firmware\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
+ hw->wiphy->interface_modes = 0;
+ if (priv->ap_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ if (priv->sta_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+
+
/* Turn radio off */
- rc = mwl8k_cmd_802_11_radio_disable(hw);
+ rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Clear MAC address */
- rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
printk(KERN_ERR "%s: Cannot clear MAC address\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Disable interrupts */
@@ -3553,7 +4172,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot register device\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_queues;
}
printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3565,10 +4184,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
return 0;
-err_stop_firmware:
- mwl8k_hw_reset(priv);
- mwl8k_release_firmware(priv);
-
err_free_irq:
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
@@ -3578,11 +4193,16 @@ err_free_queues:
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
-err_iounmap:
+err_free_cookie:
if (priv->cookie != NULL)
pci_free_consistent(priv->pdev, 4,
priv->cookie, priv->cookie_dma);
+err_stop_firmware:
+ mwl8k_hw_reset(priv);
+ mwl8k_release_firmware(priv);
+
+err_iounmap:
if (priv->regs != NULL)
pci_iounmap(pdev, priv->regs);
@@ -3620,15 +4240,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
ieee80211_unregister_hw(hw);
- /* Remove tx reclaim tasklet */
- tasklet_kill(&priv->tx_reclaim_task);
+ /* Remove TX reclaim and RX tasklets. */
+ tasklet_kill(&priv->poll_tx_task);
+ tasklet_kill(&priv->poll_rx_task);
/* Stop hardware */
mwl8k_hw_reset(priv);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c383410..075f446b3139 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_nortel_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e7..bda5317cc596 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
/* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
/* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f5..e0d5874ab42f 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_plx_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc71..88cbc7902aa0 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_tmd_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
{0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
{0,},
};
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 18012dbfb45d..26428e4c9c60 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -216,7 +216,7 @@ static void p54_stop(struct ieee80211_hw *dev)
}
static int p54_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
@@ -226,28 +226,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
mutex_unlock(&priv->conf_mutex);
return -EOPNOTSUPP;
}
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
p54_setup_mac(priv);
mutex_unlock(&priv->conf_mutex);
return 0;
}
static void p54_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a15962a19b2a..fab41f507bd3 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");
-static struct pci_device_id p54p_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3890) },
/* 3COM 3CRWE154G72 Wireless LAN adapter */
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f2..dc14420a9adc 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
-static const struct pci_device_id prism54_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{
0x1260, 0x3890,
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2ecbedb26e15..305c106fdc1c 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2594,23 +2594,9 @@ end:
/*
* driver/device initialization
*/
-static int bcm4320a_early_init(struct usbnet *usbdev)
-{
- /* bcm4320a doesn't handle configuration parameters well. Try
- * set any and you get partially zeroed mac and broken device.
- */
-
- return 0;
-}
-
-static int bcm4320b_early_init(struct usbnet *usbdev)
+static void rndis_copy_module_params(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- char buf[8];
-
- /* Early initialization settings, setting these won't have effect
- * if called after generic_rndis_bind().
- */
priv->param_country[0] = modparam_country[0];
priv->param_country[1] = modparam_country[1];
@@ -2652,6 +2638,32 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
priv->param_workaround_interval = 500;
else
priv->param_workaround_interval = modparam_workaround_interval;
+}
+
+static int bcm4320a_early_init(struct usbnet *usbdev)
+{
+ /* copy module parameters for bcm4320a so that iwconfig reports txpower
+ * and workaround parameter is copied to private structure correctly.
+ */
+ rndis_copy_module_params(usbdev);
+
+ /* bcm4320a doesn't handle configuration parameters well. Try
+ * set any and you get partially zeroed mac and broken device.
+ */
+
+ return 0;
+}
+
+static int bcm4320b_early_init(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ char buf[8];
+
+ rndis_copy_module_params(usbdev);
+
+ /* Early initialization settings, setting these won't have effect
+ * if called after generic_rndis_bind().
+ */
rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
rndis_set_config_parameter_str(usbdev, "FrameBursting",
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bf60689aaabb..3ca824a91ad9 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -54,12 +54,12 @@ config RT61PCI
When compiled as a module, this driver will be called rt61pci.
config RT2800PCI_PCI
- tristate
+ boolean
depends on PCI
default y
config RT2800PCI_SOC
- tristate
+ boolean
depends on RALINK_RT288X || RALINK_RT305X
default y
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e7f46405a418..aa579eb8723f 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -451,7 +451,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* RF2420 chipset don't need any additional actions.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2420))
+ if (rt2x00_rf(rt2x00dev, RF2420))
return;
/*
@@ -1343,8 +1343,7 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF2420) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
+ if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1643,7 +1642,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
/*
* RT2400pci module information.
*/
-static struct pci_device_id rt2400pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 408fcfc120f5..77ee1df7933f 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -440,8 +440,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
@@ -449,7 +448,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
@@ -475,14 +474,14 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch on tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523))
+ if (!rt2x00_rf(rt2x00dev, RF2523))
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1);
rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1);
/*
* For RT2525 we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ if (rt2x00_rf(rt2x00dev, RF2525)) {
static const u32 vals[] = {
0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a,
0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a,
@@ -516,7 +515,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch off tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ if (!rt2x00_rf(rt2x00dev, RF2523)) {
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0);
rt2500pci_rf_write(rt2x00dev, 1, rf->rf1);
}
@@ -640,7 +639,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* up to version C the link tuning should halt after 20
* seconds while being associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D &&
rt2x00dev->intf_associated && count > 20)
return;
@@ -650,7 +649,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* should go straight to dynamic CCA tuning when they
* are not associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D ||
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D ||
!rt2x00dev->intf_associated)
goto dynamic_cca_tune;
@@ -1507,12 +1506,12 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1744,22 +1743,22 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
@@ -1941,7 +1940,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
/*
* RT2500pci module information.
*/
-static struct pci_device_id rt2500pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 83f2592c59de..9e6f865c57f2 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -565,8 +565,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
@@ -574,7 +573,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@ -598,7 +597,7 @@ static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* For RT2525E we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E)) {
static const u32 vals[] = {
0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@ -793,7 +792,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) {
+ if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
} else {
@@ -1411,19 +1410,18 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
-
+ if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0) ||
+ rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1667,22 +1665,22 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 27bf887f1453..529a37364eb0 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,7 +37,7 @@
#include <linux/module.h>
#include "rt2x00.h"
-#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
#include "rt2x00usb.h"
#endif
#include "rt2800lib.h"
@@ -220,8 +220,7 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
/*
* RT2880 and RT3052 don't support MCU requests.
*/
- if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
- rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (rt2x00_rt(rt2x00dev, RT2880) || rt2x00_rt(rt2x00dev, RT3052))
return;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -246,6 +245,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+ !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return 0;
+
+ msleep(1);
+ }
+
+ ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+ return -EACCES;
+}
+EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -340,7 +358,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
- rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
+ rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
@@ -348,7 +366,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
return 0;
}
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
+static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led, enum led_type type)
{
led->rt2x00dev = rt2x00dev;
@@ -357,7 +375,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
led->led_dev.blink_set = rt2800_blink_set;
led->flags = LED_INITIALIZED;
}
-EXPORT_SYMBOL_GPL(rt2800_init_led);
#endif /* CONFIG_RT2X00_LIB_LEDS */
/*
@@ -806,12 +823,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
- if ((rt2x00_rt(&rt2x00dev->chip, RT3070) ||
- rt2x00_rt(&rt2x00dev->chip, RT3090)) &&
- (rt2x00_rf(&rt2x00dev->chip, RF2020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3021) ||
- rt2x00_rf(&rt2x00dev->chip, RF3022)))
+ if ((rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090)) &&
+ (rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)))
rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
@@ -878,7 +895,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
if (conf_is_ht40(conf)) {
rt2800_bbp_write(rt2x00dev, 69, 0x1a);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -1041,7 +1058,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
+ rt2x00_rev(rt2x00dev) == RT3070_VERSION)
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1072,7 +1089,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
+ if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)
return;
/*
@@ -1121,7 +1138,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_intf_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
@@ -1158,7 +1175,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -1185,8 +1202,8 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
- rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
+ if (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION &&
+ rt2x00_rev(rt2x00dev) < RT3070_VERSION)
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1465,22 +1482,22 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0x00);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rev(rt2x00dev) == RT2860C_VERSION) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
}
- if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
+ if (rt2x00_rev(rt2x00dev) > RT2860D_VERSION)
rt2800_bbp_write(rt2x00dev, 84, 0x19);
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ rt2x00_rev(rt2x00dev) == RT3070_VERSION) {
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
rt2800_bbp_write(rt2x00dev, 84, 0x99);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
}
- if (rt2x00_rt(&rt2x00dev->chip, RT3052)) {
+ if (rt2x00_rt(rt2x00dev, RT3052)) {
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 78, 0x0e);
rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1566,13 +1583,13 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
u8 bbp;
if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+ rt2x00_rev(rt2x00dev) != RT3070_VERSION)
return 0;
if (rt2x00_intf_is_pci(rt2x00dev)) {
- if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022))
+ if (!rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022))
return 0;
}
@@ -1737,7 +1754,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
- } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
+ } else if (rt2x00_rev(rt2x00dev) < RT2883_VERSION) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
@@ -1839,17 +1856,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
if (rt2x00_intf_is_usb(rt2x00dev)) {
- struct rt2x00_chip *chip = &rt2x00dev->chip;
-
/*
* The check for rt2860 is not a typo, some rt2870 hardware
* identifies itself as rt2860 in the CSR register.
*/
- if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28700000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) {
+ if (rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28600000) ||
+ rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28700000) ||
+ rt2x00_check_rev(rt2x00dev, 0xfff00000, 0x28800000)) {
rt2x00_set_chip_rt(rt2x00dev, RT2870);
- } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) {
+ } else if (rt2x00_check_rev(rt2x00dev, 0xffff0000, 0x30700000)) {
rt2x00_set_chip_rt(rt2x00dev, RT3070);
} else {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
@@ -1858,14 +1873,15 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
}
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
+ if (!rt2x00_rf(rt2x00dev, RF2820) &&
+ !rt2x00_rf(rt2x00dev, RF2850) &&
+ !rt2x00_rf(rt2x00dev, RF2720) &&
+ !rt2x00_rf(rt2x00dev, RF2750) &&
+ !rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF2020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022) &&
+ !rt2x00_rf(rt2x00dev, RF3052)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2013,7 +2029,6 @@ static const struct rf_channel rf_vals_302x[] = {
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
- struct rt2x00_chip *chip = &rt2x00dev->chip;
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *tx_power1;
@@ -2049,19 +2064,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(chip, RF2820) ||
- rt2x00_rf(chip, RF2720) ||
- (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) {
+ if (rt2x00_rf(rt2x00dev, RF2820) ||
+ rt2x00_rf(rt2x00dev, RF2720) ||
+ rt2x00_rf(rt2x00dev, RF3052)) {
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF3020) ||
- rt2x00_rf(chip, RF2020) ||
- rt2x00_rf(chip, RF3021) ||
- rt2x00_rf(chip, RF3022)) {
+ } else if (rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)) {
spec->num_channels = ARRAY_SIZE(rf_vals_302x);
spec->channels = rf_vals_302x;
}
@@ -2069,7 +2084,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize HT information.
*/
- if (!rt2x00_rf(chip, RF2020))
+ if (!rt2x00_rf(rt2x00dev, RF2020))
spec->ht.ht_supported = true;
else
spec->ht.ht_supported = false;
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac8..ebabeae62d1b 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_led *led, enum led_type type);
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dfc886fcb44d..d64181cbc9cb 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -48,14 +48,6 @@
#include "rt2800.h"
#include "rt2800pci.h"
-#ifdef CONFIG_RT2800PCI_PCI_MODULE
-#define CONFIG_RT2800PCI_PCI
-#endif
-
-#ifdef CONFIG_RT2800PCI_WISOC_MODULE
-#define CONFIG_RT2800PCI_WISOC
-#endif
-
/*
* Allow hardware encryption to be disabled.
*/
@@ -87,7 +79,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
@@ -98,7 +90,7 @@ static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
}
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -461,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
-static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -487,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800pci_init_queues(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
- rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
return -EIO;
@@ -570,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
/* Wait for DMA, ignore error */
- rt2800pci_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -835,7 +809,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *rxd = entry_priv->desc;
__le32 *rxwi = (__le32 *)entry->skb->data;
@@ -883,10 +856,8 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
+ if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -927,7 +898,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
* Remove TXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, RXWI_DESC_SIZE);
- skb_trim(entry->skb, rxdesc->size);
}
/*
@@ -1133,8 +1103,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* This device requires firmware.
*/
- if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
- !rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (!rt2x00_rt(rt2x00dev, RT2880) && !rt2x00_rt(rt2x00dev, RT3052))
__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
@@ -1221,7 +1190,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-static struct pci_device_id rt2800pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1255,7 +1224,7 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_RT2800PCI_PCI */
MODULE_LICENSE("GPL");
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
#if defined(CONFIG_RALINK_RT288X)
__rt2x00soc_probe(RT2880, &rt2800pci_ops);
#elif defined(CONFIG_RALINK_RT305X)
@@ -1273,7 +1242,7 @@ static struct platform_driver rt2800soc_driver = {
.suspend = rt2x00soc_suspend,
.resume = rt2x00soc_resume,
};
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static struct pci_driver rt2800pci_driver = {
@@ -1290,7 +1259,7 @@ static int __init rt2800pci_init(void)
{
int ret = 0;
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
ret = platform_driver_register(&rt2800soc_driver);
if (ret)
return ret;
@@ -1298,7 +1267,7 @@ static int __init rt2800pci_init(void)
#ifdef CONFIG_RT2800PCI_PCI
ret = pci_register_driver(&rt2800pci_driver);
if (ret) {
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
return ret;
@@ -1313,7 +1282,7 @@ static void __exit rt2800pci_exit(void)
#ifdef CONFIG_RT2800PCI_PCI
pci_unregister_driver(&rt2800pci_driver);
#endif
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
}
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ab95346cf6a3..82755cf8b73e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -92,7 +92,7 @@ static bool rt2800usb_check_crc(const u8 *data, const size_t len)
static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len)
{
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
+ u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
size_t offset = 0;
/*
@@ -138,7 +138,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
u32 reg;
u32 offset;
u32 length;
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
+ u16 chipset = (rt2x00_rev(rt2x00dev) >> 16) & 0xffff;
/*
* Check which section of the firmware we need.
@@ -248,24 +248,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
}
-static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -274,7 +256,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
@@ -295,9 +277,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
- /* Don't use bulk in aggregation when working with USB 1.1 */
- rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
- (rt2x00dev->rx->usb_maxpacket == 512));
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
/*
* Total room for RX frames in kilobytes, PBF might still exceed
@@ -346,7 +326,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
/* Wait for DMA, ignore error */
- rt2800usb_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
rt2x00usb_disable_radio(rt2x00dev);
}
@@ -573,41 +553,57 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- __le32 *rxd = (__le32 *)entry->skb->data;
+ __le32 *rxi = (__le32 *)entry->skb->data;
__le32 *rxwi;
- u32 rxd0;
+ __le32 *rxd;
+ u32 rxi0;
u32 rxwi0;
u32 rxwi1;
u32 rxwi2;
u32 rxwi3;
+ u32 rxd0;
+ int rx_pkt_len;
+
+ /*
+ * RX frame format is :
+ * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
+ * |<------------ rx_pkt_len -------------->|
+ */
+ rt2x00_desc_read(rxi, 0, &rxi0);
+ rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
+
+ rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
+
+ /*
+ * FIXME : we need to check for rx_pkt_len validity
+ */
+ rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
/*
* Copy descriptor to the skbdesc->desc buffer, making it safe from
* moving of frame data in rt2x00usb.
*/
- memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
- rxd = (__le32 *)skbdesc->desc;
- rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
+ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxd, 0, &rxd0);
rt2x00_desc_read(rxwi, 0, &rxwi0);
rt2x00_desc_read(rxwi, 1, &rxwi1);
rt2x00_desc_read(rxwi, 2, &rxwi2);
rt2x00_desc_read(rxwi, 3, &rxwi3);
+ rt2x00_desc_read(rxd, 0, &rxd0);
- if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR))
+ if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
rxdesc->cipher_status =
- rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR);
+ rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -622,13 +618,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS))
+ if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -663,7 +657,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
* Remove RXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, skbdesc->desc_len);
- skb_trim(entry->skb, rxdesc->size);
}
/*
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 1e4340a182ef..d1d8ae94b4d4 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,6 +79,8 @@
*/
#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
+#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
+#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
/*
* TX Info structure
@@ -101,6 +103,54 @@
#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
/*
+ * RX Info structure
+ */
+
+/*
+ * Word 0
+ */
+
+#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
+
+/*
+ * RX WI structure
+ */
+
+/*
+ * Word0
+ */
+#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
+#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
+#define RXWI_W0_BSSID FIELD32(0x00001c00)
+#define RXWI_W0_UDF FIELD32(0x0000e000)
+#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
+#define RXWI_W0_TID FIELD32(0xf0000000)
+
+/*
+ * Word1
+ */
+#define RXWI_W1_FRAG FIELD32(0x0000000f)
+#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
+#define RXWI_W1_MCS FIELD32(0x007f0000)
+#define RXWI_W1_BW FIELD32(0x00800000)
+#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
+#define RXWI_W1_STBC FIELD32(0x06000000)
+#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
+
+/*
+ * Word2
+ */
+#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
+#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
+#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
+
+/*
+ * Word3
+ */
+#define RXWI_W3_SNR0 FIELD32(0x000000ff)
+#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
+
+/*
* RX descriptor format for RX Ring.
*/
@@ -115,25 +165,25 @@
* AMSDU: rx with 802.3 header, not 802.11 header.
*/
-#define RXINFO_W0_BA FIELD32(0x00000001)
-#define RXINFO_W0_DATA FIELD32(0x00000002)
-#define RXINFO_W0_NULLDATA FIELD32(0x00000004)
-#define RXINFO_W0_FRAG FIELD32(0x00000008)
-#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010)
-#define RXINFO_W0_MULTICAST FIELD32(0x00000020)
-#define RXINFO_W0_BROADCAST FIELD32(0x00000040)
-#define RXINFO_W0_MY_BSS FIELD32(0x00000080)
-#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100)
-#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600)
-#define RXINFO_W0_AMSDU FIELD32(0x00000800)
-#define RXINFO_W0_HTC FIELD32(0x00001000)
-#define RXINFO_W0_RSSI FIELD32(0x00002000)
-#define RXINFO_W0_L2PAD FIELD32(0x00004000)
-#define RXINFO_W0_AMPDU FIELD32(0x00008000)
-#define RXINFO_W0_DECRYPTED FIELD32(0x00010000)
-#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000)
-#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000)
-#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000)
-#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000)
+#define RXD_W0_BA FIELD32(0x00000001)
+#define RXD_W0_DATA FIELD32(0x00000002)
+#define RXD_W0_NULLDATA FIELD32(0x00000004)
+#define RXD_W0_FRAG FIELD32(0x00000008)
+#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W0_MULTICAST FIELD32(0x00000020)
+#define RXD_W0_BROADCAST FIELD32(0x00000040)
+#define RXD_W0_MY_BSS FIELD32(0x00000080)
+#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W0_AMSDU FIELD32(0x00000800)
+#define RXD_W0_HTC FIELD32(0x00001000)
+#define RXD_W0_RSSI FIELD32(0x00002000)
+#define RXD_W0_L2PAD FIELD32(0x00004000)
+#define RXD_W0_AMPDU FIELD32(0x00008000)
+#define RXD_W0_DECRYPTED FIELD32(0x00010000)
+#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
+#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
+#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
+#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
#endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 4d841c07c970..096da85a66fa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -104,6 +104,12 @@
#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
/*
+ * Determine the number of L2 padding bytes required between the header and
+ * the payload.
+ */
+#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3)
+
+/*
* Determine the alignment requirement,
* to make sure the 802.11 payload is padded to a 4-byte boundrary
* we must determine the address of the payload and calculate the
@@ -113,6 +119,12 @@
( ((unsigned long)((__skb)->data + (__header))) & 3 )
/*
+ * Constants for extra TX headroom for alignment purposes.
+ */
+#define RT2X00_ALIGN_SIZE 4 /* Only whole frame needs alignment */
+#define RT2X00_L2PAD_SIZE 8 /* Both header & payload need alignment */
+
+/*
* Standard timing and size defines.
* These values should follow the ieee80211 specifications.
*/
@@ -931,25 +943,25 @@ static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
}
-static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip)
+static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
{
- return (chipset->rt == chip);
+ return (rt2x00dev->chip.rt == rt);
}
-static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip)
+static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
- return (chipset->rf == chip);
+ return (rt2x00dev->chip.rf == rf);
}
-static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset)
+static inline u32 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
{
- return chipset->rev;
+ return rt2x00dev->chip.rev;
}
-static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
+static inline bool rt2x00_check_rev(struct rt2x00_dev *rt2x00dev,
const u32 mask, const u32 rev)
{
- return ((chipset->rev & mask) == rev);
+ return ((rt2x00dev->chip.rev & mask) == rev);
}
static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
@@ -958,20 +970,20 @@ static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.intf = intf;
}
-static inline bool rt2x00_intf(const struct rt2x00_chip *chipset,
+static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
enum rt2x00_chip_intf intf)
{
- return (chipset->intf == intf);
+ return (rt2x00dev->chip.intf == intf);
}
static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
}
static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
}
/**
@@ -1013,9 +1025,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 06c43ca39bf8..b93731b79903 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -385,9 +385,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
memset(&rxdesc, 0, sizeof(rxdesc));
rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
- /* Trim buffer to correct size */
- skb_trim(entry->skb, rxdesc.size);
-
/*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
@@ -404,11 +401,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
(rxdesc.flags & RX_FLAG_IV_STRIPPED))
rt2x00crypto_rx_insert_iv(entry->skb, header_length,
&rxdesc);
- else if (rxdesc.dev_flags & RXDONE_L2PAD)
+ else if (header_length &&
+ (rxdesc.size > header_length) &&
+ (rxdesc.dev_flags & RXDONE_L2PAD))
rt2x00queue_remove_l2pad(entry->skb, header_length);
else
rt2x00queue_align_payload(entry->skb, header_length);
+ /* Trim buffer to correct size */
+ skb_trim(entry->skb, rxdesc.size);
+
/*
* Check if the frame was received using HT. In that case,
* the rate is the MCS index and should be passed to mac80211
@@ -686,7 +688,17 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* Initialize extra TX headroom required.
*/
- rt2x00dev->hw->extra_tx_headroom = rt2x00dev->ops->extra_tx_headroom;
+ rt2x00dev->hw->extra_tx_headroom =
+ max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM,
+ rt2x00dev->ops->extra_tx_headroom);
+
+ /*
+ * Take TX headroom required for alignment into account.
+ */
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE;
+ else if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags))
+ rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE;
/*
* Register HW.
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index 0efbf5a6c254..ffee9f8ef74f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -240,7 +240,7 @@ void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
/*
- * Frame was received successfully since non-succesfull
+ * Frame was received successfully since non-successful
* frames would have been dropped by the hardware.
*/
qual->rx_success++;
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index de549c244ed8..00f1f939f1bb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -187,10 +187,10 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct queue_entry *entry = NULL;
unsigned int i;
@@ -203,7 +203,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
return -ENODEV;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
/*
* We don't support mixed combinations of
@@ -263,7 +263,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
* increase interface count and start initialization.
*/
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count++;
else
rt2x00dev->intf_sta_count++;
@@ -273,16 +273,16 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
mutex_init(&intf->beacon_skb_mutex);
intf->beacon = entry;
- if (conf->type == NL80211_IFTYPE_AP)
- memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
- memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
+ if (vif->type == NL80211_IFTYPE_AP)
+ memcpy(&intf->bssid, vif->addr, ETH_ALEN);
+ memcpy(&intf->mac, vif->addr, ETH_ALEN);
/*
* The MAC adddress must be configured after the device
* has been initialized. Otherwise the device can reset
* the MAC registers.
*/
- rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL);
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
/*
* Some filters depend on the current working mode. We can force
@@ -296,10 +296,10 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
/*
* Don't allow interfaces to be remove while
@@ -307,11 +307,11 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
* no interface is present.
*/
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
- (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
- (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
+ (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
+ (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
return;
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count--;
else
rt2x00dev->intf_sta_count--;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0feb4d0e4668..801be436cf1d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -41,6 +41,9 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return 0;
+
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 239afc7a9c0b..0b4801a14601 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -104,7 +104,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* is also mapped to the DMA so it can be used for transfering
* additional descriptor information to the hardware.
*/
- skb_push(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_push(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->skb_dma =
dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);
@@ -112,7 +112,7 @@ void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
/*
* Restore data pointer to original location again.
*/
- skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);
+ skb_pull(skb, rt2x00dev->ops->extra_tx_headroom);
skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
@@ -134,7 +134,7 @@ void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
* by the driver, but it was actually mapped to DMA.
*/
dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
- skb->len + rt2x00dev->hw->extra_tx_headroom,
+ skb->len + rt2x00dev->ops->extra_tx_headroom,
DMA_TO_DEVICE);
skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
}
@@ -177,55 +177,45 @@ void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int frame_length = skb->len;
+ unsigned int payload_length = skb->len - header_length;
unsigned int header_align = ALIGN_SIZE(skb, 0);
unsigned int payload_align = ALIGN_SIZE(skb, header_length);
- unsigned int l2pad = 4 - (payload_align - header_align);
+ unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
- if (header_align == payload_align) {
- /*
- * Both header and payload must be moved the same
- * amount of bytes to align them properly. This means
- * we don't use the L2 padding but just move the entire
- * frame.
- */
- rt2x00queue_align_frame(skb);
- } else if (!payload_align) {
- /*
- * Simple L2 padding, only the header needs to be moved,
- * the payload is already properly aligned.
- */
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, frame_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- } else {
- /*
- *
- * Complicated L2 padding, both header and payload need
- * to be moved. By default we only move to the start
- * of the buffer, so our header alignment needs to be
- * increased if there is not enough room for the header
- * to be moved.
- */
- if (payload_align > header_align)
- header_align += 4;
+ /*
+ * Adjust the header alignment if the payload needs to be moved more
+ * than the header.
+ */
+ if (payload_align > header_align)
+ header_align += 4;
+
+ /* There is nothing to do if no alignment is needed */
+ if (!header_align)
+ return;
+
+ /* Reserve the amount of space needed in front of the frame */
+ skb_push(skb, header_align);
+
+ /*
+ * Move the header.
+ */
+ memmove(skb->data, skb->data + header_align, header_length);
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, header_length);
+ /* Move the payload, if present and if required */
+ if (payload_length && payload_align)
memmove(skb->data + header_length + l2pad,
skb->data + header_length + l2pad + payload_align,
- frame_length - header_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
+ payload_length);
+
+ /* Trim the skb to the correct size */
+ skb_trim(skb, header_length + l2pad + payload_length);
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int l2pad = 4 - (header_length & 3);
+ unsigned int l2pad = L2PAD_SIZE(header_length);
- if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
+ if (!l2pad)
return;
memmove(skb->data + l2pad, skb->data, header_length);
@@ -346,7 +336,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
* Header and alignment information.
*/
txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
+ (entry->skb->len > txdesc->header_length))
+ txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
/*
* Check whether this frame is to be acked.
@@ -387,10 +379,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Beacons and probe responses require the tsf timestamp
- * to be inserted into the frame.
+ * to be inserted into the frame, except for a frame that has been injected
+ * through a monitor interface. This latter is needed for testing a
+ * monitor interface.
*/
- if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ if ((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 70775e5ba1ac..c1e482bb37b3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -92,8 +92,6 @@ enum data_queue_qid {
* @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
* @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
* mac80211 but was stripped for processing by the driver.
- * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
- * the padded bytes are located between header and payload.
* @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
* don't try to pass it back.
*/
@@ -101,8 +99,7 @@ enum skb_frame_desc_flags {
SKBDESC_DMA_MAPPED_RX = 1 << 0,
SKBDESC_DMA_MAPPED_TX = 1 << 1,
SKBDESC_IV_STRIPPED = 1 << 2,
- SKBDESC_L2_PADDED = 1 << 3,
- SKBDESC_NOT_MAC80211 = 1 << 4,
+ SKBDESC_NOT_MAC80211 = 1 << 3,
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 0a751e73aa0f..8b8c500949b4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -200,7 +200,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
* Obtain the status about this packet.
* Note that when the status is 0 it does not mean the
* frame was send out correctly. It only means the frame
- * was succesfully pushed to the hardware, we have no
+ * was successfully pushed to the hardware, we have no
* way to determine the transmission status right now.
* (Only indirectly by looking at the failed TX counters
* in the register).
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0ca589306d71..1f97a797bc41 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -637,8 +637,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF5325));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
/*
* Configure the RX antenna.
@@ -684,8 +683,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF2529));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
!test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
@@ -833,12 +831,11 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
rt61pci_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ else if (rt2x00_rf(rt2x00dev, RF2529)) {
if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
@@ -879,8 +876,7 @@ static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt61pci_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -2302,10 +2298,10 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip_rf(rt2x00dev, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5325) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ if (!rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF5325) &&
+ !rt2x00_rf(rt2x00dev, RF2527) &&
+ !rt2x00_rf(rt2x00dev, RF2529)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2360,7 +2356,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* the antenna settings should be gathered from the NIC
* eeprom word.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2529) &&
+ if (rt2x00_rf(rt2x00dev, RF2529) &&
!test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@ -2571,8 +2567,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels = rf_vals_seq;
}
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325)) {
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_seq);
}
@@ -2812,7 +2807,7 @@ static const struct rt2x00_ops rt61pci_ops = {
/*
* RT61pci module information.
*/
-static struct pci_device_id rt61pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
/* RT2561s */
{ PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
/* RT2561 v2 */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ced3b6ab5e16..a02691294395 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -136,8 +136,8 @@ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
* all others contain 20 bits.
*/
rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
- 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527)));
+ 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
+ rt2x00_rf(rt2x00dev, RF2527)));
rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
@@ -741,11 +741,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5226) ||
- rt2x00_rf(&rt2x00dev->chip, RF5225))
+ if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
rt73usb_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2528) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
rt73usb_config_antenna_2x(rt2x00dev, ant);
}
@@ -779,8 +777,7 @@ static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt73usb_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1210,8 +1207,7 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
reg = 0x000023b0;
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
@@ -1827,16 +1823,16 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
rt2x00_print_chip(rt2x00dev);
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
+ if (!rt2x00_check_rev(rt2x00dev, 0x000ffff0, 0x25730) ||
+ rt2x00_check_rev(rt2x00dev, 0x0000000f, 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF5226) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2528) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ if (!rt2x00_rf(rt2x00dev, RF5226) &&
+ !rt2x00_rf(rt2x00dev, RF2528) &&
+ !rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF2527)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2081,17 +2077,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
+ if (rt2x00_rf(rt2x00dev, RF2528)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
spec->channels = rf_vals_bg_2528;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5226)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5226);
spec->channels = rf_vals_5226;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2527)) {
spec->num_channels = 14;
spec->channels = rf_vals_5225_2527;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5225)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
spec->channels = rf_vals_5225_2527;
@@ -2354,6 +2350,7 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a8185..de3844fe06d8 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -60,7 +60,6 @@ struct rtl8180_priv {
struct rtl818x_csr __iomem *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
/* rtl8180 driver specific */
spinlock_t lock;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 8a40a1439984..b9192bfcc557 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -33,7 +33,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
MODULE_LICENSE("GPL");
-static struct pci_device_id rtl8180_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
/* rtl8185 */
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
{ PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -82,8 +82,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
};
-
-
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
{
struct rtl8180_priv *priv = dev->priv;
@@ -615,7 +613,6 @@ static int rtl8180_start(struct ieee80211_hw *dev)
reg |= RTL818X_CMD_TX_ENABLE;
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
- priv->mode = NL80211_IFTYPE_MONITOR;
return 0;
err_free_rings:
@@ -633,8 +630,6 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
u8 reg;
int i;
- priv->mode = NL80211_IFTYPE_UNSPECIFIED;
-
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -657,38 +652,39 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
}
static int rtl8180_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- if (priv->mode != NL80211_IFTYPE_MONITOR)
- return -EOPNOTSUPP;
+ /*
+ * We only support one active interface at a time.
+ */
+ if (priv->vif)
+ return -EBUSY;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
- le32_to_cpu(*(__le32 *)conf->mac_addr));
+ le32_to_cpu(*(__le32 *)vif->addr));
rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
- le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ le16_to_cpu(*(__le16 *)(vif->addr + 4)));
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
return 0;
}
static void rtl8180_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
}
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6af0f3f71f3a..6bb32112e65c 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -92,7 +92,7 @@ struct rtl8187_priv {
struct rtl818x_csr *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
+
/* The mutex protects the TX loopback state.
* Any attempt to set channels concurrently locks the device.
*/
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index bc5726dd5fe4..f336c63053c1 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1018,31 +1018,30 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
}
static int rtl8187_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
int i;
int ret = -EOPNOTSUPP;
mutex_lock(&priv->conf_mutex);
- if (priv->mode != NL80211_IFTYPE_MONITOR)
+ if (priv->vif)
goto exit;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
goto exit;
}
ret = 0;
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->MAC[i],
- ((u8 *)conf->mac_addr)[i]);
+ ((u8 *)vif->addr)[i]);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
exit:
@@ -1051,11 +1050,10 @@ exit:
}
static void rtl8187_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
mutex_lock(&priv->conf_mutex);
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
mutex_unlock(&priv->conf_mutex);
}
@@ -1365,7 +1363,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- priv->mode = NL80211_IFTYPE_MONITOR;
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index ded44c045eb2..f82aa8b4bdde 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -33,7 +33,7 @@ static void led_turn_on(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
@@ -71,7 +71,7 @@ static void led_turn_off(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 054533f7a124..6301578d1565 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -247,6 +247,7 @@ struct wl1251_debugfs {
struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
struct dentry *tx_queue_len;
+ struct dentry *tx_queue_status;
struct dentry *retry_count;
struct dentry *excessive_retries;
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index acfa086dbfc5..beff084040b5 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -976,3 +976,72 @@ out:
kfree(acx);
return ret;
}
+
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop)
+{
+ struct wl1251_acx_ac_cfg *acx;
+ int ret = 0;
+
+ wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d "
+ "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->ac = ac;
+ acx->cw_min = cw_min;
+ acx->cw_max = cw_max;
+ acx->aifsn = aifs;
+ acx->txop_limit = txop;
+
+ ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx ac cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy)
+{
+ struct wl1251_acx_tid_cfg *acx;
+ int ret = 0;
+
+ wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
+ "ps_scheme %d ack_policy %d", queue, type, tsid,
+ ps_scheme, ack_policy);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->queue = queue;
+ acx->type = type;
+ acx->tsid = tsid;
+ acx->ps_scheme = ps_scheme;
+ acx->ack_policy = ack_policy;
+
+ ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx tid cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 652371432cd8..26160c45784c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -1166,6 +1166,87 @@ struct wl1251_acx_wr_tbtt_and_dtim {
u8 padding;
} __attribute__ ((packed));
+struct wl1251_acx_ac_cfg {
+ struct acx_header header;
+
+ /*
+ * Access Category - The TX queue's access category
+ * (refer to AccessCategory_enum)
+ */
+ u8 ac;
+
+ /*
+ * The contention window minimum size (in slots) for
+ * the access class.
+ */
+ u8 cw_min;
+
+ /*
+ * The contention window maximum size (in slots) for
+ * the access class.
+ */
+ u16 cw_max;
+
+ /* The AIF value (in slots) for the access class. */
+ u8 aifsn;
+
+ u8 reserved;
+
+ /* The TX Op Limit (in microseconds) for the access class. */
+ u16 txop_limit;
+} __attribute__ ((packed));
+
+
+enum wl1251_acx_channel_type {
+ CHANNEL_TYPE_DCF = 0,
+ CHANNEL_TYPE_EDCF = 1,
+ CHANNEL_TYPE_HCCA = 2,
+};
+
+enum wl1251_acx_ps_scheme {
+ /* regular ps: simple sending of packets */
+ WL1251_ACX_PS_SCHEME_LEGACY = 0,
+
+ /* sending a packet triggers a unscheduled apsd downstream */
+ WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
+
+ /* a pspoll packet will be sent before every data packet */
+ WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
+
+ /* scheduled apsd mode */
+ WL1251_ACX_PS_SCHEME_SAPSD = 3,
+};
+
+enum wl1251_acx_ack_policy {
+ WL1251_ACX_ACK_POLICY_LEGACY = 0,
+ WL1251_ACX_ACK_POLICY_NO_ACK = 1,
+ WL1251_ACX_ACK_POLICY_BLOCK = 2,
+};
+
+struct wl1251_acx_tid_cfg {
+ struct acx_header header;
+
+ /* tx queue id number (0-7) */
+ u8 queue;
+
+ /* channel access type for the queue, enum wl1251_acx_channel_type */
+ u8 type;
+
+ /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */
+ u8 tsid;
+
+ /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */
+ u8 ps_scheme;
+
+ /* the tx queue ack policy, enum wl1251_acx_ack_policy */
+ u8 ack_policy;
+
+ u8 padding[3];
+
+ /* not supported */
+ u32 apsdconf[2];
+} __attribute__ ((packed));
+
/*************************************************************************
Host Interrupt Register (WiLink -> Host)
@@ -1322,5 +1403,11 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop);
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy);
#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726bd..0320b478bb3f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -410,3 +410,86 @@ out:
kfree(cmd);
return ret;
}
+
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes)
+{
+ struct wl1251_cmd_scan *cmd;
+ int i, ret = 0;
+
+ wl1251_debug(DEBUG_CMD, "cmd scan");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
+ cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
+ CFG_RX_MGMT_EN |
+ CFG_RX_BCN_EN);
+ cmd->params.scan_options = 0;
+ cmd->params.num_channels = n_channels;
+ cmd->params.num_probe_requests = n_probes;
+ cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
+ cmd->params.tid_trigger = 0;
+
+ for (i = 0; i < n_channels; i++) {
+ cmd->channels[i].min_duration =
+ cpu_to_le32(WL1251_SCAN_MIN_DURATION);
+ cmd->channels[i].max_duration =
+ cpu_to_le32(WL1251_SCAN_MAX_DURATION);
+ memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
+ memset(&cmd->channels[i].bssid_msb, 0xff, 2);
+ cmd->channels[i].early_termination = 0;
+ cmd->channels[i].tx_power_att = 0;
+ cmd->channels[i].channel = channels[i]->hw_value;
+ }
+
+ cmd->params.ssid_len = ssid_len;
+ if (ssid)
+ memcpy(cmd->params.ssid, ssid, ssid_len);
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd scan failed: %d", ret);
+ goto out;
+ }
+
+ wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
+
+ if (cmd->header.status != CMD_STATUS_SUCCESS) {
+ wl1251_error("cmd scan status wasn't success: %d",
+ cmd->header.status);
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
+{
+ struct wl1251_cmd_trigger_scan_to *cmd;
+ int ret;
+
+ wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->timeout = timeout;
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef5..4ad67cae94d2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
#include "wl1251.h"
+#include <net/cfg80211.h>
+
struct acx_header;
int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
size_t len);
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len);
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes);
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
/* unit ms */
#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+#define WL1251_SCAN_MIN_DURATION 30000
+#define WL1251_SCAN_MAX_DURATION 60000
+
+#define WL1251_SCAN_NUM_PROBES 3
-struct basic_scan_parameters {
+struct wl1251_scan_parameters {
u32 rx_config_options;
u32 rx_filter_options;
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
u8 tid_trigger;
u8 ssid_len;
- u32 ssid[8];
+ u8 ssid[32];
} __attribute__ ((packed));
-struct basic_scan_channel_parameters {
+struct wl1251_scan_ch_parameters {
u32 min_duration; /* in TU */
u32 max_duration; /* in TU */
u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
/* SCAN parameters */
#define SCAN_MAX_NUM_OF_CHANNELS 16
-struct cmd_scan {
+struct wl1251_cmd_scan {
struct wl1251_cmd_header header;
- struct basic_scan_parameters params;
- struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
+ struct wl1251_scan_parameters params;
+ struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
} __attribute__ ((packed));
enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a00723059f83..0ccba57fb9fb 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -237,6 +237,27 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1251_open_file_generic,
};
+static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1251 *wl = file->private_data;
+ char buf[3], status;
+ int len;
+
+ if (wl->tx_queue_stopped)
+ status = 's';
+ else
+ status = 'r';
+
+ len = scnprintf(buf, sizeof(buf), "%c\n", status);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations tx_queue_status_ops = {
+ .read = tx_queue_status_read,
+ .open = wl1251_open_file_generic,
+};
+
static void wl1251_debugfs_delete_files(struct wl1251 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -331,6 +352,7 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_DEL(tx_queue_len);
+ DEBUGFS_DEL(tx_queue_status);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
}
@@ -431,6 +453,7 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
+ DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index 5cb573383eeb..5aad56ea7153 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -294,6 +294,11 @@ static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
goto out;
}
+ wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
+ wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
+ wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
+ wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
+
out:
kfree(config);
return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index b3b25ec885ea..269cefb3e7d4 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -26,6 +26,53 @@
#include "wl1251.h"
+enum {
+ /* best effort/legacy */
+ AC_BE = 0,
+
+ /* background */
+ AC_BK = 1,
+
+ /* video */
+ AC_VI = 2,
+
+ /* voice */
+ AC_VO = 3,
+
+ /* broadcast dummy access category */
+ AC_BCAST = 4,
+
+ NUM_ACCESS_CATEGORIES = 4
+};
+
+/* following are defult values for the IE fields*/
+#define CWMIN_BK 15
+#define CWMIN_BE 15
+#define CWMIN_VI 7
+#define CWMIN_VO 3
+#define CWMAX_BK 1023
+#define CWMAX_BE 63
+#define CWMAX_VI 15
+#define CWMAX_VO 7
+
+/* slot number setting to start transmission at PIFS interval */
+#define AIFS_PIFS 1
+
+/*
+ * slot number setting to start transmission at DIFS interval - normal DCF
+ * access
+ */
+#define AIFS_DIFS 2
+
+#define AIFSN_BK 7
+#define AIFSN_BE 3
+#define AIFSN_VI AIFS_PIFS
+#define AIFSN_VO AIFS_PIFS
+#define TXOP_BK 0
+#define TXOP_BE 0
+#define TXOP_VI 3008
+#define TXOP_VO 1504
+
int wl1251_hw_init_hwenc_config(struct wl1251 *wl);
int wl1251_hw_init_templates_config(struct wl1251 *wl);
int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 2f50a256efa5..595f0f94d16e 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -395,6 +395,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* the queue here, otherwise the queue will get too long.
*/
if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
+ wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
ieee80211_stop_queues(wl->hw);
/*
@@ -510,13 +511,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
}
static int wl1251_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
int ret = 0;
wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -524,9 +525,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -538,8 +539,8 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) {
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
if (ret < 0)
@@ -552,7 +553,7 @@ out:
}
static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
@@ -562,43 +563,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static int wl1251_build_null_data(struct wl1251 *wl)
+static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
- struct wl12xx_null_data_template template;
+ struct ieee80211_qos_hdr template;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
-
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
-
- return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
- sizeof(template));
-
-}
-
-static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
-{
- struct wl12xx_ps_poll_template template;
+ memset(&template, 0, sizeof(template));
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
- return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template,
+ return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
sizeof(template));
-
}
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -640,20 +623,25 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
* through the bss_info_changed() hook.
*/
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm disabled");
wl->psm_requested = false;
- if (wl->psm)
+ if (wl->psm) {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
if (conf->power_level != wl->power_level) {
ret = wl1251_acx_tx_power(wl, conf->power_level);
if (ret < 0)
- goto out;
+ goto out_sleep;
wl->power_level = conf->power_level;
}
@@ -864,199 +852,61 @@ out:
return ret;
}
-static int wl1251_build_basic_rates(char *rates)
-{
- u8 index = 0;
-
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
-
- return index;
-}
-
-static int wl1251_build_extended_rates(char *rates)
+static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
{
- u8 index = 0;
-
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
-
- return index;
-}
-
+ struct wl1251 *wl = hw->priv;
+ struct sk_buff *skb;
+ size_t ssid_len = 0;
+ u8 *ssid = NULL;
+ int ret;
-static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len)
-{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
-
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1251_build_basic_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1251_build_extended_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
-
- return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
- size);
-}
+ wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 num_channels,
- u8 probe_requests)
-{
- struct wl1251_cmd_trigger_scan_to *trigger = NULL;
- struct cmd_scan *params = NULL;
- int i, ret;
- u16 scan_options = 0;
-
- if (wl->scanning)
- return -EINVAL;
-
- params = kzalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
-
- params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
- params->params.rx_filter_options =
- cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
-
- /* High priority scan */
- if (!active_scan)
- scan_options |= SCAN_PASSIVE;
- if (high_prio)
- scan_options |= SCAN_PRIORITY_HIGH;
- params->params.scan_options = scan_options;
-
- params->params.num_channels = num_channels;
- params->params.num_probe_requests = probe_requests;
- params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
- params->params.tid_trigger = 0;
-
- for (i = 0; i < num_channels; i++) {
- params->channels[i].min_duration = cpu_to_le32(30000);
- params->channels[i].max_duration = cpu_to_le32(60000);
- memset(&params->channels[i].bssid_lsb, 0xff, 4);
- memset(&params->channels[i].bssid_msb, 0xff, 2);
- params->channels[i].early_termination = 0;
- params->channels[i].tx_power_att = 0;
- params->channels[i].channel = i + 1;
- memset(params->channels[i].pad, 0, 3);
+ if (req->n_ssids) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
}
- for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++)
- memset(&params->channels[i], 0,
- sizeof(struct basic_scan_channel_parameters));
-
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
- } else {
- params->params.ssid_len = 0;
- memset(params->params.ssid, 0, 32);
- }
+ mutex_lock(&wl->mutex);
- ret = wl1251_build_probe_req(wl, ssid, len);
- if (ret < 0) {
- wl1251_error("PROBE request template failed");
+ if (wl->scanning) {
+ wl1251_debug(DEBUG_SCAN, "scan already in progress");
+ ret = -EINVAL;
goto out;
}
- trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
- if (!trigger)
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
goto out;
- trigger->timeout = 0;
-
- ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
- sizeof(*trigger));
- if (ret < 0) {
- wl1251_error("trigger scan to failed for hw scan");
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ req->ie, req->ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
goto out;
}
- wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
-
- wl->scanning = true;
+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
- ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
+ ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
- wl1251_error("SCAN failed");
+ goto out_sleep;
- wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
+ wl->scanning = true;
- if (params->header.status != CMD_STATUS_SUCCESS) {
- wl1251_error("TEST command answer error: %d",
- params->header.status);
+ ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
+ req->n_channels, WL1251_SCAN_NUM_PROBES);
+ if (ret < 0) {
wl->scanning = false;
- ret = -EIO;
- goto out;
- }
-
-out:
- kfree(params);
- return ret;
-
-}
-
-static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
-{
- struct wl1251 *wl = hw->priv;
- int ret;
- u8 *ssid = NULL;
- size_t ssid_len = 0;
-
- wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-
- if (req->n_ssids) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ goto out_sleep;
}
- mutex_lock(&wl->mutex);
-
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
-
+out_sleep:
wl1251_ps_elp_sleep(wl);
out:
@@ -1095,7 +945,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
{
enum wl1251_cmd_ps_mode mode;
struct wl1251 *wl = hw->priv;
- struct sk_buff *beacon;
+ struct sk_buff *beacon, *skb;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1109,7 +959,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
- ret = wl1251_build_null_data(wl);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
+
+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
+ skb->data, skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1251_build_qos_null_data(wl);
if (ret < 0)
goto out;
@@ -1130,7 +990,14 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
wl->dtim_period);
wl->aid = bss_conf->aid;
- ret = wl1251_build_ps_poll(wl, wl->aid);
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
+
+ ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
+ skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
if (ret < 0)
goto out_sleep;
@@ -1176,7 +1043,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
if (ret < 0) {
wl1251_warning("Set ctsprotect failed %d", ret);
- goto out;
+ goto out_sleep;
}
}
@@ -1187,7 +1054,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0) {
dev_kfree_skb(beacon);
- goto out;
+ goto out_sleep;
}
ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1196,13 +1063,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
dev_kfree_skb(beacon);
if (ret < 0)
- goto out;
+ goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
wl->channel, wl->dtim_period);
if (ret < 0)
- goto out;
+ goto out_sleep;
}
out_sleep:
@@ -1273,6 +1140,48 @@ static struct ieee80211_channel wl1251_channels[] = {
{ .hw_value = 13, .center_freq = 2472},
};
+static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ enum wl1251_acx_ps_scheme ps_scheme;
+ struct wl1251 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop);
+ if (ret < 0)
+ goto out_sleep;
+
+ if (params->uapsd)
+ ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
+
+ ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
+ CHANNEL_TYPE_EDCF,
+ wl1251_tx_get_queue(queue), ps_scheme,
+ WL1251_ACX_ACK_POLICY_LEGACY);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1251_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1251_band_2ghz = {
.channels = wl1251_channels,
@@ -1293,6 +1202,7 @@ static const struct ieee80211_ops wl1251_ops = {
.hw_scan = wl1251_op_hw_scan,
.bss_info_changed = wl1251_op_bss_info_changed,
.set_rts_threshold = wl1251_op_set_rts_threshold,
+ .conf_tx = wl1251_op_conf_tx,
};
static int wl1251_register_hw(struct wl1251 *wl)
@@ -1332,12 +1242,15 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_BEACON_FILTER;
+ IEEE80211_HW_BEACON_FILTER |
+ IEEE80211_HW_SUPPORTS_UAPSD;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+ wl->hw->queues = 4;
+
ret = wl1251_register_hw(wl);
if (ret)
goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 9931b197ff77..851dfb65e474 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -26,7 +26,8 @@
#include "wl1251_cmd.h"
#include "wl1251_io.h"
-#define WL1251_WAKEUP_TIMEOUT 2000
+/* in ms */
+#define WL1251_WAKEUP_TIMEOUT 100
void wl1251_elp_work(struct work_struct *work)
{
@@ -67,7 +68,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
int wl1251_ps_elp_wakeup(struct wl1251 *wl)
{
- unsigned long timeout;
+ unsigned long timeout, start;
u32 elp_reg;
if (!wl->elp)
@@ -75,6 +76,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
wl1251_debug(DEBUG_PSM, "waking up chip from elp");
+ start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
@@ -95,8 +97,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
- jiffies_to_msecs(jiffies) -
- (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT));
+ jiffies_to_msecs(jiffies - start));
wl->elp = false;
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index f84cc89cbffc..b56732226cc0 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -126,7 +126,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
if (wl->rx_current_buffer)
rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
- skb = dev_alloc_skb(length);
+ skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1251_error("Couldn't allocate RX frame");
return;
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index f85970615849..c8223185efd2 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -167,8 +167,7 @@ static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
tx_hdr->expiry_time = cpu_to_le32(1 << 16);
tx_hdr->id = id;
- /* FIXME: how to get the correct queue id? */
- tx_hdr->xmit_queue = 0;
+ tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
wl1251_tx_control(tx_hdr, control, fc);
wl1251_tx_frag_block_num(tx_hdr);
@@ -220,6 +219,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
/* align the buffer on a 4-byte boundary */
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
+ tx_hdr = (struct tx_double_buffer_desc *) skb->data;
} else {
wl1251_info("No handler, fixme!");
return -EINVAL;
@@ -237,8 +237,9 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
wl1251_mem_write(wl, addr, skb->data, len);
- wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x",
- tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate);
+ wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
+ "queue %d", tx_hdr->id, skb, tx_hdr->length,
+ tx_hdr->rate, tx_hdr->xmit_queue);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 7c1c1665c810..55856c6bb97a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -26,6 +26,7 @@
#define __WL1251_TX_H__
#include <linux/bitops.h>
+#include "wl1251_acx.h"
/*
*
@@ -209,6 +210,22 @@ struct tx_result {
u8 done_2;
} __attribute__ ((packed));
+static inline int wl1251_tx_get_queue(int queue)
+{
+ switch (queue) {
+ case 0:
+ return QOS_AC_VO;
+ case 1:
+ return QOS_AC_VI;
+ case 2:
+ return QOS_AC_BE;
+ case 3:
+ return QOS_AC_BK;
+ default:
+ return QOS_AC_BE;
+ }
+}
+
void wl1251_tx_work(struct work_struct *work);
void wl1251_tx_complete(struct wl1251 *wl);
void wl1251_tx_flush(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 94359b1a861f..d0938db043b3 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -107,10 +107,9 @@ enum {
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
-
#define WL1271_FW_NAME "wl1271-fw.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
+#define WL1271_NVS_LEN 468
/*
* Enable/disable 802.11a support for WL1273
@@ -276,6 +275,7 @@ struct wl1271_debugfs {
struct dentry *retry_count;
struct dentry *excessive_retries;
+ struct dentry *gpio_power;
};
#define NUM_TX_QUEUES 4
@@ -322,6 +322,17 @@ struct wl1271 {
enum wl1271_state state;
struct mutex mutex;
+#define WL1271_FLAG_STA_RATES_CHANGED (0)
+#define WL1271_FLAG_STA_ASSOCIATED (1)
+#define WL1271_FLAG_JOINED (2)
+#define WL1271_FLAG_GPIO_POWER (3)
+#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
+#define WL1271_FLAG_SCANNING (5)
+#define WL1271_FLAG_IN_ELP (6)
+#define WL1271_FLAG_PSM (7)
+#define WL1271_FLAG_PSM_REQUESTED (8)
+ unsigned long flags;
+
struct wl1271_partition_set part;
struct wl1271_chip chip;
@@ -359,7 +370,6 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
struct sk_buff_head tx_queue;
- bool tx_queue_stopped;
struct work_struct tx_work;
@@ -387,14 +397,15 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
- bool scanning;
struct wl1271_scan scan;
/* Our association ID */
u16 aid;
/* currently configured rate set */
+ u32 sta_rate_set;
u32 basic_rate_set;
+ u32 rate_set;
/* The current band */
enum ieee80211_band band;
@@ -405,18 +416,9 @@ struct wl1271 {
unsigned int rx_config;
unsigned int rx_filter;
- /* is firmware in elp mode */
- bool elp;
-
struct completion *elp_compl;
struct delayed_work elp_work;
- /* we can be in psm, but not in elp, we have to differentiate */
- bool psm;
-
- /* PSM mode requested */
- bool psm_requested;
-
/* retry counter for PSM entries */
u8 psm_entry_retry;
@@ -435,9 +437,6 @@ struct wl1271 {
struct ieee80211_vif *vif;
- /* Used for a workaround to send disconnect before rejoining */
- bool joined;
-
/* Current chipset configuration */
struct conf_drv_settings conf;
@@ -455,7 +454,9 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_TX_QUEUE_MAX_LENGTH 20
-/* WL1271 needs a 200ms sleep after power on */
+/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
+ on in case is has been shut down shortly before */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in miliseconds */
#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
static inline bool wl1271_11a_enabled(void)
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 5cc89bbdac7a..0b3434843476 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -390,6 +390,35 @@ out:
return ret;
}
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
+{
+ struct acx_dco_itrim_params *dco;
+ struct conf_itrim_settings *c = &wl->conf.itrim;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
+
+ dco = kzalloc(sizeof(*dco), GFP_KERNEL);
+ if (!dco) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dco->enable = c->enable;
+ dco->timeout = cpu_to_le32(c->timeout);
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
+ dco, sizeof(*dco));
+ if (ret < 0) {
+ wl1271_warning("failed to set dco itrim parameters: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(dco);
+ return ret;
+}
+
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
@@ -758,10 +787,11 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
+int wl1271_acx_rate_policies(struct wl1271 *wl)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ int idx = 0;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -773,12 +803,21 @@ int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
goto out;
}
- /* configure one default (one-size-fits-all) rate class */
- acx->rate_class_cnt = cpu_to_le32(1);
- acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
- acx->rate_class[0].short_retry_limit = c->short_retry_limit;
- acx->rate_class[0].long_retry_limit = c->long_retry_limit;
- acx->rate_class[0].aflags = c->aflags;
+ /* configure one basic rate class */
+ idx = ACX_TX_BASIC_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ /* configure one AP supported rate class */
+ idx = ACX_TX_AP_FULL_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
@@ -1012,59 +1051,6 @@ out:
return ret;
}
-int wl1271_acx_smart_reflex(struct wl1271 *wl)
-{
- struct acx_smart_reflex_state *sr_state = NULL;
- struct acx_smart_reflex_config_params *sr_param = NULL;
- int i, ret;
-
- wl1271_debug(DEBUG_ACX, "acx smart reflex");
-
- sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
- if (!sr_param) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
- struct conf_mart_reflex_err_table *e =
- &(wl->conf.init.sr_err_tbl[i]);
-
- sr_param->error_table[i].len = e->len;
- sr_param->error_table[i].upper_limit = e->upper_limit;
- memcpy(sr_param->error_table[i].values, e->values, e->len);
- }
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
- sr_param, sizeof(*sr_param));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
- sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
- if (!sr_state) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* enable smart reflex */
- sr_state->enable = wl->conf.init.sr_enable;
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
- sr_state, sizeof(*sr_state));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
-out:
- kfree(sr_state);
- kfree(sr_param);
- return ret;
-
-}
-
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
@@ -1132,3 +1118,31 @@ out:
kfree(acx);
return ret;
}
+
+int wl1271_acx_pm_config(struct wl1271 *wl)
+{
+ struct wl1271_acx_pm_config *acx = NULL;
+ struct conf_pm_config_settings *c = &wl->conf.pm_config;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx pm config");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
+ acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
+
+ ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx pm config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 2ce0a8128542..1bb63af64f0e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -415,23 +415,12 @@ struct acx_bt_wlan_coex {
u8 pad[3];
} __attribute__ ((packed));
-struct acx_smart_reflex_state {
+struct acx_dco_itrim_params {
struct acx_header header;
u8 enable;
u8 padding[3];
-} __attribute__ ((packed));
-
-struct smart_reflex_err_table {
- u8 len;
- s8 upper_limit;
- s8 values[14];
-} __attribute__ ((packed));
-
-struct acx_smart_reflex_config_params {
- struct acx_header header;
-
- struct smart_reflex_err_table error_table[3];
+ __le32 timeout;
} __attribute__ ((packed));
#define PTA_ANTENNA_TYPE_DEF (0)
@@ -837,6 +826,9 @@ struct acx_rate_class {
u8 reserved;
};
+#define ACX_TX_BASIC_RATE 0
+#define ACX_TX_AP_FULL_RATE 1
+#define ACX_TX_RATE_POLICY_CNT 2
struct acx_rate_policy {
struct acx_header header;
@@ -877,8 +869,8 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __attribute__ ((packed));
-#define ACX_RX_MEM_BLOCKS 64
-#define ACX_TX_MIN_MEM_BLOCKS 64
+#define ACX_RX_MEM_BLOCKS 70
+#define ACX_TX_MIN_MEM_BLOCKS 40
#define ACX_TX_DESCRIPTORS 32
#define ACX_NUM_SSID_PROFILES 1
@@ -969,6 +961,13 @@ struct wl1271_acx_arp_filter {
used. */
} __attribute__((packed));
+struct wl1271_acx_pm_config {
+ struct acx_header header;
+
+ __le32 host_clk_settling_time;
+ u8 host_fast_wakeup_support;
+ u8 padding[3];
+} __attribute__ ((packed));
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
@@ -1027,13 +1026,13 @@ enum {
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
- ACX_SET_SMART_REFLEX_STATE = 0x005B,
- ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
+ ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
DOT11_RTS_THRESHOLD = 0x1013,
DOT11_GROUP_ADDRESS_TBL = 0x1014,
+ ACX_PM_CONFIG = 0x1016,
MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
@@ -1056,6 +1055,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
void *mc_list, u32 mc_list_len);
int wl1271_acx_service_period_timeout(struct wl1271 *wl);
int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
int wl1271_acx_conn_monit_params(struct wl1271 *wl);
@@ -1069,7 +1069,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
+int wl1271_acx_rate_policies(struct wl1271 *wl);
int wl1271_acx_ac_cfg(struct wl1271 *wl);
int wl1271_acx_tid_cfg(struct wl1271 *wl);
int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1081,5 +1081,6 @@ int wl1271_acx_smart_reflex(struct wl1271 *wl);
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
u8 version);
+int wl1271_acx_pm_config(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index b7c96454cca3..e803b876f3f0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -225,9 +225,15 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
if (nvs == NULL)
return -ENODEV;
+ if (wl->nvs_len < WL1271_NVS_LEN)
+ return -EINVAL;
+
nvs_ptr = nvs;
- nvs_len = wl->nvs_len;
+ /* only the first part of the NVS needs to be uploaded */
+ nvs_len = WL1271_NVS_LEN;
+
+ /* FIXME: read init settings from the remaining part of the NVS */
/* Update the device MAC address into the nvs */
nvs[11] = wl->mac_addr[0];
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index c3385b3d246c..a74259bb596b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -209,6 +209,26 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
gen_parms->settings = g->settings;
+ gen_parms->sr_state = g->sr_state;
+
+ memcpy(gen_parms->srf1,
+ g->srf1,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+ memcpy(gen_parms->srf2,
+ g->srf2,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+ memcpy(gen_parms->srf3,
+ g->srf3,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+ memcpy(gen_parms->sr_debug_table,
+ g->sr_debug_table,
+ CONF_MAX_SMART_REFLEX_PARAMS);
+
+ gen_parms->sr_sen_n_p = g->sr_sen_n_p;
+ gen_parms->sr_sen_n_p_gain = g->sr_sen_n_p_gain;
+ gen_parms->sr_sen_nrn = g->sr_sen_nrn;
+ gen_parms->sr_sen_prn = g->sr_sen_prn;
+
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
if (ret < 0)
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
@@ -253,6 +273,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
CONF_NUMBER_OF_RATE_GROUPS);
+ memcpy(radio_parms->tx_rate_limits_extreme, r->tx_rate_limits_extreme,
+ CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
CONF_NUMBER_OF_CHANNELS_2_4);
@@ -263,6 +285,11 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
+ radio_parms->degraded_low_to_normal_threshold =
+ r->degraded_low_to_normal_threshold;
+ radio_parms->degraded_normal_to_high_threshold =
+ r->degraded_normal_to_high_threshold;
+
for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
radio_parms->tx_ref_pd_voltage_5[i] =
@@ -275,6 +302,8 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_rate_limits_degraded_5,
r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
+ memcpy(radio_parms->tx_rate_limits_extreme_5,
+ r->tx_rate_limits_extreme_5, CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->tx_channel_limits_ofdm_5,
r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
@@ -283,6 +312,10 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
CONF_NUMBER_OF_RATE_GROUPS);
memcpy(radio_parms->rx_fem_insertion_loss_5,
r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
+ radio_parms->degraded_low_to_normal_threshold_5 =
+ r->degraded_low_to_normal_threshold_5;
+ radio_parms->degraded_normal_to_high_threshold_5 =
+ r->degraded_normal_to_high_threshold_5;
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
radio_parms, sizeof(*radio_parms));
@@ -311,19 +344,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
do_cal = false;
}
- /* FIXME: This is a workaround, because with the current stack, we
- * cannot know when we have disassociated. So, if we have already
- * joined, we disconnect before joining again. */
- if (wl->joined) {
- ret = wl1271_cmd_disconnect(wl);
- if (ret < 0) {
- wl1271_error("failed to disconnect before rejoining");
- goto out;
- }
-
- wl->joined = false;
- }
-
join = kzalloc(sizeof(*join), GFP_KERNEL);
if (!join) {
ret = -ENOMEM;
@@ -388,8 +408,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
goto out_free;
}
- wl->joined = true;
-
/*
* ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
* simplify locking we just sleep instead, for now
@@ -487,7 +505,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
return 0;
}
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
@@ -501,7 +519,8 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
goto out;
}
- cmd->channel = channel;
+ /* the channel here is only used for calibration, so hardcoded to 1 */
+ cmd->channel = 1;
if (enable) {
cmd_rx = CMD_ENABLE_RX;
@@ -514,22 +533,22 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("rx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
goto out;
}
wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
return ret;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
out:
kfree(cmd);
@@ -636,7 +655,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
channels = wl->hw->wiphy->bands[ieee_band]->channels;
n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
- if (wl->scanning)
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
@@ -711,7 +730,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
- wl->scanning = true;
+ set_bit(WL1271_FLAG_SCANNING, &wl->flags);
if (wl1271_11a_enabled()) {
wl->scan.state = band;
if (band == WL1271_SCAN_BAND_DUAL) {
@@ -729,7 +748,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
goto out;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index b4fa4acb9229..09fe91297acf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -37,7 +37,7 @@ int wl1271_cmd_join(struct wl1271 *wl);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable);
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
@@ -437,6 +437,21 @@ struct wl1271_general_parms_cmd {
u8 tx_bip_fem_autodetect;
u8 tx_bip_fem_manufacturer;
u8 settings;
+
+ u8 sr_state;
+
+ s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ u8 sr_sen_n_p;
+ u8 sr_sen_n_p_gain;
+ u8 sr_sen_nrn;
+ u8 sr_sen_prn;
+
+ u8 padding[3];
} __attribute__ ((packed));
struct wl1271_radio_parms_cmd {
@@ -458,11 +473,12 @@ struct wl1271_radio_parms_cmd {
/* Dynamic radio parameters */
/* 2.4GHz */
__le16 tx_ref_pd_voltage;
- s8 tx_ref_power;
+ u8 tx_ref_power;
s8 tx_offset_db;
s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -471,15 +487,19 @@ struct wl1271_radio_parms_cmd {
u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
u8 rx_fem_insertion_loss;
- u8 padding2;
+ u8 degraded_low_to_normal_threshold;
+ u8 degraded_normal_to_high_threshold;
+
+ u8 padding1; /* our own padding, not in ref driver */
/* 5GHz */
__le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+ u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -488,7 +508,10 @@ struct wl1271_radio_parms_cmd {
s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- u8 padding3[2];
+ u8 degraded_low_to_normal_threshold_5;
+ u8 degraded_normal_to_high_threshold_5;
+
+ u8 padding2[2];
} __attribute__ ((packed));
struct wl1271_cmd_cal_channel_tune {
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 565373ede265..1993d63c214e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -258,7 +258,8 @@ struct conf_rx_settings {
#define CONF_TX_MAX_RATE_CLASSES 8
#define CONF_TX_RATE_MASK_UNSPECIFIED 0
-#define CONF_TX_RATE_MASK_ALL 0x1eff
+#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
struct conf_tx_rate_class {
@@ -722,31 +723,6 @@ struct conf_conn_settings {
u8 psm_entry_retries;
};
-#define CONF_SR_ERR_TBL_MAX_VALUES 14
-
-struct conf_mart_reflex_err_table {
- /*
- * Length of the error table values table.
- *
- * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
- */
- u8 len;
-
- /*
- * Smart Reflex error table upper limit.
- *
- * Range: s8
- */
- s8 upper_limit;
-
- /*
- * Smart Reflex error table values.
- *
- * Range: s8
- */
- s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
-};
-
enum {
CONF_REF_CLK_19_2_E,
CONF_REF_CLK_26_E,
@@ -759,6 +735,9 @@ enum single_dual_band_enum {
CONF_DUAL_BAND
};
+
+#define CONF_MAX_SMART_REFLEX_PARAMS 16
+
struct conf_general_parms {
/*
* RF Reference Clock type / speed
@@ -815,6 +794,20 @@ struct conf_general_parms {
* Range: Unknown
*/
u8 settings;
+
+ /* Smart reflex settings */
+ u8 sr_state;
+
+ s8 srf1[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf2[CONF_MAX_SMART_REFLEX_PARAMS];
+ s8 srf3[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ s8 sr_debug_table[CONF_MAX_SMART_REFLEX_PARAMS];
+
+ u8 sr_sen_n_p;
+ u8 sr_sen_n_p_gain;
+ u8 sr_sen_nrn;
+ u8 sr_sen_prn;
};
#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
@@ -847,12 +840,13 @@ struct conf_radio_parms {
*
* Range: unknown
*/
- s16 tx_ref_pd_voltage;
- s8 tx_ref_power;
+ u16 tx_ref_pd_voltage;
+ u8 tx_ref_power;
s8 tx_offset_db;
s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
@@ -861,17 +855,22 @@ struct conf_radio_parms {
u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
u8 rx_fem_insertion_loss;
+ u8 degraded_low_to_normal_threshold;
+ u8 degraded_normal_to_high_threshold;
+
+
/*
* Dynamic radio parameters for 5GHz
*
* Range: unknown
*/
- s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
+ u16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
+ u8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
+ s8 tx_rate_limits_extreme_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
@@ -879,33 +878,46 @@ struct conf_radio_parms {
/* FIXME: this is inconsistent with the types for 2.4GHz */
s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
-};
-#define CONF_SR_ERR_TBL_COUNT 3
+ u8 degraded_low_to_normal_threshold_5;
+ u8 degraded_normal_to_high_threshold_5;
+};
struct conf_init_settings {
/*
- * Configure Smart Reflex error table values.
+ * Configure general parameters.
*/
- struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
+ struct conf_general_parms genparam;
/*
- * Smart Reflex enable flag.
- *
- * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
+ * Configure radio parameters.
*/
- u8 sr_enable;
+ struct conf_radio_parms radioparam;
+};
+
+struct conf_itrim_settings {
+ /* enable dco itrim */
+ u8 enable;
+
+ /* moderation timeout in microsecs from the last TX */
+ u32 timeout;
+};
+
+struct conf_pm_config_settings {
/*
- * Configure general parameters.
+ * Host clock settling time
+ *
+ * Range: 0 - 30000 us
*/
- struct conf_general_parms genparam;
+ u32 host_clk_settling_time;
/*
- * Configure radio parameters.
+ * Host fast wakeup support
+ *
+ * Range: true, false
*/
- struct conf_radio_parms radioparam;
-
+ bool host_fast_wakeup_support;
};
struct conf_drv_settings {
@@ -914,6 +926,8 @@ struct conf_drv_settings {
struct conf_tx_settings tx;
struct conf_conn_settings conn;
struct conf_init_settings init;
+ struct conf_itrim_settings itrim;
+ struct conf_pm_config_settings pm_config;
};
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index c1805e5f8964..8d7588ca68fd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -237,6 +237,64 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1271_open_file_generic,
};
+static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "%d\n", state);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t gpio_power_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in gpio_power");
+ goto out;
+ }
+
+ if (value) {
+ wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ } else {
+ wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations gpio_power_ops = {
+ .read = gpio_power_read,
+ .write = gpio_power_write,
+ .open = wl1271_open_file_generic
+};
+
static void wl1271_debugfs_delete_files(struct wl1271 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -333,6 +391,8 @@ static void wl1271_debugfs_delete_files(struct wl1271 *wl)
DEBUGFS_DEL(tx_queue_len);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
+
+ DEBUGFS_DEL(gpio_power);
}
static int wl1271_debugfs_add_files(struct wl1271 *wl)
@@ -434,6 +494,8 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
+ DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
+
out:
if (ret < 0)
wl1271_debugfs_delete_files(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index d13fdd99c85c..0a145afc9905 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -35,7 +35,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- if (wl->scanning) {
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
NULL, size);
@@ -43,7 +43,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
* to the wl1271_cmd_scan function that we are not
* scanning as it checks that.
*/
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
wl->scan.active,
wl->scan.high_prio,
@@ -62,7 +62,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
}
}
return 0;
@@ -78,7 +78,7 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
switch (mbox->ps_status) {
case EVENT_ENTER_POWER_SAVE_FAIL:
- if (!wl->psm) {
+ if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
wl->psm_entry_retry = 0;
break;
}
@@ -89,7 +89,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
} else {
wl1271_error("PSM entry failed, giving up.\n");
wl->psm_entry_retry = 0;
- *beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -136,7 +135,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* filtering) is enabled. Without PSM, the stack will receive all
* beacons and can detect beacon loss by itself.
*/
- if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
+ if (vector & BSS_LOSE_EVENT_ID &&
+ test_bit(WL1271_FLAG_PSM, &wl->flags)) {
wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
/* indicate to the stack, that beacons have been lost */
@@ -150,7 +150,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
return ret;
}
- if (beacon_loss) {
+ if (wl->vif && beacon_loss) {
/* Obviously, it's dangerous to release the mutex while
we are holding many of the variables in the wl struct.
That's why it's done last in the function, and care must
@@ -184,7 +184,7 @@ void wl1271_event_mbox_config(struct wl1271 *wl)
wl->mbox_ptr[0], wl->mbox_ptr[1]);
}
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
struct event_mailbox mbox;
int ret;
@@ -204,9 +204,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
return ret;
/* then we let the firmware know it can go on...*/
- if (do_ack)
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
- INTR_TRIG_EVENT_ACK);
+ wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 4e3f55ebb1a8..278f9206aa56 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -112,6 +112,6 @@ struct event_mailbox {
int wl1271_event_unmask(struct wl1271 *wl);
void wl1271_event_mbox_config(struct wl1271 *wl);
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 11249b436cf1..c9848eecb767 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -229,6 +229,10 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ ret = wl1271_acx_dco_itrim_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Initialize connection monitoring thresholds */
ret = wl1271_acx_conn_monit_params(wl);
if (ret < 0)
@@ -280,12 +284,12 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
/* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
+ ret = wl1271_acx_rate_policies(wl);
if (ret < 0)
goto out_free_memmap;
/* Enable data path */
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
@@ -299,8 +303,8 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Configure smart reflex */
- ret = wl1271_acx_smart_reflex(wl);
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b62c00ff42fe..e4867b895c43 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -47,6 +47,8 @@
#include "wl1271_cmd.h"
#include "wl1271_boot.h"
+#define WL1271_BOOT_RETRIES 3
+
static struct conf_drv_settings default_conf = {
.sg = {
.per_threshold = 7500,
@@ -67,16 +69,17 @@ static struct conf_drv_settings default_conf = {
.ps_poll_timeout = 15,
.upsd_timeout = 15,
.rts_threshold = 2347,
- .rx_cca_threshold = 0xFFEF,
- .irq_blk_threshold = 0,
- .irq_pkt_threshold = USHORT_MAX,
- .irq_timeout = 5,
+ .rx_cca_threshold = 0,
+ .irq_blk_threshold = 0xFFFF,
+ .irq_pkt_threshold = 0,
+ .irq_timeout = 600,
.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
},
.tx = {
.tx_energy_detection = 0,
.rc_conf = {
- .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED,
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
+ CONF_HW_BIT_RATE_2MBPS,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0
@@ -172,8 +175,8 @@ static struct conf_drv_settings default_conf = {
}
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
- .tx_compl_timeout = 5,
- .tx_compl_threshold = 5
+ .tx_compl_timeout = 700,
+ .tx_compl_threshold = 4
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -186,12 +189,12 @@ static struct conf_drv_settings default_conf = {
.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
}
},
- .synch_fail_thold = 5,
+ .synch_fail_thold = 10,
.bss_lose_timeout = 100,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
- .ps_poll_threshold = 4,
+ .ps_poll_threshold = 20,
.sig_trigger_count = 2,
.sig_trigger = {
[0] = {
@@ -226,46 +229,35 @@ static struct conf_drv_settings default_conf = {
.psm_entry_retries = 3
},
.init = {
- .sr_err_tbl = {
- [0] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- },
- [1] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
- 0x00 }
- },
- [2] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- }
- },
- .sr_enable = 1,
.genparam = {
.ref_clk = CONF_REF_CLK_38_4_E,
.settling_time = 5,
.clk_valid_on_wakeup = 0,
.dc2dcmode = 0,
.single_dual_band = CONF_SINGLE_BAND,
- .tx_bip_fem_autodetect = 0,
+ .tx_bip_fem_autodetect = 1,
.tx_bip_fem_manufacturer = 1,
.settings = 1,
+ .sr_state = 1,
+ .srf1 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
+ 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
+ .srf2 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
+ 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
+ .srf3 = { 0x07, 0x03, 0x18, 0x10, 0x05, 0xfb, 0xf0,
+ 0xe8, 0, 0, 0, 0, 0, 0, 0, 0 },
+ .sr_debug_table = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 },
+ .sr_sen_n_p = 0,
+ .sr_sen_n_p_gain = 0,
+ .sr_sen_nrn = 0,
+ .sr_sen_prn = 0,
},
.radioparam = {
- .rx_trace_loss = 10,
- .tx_trace_loss = 10,
+ .rx_trace_loss = 0x24,
+ .tx_trace_loss = 0x0,
.rx_rssi_and_proc_compens = {
0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
- 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
+ 0xfc, 0x00, 0x80, 0x10, 0xf0, 0xf8,
0x00, 0x0a, 0x14 },
.rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
.tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
@@ -273,13 +265,15 @@ static struct conf_drv_settings default_conf = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00 },
- .tx_ref_pd_voltage = 0x24e,
- .tx_ref_power = 0x78,
+ .tx_ref_pd_voltage = 0x1a9,
+ .tx_ref_power = 0x80,
.tx_offset_db = 0x0,
.tx_rate_limits_normal = {
- 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
+ 0x1d, 0x1f, 0x24, 0x28, 0x28, 0x29 },
.tx_rate_limits_degraded = {
- 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
+ 0x19, 0x1f, 0x22, 0x23, 0x27, 0x28 },
+ .tx_rate_limits_extreme = {
+ 0x19, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
.tx_channel_limits_11b = {
0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
@@ -289,10 +283,12 @@ static struct conf_drv_settings default_conf = {
0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
0x20, 0x50 },
.tx_pdv_rate_offsets = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ 0x07, 0x08, 0x04, 0x02, 0x02, 0x00 },
.tx_ibias = {
- 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
- .rx_fem_insertion_loss = 0x14,
+ 0x11, 0x11, 0x15, 0x11, 0x15, 0x0f },
+ .rx_fem_insertion_loss = 0x0e,
+ .degraded_low_to_normal_threshold = 0x1e,
+ .degraded_normal_to_high_threshold = 0x2d,
.tx_ref_pd_voltage_5 = {
0x0190, 0x01a4, 0x01c3, 0x01d8,
0x020a, 0x021c },
@@ -304,6 +300,8 @@ static struct conf_drv_settings default_conf = {
0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
.tx_rate_limits_degraded_5 = {
0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
+ .tx_rate_limits_extreme_5 = {
+ 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
.tx_channel_limits_ofdm_5 = {
0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
@@ -315,8 +313,18 @@ static struct conf_drv_settings default_conf = {
.tx_ibias_5 = {
0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
.rx_fem_insertion_loss_5 = {
- 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
+ 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
+ .degraded_low_to_normal_threshold_5 = 0x00,
+ .degraded_normal_to_high_threshold_5 = 0x00
}
+ },
+ .itrim = {
+ .enable = false,
+ .timeout = 50000,
+ },
+ .pm_config = {
+ .host_clk_settling_time = 5000,
+ .host_fast_wakeup_support = false
}
};
@@ -359,7 +367,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
return ret;
@@ -374,11 +382,13 @@ static void wl1271_disable_interrupts(struct wl1271 *wl)
static void wl1271_power_off(struct wl1271 *wl)
{
wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_power_on(struct wl1271 *wl)
{
wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_fw_status(struct wl1271 *wl,
@@ -447,14 +457,13 @@ static void wl1271_irq_work(struct work_struct *work)
intr &= WL1271_INTR_MASK;
if (intr & WL1271_ACX_INTR_EVENT_A) {
- bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0, do_ack);
+ wl1271_event_handle(wl, 0);
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1, true);
+ wl1271_event_handle(wl, 1);
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -614,6 +623,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
struct wl1271_partition_set partition;
int ret = 0;
+ msleep(WL1271_PRE_POWER_ON_SLEEP);
wl1271_power_on(wl);
msleep(WL1271_POWER_ON_SLEEP);
wl1271_spi_reset(wl);
@@ -643,7 +653,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -651,38 +661,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
default:
- wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
+ wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
- goto out_power_off;
+ goto out;
}
if (wl->fw == NULL) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
/* No NVS from netlink, try to get it from the filesystem */
if (wl->nvs == NULL) {
ret = wl1271_fetch_nvs(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
- goto out;
-
-out_power_off:
- wl1271_power_off(wl);
-
out:
return ret;
}
int wl1271_plt_start(struct wl1271 *wl)
{
+ int retries = WL1271_BOOT_RETRIES;
int ret;
mutex_lock(&wl->mutex);
@@ -696,35 +702,48 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->state = WL1271_STATE_PLT;
-
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- ret = wl1271_plt_init(wl);
- if (ret < 0)
- goto out_irq_disable;
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- /* Make sure power saving is disabled */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- goto out_irq_disable;
+ ret = wl1271_plt_init(wl);
+ if (ret < 0)
+ goto irq_disable;
- goto out;
+ /* Make sure power saving is disabled */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_PLT;
+ wl1271_notice("firmware booted in PLT mode (%s)",
+ wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot in PLT mode failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
@@ -762,7 +781,20 @@ out:
static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txinfo->control.sta;
+ unsigned long flags;
+ /* peek into the rates configured in the STA entry */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
+ wl->sta_rate_set = sta->supp_rates[conf->channel->band];
+ set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ /* queue the packet */
skb_queue_tail(&wl->tx_queue, skb);
/*
@@ -784,7 +816,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* protected. Maybe fix this by removing the stupid
* variable altogether and checking the real queue state?
*/
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
}
return NETDEV_TX_OK;
@@ -880,6 +912,7 @@ static struct notifier_block wl1271_dev_notifier = {
static int wl1271_op_start(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ int retries = WL1271_BOOT_RETRIES;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -893,30 +926,42 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
goto out;
}
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- ret = wl1271_hw_init(wl);
- if (ret < 0)
- goto out_irq_disable;
-
- wl->state = WL1271_STATE_ON;
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- goto out;
+ ret = wl1271_hw_init(wl);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_ON;
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
@@ -944,11 +989,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
WARN_ON(wl->state != WL1271_STATE_ON);
- if (wl->scanning) {
+ if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, true);
mutex_lock(&wl->mutex);
- wl->scanning = false;
}
wl->state = WL1271_STATE_OFF;
@@ -973,10 +1017,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
- wl->elp = false;
- wl->psm = 0;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
wl->tx_results_count = 0;
@@ -986,7 +1027,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->tx_security_seq_32 = 0;
wl->time_offset = 0;
wl->session_counter = 0;
- wl->joined = false;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl->flags = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -996,13 +1039,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
}
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -1010,9 +1053,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -1032,7 +1075,7 @@ out:
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
@@ -1109,6 +1152,51 @@ out:
}
#endif
+static int wl1271_join_channel(struct wl1271 *wl, int channel)
+{
+ int ret = 0;
+ /* we need to use a dummy BSSID for now */
+ static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
+ 0xad, 0xbe, 0xef };
+
+ /* the dummy join is not required for ad-hoc */
+ if (wl->bss_type == BSS_TYPE_IBSS)
+ goto out;
+
+ /* disable mac filter, so we hear everything */
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+
+ wl->channel = channel;
+ memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
+
+out:
+ return ret;
+}
+
+static int wl1271_unjoin_channel(struct wl1271 *wl)
+{
+ int ret;
+
+ /* to stop listening to a channel, we disconnect */
+ ret = wl1271_cmd_disconnect(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_JOINED, &wl->flags);
+ wl->channel = 0;
+ memset(wl->bssid, 0, ETH_ALEN);
+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+
+out:
+ return ret;
+}
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
@@ -1117,10 +1205,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level);
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
mutex_lock(&wl->mutex);
@@ -1130,34 +1219,44 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (ret < 0)
goto out;
- if (channel != wl->channel) {
- /*
- * We assume that the stack will configure the right channel
- * before associating, so we don't need to send a join
- * command here. We will join the right channel when the
- * BSSID changes
- */
- wl->channel = channel;
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (conf->flags & IEEE80211_CONF_IDLE &&
+ test_bit(WL1271_FLAG_JOINED, &wl->flags))
+ wl1271_unjoin_channel(wl);
+ else if (!(conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_join_channel(wl, channel);
+
+ if (conf->flags & IEEE80211_CONF_IDLE) {
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl1271_acx_rate_policies(wl);
+ }
}
- if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
- wl1271_info("psm enabled");
+ /* if the channel changes while joined, join again */
+ if (channel != wl->channel && test_bit(WL1271_FLAG_JOINED, &wl->flags))
+ wl1271_join_channel(wl, channel);
- wl->psm_requested = true;
+ if (conf->flags & IEEE80211_CONF_PS &&
+ !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ wl1271_info("psm enabled");
+ ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ }
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- wl->psm_requested) {
+ test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
wl1271_info("psm disabled");
- wl->psm_requested = false;
+ clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
- if (wl->psm)
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags))
ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
}
@@ -1440,22 +1539,6 @@ out:
return ret;
}
-static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
-{
- struct ieee80211_supported_band *band;
- u32 enabled_rates = 0;
- int bit;
-
- band = wl->hw->wiphy->bands[wl->band];
- for (bit = 0; bit < band->n_bitrates; bit++) {
- if (basic_rate_set & 0x1)
- enabled_rates |= band->bitrates[bit].hw_value;
- basic_rate_set >>= 1;
- }
-
- return enabled_rates;
-}
-
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -1473,9 +1556,68 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if ((changed & BSS_CHANGED_BSSID) &&
+ /*
+ * Now we know the correct bssid, so we send a new join command
+ * and enable the BSSID filter
+ */
+ memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ ret = wl1271_cmd_build_null_data(wl);
+ if (ret < 0) {
+ wl1271_warning("cmd buld null data failed %d",
+ ret);
+ goto out_sleep;
+ }
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0) {
+ wl1271_warning("cmd join failed %d", ret);
+ goto out_sleep;
+ }
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
+ }
+
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ /* FIXME: This implements rudimentary ad-hoc support -
+ proper templates are on the wish list and notification
+ on when they change. This patch will update the templates
+ on every call to this function. Also, the firmware will not
+ answer to probe-requests as it does not have the proper
+ SSID set in the JOIN command. The probe-response template
+ is set nevertheless, as the FW will ASSERT without it */
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+
+ if (beacon) {
+ struct ieee80211_hdr *hdr;
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
+ beacon->data,
+ beacon->len);
+
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out_sleep;
+ }
+
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(
+ IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_PROBE_RESPONSE,
+ beacon->data,
+ beacon->len);
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out_sleep;
+ }
+ }
+
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->aid = bss_conf->aid;
+ set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
/*
* with wl1271, we don't need to update the
@@ -1492,7 +1634,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
goto out_sleep;
/* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
+ !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
mode = STATION_POWER_SAVE_MODE;
ret = wl1271_ps_set_mode(wl, mode);
if (ret < 0)
@@ -1500,7 +1643,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
} else {
/* use defaults when not associated */
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
}
@@ -1535,17 +1678,6 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BASIC_RATES) {
- wl->basic_rate_set = wl1271_enabled_rates_get(
- wl, bss_conf->basic_rates);
-
- ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
- if (ret < 0) {
- wl1271_warning("Set rate policies failed %d", ret);
- goto out_sleep;
- }
- }
-
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -1599,19 +1731,19 @@ static struct ieee80211_rate wl1271_rates[] = {
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
- { .hw_value = 1, .center_freq = 2412},
- { .hw_value = 2, .center_freq = 2417},
- { .hw_value = 3, .center_freq = 2422},
- { .hw_value = 4, .center_freq = 2427},
- { .hw_value = 5, .center_freq = 2432},
- { .hw_value = 6, .center_freq = 2437},
- { .hw_value = 7, .center_freq = 2442},
- { .hw_value = 8, .center_freq = 2447},
- { .hw_value = 9, .center_freq = 2452},
- { .hw_value = 10, .center_freq = 2457},
- { .hw_value = 11, .center_freq = 2462},
- { .hw_value = 12, .center_freq = 2467},
- { .hw_value = 13, .center_freq = 2472},
+ { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
+ { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
+ { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
+ { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
+ { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
+ { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
+ { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
+ { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
+ { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
+ { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
+ { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
+ { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
+ { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
};
/* can't be const, mac80211 writes to this */
@@ -1757,7 +1889,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_PS;
- wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
@@ -1818,21 +1951,18 @@ static int __devinit wl1271_probe(struct spi_device *spi)
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->scanning = false;
wl->default_key = 0;
wl->rx_counter = 0;
wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
- wl->elp = false;
- wl->psm = 0;
- wl->psm_requested = false;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
- wl->joined = false;
+ wl->flags = 0;
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 507cd91d7eed..e407790f6771 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -39,12 +39,13 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->elp || !wl->psm)
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
+ !test_bit(WL1271_FLAG_PSM, &wl->flags))
goto out;
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
- wl->elp = true;
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
out:
mutex_unlock(&wl->mutex);
@@ -55,7 +56,7 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
cancel_delayed_work(&wl->elp_work);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -70,7 +71,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
u32 start_time = jiffies;
bool pending = false;
- if (!wl->elp)
+ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
wl1271_debug(DEBUG_PSM, "waking up chip from elp");
@@ -101,7 +102,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
}
}
- wl->elp = false;
+ clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start_time));
@@ -143,7 +144,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
if (ret < 0)
return ret;
- wl->psm = 1;
+ set_bit(WL1271_FLAG_PSM, &wl->flags);
break;
case STATION_ACTIVE_MODE:
default:
@@ -166,7 +167,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
if (ret < 0)
return ret;
- wl->psm = 0;
+ clear_bit(WL1271_FLAG_PSM, &wl->flags);
break;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e732..ee9564aa6ecc 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -397,8 +397,7 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
/* poll for data ready */
do {
val = wl1271_spi_read32(wl, OCP_DATA_READ);
- timeout--;
- } while (!(val & OCP_READY_MASK) && timeout);
+ } while (!(val & OCP_READY_MASK) && --timeout);
if (!timeout) {
wl1271_warning("Top register access timed out.");
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 00af065c77c2..a288cc317d7b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -121,6 +121,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
pad = pad - skb->len;
tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+ /* if the packets are destined for AP (have a STA entry) send them
+ with AP rate policies, otherwise use default basic rates */
+ if (control->control.sta)
+ tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
@@ -214,18 +219,50 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
return ret;
}
+static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+{
+ struct ieee80211_supported_band *band;
+ u32 enabled_rates = 0;
+ int bit;
+
+ band = wl->hw->wiphy->bands[wl->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (rate_set & 0x1)
+ enabled_rates |= band->bitrates[bit].hw_value;
+ rate_set >>= 1;
+ }
+
+ return enabled_rates;
+}
+
void wl1271_tx_work(struct work_struct *work)
{
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
struct sk_buff *skb;
bool woken_up = false;
+ u32 sta_rates = 0;
int ret;
+ /* check if the rates supported by the AP have changed */
+ if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
+ &wl->flags))) {
+ unsigned long flags;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ sta_rates = wl->sta_rate_set;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ /* if rates have changed, re-configure the rate policy */
+ if (unlikely(sta_rates)) {
+ wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
+ wl1271_acx_rate_policies(wl);
+ }
+
while ((skb = skb_dequeue(&wl->tx_queue))) {
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
@@ -240,18 +277,18 @@ void wl1271_tx_work(struct work_struct *work)
wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
"stop queues");
ieee80211_stop_queues(wl->hw);
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
skb_queue_head(&wl->tx_queue, skb);
goto out;
} else if (ret < 0) {
dev_kfree_skb(skb);
goto out;
- } else if (wl->tx_queue_stopped) {
+ } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
+ &wl->flags)) {
/* firmware buffer has space, restart queues */
wl1271_debug(DEBUG_TX,
"complete_packet: waking queues");
ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
}
}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 8ebf5c33955d..a22a19203120 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -374,7 +374,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
* zd_mac_tx_failed - callback for failed frames
* @dev: the mac80211 wireless device
*
- * This function is called if a frame couldn't be successfully be
+ * This function is called if a frame couldn't be successfully
* transferred. The first frame from the tx queue, will be selected and
* reported as error to the upper layers.
*/
@@ -869,7 +869,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
}
static int zd_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
@@ -877,22 +877,22 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- mac->type = conf->type;
+ mac->type = vif->type;
break;
default:
return -EOPNOTSUPP;
}
- return zd_write_mac_addr(&mac->chip, conf->mac_addr);
+ return zd_write_mac_addr(&mac->chip, vif->addr);
}
static void zd_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = NL80211_IFTYPE_UNSPECIFIED;
@@ -987,12 +987,13 @@ static void zd_op_configure_filter(struct ieee80211_hw *hw,
changed_flags &= SUPPORTED_FIF_FLAGS;
*new_flags &= SUPPORTED_FIF_FLAGS;
- /* changed_flags is always populated but this driver
- * doesn't support all FIF flags so its possible we don't
- * need to do anything */
- if (!changed_flags)
- return;
-
+ /*
+ * If multicast parameter (as returned by zd_op_prepare_multicast)
+ * has changed, no bit in changed_flags is set. To handle this
+ * situation, we do not return if changed_flags is 0. If we do so,
+ * we will have some issue with IPv6 which uses multicast for link
+ * layer address resolution.
+ */
if (*new_flags & (FIF_PROMISC_IN_BSS | FIF_ALLMULTI))
zd_mc_add_all(&hash);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index ac19ecd19cfe..4daf1c94ec04 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1078,11 +1078,15 @@ static int eject_installer(struct usb_interface *intf)
int r;
/* Find bulk out endpoint */
- endpoint = &iface_desc->endpoint[1].desc;
- if (usb_endpoint_dir_out(endpoint) &&
- usb_endpoint_xfer_bulk(endpoint)) {
- bulk_out_ep = endpoint->bEndpointAddress;
- } else {
+ for (r = 1; r >= 0; r--) {
+ endpoint = &iface_desc->endpoint[r].desc;
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ bulk_out_ep = endpoint->bEndpointAddress;
+ break;
+ }
+ }
+ if (r == -1) {
dev_err(&udev->dev,
"zd1211rw: Could not find bulk out endpoint\n");
return -ENODEV;
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 8c777ba4e2b3..f7fe1aa03b42 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -925,11 +925,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
/* Set the MAC address in the EmacLite device */
xemaclite_set_mac_address(lp, ndev->dev_addr);
- dev_info(dev,
- "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
- ndev->dev_addr[0], ndev->dev_addr[1],
- ndev->dev_addr[2], ndev->dev_addr[3],
- ndev->dev_addr[4], ndev->dev_addr[5]);
+ dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
ndev->flags &= ~IFF_MULTICAST;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff2..8b231b30fd12 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -237,7 +237,7 @@ static const struct pci_id_info pci_id_tbl[] = {
{ }
};
-static const struct pci_device_id yellowfin_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ }
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4df48d58eaa6..adb74253a996 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -2,12 +2,14 @@
# Makefile for the PCI bus specific drivers.
#
-obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
+obj-y += access.o bus.o probe.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
irq.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_PCI_QUIRKS) += quirks.o
+
obj-$(CONFIG_PCI_LEGACY) += legacy.o
CFLAGS_legacy.o += -Wno-deprecated-declarations
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 83aae4747594..ffe22bc3ac8e 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -1456,7 +1456,7 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
/*
* Check interrupt remapping support in DMAR table description.
*/
-int dmar_ir_support(void)
+int __init dmar_ir_support(void)
{
struct acpi_table_dmar *dmar;
dmar = (struct acpi_table_dmar *)dmar_tbl;
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 4dd7114964ac..efa9f2de51c1 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -332,8 +332,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->attention_status = 0;
slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot);
slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
- slot->hotplug_slot->info->max_bus_speed = PCI_SPEED_UNKNOWN;
- slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN;
acpiphp_slot->slot = slot;
snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 9c6a9fd26812..d8ffc7366801 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -310,8 +310,6 @@ struct controller {
u8 first_slot;
u8 add_support;
u8 push_flag;
- enum pci_bus_speed speed;
- enum pci_bus_speed speed_capability;
u8 push_button; /* 0 = no pushbutton, 1 = pushbutton present */
u8 slot_switch_type; /* 0 = no switch, 1 = switch present */
u8 defeature_PHP; /* 0 = PHP not supported, 1 = PHP supported */
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 075b4f4b6e0d..f184d1d2ecbe 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -583,30 +583,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
- struct slot *slot = hotplug_slot->private;
- struct controller *ctrl = slot->ctrl;
-
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
- *value = ctrl->speed_capability;
-
- return 0;
-}
-
-static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
- struct slot *slot = hotplug_slot->private;
- struct controller *ctrl = slot->ctrl;
-
- dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
-
- *value = ctrl->speed;
-
- return 0;
-}
-
static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = process_SI,
@@ -616,8 +592,6 @@ static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
};
#define SLOT_NAME_SIZE 10
@@ -629,6 +603,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
struct slot *slot;
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *hotplug_slot_info;
+ struct pci_bus *bus = ctrl->pci_bus;
u8 number_of_slots;
u8 slot_device;
u8 slot_number;
@@ -694,7 +669,7 @@ static int ctrl_slot_setup(struct controller *ctrl,
slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
if (is_slot66mhz(slot))
slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
- if (ctrl->speed == PCI_SPEED_66MHz)
+ if (bus->cur_bus_speed == PCI_SPEED_66MHz)
slot->capabilities |= PCISLOT_66_MHZ_OPERATION;
ctrl_slot =
@@ -844,6 +819,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 rc;
struct controller *ctrl;
struct pci_func *func;
+ struct pci_bus *bus;
int err;
err = pci_enable_device(pdev);
@@ -852,6 +828,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_name(pdev), err);
return err;
}
+ bus = pdev->subordinate;
/* Need to read VID early b/c it's used to differentiate CPQ and INTC
* discovery
@@ -929,22 +906,22 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_read_config_byte(pdev, 0x41, &bus_cap);
if (bus_cap & 0x80) {
dbg("bus max supports 133MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_133MHz_PCIX;
+ bus->max_bus_speed = PCI_SPEED_133MHz_PCIX;
break;
}
if (bus_cap & 0x40) {
dbg("bus max supports 100MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
break;
}
if (bus_cap & 20) {
dbg("bus max supports 66MHz PCI-X\n");
- ctrl->speed_capability = PCI_SPEED_66MHz_PCIX;
+ bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
break;
}
if (bus_cap & 10) {
dbg("bus max supports 66MHz PCI\n");
- ctrl->speed_capability = PCI_SPEED_66MHz;
+ bus->max_bus_speed = PCI_SPEED_66MHz;
break;
}
@@ -955,7 +932,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case PCI_SUB_HPC_ID:
/* Original 6500/7000 implementation */
ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
+ bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 0;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@@ -966,7 +943,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* First Pushbutton implementation */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
+ bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@@ -976,7 +953,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case PCI_SUB_HPC_ID_INTC:
/* Third party (6500/7000) */
ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_33MHz;
+ bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 0;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@@ -987,7 +964,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* First 66 Mhz implementation */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_66MHz;
+ bus->max_bus_speed = PCI_SPEED_66MHz;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@@ -998,7 +975,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* First PCI-X implementation, 100MHz */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
- ctrl->speed_capability = PCI_SPEED_100MHz_PCIX;
+ bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
@@ -1015,9 +992,9 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case PCI_VENDOR_ID_INTEL:
/* Check for speed capability (0=33, 1=66) */
if (subsystem_deviceid & 0x0001)
- ctrl->speed_capability = PCI_SPEED_66MHz;
+ bus->max_bus_speed = PCI_SPEED_66MHz;
else
- ctrl->speed_capability = PCI_SPEED_33MHz;
+ bus->max_bus_speed = PCI_SPEED_33MHz;
/* Check for push button */
if (subsystem_deviceid & 0x0002)
@@ -1079,7 +1056,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pdev->bus->number);
dbg("Hotplug controller capabilities:\n");
- dbg(" speed_capability %d\n", ctrl->speed_capability);
+ dbg(" speed_capability %d\n", bus->max_bus_speed);
dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ?
"switch present" : "no switch");
dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ?
@@ -1142,7 +1119,7 @@ static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Check for 66Mhz operation */
- ctrl->speed = get_controller_speed(ctrl);
+ bus->cur_bus_speed = get_controller_speed(ctrl);
/********************************************************
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 0ff689afa757..e43908d9b5df 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1130,12 +1130,13 @@ static int is_bridge(struct pci_func * func)
static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_slot)
{
struct slot *slot;
+ struct pci_bus *bus = ctrl->pci_bus;
u8 reg;
u8 slot_power = readb(ctrl->hpc_reg + SLOT_POWER);
u16 reg16;
u32 leds = readl(ctrl->hpc_reg + LED_CONTROL);
- if (ctrl->speed == adapter_speed)
+ if (bus->cur_bus_speed == adapter_speed)
return 0;
/* We don't allow freq/mode changes if we find another adapter running
@@ -1152,7 +1153,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
* lower speed/mode, we allow the new adapter to function at
* this rate if supported
*/
- if (ctrl->speed < adapter_speed)
+ if (bus->cur_bus_speed < adapter_speed)
return 0;
return 1;
@@ -1161,20 +1162,20 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
/* If the controller doesn't support freq/mode changes and the
* controller is running at a higher mode, we bail
*/
- if ((ctrl->speed > adapter_speed) && (!ctrl->pcix_speed_capability))
+ if ((bus->cur_bus_speed > adapter_speed) && (!ctrl->pcix_speed_capability))
return 1;
/* But we allow the adapter to run at a lower rate if possible */
- if ((ctrl->speed < adapter_speed) && (!ctrl->pcix_speed_capability))
+ if ((bus->cur_bus_speed < adapter_speed) && (!ctrl->pcix_speed_capability))
return 0;
/* We try to set the max speed supported by both the adapter and
* controller
*/
- if (ctrl->speed_capability < adapter_speed) {
- if (ctrl->speed == ctrl->speed_capability)
+ if (bus->max_bus_speed < adapter_speed) {
+ if (bus->cur_bus_speed == bus->max_bus_speed)
return 0;
- adapter_speed = ctrl->speed_capability;
+ adapter_speed = bus->max_bus_speed;
}
writel(0x0L, ctrl->hpc_reg + LED_CONTROL);
@@ -1229,8 +1230,8 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
pci_write_config_byte(ctrl->pci_dev, 0x43, reg);
/* Only if mode change...*/
- if (((ctrl->speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
- ((ctrl->speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
+ if (((bus->cur_bus_speed == PCI_SPEED_66MHz) && (adapter_speed == PCI_SPEED_66MHz_PCIX)) ||
+ ((bus->cur_bus_speed == PCI_SPEED_66MHz_PCIX) && (adapter_speed == PCI_SPEED_66MHz)))
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
@@ -1243,7 +1244,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
set_SOGO(ctrl);
wait_for_ctrl_irq(ctrl);
- ctrl->speed = adapter_speed;
+ bus->cur_bus_speed = adapter_speed;
slot = cpqhp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
info("Successfully changed frequency/mode for adapter in slot %d\n",
@@ -1269,6 +1270,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
*/
static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
{
+ struct pci_bus *bus = ctrl->pci_bus;
u8 hp_slot;
u8 temp_byte;
u8 adapter_speed;
@@ -1309,7 +1311,7 @@ static u32 board_replaced(struct pci_func *func, struct controller *ctrl)
wait_for_ctrl_irq (ctrl);
adapter_speed = get_adapter_speed(ctrl, hp_slot);
- if (ctrl->speed != adapter_speed)
+ if (bus->cur_bus_speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
@@ -1426,6 +1428,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
u32 temp_register = 0xFFFFFFFF;
u32 rc = 0;
struct pci_func *new_slot = NULL;
+ struct pci_bus *bus = ctrl->pci_bus;
struct slot *p_slot;
struct resource_lists res_lists;
@@ -1456,7 +1459,7 @@ static u32 board_added(struct pci_func *func, struct controller *ctrl)
wait_for_ctrl_irq (ctrl);
adapter_speed = get_adapter_speed(ctrl, hp_slot);
- if (ctrl->speed != adapter_speed)
+ if (bus->cur_bus_speed != adapter_speed)
if (set_controller_speed(ctrl, adapter_speed, hp_slot))
rc = WRONG_BUS_FREQUENCY;
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 7485ffda950c..d934dd4fa873 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -395,89 +395,40 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 * value)
return rc;
}
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
+static int get_max_bus_speed(struct slot *slot)
{
- int rc = -ENODEV;
- struct slot *pslot;
+ int rc;
u8 mode = 0;
+ enum pci_bus_speed speed;
+ struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
- debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__,
- hotplug_slot, value);
+ debug("%s - Entry slot[%p]\n", __func__, slot);
ibmphp_lock_operations();
-
- if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- rc = 0;
- mode = pslot->supported_bus_mode;
- *value = pslot->supported_speed;
- switch (*value) {
- case BUS_SPEED_33:
- break;
- case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
- *value += 0x01;
- break;
- case BUS_SPEED_100:
- case BUS_SPEED_133:
- *value = pslot->supported_speed + 0x01;
- break;
- default:
- /* Note (will need to change): there would be soon 256, 512 also */
- rc = -ENODEV;
- }
- }
- }
-
+ mode = slot->supported_bus_mode;
+ speed = slot->supported_speed;
ibmphp_unlock_operations();
- debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value);
- return rc;
-}
-static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
- int rc = -ENODEV;
- struct slot *pslot;
- u8 mode = 0;
-
- debug("%s - Entry hotplug_slot[%p] pvalue[%p]\n", __func__,
- hotplug_slot, value);
-
- ibmphp_lock_operations();
-
- if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- rc = get_cur_bus_info(&pslot);
- if (!rc) {
- mode = pslot->bus_on->current_bus_mode;
- *value = pslot->bus_on->current_speed;
- switch (*value) {
- case BUS_SPEED_33:
- break;
- case BUS_SPEED_66:
- if (mode == BUS_MODE_PCIX)
- *value += 0x01;
- else if (mode == BUS_MODE_PCI)
- ;
- else
- *value = PCI_SPEED_UNKNOWN;
- break;
- case BUS_SPEED_100:
- case BUS_SPEED_133:
- *value += 0x01;
- break;
- default:
- /* Note of change: there would also be 256, 512 soon */
- rc = -ENODEV;
- }
- }
- }
+ switch (speed) {
+ case BUS_SPEED_33:
+ break;
+ case BUS_SPEED_66:
+ if (mode == BUS_MODE_PCIX)
+ speed += 0x01;
+ break;
+ case BUS_SPEED_100:
+ case BUS_SPEED_133:
+ speed += 0x01;
+ break;
+ default:
+ /* Note (will need to change): there would be soon 256, 512 also */
+ rc = -ENODEV;
}
- ibmphp_unlock_operations();
- debug("%s - Exit rc[%d] value[%x]\n", __func__, rc, *value);
+ if (!rc)
+ bus->max_bus_speed = speed;
+
+ debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed);
return rc;
}
@@ -572,6 +523,7 @@ static int __init init_ops(void)
if (slot_cur->bus_on->current_speed == 0xFF)
if (get_cur_bus_info(&slot_cur))
return -1;
+ get_max_bus_speed(slot_cur);
if (slot_cur->ctrl->options == 0xFF)
if (get_hpc_options(slot_cur, &slot_cur->ctrl->options))
@@ -655,6 +607,7 @@ static int validate(struct slot *slot_cur, int opn)
int ibmphp_update_slot_info(struct slot *slot_cur)
{
struct hotplug_slot_info *info;
+ struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus;
int rc;
u8 bus_speed;
u8 mode;
@@ -700,8 +653,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
bus_speed = PCI_SPEED_UNKNOWN;
}
- info->cur_bus_speed = bus_speed;
- info->max_bus_speed = slot_cur->hotplug_slot->info->max_bus_speed;
+ bus->cur_bus_speed = bus_speed;
// To do: bus_names
rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
@@ -1326,8 +1278,6 @@ struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_present,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
/* .get_max_adapter_speed = get_max_adapter_speed,
.get_bus_name_status = get_bus_name,
*/
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 38183a534b65..728b119f71ad 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -64,32 +64,6 @@ static int debug;
static LIST_HEAD(pci_hotplug_slot_list);
static DEFINE_MUTEX(pci_hp_mutex);
-/* these strings match up with the values in pci_bus_speed */
-static char *pci_bus_speed_strings[] = {
- "33 MHz PCI", /* 0x00 */
- "66 MHz PCI", /* 0x01 */
- "66 MHz PCI-X", /* 0x02 */
- "100 MHz PCI-X", /* 0x03 */
- "133 MHz PCI-X", /* 0x04 */
- NULL, /* 0x05 */
- NULL, /* 0x06 */
- NULL, /* 0x07 */
- NULL, /* 0x08 */
- "66 MHz PCI-X 266", /* 0x09 */
- "100 MHz PCI-X 266", /* 0x0a */
- "133 MHz PCI-X 266", /* 0x0b */
- NULL, /* 0x0c */
- NULL, /* 0x0d */
- NULL, /* 0x0e */
- NULL, /* 0x0f */
- NULL, /* 0x10 */
- "66 MHz PCI-X 533", /* 0x11 */
- "100 MHz PCI-X 533", /* 0x12 */
- "133 MHz PCI-X 533", /* 0x13 */
- "2.5 GT/s PCIe", /* 0x14 */
- "5.0 GT/s PCIe", /* 0x15 */
-};
-
#ifdef CONFIG_HOTPLUG_PCI_CPCI
extern int cpci_hotplug_init(int debug);
extern void cpci_hotplug_exit(void);
@@ -118,8 +92,6 @@ GET_STATUS(power_status, u8)
GET_STATUS(attention_status, u8)
GET_STATUS(latch_status, u8)
GET_STATUS(adapter_status, u8)
-GET_STATUS(max_bus_speed, enum pci_bus_speed)
-GET_STATUS(cur_bus_speed, enum pci_bus_speed)
static ssize_t power_read_file(struct pci_slot *slot, char *buf)
{
@@ -263,60 +235,6 @@ static struct pci_slot_attribute hotplug_slot_attr_presence = {
.show = presence_read_file,
};
-static char *unknown_speed = "Unknown bus speed";
-
-static ssize_t max_bus_speed_read_file(struct pci_slot *slot, char *buf)
-{
- char *speed_string;
- int retval;
- enum pci_bus_speed value;
-
- retval = get_max_bus_speed(slot->hotplug, &value);
- if (retval)
- goto exit;
-
- if (value == PCI_SPEED_UNKNOWN)
- speed_string = unknown_speed;
- else
- speed_string = pci_bus_speed_strings[value];
-
- retval = sprintf (buf, "%s\n", speed_string);
-
-exit:
- return retval;
-}
-
-static struct pci_slot_attribute hotplug_slot_attr_max_bus_speed = {
- .attr = {.name = "max_bus_speed", .mode = S_IFREG | S_IRUGO},
- .show = max_bus_speed_read_file,
-};
-
-static ssize_t cur_bus_speed_read_file(struct pci_slot *slot, char *buf)
-{
- char *speed_string;
- int retval;
- enum pci_bus_speed value;
-
- retval = get_cur_bus_speed(slot->hotplug, &value);
- if (retval)
- goto exit;
-
- if (value == PCI_SPEED_UNKNOWN)
- speed_string = unknown_speed;
- else
- speed_string = pci_bus_speed_strings[value];
-
- retval = sprintf (buf, "%s\n", speed_string);
-
-exit:
- return retval;
-}
-
-static struct pci_slot_attribute hotplug_slot_attr_cur_bus_speed = {
- .attr = {.name = "cur_bus_speed", .mode = S_IFREG | S_IRUGO},
- .show = cur_bus_speed_read_file,
-};
-
static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count)
{
@@ -391,26 +309,6 @@ static bool has_adapter_file(struct pci_slot *pci_slot)
return false;
}
-static bool has_max_bus_speed_file(struct pci_slot *pci_slot)
-{
- struct hotplug_slot *slot = pci_slot->hotplug;
- if ((!slot) || (!slot->ops))
- return false;
- if (slot->ops->get_max_bus_speed)
- return true;
- return false;
-}
-
-static bool has_cur_bus_speed_file(struct pci_slot *pci_slot)
-{
- struct hotplug_slot *slot = pci_slot->hotplug;
- if ((!slot) || (!slot->ops))
- return false;
- if (slot->ops->get_cur_bus_speed)
- return true;
- return false;
-}
-
static bool has_test_file(struct pci_slot *pci_slot)
{
struct hotplug_slot *slot = pci_slot->hotplug;
@@ -456,20 +354,6 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit_adapter;
}
- if (has_max_bus_speed_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
- if (retval)
- goto exit_max_speed;
- }
-
- if (has_cur_bus_speed_file(slot)) {
- retval = sysfs_create_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
- if (retval)
- goto exit_cur_speed;
- }
-
if (has_test_file(slot)) {
retval = sysfs_create_file(&slot->kobj,
&hotplug_slot_attr_test.attr);
@@ -480,14 +364,6 @@ static int fs_add_slot(struct pci_slot *slot)
goto exit;
exit_test:
- if (has_cur_bus_speed_file(slot))
- sysfs_remove_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
-exit_cur_speed:
- if (has_max_bus_speed_file(slot))
- sysfs_remove_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
-exit_max_speed:
if (has_adapter_file(slot))
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
@@ -523,14 +399,6 @@ static void fs_remove_slot(struct pci_slot *slot)
sysfs_remove_file(&slot->kobj,
&hotplug_slot_attr_presence.attr);
- if (has_max_bus_speed_file(slot))
- sysfs_remove_file(&slot->kobj,
- &hotplug_slot_attr_max_bus_speed.attr);
-
- if (has_cur_bus_speed_file(slot))
- sysfs_remove_file(&slot->kobj,
- &hotplug_slot_attr_cur_bus_speed.attr);
-
if (has_test_file(slot))
sysfs_remove_file(&slot->kobj, &hotplug_slot_attr_test.attr);
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 5674b2075bdc..920f820edf87 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -69,8 +69,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
/**
* release_slot - free up the memory used by a slot
@@ -113,8 +111,6 @@ static int init_slot(struct controller *ctrl)
ops->disable_slot = disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
- ops->get_max_bus_speed = get_max_bus_speed;
- ops->get_cur_bus_speed = get_cur_bus_speed;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
@@ -227,27 +223,6 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return pciehp_get_adapter_status(slot, value);
}
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
- enum pci_bus_speed *value)
-{
- struct slot *slot = hotplug_slot->private;
-
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
- return pciehp_get_max_link_speed(slot, value);
-}
-
-static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
- struct slot *slot = hotplug_slot->private;
-
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
- return pciehp_get_cur_link_speed(slot, value);
-}
-
static int pciehp_probe(struct pcie_device *dev)
{
int rc;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 10040d58c8ef..40b48f569b1e 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -492,6 +492,7 @@ int pciehp_power_on_slot(struct slot * slot)
u16 slot_cmd;
u16 cmd_mask;
u16 slot_status;
+ u16 lnk_status;
int retval = 0;
/* Clear sticky power-fault bit from previous power failures */
@@ -523,6 +524,14 @@ int pciehp_power_on_slot(struct slot * slot)
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
+ retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
+ if (retval) {
+ ctrl_err(ctrl, "%s: Cannot read LNKSTA register\n",
+ __func__);
+ return retval;
+ }
+ pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
+
return retval;
}
@@ -610,37 +619,6 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int pciehp_get_max_link_speed(struct slot *slot, enum pci_bus_speed *value)
-{
- struct controller *ctrl = slot->ctrl;
- enum pcie_link_speed lnk_speed;
- u32 lnk_cap;
- int retval = 0;
-
- retval = pciehp_readl(ctrl, PCI_EXP_LNKCAP, &lnk_cap);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__);
- return retval;
- }
-
- switch (lnk_cap & 0x000F) {
- case 1:
- lnk_speed = PCIE_2_5GB;
- break;
- case 2:
- lnk_speed = PCIE_5_0GB;
- break;
- default:
- lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
- break;
- }
-
- *value = lnk_speed;
- ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed);
-
- return retval;
-}
-
int pciehp_get_max_lnk_width(struct slot *slot,
enum pcie_link_width *value)
{
@@ -691,38 +669,6 @@ int pciehp_get_max_lnk_width(struct slot *slot,
return retval;
}
-int pciehp_get_cur_link_speed(struct slot *slot, enum pci_bus_speed *value)
-{
- struct controller *ctrl = slot->ctrl;
- enum pcie_link_speed lnk_speed = PCI_SPEED_UNKNOWN;
- int retval = 0;
- u16 lnk_status;
-
- retval = pciehp_readw(ctrl, PCI_EXP_LNKSTA, &lnk_status);
- if (retval) {
- ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n",
- __func__);
- return retval;
- }
-
- switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
- case 1:
- lnk_speed = PCIE_2_5GB;
- break;
- case 2:
- lnk_speed = PCIE_5_0GB;
- break;
- default:
- lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
- break;
- }
-
- *value = lnk_speed;
- ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed);
-
- return retval;
-}
-
int pciehp_get_cur_lnk_width(struct slot *slot,
enum pcie_link_width *value)
{
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index c159223389ec..dcaae725fd79 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -130,10 +130,9 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 * value)
return 0;
}
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
+static enum pci_bus_speed get_max_bus_speed(struct slot *slot)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
-
+ enum pci_bus_speed speed;
switch (slot->type) {
case 1:
case 2:
@@ -141,30 +140,30 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe
case 4:
case 5:
case 6:
- *value = PCI_SPEED_33MHz; /* speed for case 1-6 */
+ speed = PCI_SPEED_33MHz; /* speed for case 1-6 */
break;
case 7:
case 8:
- *value = PCI_SPEED_66MHz;
+ speed = PCI_SPEED_66MHz;
break;
case 11:
case 14:
- *value = PCI_SPEED_66MHz_PCIX;
+ speed = PCI_SPEED_66MHz_PCIX;
break;
case 12:
case 15:
- *value = PCI_SPEED_100MHz_PCIX;
+ speed = PCI_SPEED_100MHz_PCIX;
break;
case 13:
case 16:
- *value = PCI_SPEED_133MHz_PCIX;
+ speed = PCI_SPEED_133MHz_PCIX;
break;
default:
- *value = PCI_SPEED_UNKNOWN;
+ speed = PCI_SPEED_UNKNOWN;
break;
-
}
- return 0;
+
+ return speed;
}
static int get_children_props(struct device_node *dn, const int **drc_indexes,
@@ -408,6 +407,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
slot->state = NOT_VALID;
return -EINVAL;
}
+
+ slot->bus->max_bus_speed = get_max_bus_speed(slot);
return 0;
}
@@ -429,7 +430,6 @@ struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
.get_power_status = get_power_status,
.get_attention_status = get_attention_status,
.get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
};
module_init(rpaphp_init);
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 8e210cd76e55..d2627e1c3ac1 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -333,8 +333,6 @@ struct hpc_ops {
int (*set_attention_status)(struct slot *slot, u8 status);
int (*get_latch_status)(struct slot *slot, u8 *status);
int (*get_adapter_status)(struct slot *slot, u8 *status);
- int (*get_max_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
- int (*get_cur_bus_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_adapter_speed)(struct slot *slot, enum pci_bus_speed *speed);
int (*get_mode1_ECC_cap)(struct slot *slot, u8 *mode);
int (*get_prog_int)(struct slot *slot, u8 *prog_int);
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 8a520a3d0f59..a5062297f488 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -65,8 +65,6 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
-static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
-static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
@@ -76,8 +74,6 @@ static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_status,
- .get_max_bus_speed = get_max_bus_speed,
- .get_cur_bus_speed = get_cur_bus_speed,
};
/**
@@ -279,37 +275,6 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
- enum pci_bus_speed *value)
-{
- struct slot *slot = get_slot(hotplug_slot);
- int retval;
-
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
- retval = slot->hpc_ops->get_max_bus_speed(slot, value);
- if (retval < 0)
- *value = PCI_SPEED_UNKNOWN;
-
- return 0;
-}
-
-static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
-{
- struct slot *slot = get_slot(hotplug_slot);
- int retval;
-
- ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
- __func__, slot_name(slot));
-
- retval = slot->hpc_ops->get_cur_bus_speed(slot, value);
- if (retval < 0)
- *value = PCI_SPEED_UNKNOWN;
-
- return 0;
-}
-
static int is_shpc_capable(struct pci_dev *dev)
{
if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index b8ab2796e66a..179b1c1cb99b 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -285,17 +285,8 @@ static int board_added(struct slot *p_slot)
return WRONG_BUS_FREQUENCY;
}
- rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp);
- if (rc) {
- ctrl_err(ctrl, "Can't get bus operation speed\n");
- return WRONG_BUS_FREQUENCY;
- }
-
- rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp);
- if (rc) {
- ctrl_err(ctrl, "Can't get max bus operation speed\n");
- msp = bsp;
- }
+ bsp = ctrl->pci_dev->bus->cur_bus_speed;
+ msp = ctrl->pci_dev->bus->max_bus_speed;
/* Check if there are other slots or devices on the same bus */
if (!list_empty(&ctrl->pci_dev->subordinate->devices))
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 86dc39847769..5f5e8d2e3552 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -660,6 +660,75 @@ static int hpc_slot_disable(struct slot * slot)
return retval;
}
+static int shpc_get_cur_bus_speed(struct controller *ctrl)
+{
+ int retval = 0;
+ struct pci_bus *bus = ctrl->pci_dev->subordinate;
+ enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
+ u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
+ u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
+ u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
+
+ if ((pi == 1) && (speed_mode > 4)) {
+ retval = -ENODEV;
+ goto out;
+ }
+
+ switch (speed_mode) {
+ case 0x0:
+ bus_speed = PCI_SPEED_33MHz;
+ break;
+ case 0x1:
+ bus_speed = PCI_SPEED_66MHz;
+ break;
+ case 0x2:
+ bus_speed = PCI_SPEED_66MHz_PCIX;
+ break;
+ case 0x3:
+ bus_speed = PCI_SPEED_100MHz_PCIX;
+ break;
+ case 0x4:
+ bus_speed = PCI_SPEED_133MHz_PCIX;
+ break;
+ case 0x5:
+ bus_speed = PCI_SPEED_66MHz_PCIX_ECC;
+ break;
+ case 0x6:
+ bus_speed = PCI_SPEED_100MHz_PCIX_ECC;
+ break;
+ case 0x7:
+ bus_speed = PCI_SPEED_133MHz_PCIX_ECC;
+ break;
+ case 0x8:
+ bus_speed = PCI_SPEED_66MHz_PCIX_266;
+ break;
+ case 0x9:
+ bus_speed = PCI_SPEED_100MHz_PCIX_266;
+ break;
+ case 0xa:
+ bus_speed = PCI_SPEED_133MHz_PCIX_266;
+ break;
+ case 0xb:
+ bus_speed = PCI_SPEED_66MHz_PCIX_533;
+ break;
+ case 0xc:
+ bus_speed = PCI_SPEED_100MHz_PCIX_533;
+ break;
+ case 0xd:
+ bus_speed = PCI_SPEED_133MHz_PCIX_533;
+ break;
+ default:
+ retval = -ENODEV;
+ break;
+ }
+
+ out:
+ bus->cur_bus_speed = bus_speed;
+ dbg("Current bus speed = %d\n", bus_speed);
+ return retval;
+}
+
+
static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
{
int retval;
@@ -720,6 +789,8 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value)
retval = shpc_write_cmd(slot, 0, cmd);
if (retval)
ctrl_err(ctrl, "%s: Write command failed!\n", __func__);
+ else
+ shpc_get_cur_bus_speed(ctrl);
return retval;
}
@@ -803,10 +874,10 @@ static irqreturn_t shpc_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
+static int shpc_get_max_bus_speed(struct controller *ctrl)
{
int retval = 0;
- struct controller *ctrl = slot->ctrl;
+ struct pci_bus *bus = ctrl->pci_dev->subordinate;
enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
u32 slot_avail1 = shpc_readl(ctrl, SLOT_AVAIL1);
@@ -842,79 +913,12 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value)
retval = -ENODEV;
}
- *value = bus_speed;
+ bus->max_bus_speed = bus_speed;
ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed);
return retval;
}
-static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value)
-{
- int retval = 0;
- struct controller *ctrl = slot->ctrl;
- enum pci_bus_speed bus_speed = PCI_SPEED_UNKNOWN;
- u16 sec_bus_reg = shpc_readw(ctrl, SEC_BUS_CONFIG);
- u8 pi = shpc_readb(ctrl, PROG_INTERFACE);
- u8 speed_mode = (pi == 2) ? (sec_bus_reg & 0xF) : (sec_bus_reg & 0x7);
-
- if ((pi == 1) && (speed_mode > 4)) {
- *value = PCI_SPEED_UNKNOWN;
- return -ENODEV;
- }
-
- switch (speed_mode) {
- case 0x0:
- *value = PCI_SPEED_33MHz;
- break;
- case 0x1:
- *value = PCI_SPEED_66MHz;
- break;
- case 0x2:
- *value = PCI_SPEED_66MHz_PCIX;
- break;
- case 0x3:
- *value = PCI_SPEED_100MHz_PCIX;
- break;
- case 0x4:
- *value = PCI_SPEED_133MHz_PCIX;
- break;
- case 0x5:
- *value = PCI_SPEED_66MHz_PCIX_ECC;
- break;
- case 0x6:
- *value = PCI_SPEED_100MHz_PCIX_ECC;
- break;
- case 0x7:
- *value = PCI_SPEED_133MHz_PCIX_ECC;
- break;
- case 0x8:
- *value = PCI_SPEED_66MHz_PCIX_266;
- break;
- case 0x9:
- *value = PCI_SPEED_100MHz_PCIX_266;
- break;
- case 0xa:
- *value = PCI_SPEED_133MHz_PCIX_266;
- break;
- case 0xb:
- *value = PCI_SPEED_66MHz_PCIX_533;
- break;
- case 0xc:
- *value = PCI_SPEED_100MHz_PCIX_533;
- break;
- case 0xd:
- *value = PCI_SPEED_133MHz_PCIX_533;
- break;
- default:
- *value = PCI_SPEED_UNKNOWN;
- retval = -ENODEV;
- break;
- }
-
- ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed);
- return retval;
-}
-
static struct hpc_ops shpchp_hpc_ops = {
.power_on_slot = hpc_power_on_slot,
.slot_enable = hpc_slot_enable,
@@ -926,8 +930,6 @@ static struct hpc_ops shpchp_hpc_ops = {
.get_latch_status = hpc_get_latch_status,
.get_adapter_status = hpc_get_adapter_status,
- .get_max_bus_speed = hpc_get_max_bus_speed,
- .get_cur_bus_speed = hpc_get_cur_bus_speed,
.get_adapter_speed = hpc_get_adapter_speed,
.get_mode1_ECC_cap = hpc_get_mode1_ECC_cap,
.get_prog_int = hpc_get_prog_int,
@@ -1086,6 +1088,9 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
}
ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq);
+ shpc_get_max_bus_speed(ctrl);
+ shpc_get_cur_bus_speed(ctrl);
+
/*
* If this is the first controller to be initialized,
* initialize the shpchpd work queue
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c5df94e86678..807224ec8351 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -75,7 +75,8 @@ static ssize_t local_cpus_show(struct device *dev,
int len;
#ifdef CONFIG_NUMA
- mask = cpumask_of_node(dev_to_node(dev));
+ mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+ cpumask_of_node(dev_to_node(dev));
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
@@ -93,7 +94,8 @@ static ssize_t local_cpulist_show(struct device *dev,
int len;
#ifdef CONFIG_NUMA
- mask = cpumask_of_node(dev_to_node(dev));
+ mask = (dev_to_node(dev) == -1) ? cpu_online_mask :
+ cpumask_of_node(dev_to_node(dev));
#else
mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
#endif
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 0906599ebfde..1f9be53c39ba 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -19,7 +19,6 @@
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
-#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include <linux/device.h>
#include <asm/setup.h>
#include "pci.h"
@@ -29,7 +28,23 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+ unsigned int delay = dev->d3_delay;
+
+ if (delay < pci_pm_d3_delay)
+ delay = pci_pm_d3_delay;
+
+ msleep(delay);
+}
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
@@ -522,7 +537,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
@@ -1409,6 +1424,7 @@ void pci_pm_init(struct pci_dev *dev)
}
dev->pm_cap = pm;
+ dev->d3_delay = PCI_PM_D3_WAIT;
dev->d1_support = false;
dev->d2_support = false;
@@ -2247,12 +2263,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D3hot;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
return 0;
}
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fbd0e3adbca3..5d169bc3ccd1 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -319,6 +319,13 @@ struct pci_dev_reset_methods {
int (*reset)(struct pci_dev *dev, int probe);
};
+#ifdef CONFIG_PCI_QUIRKS
extern int pci_dev_specific_reset(struct pci_dev *dev, int probe);
+#else
+static inline int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+ return -ENOTTY;
+}
+#endif
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 797d47809f7a..8c30a9544d61 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -321,7 +321,7 @@ static int aer_inject(struct aer_error_inj *einj)
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
- u32 sever;
+ u32 sever, mask;
int ret = 0;
dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
@@ -374,6 +374,24 @@ static int aer_inject(struct aer_error_inj *einj)
err->header_log2 = einj->header_log2;
err->header_log3 = einj->header_log3;
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &mask);
+ if (einj->cor_status && !(einj->cor_status & ~mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The correctable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+
+ pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK, &mask);
+ if (einj->uncor_status && !(einj->uncor_status & ~mask)) {
+ ret = -EINVAL;
+ printk(KERN_WARNING "The uncorrectable error(s) is masked "
+ "by device\n");
+ spin_unlock_irqrestore(&inject_lock, flags);
+ goto out_put;
+ }
+
rperr = __find_aer_error_by_dev(rpdev);
if (!rperr) {
rperr = rperr_alloc;
@@ -413,8 +431,14 @@ static int aer_inject(struct aer_error_inj *einj)
if (ret)
goto out_put;
- if (find_aer_device(rpdev, &edev))
+ if (find_aer_device(rpdev, &edev)) {
+ if (!get_service_data(edev)) {
+ printk(KERN_WARNING "AER service is not initialized\n");
+ ret = -EINVAL;
+ goto out_put;
+ }
aer_irq(-1, edev);
+ }
else
ret = -EINVAL;
out_put:
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 413262eb95b7..b174188ac121 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -27,7 +27,7 @@
*/
static void release_pcie_device(struct device *dev)
{
- kfree(to_pcie_device(dev));
+ kfree(to_pcie_device(dev));
}
/**
@@ -346,12 +346,11 @@ static int suspend_iter(struct device *dev, void *data)
{
struct pcie_port_service_driver *service_driver;
- if ((dev->bus == &pcie_port_bus_type) &&
- (dev->driver)) {
- service_driver = to_service_driver(dev->driver);
- if (service_driver->suspend)
- service_driver->suspend(to_pcie_device(dev));
- }
+ if ((dev->bus == &pcie_port_bus_type) && dev->driver) {
+ service_driver = to_service_driver(dev->driver);
+ if (service_driver->suspend)
+ service_driver->suspend(to_pcie_device(dev));
+ }
return 0;
}
@@ -494,6 +493,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)
return driver_register(&new->driver);
}
+EXPORT_SYMBOL(pcie_port_service_register);
/**
* pcie_port_service_unregister - unregister PCI Express port service driver
@@ -503,6 +503,4 @@ void pcie_port_service_unregister(struct pcie_port_service_driver *drv)
{
driver_unregister(&drv->driver);
}
-
-EXPORT_SYMBOL(pcie_port_service_register);
EXPORT_SYMBOL(pcie_port_service_unregister);
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 34d65172a4d7..13c8972886e6 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -63,7 +63,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
* pcie_portdrv_probe - Probe PCI-Express port devices
* @dev: PCI-Express port device being probed
*
- * If detected invokes the pcie_port_device_register() method for
+ * If detected invokes the pcie_port_device_register() method for
* this port device.
*
*/
@@ -78,7 +78,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
(dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
return -ENODEV;
- if (!dev->irq && dev->pin) {
+ if (!dev->irq && dev->pin) {
dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
"check vendor BIOS\n", dev->vendor, dev->device);
}
@@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
return 0;
}
-static void pcie_portdrv_remove (struct pci_dev *dev)
+static void pcie_portdrv_remove(struct pci_dev *dev)
{
pcie_port_device_remove(dev);
pci_disable_device(dev);
@@ -129,14 +129,13 @@ static int error_detected_iter(struct device *device, void *data)
static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
- struct aer_broadcast_data result_data =
- {error, PCI_ERS_RESULT_CAN_RECOVER};
- int retval;
+ struct aer_broadcast_data data = {error, PCI_ERS_RESULT_CAN_RECOVER};
+ int ret;
/* can not fail */
- retval = device_for_each_child(&dev->dev, &result_data, error_detected_iter);
+ ret = device_for_each_child(&dev->dev, &data, error_detected_iter);
- return result_data.result;
+ return data.result;
}
static int mmio_enabled_iter(struct device *device, void *data)
@@ -290,7 +289,7 @@ static int __init pcie_portdrv_init(void)
return retval;
}
-static void __exit pcie_portdrv_exit(void)
+static void __exit pcie_portdrv_exit(void)
{
pci_unregister_driver(&pcie_portdriver);
pcie_port_bus_unregister();
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 98ffb2de22e9..11824d791f68 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -387,10 +387,146 @@ static struct pci_bus * pci_alloc_bus(void)
INIT_LIST_HEAD(&b->children);
INIT_LIST_HEAD(&b->devices);
INIT_LIST_HEAD(&b->slots);
+ b->max_bus_speed = PCI_SPEED_UNKNOWN;
+ b->cur_bus_speed = PCI_SPEED_UNKNOWN;
}
return b;
}
+static unsigned char pcix_bus_speed[] = {
+ PCI_SPEED_UNKNOWN, /* 0 */
+ PCI_SPEED_66MHz_PCIX, /* 1 */
+ PCI_SPEED_100MHz_PCIX, /* 2 */
+ PCI_SPEED_133MHz_PCIX, /* 3 */
+ PCI_SPEED_UNKNOWN, /* 4 */
+ PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
+ PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
+ PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
+ PCI_SPEED_UNKNOWN, /* 8 */
+ PCI_SPEED_66MHz_PCIX_266, /* 9 */
+ PCI_SPEED_100MHz_PCIX_266, /* A */
+ PCI_SPEED_133MHz_PCIX_266, /* B */
+ PCI_SPEED_UNKNOWN, /* C */
+ PCI_SPEED_66MHz_PCIX_533, /* D */
+ PCI_SPEED_100MHz_PCIX_533, /* E */
+ PCI_SPEED_133MHz_PCIX_533 /* F */
+};
+
+static unsigned char pcie_link_speed[] = {
+ PCI_SPEED_UNKNOWN, /* 0 */
+ PCIE_SPEED_2_5GT, /* 1 */
+ PCIE_SPEED_5_0GT, /* 2 */
+ PCIE_SPEED_8_0GT, /* 3 */
+ PCI_SPEED_UNKNOWN, /* 4 */
+ PCI_SPEED_UNKNOWN, /* 5 */
+ PCI_SPEED_UNKNOWN, /* 6 */
+ PCI_SPEED_UNKNOWN, /* 7 */
+ PCI_SPEED_UNKNOWN, /* 8 */
+ PCI_SPEED_UNKNOWN, /* 9 */
+ PCI_SPEED_UNKNOWN, /* A */
+ PCI_SPEED_UNKNOWN, /* B */
+ PCI_SPEED_UNKNOWN, /* C */
+ PCI_SPEED_UNKNOWN, /* D */
+ PCI_SPEED_UNKNOWN, /* E */
+ PCI_SPEED_UNKNOWN /* F */
+};
+
+void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
+{
+ bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
+}
+EXPORT_SYMBOL_GPL(pcie_update_link_speed);
+
+static unsigned char agp_speeds[] = {
+ AGP_UNKNOWN,
+ AGP_1X,
+ AGP_2X,
+ AGP_4X,
+ AGP_8X
+};
+
+static enum pci_bus_speed agp_speed(int agp3, int agpstat)
+{
+ int index = 0;
+
+ if (agpstat & 4)
+ index = 3;
+ else if (agpstat & 2)
+ index = 2;
+ else if (agpstat & 1)
+ index = 1;
+ else
+ goto out;
+
+ if (agp3) {
+ index += 2;
+ if (index == 5)
+ index = 0;
+ }
+
+ out:
+ return agp_speeds[index];
+}
+
+
+static void pci_set_bus_speed(struct pci_bus *bus)
+{
+ struct pci_dev *bridge = bus->self;
+ int pos;
+
+ pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
+ if (!pos)
+ pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
+ if (pos) {
+ u32 agpstat, agpcmd;
+
+ pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
+ bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
+
+ pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
+ bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
+ }
+
+ pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
+ if (pos) {
+ u16 status;
+ enum pci_bus_speed max;
+ pci_read_config_word(bridge, pos + 2, &status);
+
+ if (status & 0x8000) {
+ max = PCI_SPEED_133MHz_PCIX_533;
+ } else if (status & 0x4000) {
+ max = PCI_SPEED_133MHz_PCIX_266;
+ } else if (status & 0x0002) {
+ if (((status >> 12) & 0x3) == 2) {
+ max = PCI_SPEED_133MHz_PCIX_ECC;
+ } else {
+ max = PCI_SPEED_133MHz_PCIX;
+ }
+ } else {
+ max = PCI_SPEED_66MHz_PCIX;
+ }
+
+ bus->max_bus_speed = max;
+ bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
+
+ return;
+ }
+
+ pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ if (pos) {
+ u32 linkcap;
+ u16 linksta;
+
+ pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
+ bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
+
+ pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
+ pcie_update_link_speed(bus, linksta);
+ }
+}
+
+
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
struct pci_dev *bridge, int busnr)
{
@@ -430,6 +566,8 @@ static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
child->self = bridge;
child->bridge = get_device(&bridge->dev);
+ pci_set_bus_speed(child);
+
/* Set up default resource pointers and names.. */
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
@@ -1081,6 +1219,37 @@ struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
}
EXPORT_SYMBOL(pci_scan_single_device);
+static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
+{
+ u16 cap;
+ unsigned pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return 0;
+ pci_read_config_word(dev, pos + 4, &cap);
+ return cap >> 8;
+}
+
+static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
+{
+ return (fn + 1) % 8;
+}
+
+static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
+{
+ return 0;
+}
+
+static int only_one_child(struct pci_bus *bus)
+{
+ struct pci_dev *parent = bus->self;
+ if (!parent || !pci_is_pcie(parent))
+ return 0;
+ if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
+ parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
+ return 1;
+ return 0;
+}
+
/**
* pci_scan_slot - scan a PCI slot on a bus for devices.
* @bus: PCI bus to scan
@@ -1094,21 +1263,28 @@ EXPORT_SYMBOL(pci_scan_single_device);
*/
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
- int fn, nr = 0;
+ unsigned fn, nr = 0;
struct pci_dev *dev;
+ unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
+
+ if (only_one_child(bus) && (devfn > 0))
+ return 0; /* Already scanned the entire slot */
dev = pci_scan_single_device(bus, devfn);
if (dev && !dev->is_added) /* new device? */
nr++;
- if (dev && dev->multifunction) {
- for (fn = 1; fn < 8; fn++) {
- dev = pci_scan_single_device(bus, devfn + fn);
- if (dev) {
- if (!dev->is_added)
- nr++;
- dev->multifunction = 1;
- }
+ if (pci_ari_enabled(bus))
+ next_fn = next_ari_fn;
+ else if (dev && dev->multifunction)
+ next_fn = next_trad_fn;
+
+ for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
+ dev = pci_scan_single_device(bus, devfn + fn);
+ if (dev) {
+ if (!dev->is_added)
+ nr++;
+ dev->multifunction = 1;
}
}
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index c74694345b6e..213acf69ff95 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -25,14 +25,9 @@
#include <linux/dmi.h>
#include <linux/pci-aspm.h>
#include <linux/ioport.h>
+#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
-int isa_dma_bridge_buggy;
-EXPORT_SYMBOL(isa_dma_bridge_buggy);
-int pci_pci_problems;
-EXPORT_SYMBOL(pci_pci_problems);
-
-#ifdef CONFIG_PCI_QUIRKS
/*
* This quirk function disables memory decoding and releases memory resources
* of the device specified by kernel's boot parameter 'pci=resource_alignment='.
@@ -2595,6 +2590,7 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
}
pci_do_fixups(dev, start, end);
}
+EXPORT_SYMBOL(pci_fixup_device);
static int __init pci_apply_final_quirks(void)
{
@@ -2706,9 +2702,3 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe)
return -ENOTTY;
}
-
-#else
-void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
-int pci_dev_specific_reset(struct pci_dev *dev, int probe) { return -ENOTTY; }
-#endif
-EXPORT_SYMBOL(pci_fixup_device);
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 8c02b6c53bdb..49c9e6c9779a 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -47,6 +47,55 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf)
slot->number);
}
+/* these strings match up with the values in pci_bus_speed */
+static char *pci_bus_speed_strings[] = {
+ "33 MHz PCI", /* 0x00 */
+ "66 MHz PCI", /* 0x01 */
+ "66 MHz PCI-X", /* 0x02 */
+ "100 MHz PCI-X", /* 0x03 */
+ "133 MHz PCI-X", /* 0x04 */
+ NULL, /* 0x05 */
+ NULL, /* 0x06 */
+ NULL, /* 0x07 */
+ NULL, /* 0x08 */
+ "66 MHz PCI-X 266", /* 0x09 */
+ "100 MHz PCI-X 266", /* 0x0a */
+ "133 MHz PCI-X 266", /* 0x0b */
+ "Unknown AGP", /* 0x0c */
+ "1x AGP", /* 0x0d */
+ "2x AGP", /* 0x0e */
+ "4x AGP", /* 0x0f */
+ "8x AGP", /* 0x10 */
+ "66 MHz PCI-X 533", /* 0x11 */
+ "100 MHz PCI-X 533", /* 0x12 */
+ "133 MHz PCI-X 533", /* 0x13 */
+ "2.5 GT/s PCIe", /* 0x14 */
+ "5.0 GT/s PCIe", /* 0x15 */
+ "8.0 GT/s PCIe", /* 0x16 */
+};
+
+static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf)
+{
+ const char *speed_string;
+
+ if (speed < ARRAY_SIZE(pci_bus_speed_strings))
+ speed_string = pci_bus_speed_strings[speed];
+ else
+ speed_string = "Unknown";
+
+ return sprintf(buf, "%s\n", speed_string);
+}
+
+static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf)
+{
+ return bus_speed_read(slot->bus->max_bus_speed, buf);
+}
+
+static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf)
+{
+ return bus_speed_read(slot->bus->cur_bus_speed, buf);
+}
+
static void pci_slot_release(struct kobject *kobj)
{
struct pci_dev *dev;
@@ -66,9 +115,15 @@ static void pci_slot_release(struct kobject *kobj)
static struct pci_slot_attribute pci_slot_attr_address =
__ATTR(address, (S_IFREG | S_IRUGO), address_read_file, NULL);
+static struct pci_slot_attribute pci_slot_attr_max_speed =
+ __ATTR(max_bus_speed, (S_IFREG | S_IRUGO), max_speed_read_file, NULL);
+static struct pci_slot_attribute pci_slot_attr_cur_speed =
+ __ATTR(cur_bus_speed, (S_IFREG | S_IRUGO), cur_speed_read_file, NULL);
static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_address.attr,
+ &pci_slot_attr_max_speed.attr,
+ &pci_slot_attr_cur_speed.attr,
NULL,
};
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 9f3adbd9f700..0a6601c76809 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -84,7 +84,7 @@ config YENTA
tristate "CardBus yenta-compatible bridge support"
depends on PCI
select CARDBUS if !EMBEDDED
- select PCCARD_NONSTATIC
+ select PCCARD_NONSTATIC if PCMCIA != n
---help---
This option enables support for CardBus host bridges. Virtually
all modern PCMCIA bridges are CardBus compatible. A "bridge" is
@@ -161,9 +161,8 @@ config TCIC
config PCMCIA_M8XX
tristate "MPC8xx PCMCIA support"
- depends on PCMCIA && PPC && 8xx
- select PCCARD_IODYN
- select PCCARD_NONSTATIC
+ depends on PCCARD && PPC && 8xx
+ select PCCARD_IODYN if PCMCIA != n
help
Say Y here to include support for PowerPC 8xx series PCMCIA
controller.
@@ -174,6 +173,27 @@ config PCMCIA_AU1X00
tristate "Au1x00 pcmcia support"
depends on SOC_AU1X00 && PCMCIA
+config PCMCIA_ALCHEMY_DEVBOARD
+ tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
+ depends on SOC_AU1X00 && PCMCIA
+ select 64BIT_PHYS_ADDR
+ help
+ Enable this driver of you want PCMCIA support on your Alchemy
+ Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200 board.
+ NOT suitable for the PB1000!
+
+ This driver is also available as a module called db1xxx_ss.ko
+
+config PCMCIA_XXS1500
+ tristate "MyCable XXS1500 PCMCIA socket support"
+ depends on PCMCIA && MIPS_XXS1500
+ select 64BIT_PHYS_ADDR
+ help
+ Support for the PCMCIA/CF socket interface on MyCable XXS1500
+ systems.
+
+ This driver is also available as a module called xxs1500_ss.ko
+
config PCMCIA_BCM63XX
tristate "bcm63xx pcmcia support"
depends on BCM63XX && PCMCIA
@@ -238,14 +258,12 @@ config PCMCIA_PROBE
config M32R_PCC
bool "M32R PCMCIA I/F"
depends on M32R && CHIP_M32700 && PCMCIA
- select PCCARD_NONSTATIC
help
Say Y here to use the M32R PCMCIA controller.
config M32R_CFC
bool "M32R CF I/F Controller"
depends on M32R && (PLAT_USRV || PLAT_M32700UT || PLAT_MAPPI2 || PLAT_MAPPI3 || PLAT_OPSPUT)
- select PCCARD_NONSTATIC
help
Say Y here to use the M32R CompactFlash controller.
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 83ff802de544..381b031d9d75 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -2,11 +2,11 @@
# Makefile for the kernel pcmcia subsystem (c/o David Hinds)
#
-pcmcia_core-y += cs.o cistpl.o rsrc_mgr.o socket_sysfs.o
+pcmcia_core-y += cs.o rsrc_mgr.o socket_sysfs.o
pcmcia_core-$(CONFIG_CARDBUS) += cardbus.o
obj-$(CONFIG_PCCARD) += pcmcia_core.o
-pcmcia-y += ds.o pcmcia_resource.o
+pcmcia-y += ds.o pcmcia_resource.o cistpl.o
pcmcia-$(CONFIG_PCMCIA_IOCTL) += pcmcia_ioctl.o
obj-$(CONFIG_PCMCIA) += pcmcia.o
@@ -35,18 +35,10 @@ obj-$(CONFIG_OMAP_CF) += omap_cf.o
obj-$(CONFIG_BFIN_CFPCMCIA) += bfin_cf_pcmcia.o
obj-$(CONFIG_AT91_CF) += at91_cf.o
obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
+obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o
au1x00_ss-y += au1000_generic.o
au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o
-au1x00_ss-$(CONFIG_MIPS_PB1100) += au1000_pb1x00.o
-au1x00_ss-$(CONFIG_MIPS_PB1200) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_PB1500) += au1000_pb1x00.o
-au1x00_ss-$(CONFIG_MIPS_DB1000) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_DB1100) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_DB1200) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_DB1500) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_DB1550) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_XXS1500) += au1000_xxs1500.o
sa1111_cs-y += sa1111_generic.o
sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o
@@ -76,3 +68,5 @@ pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
obj-$(CONFIG_PCMCIA_PXA2XX) += pxa2xx_base.o $(pxa2xx-obj-y)
+
+obj-$(CONFIG_PCMCIA_XXS1500) += xxs1500_ss.o
diff --git a/drivers/pcmcia/au1000_db1x00.c b/drivers/pcmcia/au1000_db1x00.c
deleted file mode 100644
index c78d77fd7e3b..000000000000
--- a/drivers/pcmcia/au1000_db1x00.c
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- *
- * Alchemy Semi Db1x00 boards specific pcmcia routines.
- *
- * Copyright 2002 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * Copyright 2004 Pete Popov, updated the driver to 2.6.
- * Followed the sa11xx API and largely copied many of the hardware
- * independent functions.
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/init.h>
-
-#include <asm/irq.h>
-#include <asm/signal.h>
-#include <asm/mach-au1x00/au1000.h>
-
-#if defined(CONFIG_MIPS_DB1200)
- #include <db1200.h>
-#elif defined(CONFIG_MIPS_PB1200)
- #include <pb1200.h>
-#else
- #include <asm/mach-db1x00/db1x00.h>
- static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
-#endif
-
-#include "au1000_generic.h"
-
-#if 0
-#define debug(x,args...) printk(KERN_DEBUG "%s: " x, __func__ , ##args)
-#else
-#define debug(x,args...)
-#endif
-
-
-struct au1000_pcmcia_socket au1000_pcmcia_socket[PCMCIA_NUM_SOCKS];
-extern int au1x00_pcmcia_socket_probe(struct device *, struct pcmcia_low_level *, int, int);
-
-static int db1x00_pcmcia_hw_init(struct au1000_pcmcia_socket *skt)
-{
-#ifdef CONFIG_MIPS_DB1550
- skt->irq = skt->nr ? AU1000_GPIO_5 : AU1000_GPIO_3;
-#elif defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
- skt->irq = skt->nr ? BOARD_PC1_INT : BOARD_PC0_INT;
-#else
- skt->irq = skt->nr ? AU1000_GPIO_5 : AU1000_GPIO_2;
-#endif
- return 0;
-}
-
-static void db1x00_pcmcia_shutdown(struct au1000_pcmcia_socket *skt)
-{
- bcsr->pcmcia = 0; /* turn off power */
- au_sync_delay(2);
-}
-
-static void
-db1x00_pcmcia_socket_state(struct au1000_pcmcia_socket *skt, struct pcmcia_state *state)
-{
- u32 inserted;
- unsigned char vs;
-
- state->ready = 0;
- state->vs_Xv = 0;
- state->vs_3v = 0;
- state->detect = 0;
-
- switch (skt->nr) {
- case 0:
- vs = bcsr->status & 0x3;
-#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
- inserted = BOARD_CARD_INSERTED(0);
-#else
- inserted = !(bcsr->status & (1<<4));
-#endif
- break;
- case 1:
- vs = (bcsr->status & 0xC)>>2;
-#if defined(CONFIG_MIPS_DB1200) || defined(CONFIG_MIPS_PB1200)
- inserted = BOARD_CARD_INSERTED(1);
-#else
- inserted = !(bcsr->status & (1<<5));
-#endif
- break;
- default:/* should never happen */
- return;
- }
-
- if (inserted)
- debug("db1x00 socket %d: inserted %d, vs %d pcmcia %x\n",
- skt->nr, inserted, vs, bcsr->pcmcia);
-
- if (inserted) {
- switch (vs) {
- case 0:
- case 2:
- state->vs_3v=1;
- break;
- case 3: /* 5V */
- break;
- default:
- /* return without setting 'detect' */
- printk(KERN_ERR "db1x00 bad VS (%d)\n",
- vs);
- }
- state->detect = 1;
- state->ready = 1;
- }
- else {
- /* if the card was previously inserted and then ejected,
- * we should turn off power to it
- */
- if ((skt->nr == 0) && (bcsr->pcmcia & BCSR_PCMCIA_PC0RST)) {
- bcsr->pcmcia &= ~(BCSR_PCMCIA_PC0RST |
- BCSR_PCMCIA_PC0DRVEN |
- BCSR_PCMCIA_PC0VPP |
- BCSR_PCMCIA_PC0VCC);
- au_sync_delay(10);
- }
- else if ((skt->nr == 1) && bcsr->pcmcia & BCSR_PCMCIA_PC1RST) {
- bcsr->pcmcia &= ~(BCSR_PCMCIA_PC1RST |
- BCSR_PCMCIA_PC1DRVEN |
- BCSR_PCMCIA_PC1VPP |
- BCSR_PCMCIA_PC1VCC);
- au_sync_delay(10);
- }
- }
-
- state->bvd1=1;
- state->bvd2=1;
- state->wrprot=0;
-}
-
-static int
-db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_state_t *state)
-{
- u16 pwr;
- int sock = skt->nr;
-
- debug("config_skt %d Vcc %dV Vpp %dV, reset %d\n",
- sock, state->Vcc, state->Vpp,
- state->flags & SS_RESET);
-
- /* pcmcia reg was set to zero at init time. Be careful when
- * initializing a socket not to wipe out the settings of the
- * other socket.
- */
- pwr = bcsr->pcmcia;
- pwr &= ~(0xf << sock*8); /* clear voltage settings */
-
- state->Vpp = 0;
- switch(state->Vcc){
- case 0: /* Vcc 0 */
- pwr |= SET_VCC_VPP(0,0,sock);
- break;
- case 50: /* Vcc 5V */
- switch(state->Vpp) {
- case 0:
- pwr |= SET_VCC_VPP(2,0,sock);
- break;
- case 50:
- pwr |= SET_VCC_VPP(2,1,sock);
- break;
- case 12:
- pwr |= SET_VCC_VPP(2,2,sock);
- break;
- case 33:
- default:
- pwr |= SET_VCC_VPP(0,0,sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- state->Vcc,
- state->Vpp);
- break;
- }
- break;
- case 33: /* Vcc 3.3V */
- switch(state->Vpp) {
- case 0:
- pwr |= SET_VCC_VPP(1,0,sock);
- break;
- case 12:
- pwr |= SET_VCC_VPP(1,2,sock);
- break;
- case 33:
- pwr |= SET_VCC_VPP(1,1,sock);
- break;
- case 50:
- default:
- pwr |= SET_VCC_VPP(0,0,sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- state->Vcc,
- state->Vpp);
- break;
- }
- break;
- default: /* what's this ? */
- pwr |= SET_VCC_VPP(0,0,sock);
- printk(KERN_ERR "%s: bad Vcc %d\n",
- __func__, state->Vcc);
- break;
- }
-
- bcsr->pcmcia = pwr;
- au_sync_delay(300);
-
- if (sock == 0) {
- if (!(state->flags & SS_RESET)) {
- pwr |= BCSR_PCMCIA_PC0DRVEN;
- bcsr->pcmcia = pwr;
- au_sync_delay(300);
- pwr |= BCSR_PCMCIA_PC0RST;
- bcsr->pcmcia = pwr;
- au_sync_delay(100);
- }
- else {
- pwr &= ~(BCSR_PCMCIA_PC0RST | BCSR_PCMCIA_PC0DRVEN);
- bcsr->pcmcia = pwr;
- au_sync_delay(100);
- }
- }
- else {
- if (!(state->flags & SS_RESET)) {
- pwr |= BCSR_PCMCIA_PC1DRVEN;
- bcsr->pcmcia = pwr;
- au_sync_delay(300);
- pwr |= BCSR_PCMCIA_PC1RST;
- bcsr->pcmcia = pwr;
- au_sync_delay(100);
- }
- else {
- pwr &= ~(BCSR_PCMCIA_PC1RST | BCSR_PCMCIA_PC1DRVEN);
- bcsr->pcmcia = pwr;
- au_sync_delay(100);
- }
- }
- return 0;
-}
-
-/*
- * Enable card status IRQs on (re-)initialisation. This can
- * be called at initialisation, power management event, or
- * pcmcia event.
- */
-void db1x00_socket_init(struct au1000_pcmcia_socket *skt)
-{
- /* nothing to do for now */
-}
-
-/*
- * Disable card status IRQs and PCMCIA bus on suspend.
- */
-void db1x00_socket_suspend(struct au1000_pcmcia_socket *skt)
-{
- /* nothing to do for now */
-}
-
-struct pcmcia_low_level db1x00_pcmcia_ops = {
- .owner = THIS_MODULE,
-
- .hw_init = db1x00_pcmcia_hw_init,
- .hw_shutdown = db1x00_pcmcia_shutdown,
-
- .socket_state = db1x00_pcmcia_socket_state,
- .configure_socket = db1x00_pcmcia_configure_socket,
-
- .socket_init = db1x00_socket_init,
- .socket_suspend = db1x00_socket_suspend
-};
-
-int au1x_board_init(struct device *dev)
-{
- int ret = -ENODEV;
- bcsr->pcmcia = 0; /* turn off power, if it's not already off */
- au_sync_delay(2);
- ret = au1x00_pcmcia_socket_probe(dev, &db1x00_pcmcia_ops, 0, 2);
- return ret;
-}
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index 13a4fbc58711..aa743f6875bd 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -44,22 +44,12 @@
/* pcmcia socket 1 needs external glue logic so the memory map
* differs from board to board.
*/
-#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100) || \
- defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_PB1550) || \
- defined(CONFIG_MIPS_PB1200)
+#if defined(CONFIG_MIPS_PB1000)
#define AU1X_SOCK1_IO 0xF08000000ULL
#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL
#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL
#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4800000
#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8800000
-#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || \
- defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) || \
- defined(CONFIG_MIPS_DB1200)
-#define AU1X_SOCK1_IO 0xF04000000ULL
-#define AU1X_SOCK1_PHYS_ATTR 0xF44000000ULL
-#define AU1X_SOCK1_PHYS_MEM 0xF84000000ULL
-#define AU1X_SOCK1_PSEUDO_PHYS_ATTR 0xF4400000
-#define AU1X_SOCK1_PSEUDO_PHYS_MEM 0xF8400000
#endif
struct pcmcia_state {
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index b1984ed72d1d..5a979cb8f3e6 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -1,6 +1,6 @@
/*
*
- * Alchemy Semi Pb1x00 boards specific pcmcia routines.
+ * Alchemy Semi Pb1000 boards specific pcmcia routines.
*
* Copyright 2002 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
@@ -46,20 +46,11 @@
#define debug(fmt, arg...) do { } while (0)
-#ifdef CONFIG_MIPS_PB1000
#include <asm/pb1000.h>
#define PCMCIA_IRQ AU1000_GPIO_15
-#elif defined (CONFIG_MIPS_PB1500)
-#include <asm/pb1500.h>
-#define PCMCIA_IRQ AU1500_GPIO_203
-#elif defined (CONFIG_MIPS_PB1100)
-#include <asm/pb1100.h>
-#define PCMCIA_IRQ AU1000_GPIO_11
-#endif
static int pb1x00_pcmcia_init(struct pcmcia_init *init)
{
-#ifdef CONFIG_MIPS_PB1000
u16 pcr;
pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
@@ -74,21 +65,10 @@ static int pb1x00_pcmcia_init(struct pcmcia_init *init)
au_sync_delay(20);
return PCMCIA_NUM_SOCKS;
-
-#else /* fixme -- take care of the Pb1500 at some point */
-
- u16 pcr;
- pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf; /* turn off power */
- pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
- au_writew(pcr, PCMCIA_BOARD_REG);
- au_sync_delay(500);
- return PCMCIA_NUM_SOCKS;
-#endif
}
static int pb1x00_pcmcia_shutdown(void)
{
-#ifdef CONFIG_MIPS_PB1000
u16 pcr;
pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0);
@@ -96,14 +76,6 @@ static int pb1x00_pcmcia_shutdown(void)
au_writel(pcr, PB1000_PCR);
au_sync_delay(20);
return 0;
-#else
- u16 pcr;
- pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf; /* turn off power */
- pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
- au_writew(pcr, PCMCIA_BOARD_REG);
- au_sync_delay(2);
- return 0;
-#endif
}
static int
@@ -112,21 +84,11 @@ pb1x00_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
u32 inserted0, inserted1;
u16 vs0, vs1;
-#ifdef CONFIG_MIPS_PB1000
vs0 = vs1 = (u16)au_readl(PB1000_ACR1);
inserted0 = !(vs0 & (ACR1_SLOT_0_CD1 | ACR1_SLOT_0_CD2));
inserted1 = !(vs1 & (ACR1_SLOT_1_CD1 | ACR1_SLOT_1_CD2));
vs0 = (vs0 >> 4) & 0x3;
vs1 = (vs1 >> 12) & 0x3;
-#else
- vs0 = (au_readw(BOARD_STATUS_REG) >> 4) & 0x3;
-#ifdef CONFIG_MIPS_PB1500
- inserted0 = !((au_readl(GPIO2_PINSTATE) >> 1) & 0x1); /* gpio 201 */
-#else /* Pb1100 */
- inserted0 = !((au_readl(SYS_PINSTATERD) >> 9) & 0x1); /* gpio 9 */
-#endif
- inserted1 = 0;
-#endif
state->ready = 0;
state->vs_Xv = 0;
@@ -203,7 +165,6 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
if(configure->sock > PCMCIA_MAX_SOCK) return -1;
-#ifdef CONFIG_MIPS_PB1000
pcr = au_readl(PB1000_PCR);
if (configure->sock == 0) {
@@ -323,84 +284,6 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
au_writel(pcr, PB1000_PCR);
au_sync_delay(300);
-#else
-
- pcr = au_readw(PCMCIA_BOARD_REG) & ~0xf;
-
- debug("Vcc %dV Vpp %dV, pcr %x, reset %d\n",
- configure->vcc, configure->vpp, pcr, configure->reset);
-
-
- switch(configure->vcc){
- case 0: /* Vcc 0 */
- pcr |= SET_VCC_VPP(0,0);
- break;
- case 50: /* Vcc 5V */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(2,0);
- break;
- case 50:
- pcr |= SET_VCC_VPP(2,1);
- break;
- case 12:
- pcr |= SET_VCC_VPP(2,2);
- break;
- case 33:
- default:
- pcr |= SET_VCC_VPP(0,0);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- case 33: /* Vcc 3.3V */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(1,0);
- break;
- case 12:
- pcr |= SET_VCC_VPP(1,2);
- break;
- case 33:
- pcr |= SET_VCC_VPP(1,1);
- break;
- case 50:
- default:
- pcr |= SET_VCC_VPP(0,0);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- default: /* what's this ? */
- pcr |= SET_VCC_VPP(0,0);
- printk(KERN_ERR "%s: bad Vcc %d\n",
- __func__, configure->vcc);
- break;
- }
-
- au_writew(pcr, PCMCIA_BOARD_REG);
- au_sync_delay(300);
-
- if (!configure->reset) {
- pcr |= PC_DRV_EN;
- au_writew(pcr, PCMCIA_BOARD_REG);
- au_sync_delay(100);
- pcr |= PC_DEASSERT_RST;
- au_writew(pcr, PCMCIA_BOARD_REG);
- au_sync_delay(100);
- }
- else {
- pcr &= ~(PC_DEASSERT_RST | PC_DRV_EN);
- au_writew(pcr, PCMCIA_BOARD_REG);
- au_sync_delay(100);
- }
-#endif
return 0;
}
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
deleted file mode 100644
index b43d47b50819..000000000000
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ /dev/null
@@ -1,188 +0,0 @@
-/*
- *
- * MyCable board specific pcmcia routines.
- *
- * Copyright 2003 MontaVista Software Inc.
- * Author: Pete Popov, MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/proc_fs.h>
-#include <linux/types.h>
-
-#include <pcmcia/cs_types.h>
-#include <pcmcia/cs.h>
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-#include <pcmcia/bus_ops.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-
-#include <asm/au1000.h>
-#include <asm/au1000_pcmcia.h>
-
-#define PCMCIA_MAX_SOCK 0
-#define PCMCIA_NUM_SOCKS (PCMCIA_MAX_SOCK + 1)
-#define PCMCIA_IRQ AU1000_GPIO_4
-
-#if 0
-#define DEBUG(x, args...) printk(__func__ ": " x, ##args)
-#else
-#define DEBUG(x,args...)
-#endif
-
-static int xxs1500_pcmcia_init(struct pcmcia_init *init)
-{
- return PCMCIA_NUM_SOCKS;
-}
-
-static int xxs1500_pcmcia_shutdown(void)
-{
- /* turn off power */
- au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
- GPIO2_OUTPUT);
- au_sync_delay(100);
-
- /* assert reset */
- au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
- GPIO2_OUTPUT);
- au_sync_delay(100);
- return 0;
-}
-
-
-static int
-xxs1500_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
-{
- u32 inserted; u32 vs;
- unsigned long gpio, gpio2;
-
- if(sock > PCMCIA_MAX_SOCK) return -1;
-
- gpio = au_readl(SYS_PINSTATERD);
- gpio2 = au_readl(GPIO2_PINSTATE);
-
- vs = gpio2 & ((1<<8) | (1<<9));
- inserted = (!(gpio & 0x1) && !(gpio & 0x2));
-
- state->ready = 0;
- state->vs_Xv = 0;
- state->vs_3v = 0;
- state->detect = 0;
-
- if (inserted) {
- switch (vs) {
- case 0:
- case 1:
- case 2:
- state->vs_3v=1;
- break;
- case 3: /* 5V */
- default:
- /* return without setting 'detect' */
- printk(KERN_ERR "au1x00_cs: unsupported VS\n",
- vs);
- return;
- }
- state->detect = 1;
- }
-
- if (state->detect) {
- state->ready = 1;
- }
-
- state->bvd1= gpio2 & (1<<10);
- state->bvd2 = gpio2 & (1<<11);
- state->wrprot=0;
- return 1;
-}
-
-
-static int xxs1500_pcmcia_get_irq_info(struct pcmcia_irq_info *info)
-{
-
- if(info->sock > PCMCIA_MAX_SOCK) return -1;
- info->irq = PCMCIA_IRQ;
- return 0;
-}
-
-
-static int
-xxs1500_pcmcia_configure_socket(const struct pcmcia_configure *configure)
-{
-
- if(configure->sock > PCMCIA_MAX_SOCK) return -1;
-
- DEBUG("Vcc %dV Vpp %dV, reset %d\n",
- configure->vcc, configure->vpp, configure->reset);
-
- switch(configure->vcc){
- case 33: /* Vcc 3.3V */
- /* turn on power */
- DEBUG("turn on power\n");
- au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<14))|(1<<30),
- GPIO2_OUTPUT);
- au_sync_delay(100);
- break;
- case 50: /* Vcc 5V */
- default: /* what's this ? */
- printk(KERN_ERR "au1x00_cs: unsupported VCC\n");
- case 0: /* Vcc 0 */
- /* turn off power */
- au_sync_delay(100);
- au_writel(au_readl(GPIO2_PINSTATE) | (1<<14)|(1<<30),
- GPIO2_OUTPUT);
- break;
- }
-
- if (!configure->reset) {
- DEBUG("deassert reset\n");
- au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<4))|(1<<20),
- GPIO2_OUTPUT);
- au_sync_delay(100);
- au_writel((au_readl(GPIO2_PINSTATE) & ~(1<<5))|(1<<21),
- GPIO2_OUTPUT);
- }
- else {
- DEBUG("assert reset\n");
- au_writel(au_readl(GPIO2_PINSTATE) | (1<<4)|(1<<20),
- GPIO2_OUTPUT);
- }
- au_sync_delay(100);
- return 0;
-}
-
-struct pcmcia_low_level xxs1500_pcmcia_ops = {
- xxs1500_pcmcia_init,
- xxs1500_pcmcia_shutdown,
- xxs1500_pcmcia_socket_state,
- xxs1500_pcmcia_get_irq_info,
- xxs1500_pcmcia_configure_socket
-};
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index d99f846451a3..ac0686efbf75 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -20,170 +20,12 @@
*/
-#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <asm/irq.h>
-#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
-#include <pcmcia/cs.h>
-#include <pcmcia/cistpl.h>
-#include "cs_internal.h"
-
-/*====================================================================*/
-
-/* Offsets in the Expansion ROM Image Header */
-#define ROM_SIGNATURE 0x0000 /* 2 bytes */
-#define ROM_DATA_PTR 0x0018 /* 2 bytes */
-
-/* Offsets in the CardBus PC Card Data Structure */
-#define PCDATA_SIGNATURE 0x0000 /* 4 bytes */
-#define PCDATA_VPD_PTR 0x0008 /* 2 bytes */
-#define PCDATA_LENGTH 0x000a /* 2 bytes */
-#define PCDATA_REVISION 0x000c
-#define PCDATA_IMAGE_SZ 0x0010 /* 2 bytes */
-#define PCDATA_ROM_LEVEL 0x0012 /* 2 bytes */
-#define PCDATA_CODE_TYPE 0x0014
-#define PCDATA_INDICATOR 0x0015
-
-/*=====================================================================
-
- Expansion ROM's have a special layout, and pointers specify an
- image number and an offset within that image. xlate_rom_addr()
- converts an image/offset address to an absolute offset from the
- ROM's base address.
-
-=====================================================================*/
-
-static u_int xlate_rom_addr(void __iomem *b, u_int addr)
-{
- u_int img = 0, ofs = 0, sz;
- u_short data;
- while ((readb(b) == 0x55) && (readb(b + 1) == 0xaa)) {
- if (img == (addr >> 28))
- return (addr & 0x0fffffff) + ofs;
- data = readb(b + ROM_DATA_PTR) + (readb(b + ROM_DATA_PTR + 1) << 8);
- sz = 512 * (readb(b + data + PCDATA_IMAGE_SZ) +
- (readb(b + data + PCDATA_IMAGE_SZ + 1) << 8));
- if ((sz == 0) || (readb(b + data + PCDATA_INDICATOR) & 0x80))
- break;
- b += sz;
- ofs += sz;
- img++;
- }
- return 0;
-}
-
-/*=====================================================================
-
- These are similar to setup_cis_mem and release_cis_mem for 16-bit
- cards. The "result" that is used externally is the cb_cis_virt
- pointer in the struct pcmcia_socket structure.
-
-=====================================================================*/
-
-static void cb_release_cis_mem(struct pcmcia_socket *s)
-{
- if (s->cb_cis_virt) {
- dev_dbg(&s->dev, "cb_release_cis_mem()\n");
- iounmap(s->cb_cis_virt);
- s->cb_cis_virt = NULL;
- s->cb_cis_res = NULL;
- }
-}
-
-static int cb_setup_cis_mem(struct pcmcia_socket *s, struct resource *res)
-{
- unsigned int start, size;
-
- if (res == s->cb_cis_res)
- return 0;
-
- if (s->cb_cis_res)
- cb_release_cis_mem(s);
-
- start = res->start;
- size = res->end - start + 1;
- s->cb_cis_virt = ioremap(start, size);
-
- if (!s->cb_cis_virt)
- return -1;
-
- s->cb_cis_res = res;
-
- return 0;
-}
-
-/*=====================================================================
-
- This is used by the CIS processing code to read CIS information
- from a CardBus device.
-
-=====================================================================*/
-
-int read_cb_mem(struct pcmcia_socket *s, int space, u_int addr, u_int len,
- void *ptr)
-{
- struct pci_dev *dev;
- struct resource *res;
-
- dev_dbg(&s->dev, "read_cb_mem(%d, %#x, %u)\n", space, addr, len);
- dev = pci_get_slot(s->cb_dev->subordinate, 0);
- if (!dev)
- goto fail;
-
- /* Config space? */
- if (space == 0) {
- if (addr + len > 0x100)
- goto failput;
- for (; len; addr++, ptr++, len--)
- pci_read_config_byte(dev, addr, ptr);
- return 0;
- }
-
- res = dev->resource + space - 1;
-
- pci_dev_put(dev);
-
- if (!res->flags)
- goto fail;
-
- if (cb_setup_cis_mem(s, res) != 0)
- goto fail;
-
- if (space == 7) {
- addr = xlate_rom_addr(s->cb_cis_virt, addr);
- if (addr == 0)
- goto fail;
- }
-
- if (addr + len > res->end - res->start)
- goto fail;
-
- memcpy_fromio(ptr, s->cb_cis_virt + addr, len);
- return 0;
-
-failput:
- pci_dev_put(dev);
-fail:
- memset(ptr, 0xff, len);
- return -1;
-}
-
-/*=====================================================================
-
- cb_alloc() and cb_free() allocate and free the kernel data
- structures for a Cardbus device, and handle the lowest level PCI
- device setup issues.
-
-=====================================================================*/
static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
{
@@ -215,6 +57,13 @@ static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
}
}
+/**
+ * cb_alloc() - add CardBus device
+ * @s: the pcmcia_socket where the CardBus device is located
+ *
+ * cb_alloc() allocates the kernel data structures for a Cardbus device
+ * and handles the lowest level PCI device setup issues.
+ */
int __ref cb_alloc(struct pcmcia_socket *s)
{
struct pci_bus *bus = s->cb_dev->subordinate;
@@ -249,12 +98,16 @@ int __ref cb_alloc(struct pcmcia_socket *s)
return 0;
}
+/**
+ * cb_free() - remove CardBus device
+ * @s: the pcmcia_socket where the CardBus device was located
+ *
+ * cb_free() handles the lowest level PCI device cleanup.
+ */
void cb_free(struct pcmcia_socket *s)
{
struct pci_dev *bridge = s->cb_dev;
- cb_release_cis_mem(s);
-
if (bridge)
pci_remove_behind_bridge(bridge);
}
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 25b1cd219e37..936417c3e79e 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -76,7 +76,6 @@ void release_cis_mem(struct pcmcia_socket *s)
s->cis_virt = NULL;
}
}
-EXPORT_SYMBOL(release_cis_mem);
/*
* Map the card memory at "card_offset" into virtual space.
@@ -195,7 +194,6 @@ int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
*(u_char *)(ptr+2), *(u_char *)(ptr+3));
return 0;
}
-EXPORT_SYMBOL(pcmcia_read_cis_mem);
void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
@@ -254,7 +252,6 @@ void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
}
}
}
-EXPORT_SYMBOL(pcmcia_write_cis_mem);
/*======================================================================
@@ -268,29 +265,27 @@ EXPORT_SYMBOL(pcmcia_write_cis_mem);
static void read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
size_t len, void *ptr)
{
- struct cis_cache_entry *cis;
- int ret;
+ struct cis_cache_entry *cis;
+ int ret;
- if (s->fake_cis) {
- if (s->fake_cis_len >= addr+len)
- memcpy(ptr, s->fake_cis+addr, len);
- else
- memset(ptr, 0xff, len);
- return;
- }
+ if (s->state & SOCKET_CARDBUS)
+ return;
- list_for_each_entry(cis, &s->cis_cache, node) {
- if (cis->addr == addr && cis->len == len && cis->attr == attr) {
- memcpy(ptr, cis->cache, len);
- return;
+ if (s->fake_cis) {
+ if (s->fake_cis_len >= addr+len)
+ memcpy(ptr, s->fake_cis+addr, len);
+ else
+ memset(ptr, 0xff, len);
+ return;
+ }
+
+ list_for_each_entry(cis, &s->cis_cache, node) {
+ if (cis->addr == addr && cis->len == len && cis->attr == attr) {
+ memcpy(ptr, cis->cache, len);
+ return;
+ }
}
- }
-#ifdef CONFIG_CARDBUS
- if (s->state & SOCKET_CARDBUS)
- ret = read_cb_mem(s, attr, addr, len, ptr);
- else
-#endif
ret = pcmcia_read_cis_mem(s, attr, addr, len, ptr);
if (ret == 0) {
@@ -319,24 +314,24 @@ remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len)
}
}
+/**
+ * destroy_cis_cache() - destroy the CIS cache
+ * @s: pcmcia_socket for which CIS cache shall be destroyed
+ *
+ * This destroys the CIS cache but keeps any fake CIS alive.
+ */
+
void destroy_cis_cache(struct pcmcia_socket *s)
{
struct list_head *l, *n;
+ struct cis_cache_entry *cis;
list_for_each_safe(l, n, &s->cis_cache) {
- struct cis_cache_entry *cis = list_entry(l, struct cis_cache_entry, node);
-
+ cis = list_entry(l, struct cis_cache_entry, node);
list_del(&cis->node);
kfree(cis);
}
-
- /*
- * If there was a fake CIS, destroy that as well.
- */
- kfree(s->fake_cis);
- s->fake_cis = NULL;
}
-EXPORT_SYMBOL(destroy_cis_cache);
/*======================================================================
@@ -350,6 +345,9 @@ int verify_cis_cache(struct pcmcia_socket *s)
struct cis_cache_entry *cis;
char *buf;
+ if (s->state & SOCKET_CARDBUS)
+ return -EINVAL;
+
buf = kmalloc(256, GFP_KERNEL);
if (buf == NULL) {
dev_printk(KERN_WARNING, &s->dev,
@@ -361,12 +359,8 @@ int verify_cis_cache(struct pcmcia_socket *s)
if (len > 256)
len = 256;
-#ifdef CONFIG_CARDBUS
- if (s->state & SOCKET_CARDBUS)
- read_cb_mem(s, cis->attr, cis->addr, len, buf);
- else
-#endif
- pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf);
+
+ pcmcia_read_cis_mem(s, cis->attr, cis->addr, len, buf);
if (memcmp(buf, cis->cache, len) != 0) {
kfree(buf);
@@ -401,7 +395,6 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
memcpy(s->fake_cis, data, len);
return 0;
}
-EXPORT_SYMBOL(pcmcia_replace_cis);
/*======================================================================
@@ -425,25 +418,16 @@ int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple
{
if (!s)
return -EINVAL;
- if (!(s->state & SOCKET_PRESENT))
+
+ if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
return -ENODEV;
tuple->TupleLink = tuple->Flags = 0;
-#ifdef CONFIG_CARDBUS
- if (s->state & SOCKET_CARDBUS) {
- struct pci_dev *dev = s->cb_dev;
- u_int ptr;
- pci_bus_read_config_dword(dev->subordinate, 0, PCI_CARDBUS_CIS, &ptr);
- tuple->CISOffset = ptr & ~7;
- SPACE(tuple->Flags) = (ptr & 7);
- } else
-#endif
- {
- /* Assume presence of a LONGLINK_C to address 0 */
- tuple->CISOffset = tuple->LinkOffset = 0;
- SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
- }
- if (!(s->state & SOCKET_CARDBUS) && (s->functions > 1) &&
- !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
+
+ /* Assume presence of a LONGLINK_C to address 0 */
+ tuple->CISOffset = tuple->LinkOffset = 0;
+ SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
+
+ if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
cisdata_t req = tuple->DesiredTuple;
tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
if (pccard_get_next_tuple(s, function, tuple) == 0) {
@@ -456,7 +440,6 @@ int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple
}
return pccard_get_next_tuple(s, function, tuple);
}
-EXPORT_SYMBOL(pccard_get_first_tuple);
static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
{
@@ -479,7 +462,7 @@ static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
} else {
return -1;
}
- if (!(s->state & SOCKET_CARDBUS) && SPACE(tuple->Flags)) {
+ if (SPACE(tuple->Flags)) {
/* This is ugly, but a common CIS error is to code the long
link offset incorrectly, so we check the right spot... */
read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
@@ -505,7 +488,7 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
if (!s)
return -EINVAL;
- if (!(s->state & SOCKET_PRESENT))
+ if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
return -ENODEV;
link[1] = tuple->TupleLink;
@@ -592,7 +575,6 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_
tuple->CISOffset = ofs + 2;
return 0;
}
-EXPORT_SYMBOL(pccard_get_next_tuple);
/*====================================================================*/
@@ -616,7 +598,6 @@ int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple)
_MIN(len, tuple->TupleDataMax), tuple->TupleData);
return 0;
}
-EXPORT_SYMBOL(pccard_get_tuple_data);
/*======================================================================
@@ -1190,119 +1171,6 @@ static int parse_cftable_entry(tuple_t *tuple,
/*====================================================================*/
-#ifdef CONFIG_CARDBUS
-
-static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
-{
- u_char *p;
- if (tuple->TupleDataLen < 6)
- return -EINVAL;
- p = (u_char *)tuple->TupleData;
- bar->attr = *p;
- p += 2;
- bar->size = get_unaligned_le32(p);
- return 0;
-}
-
-static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
-{
- u_char *p;
-
- p = (u_char *)tuple->TupleData;
- if ((*p != 3) || (tuple->TupleDataLen < 6))
- return -EINVAL;
- config->last_idx = *(++p);
- p++;
- config->base = get_unaligned_le32(p);
- config->subtuples = tuple->TupleDataLen - 6;
- return 0;
-}
-
-static int parse_cftable_entry_cb(tuple_t *tuple,
- cistpl_cftable_entry_cb_t *entry)
-{
- u_char *p, *q, features;
-
- p = tuple->TupleData;
- q = p + tuple->TupleDataLen;
- entry->index = *p & 0x3f;
- entry->flags = 0;
- if (*p & 0x40)
- entry->flags |= CISTPL_CFTABLE_DEFAULT;
-
- /* Process optional features */
- if (++p == q)
- return -EINVAL;
- features = *p; p++;
-
- /* Power options */
- if ((features & 3) > 0) {
- p = parse_power(p, q, &entry->vcc);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vcc.present = 0;
- if ((features & 3) > 1) {
- p = parse_power(p, q, &entry->vpp1);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vpp1.present = 0;
- if ((features & 3) > 2) {
- p = parse_power(p, q, &entry->vpp2);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vpp2.present = 0;
-
- /* I/O window options */
- if (features & 0x08) {
- if (p == q)
- return -EINVAL;
- entry->io = *p; p++;
- } else
- entry->io = 0;
-
- /* Interrupt options */
- if (features & 0x10) {
- p = parse_irq(p, q, &entry->irq);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->irq.IRQInfo1 = 0;
-
- if (features & 0x20) {
- if (p == q)
- return -EINVAL;
- entry->mem = *p; p++;
- } else
- entry->mem = 0;
-
- /* Misc features */
- if (features & 0x80) {
- if (p == q)
- return -EINVAL;
- entry->flags |= (*p << 8);
- if (*p & 0x80) {
- if (++p == q)
- return -EINVAL;
- entry->flags |= (*p << 16);
- }
- while (*p & 0x80)
- if (++p == q)
- return -EINVAL;
- p++;
- }
-
- entry->subtuples = q-p;
-
- return 0;
-}
-
-#endif
-
-/*====================================================================*/
-
static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
{
u_char *p, *q;
@@ -1404,17 +1272,6 @@ int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
case CISTPL_DEVICE_A:
ret = parse_device(tuple, &parse->device);
break;
-#ifdef CONFIG_CARDBUS
- case CISTPL_BAR:
- ret = parse_bar(tuple, &parse->bar);
- break;
- case CISTPL_CONFIG_CB:
- ret = parse_config_cb(tuple, &parse->config);
- break;
- case CISTPL_CFTABLE_ENTRY_CB:
- ret = parse_cftable_entry_cb(tuple, &parse->cftable_entry_cb);
- break;
-#endif
case CISTPL_CHECKSUM:
ret = parse_checksum(tuple, &parse->checksum);
break;
@@ -1513,7 +1370,6 @@ done:
kfree(buf);
return ret;
}
-EXPORT_SYMBOL(pccard_read_tuple);
/**
@@ -1573,84 +1429,242 @@ next_entry:
kfree(buf);
return ret;
}
-EXPORT_SYMBOL(pccard_loop_tuple);
-
-/*======================================================================
-
- This tries to determine if a card has a sensible CIS. It returns
- the number of tuples in the CIS, or 0 if the CIS looks bad. The
- checks include making sure several critical tuples are present and
- valid; seeing if the total number of tuples is reasonable; and
- looking for tuples that use reserved codes.
-
-======================================================================*/
+/**
+ * pccard_validate_cis() - check whether card has a sensible CIS
+ * @s: the struct pcmcia_socket we are to check
+ * @info: returns the number of tuples in the (valid) CIS, or 0
+ *
+ * This tries to determine if a card has a sensible CIS. In @info, it
+ * returns the number of tuples in the CIS, or 0 if the CIS looks bad. The
+ * checks include making sure several critical tuples are present and
+ * valid; seeing if the total number of tuples is reasonable; and
+ * looking for tuples that use reserved codes.
+ *
+ * The function returns 0 on success.
+ */
int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *info)
{
- tuple_t *tuple;
- cisparse_t *p;
- unsigned int count = 0;
- int ret, reserved, dev_ok = 0, ident_ok = 0;
+ tuple_t *tuple;
+ cisparse_t *p;
+ unsigned int count = 0;
+ int ret, reserved, dev_ok = 0, ident_ok = 0;
- if (!s)
- return -EINVAL;
+ if (!s)
+ return -EINVAL;
- tuple = kmalloc(sizeof(*tuple), GFP_KERNEL);
- if (tuple == NULL) {
- dev_printk(KERN_WARNING, &s->dev, "no memory to validate CIS\n");
- return -ENOMEM;
- }
- p = kmalloc(sizeof(*p), GFP_KERNEL);
- if (p == NULL) {
- kfree(tuple);
- dev_printk(KERN_WARNING, &s->dev, "no memory to validate CIS\n");
- return -ENOMEM;
- }
+ /* We do not want to validate the CIS cache... */
+ destroy_cis_cache(s);
- count = reserved = 0;
- tuple->DesiredTuple = RETURN_FIRST_TUPLE;
- tuple->Attributes = TUPLE_RETURN_COMMON;
- ret = pccard_get_first_tuple(s, BIND_FN_ALL, tuple);
- if (ret != 0)
- goto done;
-
- /* First tuple should be DEVICE; we should really have either that
- or a CFTABLE_ENTRY of some sort */
- if ((tuple->TupleCode == CISTPL_DEVICE) ||
- (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p) == 0) ||
- (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p) == 0))
- dev_ok++;
-
- /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2
- tuple, for card identification. Certain old D-Link and Linksys
- cards have only a broken VERS_2 tuple; hence the bogus test. */
- if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) ||
- (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) ||
- (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC))
- ident_ok++;
-
- if (!dev_ok && !ident_ok)
- goto done;
-
- for (count = 1; count < MAX_TUPLES; count++) {
- ret = pccard_get_next_tuple(s, BIND_FN_ALL, tuple);
+ tuple = kmalloc(sizeof(*tuple), GFP_KERNEL);
+ if (tuple == NULL) {
+ dev_warn(&s->dev, "no memory to validate CIS\n");
+ return -ENOMEM;
+ }
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ kfree(tuple);
+ dev_warn(&s->dev, "no memory to validate CIS\n");
+ return -ENOMEM;
+ }
+
+ count = reserved = 0;
+ tuple->DesiredTuple = RETURN_FIRST_TUPLE;
+ tuple->Attributes = TUPLE_RETURN_COMMON;
+ ret = pccard_get_first_tuple(s, BIND_FN_ALL, tuple);
if (ret != 0)
- break;
- if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) ||
- ((tuple->TupleCode > 0x47) && (tuple->TupleCode < 0x80)) ||
- ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff)))
- reserved++;
- }
- if ((count == MAX_TUPLES) || (reserved > 5) ||
- ((!dev_ok || !ident_ok) && (count > 10)))
- count = 0;
+ goto done;
+
+ /* First tuple should be DEVICE; we should really have either that
+ or a CFTABLE_ENTRY of some sort */
+ if ((tuple->TupleCode == CISTPL_DEVICE) ||
+ (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY, p)) ||
+ (!pccard_read_tuple(s, BIND_FN_ALL, CISTPL_CFTABLE_ENTRY_CB, p)))
+ dev_ok++;
+
+ /* All cards should have a MANFID tuple, and/or a VERS_1 or VERS_2
+ tuple, for card identification. Certain old D-Link and Linksys
+ cards have only a broken VERS_2 tuple; hence the bogus test. */
+ if ((pccard_read_tuple(s, BIND_FN_ALL, CISTPL_MANFID, p) == 0) ||
+ (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_1, p) == 0) ||
+ (pccard_read_tuple(s, BIND_FN_ALL, CISTPL_VERS_2, p) != -ENOSPC))
+ ident_ok++;
+
+ if (!dev_ok && !ident_ok)
+ goto done;
+
+ for (count = 1; count < MAX_TUPLES; count++) {
+ ret = pccard_get_next_tuple(s, BIND_FN_ALL, tuple);
+ if (ret != 0)
+ break;
+ if (((tuple->TupleCode > 0x23) && (tuple->TupleCode < 0x40)) ||
+ ((tuple->TupleCode > 0x47) && (tuple->TupleCode < 0x80)) ||
+ ((tuple->TupleCode > 0x90) && (tuple->TupleCode < 0xff)))
+ reserved++;
+ }
+ if ((count == MAX_TUPLES) || (reserved > 5) ||
+ ((!dev_ok || !ident_ok) && (count > 10)))
+ count = 0;
+
+ ret = 0;
done:
- if (info)
- *info = count;
- kfree(tuple);
- kfree(p);
- return 0;
+ /* invalidate CIS cache on failure */
+ if (!dev_ok || !ident_ok || !count) {
+ destroy_cis_cache(s);
+ ret = -EIO;
+ }
+
+ if (info)
+ *info = count;
+ kfree(tuple);
+ kfree(p);
+ return ret;
+}
+
+
+#define to_socket(_dev) container_of(_dev, struct pcmcia_socket, dev)
+
+static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf,
+ loff_t off, size_t count)
+{
+ tuple_t tuple;
+ int status, i;
+ loff_t pointer = 0;
+ ssize_t ret = 0;
+ u_char *tuplebuffer;
+ u_char *tempbuffer;
+
+ tuplebuffer = kmalloc(sizeof(u_char) * 256, GFP_KERNEL);
+ if (!tuplebuffer)
+ return -ENOMEM;
+
+ tempbuffer = kmalloc(sizeof(u_char) * 258, GFP_KERNEL);
+ if (!tempbuffer) {
+ ret = -ENOMEM;
+ goto free_tuple;
+ }
+
+ memset(&tuple, 0, sizeof(tuple_t));
+
+ tuple.Attributes = TUPLE_RETURN_LINK | TUPLE_RETURN_COMMON;
+ tuple.DesiredTuple = RETURN_FIRST_TUPLE;
+ tuple.TupleOffset = 0;
+
+ status = pccard_get_first_tuple(s, BIND_FN_ALL, &tuple);
+ while (!status) {
+ tuple.TupleData = tuplebuffer;
+ tuple.TupleDataMax = 255;
+ memset(tuplebuffer, 0, sizeof(u_char) * 255);
+
+ status = pccard_get_tuple_data(s, &tuple);
+ if (status)
+ break;
+
+ if (off < (pointer + 2 + tuple.TupleDataLen)) {
+ tempbuffer[0] = tuple.TupleCode & 0xff;
+ tempbuffer[1] = tuple.TupleLink & 0xff;
+ for (i = 0; i < tuple.TupleDataLen; i++)
+ tempbuffer[i + 2] = tuplebuffer[i] & 0xff;
+
+ for (i = 0; i < (2 + tuple.TupleDataLen); i++) {
+ if (((i + pointer) >= off) &&
+ (i + pointer) < (off + count)) {
+ buf[ret] = tempbuffer[i];
+ ret++;
+ }
+ }
+ }
+
+ pointer += 2 + tuple.TupleDataLen;
+
+ if (pointer >= (off + count))
+ break;
+
+ if (tuple.TupleCode == CISTPL_END)
+ break;
+ status = pccard_get_next_tuple(s, BIND_FN_ALL, &tuple);
+ }
+
+ kfree(tempbuffer);
+ free_tuple:
+ kfree(tuplebuffer);
+
+ return ret;
}
-EXPORT_SYMBOL(pccard_validate_cis);
+
+
+static ssize_t pccard_show_cis(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ unsigned int size = 0x200;
+
+ if (off >= size)
+ count = 0;
+ else {
+ struct pcmcia_socket *s;
+ unsigned int chains;
+
+ if (off + count > size)
+ count = size - off;
+
+ s = to_socket(container_of(kobj, struct device, kobj));
+
+ if (!(s->state & SOCKET_PRESENT))
+ return -ENODEV;
+ if (pccard_validate_cis(s, &chains))
+ return -EIO;
+ if (!chains)
+ return -ENODATA;
+
+ count = pccard_extract_cis(s, buf, off, count);
+ }
+
+ return count;
+}
+
+
+static ssize_t pccard_store_cis(struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct pcmcia_socket *s;
+ int error;
+
+ s = to_socket(container_of(kobj, struct device, kobj));
+
+ if (off)
+ return -EINVAL;
+
+ if (count >= CISTPL_MAX_CIS_SIZE)
+ return -EINVAL;
+
+ if (!(s->state & SOCKET_PRESENT))
+ return -ENODEV;
+
+ error = pcmcia_replace_cis(s, buf, count);
+ if (error)
+ return -EIO;
+
+ mutex_lock(&s->skt_mutex);
+ if ((s->callback) && (s->state & SOCKET_PRESENT) &&
+ !(s->state & SOCKET_CARDBUS)) {
+ if (try_module_get(s->callback->owner)) {
+ s->callback->requery(s, 1);
+ module_put(s->callback->owner);
+ }
+ }
+ mutex_unlock(&s->skt_mutex);
+
+ return count;
+}
+
+
+struct bin_attribute pccard_cis_attr = {
+ .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
+ .size = 0x200,
+ .read = pccard_show_cis,
+ .write = pccard_store_cis,
+};
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 6d6f82b38a68..444d45a77fca 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -140,19 +140,13 @@ struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt)
struct device *dev = get_device(&skt->dev);
if (!dev)
return NULL;
- skt = dev_get_drvdata(dev);
- if (!try_module_get(skt->owner)) {
- put_device(&skt->dev);
- return NULL;
- }
- return skt;
+ return dev_get_drvdata(dev);
}
EXPORT_SYMBOL(pcmcia_get_socket);
void pcmcia_put_socket(struct pcmcia_socket *skt)
{
- module_put(skt->owner);
put_device(&skt->dev);
}
EXPORT_SYMBOL(pcmcia_put_socket);
@@ -283,15 +277,14 @@ void pcmcia_unregister_socket(struct pcmcia_socket *socket)
if (socket->thread)
kthread_stop(socket->thread);
- release_cis_mem(socket);
-
/* remove from our own list */
down_write(&pcmcia_socket_list_rwsem);
list_del(&socket->socket_list);
up_write(&pcmcia_socket_list_rwsem);
/* wait for sysfs to drop all references */
- release_resource_db(socket);
+ if (socket->resource_ops->exit)
+ socket->resource_ops->exit(socket);
wait_for_completion(&socket->socket_released);
} /* pcmcia_unregister_socket */
EXPORT_SYMBOL(pcmcia_unregister_socket);
@@ -328,7 +321,7 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
{
int ret;
- if (s->state & SOCKET_CARDBUS)
+ if ((s->state & SOCKET_CARDBUS) && (event != CS_EVENT_CARD_REMOVAL))
return 0;
dev_dbg(&s->dev, "send_event(event %d, pri %d, callback 0x%p)\n",
@@ -346,13 +339,6 @@ static int send_event(struct pcmcia_socket *s, event_t event, int priority)
return ret;
}
-static void socket_remove_drivers(struct pcmcia_socket *skt)
-{
- dev_dbg(&skt->dev, "remove_drivers\n");
-
- send_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
-}
-
static int socket_reset(struct pcmcia_socket *skt)
{
int status, i;
@@ -395,7 +381,7 @@ static void socket_shutdown(struct pcmcia_socket *s)
dev_dbg(&s->dev, "shutdown\n");
- socket_remove_drivers(s);
+ send_event(s, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
s->state &= SOCKET_INUSE | SOCKET_PRESENT;
msleep(shutdown_delay * 10);
s->state &= SOCKET_INUSE;
@@ -406,7 +392,8 @@ static void socket_shutdown(struct pcmcia_socket *s)
s->ops->set_socket(s, &s->socket);
s->irq.AssignedIRQ = s->irq.Config = 0;
s->lock_count = 0;
- destroy_cis_cache(s);
+ kfree(s->fake_cis);
+ s->fake_cis = NULL;
#ifdef CONFIG_CARDBUS
cb_free(s);
#endif
@@ -421,7 +408,7 @@ static void socket_shutdown(struct pcmcia_socket *s)
"*** DANGER *** unable to remove socket power\n");
}
- cs_socket_put(s);
+ s->state &= ~SOCKET_INUSE;
}
static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
@@ -460,7 +447,8 @@ static int socket_setup(struct pcmcia_socket *skt, int initial_delay)
return -EINVAL;
}
skt->state |= SOCKET_CARDBUS;
- }
+ } else
+ skt->state &= ~SOCKET_CARDBUS;
/*
* Decode the card voltage requirements, and apply power to the card.
@@ -509,8 +497,8 @@ static int socket_insert(struct pcmcia_socket *skt)
dev_dbg(&skt->dev, "insert\n");
- if (!cs_socket_get(skt))
- return -ENODEV;
+ WARN_ON(skt->state & SOCKET_INUSE);
+ skt->state |= SOCKET_INUSE;
ret = socket_setup(skt, setup_delay);
if (ret == 0) {
@@ -542,6 +530,8 @@ static int socket_suspend(struct pcmcia_socket *skt)
if (skt->state & SOCKET_SUSPEND)
return -EBUSY;
+ skt->suspended_state = skt->state;
+
send_event(skt, CS_EVENT_PM_SUSPEND, CS_EVENT_PRI_LOW);
skt->socket = dead_socket;
skt->ops->set_socket(skt, &skt->socket);
@@ -564,36 +554,37 @@ static int socket_early_resume(struct pcmcia_socket *skt)
static int socket_late_resume(struct pcmcia_socket *skt)
{
- if (!(skt->state & SOCKET_PRESENT)) {
- skt->state &= ~SOCKET_SUSPEND;
+ skt->state &= ~SOCKET_SUSPEND;
+
+ if (!(skt->state & SOCKET_PRESENT))
return socket_insert(skt);
+
+ if (skt->resume_status) {
+ socket_shutdown(skt);
+ return 0;
}
- if (skt->resume_status == 0) {
- /*
- * FIXME: need a better check here for cardbus cards.
- */
- if (verify_cis_cache(skt) != 0) {
- dev_dbg(&skt->dev, "cis mismatch - different card\n");
- socket_remove_drivers(skt);
- destroy_cis_cache(skt);
- /*
- * Workaround: give DS time to schedule removal.
- * Remove me once the 100ms delay is eliminated
- * in ds.c
- */
- msleep(200);
- send_event(skt, CS_EVENT_CARD_INSERTION, CS_EVENT_PRI_LOW);
- } else {
- dev_dbg(&skt->dev, "cis matches cache\n");
- send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
- }
- } else {
+ if (skt->suspended_state != skt->state) {
+ dev_dbg(&skt->dev,
+ "suspend state 0x%x != resume state 0x%x\n",
+ skt->suspended_state, skt->state);
+
socket_shutdown(skt);
+ return socket_insert(skt);
}
- skt->state &= ~SOCKET_SUSPEND;
+#ifdef CONFIG_CARDBUS
+ if (skt->state & SOCKET_CARDBUS) {
+ /* We can't be sure the CardBus card is the same
+ * as the one previously inserted. Therefore, remove
+ * and re-add... */
+ cb_free(skt);
+ cb_alloc(skt);
+ return 0;
+ }
+#endif
+ send_event(skt, CS_EVENT_PM_RESUME, CS_EVENT_PRI_LOW);
return 0;
}
@@ -707,6 +698,10 @@ static int pccardd(void *__skt)
/* make sure we are running before we exit */
set_current_state(TASK_RUNNING);
+ /* shut down socket, if a device is still present */
+ if (skt->state & SOCKET_PRESENT)
+ socket_remove(skt);
+
/* remove from the device core */
pccard_sysfs_remove_socket(&skt->dev);
device_unregister(&skt->dev);
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 3bc02d53a3a3..9625bf20f874 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -87,23 +87,9 @@ struct pccard_resource_ops {
#define SOCKET_CARDBUS 0x8000
#define SOCKET_CARDBUS_CONFIG 0x10000
-static inline int cs_socket_get(struct pcmcia_socket *skt)
-{
- int ret;
-
- WARN_ON(skt->state & SOCKET_INUSE);
-
- ret = try_module_get(skt->owner);
- if (ret)
- skt->state |= SOCKET_INUSE;
- return ret;
-}
-
static inline void cs_socket_put(struct pcmcia_socket *skt)
{
if (skt->state & SOCKET_INUSE) {
- skt->state &= ~SOCKET_INUSE;
- module_put(skt->owner);
}
}
@@ -112,12 +98,6 @@ static inline void cs_socket_put(struct pcmcia_socket *skt)
* Stuff internal to module "pcmcia_core":
*/
-/* cistpl.c */
-int verify_cis_cache(struct pcmcia_socket *s);
-
-/* rsrc_mgr.c */
-void release_resource_db(struct pcmcia_socket *s);
-
/* socket_sysfs.c */
extern int pccard_sysfs_add_socket(struct device *dev);
extern void pccard_sysfs_remove_socket(struct device *dev);
@@ -125,8 +105,6 @@ extern void pccard_sysfs_remove_socket(struct device *dev);
/* cardbus.c */
int cb_alloc(struct pcmcia_socket *s);
void cb_free(struct pcmcia_socket *s);
-int read_cb_mem(struct pcmcia_socket *s, int space, u_int addr, u_int len,
- void *ptr);
@@ -139,6 +117,7 @@ struct pcmcia_callback{
int (*event) (struct pcmcia_socket *s,
event_t event, int priority);
void (*requery) (struct pcmcia_socket *s, int new_cis);
+ int (*validate) (struct pcmcia_socket *s, unsigned int *i);
int (*suspend) (struct pcmcia_socket *s);
int (*resume) (struct pcmcia_socket *s);
};
@@ -160,7 +139,25 @@ int pcmcia_insert_card(struct pcmcia_socket *skt);
struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt);
void pcmcia_put_socket(struct pcmcia_socket *skt);
+/*
+ * Stuff internal to module "pcmcia".
+ */
+/* ds.c */
+extern struct bus_type pcmcia_bus_type;
+
+/* pcmcia_resource.c */
+extern int pcmcia_release_configuration(struct pcmcia_device *p_dev);
+extern int pcmcia_validate_mem(struct pcmcia_socket *s);
+extern struct resource *pcmcia_find_mem_region(u_long base,
+ u_long num,
+ u_long align,
+ int low,
+ struct pcmcia_socket *s);
+
+
/* cistpl.c */
+extern struct bin_attribute pccard_cis_attr;
+
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr,
u_int addr, u_int len, void *ptr);
void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr,
@@ -172,8 +169,8 @@ int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
int pcmcia_replace_cis(struct pcmcia_socket *s,
const u8 *data, const size_t len);
int pccard_validate_cis(struct pcmcia_socket *s, unsigned int *count);
+int verify_cis_cache(struct pcmcia_socket *s);
-/* loop over CIS entries */
int pccard_loop_tuple(struct pcmcia_socket *s, unsigned int function,
cisdata_t code, cisparse_t *parse, void *priv_data,
int (*loop_tuple) (tuple_t *tuple,
@@ -189,31 +186,6 @@ int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple);
-/* rsrc_mgr.c */
-int pcmcia_validate_mem(struct pcmcia_socket *s);
-struct resource *pcmcia_find_io_region(unsigned long base,
- int num,
- unsigned long align,
- struct pcmcia_socket *s);
-int pcmcia_adjust_io_region(struct resource *res,
- unsigned long r_start,
- unsigned long r_end,
- struct pcmcia_socket *s);
-struct resource *pcmcia_find_mem_region(u_long base,
- u_long num,
- u_long align,
- int low,
- struct pcmcia_socket *s);
-
-/*
- * Stuff internal to module "pcmcia".
- */
-/* ds.c */
-extern struct bus_type pcmcia_bus_type;
-
-/* pcmcia_resource.c */
-extern int pcmcia_release_configuration(struct pcmcia_device *p_dev);
-
#ifdef CONFIG_PCMCIA_IOCTL
/* ds.c */
extern spinlock_t pcmcia_dev_list_lock;
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
new file mode 100644
index 000000000000..b35b72b0d5b5
--- /dev/null
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -0,0 +1,630 @@
+/*
+ * PCMCIA socket code for the Alchemy Db1xxx/Pb1xxx boards.
+ *
+ * Copyright (c) 2009 Manuel Lauss <manuel.lauss@gmail.com>
+ *
+ */
+
+/* This is a fairly generic PCMCIA socket driver suitable for the
+ * following Alchemy Development boards:
+ * Db1000, Db/Pb1500, Db/Pb1100, Db/Pb1550, Db/Pb1200.
+ *
+ * The Db1000 is used as a reference: Per-socket card-, carddetect- and
+ * statuschange IRQs connected to SoC GPIOs, control and status register
+ * bits arranged in per-socket groups in an external PLD. All boards
+ * listed here use this layout, including bit positions and meanings.
+ * Of course there are exceptions in later boards:
+ *
+ * - Pb1100/Pb1500: single socket only; voltage key bits VS are
+ * at STATUS[5:4] (instead of STATUS[1:0]).
+ * - Au1200-based: additional card-eject irqs, irqs not gpios!
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/spinlock.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/ss.h>
+
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#define MEM_MAP_SIZE 0x400000
+#define IO_MAP_SIZE 0x1000
+
+struct db1x_pcmcia_sock {
+ struct pcmcia_socket socket;
+ int nr; /* socket number */
+ void *virt_io;
+
+ /* the "pseudo" addresses of the PCMCIA space. */
+ unsigned long phys_io;
+ unsigned long phys_attr;
+ unsigned long phys_mem;
+
+ /* previous flags for set_socket() */
+ unsigned int old_flags;
+
+ /* interrupt sources: linux irq numbers! */
+ int insert_irq; /* default carddetect irq */
+ int stschg_irq; /* card-status-change irq */
+ int card_irq; /* card irq */
+ int eject_irq; /* db1200/pb1200 have these */
+
+#define BOARD_TYPE_DEFAULT 0 /* most boards */
+#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
+#define BOARD_TYPE_PB1100 2 /* VS bits slightly different */
+ int board_type;
+};
+
+#define to_db1x_socket(x) container_of(x, struct db1x_pcmcia_sock, socket)
+
+/* DB/PB1200: check CPLD SIGSTATUS register bit 10/12 */
+static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
+{
+ unsigned short sigstat;
+
+ sigstat = bcsr_read(BCSR_SIGSTAT);
+ return sigstat & 1 << (8 + 2 * sock->nr);
+}
+
+/* carddetect gpio: low-active */
+static int db1000_card_inserted(struct db1x_pcmcia_sock *sock)
+{
+ return !gpio_get_value(irq_to_gpio(sock->insert_irq));
+}
+
+static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
+{
+ switch (sock->board_type) {
+ case BOARD_TYPE_DB1200:
+ return db1200_card_inserted(sock);
+ default:
+ return db1000_card_inserted(sock);
+ }
+}
+
+/* STSCHG tends to bounce heavily when cards are inserted/ejected.
+ * To avoid this, the interrupt is normally disabled and only enabled
+ * after reset to a card has been de-asserted.
+ */
+static inline void set_stschg(struct db1x_pcmcia_sock *sock, int en)
+{
+ if (sock->stschg_irq != -1) {
+ if (en)
+ enable_irq(sock->stschg_irq);
+ else
+ disable_irq(sock->stschg_irq);
+ }
+}
+
+static irqreturn_t db1000_pcmcia_cdirq(int irq, void *data)
+{
+ struct db1x_pcmcia_sock *sock = data;
+
+ pcmcia_parse_events(&sock->socket, SS_DETECT);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t db1000_pcmcia_stschgirq(int irq, void *data)
+{
+ struct db1x_pcmcia_sock *sock = data;
+
+ pcmcia_parse_events(&sock->socket, SS_STSCHG);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t db1200_pcmcia_cdirq(int irq, void *data)
+{
+ struct db1x_pcmcia_sock *sock = data;
+
+ /* Db/Pb1200 have separate per-socket insertion and ejection
+ * interrupts which stay asserted as long as the card is
+ * inserted/missing. The one which caused us to be called
+ * needs to be disabled and the other one enabled.
+ */
+ if (irq == sock->insert_irq) {
+ disable_irq_nosync(sock->insert_irq);
+ enable_irq(sock->eject_irq);
+ } else {
+ disable_irq_nosync(sock->eject_irq);
+ enable_irq(sock->insert_irq);
+ }
+
+ pcmcia_parse_events(&sock->socket, SS_DETECT);
+
+ return IRQ_HANDLED;
+}
+
+static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
+{
+ int ret;
+ unsigned long flags;
+
+ if (sock->stschg_irq != -1) {
+ ret = request_irq(sock->stschg_irq, db1000_pcmcia_stschgirq,
+ 0, "pcmcia_stschg", sock);
+ if (ret)
+ return ret;
+ }
+
+ /* Db/Pb1200 have separate per-socket insertion and ejection
+ * interrupts, which should show edge behaviour but don't.
+ * So interrupts are disabled until both insertion and
+ * ejection handler have been registered and the currently
+ * active one disabled.
+ */
+ if (sock->board_type == BOARD_TYPE_DB1200) {
+ local_irq_save(flags);
+
+ ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
+ IRQF_DISABLED, "pcmcia_insert", sock);
+ if (ret)
+ goto out1;
+
+ ret = request_irq(sock->eject_irq, db1200_pcmcia_cdirq,
+ IRQF_DISABLED, "pcmcia_eject", sock);
+ if (ret) {
+ free_irq(sock->insert_irq, sock);
+ local_irq_restore(flags);
+ goto out1;
+ }
+
+ /* disable the currently active one */
+ if (db1200_card_inserted(sock))
+ disable_irq_nosync(sock->insert_irq);
+ else
+ disable_irq_nosync(sock->eject_irq);
+
+ local_irq_restore(flags);
+ } else {
+ /* all other (older) Db1x00 boards use a GPIO to show
+ * card detection status: use both-edge triggers.
+ */
+ set_irq_type(sock->insert_irq, IRQ_TYPE_EDGE_BOTH);
+ ret = request_irq(sock->insert_irq, db1000_pcmcia_cdirq,
+ 0, "pcmcia_carddetect", sock);
+
+ if (ret)
+ goto out1;
+ }
+
+ return 0; /* all done */
+
+out1:
+ if (sock->stschg_irq != -1)
+ free_irq(sock->stschg_irq, sock);
+
+ return ret;
+}
+
+static void db1x_pcmcia_free_irqs(struct db1x_pcmcia_sock *sock)
+{
+ if (sock->stschg_irq != -1)
+ free_irq(sock->stschg_irq, sock);
+
+ free_irq(sock->insert_irq, sock);
+ if (sock->eject_irq != -1)
+ free_irq(sock->eject_irq, sock);
+}
+
+/*
+ * configure a PCMCIA socket on the Db1x00 series of boards (and
+ * compatibles).
+ *
+ * 2 external registers are involved:
+ * pcmcia_status (offset 0x04): bits [0:1/2:3]: read card voltage id
+ * pcmcia_control(offset 0x10):
+ * bits[0:1] set vcc for card
+ * bits[2:3] set vpp for card
+ * bit 4: enable data buffers
+ * bit 7: reset# for card
+ * add 8 for second socket.
+ */
+static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
+ struct socket_state_t *state)
+{
+ struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
+ unsigned short cr_clr, cr_set;
+ unsigned int changed;
+ int v, p, ret;
+
+ /* card voltage setup */
+ cr_clr = (0xf << (sock->nr * 8)); /* clear voltage settings */
+ cr_set = 0;
+ v = p = ret = 0;
+
+ switch (state->Vcc) {
+ case 50:
+ ++v;
+ case 33:
+ ++v;
+ case 0:
+ break;
+ default:
+ printk(KERN_INFO "pcmcia%d unsupported Vcc %d\n",
+ sock->nr, state->Vcc);
+ }
+
+ switch (state->Vpp) {
+ case 12:
+ ++p;
+ case 33:
+ case 50:
+ ++p;
+ case 0:
+ break;
+ default:
+ printk(KERN_INFO "pcmcia%d unsupported Vpp %d\n",
+ sock->nr, state->Vpp);
+ }
+
+ /* sanity check: Vpp must be 0, 12, or Vcc */
+ if (((state->Vcc == 33) && (state->Vpp == 50)) ||
+ ((state->Vcc == 50) && (state->Vpp == 33))) {
+ printk(KERN_INFO "pcmcia%d bad Vcc/Vpp combo (%d %d)\n",
+ sock->nr, state->Vcc, state->Vpp);
+ v = p = 0;
+ ret = -EINVAL;
+ }
+
+ /* create new voltage code */
+ cr_set |= ((v << 2) | p) << (sock->nr * 8);
+
+ changed = state->flags ^ sock->old_flags;
+
+ if (changed & SS_RESET) {
+ if (state->flags & SS_RESET) {
+ set_stschg(sock, 0);
+ /* assert reset, disable io buffers */
+ cr_clr |= (1 << (7 + (sock->nr * 8)));
+ cr_clr |= (1 << (4 + (sock->nr * 8)));
+ } else {
+ /* de-assert reset, enable io buffers */
+ cr_set |= 1 << (7 + (sock->nr * 8));
+ cr_set |= 1 << (4 + (sock->nr * 8));
+ }
+ }
+
+ /* update PCMCIA configuration */
+ bcsr_mod(BCSR_PCMCIA, cr_clr, cr_set);
+
+ sock->old_flags = state->flags;
+
+ /* reset was taken away: give card time to initialize properly */
+ if ((changed & SS_RESET) && !(state->flags & SS_RESET)) {
+ msleep(500);
+ set_stschg(sock, 1);
+ }
+
+ return ret;
+}
+
+/* VCC bits at [3:2]/[11:10] */
+#define GET_VCC(cr, socknr) \
+ ((((cr) >> 2) >> ((socknr) * 8)) & 3)
+
+/* VS bits at [0:1]/[3:2] */
+#define GET_VS(sr, socknr) \
+ (((sr) >> (2 * (socknr))) & 3)
+
+/* reset bits at [7]/[15] */
+#define GET_RESET(cr, socknr) \
+ ((cr) & (1 << (7 + (8 * (socknr)))))
+
+static int db1x_pcmcia_get_status(struct pcmcia_socket *skt,
+ unsigned int *value)
+{
+ struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
+ unsigned short cr, sr;
+ unsigned int status;
+
+ status = db1x_card_inserted(sock) ? SS_DETECT : 0;
+
+ cr = bcsr_read(BCSR_PCMCIA);
+ sr = bcsr_read(BCSR_STATUS);
+
+ /* PB1100/PB1500: voltage key bits are at [5:4] */
+ if (sock->board_type == BOARD_TYPE_PB1100)
+ sr >>= 4;
+
+ /* determine card type */
+ switch (GET_VS(sr, sock->nr)) {
+ case 0:
+ case 2:
+ status |= SS_3VCARD; /* 3V card */
+ case 3:
+ break; /* 5V card: set nothing */
+ default:
+ status |= SS_XVCARD; /* treated as unsupported in core */
+ }
+
+ /* if Vcc is not zero, we have applied power to a card */
+ status |= GET_VCC(cr, sock->nr) ? SS_POWERON : 0;
+
+ /* reset de-asserted? then we're ready */
+ status |= (GET_RESET(cr, sock->nr)) ? SS_READY : SS_RESET;
+
+ *value = status;
+
+ return 0;
+}
+
+static int db1x_pcmcia_sock_init(struct pcmcia_socket *skt)
+{
+ return 0;
+}
+
+static int db1x_pcmcia_sock_suspend(struct pcmcia_socket *skt)
+{
+ return 0;
+}
+
+static int au1x00_pcmcia_set_io_map(struct pcmcia_socket *skt,
+ struct pccard_io_map *map)
+{
+ struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
+
+ map->start = (u32)sock->virt_io;
+ map->stop = map->start + IO_MAP_SIZE;
+
+ return 0;
+}
+
+static int au1x00_pcmcia_set_mem_map(struct pcmcia_socket *skt,
+ struct pccard_mem_map *map)
+{
+ struct db1x_pcmcia_sock *sock = to_db1x_socket(skt);
+
+ if (map->flags & MAP_ATTRIB)
+ map->static_start = sock->phys_attr + map->card_start;
+ else
+ map->static_start = sock->phys_mem + map->card_start;
+
+ return 0;
+}
+
+static struct pccard_operations db1x_pcmcia_operations = {
+ .init = db1x_pcmcia_sock_init,
+ .suspend = db1x_pcmcia_sock_suspend,
+ .get_status = db1x_pcmcia_get_status,
+ .set_socket = db1x_pcmcia_configure,
+ .set_io_map = au1x00_pcmcia_set_io_map,
+ .set_mem_map = au1x00_pcmcia_set_mem_map,
+};
+
+static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
+{
+ struct db1x_pcmcia_sock *sock;
+ struct resource *r;
+ phys_t physio;
+ int ret, bid;
+
+ sock = kzalloc(sizeof(struct db1x_pcmcia_sock), GFP_KERNEL);
+ if (!sock)
+ return -ENOMEM;
+
+ sock->nr = pdev->id;
+
+ bid = BCSR_WHOAMI_BOARD(bcsr_read(BCSR_WHOAMI));
+ switch (bid) {
+ case BCSR_WHOAMI_PB1500:
+ case BCSR_WHOAMI_PB1500R2:
+ case BCSR_WHOAMI_PB1100:
+ sock->board_type = BOARD_TYPE_PB1100;
+ break;
+ case BCSR_WHOAMI_DB1000 ... BCSR_WHOAMI_PB1550_SDR:
+ sock->board_type = BOARD_TYPE_DEFAULT;
+ break;
+ case BCSR_WHOAMI_PB1200 ... BCSR_WHOAMI_DB1200:
+ sock->board_type = BOARD_TYPE_DB1200;
+ break;
+ default:
+ printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid);
+ ret = -ENODEV;
+ goto out0;
+ };
+
+ /*
+ * gather resources necessary and optional nice-to-haves to
+ * operate a socket:
+ * This includes IRQs for Carddetection/ejection, the card
+ * itself and optional status change detection.
+ * Also, the memory areas covered by a socket. For these
+ * we require the 32bit "pseudo" addresses (see the au1000.h
+ * header for more information).
+ */
+
+ /* card: irq assigned to the card itself. */
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card");
+ sock->card_irq = r ? r->start : 0;
+
+ /* insert: irq which triggers on card insertion/ejection */
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert");
+ sock->insert_irq = r ? r->start : -1;
+
+ /* stschg: irq which trigger on card status change (optional) */
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg");
+ sock->stschg_irq = r ? r->start : -1;
+
+ /* eject: irq which triggers on ejection (DB1200/PB1200 only) */
+ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "eject");
+ sock->eject_irq = r ? r->start : -1;
+
+ ret = -ENODEV;
+
+ /*
+ * pseudo-attr: The 32bit address of the PCMCIA attribute space
+ * for this socket (usually the 36bit address shifted 4 to the
+ * right).
+ */
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pseudo-attr");
+ if (!r) {
+ printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n",
+ sock->nr);
+ goto out0;
+ }
+ sock->phys_attr = r->start;
+
+ /*
+ * pseudo-mem: The 32bit address of the PCMCIA memory space for
+ * this socket (usually the 36bit address shifted 4 to the right)
+ */
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pseudo-mem");
+ if (!r) {
+ printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n",
+ sock->nr);
+ goto out0;
+ }
+ sock->phys_mem = r->start;
+
+ /*
+ * pseudo-io: The 32bit address of the PCMCIA IO space for this
+ * socket (usually the 36bit address shifted 4 to the right).
+ */
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pseudo-io");
+ if (!r) {
+ printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n",
+ sock->nr);
+ goto out0;
+ }
+ sock->phys_io = r->start;
+
+
+ /* IO: we must remap the full 36bit address (for reference see
+ * alchemy/common/setup.c::__fixup_bigphys_addr())
+ */
+ physio = ((phys_t)sock->phys_io) << 4;
+
+ /*
+ * PCMCIA client drivers use the inb/outb macros to access
+ * the IO registers. Since mips_io_port_base is added
+ * to the access address of the mips implementation of
+ * inb/outb, we need to subtract it here because we want
+ * to access the I/O or MEM address directly, without
+ * going through this "mips_io_port_base" mechanism.
+ */
+ sock->virt_io = (void *)(ioremap(physio, IO_MAP_SIZE) -
+ mips_io_port_base);
+
+ if (!sock->virt_io) {
+ printk(KERN_ERR "pcmcia%d: cannot remap IO area\n",
+ sock->nr);
+ ret = -ENOMEM;
+ goto out0;
+ }
+
+ sock->socket.ops = &db1x_pcmcia_operations;
+ sock->socket.owner = THIS_MODULE;
+ sock->socket.pci_irq = sock->card_irq;
+ sock->socket.features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
+ sock->socket.map_size = MEM_MAP_SIZE;
+ sock->socket.io_offset = (unsigned long)sock->virt_io;
+ sock->socket.dev.parent = &pdev->dev;
+ sock->socket.resource_ops = &pccard_static_ops;
+
+ platform_set_drvdata(pdev, sock);
+
+ ret = db1x_pcmcia_setup_irqs(sock);
+ if (ret) {
+ printk(KERN_ERR "pcmcia%d cannot setup interrupts\n",
+ sock->nr);
+ goto out1;
+ }
+
+ set_stschg(sock, 0);
+
+ ret = pcmcia_register_socket(&sock->socket);
+ if (ret) {
+ printk(KERN_ERR "pcmcia%d failed to register\n", sock->nr);
+ goto out2;
+ }
+
+ printk(KERN_INFO "Alchemy Db/Pb1xxx pcmcia%d @ io/attr/mem %08lx"
+ "(%p) %08lx %08lx card/insert/stschg/eject irqs @ %d "
+ "%d %d %d\n", sock->nr, sock->phys_io, sock->virt_io,
+ sock->phys_attr, sock->phys_mem, sock->card_irq,
+ sock->insert_irq, sock->stschg_irq, sock->eject_irq);
+
+ return 0;
+
+out2:
+ db1x_pcmcia_free_irqs(sock);
+out1:
+ iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
+out0:
+ kfree(sock);
+ return ret;
+}
+
+static int __devexit db1x_pcmcia_socket_remove(struct platform_device *pdev)
+{
+ struct db1x_pcmcia_sock *sock = platform_get_drvdata(pdev);
+
+ db1x_pcmcia_free_irqs(sock);
+ pcmcia_unregister_socket(&sock->socket);
+ iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
+ kfree(sock);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int db1x_pcmcia_suspend(struct device *dev)
+{
+ return pcmcia_socket_dev_suspend(dev);
+}
+
+static int db1x_pcmcia_resume(struct device *dev)
+{
+ return pcmcia_socket_dev_resume(dev);
+}
+
+static struct dev_pm_ops db1x_pcmcia_pmops = {
+ .resume = db1x_pcmcia_resume,
+ .suspend = db1x_pcmcia_suspend,
+ .thaw = db1x_pcmcia_resume,
+ .freeze = db1x_pcmcia_suspend,
+};
+
+#define DB1XXX_SS_PMOPS &db1x_pcmcia_pmops
+
+#else
+
+#define DB1XXX_SS_PMOPS NULL
+
+#endif
+
+static struct platform_driver db1x_pcmcia_socket_driver = {
+ .driver = {
+ .name = "db1xxx_pcmcia",
+ .owner = THIS_MODULE,
+ .pm = DB1XXX_SS_PMOPS
+ },
+ .probe = db1x_pcmcia_socket_probe,
+ .remove = __devexit_p(db1x_pcmcia_socket_remove),
+};
+
+int __init db1x_pcmcia_socket_load(void)
+{
+ return platform_driver_register(&db1x_pcmcia_socket_driver);
+}
+
+void __exit db1x_pcmcia_socket_unload(void)
+{
+ platform_driver_unregister(&db1x_pcmcia_socket_driver);
+}
+
+module_init(db1x_pcmcia_socket_load);
+module_exit(db1x_pcmcia_socket_unload);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCMCIA Socket Services for Alchemy Db/Pb1x00 boards");
+MODULE_AUTHOR("Manuel Lauss");
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 1a4a3c49cc15..c8ae27b42466 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -970,13 +970,14 @@ static int runtime_suspend(struct device *dev)
return rc;
}
-static void runtime_resume(struct device *dev)
+static int runtime_resume(struct device *dev)
{
int rc;
down(&dev->sem);
rc = pcmcia_dev_resume(dev);
up(&dev->sem);
+ return rc;
}
/************************ per-device sysfs output ***************************/
@@ -1027,7 +1028,7 @@ static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute
if ((!p_dev->suspended) && !strncmp(buf, "off", 3))
ret = runtime_suspend(dev);
else if (p_dev->suspended && !strncmp(buf, "on", 2))
- runtime_resume(dev);
+ ret = runtime_resume(dev);
return ret ? ret : count;
}
@@ -1240,10 +1241,12 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
s->pcmcia_state.present = 0;
pcmcia_card_remove(skt, NULL);
handle_event(skt, event);
+ destroy_cis_cache(s);
break;
case CS_EVENT_CARD_INSERTION:
s->pcmcia_state.present = 1;
+ destroy_cis_cache(s); /* to be on the safe side... */
pcmcia_card_add(skt);
handle_event(skt, event);
break;
@@ -1251,8 +1254,22 @@ static int ds_event(struct pcmcia_socket *skt, event_t event, int priority)
case CS_EVENT_EJECTION_REQUEST:
break;
- case CS_EVENT_PM_SUSPEND:
case CS_EVENT_PM_RESUME:
+ if (verify_cis_cache(skt) != 0) {
+ dev_dbg(&skt->dev, "cis mismatch - different card\n");
+ /* first, remove the card */
+ ds_event(skt, CS_EVENT_CARD_REMOVAL, CS_EVENT_PRI_HIGH);
+ destroy_cis_cache(skt);
+ kfree(skt->fake_cis);
+ skt->fake_cis = NULL;
+ /* now, add the new card */
+ ds_event(skt, CS_EVENT_CARD_INSERTION,
+ CS_EVENT_PRI_LOW);
+ }
+ handle_event(skt, event);
+ break;
+
+ case CS_EVENT_PM_SUSPEND:
case CS_EVENT_RESET_PHYSICAL:
case CS_EVENT_CARD_RESET:
default:
@@ -1296,6 +1313,7 @@ static struct pcmcia_callback pcmcia_bus_callback = {
.owner = THIS_MODULE,
.event = ds_event,
.requery = pcmcia_bus_rescan,
+ .validate = pccard_validate_cis,
.suspend = pcmcia_bus_suspend,
.resume = pcmcia_bus_resume,
};
@@ -1319,6 +1337,13 @@ static int __devinit pcmcia_bus_add_socket(struct device *dev,
*/
msleep(250);
+ ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
+ if (ret) {
+ dev_printk(KERN_ERR, dev, "PCMCIA registration failed\n");
+ pcmcia_put_socket(socket);
+ return ret;
+ }
+
#ifdef CONFIG_PCMCIA_IOCTL
init_waitqueue_head(&socket->queue);
#endif
@@ -1353,6 +1378,10 @@ static void pcmcia_bus_remove_socket(struct device *dev,
pcmcia_card_remove(socket, NULL);
mutex_unlock(&socket->skt_mutex);
+ release_cis_mem(socket);
+
+ sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
+
pcmcia_put_socket(socket);
return;
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 26a621c9e2fc..0ece2cd4a85e 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -764,7 +764,7 @@ static int __init init_m32r_pcc(void)
for (i = 0 ; i < pcc_sockets ; i++) {
socket[i].socket.dev.parent = &pcc_device.dev;
socket[i].socket.ops = &pcc_operations;
- socket[i].socket.resource_ops = &pccard_nonstatic_ops;
+ socket[i].socket.resource_ops = &pccard_static_ops;
socket[i].socket.owner = THIS_MODULE;
socket[i].number = i;
ret = pcmcia_register_socket(&socket[i].socket);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 7f79c4e169ae..3a1fe3ab2cd4 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1233,7 +1233,7 @@ static int __init m8xx_probe(struct of_device *ofdev,
socket[i].socket.io_offset = 0;
socket[i].socket.pci_irq = pcmcia_schlvl;
socket[i].socket.ops = &m8xx_services;
- socket[i].socket.resource_ops = &pccard_nonstatic_ops;
+ socket[i].socket.resource_ops = &pccard_iodyn_ops;
socket[i].socket.cb_dev = NULL;
socket[i].socket.dev.parent = &ofdev->dev;
socket[i].pcmcia = pcmcia;
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index d5db95644b64..8ceb7abc580a 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -43,6 +43,39 @@ module_param(io_speed, int, 0444);
static u8 pcmcia_used_irq[NR_IRQS];
#endif
+static int pcmcia_adjust_io_region(struct resource *res, unsigned long start,
+ unsigned long end, struct pcmcia_socket *s)
+{
+ if (s->resource_ops->adjust_io_region)
+ return s->resource_ops->adjust_io_region(res, start, end, s);
+ return -ENOMEM;
+}
+
+static struct resource *pcmcia_find_io_region(unsigned long base, int num,
+ unsigned long align,
+ struct pcmcia_socket *s)
+{
+ if (s->resource_ops->find_io)
+ return s->resource_ops->find_io(base, num, align, s);
+ return NULL;
+}
+
+int pcmcia_validate_mem(struct pcmcia_socket *s)
+{
+ if (s->resource_ops->validate_mem)
+ return s->resource_ops->validate_mem(s);
+ /* if there is no callback, we can assume that everything is OK */
+ return 0;
+}
+
+struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
+ int low, struct pcmcia_socket *s)
+{
+ if (s->resource_ops->find_mem)
+ return s->resource_ops->find_mem(base, num, align, low, s);
+ return NULL;
+}
+
/** alloc_io_space
*
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index 52db17263d8b..cdd30c180066 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -21,50 +21,6 @@
#include <pcmcia/cistpl.h>
#include "cs_internal.h"
-
-int pcmcia_validate_mem(struct pcmcia_socket *s)
-{
- if (s->resource_ops->validate_mem)
- return s->resource_ops->validate_mem(s);
- /* if there is no callback, we can assume that everything is OK */
- return 0;
-}
-EXPORT_SYMBOL(pcmcia_validate_mem);
-
-int pcmcia_adjust_io_region(struct resource *res, unsigned long r_start,
- unsigned long r_end, struct pcmcia_socket *s)
-{
- if (s->resource_ops->adjust_io_region)
- return s->resource_ops->adjust_io_region(res, r_start, r_end, s);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(pcmcia_adjust_io_region);
-
-struct resource *pcmcia_find_io_region(unsigned long base, int num,
- unsigned long align, struct pcmcia_socket *s)
-{
- if (s->resource_ops->find_io)
- return s->resource_ops->find_io(base, num, align, s);
- return NULL;
-}
-EXPORT_SYMBOL(pcmcia_find_io_region);
-
-struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
- int low, struct pcmcia_socket *s)
-{
- if (s->resource_ops->find_mem)
- return s->resource_ops->find_mem(base, num, align, low, s);
- return NULL;
-}
-EXPORT_SYMBOL(pcmcia_find_mem_region);
-
-void release_resource_db(struct pcmcia_socket *s)
-{
- if (s->resource_ops->exit)
- s->resource_ops->exit(s);
-}
-
-
static int static_init(struct pcmcia_socket *s)
{
unsigned long flags;
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 9b0dc433a8c3..91626c17f97b 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -264,36 +264,37 @@ static void do_io_probe(struct pcmcia_socket *s, unsigned int base,
}
#endif
-/*======================================================================
-
- This is tricky... when we set up CIS memory, we try to validate
- the memory window space allocations.
-
-======================================================================*/
+/*======================================================================*/
-/* Validation function for cards with a valid CIS */
+/**
+ * readable() - iomem validation function for cards with a valid CIS
+ */
static int readable(struct pcmcia_socket *s, struct resource *res,
unsigned int *count)
{
- int ret = -1;
+ int ret = -EINVAL;
s->cis_mem.res = res;
s->cis_virt = ioremap(res->start, s->map_size);
if (s->cis_virt) {
- ret = pccard_validate_cis(s, count);
- /* invalidate mapping and CIS cache */
+ /* as we're only called from pcmcia.c, we're safe */
+ if (s->callback->validate)
+ ret = s->callback->validate(s, count);
+ /* invalidate mapping */
iounmap(s->cis_virt);
s->cis_virt = NULL;
- destroy_cis_cache(s);
}
s->cis_mem.res = NULL;
- if ((ret != 0) || (*count == 0))
- return 0;
- return 1;
+ if ((ret) || (*count == 0))
+ return -EINVAL;
+ return 0;
}
-/* Validation function for simple memory cards */
-static int checksum(struct pcmcia_socket *s, struct resource *res)
+/**
+ * checksum() - iomem validation function for simple memory cards
+ */
+static int checksum(struct pcmcia_socket *s, struct resource *res,
+ unsigned int *value)
{
pccard_mem_map map;
int i, a = 0, b = -1, d;
@@ -321,61 +322,83 @@ static int checksum(struct pcmcia_socket *s, struct resource *res)
iounmap(virt);
}
- return (b == -1) ? -1 : (a>>1);
-}
-
-static int
-cis_readable(struct pcmcia_socket *s, unsigned long base, unsigned long size)
-{
- struct resource *res1, *res2;
- unsigned int info1, info2;
- int ret = 0;
-
- res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
- res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
- "PCMCIA memprobe");
-
- if (res1 && res2) {
- ret = readable(s, res1, &info1);
- ret += readable(s, res2, &info2);
- }
+ if (b == -1)
+ return -EINVAL;
- free_region(res2);
- free_region(res1);
+ *value = a;
- return (ret == 2) && (info1 == info2);
+ return 0;
}
-static int
-checksum_match(struct pcmcia_socket *s, unsigned long base, unsigned long size)
+/**
+ * do_validate_mem() - low level validate a memory region for PCMCIA use
+ * @s: PCMCIA socket to validate
+ * @base: start address of resource to check
+ * @size: size of resource to check
+ * @validate: validation function to use
+ *
+ * do_validate_mem() splits up the memory region which is to be checked
+ * into two parts. Both are passed to the @validate() function. If
+ * @validate() returns non-zero, or the value parameter to @validate()
+ * is zero, or the value parameter is different between both calls,
+ * the check fails, and -EINVAL is returned. Else, 0 is returned.
+ */
+static int do_validate_mem(struct pcmcia_socket *s,
+ unsigned long base, unsigned long size,
+ int validate (struct pcmcia_socket *s,
+ struct resource *res,
+ unsigned int *value))
{
struct resource *res1, *res2;
- int a = -1, b = -1;
+ unsigned int info1 = 1, info2 = 1;
+ int ret = -EINVAL;
res1 = claim_region(s, base, size/2, IORESOURCE_MEM, "PCMCIA memprobe");
res2 = claim_region(s, base + size/2, size/2, IORESOURCE_MEM,
"PCMCIA memprobe");
if (res1 && res2) {
- a = checksum(s, res1);
- b = checksum(s, res2);
+ ret = 0;
+ if (validate) {
+ ret = validate(s, res1, &info1);
+ ret += validate(s, res2, &info2);
+ }
}
free_region(res2);
free_region(res1);
- return (a == b) && (a >= 0);
-}
+ dev_dbg(&s->dev, "cs: memory probe 0x%06lx-0x%06lx: %p %p %u %u %u",
+ base, base+size-1, res1, res2, ret, info1, info2);
-/*======================================================================
+ if ((ret) || (info1 != info2) || (info1 == 0))
+ return -EINVAL;
- The memory probe. If the memory list includes a 64K-aligned block
- below 1MB, we probe in 64K chunks, and as soon as we accumulate at
- least mem_limit free space, we quit.
+ return 0;
+}
-======================================================================*/
-static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s)
+/**
+ * do_mem_probe() - validate a memory region for PCMCIA use
+ * @s: PCMCIA socket to validate
+ * @base: start address of resource to check
+ * @num: size of resource to check
+ * @validate: validation function to use
+ * @fallback: validation function to use if validate fails
+ *
+ * do_mem_probe() checks a memory region for use by the PCMCIA subsystem.
+ * To do so, the area is split up into sensible parts, and then passed
+ * into the @validate() function. Only if @validate() and @fallback() fail,
+ * the area is marked as unavaibale for use by the PCMCIA subsystem. The
+ * function returns the size of the usable memory area.
+ */
+static int do_mem_probe(struct pcmcia_socket *s, u_long base, u_long num,
+ int validate (struct pcmcia_socket *s,
+ struct resource *res,
+ unsigned int *value),
+ int fallback (struct pcmcia_socket *s,
+ struct resource *res,
+ unsigned int *value))
{
struct socket_data *s_data = s->resource_data;
u_long i, j, bad, fail, step;
@@ -393,15 +416,14 @@ static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s)
for (i = j = base; i < base+num; i = j + step) {
if (!fail) {
for (j = i; j < base+num; j += step) {
- if (cis_readable(s, j, step))
+ if (!do_validate_mem(s, j, step, validate))
break;
}
fail = ((i == base) && (j == base+num));
}
- if (fail) {
- for (j = i; j < base+num; j += 2*step)
- if (checksum_match(s, j, step) &&
- checksum_match(s, j + step, step))
+ if ((fail) && (fallback)) {
+ for (j = i; j < base+num; j += step)
+ if (!do_validate_mem(s, j, step, fallback))
break;
}
if (i != j) {
@@ -416,8 +438,14 @@ static int do_mem_probe(u_long base, u_long num, struct pcmcia_socket *s)
return num - bad;
}
+
#ifdef CONFIG_PCMCIA_PROBE
+/**
+ * inv_probe() - top-to-bottom search for one usuable high memory area
+ * @s: PCMCIA socket to validate
+ * @m: resource_map to check
+ */
static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s)
{
struct socket_data *s_data = s->resource_data;
@@ -432,9 +460,18 @@ static u_long inv_probe(struct resource_map *m, struct pcmcia_socket *s)
}
if (m->base < 0x100000)
return 0;
- return do_mem_probe(m->base, m->num, s);
+ return do_mem_probe(s, m->base, m->num, readable, checksum);
}
+/**
+ * validate_mem() - memory probe function
+ * @s: PCMCIA socket to validate
+ * @probe_mask: MEM_PROBE_LOW | MEM_PROBE_HIGH
+ *
+ * The memory probe. If the memory list includes a 64K-aligned block
+ * below 1MB, we probe in 64K chunks, and as soon as we accumulate at
+ * least mem_limit free space, we quit. Returns 0 on usuable ports.
+ */
static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
{
struct resource_map *m, mm;
@@ -457,7 +494,8 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
if (mm.base >= 0x100000)
continue;
if ((mm.base | mm.num) & 0xffff) {
- ok += do_mem_probe(mm.base, mm.num, s);
+ ok += do_mem_probe(s, mm.base, mm.num, readable,
+ checksum);
continue;
}
/* Special probe for 64K-aligned block */
@@ -467,7 +505,8 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
if (ok >= mem_limit)
sub_interval(&s_data->mem_db, b, 0x10000);
else
- ok += do_mem_probe(b, 0x10000, s);
+ ok += do_mem_probe(s, b, 0x10000,
+ readable, checksum);
}
}
}
@@ -480,6 +519,13 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
#else /* CONFIG_PCMCIA_PROBE */
+/**
+ * validate_mem() - memory probe function
+ * @s: PCMCIA socket to validate
+ * @probe_mask: ignored
+ *
+ * Returns 0 on usuable ports.
+ */
static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
{
struct resource_map *m, mm;
@@ -488,7 +534,7 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
for (m = s_data->mem_db.next; m != &s_data->mem_db; m = mm.next) {
mm = *m;
- ok += do_mem_probe(mm.base, mm.num, s);
+ ok += do_mem_probe(s, mm.base, mm.num, readable, checksum);
}
if (ok > 0)
return 0;
@@ -498,7 +544,13 @@ static int validate_mem(struct pcmcia_socket *s, unsigned int probe_mask)
#endif /* CONFIG_PCMCIA_PROBE */
-/*
+/**
+ * pcmcia_nonstatic_validate_mem() - try to validate iomem for PCMCIA use
+ * @s: PCMCIA socket to validate
+ *
+ * This is tricky... when we set up CIS memory, we try to validate
+ * the memory window space allocations.
+ *
* Locking note: Must be called with skt_mutex held!
*/
static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
@@ -516,10 +568,11 @@ static int pcmcia_nonstatic_validate_mem(struct pcmcia_socket *s)
probe_mask = MEM_PROBE_HIGH;
if (probe_mask & ~s_data->rsrc_mem_probe) {
- if (s->state & SOCKET_PRESENT)
+ if (s->state & SOCKET_PRESENT) {
ret = validate_mem(s, probe_mask);
- if (!ret)
- s_data->rsrc_mem_probe |= probe_mask;
+ if (!ret)
+ s_data->rsrc_mem_probe |= probe_mask;
+ }
}
mutex_unlock(&rsrc_mutex);
@@ -724,16 +777,11 @@ static int adjust_memory(struct pcmcia_socket *s, unsigned int action, unsigned
switch (action) {
case ADD_MANAGED_RESOURCE:
ret = add_interval(&data->mem_db, start, size);
+ if (!ret)
+ do_mem_probe(s, start, size, NULL, NULL);
break;
case REMOVE_MANAGED_RESOURCE:
ret = sub_interval(&data->mem_db, start, size);
- if (!ret) {
- struct pcmcia_socket *socket;
- down_read(&pcmcia_socket_list_rwsem);
- list_for_each_entry(socket, &pcmcia_socket_list, socket_list)
- release_cis_mem(socket);
- up_read(&pcmcia_socket_list_rwsem);
- }
break;
default:
ret = -EINVAL;
diff --git a/drivers/pcmcia/socket_sysfs.c b/drivers/pcmcia/socket_sysfs.c
index 7a456000332a..537d79305e7a 100644
--- a/drivers/pcmcia/socket_sysfs.c
+++ b/drivers/pcmcia/socket_sysfs.c
@@ -213,138 +213,6 @@ static ssize_t pccard_store_resource(struct device *dev,
}
static DEVICE_ATTR(available_resources_setup_done, 0600, pccard_show_resource, pccard_store_resource);
-
-static ssize_t pccard_extract_cis(struct pcmcia_socket *s, char *buf, loff_t off, size_t count)
-{
- tuple_t tuple;
- int status, i;
- loff_t pointer = 0;
- ssize_t ret = 0;
- u_char *tuplebuffer;
- u_char *tempbuffer;
-
- tuplebuffer = kmalloc(sizeof(u_char) * 256, GFP_KERNEL);
- if (!tuplebuffer)
- return -ENOMEM;
-
- tempbuffer = kmalloc(sizeof(u_char) * 258, GFP_KERNEL);
- if (!tempbuffer) {
- ret = -ENOMEM;
- goto free_tuple;
- }
-
- memset(&tuple, 0, sizeof(tuple_t));
-
- tuple.Attributes = TUPLE_RETURN_LINK | TUPLE_RETURN_COMMON;
- tuple.DesiredTuple = RETURN_FIRST_TUPLE;
- tuple.TupleOffset = 0;
-
- status = pccard_get_first_tuple(s, BIND_FN_ALL, &tuple);
- while (!status) {
- tuple.TupleData = tuplebuffer;
- tuple.TupleDataMax = 255;
- memset(tuplebuffer, 0, sizeof(u_char) * 255);
-
- status = pccard_get_tuple_data(s, &tuple);
- if (status)
- break;
-
- if (off < (pointer + 2 + tuple.TupleDataLen)) {
- tempbuffer[0] = tuple.TupleCode & 0xff;
- tempbuffer[1] = tuple.TupleLink & 0xff;
- for (i = 0; i < tuple.TupleDataLen; i++)
- tempbuffer[i + 2] = tuplebuffer[i] & 0xff;
-
- for (i = 0; i < (2 + tuple.TupleDataLen); i++) {
- if (((i + pointer) >= off) &&
- (i + pointer) < (off + count)) {
- buf[ret] = tempbuffer[i];
- ret++;
- }
- }
- }
-
- pointer += 2 + tuple.TupleDataLen;
-
- if (pointer >= (off + count))
- break;
-
- if (tuple.TupleCode == CISTPL_END)
- break;
- status = pccard_get_next_tuple(s, BIND_FN_ALL, &tuple);
- }
-
- kfree(tempbuffer);
- free_tuple:
- kfree(tuplebuffer);
-
- return ret;
-}
-
-static ssize_t pccard_show_cis(struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- unsigned int size = 0x200;
-
- if (off >= size)
- count = 0;
- else {
- struct pcmcia_socket *s;
- unsigned int chains;
-
- if (off + count > size)
- count = size - off;
-
- s = to_socket(container_of(kobj, struct device, kobj));
-
- if (!(s->state & SOCKET_PRESENT))
- return -ENODEV;
- if (pccard_validate_cis(s, &chains))
- return -EIO;
- if (!chains)
- return -ENODATA;
-
- count = pccard_extract_cis(s, buf, off, count);
- }
-
- return count;
-}
-
-static ssize_t pccard_store_cis(struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
-{
- struct pcmcia_socket *s = to_socket(container_of(kobj, struct device, kobj));
- int error;
-
- if (off)
- return -EINVAL;
-
- if (count >= CISTPL_MAX_CIS_SIZE)
- return -EINVAL;
-
- if (!(s->state & SOCKET_PRESENT))
- return -ENODEV;
-
- error = pcmcia_replace_cis(s, buf, count);
- if (error)
- return -EIO;
-
- mutex_lock(&s->skt_mutex);
- if ((s->callback) && (s->state & SOCKET_PRESENT) &&
- !(s->state & SOCKET_CARDBUS)) {
- if (try_module_get(s->callback->owner)) {
- s->callback->requery(s, 1);
- module_put(s->callback->owner);
- }
- }
- mutex_unlock(&s->skt_mutex);
-
- return count;
-}
-
-
static struct attribute *pccard_socket_attributes[] = {
&dev_attr_card_type.attr,
&dev_attr_card_voltage.attr,
@@ -362,28 +230,12 @@ static const struct attribute_group socket_attrs = {
.attrs = pccard_socket_attributes,
};
-static struct bin_attribute pccard_cis_attr = {
- .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
- .size = 0x200,
- .read = pccard_show_cis,
- .write = pccard_store_cis,
-};
-
int pccard_sysfs_add_socket(struct device *dev)
{
- int ret = 0;
-
- ret = sysfs_create_group(&dev->kobj, &socket_attrs);
- if (!ret) {
- ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
- if (ret)
- sysfs_remove_group(&dev->kobj, &socket_attrs);
- }
- return ret;
+ return sysfs_create_group(&dev->kobj, &socket_attrs);
}
void pccard_sysfs_remove_socket(struct device *dev)
{
- sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
sysfs_remove_group(&dev->kobj, &socket_attrs);
}
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
new file mode 100644
index 000000000000..4e36930b51ce
--- /dev/null
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -0,0 +1,357 @@
+/*
+ * PCMCIA socket code for the MyCable XXS1500 system.
+ *
+ * Copyright (c) 2009 Manuel Lauss <manuel.lauss@gmail.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/resource.h>
+#include <linux/spinlock.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/ss.h>
+#include <pcmcia/cistpl.h>
+
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/mach-au1x00/au1000.h>
+
+#define MEM_MAP_SIZE 0x400000
+#define IO_MAP_SIZE 0x1000
+
+
+/*
+ * 3.3V cards only; all interfacing is done via gpios:
+ *
+ * 0/1: carddetect (00 = card present, xx = huh)
+ * 4: card irq
+ * 204: reset (high-act)
+ * 205: buffer enable (low-act)
+ * 208/209: card voltage key (00,01,10,11)
+ * 210: battwarn
+ * 211: batdead
+ * 214: power (low-act)
+ */
+#define GPIO_CDA 0
+#define GPIO_CDB 1
+#define GPIO_CARDIRQ 4
+#define GPIO_RESET 204
+#define GPIO_OUTEN 205
+#define GPIO_VSL 208
+#define GPIO_VSH 209
+#define GPIO_BATTDEAD 210
+#define GPIO_BATTWARN 211
+#define GPIO_POWER 214
+
+struct xxs1500_pcmcia_sock {
+ struct pcmcia_socket socket;
+ void *virt_io;
+
+ /* the "pseudo" addresses of the PCMCIA space. */
+ unsigned long phys_io;
+ unsigned long phys_attr;
+ unsigned long phys_mem;
+
+ /* previous flags for set_socket() */
+ unsigned int old_flags;
+};
+
+#define to_xxs_socket(x) container_of(x, struct xxs1500_pcmcia_sock, socket)
+
+static irqreturn_t cdirq(int irq, void *data)
+{
+ struct xxs1500_pcmcia_sock *sock = data;
+
+ pcmcia_parse_events(&sock->socket, SS_DETECT);
+
+ return IRQ_HANDLED;
+}
+
+static int xxs1500_pcmcia_configure(struct pcmcia_socket *skt,
+ struct socket_state_t *state)
+{
+ struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
+ unsigned int changed;
+
+ /* power control */
+ switch (state->Vcc) {
+ case 0:
+ gpio_set_value(GPIO_POWER, 1); /* power off */
+ break;
+ case 33:
+ gpio_set_value(GPIO_POWER, 0); /* power on */
+ break;
+ case 50:
+ default:
+ return -EINVAL;
+ }
+
+ changed = state->flags ^ sock->old_flags;
+
+ if (changed & SS_RESET) {
+ if (state->flags & SS_RESET) {
+ gpio_set_value(GPIO_RESET, 1); /* assert reset */
+ gpio_set_value(GPIO_OUTEN, 1); /* buffers off */
+ } else {
+ gpio_set_value(GPIO_RESET, 0); /* deassert reset */
+ gpio_set_value(GPIO_OUTEN, 0); /* buffers on */
+ msleep(500);
+ }
+ }
+
+ sock->old_flags = state->flags;
+
+ return 0;
+}
+
+static int xxs1500_pcmcia_get_status(struct pcmcia_socket *skt,
+ unsigned int *value)
+{
+ unsigned int status;
+ int i;
+
+ status = 0;
+
+ /* check carddetects: GPIO[0:1] must both be low */
+ if (!gpio_get_value(GPIO_CDA) && !gpio_get_value(GPIO_CDB))
+ status |= SS_DETECT;
+
+ /* determine card voltage: GPIO[208:209] binary value */
+ i = (!!gpio_get_value(GPIO_VSL)) | ((!!gpio_get_value(GPIO_VSH)) << 1);
+
+ switch (i) {
+ case 0:
+ case 1:
+ case 2:
+ status |= SS_3VCARD; /* 3V card */
+ break;
+ case 3: /* 5V card, unsupported */
+ default:
+ status |= SS_XVCARD; /* treated as unsupported in core */
+ }
+
+ /* GPIO214: low active power switch */
+ status |= gpio_get_value(GPIO_POWER) ? 0 : SS_POWERON;
+
+ /* GPIO204: high-active reset line */
+ status |= gpio_get_value(GPIO_RESET) ? SS_RESET : SS_READY;
+
+ /* other stuff */
+ status |= gpio_get_value(GPIO_BATTDEAD) ? 0 : SS_BATDEAD;
+ status |= gpio_get_value(GPIO_BATTWARN) ? 0 : SS_BATWARN;
+
+ *value = status;
+
+ return 0;
+}
+
+static int xxs1500_pcmcia_sock_init(struct pcmcia_socket *skt)
+{
+ gpio_direction_input(GPIO_CDA);
+ gpio_direction_input(GPIO_CDB);
+ gpio_direction_input(GPIO_VSL);
+ gpio_direction_input(GPIO_VSH);
+ gpio_direction_input(GPIO_BATTDEAD);
+ gpio_direction_input(GPIO_BATTWARN);
+ gpio_direction_output(GPIO_RESET, 1); /* assert reset */
+ gpio_direction_output(GPIO_OUTEN, 1); /* disable buffers */
+ gpio_direction_output(GPIO_POWER, 1); /* power off */
+
+ return 0;
+}
+
+static int xxs1500_pcmcia_sock_suspend(struct pcmcia_socket *skt)
+{
+ return 0;
+}
+
+static int au1x00_pcmcia_set_io_map(struct pcmcia_socket *skt,
+ struct pccard_io_map *map)
+{
+ struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
+
+ map->start = (u32)sock->virt_io;
+ map->stop = map->start + IO_MAP_SIZE;
+
+ return 0;
+}
+
+static int au1x00_pcmcia_set_mem_map(struct pcmcia_socket *skt,
+ struct pccard_mem_map *map)
+{
+ struct xxs1500_pcmcia_sock *sock = to_xxs_socket(skt);
+
+ if (map->flags & MAP_ATTRIB)
+ map->static_start = sock->phys_attr + map->card_start;
+ else
+ map->static_start = sock->phys_mem + map->card_start;
+
+ return 0;
+}
+
+static struct pccard_operations xxs1500_pcmcia_operations = {
+ .init = xxs1500_pcmcia_sock_init,
+ .suspend = xxs1500_pcmcia_sock_suspend,
+ .get_status = xxs1500_pcmcia_get_status,
+ .set_socket = xxs1500_pcmcia_configure,
+ .set_io_map = au1x00_pcmcia_set_io_map,
+ .set_mem_map = au1x00_pcmcia_set_mem_map,
+};
+
+static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
+{
+ struct xxs1500_pcmcia_sock *sock;
+ struct resource *r;
+ phys_t physio;
+ int ret, irq;
+
+ sock = kzalloc(sizeof(struct xxs1500_pcmcia_sock), GFP_KERNEL);
+ if (!sock)
+ return -ENOMEM;
+
+ ret = -ENODEV;
+
+ /*
+ * pseudo-attr: The 32bit address of the PCMCIA attribute space
+ * for this socket (usually the 36bit address shifted 4 to the
+ * right).
+ */
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pseudo-attr");
+ if (!r) {
+ dev_err(&pdev->dev, "missing 'pseudo-attr' resource!\n");
+ goto out0;
+ }
+ sock->phys_attr = r->start;
+
+ /*
+ * pseudo-mem: The 32bit address of the PCMCIA memory space for
+ * this socket (usually the 36bit address shifted 4 to the right)
+ */
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pseudo-mem");
+ if (!r) {
+ dev_err(&pdev->dev, "missing 'pseudo-mem' resource!\n");
+ goto out0;
+ }
+ sock->phys_mem = r->start;
+
+ /*
+ * pseudo-io: The 32bit address of the PCMCIA IO space for this
+ * socket (usually the 36bit address shifted 4 to the right).
+ */
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pseudo-io");
+ if (!r) {
+ dev_err(&pdev->dev, "missing 'pseudo-io' resource!\n");
+ goto out0;
+ }
+ sock->phys_io = r->start;
+
+
+ /* for io must remap the full 36bit address (for reference see
+ * alchemy/common/setup.c::__fixup_bigphys_addr)
+ */
+ physio = ((phys_t)sock->phys_io) << 4;
+
+ /*
+ * PCMCIA client drivers use the inb/outb macros to access
+ * the IO registers. Since mips_io_port_base is added
+ * to the access address of the mips implementation of
+ * inb/outb, we need to subtract it here because we want
+ * to access the I/O or MEM address directly, without
+ * going through this "mips_io_port_base" mechanism.
+ */
+ sock->virt_io = (void *)(ioremap(physio, IO_MAP_SIZE) -
+ mips_io_port_base);
+
+ if (!sock->virt_io) {
+ dev_err(&pdev->dev, "cannot remap IO area\n");
+ ret = -ENOMEM;
+ goto out0;
+ }
+
+ sock->socket.ops = &xxs1500_pcmcia_operations;
+ sock->socket.owner = THIS_MODULE;
+ sock->socket.pci_irq = gpio_to_irq(GPIO_CARDIRQ);
+ sock->socket.features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
+ sock->socket.map_size = MEM_MAP_SIZE;
+ sock->socket.io_offset = (unsigned long)sock->virt_io;
+ sock->socket.dev.parent = &pdev->dev;
+ sock->socket.resource_ops = &pccard_static_ops;
+
+ platform_set_drvdata(pdev, sock);
+
+ /* setup carddetect irq: use one of the 2 GPIOs as an
+ * edge detector.
+ */
+ irq = gpio_to_irq(GPIO_CDA);
+ set_irq_type(irq, IRQ_TYPE_EDGE_BOTH);
+ ret = request_irq(irq, cdirq, 0, "pcmcia_carddetect", sock);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot setup cd irq\n");
+ goto out1;
+ }
+
+ ret = pcmcia_register_socket(&sock->socket);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register\n");
+ goto out2;
+ }
+
+ printk(KERN_INFO "MyCable XXS1500 PCMCIA socket services\n");
+
+ return 0;
+
+out2:
+ free_irq(gpio_to_irq(GPIO_CDA), sock);
+out1:
+ iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
+out0:
+ kfree(sock);
+ return ret;
+}
+
+static int __devexit xxs1500_pcmcia_remove(struct platform_device *pdev)
+{
+ struct xxs1500_pcmcia_sock *sock = platform_get_drvdata(pdev);
+
+ pcmcia_unregister_socket(&sock->socket);
+ free_irq(gpio_to_irq(GPIO_CDA), sock);
+ iounmap((void *)(sock->virt_io + (u32)mips_io_port_base));
+ kfree(sock);
+
+ return 0;
+}
+
+static struct platform_driver xxs1500_pcmcia_socket_driver = {
+ .driver = {
+ .name = "xxs1500_pcmcia",
+ .owner = THIS_MODULE,
+ },
+ .probe = xxs1500_pcmcia_probe,
+ .remove = __devexit_p(xxs1500_pcmcia_remove),
+};
+
+int __init xxs1500_pcmcia_socket_load(void)
+{
+ return platform_driver_register(&xxs1500_pcmcia_socket_driver);
+}
+
+void __exit xxs1500_pcmcia_socket_unload(void)
+{
+ platform_driver_unregister(&xxs1500_pcmcia_socket_driver);
+}
+
+module_init(xxs1500_pcmcia_socket_load);
+module_exit(xxs1500_pcmcia_socket_unload);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCMCIA Socket Services for MyCable XXS1500 systems");
+MODULE_AUTHOR("Manuel Lauss");
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 5b648f0c6075..ad4c414dbfbc 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -393,8 +393,6 @@ static void hp_wmi_notify(u32 value, void *context)
} else
printk(KERN_INFO "HP WMI: Unknown key pressed - %x\n",
eventcode);
-
- kfree(obj);
}
static int __init hp_wmi_input_setup(void)
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 5af53340da6f..cc7172ea19dd 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -145,7 +145,7 @@ struct sony_laptop_input_s {
struct input_dev *key_dev;
struct kfifo fifo;
spinlock_t fifo_lock;
- struct workqueue_struct *wq;
+ struct timer_list release_key_timer;
};
static struct sony_laptop_input_s sony_laptop_input = {
@@ -299,20 +299,26 @@ static int sony_laptop_input_keycode_map[] = {
};
/* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(struct work_struct *work)
+static void do_sony_laptop_release_key(unsigned long unused)
{
struct sony_laptop_keypress kp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sony_laptop_input.fifo_lock, flags);
- while (kfifo_out_locked(&sony_laptop_input.fifo, (unsigned char *)&kp,
- sizeof(kp), &sony_laptop_input.fifo_lock)
- == sizeof(kp)) {
- msleep(10);
+ if (kfifo_out(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) {
input_report_key(kp.dev, kp.key, 0);
input_sync(kp.dev);
}
+
+ /* If there is something in the fifo schedule next release. */
+ if (kfifo_len(&sony_laptop_input.fifo) != 0)
+ mod_timer(&sony_laptop_input.release_key_timer,
+ jiffies + msecs_to_jiffies(10));
+
+ spin_unlock_irqrestore(&sony_laptop_input.fifo_lock, flags);
}
-static DECLARE_WORK(sony_laptop_release_key_work,
- do_sony_laptop_release_key);
/* forward event to the input subsystem */
static void sony_laptop_report_input_event(u8 event)
@@ -366,13 +372,13 @@ static void sony_laptop_report_input_event(u8 event)
/* we emit the scancode so we can always remap the key */
input_event(kp.dev, EV_MSC, MSC_SCAN, event);
input_sync(kp.dev);
- kfifo_in_locked(&sony_laptop_input.fifo,
- (unsigned char *)&kp, sizeof(kp),
- &sony_laptop_input.fifo_lock);
- if (!work_pending(&sony_laptop_release_key_work))
- queue_work(sony_laptop_input.wq,
- &sony_laptop_release_key_work);
+ /* schedule key release */
+ kfifo_in_locked(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp),
+ &sony_laptop_input.fifo_lock);
+ mod_timer(&sony_laptop_input.release_key_timer,
+ jiffies + msecs_to_jiffies(10));
} else
dprintk("unknown input event %.2x\n", event);
}
@@ -390,27 +396,21 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
/* kfifo */
spin_lock_init(&sony_laptop_input.fifo_lock);
- error =
- kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
+ error = kfifo_alloc(&sony_laptop_input.fifo,
+ SONY_LAPTOP_BUF_SIZE, GFP_KERNEL);
if (error) {
printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
goto err_dec_users;
}
- /* init workqueue */
- sony_laptop_input.wq = create_singlethread_workqueue("sony-laptop");
- if (!sony_laptop_input.wq) {
- printk(KERN_ERR DRV_PFX
- "Unable to create workqueue.\n");
- error = -ENXIO;
- goto err_free_kfifo;
- }
+ setup_timer(&sony_laptop_input.release_key_timer,
+ do_sony_laptop_release_key, 0);
/* input keys */
key_dev = input_allocate_device();
if (!key_dev) {
error = -ENOMEM;
- goto err_destroy_wq;
+ goto err_free_kfifo;
}
key_dev->name = "Sony Vaio Keys";
@@ -419,18 +419,15 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
key_dev->dev.parent = &acpi_device->dev;
/* Initialize the Input Drivers: special keys */
- set_bit(EV_KEY, key_dev->evbit);
- set_bit(EV_MSC, key_dev->evbit);
- set_bit(MSC_SCAN, key_dev->mscbit);
+ input_set_capability(key_dev, EV_MSC, MSC_SCAN);
+
+ __set_bit(EV_KEY, key_dev->evbit);
key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]);
key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map);
key_dev->keycode = &sony_laptop_input_keycode_map;
- for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) {
- if (sony_laptop_input_keycode_map[i] != KEY_RESERVED) {
- set_bit(sony_laptop_input_keycode_map[i],
- key_dev->keybit);
- }
- }
+ for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++)
+ __set_bit(sony_laptop_input_keycode_map[i], key_dev->keybit);
+ __clear_bit(KEY_RESERVED, key_dev->keybit);
error = input_register_device(key_dev);
if (error)
@@ -450,9 +447,8 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device)
jog_dev->id.vendor = PCI_VENDOR_ID_SONY;
key_dev->dev.parent = &acpi_device->dev;
- jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
- jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE);
- jog_dev->relbit[0] = BIT_MASK(REL_WHEEL);
+ input_set_capability(jog_dev, EV_KEY, BTN_MIDDLE);
+ input_set_capability(jog_dev, EV_REL, REL_WHEEL);
error = input_register_device(jog_dev);
if (error)
@@ -473,9 +469,6 @@ err_unregister_keydev:
err_free_keydev:
input_free_device(key_dev);
-err_destroy_wq:
- destroy_workqueue(sony_laptop_input.wq);
-
err_free_kfifo:
kfifo_free(&sony_laptop_input.fifo);
@@ -486,12 +479,23 @@ err_dec_users:
static void sony_laptop_remove_input(void)
{
- /* cleanup only after the last user has gone */
+ struct sony_laptop_keypress kp = { NULL };
+
+ /* Cleanup only after the last user has gone */
if (!atomic_dec_and_test(&sony_laptop_input.users))
return;
- /* flush workqueue first */
- flush_workqueue(sony_laptop_input.wq);
+ del_timer_sync(&sony_laptop_input.release_key_timer);
+
+ /*
+ * Generate key-up events for remaining keys. Note that we don't
+ * need locking since nobody is adding new events to the kfifo.
+ */
+ while (kfifo_out(&sony_laptop_input.fifo,
+ (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) {
+ input_report_key(kp.dev, kp.key, 0);
+ input_sync(kp.dev);
+ }
/* destroy input devs */
input_unregister_device(sony_laptop_input.key_dev);
@@ -502,7 +506,6 @@ static void sony_laptop_remove_input(void)
sony_laptop_input.jog_dev = NULL;
}
- destroy_workqueue(sony_laptop_input.wq);
kfifo_free(&sony_laptop_input.fifo);
}
diff --git a/drivers/power/pmu_battery.c b/drivers/power/pmu_battery.c
index 9346a862f1f2..9c87ad564803 100644
--- a/drivers/power/pmu_battery.c
+++ b/drivers/power/pmu_battery.c
@@ -89,6 +89,8 @@ static int pmu_bat_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_STATUS:
if (pbi->flags & PMU_BATT_CHARGING)
val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (pmu_power_flags & PMU_PWR_AC_PRESENT)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
else
val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c
index ad4f071e1287..0693902d6151 100644
--- a/drivers/power/wm8350_power.c
+++ b/drivers/power/wm8350_power.c
@@ -190,7 +190,7 @@ static irqreturn_t wm8350_charger_handler(int irq, void *data)
struct wm8350_power *power = &wm8350->power;
struct wm8350_charger_policy *policy = power->policy;
- switch (irq) {
+ switch (irq - wm8350->irq_base) {
case WM8350_IRQ_CHG_BAT_FAIL:
dev_err(wm8350->dev, "battery failed\n");
break;
@@ -428,18 +428,18 @@ static void wm8350_init_charger(struct wm8350 *wm8350)
static void free_charger_irq(struct wm8350 *wm8350)
{
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1);
- wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85);
- wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB);
- wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB);
- wm8350_free_irq(wm8350, WM8350_IRQ_EXT_BAT_FB);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_HOT, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_COLD, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_BAT_FAIL, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_TO, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_END, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_START, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P9, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_3P1, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CHG_VBATT_LT_2P85, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_EXT_USB_FB, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_EXT_WALL_FB, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_EXT_BAT_FB, wm8350);
}
static __devinit int wm8350_power_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 04719551381b..5fb83e2ced25 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -11,15 +11,17 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
+#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
-#include <linux/mfd/88pm8607.h>
+#include <linux/mfd/88pm860x.h>
struct pm8607_regulator_info {
struct regulator_desc desc;
- struct pm8607_chip *chip;
+ struct pm860x_chip *chip;
struct regulator_dev *regulator;
+ struct i2c_client *i2c;
int min_uV;
int max_uV;
@@ -46,7 +48,6 @@ static inline int check_range(struct pm8607_regulator_info *info,
static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- uint8_t chip_id = info->chip->chip_id;
int ret = -EINVAL;
switch (info->desc.id) {
@@ -88,79 +89,29 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
case PM8607_ID_LDO2:
case PM8607_ID_LDO3:
case PM8607_ID_LDO9:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 8) ? (index * 50000 + 2550000) :
- -EINVAL);
- break;
- case PM8607_CHIP_B0:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2550000) :
- 3300000);
- break;
- }
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2550000) :
+ 3300000);
break;
case PM8607_ID_LDO4:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 8) ? (index * 50000 + 2550000) :
- -EINVAL);
- break;
- case PM8607_CHIP_B0:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 6) ? (index * 50000 + 2550000) :
- ((index == 6) ? 2900000 : 3300000));
- break;
- }
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 6) ? (index * 50000 + 2550000) :
+ ((index == 6) ? 2900000 : 3300000));
break;
case PM8607_ID_LDO6:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 8) ? (index * 50000 + 2450000) :
- -EINVAL);
- break;
- case PM8607_CHIP_B0:
- ret = (index < 2) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2500000) :
- 3300000);
- break;
- }
+ ret = (index < 2) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2500000) :
+ 3300000);
break;
case PM8607_ID_LDO10:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 8) ? (index * 50000 + 2550000) :
- 1200000);
- break;
- case PM8607_CHIP_B0:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2550000) :
- ((index == 7) ? 3300000 : 1200000));
- break;
- }
+ ret = (index < 3) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2550000) :
+ ((index == 7) ? 3300000 : 1200000));
break;
case PM8607_ID_LDO14:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- ret = (index < 3) ? (index * 50000 + 1800000) :
- ((index < 8) ? (index * 50000 + 2550000) :
- -EINVAL);
- break;
- case PM8607_CHIP_B0:
- ret = (index < 2) ? (index * 50000 + 1800000) :
- ((index < 7) ? (index * 50000 + 2600000) :
- 3300000);
- break;
- }
+ ret = (index < 2) ? (index * 50000 + 1800000) :
+ ((index < 7) ? (index * 50000 + 2600000) :
+ 3300000);
break;
}
return ret;
@@ -169,7 +120,6 @@ static int pm8607_list_voltage(struct regulator_dev *rdev, unsigned index)
static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- uint8_t chip_id = info->chip->chip_id;
int val = -ENOENT;
int ret;
@@ -254,161 +204,77 @@ static int choose_voltage(struct regulator_dev *rdev, int min_uV, int max_uV)
case PM8607_ID_LDO2:
case PM8607_ID_LDO3:
case PM8607_ID_LDO9:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else
- val = -EINVAL;
- }
- break;
- case PM8607_CHIP_B0:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2850mV / 50mV */
- if (min_uV <= 2850000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2850mV / 50mV */
+ if (min_uV <= 2850000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
}
break;
case PM8607_ID_LDO4:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- if (min_uV < 2700000) /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else
- val = -EINVAL;
- }
- break;
- case PM8607_CHIP_B0:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2800mV / 50mV */
- if (min_uV <= 2850000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else if (min_uV <= 2900000)
- val = 6;
- else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
+ if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1900000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 3; /* 2700mV */
+ } else { /* 2700mV ~ 2800mV / 50mV */
+ if (min_uV <= 2850000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 3;
+ } else if (min_uV <= 2900000)
+ val = 6;
+ else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
}
break;
case PM8607_ID_LDO6:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- if (min_uV < 2600000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2600mV */
- } else { /* 2600mV ~ 2800mV / 50mV */
- if (min_uV <= 2800000) {
- val = (min_uV - 2550001) / 50000;
- val += 3;
- } else
- val = -EINVAL;
- }
- break;
- case PM8607_CHIP_B0:
- if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1850000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 2; /* 2600mV */
- } else { /* 2600mV ~ 2800mV / 50mV */
- if (min_uV <= 2800000) {
- val = (min_uV - 2550001) / 50000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
+ if (min_uV < 2600000) { /* 1800mV ~ 1850mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1850000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 2; /* 2600mV */
+ } else { /* 2600mV ~ 2800mV / 50mV */
+ if (min_uV <= 2800000) {
+ val = (min_uV - 2550001) / 50000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
}
break;
case PM8607_ID_LDO14:
- switch (chip_id) {
- case PM8607_CHIP_A0:
- case PM8607_CHIP_A1:
- if (min_uV < 2700000) { /* 1800mV ~ 1900mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1900000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 3; /* 2700mV */
- } else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 3;
- } else
- val = -EINVAL;
- }
- break;
- case PM8607_CHIP_B0:
- if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
- if (min_uV <= 1800000)
- val = 0;
- else if (min_uV <= 1850000)
- val = (min_uV - 1750001) / 50000;
- else
- val = 2; /* 2700mV */
- } else { /* 2700mV ~ 2900mV / 50mV */
- if (min_uV <= 2900000) {
- val = (min_uV - 2650001) / 50000;
- val += 2;
- } else if (min_uV <= 3300000)
- val = 7;
- else
- val = -EINVAL;
- }
- break;
+ if (min_uV < 2700000) { /* 1800mV ~ 1850mV / 50mV */
+ if (min_uV <= 1800000)
+ val = 0;
+ else if (min_uV <= 1850000)
+ val = (min_uV - 1750001) / 50000;
+ else
+ val = 2; /* 2700mV */
+ } else { /* 2700mV ~ 2900mV / 50mV */
+ if (min_uV <= 2900000) {
+ val = (min_uV - 2650001) / 50000;
+ val += 2;
+ } else if (min_uV <= 3300000)
+ val = 7;
+ else
+ val = -EINVAL;
}
break;
}
@@ -428,7 +294,6 @@ static int pm8607_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- struct pm8607_chip *chip = info->chip;
uint8_t val, mask;
int ret;
@@ -443,13 +308,13 @@ static int pm8607_set_voltage(struct regulator_dev *rdev,
val = (uint8_t)(ret << info->vol_shift);
mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
- ret = pm8607_set_bits(chip, info->vol_reg, mask, val);
+ ret = pm860x_set_bits(info->i2c, info->vol_reg, mask, val);
if (ret)
return ret;
switch (info->desc.id) {
case PM8607_ID_BUCK1:
case PM8607_ID_BUCK3:
- ret = pm8607_set_bits(chip, info->update_reg,
+ ret = pm860x_set_bits(info->i2c, info->update_reg,
1 << info->update_bit,
1 << info->update_bit);
break;
@@ -460,11 +325,10 @@ static int pm8607_set_voltage(struct regulator_dev *rdev,
static int pm8607_get_voltage(struct regulator_dev *rdev)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- struct pm8607_chip *chip = info->chip;
uint8_t val, mask;
int ret;
- ret = pm8607_reg_read(chip, info->vol_reg);
+ ret = pm860x_reg_read(info->i2c, info->vol_reg);
if (ret < 0)
return ret;
@@ -477,9 +341,8 @@ static int pm8607_get_voltage(struct regulator_dev *rdev)
static int pm8607_enable(struct regulator_dev *rdev)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- struct pm8607_chip *chip = info->chip;
- return pm8607_set_bits(chip, info->enable_reg,
+ return pm860x_set_bits(info->i2c, info->enable_reg,
1 << info->enable_bit,
1 << info->enable_bit);
}
@@ -487,19 +350,17 @@ static int pm8607_enable(struct regulator_dev *rdev)
static int pm8607_disable(struct regulator_dev *rdev)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- struct pm8607_chip *chip = info->chip;
- return pm8607_set_bits(chip, info->enable_reg,
+ return pm860x_set_bits(info->i2c, info->enable_reg,
1 << info->enable_bit, 0);
}
static int pm8607_is_enabled(struct regulator_dev *rdev)
{
struct pm8607_regulator_info *info = rdev_get_drvdata(rdev);
- struct pm8607_chip *chip = info->chip;
int ret;
- ret = pm8607_reg_read(chip, info->enable_reg);
+ ret = pm860x_reg_read(info->i2c, info->enable_reg);
if (ret < 0)
return ret;
@@ -589,8 +450,8 @@ static inline struct pm8607_regulator_info *find_regulator_info(int id)
static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
{
- struct pm8607_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct pm8607_platform_data *pdata = chip->dev->platform_data;
+ struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm860x_platform_data *pdata = chip->dev->platform_data;
struct pm8607_regulator_info *info = NULL;
info = find_regulator_info(pdev->id);
@@ -599,6 +460,7 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
return -EINVAL;
}
+ info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
info->chip = chip;
info->regulator = regulator_register(&info->desc, &pdev->dev,
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 262f62eec837..bb645a2fb87e 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -76,6 +76,12 @@ config REGULATOR_MAX8660
This driver controls a Maxim 8660/8661 voltage output
regulator via I2C bus.
+config REGULATOR_MAX8925
+ tristate "Maxim MAX8925 Power Management IC"
+ depends on MFD_MAX8925
+ help
+ Say y here to support the voltage regulaltor of Maxim MAX8925 PMIC.
+
config REGULATOR_TWL4030
bool "TI TWL4030/TWL5030/TWL6030/TPS695x0 PMIC"
depends on TWL4030_CORE
@@ -166,7 +172,7 @@ config REGULATOR_TPS6507X
config REGULATOR_88PM8607
bool "Marvell 88PM8607 Power regulators"
- depends on MFD_88PM8607=y
+ depends on MFD_88PM860X=y
help
This driver supports 88PM8607 voltage regulator chips.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b3c806c79415..9a180c8e1527 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
+obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 686ef270ecf7..5a3509b835f4 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -19,6 +19,7 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -1084,6 +1085,13 @@ overflow_err:
return NULL;
}
+static int _regulator_get_enable_time(struct regulator_dev *rdev)
+{
+ if (!rdev->desc->ops->enable_time)
+ return 0;
+ return rdev->desc->ops->enable_time(rdev);
+}
+
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
int exclusive)
@@ -1251,7 +1259,7 @@ static int _regulator_can_change_status(struct regulator_dev *rdev)
/* locks held by regulator_enable() */
static int _regulator_enable(struct regulator_dev *rdev)
{
- int ret;
+ int ret, delay;
/* do we need to enable the supply regulator first */
if (rdev->supply) {
@@ -1275,13 +1283,34 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (!_regulator_can_change_status(rdev))
return -EPERM;
- if (rdev->desc->ops->enable) {
- ret = rdev->desc->ops->enable(rdev);
- if (ret < 0)
- return ret;
- } else {
+ if (!rdev->desc->ops->enable)
return -EINVAL;
+
+ /* Query before enabling in case configuration
+ * dependant. */
+ ret = _regulator_get_enable_time(rdev);
+ if (ret >= 0) {
+ delay = ret;
+ } else {
+ printk(KERN_WARNING
+ "%s: enable_time() failed for %s: %d\n",
+ __func__, rdev_get_name(rdev),
+ ret);
+ delay = 0;
}
+
+ /* Allow the regulator to ramp; it would be useful
+ * to extend this for bulk operations so that the
+ * regulators can ramp together. */
+ ret = rdev->desc->ops->enable(rdev);
+ if (ret < 0)
+ return ret;
+
+ if (delay >= 1000)
+ mdelay(delay / 1000);
+ else if (delay)
+ udelay(delay);
+
} else if (ret < 0) {
printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
__func__, rdev_get_name(rdev), ret);
@@ -1341,6 +1370,9 @@ static int _regulator_disable(struct regulator_dev *rdev)
__func__, rdev_get_name(rdev));
return ret;
}
+
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
/* decrease our supplies ref count and disable if required */
@@ -1399,8 +1431,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
return ret;
}
/* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE,
- NULL);
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
}
/* decrease our supplies ref count and disable if required */
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 76d08c282f9c..9416f0cac1b3 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -54,7 +54,7 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val);
#define LP3971_BUCK2_BASE 0x29
#define LP3971_BUCK3_BASE 0x32
-const static int buck_base_addr[] = {
+static const int buck_base_addr[] = {
LP3971_BUCK1_BASE,
LP3971_BUCK2_BASE,
LP3971_BUCK3_BASE,
@@ -63,7 +63,7 @@ const static int buck_base_addr[] = {
#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
-const static int buck_voltage_map[] = {
+static const int buck_voltage_map[] = {
0, 800, 850, 900, 950, 1000, 1050, 1100,
1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
@@ -96,17 +96,17 @@ const static int buck_voltage_map[] = {
#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
#define LDO_VOL_CONTR_MASK 0x0f
-const static int ldo45_voltage_map[] = {
+static const int ldo45_voltage_map[] = {
1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
};
-const static int ldo123_voltage_map[] = {
+static const int ldo123_voltage_map[] = {
1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
};
-const static int *ldo_voltage_map[] = {
+static const int *ldo_voltage_map[] = {
ldo123_voltage_map, /* LDO1 */
ldo123_voltage_map, /* LDO2 */
ldo123_voltage_map, /* LDO3 */
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
new file mode 100644
index 000000000000..67873f08ed40
--- /dev/null
+++ b/drivers/regulator/max8925-regulator.c
@@ -0,0 +1,306 @@
+/*
+ * Regulators driver for Maxim max8925
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/max8925.h>
+
+#define SD1_DVM_VMIN 850000
+#define SD1_DVM_VMAX 1000000
+#define SD1_DVM_STEP 50000
+#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */
+#define SD1_DVM_EN 6 /* SDV1 bit 6 */
+
+struct max8925_regulator_info {
+ struct regulator_desc desc;
+ struct regulator_dev *regulator;
+ struct i2c_client *i2c;
+ struct max8925_chip *chip;
+
+ int min_uV;
+ int max_uV;
+ int step_uV;
+ int vol_reg;
+ int vol_shift;
+ int vol_nbits;
+ int enable_bit;
+ int enable_reg;
+};
+
+static inline int check_range(struct max8925_regulator_info *info,
+ int min_uV, int max_uV)
+{
+ if (min_uV < info->min_uV || min_uV > info->max_uV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int max8925_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+ return info->min_uV + index * info->step_uV;
+}
+
+static int max8925_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned char data, mask;
+
+ if (check_range(info, min_uV, max_uV)) {
+ dev_err(info->chip->dev, "invalid voltage range (%d, %d) uV\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ data = (min_uV - info->min_uV + info->step_uV - 1) / info->step_uV;
+ data <<= info->vol_shift;
+ mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
+
+ return max8925_set_bits(info->i2c, info->vol_reg, mask, data);
+}
+
+static int max8925_get_voltage(struct regulator_dev *rdev)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned char data, mask;
+ int ret;
+
+ ret = max8925_reg_read(info->i2c, info->vol_reg);
+ if (ret < 0)
+ return ret;
+ mask = ((1 << info->vol_nbits) - 1) << info->vol_shift;
+ data = (ret & mask) >> info->vol_shift;
+
+ return max8925_list_voltage(rdev, data);
+}
+
+static int max8925_enable(struct regulator_dev *rdev)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8925_set_bits(info->i2c, info->enable_reg,
+ 1 << info->enable_bit,
+ 1 << info->enable_bit);
+}
+
+static int max8925_disable(struct regulator_dev *rdev)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8925_set_bits(info->i2c, info->enable_reg,
+ 1 << info->enable_bit, 0);
+}
+
+static int max8925_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = max8925_reg_read(info->i2c, info->vol_reg);
+ if (ret < 0)
+ return ret;
+
+ return ret & (1 << info->enable_bit);
+}
+
+static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned char data, mask;
+
+ if (uV < SD1_DVM_VMIN || uV > SD1_DVM_VMAX)
+ return -EINVAL;
+
+ data = (uV - SD1_DVM_VMIN + SD1_DVM_STEP - 1) / SD1_DVM_STEP;
+ data <<= SD1_DVM_SHIFT;
+ mask = 3 << SD1_DVM_SHIFT;
+
+ return max8925_set_bits(info->i2c, info->enable_reg, mask, data);
+}
+
+static int max8925_set_dvm_enable(struct regulator_dev *rdev)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN,
+ 1 << SD1_DVM_EN);
+}
+
+static int max8925_set_dvm_disable(struct regulator_dev *rdev)
+{
+ struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return max8925_set_bits(info->i2c, info->vol_reg, 1 << SD1_DVM_EN, 0);
+}
+
+static struct regulator_ops max8925_regulator_sdv_ops = {
+ .set_voltage = max8925_set_voltage,
+ .get_voltage = max8925_get_voltage,
+ .enable = max8925_enable,
+ .disable = max8925_disable,
+ .is_enabled = max8925_is_enabled,
+ .set_suspend_voltage = max8925_set_dvm_voltage,
+ .set_suspend_enable = max8925_set_dvm_enable,
+ .set_suspend_disable = max8925_set_dvm_disable,
+};
+
+static struct regulator_ops max8925_regulator_ldo_ops = {
+ .set_voltage = max8925_set_voltage,
+ .get_voltage = max8925_get_voltage,
+ .enable = max8925_enable,
+ .disable = max8925_disable,
+ .is_enabled = max8925_is_enabled,
+};
+
+#define MAX8925_SDV(_id, min, max, step) \
+{ \
+ .desc = { \
+ .name = "SDV" #_id, \
+ .ops = &max8925_regulator_sdv_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MAX8925_ID_SD##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .min_uV = min * 1000, \
+ .max_uV = max * 1000, \
+ .step_uV = step * 1000, \
+ .vol_reg = MAX8925_SDV##_id, \
+ .vol_shift = 0, \
+ .vol_nbits = 6, \
+ .enable_reg = MAX8925_SDCTL##_id, \
+ .enable_bit = 0, \
+}
+
+#define MAX8925_LDO(_id, min, max, step) \
+{ \
+ .desc = { \
+ .name = "LDO" #_id, \
+ .ops = &max8925_regulator_ldo_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MAX8925_ID_LDO##_id, \
+ .owner = THIS_MODULE, \
+ }, \
+ .min_uV = min * 1000, \
+ .max_uV = max * 1000, \
+ .step_uV = step * 1000, \
+ .vol_reg = MAX8925_LDOVOUT##_id, \
+ .vol_shift = 0, \
+ .vol_nbits = 6, \
+ .enable_reg = MAX8925_LDOCTL##_id, \
+ .enable_bit = 0, \
+}
+
+static struct max8925_regulator_info max8925_regulator_info[] = {
+ MAX8925_SDV(1, 637.5, 1425, 12.5),
+ MAX8925_SDV(2, 650, 2225, 25),
+ MAX8925_SDV(3, 750, 3900, 50),
+
+ MAX8925_LDO(1, 750, 3900, 50),
+ MAX8925_LDO(2, 650, 2250, 25),
+ MAX8925_LDO(3, 650, 2250, 25),
+ MAX8925_LDO(4, 750, 3900, 50),
+ MAX8925_LDO(5, 750, 3900, 50),
+ MAX8925_LDO(6, 750, 3900, 50),
+ MAX8925_LDO(7, 750, 3900, 50),
+ MAX8925_LDO(8, 750, 3900, 50),
+ MAX8925_LDO(9, 750, 3900, 50),
+ MAX8925_LDO(10, 750, 3900, 50),
+ MAX8925_LDO(11, 750, 3900, 50),
+ MAX8925_LDO(12, 750, 3900, 50),
+ MAX8925_LDO(13, 750, 3900, 50),
+ MAX8925_LDO(14, 750, 3900, 50),
+ MAX8925_LDO(15, 750, 3900, 50),
+ MAX8925_LDO(16, 750, 3900, 50),
+ MAX8925_LDO(17, 650, 2250, 25),
+ MAX8925_LDO(18, 650, 2250, 25),
+ MAX8925_LDO(19, 750, 3900, 50),
+ MAX8925_LDO(20, 750, 3900, 50),
+};
+
+static inline struct max8925_regulator_info *find_regulator_info(int id)
+{
+ struct max8925_regulator_info *ri;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(max8925_regulator_info); i++) {
+ ri = &max8925_regulator_info[i];
+ if (ri->desc.id == id)
+ return ri;
+ }
+ return NULL;
+}
+
+static int __devinit max8925_regulator_probe(struct platform_device *pdev)
+{
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max8925_platform_data *pdata = chip->dev->platform_data;
+ struct max8925_regulator_info *ri = NULL;
+ struct regulator_dev *rdev;
+
+ ri = find_regulator_info(pdev->id);
+ if (ri == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ return -EINVAL;
+ }
+ ri->i2c = chip->i2c;
+ ri->chip = chip;
+
+ rdev = regulator_register(&ri->desc, &pdev->dev,
+ pdata->regulator[pdev->id], ri);
+ if (IS_ERR(rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ ri->desc.name);
+ return PTR_ERR(rdev);
+ }
+
+ platform_set_drvdata(pdev, rdev);
+ return 0;
+}
+
+static int __devexit max8925_regulator_remove(struct platform_device *pdev)
+{
+ struct regulator_dev *rdev = platform_get_drvdata(pdev);
+
+ regulator_unregister(rdev);
+ return 0;
+}
+
+static struct platform_driver max8925_regulator_driver = {
+ .driver = {
+ .name = "max8925-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8925_regulator_probe,
+ .remove = __devexit_p(max8925_regulator_remove),
+};
+
+static int __init max8925_regulator_init(void)
+{
+ return platform_driver_register(&max8925_regulator_driver);
+}
+subsys_initcall(max8925_regulator_init);
+
+static void __exit max8925_regulator_exit(void)
+{
+ platform_driver_unregister(&max8925_regulator_driver);
+}
+module_exit(max8925_regulator_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_DESCRIPTION("Regulator Driver for Maxim 8925 PMIC");
+MODULE_ALIAS("platform:max8925-regulator");
+
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 39c495300045..a40e35ab8555 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -2,6 +2,7 @@
* Regulator Driver for Freescale MC13783 PMIC
*
* Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,11 +17,44 @@
#include <linux/init.h>
#include <linux/err.h>
-#define MC13783_REG_SWITCHERS4 28
-#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
-
#define MC13783_REG_SWITCHERS5 29
#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
+#define MC13783_REG_SWITCHERS5_SW3VSEL 18
+#define MC13783_REG_SWITCHERS5_SW3VSEL_M (3 << 18)
+
+#define MC13783_REG_REGULATORSETTING0 30
+#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL 2
+#define MC13783_REG_REGULATORSETTING0_VDIGVSEL 4
+#define MC13783_REG_REGULATORSETTING0_VGENVSEL 6
+#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL 9
+#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL 11
+#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL 13
+#define MC13783_REG_REGULATORSETTING0_VSIMVSEL 14
+#define MC13783_REG_REGULATORSETTING0_VESIMVSEL 15
+#define MC13783_REG_REGULATORSETTING0_VCAMVSEL 16
+
+#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL_M (3 << 2)
+#define MC13783_REG_REGULATORSETTING0_VDIGVSEL_M (3 << 4)
+#define MC13783_REG_REGULATORSETTING0_VGENVSEL_M (7 << 6)
+#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL_M (3 << 9)
+#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL_M (3 << 11)
+#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL_M (1 << 13)
+#define MC13783_REG_REGULATORSETTING0_VSIMVSEL_M (1 << 14)
+#define MC13783_REG_REGULATORSETTING0_VESIMVSEL_M (1 << 15)
+#define MC13783_REG_REGULATORSETTING0_VCAMVSEL_M (7 << 16)
+
+#define MC13783_REG_REGULATORSETTING1 31
+#define MC13783_REG_REGULATORSETTING1_VVIBVSEL 0
+#define MC13783_REG_REGULATORSETTING1_VRF1VSEL 2
+#define MC13783_REG_REGULATORSETTING1_VRF2VSEL 4
+#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL 6
+#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL 9
+
+#define MC13783_REG_REGULATORSETTING1_VVIBVSEL_M (3 << 0)
+#define MC13783_REG_REGULATORSETTING1_VRF1VSEL_M (3 << 2)
+#define MC13783_REG_REGULATORSETTING1_VRF2VSEL_M (3 << 4)
+#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL_M (7 << 6)
+#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL_M (7 << 9)
#define MC13783_REG_REGULATORMODE0 32
#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
@@ -53,14 +87,88 @@ struct mc13783_regulator {
struct regulator_desc desc;
int reg;
int enable_bit;
+ int vsel_reg;
+ int vsel_shift;
+ int vsel_mask;
+ int const *voltages;
+};
+
+/* Voltage Values */
+static const int const mc13783_sw3_val[] = {
+ 5000000, 5000000, 5000000, 5500000,
+};
+
+static const int const mc13783_vaudio_val[] = {
+ 2775000,
+};
+
+static const int const mc13783_viohi_val[] = {
+ 2775000,
+};
+
+static const int const mc13783_violo_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const int const mc13783_vdig_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const int const mc13783_vgen_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+ 1100000, 2000000, 2775000, 2400000,
+};
+
+static const int const mc13783_vrfdig_val[] = {
+ 1200000, 1500000, 1800000, 1875000,
+};
+
+static const int const mc13783_vrfref_val[] = {
+ 2475000, 2600000, 2700000, 2775000,
+};
+
+static const int const mc13783_vrfcp_val[] = {
+ 2700000, 2775000,
+};
+
+static const int const mc13783_vsim_val[] = {
+ 1800000, 2900000, 3000000,
+};
+
+static const int const mc13783_vesim_val[] = {
+ 1800000, 2900000,
+};
+
+static const int const mc13783_vcam_val[] = {
+ 1500000, 1800000, 2500000, 2550000,
+ 2600000, 2750000, 2800000, 3000000,
+};
+
+static const int const mc13783_vrfbg_val[] = {
+ 1250000,
+};
+
+static const int const mc13783_vvib_val[] = {
+ 1300000, 1800000, 2000000, 3000000,
+};
+
+static const int const mc13783_vmmc_val[] = {
+ 1600000, 1800000, 2000000, 2600000,
+ 2700000, 2800000, 2900000, 3000000,
+};
+
+static const int const mc13783_vrf_val[] = {
+ 1500000, 1875000, 2700000, 2775000,
};
static struct regulator_ops mc13783_regulator_ops;
+static struct regulator_ops mc13783_fixed_regulator_ops;
-#define MC13783_DEFINE(prefix, _name, _reg) \
+#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages) \
[MC13783_ ## prefix ## _ ## _name] = { \
.desc = { \
.name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
.ops = &mc13783_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MC13783_ ## prefix ## _ ## _name, \
@@ -68,36 +176,83 @@ static struct regulator_ops mc13783_regulator_ops;
}, \
.reg = MC13783_REG_ ## _reg, \
.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .vsel_reg = MC13783_REG_ ## _vsel_reg, \
+ .vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
+ .vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
+ .voltages = _voltages, \
}
-#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg)
-#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg)
+#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &mc13783_fixed_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .voltages = _voltages, \
+ }
+
+#define MC13783_GPO_DEFINE(prefix, _name, _reg) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .ops = &mc13783_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ }
+
+#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
+#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
static struct mc13783_regulator mc13783_regulators[] = {
- MC13783_DEFINE_SW(SW3, SWITCHERS5),
- MC13783_DEFINE_SW(PLL, SWITCHERS4),
-
- MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0),
- MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0),
- MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0),
- MC13783_DEFINE_REGU(VDIG, REGULATORMODE0),
- MC13783_DEFINE_REGU(VGEN, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0),
- MC13783_DEFINE_REGU(VSIM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VESIM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VCAM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1),
- MC13783_DEFINE_REGU(VVIB, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRF1, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRF2, REGULATORMODE1),
- MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1),
- MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1),
- MC13783_DEFINE_REGU(GPO1, POWERMISC),
- MC13783_DEFINE_REGU(GPO2, POWERMISC),
- MC13783_DEFINE_REGU(GPO3, POWERMISC),
- MC13783_DEFINE_REGU(GPO4, POWERMISC),
+ MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
+
+ MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
+ MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
+ MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_violo_val),
+ MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vdig_val),
+ MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vgen_val),
+ MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfdig_val),
+ MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfref_val),
+ MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfcp_val),
+ MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vsim_val),
+ MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vesim_val),
+ MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vcam_val),
+ MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
+ MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vvib_val),
+ MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vrf_val),
+ MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vrf_val),
+ MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vmmc_val),
+ MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vmmc_val),
+ MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC),
+ MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC),
+ MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC),
+ MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC),
};
struct mc13783_regulator_priv {
@@ -154,10 +309,140 @@ static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
return (val & mc13783_regulators[id].enable_bit) != 0;
}
+static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ int id = rdev_get_id(rdev);
+
+ if (selector >= mc13783_regulators[id].desc.n_voltages)
+ return -EINVAL;
+
+ return mc13783_regulators[id].voltages[selector];
+}
+
+static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int reg_id = rdev_get_id(rdev);
+ int i;
+ int bestmatch;
+ int bestindex;
+
+ /*
+ * Locate the minimum voltage fitting the criteria on
+ * this regulator. The switchable voltages are not
+ * in strict falling order so we need to check them
+ * all for the best match.
+ */
+ bestmatch = INT_MAX;
+ bestindex = -1;
+ for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
+ if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
+ mc13783_regulators[reg_id].voltages[i] < bestmatch) {
+ bestmatch = mc13783_regulators[reg_id].voltages[i];
+ bestindex = i;
+ }
+ }
+
+ if (bestindex < 0 || bestmatch > max_uV) {
+ dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ return bestindex;
+}
+
+static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int value, id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ /* Find the best index */
+ value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
+ dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value);
+ if (value < 0)
+ return value;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
+ mc13783_regulators[id].vsel_mask,
+ value << mc13783_regulators[id].vsel_shift);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_read(priv->mc13783,
+ mc13783_regulators[id].vsel_reg, &val);
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ val = (val & mc13783_regulators[id].vsel_mask)
+ >> mc13783_regulators[id].vsel_shift;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+ BUG_ON(val < 0 || val > mc13783_regulators[id].desc.n_voltages);
+
+ return mc13783_regulators[id].voltages[val];
+}
+
static struct regulator_ops mc13783_regulator_ops = {
.enable = mc13783_regulator_enable,
.disable = mc13783_regulator_disable,
.is_enabled = mc13783_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_regulator_set_voltage,
+ .get_voltage = mc13783_regulator_get_voltage,
+};
+
+static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ if (min_uV > mc13783_regulators[id].voltages[0] &&
+ max_uV < mc13783_regulators[id].voltages[0])
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ return mc13783_regulators[id].voltages[0];
+}
+
+static struct regulator_ops mc13783_fixed_regulator_ops = {
+ .enable = mc13783_regulator_enable,
+ .disable = mc13783_regulator_disable,
+ .is_enabled = mc13783_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_fixed_regulator_set_voltage,
+ .get_voltage = mc13783_fixed_regulator_get_voltage,
};
static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1bbff099a546..8e90acf1a183 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -290,6 +290,51 @@ static int wm8350_isink_is_enabled(struct regulator_dev *rdev)
return -EINVAL;
}
+static int wm8350_isink_enable_time(struct regulator_dev *rdev)
+{
+ struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
+ int isink = rdev_get_id(rdev);
+ int reg;
+
+ switch (isink) {
+ case WM8350_ISINK_A:
+ reg = wm8350_reg_read(wm8350, WM8350_CSA_FLASH_CONTROL);
+ break;
+ case WM8350_ISINK_B:
+ reg = wm8350_reg_read(wm8350, WM8350_CSB_FLASH_CONTROL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (reg & WM8350_CS1_FLASH_MODE) {
+ switch (reg & WM8350_CS1_ON_RAMP_MASK) {
+ case 0:
+ return 0;
+ case 1:
+ return 1950;
+ case 2:
+ return 3910;
+ case 3:
+ return 7800;
+ }
+ } else {
+ switch (reg & WM8350_CS1_ON_RAMP_MASK) {
+ case 0:
+ return 0;
+ case 1:
+ return 250000;
+ case 2:
+ return 500000;
+ case 3:
+ return 1000000;
+ }
+ }
+
+ return -EINVAL;
+}
+
+
int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
u16 trigger, u16 duration, u16 on_ramp, u16 off_ramp,
u16 drive)
@@ -1221,6 +1266,7 @@ static struct regulator_ops wm8350_isink_ops = {
.enable = wm8350_isink_enable,
.disable = wm8350_isink_disable,
.is_enabled = wm8350_isink_is_enabled,
+ .enable_time = wm8350_isink_enable_time,
};
static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
@@ -1407,7 +1453,7 @@ static int wm8350_regulator_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
- wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq);
+ wm8350_free_irq(wm8350, wm8350_reg[pdev->id].irq, rdev);
regulator_unregister(rdev);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c8c12325e69b..e9aa814ddd23 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -1096,9 +1096,9 @@ static int cmos_pnp_resume(struct pnp_dev *pnp)
#define cmos_pnp_resume NULL
#endif
-static void cmos_pnp_shutdown(struct device *pdev)
+static void cmos_pnp_shutdown(struct pnp_dev *pnp)
{
- if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(pdev))
+ if (system_state == SYSTEM_POWER_OFF && !cmos_poweroff(&pnp->dev))
return;
cmos_do_shutdown();
@@ -1117,15 +1117,12 @@ static struct pnp_driver cmos_pnp_driver = {
.id_table = rtc_ids,
.probe = cmos_pnp_probe,
.remove = __exit_p(cmos_pnp_remove),
+ .shutdown = cmos_pnp_shutdown,
/* flag ensures resume() gets called, and stops syslog spam */
.flags = PNP_DRIVER_RES_DO_NOT_CHANGE,
.suspend = cmos_pnp_suspend,
.resume = cmos_pnp_resume,
- .driver = {
- .name = (char *)driver_name,
- .shutdown = cmos_pnp_shutdown,
- }
};
#endif /* CONFIG_PNP */
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index f1e440521c54..3d0dc76b38af 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -307,11 +307,18 @@ static int wm8350_rtc_update_irq_enable(struct device *dev,
{
struct wm8350 *wm8350 = dev_get_drvdata(dev);
+ /* Suppress duplicate changes since genirq nests enable and
+ * disable calls. */
+ if (enabled == wm8350->rtc.update_enabled)
+ return 0;
+
if (enabled)
wm8350_unmask_irq(wm8350, WM8350_IRQ_RTC_SEC);
else
wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);
+ wm8350->rtc.update_enabled = enabled;
+
return 0;
}
@@ -478,8 +485,8 @@ static int __devexit wm8350_rtc_remove(struct platform_device *pdev)
struct wm8350 *wm8350 = platform_get_drvdata(pdev);
struct wm8350_rtc *wm_rtc = &wm8350->rtc;
- wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC);
- wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM);
+ wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
+ wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM, wm8350);
rtc_device_unregister(wm_rtc->rtc);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 5819dc02a143..1c500c462225 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -23,6 +23,7 @@
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
+#include <asm/compat.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
@@ -2844,13 +2845,16 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
rc = -EFAULT;
if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
goto out;
-#ifndef CONFIG_64BIT
- /* Make sure pointers are sane even on 31 bit. */
- if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
+ if (is_compat_task() || sizeof(long) == 4) {
+ /* Make sure pointers are sane even on 31 bit. */
rc = -EINVAL;
- goto out;
+ if ((usrparm.psf_data >> 32) != 0)
+ goto out;
+ if ((usrparm.rssd_result >> 32) != 0)
+ goto out;
+ usrparm.psf_data &= 0x7fffffffULL;
+ usrparm.rssd_result &= 0x7fffffffULL;
}
-#endif
/* alloc I/O data area */
psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 478bcdb90b6f..fc7b30b4a255 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -17,7 +17,7 @@
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/smp_lock.h>
-
+#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cmb.h>
#include <asm/uaccess.h>
@@ -358,9 +358,8 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
}
static int dasd_ioctl_readall_cmb(struct dasd_block *block, unsigned int cmd,
- unsigned long arg)
+ struct cmbdata __user *argp)
{
- struct cmbdata __user *argp = (void __user *) arg;
size_t size = _IOC_SIZE(cmd);
struct cmbdata data;
int ret;
@@ -376,7 +375,12 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct dasd_block *block = bdev->bd_disk->private_data;
- void __user *argp = (void __user *)arg;
+ void __user *argp;
+
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
if (!block)
return -ENODEV;
@@ -414,7 +418,7 @@ dasd_do_ioctl(struct block_device *bdev, fmode_t mode,
case BIODASDCMFDISABLE:
return disable_cmf(block->base->cdev);
case BIODASDREADALLCMB:
- return dasd_ioctl_readall_cmb(block, cmd, arg);
+ return dasd_ioctl_readall_cmb(block, cmd, argp);
default:
/* if the discipline has an ioctl method try it. */
if (block->base->discipline->ioctl) {
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 9d61683b5633..59ec073724bf 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -1037,22 +1037,6 @@ static void tty3215_flush_buffer(struct tty_struct *tty)
}
/*
- * Currently we don't have any io controls for 3215 ttys
- */
-static int tty3215_ioctl(struct tty_struct *tty, struct file * file,
- unsigned int cmd, unsigned long arg)
-{
- if (tty->flags & (1 << TTY_IO_ERROR))
- return -EIO;
-
- switch (cmd) {
- default:
- return -ENOIOCTLCMD;
- }
- return 0;
-}
-
-/*
* Disable reading from a 3215 tty
*/
static void tty3215_throttle(struct tty_struct * tty)
@@ -1117,7 +1101,6 @@ static const struct tty_operations tty3215_ops = {
.write_room = tty3215_write_room,
.chars_in_buffer = tty3215_chars_in_buffer,
.flush_buffer = tty3215_flush_buffer,
- .ioctl = tty3215_ioctl,
.throttle = tty3215_throttle,
.unthrottle = tty3215_unthrottle,
.stop = tty3215_stop,
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 247b2b934728..31c59b0d6df0 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -15,6 +15,7 @@
#include <linux/types.h>
#include <linux/smp_lock.h>
+#include <asm/compat.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/ebcdic.h>
@@ -322,6 +323,7 @@ fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *o
static long
fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
+ char __user *argp;
struct fs3270 *fp;
struct raw3270_iocb iocb;
int rc;
@@ -329,6 +331,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
fp = filp->private_data;
if (!fp)
return -ENODEV;
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (char __user *)arg;
rc = 0;
mutex_lock(&fs3270_mutex);
switch (cmd) {
@@ -339,10 +345,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
fp->write_command = arg;
break;
case TUBGETI:
- rc = put_user(fp->read_command, (char __user *) arg);
+ rc = put_user(fp->read_command, argp);
break;
case TUBGETO:
- rc = put_user(fp->write_command,(char __user *) arg);
+ rc = put_user(fp->write_command, argp);
break;
case TUBGETMOD:
iocb.model = fp->view.model;
@@ -351,8 +357,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
iocb.pf_cnt = 24;
iocb.re_cnt = 20;
iocb.map = 0;
- if (copy_to_user((char __user *) arg, &iocb,
- sizeof(struct raw3270_iocb)))
+ if (copy_to_user(argp, &iocb, sizeof(struct raw3270_iocb)))
rc = -EFAULT;
break;
}
@@ -511,8 +516,8 @@ static const struct file_operations fs3270_fops = {
.write = fs3270_write, /* write */
.unlocked_ioctl = fs3270_ioctl, /* ioctl */
.compat_ioctl = fs3270_ioctl, /* ioctl */
- .open = fs3270_open, /* open */
- .release = fs3270_close, /* release */
+ .open = fs3270_open, /* open */
+ .release = fs3270_close, /* release */
};
/*
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 96816149368a..8d3d720737da 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -46,8 +46,6 @@
*/
static int tapeblock_open(struct block_device *, fmode_t);
static int tapeblock_release(struct gendisk *, fmode_t);
-static int tapeblock_ioctl(struct block_device *, fmode_t, unsigned int,
- unsigned long);
static int tapeblock_medium_changed(struct gendisk *);
static int tapeblock_revalidate_disk(struct gendisk *);
@@ -55,7 +53,6 @@ static const struct block_device_operations tapeblock_fops = {
.owner = THIS_MODULE,
.open = tapeblock_open,
.release = tapeblock_release,
- .ioctl = tapeblock_ioctl,
.media_changed = tapeblock_medium_changed,
.revalidate_disk = tapeblock_revalidate_disk,
};
@@ -416,42 +413,6 @@ tapeblock_release(struct gendisk *disk, fmode_t mode)
}
/*
- * Support of some generic block device IOCTLs.
- */
-static int
-tapeblock_ioctl(
- struct block_device * bdev,
- fmode_t mode,
- unsigned int command,
- unsigned long arg
-) {
- int rc;
- int minor;
- struct gendisk *disk = bdev->bd_disk;
- struct tape_device *device;
-
- rc = 0;
- BUG_ON(!disk);
- device = disk->private_data;
- BUG_ON(!device);
- minor = MINOR(bdev->bd_dev);
-
- DBF_LH(6, "tapeblock_ioctl(0x%0x)\n", command);
- DBF_LH(6, "device = %d:%d\n", tapeblock_major, minor);
-
- switch (command) {
- /* Refuse some IOCTL calls without complaining (mount). */
- case 0x5310: /* CDROMMULTISESSION */
- rc = -EINVAL;
- break;
- default:
- rc = -EINVAL;
- }
-
- return rc;
-}
-
-/*
* Initialize block device frontend.
*/
int
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index a6087cec55b4..921dcda77676 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
+#include <asm/compat.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
@@ -139,21 +140,26 @@ vmcp_write(struct file *file, const char __user *buff, size_t count,
static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct vmcp_session *session;
+ int __user *argp;
int temp;
session = (struct vmcp_session *)file->private_data;
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (int __user *)arg;
if (mutex_lock_interruptible(&session->mutex))
return -ERESTARTSYS;
switch (cmd) {
case VMCP_GETCODE:
temp = session->resp_code;
mutex_unlock(&session->mutex);
- return put_user(temp, (int __user *)arg);
+ return put_user(temp, argp);
case VMCP_SETBUF:
free_pages((unsigned long)session->response,
get_order(session->bufsize));
session->response=NULL;
- temp = get_user(session->bufsize, (int __user *)arg);
+ temp = get_user(session->bufsize, argp);
if (get_order(session->bufsize) > 8) {
session->bufsize = PAGE_SIZE;
temp = -EINVAL;
@@ -163,7 +169,7 @@ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case VMCP_GETSIZE:
temp = session->resp_size;
mutex_unlock(&session->mutex);
- return put_user(temp, (int __user *)arg);
+ return put_user(temp, argp);
default:
mutex_unlock(&session->mutex);
return -ENOIOCTLCMD;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index cc5144b6f9d9..c84ac9443079 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -12,6 +12,7 @@
#include <linux/uaccess.h>
#include <linux/miscdevice.h>
+#include <asm/compat.h>
#include <asm/cio.h>
#include <asm/chsc.h>
#include <asm/isc.h>
@@ -770,24 +771,30 @@ out_free:
static long chsc_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
+ void __user *argp;
+
CHSC_MSG(2, "chsc_ioctl called, cmd=%x\n", cmd);
+ if (is_compat_task())
+ argp = compat_ptr(arg);
+ else
+ argp = (void __user *)arg;
switch (cmd) {
case CHSC_START:
- return chsc_ioctl_start((void __user *)arg);
+ return chsc_ioctl_start(argp);
case CHSC_INFO_CHANNEL_PATH:
- return chsc_ioctl_info_channel_path((void __user *)arg);
+ return chsc_ioctl_info_channel_path(argp);
case CHSC_INFO_CU:
- return chsc_ioctl_info_cu((void __user *)arg);
+ return chsc_ioctl_info_cu(argp);
case CHSC_INFO_SCH_CU:
- return chsc_ioctl_info_sch_cu((void __user *)arg);
+ return chsc_ioctl_info_sch_cu(argp);
case CHSC_INFO_CI:
- return chsc_ioctl_conf_info((void __user *)arg);
+ return chsc_ioctl_conf_info(argp);
case CHSC_INFO_CCL:
- return chsc_ioctl_conf_comp_list((void __user *)arg);
+ return chsc_ioctl_conf_comp_list(argp);
case CHSC_INFO_CPD:
- return chsc_ioctl_chpd((void __user *)arg);
+ return chsc_ioctl_chpd(argp);
case CHSC_INFO_DCAL:
- return chsc_ioctl_dcal((void __user *)arg);
+ return chsc_ioctl_dcal(argp);
default: /* unknown ioctl number */
return -ENOIOCTLCMD;
}
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 0d4d18bdd45c..c68be24e27d9 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -393,10 +393,12 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
* u_mult_inv > 128 bytes.
*/
if (copied == 0) {
- int len;
+ unsigned int len;
spin_unlock_bh(&zcrypt_device_lock);
/* len is max 256 / 2 - 120 = 8 */
len = crt->inputdatalength / 2 - 120;
+ if (len > sizeof(z1))
+ return -EFAULT;
z1 = z2 = z3 = 0;
if (copy_from_user(&z1, crt->np_prime, len) ||
copy_from_user(&z2, crt->bp_key, len) ||
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index 3c77bfe0764c..147bb1a69aba 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -3398,7 +3398,7 @@ claw_init(void)
goto out_err;
}
CLAW_DBF_TEXT(2, setup, "init_mod");
- claw_root_dev = root_device_register("qeth");
+ claw_root_dev = root_device_register("claw");
ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
if (ret)
goto register_err;
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0b763396d5d1..038299ae3feb 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -486,22 +486,14 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
case IPA_RC_L2_DUP_MAC:
case IPA_RC_L2_DUP_LAYER3_MAC:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "already exists\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM already exists\n",
+ card->dev->dev_addr);
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "is not authorized\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM is not authorized\n",
+ card->dev->dev_addr);
break;
default:
break;
@@ -512,12 +504,8 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
OSA_ADDR_LEN);
dev_info(&card->gdev->dev,
- "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "successfully registered on device %s\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5],
- card->dev->name);
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
}
return 0;
}
diff --git a/drivers/scsi/cxgb3i/cxgb3i_offload.c b/drivers/scsi/cxgb3i/cxgb3i_offload.c
index 26ffdcd5a437..15a00e8b7122 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_offload.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_offload.c
@@ -1440,6 +1440,10 @@ void cxgb3i_c3cn_release(struct s3_conn *c3cn)
static int is_cxgb3_dev(struct net_device *dev)
{
struct cxgb3i_sdev_data *cdata;
+ struct net_device *ndev = dev;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN)
+ ndev = vlan_dev_real_dev(dev);
write_lock(&cdata_rwlock);
list_for_each_entry(cdata, &cdata_list, list) {
@@ -1447,7 +1451,7 @@ static int is_cxgb3_dev(struct net_device *dev)
int i;
for (i = 0; i < ports->nports; i++)
- if (dev == ports->lldevs[i]) {
+ if (ndev == ports->lldevs[i]) {
write_unlock(&cdata_rwlock);
return 1;
}
@@ -1566,6 +1570,26 @@ out_err:
return -EINVAL;
}
+/**
+ * cxgb3i_find_dev - find the interface associated with the given address
+ * @ipaddr: ip address
+ */
+static struct net_device *
+cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr)
+{
+ struct flowi fl;
+ int err;
+ struct rtable *rt;
+
+ memset(&fl, 0, sizeof(fl));
+ fl.nl_u.ip4_u.daddr = ipaddr;
+
+ err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl);
+ if (!err)
+ return (&rt->u.dst)->dev;
+
+ return NULL;
+}
/**
* cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address
@@ -1581,6 +1605,7 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
struct cxgb3i_sdev_data *cdata;
struct t3cdev *cdev;
__be32 sipv4;
+ struct net_device *dstdev;
int err;
c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
@@ -1591,6 +1616,13 @@ int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
c3cn->daddr.sin_port = usin->sin_port;
c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
+ dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr);
+ if (!dstdev || !is_cxgb3_dev(dstdev))
+ return -ENETUNREACH;
+
+ if (dstdev->priv_flags & IFF_802_1Q_VLAN)
+ dev = dstdev;
+
rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
c3cn->daddr.sin_addr.s_addr,
c3cn->saddr.sin_port,
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index ce522702a6c1..2cc39684ce97 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4142,8 +4142,8 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
spin_lock_irq(shost->host_lock);
if (vport->fc_rscn_flush) {
/* Another thread is walking fc_rscn_id_list on this vport */
- spin_unlock_irq(shost->host_lock);
vport->fc_flag |= FC_RSCN_DISCOVERY;
+ spin_unlock_irq(shost->host_lock);
/* Send back ACC */
lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
return 0;
@@ -5948,8 +5948,8 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lpfc_initial_fdisc(vport);
break;
}
-
} else {
+ vport->vpi_state |= LPFC_VPI_REGISTERED;
if (vport == phba->pport)
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 3b9424427652..2445e399fd60 100755..100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -747,6 +747,10 @@ lpfc_linkdown(struct lpfc_hba *phba)
if (phba->link_state == LPFC_LINK_DOWN)
return 0;
+
+ /* Block all SCSI stack I/Os */
+ lpfc_scsi_dev_block(phba);
+
spin_lock_irq(&phba->hbalock);
phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
if (phba->link_state > LPFC_LINK_DOWN) {
@@ -1555,10 +1559,16 @@ lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* to book keeping the FCFIs can be used.
*/
if (shdr_status || shdr_add_status) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2521 READ_FCF_RECORD mailbox failed "
- "with status x%x add_status x%x, mbx\n",
- shdr_status, shdr_add_status);
+ if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2726 READ_FCF_RECORD Indicates empty "
+ "FCF table.\n");
+ } else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2521 READ_FCF_RECORD mailbox failed "
+ "with status x%x add_status x%x, mbx\n",
+ shdr_status, shdr_add_status);
+ }
goto out;
}
/* Interpreting the returned information of FCF records */
@@ -1698,7 +1708,9 @@ lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
return;
}
+ spin_lock_irq(&phba->hbalock);
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+ spin_unlock_irq(&phba->hbalock);
if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
lpfc_initial_fdisc(vport);
@@ -2259,7 +2271,10 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->mbxStatus);
break;
}
+ spin_lock_irq(&phba->hbalock);
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
+ vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(&phba->hbalock);
vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
/*
@@ -4475,8 +4490,10 @@ lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
lpfc_mbx_unreg_vpi(vports[i]);
+ spin_lock_irq(&phba->hbalock);
vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
+ spin_unlock_irq(&phba->hbalock);
}
lpfc_destroy_vport_work_array(phba, vports);
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 1585148a17e5..8a2a1c5935c6 100644..100755
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1013,7 +1013,7 @@ struct lpfc_mbx_wq_destroy {
};
#define LPFC_HDR_BUF_SIZE 128
-#define LPFC_DATA_BUF_SIZE 4096
+#define LPFC_DATA_BUF_SIZE 2048
struct rq_context {
uint32_t word0;
#define lpfc_rq_context_rq_size_SHIFT 16
@@ -1371,6 +1371,7 @@ struct lpfc_mbx_query_fw_cfg {
#define STATUS_ERROR_ACITMAIN 0x2a
#define STATUS_REBOOT_REQUIRED 0x2c
#define STATUS_FCF_IN_USE 0x3a
+#define STATUS_FCF_TABLE_EMPTY 0x43
struct lpfc_mbx_sli4_config {
struct mbox_header header;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d4da6bdd0e73..b8eb1b6e5e77 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3006,6 +3006,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
struct lpfc_vport *vport;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
+ uint32_t link_state;
phba->fc_eventTag = acqe_fcoe->event_tag;
phba->fcoe_eventtag = acqe_fcoe->event_tag;
@@ -3052,9 +3053,12 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
break;
/*
* Currently, driver support only one FCF - so treat this as
- * a link down.
+ * a link down, but save the link state because we don't want
+ * it to be changed to Link Down unless it is already down.
*/
+ link_state = phba->link_state;
lpfc_linkdown(phba);
+ phba->link_state = link_state;
/* Unregister FCF if no devices connected to it */
lpfc_unregister_unused_fcf(phba);
break;
@@ -7226,8 +7230,6 @@ lpfc_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2711 PCI channel permanent disable for failure\n");
- /* Block all SCSI devices' I/Os on the host */
- lpfc_scsi_dev_block(phba);
/* Clean up all driver's outstanding SCSI I/Os */
lpfc_sli_flush_fcp_rings(phba);
}
@@ -7256,6 +7258,9 @@ lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+ /* Block all SCSI devices' I/Os on the host */
+ lpfc_scsi_dev_block(phba);
+
switch (state) {
case pci_channel_io_normal:
/* Non-fatal error, prepare for recovery */
@@ -7507,6 +7512,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
error = -ENODEV;
goto out_free_sysfs_attr;
}
+ /* Default to single FCP EQ for non-MSI-X */
+ if (phba->intr_type != MSIX)
+ phba->cfg_fcp_eq_count = 1;
/* Set up SLI-4 HBA */
if (lpfc_sli4_hba_setup(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index a246410ce9df..28c6bfd3e82e 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1574,7 +1574,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
case LPFC_PG_TYPE_NO_DIF:
num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
datasegcnt);
- /* we shoud have 2 or more entries in buffer list */
+ /* we should have 2 or more entries in buffer list */
if (num_bde < 2)
goto err;
break;
@@ -1611,7 +1611,7 @@ lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba,
num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
datasegcnt, protsegcnt);
- /* we shoud have 3 or more entries in buffer list */
+ /* we should have 3 or more entries in buffer list */
if (num_bde < 3)
goto err;
break;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7935667b81a5..589549b2bf0e 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1383,7 +1383,7 @@ lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
/* HBQ for ELS and CT traffic. */
static struct lpfc_hbq_init lpfc_els_hbq = {
.rn = 1,
- .entry_count = 200,
+ .entry_count = 256,
.mask_count = 0,
.profile = 0,
.ring_mask = (1 << LPFC_ELS_RING),
@@ -1482,8 +1482,11 @@ err:
int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->add_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return 0;
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->add_count);
}
/**
@@ -1498,8 +1501,12 @@ lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
static int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
- return(lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
- lpfc_hbq_defs[qno]->init_count));
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->entry_count);
+ else
+ return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
+ lpfc_hbq_defs[qno]->init_count);
}
/**
@@ -4110,6 +4117,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (rc) {
dma_free_coherent(&phba->pcidev->dev, dma_size,
dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
return -EIO;
}
@@ -5848,7 +5856,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.un.ulpWord[3]);
wqe->generic.word3 = 0;
bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
- bf_set(wqe_xc, &wqe->generic, 1);
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
@@ -10944,7 +10951,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
return dmabuf;
}
temp_hdr = seq_dmabuf->hbuf.virt;
- if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) <
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_del_init(&seq_dmabuf->hbuf.list);
list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
@@ -10955,6 +10963,11 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
seq_dmabuf->time_stamp = jiffies;
lpfc_update_rcv_time_stamp(vport);
+ if (list_empty(&seq_dmabuf->dbuf.list)) {
+ temp_hdr = dmabuf->hbuf.virt;
+ list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
+ return seq_dmabuf;
+ }
/* find the correct place in the sequence to insert this frame */
list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
@@ -10963,7 +10976,8 @@ lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
* If the frame's sequence count is greater than the frame on
* the list then insert the frame right after this frame
*/
- if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+ if (be16_to_cpu(new_hdr->fh_seq_cnt) >
+ be16_to_cpu(temp_hdr->fh_seq_cnt)) {
list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
return seq_dmabuf;
}
@@ -11210,7 +11224,7 @@ lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* If there is a hole in the sequence count then fail. */
- if (++seq_count != hdr->fh_seq_cnt)
+ if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
return 0;
fctl = (hdr->fh_f_ctl[0] << 16 |
hdr->fh_f_ctl[1] << 8 |
@@ -11242,6 +11256,7 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
struct lpfc_iocbq *first_iocbq, *iocbq;
struct fc_frame_header *fc_hdr;
uint32_t sid;
+ struct ulp_bde64 *pbde;
fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
/* remove from receive buffer list */
@@ -11283,8 +11298,9 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
- LPFC_DATA_BUF_SIZE;
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
bf_get(lpfc_rcqe_length,
&seq_dmabuf->cq_event.cqe.rcqe_cmpl);
@@ -11401,15 +11417,9 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
return;
}
/* If not last frame in sequence continue processing frames. */
- if (!lpfc_seq_complete(seq_dmabuf)) {
- /*
- * When saving off frames post a new one and mark this
- * frame to be freed when it is finished.
- **/
- lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
- dmabuf->tag = -1;
+ if (!lpfc_seq_complete(seq_dmabuf))
return;
- }
+
/* Send the complete sequence to the upper layer protocol */
lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 25d66d070cf8..44e5f574236b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -28,7 +28,7 @@
/* Multi-queue arrangement for fast-path FCP work queues */
#define LPFC_FN_EQN_MAX 8
#define LPFC_SP_EQN_DEF 1
-#define LPFC_FP_EQN_DEF 1
+#define LPFC_FP_EQN_DEF 4
#define LPFC_FP_EQN_MIN 1
#define LPFC_FP_EQN_MAX (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c7f3aed2aab8..792f72263f1a 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.6"
+#define LPFC_DRIVER_VERSION "8.3.7"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 7d6dd83d3592..e3c7fa642306 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -512,8 +512,10 @@ enable_vport(struct fc_vport *fc_vport)
return VPORT_OK;
}
+ spin_lock_irq(&phba->hbalock);
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+ spin_unlock_irq(&phba->hbalock);
/* Use the Physical nodes Fabric NDLP to determine if the link is
* up and ready to FDISC.
@@ -700,7 +702,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
spin_unlock_irq(&phba->ndlp_lock);
}
- if (vport->vpi_state != LPFC_VPI_REGISTERED)
+ if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
goto skip_logo;
vport->unreg_vpi_cmpl = VPORT_INVAL;
timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 24223473f573..60de85091502 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1433,6 +1433,10 @@ int osd_finalize_request(struct osd_request *or,
cdbh->command_specific_options |= or->attributes_mode;
if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
ret = _osd_req_finalize_attr_page(or);
+ if (ret) {
+ OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
+ return ret;
+ }
} else {
/* TODO: I think that for the GET_ATTR command these 2 should
* be reversed to keep them in execution order (for embeded
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index e7d2688fbeba..b6f1ef954af1 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2483,14 +2483,12 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
sense_copied = 1;
}
- if (RES_IS_GSCSI(res->cfg_entry)) {
+ if (RES_IS_GSCSI(res->cfg_entry))
pmcraid_cancel_all(cmd, sense_copied);
- } else if (sense_copied) {
+ else if (sense_copied)
pmcraid_erp_done(cmd);
- return 0;
- } else {
+ else
pmcraid_request_sense(cmd);
- }
return 1;
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 21e2bc4d7401..3a9f5b288aee 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -232,6 +232,9 @@ qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
if (off)
return 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
return -EINVAL;
if (start > ha->optrom_size)
@@ -379,6 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN))
return 0;
@@ -398,6 +404,9 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
uint8_t *tmp_data;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return 0;
+
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -1238,10 +1247,11 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval;
+ int rval = QLA_FUNCTION_FAILED;
uint16_t state[5];
- rval = qla2x00_get_firmware_state(vha, state);
+ if (!vha->hw->flags.eeh_busy)
+ rval = qla2x00_get_firmware_state(vha, state);
if (rval != QLA_SUCCESS)
memset(state, -1, sizeof(state));
@@ -1452,10 +1462,13 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
if (!fcport)
return;
- if (unlikely(pci_channel_offline(fcport->vha->hw->pdev)))
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
+ if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
- else
- qla2x00_abort_fcport_cmds(fcport);
+ return;
+ }
/*
* Transport has effectively 'deleted' the rport, clear
@@ -1475,6 +1488,9 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
if (!fcport)
return;
+ if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
+ return;
+
if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
return;
@@ -1515,6 +1531,12 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat = &ha->fc_host_stat;
memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ goto done;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto done;
+
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index f660dd70b72e..d6d9c86cb058 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -26,7 +26,7 @@
/* #define QL_DEBUG_LEVEL_14 */ /* Output RSCN trace msgs */
/* #define QL_DEBUG_LEVEL_15 */ /* Output NPIV trace msgs */
/* #define QL_DEBUG_LEVEL_16 */ /* Output ISP84XX trace msgs */
-/* #define QL_DEBUG_LEVEL_17 */ /* Output MULTI-Q trace messages */
+/* #define QL_DEBUG_LEVEL_17 */ /* Output EEH trace messages */
/*
* Macros use for debugging the driver.
@@ -132,6 +132,13 @@
#else
#define DEBUG16(x) do {} while (0)
#endif
+
+#if defined(QL_DEBUG_LEVEL_17)
+#define DEBUG17(x) do {x;} while (0)
+#else
+#define DEBUG17(x) do {} while (0)
+#endif
+
/*
* Firmware Dump structure definition
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 384afda7dbe9..608e675f68c8 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2256,11 +2256,13 @@ struct qla_hw_data {
uint32_t disable_serdes :1;
uint32_t gpsc_supported :1;
uint32_t npiv_supported :1;
+ uint32_t pci_channel_io_perm_failure :1;
uint32_t fce_enabled :1;
uint32_t fac_supported :1;
uint32_t chip_reset_done :1;
uint32_t port0 :1;
uint32_t running_gold_fw :1;
+ uint32_t eeh_busy :1;
uint32_t cpu_affinity_enabled :1;
uint32_t disable_msix_handshake :1;
} flags;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0b6801fc6389..f61fb8d01330 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -324,6 +324,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
extern int
qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int qla2x00_get_data_rate(scsi_qla_host_t *);
/*
* Global Function Prototypes in qla_isr.c source file.
*/
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 73a793539d45..b4a0eac8f96d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -269,6 +269,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
vha->flags.reset_active = 0;
+ ha->flags.pci_channel_io_perm_failure = 0;
+ ha->flags.eeh_busy = 0;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -581,6 +583,9 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
uint32_t cnt;
uint16_t cmd;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return;
+
ha->isp_ops->disable_intrs(ha);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -786,6 +791,12 @@ void
qla24xx_reset_chip(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+
+ if (pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure) {
+ return;
+ }
+
ha->isp_ops->disable_intrs(ha);
/* Perform RISC reset. */
@@ -2266,6 +2277,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
clear_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
clear_bit(RSCN_UPDATE, &vha->dpc_flags);
+ qla2x00_get_data_rate(vha);
+
/* Determine what we need to do */
if (ha->current_topology == ISP_CFG_FL &&
(test_bit(LOCAL_LOOP_UPDATE, &flags))) {
@@ -3560,6 +3573,13 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
/* Requeue all commands in outstanding command list. */
qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (unlikely(pci_channel_offline(ha->pdev) &&
+ ha->flags.pci_channel_io_perm_failure)) {
+ clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
+ status = 0;
+ return status;
+ }
+
ha->isp_ops->get_flash_version(vha, req->ring);
ha->isp_ops->nvram_config(vha);
@@ -4458,6 +4478,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
int ret, retries;
struct qla_hw_data *ha = vha->hw;
+ if (ha->flags.pci_channel_io_perm_failure)
+ return;
if (!IS_FWI2_CAPABLE(ha))
return;
if (!ha->fw_major_version)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 1692a883f4de..ffd0efdff40e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -152,7 +152,7 @@ qla2300_intr_handler(int irq, void *dev_id)
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
if (stat & HSR_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_WORD(&reg->hccr);
@@ -1846,12 +1846,15 @@ qla24xx_intr_handler(int irq, void *dev_id)
reg = &ha->iobase->isp24;
status = 0;
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
vha = pci_get_drvdata(ha->pdev);
for (iter = 50; iter--; ) {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_DWORD(&reg->hccr);
@@ -1992,7 +1995,7 @@ qla24xx_msix_default(int irq, void *dev_id)
do {
stat = RD_REG_DWORD(&reg->host_status);
if (stat & HSRX_RISC_PAUSED) {
- if (pci_channel_offline(ha->pdev))
+ if (unlikely(pci_channel_offline(ha->pdev)))
break;
hccr = RD_REG_DWORD(&reg->hccr);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 05d595d9a7ef..056e4d4505f3 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -56,6 +56,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
DEBUG11(printk("%s(%ld): entered.\n", __func__, base_vha->host_no));
+ if (ha->flags.pci_channel_io_perm_failure) {
+ DEBUG(printk("%s(%ld): Perm failure on EEH, timeout MBX "
+ "Exiting.\n", __func__, vha->host_no));
+ return QLA_FUNCTION_TIMEOUT;
+ }
+
/*
* Wait for active mailbox commands to finish by waiting at most tov
* seconds. This is to serialize actual issuing of mailbox cmds during
@@ -154,10 +160,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
/* Check for pending interrupts. */
qla2x00_poll(ha->rsp_q_map[0]);
- if (command != MBC_LOAD_RISC_RAM_EXTENDED &&
- !ha->flags.mbox_int)
+ if (!ha->flags.mbox_int &&
+ !(IS_QLA2200(ha) &&
+ command == MBC_LOAD_RISC_RAM_EXTENDED))
msleep(10);
} /* while */
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "Waited %d sec\n",
+ (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ)));
}
/* Check whether we timed out */
@@ -227,7 +237,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (rval == QLA_FUNCTION_TIMEOUT &&
mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
- if (!io_lock_on || (mcp->flags & IOCTL_CMD)) {
+ if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
+ ha->flags.eeh_busy) {
/* not in dpc. schedule it for dpc to take over. */
DEBUG(printk("%s(%ld): timeout schedule "
"isp_abort_needed.\n", __func__,
@@ -237,7 +248,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
base_vha->host_no));
qla_printk(KERN_WARNING, ha,
"Mailbox command timeout occurred. Scheduling ISP "
- "abort.\n");
+ "abort. eeh_busy: 0x%x\n", ha->flags.eeh_busy);
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
qla2xxx_wake_dpc(vha);
} else if (!abort_active) {
@@ -2530,6 +2541,9 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2565,6 +2579,9 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2595,6 +2612,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -2639,6 +2659,9 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
if (!IS_FWI2_CAPABLE(vha->hw))
return QLA_FUNCTION_FAILED;
+ if (unlikely(pci_channel_offline(vha->hw->pdev)))
+ return QLA_FUNCTION_FAILED;
+
DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
mcp->mb[0] = MBC_TRACE_CONTROL;
@@ -3643,3 +3666,36 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
return rval;
}
+
+int
+qla2x00_get_data_rate(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ DEBUG11(printk(KERN_INFO "%s(%ld): entered.\n", __func__, vha->host_no));
+
+ mcp->mb[0] = MBC_DATA_RATE;
+ mcp->mb[1] = 0;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_2|MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2_3_11(printk(KERN_INFO "%s(%ld): failed=%x mb[0]=%x.\n",
+ __func__, vha->host_no, rval, mcp->mb[0]));
+ } else {
+ DEBUG11(printk(KERN_INFO
+ "%s(%ld): done.\n", __func__, vha->host_no));
+ if (mcp->mb[1] != 0x7)
+ ha->link_data_rate = mcp->mb[1];
+ }
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 2a4c7f4e7b69..b901aa267e7d 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -639,8 +639,10 @@ static void qla_do_work(struct work_struct *work)
struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
struct scsi_qla_host *vha;
+ spin_lock_irq(&rsp->hw->hardware_lock);
vha = qla25xx_get_host(rsp);
qla24xx_process_response_queue(vha, rsp);
+ spin_unlock_irq(&rsp->hw->hardware_lock);
}
/* create response queue */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 2f873d237325..209f50e788a1 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -475,11 +475,11 @@ qla2xxx_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
srb_t *sp;
int rval;
- if (unlikely(pci_channel_offline(ha->pdev))) {
- if (ha->pdev->error_state == pci_channel_io_frozen)
- cmd->result = DID_REQUEUE << 16;
- else
+ if (ha->flags.eeh_busy) {
+ if (ha->flags.pci_channel_io_perm_failure)
cmd->result = DID_NO_CONNECT << 16;
+ else
+ cmd->result = DID_REQUEUE << 16;
goto qc24_fail_command;
}
@@ -552,8 +552,15 @@ qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
#define ABORT_POLLING_PERIOD 1000
#define ABORT_WAIT_ITER ((10 * 1000) / (ABORT_POLLING_PERIOD))
unsigned long wait_iter = ABORT_WAIT_ITER;
+ scsi_qla_host_t *vha = shost_priv(cmd->device->host);
+ struct qla_hw_data *ha = vha->hw;
int ret = QLA_SUCCESS;
+ if (unlikely(pci_channel_offline(ha->pdev)) || ha->flags.eeh_busy) {
+ DEBUG17(qla_printk(KERN_WARNING, ha, "return:eh_wait\n"));
+ return ret;
+ }
+
while (CMD_SP(cmd) && wait_iter--) {
msleep(ABORT_POLLING_PERIOD);
}
@@ -1810,6 +1817,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set ISP-type information. */
qla2x00_set_isp_flags(ha);
+
+ /* Set EEH reset type to fundamental if required by hba */
+ if ( IS_QLA24XX(ha) || IS_QLA25XX(ha) || IS_QLA81XX(ha)) {
+ pdev->needs_freset = 1;
+ pci_save_state(pdev);
+ }
+
/* Configure PCI I/O space */
ret = qla2x00_iospace_config(ha);
if (ret)
@@ -2174,6 +2188,24 @@ qla2x00_free_device(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+
+ /* Disable timer */
+ if (vha->timer_active)
+ qla2x00_stop_timer(vha);
+
+ /* Kill the kernel thread for this host */
+ if (ha->dpc_thread) {
+ struct task_struct *t = ha->dpc_thread;
+
+ /*
+ * qla2xxx_wake_dpc checks for ->dpc_thread
+ * so we need to zero it out.
+ */
+ ha->dpc_thread = NULL;
+ kthread_stop(t);
+ }
+
qla25xx_delete_queues(vha);
if (ha->flags.fce_enabled)
@@ -2185,6 +2217,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
/* Stop currently executing firmware. */
qla2x00_try_to_stop_firmware(vha);
+ vha->flags.online = 0;
+
/* turn-off interrupts on the card */
if (ha->interrupts_on)
ha->isp_ops->disable_intrs(ha);
@@ -2859,6 +2893,13 @@ qla2x00_do_dpc(void *data)
if (!base_vha->flags.init_done)
continue;
+ if (ha->flags.eeh_busy) {
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "qla2x00_do_dpc: dpc_flags: %lx\n",
+ base_vha->dpc_flags));
+ continue;
+ }
+
DEBUG3(printk("scsi(%ld): DPC handler\n", base_vha->host_no));
ha->dpc_active = 1;
@@ -3049,8 +3090,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
int index;
srb_t *sp;
int t;
+ uint16_t w;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+
+ /* Hardware read to raise pending EEH errors during mailbox waits. */
+ if (!pci_channel_offline(ha->pdev))
+ pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
/*
* Ports - Port down timer.
*
@@ -3252,16 +3298,23 @@ qla2x00_release_firmware(void)
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
- scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+ scsi_qla_host_t *vha = pci_get_drvdata(pdev);
+ struct qla_hw_data *ha = vha->hw;
+
+ DEBUG2(qla_printk(KERN_WARNING, ha, "error_detected:state %x\n",
+ state));
switch (state) {
case pci_channel_io_normal:
+ ha->flags.eeh_busy = 0;
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ ha->flags.eeh_busy = 1;
pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
- qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+ ha->flags.pci_channel_io_perm_failure = 1;
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -3312,6 +3365,8 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int rc;
+ DEBUG17(qla_printk(KERN_WARNING, ha, "slot_reset\n"));
+
if (ha->mem_only)
rc = pci_enable_device_mem(pdev);
else
@@ -3320,19 +3375,33 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev)
if (rc) {
qla_printk(KERN_WARNING, ha,
"Can't re-enable PCI device after reset.\n");
-
return ret;
}
- pci_set_master(pdev);
if (ha->isp_ops->pci_config(base_vha))
return ret;
+#ifdef QL_DEBUG_LEVEL_17
+ {
+ uint8_t b;
+ uint32_t i;
+
+ printk("slot_reset_1: ");
+ for (i = 0; i < 256; i++) {
+ pci_read_config_byte(ha->pdev, i, &b);
+ printk("%s%02x", (i%16) ? " " : "\n", b);
+ }
+ printk("\n");
+ }
+#endif
set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
if (qla2x00_abort_isp(base_vha) == QLA_SUCCESS)
ret = PCI_ERS_RESULT_RECOVERED;
clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+ DEBUG17(qla_printk(KERN_WARNING, ha,
+ "slot_reset-return:ret=%x\n", ret));
+
return ret;
}
@@ -3343,12 +3412,17 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
struct qla_hw_data *ha = base_vha->hw;
int ret;
+ DEBUG17(qla_printk(KERN_WARNING, ha, "pci_resume\n"));
+
ret = qla2x00_wait_for_hba_online(base_vha);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_ERR, ha,
"the device failed to resume I/O "
"from slot/link_reset");
}
+
+ ha->flags.eeh_busy = 0;
+
pci_cleanup_aer_uncorrect_error_status(pdev);
}
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index c482220f7eed..a65dd95507c6 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.01-k8"
+#define QLA2XXX_VERSION "8.03.01-k9"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d8927681ec88..c6642423cc67 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
*/
req->next_rq->resid_len = scsi_in(cmd)->resid;
+ scsi_release_buffers(cmd);
blk_end_request_all(req, 0);
- scsi_release_buffers(cmd);
scsi_next_command(cmd);
return;
}
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 3058bb1aff95..fd7b15be7640 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -623,6 +623,11 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
}
break;
case INQUIRY:
+ if (lun >= host->max_lun) {
+ cmd->result = DID_NO_CONNECT << 16;
+ done(cmd);
+ return 0;
+ }
if (id != host->max_id - 1)
break;
if (!lun && !cmd->device->channel &&
diff --git a/drivers/serial/21285.c b/drivers/serial/21285.c
index 1e3d19397a59..8681f1345056 100644
--- a/drivers/serial/21285.c
+++ b/drivers/serial/21285.c
@@ -58,7 +58,7 @@ static const char serial21285_name[] = "Footbridge UART";
static void serial21285_stop_tx(struct uart_port *port)
{
if (tx_enabled(port)) {
- disable_irq(IRQ_CONTX);
+ disable_irq_nosync(IRQ_CONTX);
tx_enabled(port) = 0;
}
}
@@ -74,7 +74,7 @@ static void serial21285_start_tx(struct uart_port *port)
static void serial21285_stop_rx(struct uart_port *port)
{
if (rx_enabled(port)) {
- disable_irq(IRQ_CONRX);
+ disable_irq_nosync(IRQ_CONRX);
rx_enabled(port) = 0;
}
}
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index ed605cf05a42..c87f7bdbf3b1 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -1217,12 +1217,6 @@ static void autoconfig(struct uart_8250_port *up, unsigned int probeflags)
}
#endif
-#ifdef CONFIG_SERIAL_8250_AU1X00
- /* if access method is AU, it is a 16550 with a quirk */
- if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
- up->bugs |= UART_BUG_NOMSR;
-#endif
-
serial_outp(up, UART_LCR, save_lcr);
if (up->capabilities != uart_config[up->port.type].flags) {
@@ -1896,8 +1890,8 @@ static int serial8250_get_poll_char(struct uart_port *port)
struct uart_8250_port *up = (struct uart_8250_port *)port;
unsigned char lsr = serial_inp(up, UART_LSR);
- while (!(lsr & UART_LSR_DR))
- lsr = serial_inp(up, UART_LSR);
+ if (!(lsr & UART_LSR_DR))
+ return NO_POLL_CHAR;
return serial_inp(up, UART_RX);
}
@@ -2428,7 +2422,7 @@ serial8250_pm(struct uart_port *port, unsigned int state,
static unsigned int serial8250_port_size(struct uart_8250_port *pt)
{
if (pt->port.iotype == UPIO_AU)
- return 0x100000;
+ return 0x1000;
#ifdef CONFIG_ARCH_OMAP
if (is_omap_port(pt))
return 0x16 << pt->port.regshift;
@@ -2585,6 +2579,13 @@ static void serial8250_config_port(struct uart_port *port, int flags)
if (flags & UART_CONFIG_TYPE)
autoconfig(up, probeflags);
+
+#ifdef CONFIG_SERIAL_8250_AU1X00
+ /* if access method is AU, it is a 16550 with a quirk */
+ if (up->port.type == PORT_16550A && up->port.iotype == UPIO_AU)
+ up->bugs |= UART_BUG_NOMSR;
+#endif
+
if (up->port.type != PORT_UNKNOWN && flags & UART_CONFIG_IRQ)
autoconfig_irq(up);
diff --git a/drivers/serial/amba-pl011.c b/drivers/serial/amba-pl011.c
index ef7adc8135dd..b4111159386f 100644
--- a/drivers/serial/amba-pl011.c
+++ b/drivers/serial/amba-pl011.c
@@ -335,9 +335,9 @@ static int pl010_get_poll_char(struct uart_port *port)
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int status;
- do {
- status = readw(uap->port.membase + UART01x_FR);
- } while (status & UART01x_FR_RXFE);
+ status = readw(uap->port.membase + UART01x_FR);
+ if (status & UART01x_FR_RXFE)
+ return NO_POLL_CHAR;
return readw(uap->port.membase + UART01x_DR);
}
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index eadc1ab6bbce..05d16055423c 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -14,7 +14,10 @@
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/kgdb.h>
+#include <linux/kdb.h>
#include <linux/tty.h>
+#include <linux/console.h>
+#include <linux/input.h>
#define MAX_CONFIG_LEN 40
@@ -22,6 +25,8 @@ static struct kgdb_io kgdboc_io_ops;
/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
static int configured = -1;
+static int kgdboc_use_kbd; /* 1 if we use a keyboard */
+static int kgdboc_use_kms; /* 1 if we use kernel mode switching */
static char config[MAX_CONFIG_LEN];
static struct kparam_string kps = {
@@ -45,25 +50,85 @@ static int kgdboc_option_setup(char *opt)
__setup("kgdboc=", kgdboc_option_setup);
+static void cleanup_kgdboc(void)
+{
+#ifdef CONFIG_KDB_KEYBOARD
+ int i;
+
+ /* Unregister the keyboard poll hook, if registered */
+ for (i = 0; i < kdb_poll_idx; i++) {
+ if (kdb_poll_funcs[i] == kdb_get_kbd_char) {
+ kdb_poll_idx--;
+ kdb_poll_funcs[i] = kdb_poll_funcs[kdb_poll_idx];
+ kdb_poll_funcs[kdb_poll_idx] = NULL;
+ i--;
+ }
+ }
+#endif /* CONFIG_KDB_KEYBOARD */
+
+ if (configured == 1)
+ kgdb_unregister_io_module(&kgdboc_io_ops);
+}
+
static int configure_kgdboc(void)
{
struct tty_driver *p;
int tty_line = 0;
int err;
+ char *cptr = config;
+ struct console *cons;
err = kgdboc_option_setup(config);
if (err || !strlen(config) || isspace(config[0]))
goto noconfig;
err = -ENODEV;
+ kgdboc_io_ops.is_console = 0;
+ kgdboc_use_kbd = 0;
+
+ kgdboc_use_kms = 0;
+ if (strncmp(cptr, "kms,", 4) == 0) {
+ cptr += 4;
+ kgdboc_use_kms = 1;
+ }
+#ifdef CONFIG_KDB_KEYBOARD
+ kgdb_tty_driver = NULL;
+
+ if (strncmp(cptr, "kbd", 3) == 0) {
+ if (kdb_poll_idx < KDB_POLL_FUNC_MAX) {
+ kdb_poll_funcs[kdb_poll_idx] = kdb_get_kbd_char;
+ kdb_poll_idx++;
+ kgdboc_use_kbd = 1;
+ if (cptr[3] == ',')
+ cptr += 4;
+ else
+ goto do_register;
+ }
+ }
+#endif /* CONFIG_KDB_KEYBOARD */
- p = tty_find_polling_driver(config, &tty_line);
+ p = tty_find_polling_driver(cptr, &tty_line);
if (!p)
goto noconfig;
+ cons = console_drivers;
+ while (cons) {
+ int idx;
+ if (cons->device && cons->device(cons, &idx) == p &&
+ idx == tty_line) {
+ kgdboc_io_ops.is_console = 1;
+ break;
+ }
+ cons = cons->next;
+ }
+
kgdb_tty_driver = p;
kgdb_tty_line = tty_line;
+#ifdef CONFIG_KDB_KEYBOARD
+do_register:
+#endif /* CONFIG_KDB_KEYBOARD */
+
err = kgdb_register_io_module(&kgdboc_io_ops);
if (err)
goto noconfig;
@@ -75,6 +140,7 @@ static int configure_kgdboc(void)
noconfig:
config[0] = 0;
configured = 0;
+ cleanup_kgdboc();
return err;
}
@@ -88,20 +154,18 @@ static int __init init_kgdboc(void)
return configure_kgdboc();
}
-static void cleanup_kgdboc(void)
-{
- if (configured == 1)
- kgdb_unregister_io_module(&kgdboc_io_ops);
-}
-
static int kgdboc_get_char(void)
{
+ if (!kgdb_tty_driver)
+ return -1;
return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
kgdb_tty_line);
}
static void kgdboc_put_char(u8 chr)
{
+ if (!kgdb_tty_driver)
+ return;
kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
kgdb_tty_line, chr);
}
@@ -140,8 +204,19 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
return configure_kgdboc();
}
+static int dbg_restore_graphics;
+
static void kgdboc_pre_exp_handler(void)
{
+ if (!dbg_restore_graphics && kgdboc_use_kms && dbg_kms_console_core &&
+ dbg_kms_console_core->activate_console) {
+ if (dbg_kms_console_core->activate_console(dbg_kms_console_core)) {
+ printk(KERN_ERR "kgdboc: kernel mode switch error\n");
+ } else {
+ dbg_restore_graphics = 1;
+ dbg_pre_vt_hook();
+ }
+ }
/* Increment the module count when the debugger is active */
if (!kgdb_connected)
try_module_get(THIS_MODULE);
@@ -152,6 +227,21 @@ static void kgdboc_post_exp_handler(void)
/* decrement the module count when the debugger detaches */
if (!kgdb_connected)
module_put(THIS_MODULE);
+ if (kgdboc_use_kms && dbg_kms_console_core &&
+ dbg_kms_console_core->restore_console) {
+ if (dbg_restore_graphics) {
+ if (dbg_kms_console_core->restore_console(dbg_kms_console_core))
+ printk(KERN_ERR "kgdboc: graphics restore failed\n");
+ dbg_restore_graphics = 0;
+ dbg_post_vt_hook();
+ }
+ }
+
+#ifdef CONFIG_KDB_KEYBOARD
+ /* If using the kdb keyboard driver release all the keys. */
+ if (kgdboc_use_kbd)
+ input_dbg_clear_keys();
+#endif /* CONFIG_KDB_KEYBOARD */
}
static struct kgdb_io kgdboc_io_ops = {
@@ -162,6 +252,19 @@ static struct kgdb_io kgdboc_io_ops = {
.post_exception = kgdboc_post_exp_handler,
};
+#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+/* This is only available if kgdboc is a built in for early debugging */
+void __init early_kgdboc_init(void)
+{
+ /* save the first character of the config string because the
+ * init routine can destroy it.
+ */
+ char save_ch = config[0];
+ init_kgdboc();
+ config[0] = save_ch;
+}
+#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+
module_init(init_kgdboc);
module_exit(cleanup_kgdboc);
module_param_call(kgdboc, param_set_kgdboc_var, param_get_string, &kps, 0644);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index c0e660318c76..df854401af2d 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -820,6 +820,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */
+ PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */
PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
@@ -828,7 +829,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"),
PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
- PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"),
+ PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490),
@@ -862,6 +863,18 @@ static struct pcmcia_device_id serial_ids[] = {
};
MODULE_DEVICE_TABLE(pcmcia, serial_ids);
+MODULE_FIRMWARE("cis/PCMLM28.cis");
+MODULE_FIRMWARE("cis/DP83903.cis");
+MODULE_FIRMWARE("cis/3CCFEM556.cis");
+MODULE_FIRMWARE("cis/3CXEM556.cis");
+MODULE_FIRMWARE("cis/SW_8xx_SER.cis");
+MODULE_FIRMWARE("cis/SW_7xx_SER.cis");
+MODULE_FIRMWARE("cis/SW_555_SER.cis");
+MODULE_FIRMWARE("cis/MT5634ZLX.cis");
+MODULE_FIRMWARE("cis/COMpad2.cis");
+MODULE_FIRMWARE("cis/COMpad4.cis");
+MODULE_FIRMWARE("cis/RS-COM-2P.cis");
+
static struct pcmcia_driver serial_cs_driver = {
.owner = THIS_MODULE,
.drv = {
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 37f0de9dd9ce..d29b3fefb0a8 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -126,7 +126,11 @@ static int sci_poll_get_char(struct uart_port *port)
handle_error(port);
continue;
}
- } while (!(status & SCxSR_RDxF(port)));
+ break;
+ } while (1);
+
+ if (!(status & SCxSR_RDxF(port)))
+ return NO_POLL_CHAR;
c = sci_in(port, SCxRDR);
diff --git a/drivers/serial/sunzilog.c b/drivers/serial/sunzilog.c
index 2c7a66af4f52..978b3cee02d7 100644
--- a/drivers/serial/sunzilog.c
+++ b/drivers/serial/sunzilog.c
@@ -102,6 +102,8 @@ struct uart_sunzilog_port {
#endif
};
+static void sunzilog_putchar(struct uart_port *port, int ch);
+
#define ZILOG_CHANNEL_FROM_PORT(PORT) ((struct zilog_channel __iomem *)((PORT)->membase))
#define UART_ZILOG(PORT) ((struct uart_sunzilog_port *)(PORT))
@@ -996,6 +998,50 @@ static int sunzilog_verify_port(struct uart_port *port, struct serial_struct *se
return -EINVAL;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int sunzilog_get_poll_char(struct uart_port *port)
+{
+ unsigned char ch, r1;
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *) port;
+ struct zilog_channel __iomem *channel
+ = ZILOG_CHANNEL_FROM_PORT(&up->port);
+
+
+ r1 = read_zsreg(channel, R1);
+ if (r1 & (PAR_ERR | Rx_OVR | CRC_ERR)) {
+ writeb(ERR_RES, &channel->control);
+ ZSDELAY();
+ ZS_WSYNC(channel);
+ }
+
+ ch = readb(&channel->control);
+ ZSDELAY();
+
+ /* This funny hack depends upon BRK_ABRT not interfering
+ * with the other bits we care about in R1.
+ */
+ if (ch & BRK_ABRT)
+ r1 |= BRK_ABRT;
+
+ if (!(ch & Rx_CH_AV))
+ return NO_POLL_CHAR;
+
+ ch = readb(&channel->data);
+ ZSDELAY();
+
+ ch &= up->parity_mask;
+ return ch;
+}
+
+static void sunzilog_put_poll_char(struct uart_port *port,
+ unsigned char ch)
+{
+ struct uart_sunzilog_port *up = (struct uart_sunzilog_port *)port;
+
+ sunzilog_putchar(&up->port, ch);
+}
+#endif /* CONFIG_CONSOLE_POLL */
+
static struct uart_ops sunzilog_pops = {
.tx_empty = sunzilog_tx_empty,
.set_mctrl = sunzilog_set_mctrl,
@@ -1013,6 +1059,10 @@ static struct uart_ops sunzilog_pops = {
.request_port = sunzilog_request_port,
.config_port = sunzilog_config_port,
.verify_port = sunzilog_verify_port,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = sunzilog_get_poll_char,
+ .poll_put_char = sunzilog_put_poll_char,
+#endif
};
static int uart_chip_count;
diff --git a/drivers/spi/au1550_spi.c b/drivers/spi/au1550_spi.c
index cfd5ff9508fa..ba8ac4f599d3 100644
--- a/drivers/spi/au1550_spi.c
+++ b/drivers/spi/au1550_spi.c
@@ -412,11 +412,13 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
}
/* put buffers on the ring */
- res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, hw->rx, t->len);
+ res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
+ t->len, DDMA_FLAGS_IE);
if (!res)
dev_err(hw->dev, "rx dma put dest error\n");
- res = au1xxx_dbdma_put_source(hw->dma_tx_ch, (void *)hw->tx, t->len);
+ res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
+ t->len, DDMA_FLAGS_IE);
if (!res)
dev_err(hw->dev, "tx dma put source error\n");
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c
index 9f386379c169..1b47363cb73f 100644
--- a/drivers/spi/xilinx_spi.c
+++ b/drivers/spi/xilinx_spi.c
@@ -93,6 +93,26 @@ struct xilinx_spi {
void (*rx_fn) (struct xilinx_spi *);
};
+static void xspi_write32(u32 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+}
+
+static unsigned int xspi_read32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static void xspi_write32_be(u32 val, void __iomem *addr)
+{
+ iowrite32be(val, addr);
+}
+
+static unsigned int xspi_read32_be(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
static void xspi_tx8(struct xilinx_spi *xspi)
{
xspi->write_fn(*xspi->tx_ptr, xspi->regs + XSPI_TXD_OFFSET);
@@ -374,11 +394,11 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
xspi->mem = *mem;
xspi->irq = irq;
if (pdata->little_endian) {
- xspi->read_fn = ioread32;
- xspi->write_fn = iowrite32;
+ xspi->read_fn = xspi_read32;
+ xspi->write_fn = xspi_write32;
} else {
- xspi->read_fn = ioread32be;
- xspi->write_fn = iowrite32be;
+ xspi->read_fn = xspi_read32_be;
+ xspi->write_fn = xspi_write32_be;
}
xspi->bits_per_word = pdata->bits_per_word;
if (xspi->bits_per_word == 8) {
diff --git a/drivers/staging/tm6000/Kconfig b/drivers/staging/tm6000/Kconfig
new file mode 100644
index 000000000000..cb2011536363
--- /dev/null
+++ b/drivers/staging/tm6000/Kconfig
@@ -0,0 +1,32 @@
+config VIDEO_TM6000
+ tristate "TV Master TM5600/6000/6010 driver"
+ depends on VIDEO_DEV && I2C && INPUT && EXPERIMENTAL
+ select VIDEO_TUNER
+ select TUNER_XC2028
+ select VIDEOBUF_VMALLOC
+ help
+ Support for TM5600/TM6000/TM6010 USB Device
+
+ Since these cards have no MPEG decoder onboard, they transmit
+ only compressed MPEG data over the usb bus, so you need
+ an external software decoder to watch TV on your computer.
+
+ Say Y if you own such a device and want to use it.
+
+config VIDEO_TM6000_ALSA
+ tristate "TV Master TM5600/6000/6010 audio support"
+ depends on VIDEO_TM6000 && SND && EXPERIMENTAL
+ select SND_PCM
+ ---help---
+ This is a video4linux driver for direct (DMA) audio for
+ TM5600/TM6000/TM6010 USB Devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tm6000-alsa.
+
+config VIDEO_TM6000_DVB
+ bool "DVB Support for tm6000 based TV cards"
+ depends on VIDEO_TM6000 && DVB_CORE && EXPERIMENTAL
+ select DVB_ZL10353
+ ---help---
+ This adds support for DVB cards based on the tm5600/tm6000 chip.
diff --git a/drivers/staging/tm6000/Makefile b/drivers/staging/tm6000/Makefile
new file mode 100644
index 000000000000..25aefe74dddc
--- /dev/null
+++ b/drivers/staging/tm6000/Makefile
@@ -0,0 +1,15 @@
+tm6000-objs := tm6000-cards.o \
+ tm6000-core.o \
+ tm6000-i2c.o \
+ tm6000-video.o \
+ tm6000-stds.o
+
+ifeq ($(CONFIG_VIDEO_TM6000_DVB),y)
+tm6000-objs += tm6000-dvb.o \
+ hack.o
+endif
+
+obj-$(CONFIG_VIDEO_TM6000) += tm6000.o
+obj-$(CONFIG_VIDEO_TM6000_ALSA) += tm6000-alsa.o
+
+EXTRA_CFLAGS = -Idrivers/media/video
diff --git a/drivers/staging/tm6000/README b/drivers/staging/tm6000/README
new file mode 100644
index 000000000000..cff09b7b477e
--- /dev/null
+++ b/drivers/staging/tm6000/README
@@ -0,0 +1,11 @@
+Todo:
+ - checkpatch.pl cleanups
+ - sparse cleanups
+ - convert to new i2c approach
+ - better support DVB
+ - fix reading from i2c, if possible
+ - fix loosing frames
+ - fix oops?
+
+Please send patches to linux-media@vger.kernel.org
+
diff --git a/drivers/staging/tm6000/hack.c b/drivers/staging/tm6000/hack.c
new file mode 100644
index 000000000000..f181fce6716b
--- /dev/null
+++ b/drivers/staging/tm6000/hack.c
@@ -0,0 +1,252 @@
+
+
+
+
+
+
+/*
+ hack.h - hackish code that needs to be improved (or removed) at a
+ later point
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "hack.h"
+
+#include "tm6000.h"
+
+#include <linux/usb.h>
+
+static inline int tm6000_snd_control_msg(struct tm6000_core *dev, __u8 request, __u16 value, __u16 index, void *data, __u16 size)
+{
+ return tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, request, value, index, data, size);
+}
+
+static int pseudo_zl10353_pll(struct tm6000_core *tm6000_dev, struct dvb_frontend_parameters *p)
+{
+ int ret;
+ u8 *data = kzalloc(50*sizeof(u8), GFP_KERNEL);
+
+printk(KERN_ALERT "should set frequency %u\n", p->frequency);
+printk(KERN_ALERT "and bandwith %u\n", p->u.ofdm.bandwidth);
+
+ if(tm6000_dev->dvb->frontend->ops.tuner_ops.set_params) {
+ tm6000_dev->dvb->frontend->ops.tuner_ops.set_params(tm6000_dev->dvb->frontend, p);
+ }
+ else {
+ printk(KERN_ALERT "pseudo zl10353: couldn't set tuner parameters\n");
+ }
+
+ // init ZL10353
+ data[0] = 0x0b;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x501e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x80;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x551e, 0x00, data, 0x1);
+ msleep(100);
+ data[0] = 0x01;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0xea1e, 0x00, data, 0x1);
+ msleep(100);
+ data[0] = 0x00;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0xea1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x1c;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x561e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x40;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x5e1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x36;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x641e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x67;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x651e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0xe5;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x661e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x19;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x6c1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0xe9;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x6d1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x44;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x511e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x46;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x521e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x15;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x531e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x0f;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x541e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x75;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x5c1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x01;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x701e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x00;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x701e, 0x00, data, 0x1);
+ msleep(15);
+
+ msleep(50);
+
+ switch(p->u.ofdm.bandwidth) {
+ case BANDWIDTH_8_MHZ:
+ data[0] = 0x00;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x701e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x36;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x641e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x67;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x651e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0xe5;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x661e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x19;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x6c1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0xe9;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x6d1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x44;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x511e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x46;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x521e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x15;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x531e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x0f;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x541e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x75;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x5c1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x01;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x701e, 0x00, data, 0x1);
+ msleep(15);
+ break;
+
+ default:
+ printk(KERN_ALERT "tm6000: bandwidth not supported\n");
+ case BANDWIDTH_7_MHZ:
+ data[0] = 0x00;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x701e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x35;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x641e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x5a;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x651e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0xe9;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x661e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x19;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x6c1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0xe9;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x6d1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x44;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x511e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x46;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x521e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x15;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x531e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x0f;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x541e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x86;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x5c1e, 0x00, data, 0x1);
+ msleep(15);
+ data[0] = 0x01;
+ ret = tm6000_snd_control_msg(tm6000_dev, 0x10, 0x701e, 0x00, data, 0x1);
+ msleep(15);
+ break;
+ }
+
+ kfree(data);
+
+ return 0;
+};
+
+
+
+int pseudo_zl10353_set_frontend(struct dvb_frontend *fe,
+ struct dvb_frontend_parameters *p)
+{
+ struct tm6000_core *tm6000_dev = fe->dvb->priv;
+ u32 status;
+
+ if(p != NULL) {
+// mutex_lock(&tm6000_dev->mutex);
+ pseudo_zl10353_pll(tm6000_dev, p);
+// mutex_unlock(&tm6000_dev->mutex);
+ }
+
+ if(tm6000_dev->dvb->frontend->ops.read_status) {
+ tm6000_dev->dvb->frontend->ops.read_status(tm6000_dev->dvb->frontend, &status);
+ printk(KERN_ALERT "demodulator status: FE_HAS_CARRIER %i \n", (status & FE_HAS_CARRIER));
+ printk(KERN_ALERT "demodulator status: FE_HAS_VITERBI %i \n", (status & FE_HAS_VITERBI));
+ printk(KERN_ALERT "demodulator status: FE_HAS_LOCK %i \n", (status & FE_HAS_LOCK));
+ printk(KERN_ALERT "demodulator status: FE_HAS_SYNC %i \n", (status & FE_HAS_SYNC));
+ printk(KERN_ALERT "demodulator status: FE_HAS_SIGNAL %i \n", (status & FE_HAS_SIGNAL));
+ }
+ else {
+ printk(KERN_ALERT "pseudo zl10353: couldn't read demodulator status\n");
+ }
+ return 0;
+}
+
+int pseudo_zl10353_read_status(struct dvb_frontend *fe, fe_status_t *status)
+{
+
+ *status = FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_SYNC | FE_HAS_LOCK | FE_HAS_SIGNAL;
+
+ return 0;
+}
+
+struct dvb_frontend* pseudo_zl10353_attach(struct tm6000_core *dev,
+ const struct zl10353_config *config,
+ struct i2c_adapter *i2c)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ dvb->frontend = dvb_attach(zl10353_attach, config, i2c);
+ if(!dvb->frontend) {
+ printk(KERN_ERR "Error during zl10353_attach!\n");
+ return NULL;
+ }
+
+ /* override some functions with our implementations */
+ dvb->frontend->ops.set_frontend = pseudo_zl10353_set_frontend;
+ dvb->frontend->ops.read_status = pseudo_zl10353_read_status;
+ dvb->frontend->frontend_priv = dev;
+
+ return dvb->frontend;
+}
diff --git a/drivers/staging/tm6000/hack.h b/drivers/staging/tm6000/hack.h
new file mode 100644
index 000000000000..96f1b61df682
--- /dev/null
+++ b/drivers/staging/tm6000/hack.h
@@ -0,0 +1,45 @@
+/*
+ hack.h - hackish code that needs to be improved (or removed) at a
+ later point
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef HACK_H
+#define HACK_H
+
+#include <linux/i2c.h>
+
+#include "zl10353.h"
+#include "dvb_frontend.h"
+
+struct tm6000_core;
+
+int pseudo_zl103530_init(struct dvb_frontend *fe);
+
+int pseudo_zl10353_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p);
+
+int pseudo_zl10353_read_status(struct dvb_frontend *fe, fe_status_t *status);
+
+int pseudo_zl10353_read_signal_strength(struct dvb_frontend* fe, u16* strength);
+
+int pseudo_zl10353_read_snr(struct dvb_frontend *fe, u16 *snr);
+
+struct dvb_frontend* pseudo_zl10353_attach(struct tm6000_core *dev,
+ const struct zl10353_config *config,
+ struct i2c_adapter *i2c);
+
+#endif
diff --git a/drivers/staging/tm6000/tm6000-alsa.c b/drivers/staging/tm6000/tm6000-alsa.c
new file mode 100644
index 000000000000..cf24340e0874
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-alsa.c
@@ -0,0 +1,413 @@
+/*
+ *
+ * Support for audio capture for tm5600/6000
+ * (c) 2007-2008 Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Based on cx88-alsa.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+
+#include <asm/delay.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/control.h>
+#include <sound/initval.h>
+
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+#undef dprintk
+
+#define dprintk(level, fmt, arg...) do { \
+ if (debug >= level) \
+ printk(KERN_INFO "%s/1: " fmt, chip->core->name , ## arg); \
+ } while (0)
+
+/****************************************************************************
+ Data type declarations - Can be moded to a header file later
+ ****************************************************************************/
+
+struct snd_tm6000_card {
+ struct snd_card *card;
+
+ spinlock_t reg_lock;
+
+ atomic_t count;
+
+ unsigned int period_size;
+ unsigned int num_periods;
+
+ struct tm6000_core *core;
+ struct tm6000_buffer *buf;
+
+ int bufsize;
+
+ struct snd_pcm_substream *substream;
+};
+
+
+/****************************************************************************
+ Module global static vars
+ ****************************************************************************/
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable tm6000x soundcard. default enabled.");
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for tm6000x capture interface(s).");
+
+
+/****************************************************************************
+ Module macros
+ ****************************************************************************/
+
+MODULE_DESCRIPTION("ALSA driver module for tm5600/tm6000 based TV cards");
+MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_SUPPORTED_DEVICE("{{Trident,tm5600},"
+ "{{Trident,tm6000}");
+static unsigned int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "enable debug messages");
+
+/****************************************************************************
+ Module specific funtions
+ ****************************************************************************/
+
+/*
+ * BOARD Specific: Sets audio DMA
+ */
+
+static int _tm6000_start_audio_dma(struct snd_tm6000_card *chip)
+{
+ struct tm6000_core *core = chip->core;
+ int val;
+
+ /* Enables audio */
+ val = tm6000_get_reg(core, REQ_07_SET_GET_AVREG, 0xcc, 0x0);
+ val |= 0x20;
+ tm6000_set_reg(core, REQ_07_SET_GET_AVREG, 0xcc, val);
+
+ tm6000_set_reg(core, REQ_08_SET_GET_AVREG_BIT, 0x01, 0x80);
+
+ return 0;
+}
+
+/*
+ * BOARD Specific: Resets audio DMA
+ */
+static int _tm6000_stop_audio_dma(struct snd_tm6000_card *chip)
+{
+ struct tm6000_core *core = chip->core;
+ int val;
+ dprintk(1, "Stopping audio DMA\n");
+
+ /* Enables audio */
+ val = tm6000_get_reg(core, REQ_07_SET_GET_AVREG, 0xcc, 0x0);
+ val &= ~0x20;
+ tm6000_set_reg(core, REQ_07_SET_GET_AVREG, 0xcc, val);
+
+ tm6000_set_reg(core, REQ_08_SET_GET_AVREG_BIT, 0x01, 0);
+
+ return 0;
+}
+
+static int dsp_buffer_free(struct snd_tm6000_card *chip)
+{
+ BUG_ON(!chip->bufsize);
+
+ dprintk(2, "Freeing buffer\n");
+
+ /* FIXME: Frees buffer */
+
+ chip->bufsize = 0;
+
+ return 0;
+}
+
+/****************************************************************************
+ ALSA PCM Interface
+ ****************************************************************************/
+
+/*
+ * Digital hardware definition
+ */
+#define DEFAULT_FIFO_SIZE 4096
+
+static struct snd_pcm_hardware snd_tm6000_digital_hw = {
+ .info = SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
+ .rate_min = 44100,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .period_bytes_min = DEFAULT_FIFO_SIZE/4,
+ .period_bytes_max = DEFAULT_FIFO_SIZE/4,
+ .periods_min = 1,
+ .periods_max = 1024,
+ .buffer_bytes_max = (1024*1024),
+};
+
+/*
+ * audio pcm capture open callback
+ */
+static int snd_tm6000_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int err;
+
+ err = snd_pcm_hw_constraint_pow2(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+ if (err < 0)
+ goto _error;
+
+ chip->substream = substream;
+
+ runtime->hw = snd_tm6000_digital_hw;
+
+ return 0;
+_error:
+ dprintk(1, "Error opening PCM!\n");
+ return err;
+}
+
+/*
+ * audio close callback
+ */
+static int snd_tm6000_close(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+/*
+ * hw_params callback
+ */
+static int snd_tm6000_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ chip->period_size = params_period_bytes(hw_params);
+ chip->num_periods = params_periods(hw_params);
+ chip->bufsize = chip->period_size * params_periods(hw_params);
+
+ BUG_ON(!chip->bufsize);
+
+ dprintk(1, "Setting buffer\n");
+
+ /* FIXME: Allocate buffer for audio */
+
+
+ return 0;
+}
+
+/*
+ * hw free callback
+ */
+static int snd_tm6000_hw_free(struct snd_pcm_substream *substream)
+{
+
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+
+ if (substream->runtime->dma_area) {
+ dsp_buffer_free(chip);
+ substream->runtime->dma_area = NULL;
+ }
+
+ return 0;
+}
+
+/*
+ * prepare callback
+ */
+static int snd_tm6000_prepare(struct snd_pcm_substream *substream)
+{
+ return 0;
+}
+
+
+/*
+ * trigger callback
+ */
+static int snd_tm6000_card_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ int err;
+
+ spin_lock(&chip->reg_lock);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ err = _tm6000_start_audio_dma(chip);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ err = _tm6000_stop_audio_dma(chip);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ spin_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+/*
+ * pointer callback
+ */
+static snd_pcm_uframes_t snd_tm6000_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_tm6000_card *chip = snd_pcm_substream_chip(substream);
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ u16 count;
+
+ count = atomic_read(&chip->count);
+
+ return runtime->period_size * (count & (runtime->periods-1));
+}
+
+/*
+ * operators
+ */
+static struct snd_pcm_ops snd_tm6000_pcm_ops = {
+ .open = snd_tm6000_pcm_open,
+ .close = snd_tm6000_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = snd_tm6000_hw_params,
+ .hw_free = snd_tm6000_hw_free,
+ .prepare = snd_tm6000_prepare,
+ .trigger = snd_tm6000_card_trigger,
+ .pointer = snd_tm6000_pointer,
+};
+
+/*
+ * create a PCM device
+ */
+static int __devinit snd_tm6000_pcm(struct snd_tm6000_card *chip,
+ int device, char *name)
+{
+ int err;
+ struct snd_pcm *pcm;
+
+ err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm);
+ if (err < 0)
+ return err;
+ pcm->private_data = chip;
+ strcpy(pcm->name, name);
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_tm6000_pcm_ops);
+
+ return 0;
+}
+
+/* FIXME: Control interface - How to control volume/mute? */
+
+/****************************************************************************
+ Basic Flow for Sound Devices
+ ****************************************************************************/
+
+/*
+ * Alsa Constructor - Component probe
+ */
+
+int tm6000_audio_init(struct tm6000_core *dev, int idx)
+{
+ struct snd_card *card;
+ struct snd_tm6000_card *chip;
+ int rc, len;
+ char component[14];
+
+ if (idx >= SNDRV_CARDS)
+ return -ENODEV;
+
+ if (!enable[idx])
+ return -ENOENT;
+
+ rc = snd_card_create(index[idx], id[idx], THIS_MODULE, 0, &card);
+ if (rc < 0) {
+ snd_printk(KERN_ERR "cannot create card instance %d\n", idx);
+ return rc;
+ }
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ chip->core = dev;
+ chip->card = card;
+
+ strcpy(card->driver, "tm6000-alsa");
+ sprintf(component, "USB%04x:%04x",
+ le16_to_cpu(dev->udev->descriptor.idVendor),
+ le16_to_cpu(dev->udev->descriptor.idProduct));
+ snd_component_add(card, component);
+
+ if (dev->udev->descriptor.iManufacturer)
+ len = usb_string(dev->udev,
+ dev->udev->descriptor.iManufacturer,
+ card->longname, sizeof(card->longname));
+ else
+ len = 0;
+
+ if (len > 0)
+ strlcat(card->longname, " ", sizeof(card->longname));
+
+ strlcat(card->longname, card->shortname, sizeof(card->longname));
+
+ len = strlcat(card->longname, " at ", sizeof(card->longname));
+
+ if (len < sizeof(card->longname))
+ usb_make_path(dev->udev, card->longname + len,
+ sizeof(card->longname) - len);
+
+ strlcat(card->longname,
+ dev->udev->speed == USB_SPEED_LOW ? ", low speed" :
+ dev->udev->speed == USB_SPEED_FULL ? ", full speed" :
+ ", high speed",
+ sizeof(card->longname));
+
+ rc = snd_tm6000_pcm(chip, 0, "tm6000 Digital");
+ if (rc < 0)
+ goto error;
+
+ rc = snd_card_register(card);
+ if (rc < 0)
+ goto error;
+
+
+ return 0;
+
+error:
+ snd_card_free(card);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tm6000_audio_init);
+
diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c
new file mode 100644
index 000000000000..59fb505663e4
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-cards.c
@@ -0,0 +1,662 @@
+/*
+ tm6000-cards.c - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+#include <media/tvaudio.h>
+#include <media/i2c-addr.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include "tuner-xc2028.h"
+#include "tuner-xc5000.h"
+
+#define TM6000_BOARD_UNKNOWN 0
+#define TM5600_BOARD_GENERIC 1
+#define TM6000_BOARD_GENERIC 2
+#define TM6010_BOARD_GENERIC 3
+#define TM5600_BOARD_10MOONS_UT821 4
+#define TM5600_BOARD_10MOONS_UT330 5
+#define TM6000_BOARD_ADSTECH_DUAL_TV 6
+#define TM6000_BOARD_FREECOM_AND_SIMILAR 7
+#define TM6000_BOARD_ADSTECH_MINI_DUAL_TV 8
+#define TM6010_BOARD_HAUPPAUGE_900H 9
+
+#define TM6000_MAXBOARDS 16
+static unsigned int card[] = {[0 ... (TM6000_MAXBOARDS - 1)] = UNSET };
+
+module_param_array(card, int, NULL, 0444);
+
+static unsigned long tm6000_devused;
+
+
+struct tm6000_board {
+ char *name;
+
+ struct tm6000_capabilities caps;
+
+ enum tm6000_devtype type; /* variant of the chipset */
+ int tuner_type; /* type of the tuner */
+ int tuner_addr; /* tuner address */
+ int demod_addr; /* demodulator address */
+ int gpio_addr_tun_reset; /* GPIO used for tuner reset */
+};
+
+struct tm6000_board tm6000_boards[] = {
+ [TM6000_BOARD_UNKNOWN] = {
+ .name = "Unknown tm6000 video grabber",
+ .caps = {
+ .has_tuner = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_1,
+ },
+ [TM5600_BOARD_GENERIC] = {
+ .name = "Generic tm5600 board",
+ .type = TM5600,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_1,
+ },
+ [TM6000_BOARD_GENERIC] = {
+ .name = "Generic tm6000 board",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_1,
+ },
+ [TM6010_BOARD_GENERIC] = {
+ .name = "Generic tm6010 board",
+ .type = TM6010,
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ },
+ .gpio_addr_tun_reset = TM6010_GPIO_4,
+ },
+ [TM5600_BOARD_10MOONS_UT821] = {
+ .name = "10Moons UT 821",
+ .tuner_type = TUNER_XC2028,
+ .type = TM5600,
+ .tuner_addr = 0xc2 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_eeprom = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_1,
+ },
+ [TM5600_BOARD_10MOONS_UT330] = {
+ .name = "10Moons UT 330",
+ .tuner_type = TUNER_PHILIPS_FQ1216AME_MK4,
+ .tuner_addr = 0xc8 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 0,
+ .has_zl10353 = 0,
+ .has_eeprom = 1,
+ },
+ },
+ [TM6000_BOARD_ADSTECH_DUAL_TV] = {
+ .name = "ADSTECH Dual TV USB",
+ .tuner_type = TUNER_XC2028,
+ .tuner_addr = 0xc8 >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_tda9874 = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ },
+ },
+ [TM6000_BOARD_FREECOM_AND_SIMILAR] = {
+ .name = "Freecom Hybrid Stick / Moka DVB-T Receiver Dual",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 0,
+ .has_remote = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_4,
+ },
+ [TM6000_BOARD_ADSTECH_MINI_DUAL_TV] = {
+ .name = "ADSTECH Mini Dual TV USB",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc8 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 0,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_4,
+ },
+ [TM6010_BOARD_HAUPPAUGE_900H] = {
+ .name = "Hauppauge HVR-900H",
+ .tuner_type = TUNER_XC2028, /* has a XC3028 */
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_2,
+ },
+ [TM6010_BOARD_BEHOLD_WANDER] = {
+ .name = "Beholder Wander DVB-T/TV/FM USB2.0",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0xc2 >> 1,
+ .demod_addr = 0x1e >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 1,
+ .has_zl10353 = 1,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_2,
+ },
+ [TM6010_BOARD_BEHOLD_VOYAGER] = {
+ .name = "Beholder Voyager TV/FM USB2.0",
+ .tuner_type = TUNER_XC5000,
+ .tuner_addr = 0xc2 >> 1,
+ .type = TM6010,
+ .caps = {
+ .has_tuner = 1,
+ .has_dvb = 0,
+ .has_zl10353 = 0,
+ .has_eeprom = 1,
+ .has_remote = 1,
+ },
+ .gpio_addr_tun_reset = TM6000_GPIO_2,
+ },
+
+};
+
+/* table of devices that work with this driver */
+struct usb_device_id tm6000_id_table [] = {
+ { USB_DEVICE(0x6000, 0x0001), .driver_info = TM5600_BOARD_10MOONS_UT821 },
+ { USB_DEVICE(0x6000, 0x0002), .driver_info = TM6010_BOARD_GENERIC },
+ { USB_DEVICE(0x06e1, 0xf332), .driver_info = TM6000_BOARD_ADSTECH_DUAL_TV },
+ { USB_DEVICE(0x14aa, 0x0620), .driver_info = TM6000_BOARD_FREECOM_AND_SIMILAR },
+ { USB_DEVICE(0x06e1, 0xb339), .driver_info = TM6000_BOARD_ADSTECH_MINI_DUAL_TV },
+ { USB_DEVICE(0x2040, 0x6600), .driver_info = TM6010_BOARD_HAUPPAUGE_900H },
+ { USB_DEVICE(0x6000, 0xdec0), .driver_info = TM6010_BOARD_BEHOLD_WANDER },
+ { USB_DEVICE(0x6000, 0xdec1), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER },
+ { },
+};
+
+/* Tuner callback to provide the proper gpio changes needed for xc2028 */
+
+static int tm6000_tuner_callback(void *ptr, int component, int command, int arg)
+{
+ int rc=0;
+ struct tm6000_core *dev = ptr;
+
+ if (dev->tuner_type!=TUNER_XC2028)
+ return 0;
+
+ switch (command) {
+ case XC2028_RESET_CLK:
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT,
+ 0x02, arg);
+ msleep(10);
+ rc=tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 0);
+ if (rc<0)
+ return rc;
+ msleep(10);
+ rc=tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 1);
+ break;
+ case XC2028_TUNER_RESET:
+ /* Reset codes during load firmware */
+ switch (arg) {
+ case 0:
+ tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN,
+ dev->tuner_reset_gpio, 0x00);
+ msleep(130);
+ tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN,
+ dev->tuner_reset_gpio, 0x01);
+ msleep(130);
+ break;
+ case 1:
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT,
+ 0x02, 0x01);
+ msleep(10);
+ break;
+
+ case 2:
+ rc=tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 0);
+ if (rc<0)
+ return rc;
+ msleep(100);
+ rc=tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN,
+ TM6000_GPIO_CLK, 1);
+ msleep(100);
+ break;
+ }
+ }
+ return (rc);
+}
+
+static void tm6000_config_tuner (struct tm6000_core *dev)
+{
+ struct tuner_setup tun_setup;
+
+ /* Load tuner module */
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "tuner", "tuner",dev->tuner_addr, NULL);
+
+ memset(&tun_setup, 0, sizeof(tun_setup));
+ tun_setup.type = dev->tuner_type;
+ tun_setup.addr = dev->tuner_addr;
+ tun_setup.mode_mask = T_ANALOG_TV | T_RADIO;
+ tun_setup.tuner_callback = tm6000_tuner_callback;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup);
+
+ if (dev->tuner_type == TUNER_XC2028) {
+ struct v4l2_priv_tun_config xc2028_cfg;
+ struct xc2028_ctrl ctl;
+
+ memset(&xc2028_cfg, 0, sizeof(xc2028_cfg));
+ memset (&ctl,0,sizeof(ctl));
+
+ ctl.mts = 1;
+ ctl.read_not_reliable = 1;
+ ctl.msleep = 10;
+
+ xc2028_cfg.tuner = TUNER_XC2028;
+ xc2028_cfg.priv = &ctl;
+
+ switch(dev->model) {
+ case TM6010_BOARD_HAUPPAUGE_900H:
+ ctl.fname = "xc3028L-v36.fw";
+ break;
+ default:
+ if (dev->dev_type == TM6010)
+ ctl.fname = "xc3028-v27.fw";
+ else
+ ctl.fname = "tm6000-xc3028.fw";
+ }
+
+ printk(KERN_INFO "Setting firmware parameters for xc2028\n");
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config,
+ &xc2028_cfg);
+ }
+}
+
+static int tm6000_init_dev(struct tm6000_core *dev)
+{
+ struct v4l2_frequency f;
+ int rc = 0;
+
+ mutex_init(&dev->lock);
+
+ mutex_lock(&dev->lock);
+
+ /* Initializa board-specific data */
+ dev->dev_type = tm6000_boards[dev->model].type;
+ dev->tuner_type = tm6000_boards[dev->model].tuner_type;
+ dev->tuner_addr = tm6000_boards[dev->model].tuner_addr;
+ dev->tuner_reset_gpio = tm6000_boards[dev->model].gpio_addr_tun_reset;
+
+ dev->demod_addr = tm6000_boards[dev->model].demod_addr;
+
+ dev->caps = tm6000_boards[dev->model].caps;
+
+ /* initialize hardware */
+ rc=tm6000_init (dev);
+ if (rc<0)
+ goto err;
+
+ rc = v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev);
+ if (rc < 0)
+ goto err;
+
+ /* register i2c bus */
+ rc=tm6000_i2c_register(dev);
+ if (rc<0)
+ goto err;
+
+ /* register and initialize V4L2 */
+ rc=tm6000_v4l2_register(dev);
+ if (rc<0)
+ goto err;
+
+ /* Default values for STD and resolutions */
+ dev->width = 720;
+ dev->height = 480;
+ dev->norm = V4L2_STD_PAL_M;
+
+ /* Configure tuner */
+ tm6000_config_tuner (dev);
+
+ /* Set video standard */
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
+
+ /* Set tuner frequency - also loads firmware on xc2028/xc3028 */
+ f.tuner = 0;
+ f.type = V4L2_TUNER_ANALOG_TV;
+ f.frequency = 3092; /* 193.25 MHz */
+ dev->freq = f.frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+
+ if (dev->caps.has_tda9874)
+ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
+ "tvaudio", "tvaudio", I2C_ADDR_TDA9874, NULL);
+
+ if(dev->caps.has_dvb) {
+ dev->dvb = kzalloc(sizeof(*(dev->dvb)), GFP_KERNEL);
+ if(!dev->dvb) {
+ rc = -ENOMEM;
+ goto err2;
+ }
+#ifdef CONFIG_VIDEO_TM6000_DVB
+ rc = tm6000_dvb_register(dev);
+ if(rc < 0) {
+ kfree(dev->dvb);
+ dev->dvb = NULL;
+ goto err2;
+ }
+#endif
+ }
+
+err2:
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+err:
+ mutex_unlock(&dev->lock);
+ return rc;
+}
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+
+static void get_max_endpoint ( struct usb_device *usbdev,
+ char *msgtype,
+ struct usb_host_endpoint *curr_e,
+ unsigned int *maxsize,
+ struct usb_host_endpoint **ep )
+{
+ u16 tmp = le16_to_cpu(curr_e->desc.wMaxPacketSize);
+ unsigned int size = tmp & 0x7ff;
+
+ if (usbdev->speed == USB_SPEED_HIGH)
+ size = size * hb_mult (tmp);
+
+ if (size>*maxsize) {
+ *ep = curr_e;
+ *maxsize = size;
+ printk("tm6000: %s endpoint: 0x%02x (max size=%u bytes)\n",
+ msgtype, curr_e->desc.bEndpointAddress,
+ size);
+ }
+}
+
+/*
+ * tm6000_usb_probe()
+ * checks for supported devices
+ */
+static int tm6000_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usbdev;
+ struct tm6000_core *dev = NULL;
+ int i,rc=0;
+ int nr=0;
+ char *speed;
+
+
+ usbdev=usb_get_dev(interface_to_usbdev(interface));
+
+ /* Selects the proper interface */
+ rc=usb_set_interface(usbdev,0,1);
+ if (rc<0)
+ goto err;
+
+ /* Check to see next free device and mark as used */
+ nr=find_first_zero_bit(&tm6000_devused,TM6000_MAXBOARDS);
+ if (nr >= TM6000_MAXBOARDS) {
+ printk ("tm6000: Supports only %i em28xx boards.\n",TM6000_MAXBOARDS);
+ usb_put_dev(usbdev);
+ return -ENOMEM;
+ }
+
+ /* Create and initialize dev struct */
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev == NULL) {
+ printk ("tm6000" ": out of memory!\n");
+ usb_put_dev(usbdev);
+ return -ENOMEM;
+ }
+ spin_lock_init(&dev->slock);
+
+ /* Increment usage count */
+ tm6000_devused|=1<<nr;
+ snprintf(dev->name, 29, "tm6000 #%d", nr);
+
+ dev->model=id->driver_info;
+ if ((card[nr]>=0) && (card[nr]<ARRAY_SIZE(tm6000_boards))) {
+ dev->model=card[nr];
+ }
+
+ dev->udev= usbdev;
+ dev->devno=nr;
+
+ switch (usbdev->speed) {
+ case USB_SPEED_LOW:
+ speed = "1.5";
+ break;
+ case USB_SPEED_UNKNOWN:
+ case USB_SPEED_FULL:
+ speed = "12";
+ break;
+ case USB_SPEED_HIGH:
+ speed = "480";
+ break;
+ default:
+ speed = "unknown";
+ }
+
+
+
+ /* Get endpoints */
+ for (i = 0; i < interface->num_altsetting; i++) {
+ int ep;
+
+ for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) {
+ struct usb_host_endpoint *e;
+ int dir_out;
+
+ e = &interface->altsetting[i].endpoint[ep];
+
+ dir_out = ((e->desc.bEndpointAddress &
+ USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+
+ printk("tm6000: alt %d, interface %i, class %i\n",
+ i,
+ interface->altsetting[i].desc.bInterfaceNumber,
+ interface->altsetting[i].desc.bInterfaceClass);
+
+ switch (e->desc.bmAttributes) {
+ case USB_ENDPOINT_XFER_BULK:
+ if (!dir_out) {
+ get_max_endpoint (usbdev, "Bulk IN", e,
+ &dev->max_bulk_in,
+ &dev->bulk_in);
+ } else {
+ get_max_endpoint (usbdev, "Bulk OUT", e,
+ &dev->max_bulk_out,
+ &dev->bulk_out);
+ }
+ break;
+ case USB_ENDPOINT_XFER_ISOC:
+ if (!dir_out) {
+ get_max_endpoint (usbdev, "ISOC IN", e,
+ &dev->max_isoc_in,
+ &dev->isoc_in);
+ } else {
+ get_max_endpoint (usbdev, "ISOC OUT", e,
+ &dev->max_isoc_out,
+ &dev->isoc_out);
+ }
+ break;
+ }
+ }
+ }
+
+
+ printk("tm6000: New video device @ %s Mbps (%04x:%04x, ifnum %d)\n",
+ speed,
+ le16_to_cpu(dev->udev->descriptor.idVendor),
+ le16_to_cpu(dev->udev->descriptor.idProduct),
+ interface->altsetting->desc.bInterfaceNumber);
+
+/* check if the the device has the iso in endpoint at the correct place */
+ if (!dev->isoc_in) {
+ printk("tm6000: probing error: no IN ISOC endpoint!\n");
+ rc= -ENODEV;
+
+ goto err;
+ }
+
+ /* save our data pointer in this interface device */
+ usb_set_intfdata(interface, dev);
+
+ printk("tm6000: Found %s\n", tm6000_boards[dev->model].name);
+
+ rc=tm6000_init_dev(dev);
+
+ if (rc<0)
+ goto err;
+
+ return 0;
+
+err:
+ printk("tm6000: Error %d while registering\n", rc);
+
+ tm6000_devused&=~(1<<nr);
+ usb_put_dev(usbdev);
+
+ kfree(dev);
+ return rc;
+}
+
+/*
+ * tm6000_usb_disconnect()
+ * called when the device gets diconencted
+ * video device will be unregistered on v4l2_close in case it is still open
+ */
+static void tm6000_usb_disconnect(struct usb_interface *interface)
+{
+ struct tm6000_core *dev = usb_get_intfdata(interface);
+ usb_set_intfdata(interface, NULL);
+
+ if (!dev)
+ return;
+
+ printk("tm6000: disconnecting %s\n", dev->name);
+
+ mutex_lock(&dev->lock);
+
+#ifdef CONFIG_VIDEO_TM6000_DVB
+ if(dev->dvb) {
+ tm6000_dvb_unregister(dev);
+ kfree(dev->dvb);
+ }
+#endif
+
+ tm6000_v4l2_unregister(dev);
+
+ tm6000_i2c_unregister(dev);
+
+ v4l2_device_unregister(&dev->v4l2_dev);
+
+// wake_up_interruptible_all(&dev->open);
+
+ dev->state |= DEV_DISCONNECTED;
+
+ usb_put_dev(dev->udev);
+
+ mutex_unlock(&dev->lock);
+ kfree(dev);
+}
+
+static struct usb_driver tm6000_usb_driver = {
+ .name = "tm6000",
+ .probe = tm6000_usb_probe,
+ .disconnect = tm6000_usb_disconnect,
+ .id_table = tm6000_id_table,
+};
+
+static int __init tm6000_module_init(void)
+{
+ int result;
+
+ printk(KERN_INFO "tm6000" " v4l2 driver version %d.%d.%d loaded\n",
+ (TM6000_VERSION >> 16) & 0xff,
+ (TM6000_VERSION >> 8) & 0xff, TM6000_VERSION & 0xff);
+
+ /* register this driver with the USB subsystem */
+ result = usb_register(&tm6000_usb_driver);
+ if (result)
+ printk("tm6000"
+ " usb_register failed. Error number %d.\n", result);
+
+ return result;
+}
+
+static void __exit tm6000_module_exit(void)
+{
+ /* deregister at USB subsystem */
+ usb_deregister(&tm6000_usb_driver);
+}
+
+module_init(tm6000_module_init);
+module_exit(tm6000_module_exit);
+
+MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000 USB2 adapter");
+MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/tm6000/tm6000-core.c b/drivers/staging/tm6000/tm6000-core.c
new file mode 100644
index 000000000000..d41af1d6c66a
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-core.c
@@ -0,0 +1,511 @@
+/*
+ tm6000-core.c - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - DVB-T support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+#include <linux/video_decoder.h>
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+
+#define USB_TIMEOUT 5*HZ /* ms */
+
+int tm6000_read_write_usb (struct tm6000_core *dev, u8 req_type, u8 req,
+ u16 value, u16 index, u8 *buf, u16 len)
+{
+ int ret, i;
+ unsigned int pipe;
+ static int ini=0, last=0, n=0;
+ u8 *data=NULL;
+
+ if (len)
+ data = kzalloc(len, GFP_KERNEL);
+
+
+ if (req_type & USB_DIR_IN)
+ pipe=usb_rcvctrlpipe(dev->udev, 0);
+ else {
+ pipe=usb_sndctrlpipe(dev->udev, 0);
+ memcpy(data, buf, len);
+ }
+
+ if (tm6000_debug & V4L2_DEBUG_I2C) {
+ if (!ini)
+ last=ini=jiffies;
+
+ printk("%06i (dev %p, pipe %08x): ", n, dev->udev, pipe);
+
+ printk( "%s: %06u ms %06u ms %02x %02x %02x %02x %02x %02x %02x %02x ",
+ (req_type & USB_DIR_IN)?" IN":"OUT",
+ jiffies_to_msecs(jiffies-last),
+ jiffies_to_msecs(jiffies-ini),
+ req_type, req,value&0xff,value>>8, index&0xff, index>>8,
+ len&0xff, len>>8);
+ last=jiffies;
+ n++;
+
+ if ( !(req_type & USB_DIR_IN) ) {
+ printk(">>> ");
+ for (i=0;i<len;i++) {
+ printk(" %02x",buf[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ ret = usb_control_msg(dev->udev, pipe, req, req_type, value, index, data,
+ len, USB_TIMEOUT);
+
+ if (req_type & USB_DIR_IN)
+ memcpy(buf, data, len);
+
+ if (tm6000_debug & V4L2_DEBUG_I2C) {
+ if (ret<0) {
+ if (req_type & USB_DIR_IN)
+ printk("<<< (len=%d)\n",len);
+
+ printk("%s: Error #%d\n", __FUNCTION__, ret);
+ } else if (req_type & USB_DIR_IN) {
+ printk("<<< ");
+ for (i=0;i<len;i++) {
+ printk(" %02x",buf[i]);
+ }
+ printk("\n");
+ }
+ }
+
+ kfree(data);
+
+ msleep(5);
+
+ return ret;
+}
+
+int tm6000_set_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ return
+ tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR,
+ req, value, index, NULL, 0);
+}
+
+int tm6000_get_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[1];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 1);
+
+ if (rc<0)
+ return rc;
+
+ return *buf;
+}
+
+int tm6000_get_reg16 (struct tm6000_core *dev, u8 req, u16 value, u16 index)
+{
+ int rc;
+ u8 buf[2];
+
+ rc=tm6000_read_write_usb (dev, USB_DIR_IN | USB_TYPE_VENDOR, req,
+ value, index, buf, 2);
+
+ if (rc<0)
+ return rc;
+
+ return buf[1]|buf[0]<<8;
+}
+
+void tm6000_set_fourcc_format(struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ if (dev->fourcc == V4L2_PIX_FMT_UYVY)
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0xc1, 0xfc);
+ else
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0xc1, 0xfd);
+ } else {
+ if (dev->fourcc == V4L2_PIX_FMT_UYVY)
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0xc1, 0xd0);
+ else
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0xc1, 0x90);
+ }
+}
+
+int tm6000_init_analog_mode (struct tm6000_core *dev)
+{
+ if (dev->dev_type == TM6010) {
+ int val;
+
+ /* Enable video */
+ val = tm6000_get_reg(dev, REQ_07_SET_GET_AVREG, 0xcc, 0);
+ val |= 0x60;
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xcc, val);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xfe, 0xcf);
+
+ } else {
+ /* Enables soft reset */
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0x3f, 0x01);
+
+ if (dev->scaler) {
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xc0, 0x20);
+ } else {
+ /* Enable Hfilter and disable TS Drop err */
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xc0, 0x80);
+ }
+
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xc3, 0x88);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xda, 0x23);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xd1, 0xc0);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xd2, 0xd8);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xd6, 0x06);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xdf, 0x1f);
+
+ /* AP Software reset */
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xff, 0x08);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xff, 0x00);
+
+ tm6000_set_fourcc_format(dev);
+
+ /* Disables soft reset */
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0x3f, 0x00);
+
+ /* E3: Select input 0 - TV tuner */
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xe3, 0x00);
+ tm6000_set_reg(dev, REQ_07_SET_GET_AVREG, 0xeb, 0x60);
+
+ /* This controls input */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_2, 0x0);
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_3, 0x01);
+ }
+ msleep(20);
+
+ /* Tuner firmware can now be loaded */
+
+ /*FIXME: Hack!!! */
+ struct v4l2_frequency f;
+ mutex_lock(&dev->lock);
+ f.frequency=dev->freq;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f);
+ mutex_unlock(&dev->lock);
+
+ msleep(100);
+ tm6000_set_standard (dev, &dev->norm);
+ tm6000_set_audio_bitrate (dev,48000);
+
+ return 0;
+}
+
+int tm6000_init_digital_mode (struct tm6000_core *dev)
+{
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00ff, 0x08);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00ff, 0x00);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x003f, 0x01);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00df, 0x08);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00e2, 0x0c);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00e8, 0xff);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00eb, 0xd8);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00c0, 0x40);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00c1, 0xd0);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00c3, 0x09);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00da, 0x37);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00d1, 0xd8);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00d2, 0xc0);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00d6, 0x60);
+
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00e2, 0x0c);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00e8, 0xff);
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x00eb, 0x08);
+ msleep(50);
+
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00);
+ msleep(50);
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x01);
+ msleep(50);
+ tm6000_set_reg (dev, REQ_04_EN_DISABLE_MCU_INT, 0x0020, 0x00);
+ msleep(100);
+
+ return 0;
+}
+
+struct reg_init {
+ u8 req;
+ u8 reg;
+ u8 val;
+};
+
+/* The meaning of those initializations are unknown */
+struct reg_init tm6000_init_tab[] = {
+ /* REG VALUE */
+ { REQ_07_SET_GET_AVREG, 0xdf, 0x1f },
+ { REQ_07_SET_GET_AVREG, 0xff, 0x08 },
+ { REQ_07_SET_GET_AVREG, 0xff, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0xd5, 0x4f },
+ { REQ_07_SET_GET_AVREG, 0xda, 0x23 },
+ { REQ_07_SET_GET_AVREG, 0xdb, 0x08 },
+ { REQ_07_SET_GET_AVREG, 0xe2, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0xe3, 0x10 },
+ { REQ_07_SET_GET_AVREG, 0xe5, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0xe8, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0xeb, 0x64 }, /* 48000 bits/sample, external input */
+ { REQ_07_SET_GET_AVREG, 0xee, 0xc2 },
+ { REQ_07_SET_GET_AVREG, 0x3f, 0x01 }, /* Start of soft reset */
+ { REQ_07_SET_GET_AVREG, 0x00, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0x01, 0x07 },
+ { REQ_07_SET_GET_AVREG, 0x02, 0x5f },
+ { REQ_07_SET_GET_AVREG, 0x03, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0x05, 0x64 },
+ { REQ_07_SET_GET_AVREG, 0x07, 0x01 },
+ { REQ_07_SET_GET_AVREG, 0x08, 0x82 },
+ { REQ_07_SET_GET_AVREG, 0x09, 0x36 },
+ { REQ_07_SET_GET_AVREG, 0x0a, 0x50 },
+ { REQ_07_SET_GET_AVREG, 0x0c, 0x6a },
+ { REQ_07_SET_GET_AVREG, 0x11, 0xc9 },
+ { REQ_07_SET_GET_AVREG, 0x12, 0x07 },
+ { REQ_07_SET_GET_AVREG, 0x13, 0x3b },
+ { REQ_07_SET_GET_AVREG, 0x14, 0x47 },
+ { REQ_07_SET_GET_AVREG, 0x15, 0x6f },
+ { REQ_07_SET_GET_AVREG, 0x17, 0xcd },
+ { REQ_07_SET_GET_AVREG, 0x18, 0x1e },
+ { REQ_07_SET_GET_AVREG, 0x19, 0x8b },
+ { REQ_07_SET_GET_AVREG, 0x1a, 0xa2 },
+ { REQ_07_SET_GET_AVREG, 0x1b, 0xe9 },
+ { REQ_07_SET_GET_AVREG, 0x1c, 0x1c },
+ { REQ_07_SET_GET_AVREG, 0x1d, 0xcc },
+ { REQ_07_SET_GET_AVREG, 0x1e, 0xcc },
+ { REQ_07_SET_GET_AVREG, 0x1f, 0xcd },
+ { REQ_07_SET_GET_AVREG, 0x20, 0x3c },
+ { REQ_07_SET_GET_AVREG, 0x21, 0x3c },
+ { REQ_07_SET_GET_AVREG, 0x2d, 0x48 },
+ { REQ_07_SET_GET_AVREG, 0x2e, 0x88 },
+ { REQ_07_SET_GET_AVREG, 0x30, 0x22 },
+ { REQ_07_SET_GET_AVREG, 0x31, 0x61 },
+ { REQ_07_SET_GET_AVREG, 0x32, 0x74 },
+ { REQ_07_SET_GET_AVREG, 0x33, 0x1c },
+ { REQ_07_SET_GET_AVREG, 0x34, 0x74 },
+ { REQ_07_SET_GET_AVREG, 0x35, 0x1c },
+ { REQ_07_SET_GET_AVREG, 0x36, 0x7a },
+ { REQ_07_SET_GET_AVREG, 0x37, 0x26 },
+ { REQ_07_SET_GET_AVREG, 0x38, 0x40 },
+ { REQ_07_SET_GET_AVREG, 0x39, 0x0a },
+ { REQ_07_SET_GET_AVREG, 0x42, 0x55 },
+ { REQ_07_SET_GET_AVREG, 0x51, 0x11 },
+ { REQ_07_SET_GET_AVREG, 0x55, 0x01 },
+ { REQ_07_SET_GET_AVREG, 0x57, 0x02 },
+ { REQ_07_SET_GET_AVREG, 0x58, 0x35 },
+ { REQ_07_SET_GET_AVREG, 0x59, 0xa0 },
+ { REQ_07_SET_GET_AVREG, 0x80, 0x15 },
+ { REQ_07_SET_GET_AVREG, 0x82, 0x42 },
+ { REQ_07_SET_GET_AVREG, 0xc1, 0xd0 },
+ { REQ_07_SET_GET_AVREG, 0xc3, 0x88 },
+ { REQ_07_SET_GET_AVREG, 0x3f, 0x00 }, /* End of the soft reset */
+ { REQ_05_SET_GET_USBREG, 0x18, 0x00 },
+};
+
+struct reg_init tm6010_init_tab[] = {
+ { REQ_07_SET_GET_AVREG, 0xc0, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0xc4, 0xa0 },
+ { REQ_07_SET_GET_AVREG, 0xc6, 0x40 },
+ { REQ_07_SET_GET_AVREG, 0xca, 0x31 },
+ { REQ_07_SET_GET_AVREG, 0xcc, 0xe1 },
+ { REQ_07_SET_GET_AVREG, 0xe0, 0x03 },
+ { REQ_07_SET_GET_AVREG, 0xfe, 0x7f },
+
+ { REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf4 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf8 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x00 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf2 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xf0 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x60 },
+ { REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc },
+
+ { REQ_07_SET_GET_AVREG, 0x3f, 0x01 },
+ { REQ_07_SET_GET_AVREG, 0x00, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0x01, 0x07 },
+ { REQ_07_SET_GET_AVREG, 0x02, 0x5f },
+ { REQ_07_SET_GET_AVREG, 0x03, 0x00 },
+ { REQ_07_SET_GET_AVREG, 0x05, 0x64 },
+ { REQ_07_SET_GET_AVREG, 0x07, 0x01 },
+ { REQ_07_SET_GET_AVREG, 0x08, 0x82 },
+ { REQ_07_SET_GET_AVREG, 0x09, 0x36 },
+ { REQ_07_SET_GET_AVREG, 0x0a, 0x50 },
+ { REQ_07_SET_GET_AVREG, 0x0c, 0x6a },
+ { REQ_07_SET_GET_AVREG, 0x11, 0xc9 },
+ { REQ_07_SET_GET_AVREG, 0x12, 0x07 },
+ { REQ_07_SET_GET_AVREG, 0x13, 0x3b },
+ { REQ_07_SET_GET_AVREG, 0x14, 0x47 },
+ { REQ_07_SET_GET_AVREG, 0x15, 0x6f },
+ { REQ_07_SET_GET_AVREG, 0x17, 0xcd },
+ { REQ_07_SET_GET_AVREG, 0x18, 0x1e },
+ { REQ_07_SET_GET_AVREG, 0x19, 0x8b },
+ { REQ_07_SET_GET_AVREG, 0x1a, 0xa2 },
+ { REQ_07_SET_GET_AVREG, 0x1b, 0xe9 },
+ { REQ_07_SET_GET_AVREG, 0x1c, 0x1c },
+ { REQ_07_SET_GET_AVREG, 0x1d, 0xcc },
+ { REQ_07_SET_GET_AVREG, 0x1e, 0xcc },
+ { REQ_07_SET_GET_AVREG, 0x1f, 0xcd },
+ { REQ_07_SET_GET_AVREG, 0x20, 0x3c },
+ { REQ_07_SET_GET_AVREG, 0x21, 0x3c },
+ { REQ_07_SET_GET_AVREG, 0x2d, 0x48 },
+ { REQ_07_SET_GET_AVREG, 0x2e, 0x88 },
+ { REQ_07_SET_GET_AVREG, 0x30, 0x22 },
+ { REQ_07_SET_GET_AVREG, 0x31, 0x61 },
+ { REQ_07_SET_GET_AVREG, 0x32, 0x74 },
+ { REQ_07_SET_GET_AVREG, 0x33, 0x1c },
+ { REQ_07_SET_GET_AVREG, 0x34, 0x74 },
+ { REQ_07_SET_GET_AVREG, 0x35, 0x1c },
+ { REQ_07_SET_GET_AVREG, 0x36, 0x7a },
+ { REQ_07_SET_GET_AVREG, 0x37, 0x26 },
+ { REQ_07_SET_GET_AVREG, 0x38, 0x40 },
+ { REQ_07_SET_GET_AVREG, 0x39, 0x0a },
+ { REQ_07_SET_GET_AVREG, 0x42, 0x55 },
+ { REQ_07_SET_GET_AVREG, 0x51, 0x11 },
+ { REQ_07_SET_GET_AVREG, 0x55, 0x01 },
+ { REQ_07_SET_GET_AVREG, 0x57, 0x02 },
+ { REQ_07_SET_GET_AVREG, 0x58, 0x35 },
+ { REQ_07_SET_GET_AVREG, 0x59, 0xa0 },
+ { REQ_07_SET_GET_AVREG, 0x80, 0x15 },
+ { REQ_07_SET_GET_AVREG, 0x82, 0x42 },
+ { REQ_07_SET_GET_AVREG, 0xc1, 0xd0 },
+ { REQ_07_SET_GET_AVREG, 0xc3, 0x88 },
+ { REQ_07_SET_GET_AVREG, 0x3f, 0x00 },
+
+ { REQ_05_SET_GET_USBREG, 0x18, 0x00 },
+
+ /* set remote wakeup key:any key wakeup */
+ { REQ_07_SET_GET_AVREG, 0xe5, 0xfe },
+ { REQ_07_SET_GET_AVREG, 0xda, 0xff },
+};
+
+int tm6000_init (struct tm6000_core *dev)
+{
+ int board, rc=0, i, size;
+ struct reg_init *tab;
+
+ if (dev->dev_type == TM6010) {
+ tab = tm6010_init_tab;
+ size = ARRAY_SIZE(tm6010_init_tab);
+ } else {
+ tab = tm6000_init_tab;
+ size = ARRAY_SIZE(tm6000_init_tab);
+ }
+
+ /* Load board's initialization table */
+ for (i=0; i< size; i++) {
+ rc= tm6000_set_reg (dev, tab[i].req, tab[i].reg, tab[i].val);
+ if (rc<0) {
+ printk (KERN_ERR "Error %i while setting req %d, "
+ "reg %d to value %d\n", rc,
+ tab[i].req,tab[i].reg, tab[i].val);
+ return rc;
+ }
+ }
+
+ msleep(5); /* Just to be conservative */
+
+ /* Check board version - maybe 10Moons specific */
+ board=tm6000_get_reg16 (dev, 0x40, 0, 0);
+ if (board >=0) {
+ printk (KERN_INFO "Board version = 0x%04x\n",board);
+ } else {
+ printk (KERN_ERR "Error %i while retrieving board version\n",board);
+ }
+
+ if (dev->dev_type == TM6010) {
+ /* Turn xceive 3028 on */
+ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, TM6010_GPIO_3, 0x01);
+ msleep(11);
+ }
+
+ /* Reset GPIO1 and GPIO4. */
+ for (i=0; i< 2; i++) {
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->tuner_reset_gpio, 0x00);
+ if (rc<0) {
+ printk (KERN_ERR "Error %i doing GPIO1 reset\n",rc);
+ return rc;
+ }
+
+ msleep(10); /* Just to be conservative */
+ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN,
+ dev->tuner_reset_gpio, 0x01);
+ if (rc<0) {
+ printk (KERN_ERR "Error %i doing GPIO1 reset\n",rc);
+ return rc;
+ }
+
+ msleep(10);
+ rc=tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_4, 0);
+ if (rc<0) {
+ printk (KERN_ERR "Error %i doing GPIO4 reset\n",rc);
+ return rc;
+ }
+
+ msleep(10);
+ rc=tm6000_set_reg (dev, REQ_03_SET_GET_MCU_PIN, TM6000_GPIO_4, 1);
+ if (rc<0) {
+ printk (KERN_ERR "Error %i doing GPIO4 reset\n",rc);
+ return rc;
+ }
+
+ if (!i) {
+ rc=tm6000_get_reg16(dev, 0x40,0,0);
+ if (rc>=0) {
+ printk ("board=%d\n", rc);
+ }
+ }
+ }
+
+ msleep(50);
+
+ return 0;
+}
+
+int tm6000_set_audio_bitrate(struct tm6000_core *dev, int bitrate)
+{
+ int val;
+
+ val=tm6000_get_reg (dev, REQ_07_SET_GET_AVREG, 0xeb, 0x0);
+printk("Original value=%d\n",val);
+ if (val<0)
+ return val;
+
+ val &= 0x0f; /* Preserve the audio input control bits */
+ switch (bitrate) {
+ case 44100:
+ val|=0xd0;
+ dev->audio_bitrate=bitrate;
+ break;
+ case 48000:
+ val|=0x60;
+ dev->audio_bitrate=bitrate;
+ break;
+ }
+ val=tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0xeb, val);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(tm6000_set_audio_bitrate);
diff --git a/drivers/staging/tm6000/tm6000-dvb.c b/drivers/staging/tm6000/tm6000-dvb.c
new file mode 100644
index 000000000000..e900d6ddea30
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-dvb.c
@@ -0,0 +1,322 @@
+/*
+ tm6000-dvb.c - dvb-t support for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/usb.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+#include "hack.h"
+
+#include "zl10353.h"
+
+#include <media/tuner.h>
+
+#include "tuner-xc2028.h"
+
+static void tm6000_urb_received(struct urb *urb)
+{
+ int ret;
+ struct tm6000_core* dev = urb->context;
+
+ if(urb->status != 0){
+ printk(KERN_ERR "tm6000: status != 0\n");
+ }
+ else if(urb->actual_length>0){
+ dvb_dmx_swfilter(&dev->dvb->demux, urb->transfer_buffer,
+ urb->actual_length);
+ }
+
+ if(dev->dvb->streams > 0) {
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: error %s\n", __FUNCTION__);
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ }
+}
+
+int tm6000_start_stream(struct tm6000_core *dev)
+{
+ int ret;
+ unsigned int pipe, maxPaketSize;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ printk(KERN_INFO "tm6000: got start stream request %s\n",__FUNCTION__);
+
+ tm6000_init_digital_mode(dev);
+
+/*
+ ret = tm6000_set_led_status(tm6000_dev, 0x1);
+ if(ret < 0) {
+ return -1;
+ }
+*/
+
+ dvb->bulk_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if(dvb->bulk_urb == NULL) {
+ printk(KERN_ERR "tm6000: couldn't allocate urb\n");
+ return -ENOMEM;
+ }
+
+ maxPaketSize = dev->bulk_in->desc.wMaxPacketSize;
+
+ dvb->bulk_urb->transfer_buffer = kzalloc(maxPaketSize, GFP_KERNEL);
+ if(dvb->bulk_urb->transfer_buffer == NULL) {
+ usb_free_urb(dvb->bulk_urb);
+ printk(KERN_ERR "tm6000: couldn't allocate transfer buffer!\n");
+ return -ENOMEM;
+ }
+
+ pipe = usb_rcvbulkpipe(dev->udev, dev->bulk_in->desc.bEndpointAddress
+ & USB_ENDPOINT_NUMBER_MASK);
+
+ usb_fill_bulk_urb(dvb->bulk_urb, dev->udev, pipe,
+ dvb->bulk_urb->transfer_buffer,
+ maxPaketSize,
+ tm6000_urb_received, dev);
+
+ ret = usb_set_interface(dev->udev, 0, 1);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: error %i in %s during set interface\n", ret, __FUNCTION__);
+ return ret;
+ }
+
+ ret = usb_clear_halt(dev->udev, pipe);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",ret,__FUNCTION__);
+ return ret;
+ }
+ else {
+ printk(KERN_ERR "tm6000: pipe resetted\n");
+ }
+
+// mutex_lock(&tm6000_driver.open_close_mutex);
+ ret = usb_submit_urb(dvb->bulk_urb, GFP_KERNEL);
+
+
+// mutex_unlock(&tm6000_driver.open_close_mutex);
+ if (ret) {
+ printk(KERN_ERR "tm6000: submit of urb failed (error=%i)\n",ret);
+
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ return ret;
+ }
+
+ return 0;
+}
+
+void tm6000_stop_stream(struct tm6000_core *dev)
+{
+ int ret;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+// tm6000_set_led_status(tm6000_dev, 0x0);
+
+ ret = usb_set_interface(dev->udev, 0, 0);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: error %i in %s during set interface\n",ret,__FUNCTION__);
+ }
+
+ if(dvb->bulk_urb) {
+ usb_kill_urb(dvb->bulk_urb);
+ kfree(dvb->bulk_urb->transfer_buffer);
+ usb_free_urb(dvb->bulk_urb);
+ dvb->bulk_urb = NULL;
+ }
+}
+
+int tm6000_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct tm6000_core *dev = demux->priv;
+ struct tm6000_dvb *dvb = dev->dvb;
+ printk(KERN_INFO "tm6000: got start feed request %s\n",__FUNCTION__);
+
+ mutex_lock(&dvb->mutex);
+ if(dvb->streams == 0) {
+ dvb->streams = 1;
+// mutex_init(&tm6000_dev->streaming_mutex);
+ tm6000_start_stream(dev);
+ }
+ else {
+ ++(dvb->streams);
+ }
+ mutex_unlock(&dvb->mutex);
+
+ return 0;
+}
+
+int tm6000_stop_feed(struct dvb_demux_feed *feed) {
+ struct dvb_demux *demux = feed->demux;
+ struct tm6000_core *dev = demux->priv;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ printk(KERN_INFO "tm6000: got stop feed request %s\n",__FUNCTION__);
+
+ mutex_lock(&dvb->mutex);
+ --dvb->streams;
+
+ if(0 == dvb->streams) {
+ tm6000_stop_stream(dev);
+// mutex_destroy(&tm6000_dev->streaming_mutex);
+ }
+ mutex_unlock(&dvb->mutex);
+// mutex_destroy(&tm6000_dev->streaming_mutex);
+
+ return 0;
+}
+
+int tm6000_dvb_attach_frontend(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dev->caps.has_zl10353) {
+ struct zl10353_config config =
+ {.demod_address = dev->demod_addr >> 1,
+ .no_tuner = 1,
+// .input_frequency = 0x19e9,
+// .r56_agc_targets = 0x1c,
+ };
+
+ dvb->frontend = pseudo_zl10353_attach(dev, &config,
+ &dev->i2c_adap);
+ }
+ else {
+ printk(KERN_ERR "tm6000: no frontend defined for the device!\n");
+ return -1;
+ }
+
+ return (!dvb->frontend) ? -1 : 0;
+}
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+int tm6000_dvb_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ mutex_init(&dvb->mutex);
+
+ dvb->streams = 0;
+
+ /* attach the frontend */
+ ret = tm6000_dvb_attach_frontend(dev);
+ if(ret < 0) {
+ printk(KERN_ERR "tm6000: couldn't attach the frontend!\n");
+ goto err;
+ }
+
+ ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
+ THIS_MODULE, &dev->udev->dev, adapter_nr);
+ dvb->adapter.priv = dev;
+
+ if (dvb->frontend) {
+ struct xc2028_config cfg = {
+ .i2c_adap = &dev->i2c_adap,
+ .i2c_addr = dev->tuner_addr,
+ };
+
+ ret = dvb_register_frontend(&dvb->adapter, dvb->frontend);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "tm6000: couldn't register frontend\n");
+ goto adapter_err;
+ }
+
+ if (!dvb_attach(xc2028_attach, dvb->frontend, &cfg)) {
+ printk(KERN_ERR "tm6000: couldn't register "
+ "frontend (xc3028)\n");
+ ret = -EINVAL;
+ goto frontend_err;
+ }
+ printk(KERN_INFO "tm6000: XC2028/3028 asked to be "
+ "attached to frontend!\n");
+ } else {
+ printk(KERN_ERR "tm6000: no frontend found\n");
+ }
+
+ dvb->demux.dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING
+ | DMX_MEMORY_BASED_FILTERING;
+ dvb->demux.priv = dev;
+ dvb->demux.filternum = 256;
+ dvb->demux.feednum = 256;
+ dvb->demux.start_feed = tm6000_start_feed;
+ dvb->demux.stop_feed = tm6000_stop_feed;
+ dvb->demux.write_to_decoder = NULL;
+ ret = dvb_dmx_init(&dvb->demux);
+ if(ret < 0) {
+ printk("tm6000: dvb_dmx_init failed (errno = %d)\n", ret);
+ goto frontend_err;
+ }
+
+ dvb->dmxdev.filternum = dev->dvb->demux.filternum;
+ dvb->dmxdev.demux = &dev->dvb->demux.dmx;
+ dvb->dmxdev.capabilities = 0;
+
+ ret = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
+ if(ret < 0) {
+ printk("tm6000: dvb_dmxdev_init failed (errno = %d)\n", ret);
+ goto dvb_dmx_err;
+ }
+
+ return 0;
+
+dvb_dmx_err:
+ dvb_dmx_release(&dvb->demux);
+frontend_err:
+ if(dvb->frontend) {
+ dvb_frontend_detach(dvb->frontend);
+ dvb_unregister_frontend(dvb->frontend);
+ }
+adapter_err:
+ dvb_unregister_adapter(&dvb->adapter);
+err:
+ return ret;
+}
+
+void tm6000_dvb_unregister(struct tm6000_core *dev)
+{
+ struct tm6000_dvb *dvb = dev->dvb;
+
+ if(dvb->bulk_urb != NULL) {
+ struct urb *bulk_urb = dvb->bulk_urb;
+
+ kfree(bulk_urb->transfer_buffer);
+ bulk_urb->transfer_buffer = NULL;
+ usb_unlink_urb(bulk_urb);
+ usb_free_urb(bulk_urb);
+ }
+
+// mutex_lock(&tm6000_driver.open_close_mutex);
+ if(dvb->frontend) {
+ dvb_frontend_detach(dvb->frontend);
+ dvb_unregister_frontend(dvb->frontend);
+ }
+
+ dvb_dmxdev_release(&dvb->dmxdev);
+ dvb_dmx_release(&dvb->demux);
+ dvb_unregister_adapter(&dvb->adapter);
+ mutex_destroy(&dvb->mutex);
+// mutex_unlock(&tm6000_driver.open_close_mutex);
+
+}
diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/staging/tm6000/tm6000-i2c.c
new file mode 100644
index 000000000000..4da10f5ea024
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-i2c.c
@@ -0,0 +1,245 @@
+/*
+ tm6000-i2c.c - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - Fix SMBus Read Byte command
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/i2c.h>
+
+#include "tm6000.h"
+#include "tm6000-regs.h"
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+#include "tuner-xc2028.h"
+
+
+/*FIXME: Hack to avoid needing to patch i2c-id.h */
+#define I2C_HW_B_TM6000 I2C_HW_B_EM28XX
+/* ----------------------------------------------------------- */
+
+static unsigned int i2c_debug = 0;
+module_param(i2c_debug, int, 0644);
+MODULE_PARM_DESC(i2c_debug, "enable debug messages [i2c]");
+
+#define i2c_dprintk(lvl,fmt, args...) if (i2c_debug>=lvl) do{ \
+ printk(KERN_DEBUG "%s at %s: " fmt, \
+ dev->name, __FUNCTION__ , ##args); } while (0)
+
+static int tm6000_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[], int num)
+{
+ struct tm6000_core *dev = i2c_adap->algo_data;
+ int addr, rc, i, byte;
+
+ if (num <= 0)
+ return 0;
+ for (i = 0; i < num; i++) {
+ addr = (msgs[i].addr << 1) & 0xff;
+ i2c_dprintk(2,"%s %s addr=0x%x len=%d:",
+ (msgs[i].flags & I2C_M_RD) ? "read" : "write",
+ i == num - 1 ? "stop" : "nonstop", addr, msgs[i].len);
+ if (msgs[i].flags & I2C_M_RD) {
+ /* read request without preceding register selection */
+ /*
+ * The TM6000 only supports a read transaction
+ * immediately after a 1 or 2 byte write to select
+ * a register. We cannot fulfil this request.
+ */
+ i2c_dprintk(2, " read without preceding write not"
+ " supported");
+ rc = -EOPNOTSUPP;
+ goto err;
+ } else if (i + 1 < num && msgs[i].len <= 2 &&
+ (msgs[i + 1].flags & I2C_M_RD) &&
+ msgs[i].addr == msgs[i + 1].addr) {
+ /* 1 or 2 byte write followed by a read */
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ i2c_dprintk(2, "; joined to read %s len=%d:",
+ i == num - 2 ? "stop" : "nonstop",
+ msgs[i + 1].len);
+ rc = tm6000_read_write_usb (dev,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ msgs[i].len == 1 ? REQ_16_SET_GET_I2C_WR1_RDN
+ : REQ_14_SET_GET_I2C_WR2_RDN,
+ addr | msgs[i].buf[0] << 8,
+ msgs[i].len == 1 ? 0 : msgs[i].buf[1],
+ msgs[i + 1].buf, msgs[i + 1].len);
+ i++;
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ } else {
+ /* write bytes */
+ if (i2c_debug >= 2)
+ for (byte = 0; byte < msgs[i].len; byte++)
+ printk(" %02x", msgs[i].buf[byte]);
+ rc = tm6000_read_write_usb(dev,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_16_SET_GET_I2C_WR1_RDN,
+ addr | msgs[i].buf[0] << 8, 0,
+ msgs[i].buf + 1, msgs[i].len - 1);
+ }
+ if (i2c_debug >= 2)
+ printk("\n");
+ if (rc < 0)
+ goto err;
+ }
+
+ return num;
+err:
+ i2c_dprintk(2," ERROR: %i\n", rc);
+ return rc;
+}
+
+static int tm6000_i2c_eeprom(struct tm6000_core *dev,
+ unsigned char *eedata, int len)
+{
+ int i, rc;
+ unsigned char *p = eedata;
+ unsigned char bytes[17];
+
+ dev->i2c_client.addr = 0xa0 >> 1;
+
+ bytes[16] = '\0';
+ for (i = 0; i < len; ) {
+ *p = i;
+ rc = tm6000_read_write_usb (dev,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ REQ_16_SET_GET_I2C_WR1_RDN, 0xa0 | i<<8, 0, p, 1);
+ if (rc < 1) {
+ if (p == eedata)
+ goto noeeprom;
+ else {
+ printk(KERN_WARNING
+ "%s: i2c eeprom read error (err=%d)\n",
+ dev->name, rc);
+ }
+ return -1;
+ }
+ p++;
+ if (0 == (i % 16))
+ printk(KERN_INFO "%s: i2c eeprom %02x:", dev->name, i);
+ printk(" %02x", eedata[i]);
+ if ((eedata[i] >= ' ') && (eedata[i] <= 'z')) {
+ bytes[i%16] = eedata[i];
+ } else {
+ bytes[i%16]='.';
+ }
+
+ i++;
+
+ if (0 == (i % 16)) {
+ bytes[16] = '\0';
+ printk(" %s\n", bytes);
+ }
+ }
+ if (0 != (i%16)) {
+ bytes[i%16] = '\0';
+ for (i %= 16; i < 16; i++)
+ printk(" ");
+ }
+ printk(" %s\n", bytes);
+
+ return 0;
+
+noeeprom:
+ printk(KERN_INFO "%s: Huh, no eeprom present (err=%d)?\n",
+ dev->name, rc);
+ return rc;
+}
+
+/* ----------------------------------------------------------- */
+
+/*
+ * functionality()
+ */
+static u32 functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL;
+}
+
+#define mass_write(addr, reg, data...) \
+ { const static u8 _val[] = data; \
+ rc=tm6000_read_write_usb(dev,USB_DIR_OUT | USB_TYPE_VENDOR, \
+ REQ_16_SET_GET_I2C_WR1_RDN,(reg<<8)+addr, 0x00, (u8 *) _val, \
+ ARRAY_SIZE(_val)); \
+ if (rc<0) { \
+ printk(KERN_ERR "Error on line %d: %d\n",__LINE__,rc); \
+ return rc; \
+ } \
+ msleep (10); \
+ }
+
+static struct i2c_algorithm tm6000_algo = {
+ .master_xfer = tm6000_i2c_xfer,
+ .functionality = functionality,
+};
+
+static struct i2c_adapter tm6000_adap_template = {
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_TV_ANALOG,
+ .name = "tm6000",
+ .id = I2C_HW_B_TM6000,
+ .algo = &tm6000_algo,
+};
+
+static struct i2c_client tm6000_client_template = {
+ .name = "tm6000 internal",
+};
+
+/* ----------------------------------------------------------- */
+
+/*
+ * tm6000_i2c_register()
+ * register i2c bus
+ */
+int tm6000_i2c_register(struct tm6000_core *dev)
+{
+ unsigned char eedata[256];
+
+ dev->i2c_adap = tm6000_adap_template;
+ dev->i2c_adap.dev.parent = &dev->udev->dev;
+ strcpy(dev->i2c_adap.name, dev->name);
+ dev->i2c_adap.algo_data = dev;
+ i2c_add_adapter(&dev->i2c_adap);
+
+ dev->i2c_client = tm6000_client_template;
+ dev->i2c_client.adapter = &dev->i2c_adap;
+
+ i2c_set_adapdata(&dev->i2c_adap, &dev->v4l2_dev);
+
+ tm6000_i2c_eeprom(dev, eedata, sizeof(eedata));
+
+ return 0;
+}
+
+/*
+ * tm6000_i2c_unregister()
+ * unregister i2c_bus
+ */
+int tm6000_i2c_unregister(struct tm6000_core *dev)
+{
+ i2c_del_adapter(&dev->i2c_adap);
+ return 0;
+}
diff --git a/drivers/staging/tm6000/tm6000-regs.h b/drivers/staging/tm6000/tm6000-regs.h
new file mode 100644
index 000000000000..85acc07f62e9
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-regs.h
@@ -0,0 +1,86 @@
+/*
+ tm6000-regs.h - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * Define TV Master TM5600/TM6000 Request codes
+ */
+#define REQ_00_SET_IR_VALUE 0
+#define REQ_01_SET_WAKEUP_IRCODE 1
+#define REQ_02_GET_IR_CODE 2
+#define REQ_03_SET_GET_MCU_PIN 3
+#define REQ_04_EN_DISABLE_MCU_INT 4
+#define REQ_05_SET_GET_USBREG 5
+ /* Write: RegNum, Value, 0 */
+ /* Read : RegNum, Value, 1, RegStatus */
+#define REQ_06_SET_GET_USBREG_BIT 6
+#define REQ_07_SET_GET_AVREG 7
+ /* Write: RegNum, Value, 0 */
+ /* Read : RegNum, Value, 1, RegStatus */
+#define REQ_08_SET_GET_AVREG_BIT 8
+#define REQ_09_SET_GET_TUNER_FQ 9
+#define REQ_10_SET_TUNER_SYSTEM 10
+#define REQ_11_SET_EEPROM_ADDR 11
+#define REQ_12_SET_GET_EEPROMBYTE 12
+#define REQ_13_GET_EEPROM_SEQREAD 13
+#define REQ_14_SET_GET_I2C_WR2_RDN 14
+#define REQ_15_SET_GET_I2CBYTE 15
+ /* Write: Subaddr, Slave Addr, value, 0 */
+ /* Read : Subaddr, Slave Addr, value, 1 */
+#define REQ_16_SET_GET_I2C_WR1_RDN 16
+ /* Subaddr, Slave Addr, 0, length */
+#define REQ_17_SET_GET_I2CFP 17
+ /* Write: Slave Addr, register, value */
+ /* Read : Slave Addr, register, 2, data */
+
+/*
+ * Define TV Master TM5600/TM6000 GPIO lines
+ */
+
+#define TM6000_GPIO_CLK 0x101
+#define TM6000_GPIO_DATA 0x100
+
+#define TM6000_GPIO_1 0x102
+#define TM6000_GPIO_2 0x103
+#define TM6000_GPIO_3 0x104
+#define TM6000_GPIO_4 0x300
+#define TM6000_GPIO_5 0x301
+#define TM6000_GPIO_6 0x304
+#define TM6000_GPIO_7 0x305
+
+/* tm6010 defines GPIO with different values */
+#define TM6010_GPIO_0 0x0102
+#define TM6010_GPIO_1 0x0103
+#define TM6010_GPIO_2 0x0104
+#define TM6010_GPIO_3 0x0105
+#define TM6010_GPIO_4 0x0106
+#define TM6010_GPIO_5 0x0107
+#define TM6010_GPIO_6 0x0300
+#define TM6010_GPIO_7 0x0301
+#define TM6010_GPIO_9 0x0305
+/*
+ * Define TV Master TM5600/TM6000 URB message codes and length
+ */
+
+enum {
+ TM6000_URB_MSG_VIDEO=1,
+ TM6000_URB_MSG_AUDIO,
+ TM6000_URB_MSG_VBI,
+ TM6000_URB_MSG_PTS,
+ TM6000_URB_MSG_ERR,
+};
diff --git a/drivers/staging/tm6000/tm6000-stds.c b/drivers/staging/tm6000/tm6000-stds.c
new file mode 100644
index 000000000000..c61d1a61ea3e
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-stds.c
@@ -0,0 +1,873 @@
+/*
+ tm6000-stds.c - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2007 Mauro Carvalho Chehab <mchehab@redhat.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "tm6000.h"
+#include "tm6000-regs.h"
+
+struct tm6000_reg_settings {
+ unsigned char req;
+ unsigned char reg;
+ unsigned char value;
+};
+
+struct tm6000_std_tv_settings {
+ v4l2_std_id id;
+ struct tm6000_reg_settings sif[12];
+ struct tm6000_reg_settings nosif[12];
+ struct tm6000_reg_settings common[25];
+};
+
+struct tm6000_std_settings {
+ v4l2_std_id id;
+ struct tm6000_reg_settings common[37];
+};
+
+static struct tm6000_std_tv_settings tv_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .sif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x08},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x62},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfe},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x60},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x04},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x83},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x0a},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xe0},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x88},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x20},
+ {REQ_07_SET_GET_AVREG, 0x31, 0x61},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .sif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x08},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x62},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfe},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x60},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x36},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x02},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x91},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x1f},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .sif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x08},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x62},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfe},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0xcb},
+ {0, 0, 0}
+ },
+ .nosif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x60},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x32},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x02},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x25},
+ {REQ_07_SET_GET_AVREG, 0x19, 0xd5},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x63},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0x50},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .sif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x08},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x62},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfe},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x60},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x38},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x02},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x24},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x92},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0xe8},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xed},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x18},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x42},
+ {REQ_07_SET_GET_AVREG, 0x83, 0xFF},
+
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .sif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x08},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x62},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfe},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0xcb},
+ {0, 0, 0},
+ },
+ .nosif = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x60},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+ {0, 0, 0},
+ },
+ .common = {
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0f},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x8b},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0xa2},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xe9},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x88},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x22},
+ {REQ_07_SET_GET_AVREG, 0x31, 0x61},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x42},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdd},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+static struct tm6000_std_settings composite_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf4},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x04},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x83},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x0a},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xe0},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x88},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x20},
+ {REQ_07_SET_GET_AVREG, 0x31, 0x61},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf4},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x36},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x02},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x91},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x1f},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf4},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x32},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x02},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x25},
+ {REQ_07_SET_GET_AVREG, 0x19, 0xd5},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x63},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0x50},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf4},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x38},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x02},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x24},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x92},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0xe8},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xed},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x18},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x42},
+ {REQ_07_SET_GET_AVREG, 0x83, 0xFF},
+
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xf4},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf3},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x0f},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf1},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8b},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0f},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x8b},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0xa2},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xe9},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x88},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x22},
+ {REQ_07_SET_GET_AVREG, 0x31, 0x61},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x42},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdd},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+static struct tm6000_std_settings svideo_stds[] = {
+ {
+ .id = V4L2_STD_PAL_M,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xfc},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x00},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8a},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x05},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x04},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x83},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x0a},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xe0},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x88},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x22},
+ {REQ_07_SET_GET_AVREG, 0x31, 0x61},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL_Nc,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xfc},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x00},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8a},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x37},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x04},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x91},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x1f},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x88},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x22},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_PAL,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xfc},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x00},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8a},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x33},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x04},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x25},
+ {REQ_07_SET_GET_AVREG, 0x19, 0xd5},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0x63},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0x50},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2a},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x0c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x52},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdc},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_SECAM,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xfc},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x00},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8a},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x39},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0e},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x03},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x24},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x92},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0xe8},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xed},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x8c},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x2a},
+ {REQ_07_SET_GET_AVREG, 0x31, 0xc1},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x2c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x18},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x42},
+ {REQ_07_SET_GET_AVREG, 0x83, 0xFF},
+
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ }, {
+ .id = V4L2_STD_NTSC,
+ .common = {
+ {REQ_08_SET_GET_AVREG_BIT, 0xe2, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe3, 0xfc},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe4, 0xf8},
+ {REQ_08_SET_GET_AVREG_BIT, 0xe6, 0x00},
+ {REQ_08_SET_GET_AVREG_BIT, 0xea, 0xf2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xeb, 0xf0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xec, 0xc2},
+ {REQ_08_SET_GET_AVREG_BIT, 0xed, 0xe0},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf0, 0x68},
+ {REQ_08_SET_GET_AVREG_BIT, 0xf1, 0xfc},
+ {REQ_07_SET_GET_AVREG, 0xfe, 0x8a},
+
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x00, 0x01},
+ {REQ_07_SET_GET_AVREG, 0x01, 0x0f},
+ {REQ_07_SET_GET_AVREG, 0x02, 0x5f},
+ {REQ_07_SET_GET_AVREG, 0x03, 0x03},
+ {REQ_07_SET_GET_AVREG, 0x07, 0x00},
+ {REQ_07_SET_GET_AVREG, 0x17, 0x8b},
+ {REQ_07_SET_GET_AVREG, 0x18, 0x1e},
+ {REQ_07_SET_GET_AVREG, 0x19, 0x8b},
+ {REQ_07_SET_GET_AVREG, 0x1a, 0xa2},
+ {REQ_07_SET_GET_AVREG, 0x1b, 0xe9},
+ {REQ_07_SET_GET_AVREG, 0x1c, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x1d, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1e, 0xcc},
+ {REQ_07_SET_GET_AVREG, 0x1f, 0xcd},
+ {REQ_07_SET_GET_AVREG, 0x2e, 0x88},
+ {REQ_07_SET_GET_AVREG, 0x30, 0x22},
+ {REQ_07_SET_GET_AVREG, 0x31, 0x61},
+ {REQ_07_SET_GET_AVREG, 0x33, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x35, 0x1c},
+ {REQ_07_SET_GET_AVREG, 0x82, 0x42},
+ {REQ_07_SET_GET_AVREG, 0x83, 0x6F},
+
+ {REQ_07_SET_GET_AVREG, 0x04, 0xdd},
+ {REQ_07_SET_GET_AVREG, 0x0d, 0x07},
+ {REQ_07_SET_GET_AVREG, 0x3f, 0x00},
+ {0, 0, 0},
+ },
+ },
+};
+
+void tm6000_get_std_res(struct tm6000_core *dev)
+{
+ /* Currently, those are the only supported resoltions */
+ if (dev->norm & V4L2_STD_525_60) {
+ dev->height = 480;
+ } else {
+ dev->height = 576;
+ }
+ dev->width = 720;
+}
+
+static int tm6000_load_std(struct tm6000_core *dev,
+ struct tm6000_reg_settings *set, int max_size)
+{
+ int i, rc;
+
+ /* Load board's initialization table */
+ for (i = 0; max_size; i++) {
+ if (!set[i].req)
+ return 0;
+
+ if ((dev->dev_type != TM6010) &&
+ (set[i].req == REQ_08_SET_GET_AVREG_BIT))
+ continue;
+
+ rc = tm6000_set_reg(dev, set[i].req, set[i].reg, set[i].value);
+ if (rc < 0) {
+ printk(KERN_ERR "Error %i while setting "
+ "req %d, reg %d to value %d\n",
+ rc, set[i].req, set[i].reg, set[i].value);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int tm6000_set_tv(struct tm6000_core *dev, int pos)
+{
+ int rc;
+
+ /* FIXME: This code is for tm6010 - not tested yet - doesn't work with
+ tm5600
+ */
+
+ /* FIXME: This is tuner-dependent */
+ int nosif = 0;
+
+ if (nosif) {
+ rc = tm6000_load_std(dev, tv_stds[pos].nosif,
+ sizeof(tv_stds[pos].nosif));
+ } else {
+ rc = tm6000_load_std(dev, tv_stds[pos].sif,
+ sizeof(tv_stds[pos].sif));
+ }
+ if (rc < 0)
+ return rc;
+ rc = tm6000_load_std(dev, tv_stds[pos].common,
+ sizeof(tv_stds[pos].common));
+
+ return rc;
+}
+
+int tm6000_set_standard(struct tm6000_core *dev, v4l2_std_id * norm)
+{
+ int i, rc = 0;
+
+ dev->norm = *norm;
+ tm6000_get_std_res(dev);
+
+ switch (dev->input) {
+ case TM6000_INPUT_TV:
+ for (i = 0; i < ARRAY_SIZE(tv_stds); i++) {
+ if (*norm & tv_stds[i].id) {
+ rc = tm6000_set_tv(dev, i);
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ case TM6000_INPUT_SVIDEO:
+ for (i = 0; i < ARRAY_SIZE(svideo_stds); i++) {
+ if (*norm & svideo_stds[i].id) {
+ rc = tm6000_load_std(dev, svideo_stds[i].common,
+ sizeof(svideo_stds[i].
+ common));
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ case TM6000_INPUT_COMPOSITE:
+ for (i = 0; i < ARRAY_SIZE(composite_stds); i++) {
+ if (*norm & composite_stds[i].id) {
+ rc = tm6000_load_std(dev,
+ composite_stds[i].common,
+ sizeof(composite_stds[i].
+ common));
+ goto ret;
+ }
+ }
+ return -EINVAL;
+ }
+
+ret:
+ if (rc < 0)
+ return rc;
+
+ msleep(40);
+
+
+ return 0;
+}
diff --git a/drivers/staging/tm6000/tm6000-usb-isoc.h b/drivers/staging/tm6000/tm6000-usb-isoc.h
new file mode 100644
index 000000000000..11297763de6f
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-usb-isoc.h
@@ -0,0 +1,53 @@
+/*
+ tm6000-buf.c - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/videodev2.h>
+
+#define TM6000_URB_MSG_LEN 180
+
+struct usb_isoc_ctl {
+ /* max packet size of isoc transaction */
+ int max_pkt_size;
+
+ /* number of allocated urbs */
+ int num_bufs;
+
+ /* urb for isoc transfers */
+ struct urb **urb;
+
+ /* transfer buffers for isoc transfer */
+ char **transfer_buffer;
+
+ /* Last buffer command and region */
+ u8 cmd;
+ int pos, size, pktsize;
+
+ /* Last field: ODD or EVEN? */
+ int field;
+
+ /* Stores incomplete commands */
+ u32 tmp_buf;
+ int tmp_buf_len;
+
+ /* Stores already requested buffers */
+ struct tm6000_buffer *buf;
+
+ /* Stores the number of received fields */
+ int nfields;
+};
diff --git a/drivers/staging/tm6000/tm6000-video.c b/drivers/staging/tm6000/tm6000-video.c
new file mode 100644
index 000000000000..c87b0d491ada
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000-video.c
@@ -0,0 +1,1550 @@
+/*
+ tm6000-video.c - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - Fixed module load/unload
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/random.h>
+#include <linux/version.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ioctl.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/highmem.h>
+#include <linux/freezer.h>
+
+#include "tm6000-regs.h"
+#include "tm6000.h"
+
+#define BUFFER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
+
+/* Limits minimum and default number of buffers */
+#define TM6000_MIN_BUF 4
+#define TM6000_DEF_BUF 8
+
+#define TM6000_MAX_ISO_PACKETS 40 /* Max number of ISO packets */
+
+/* Declare static vars that will be used as parameters */
+static unsigned int vid_limit = 16; /* Video memory limit, in Mb */
+static int video_nr = -1; /* /dev/videoN, -1 for autodetect */
+
+/* Debug level */
+int tm6000_debug;
+
+/* supported controls */
+static struct v4l2_queryctrl tm6000_qctrl[] = {
+ {
+ .id = V4L2_CID_BRIGHTNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Brightness",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 1,
+ .default_value = 54,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_CONTRAST,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Contrast",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 119,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_SATURATION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Saturation",
+ .minimum = 0,
+ .maximum = 255,
+ .step = 0x1,
+ .default_value = 112,
+ .flags = 0,
+ }, {
+ .id = V4L2_CID_HUE,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Hue",
+ .minimum = -128,
+ .maximum = 127,
+ .step = 0x1,
+ .default_value = 0,
+ .flags = 0,
+ }
+};
+
+static int qctl_regs[ARRAY_SIZE(tm6000_qctrl)];
+
+static struct tm6000_fmt format[] = {
+ {
+ .name = "4:2:2, packed, YVY2",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = 16,
+ }, {
+ .name = "4:2:2, packed, UYVY",
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .depth = 16,
+ }, {
+ .name = "A/V + VBI mux packet",
+ .fourcc = V4L2_PIX_FMT_TM6000,
+ .depth = 16,
+ }
+};
+
+/* ------------------------------------------------------------------
+ DMA and thread functions
+ ------------------------------------------------------------------*/
+
+#define norm_maxw(a) 720
+#define norm_maxh(a) 576
+
+#define norm_minw(a) norm_maxw(a)
+#define norm_minh(a) norm_maxh(a)
+
+/*
+ * video-buf generic routine to get the next available buffer
+ */
+static inline void get_next_buf(struct tm6000_dmaqueue *dma_q,
+ struct tm6000_buffer **buf)
+{
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ char *outp;
+
+ if (list_empty(&dma_q->active)) {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "No active queue to serve\n");
+ *buf = NULL;
+ return;
+ }
+
+ *buf = list_entry(dma_q->active.next,
+ struct tm6000_buffer, vb.queue);
+
+ if (!buf)
+ return;
+
+ /* Cleans up buffer - Usefull for testing for frame/URB loss */
+ outp = videobuf_to_vmalloc(&(*buf)->vb);
+ memset(outp, 0, (*buf)->vb.size);
+
+ return;
+}
+
+/*
+ * Announces that a buffer were filled and request the next
+ */
+static inline void buffer_filled(struct tm6000_core *dev,
+ struct tm6000_dmaqueue *dma_q,
+ struct tm6000_buffer *buf)
+{
+ /* Advice that buffer was filled */
+ dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i);
+ buf->vb.state = VIDEOBUF_DONE;
+ buf->vb.field_count++;
+ do_gettimeofday(&buf->vb.ts);
+
+ list_del(&buf->vb.queue);
+ wake_up(&buf->vb.done);
+}
+
+const char *tm6000_msg_type[] = {
+ "unknown(0)", /* 0 */
+ "video", /* 1 */
+ "audio", /* 2 */
+ "vbi", /* 3 */
+ "pts", /* 4 */
+ "err", /* 5 */
+ "unknown(6)", /* 6 */
+ "unknown(7)", /* 7 */
+};
+
+/*
+ * Identify the tm5600/6000 buffer header type and properly handles
+ */
+static int copy_packet(struct urb *urb, u32 header, u8 **ptr, u8 *endp,
+ u8 *out_p, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ u8 c;
+ unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0;
+ int rc = 0;
+ /* FIXME: move to tm6000-isoc */
+ static int last_line = -2, start_line = -2, last_field = -2;
+
+ /* FIXME: this is the hardcoded window size
+ */
+ unsigned int linewidth = (*buf)->vb.width << 1;
+
+ if (!dev->isoc_ctl.cmd) {
+ c = (header >> 24) & 0xff;
+
+ /* split the header fields */
+ size = (((header & 0x7e) << 1) -1) *4;
+ block = (header >> 7) & 0xf;
+ field = (header >> 11) & 0x1;
+ line = (header >> 12) & 0x1ff;
+ cmd = (header >> 21) & 0x7;
+
+ /* Validates header fields */
+ if(size > TM6000_URB_MSG_LEN)
+ size = TM6000_URB_MSG_LEN;
+
+ if (cmd == TM6000_URB_MSG_VIDEO) {
+ if ((block+1)*TM6000_URB_MSG_LEN>linewidth)
+ cmd = TM6000_URB_MSG_ERR;
+
+ /* FIXME: Mounts the image as field0+field1
+ * It should, instead, check if the user selected
+ * entrelaced or non-entrelaced mode
+ */
+ pos= ((line<<1)+field)*linewidth +
+ block*TM6000_URB_MSG_LEN;
+
+ /* Don't allow to write out of the buffer */
+ if (pos+TM6000_URB_MSG_LEN > (*buf)->vb.size) {
+ dprintk(dev, V4L2_DEBUG_ISOC,
+ "ERR: size=%d, num=%d, line=%d, "
+ "field=%d\n",
+ size, block, line, field);
+
+ cmd = TM6000_URB_MSG_ERR;
+ }
+ } else {
+ pos=0;
+ }
+
+ /* Prints debug info */
+ dprintk(dev, V4L2_DEBUG_ISOC, "size=%d, num=%d, "
+ " line=%d, field=%d\n",
+ size, block, line, field);
+
+ if ((last_line!=line)&&(last_line+1!=line) &&
+ (cmd != TM6000_URB_MSG_ERR) ) {
+ if (cmd != TM6000_URB_MSG_VIDEO) {
+ dprintk(dev, V4L2_DEBUG_ISOC, "cmd=%d, "
+ "size=%d, num=%d, line=%d, field=%d\n",
+ cmd, size, block, line, field);
+ }
+ if (start_line<0)
+ start_line=last_line;
+ /* Prints debug info */
+ dprintk(dev, V4L2_DEBUG_ISOC, "lines= %d-%d, "
+ "field=%d\n",
+ start_line, last_line, field);
+
+ if ((start_line<6 && last_line>200) &&
+ (last_field != field) ) {
+
+ dev->isoc_ctl.nfields++;
+ if (dev->isoc_ctl.nfields>=2) {
+ dev->isoc_ctl.nfields=0;
+
+ /* Announces that a new buffer were filled */
+ buffer_filled (dev, dma_q, *buf);
+ dprintk(dev, V4L2_DEBUG_ISOC,
+ "new buffer filled\n");
+ get_next_buf (dma_q, buf);
+ if (!*buf)
+ return rc;
+ out_p = videobuf_to_vmalloc(&((*buf)->vb));
+ if (!out_p)
+ return rc;
+
+ pos = dev->isoc_ctl.pos = 0;
+ }
+ }
+
+ start_line=line;
+ last_field=field;
+ }
+ last_line=line;
+
+ pktsize = TM6000_URB_MSG_LEN;
+ } else {
+ /* Continue the last copy */
+ cmd = dev->isoc_ctl.cmd;
+ size= dev->isoc_ctl.size;
+ pos = dev->isoc_ctl.pos;
+ pktsize = dev->isoc_ctl.pktsize;
+ }
+
+ cpysize = (endp-(*ptr) > size) ? size : endp - *ptr;
+
+ if (cpysize) {
+ /* handles each different URB message */
+ switch(cmd) {
+ case TM6000_URB_MSG_VIDEO:
+ /* Fills video buffer */
+ memcpy(&out_p[pos], *ptr, cpysize);
+ break;
+ case TM6000_URB_MSG_PTS:
+ break;
+ case TM6000_URB_MSG_AUDIO:
+/* Need some code to process audio */
+printk ("%ld: cmd=%s, size=%d\n", jiffies,
+ tm6000_msg_type[cmd],size);
+ break;
+ default:
+ dprintk (dev, V4L2_DEBUG_ISOC, "cmd=%s, size=%d\n",
+ tm6000_msg_type[cmd],size);
+ }
+ }
+ if (cpysize<size) {
+ /* End of URB packet, but cmd processing is not
+ * complete. Preserve the state for a next packet
+ */
+ dev->isoc_ctl.pos = pos+cpysize;
+ dev->isoc_ctl.size= size-cpysize;
+ dev->isoc_ctl.cmd = cmd;
+ dev->isoc_ctl.pktsize = pktsize-cpysize;
+ (*ptr)+=cpysize;
+ } else {
+ dev->isoc_ctl.cmd = 0;
+ (*ptr)+=pktsize;
+ }
+
+ return rc;
+}
+
+static int copy_streams(u8 *data, u8 *out_p, unsigned long len,
+ struct urb *urb, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ u8 *ptr=data, *endp=data+len;
+ unsigned long header=0;
+ int rc=0;
+
+ for (ptr=data; ptr<endp;) {
+ if (!dev->isoc_ctl.cmd) {
+ u8 *p=(u8 *)&dev->isoc_ctl.tmp_buf;
+ /* FIXME: This seems very complex
+ * It just recovers up to 3 bytes of the header that
+ * might be at the previous packet
+ */
+ if (dev->isoc_ctl.tmp_buf_len) {
+ while (dev->isoc_ctl.tmp_buf_len) {
+ if ( *(ptr+3-dev->isoc_ctl.tmp_buf_len) == 0x47) {
+ break;
+ }
+ p++;
+ dev->isoc_ctl.tmp_buf_len--;
+ }
+ if (dev->isoc_ctl.tmp_buf_len) {
+ memcpy (&header,p,
+ dev->isoc_ctl.tmp_buf_len);
+ memcpy (((u8 *)header)+
+ dev->isoc_ctl.tmp_buf,
+ ptr,
+ 4-dev->isoc_ctl.tmp_buf_len);
+ ptr+=4-dev->isoc_ctl.tmp_buf_len;
+ goto HEADER;
+ }
+ }
+ /* Seek for sync */
+ for (;ptr<endp-3;ptr++) {
+ if (*(ptr+3)==0x47)
+ break;
+ }
+
+ if (ptr+3>=endp) {
+ dev->isoc_ctl.tmp_buf_len=endp-ptr;
+ memcpy (&dev->isoc_ctl.tmp_buf,ptr,
+ dev->isoc_ctl.tmp_buf_len);
+ dev->isoc_ctl.cmd=0;
+ return rc;
+ }
+
+ /* Get message header */
+ header=*(unsigned long *)ptr;
+ ptr+=4;
+ }
+HEADER:
+ /* Copy or continue last copy */
+ rc=copy_packet(urb,header,&ptr,endp,out_p,buf);
+ if (rc<0) {
+ buf=NULL;
+ printk(KERN_ERR "tm6000: buffer underrun at %ld\n",
+ jiffies);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+/*
+ * Identify the tm5600/6000 buffer header type and properly handles
+ */
+static int copy_multiplexed(u8 *ptr, u8 *out_p, unsigned long len,
+ struct urb *urb, struct tm6000_buffer **buf)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ unsigned int pos=dev->isoc_ctl.pos,cpysize;
+ int rc=1;
+
+ while (len>0) {
+ cpysize=min(len,(*buf)->vb.size-pos);
+//printk("Copying %d bytes (max=%lu) from %p to %p[%u]\n",cpysize,(*buf)->vb.size,ptr,out_p,pos);
+ memcpy(&out_p[pos], ptr, cpysize);
+ pos+=cpysize;
+ ptr+=cpysize;
+ len-=cpysize;
+ if (pos >= (*buf)->vb.size) {
+ pos=0;
+ /* Announces that a new buffer were filled */
+ buffer_filled (dev, dma_q, *buf);
+ dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n");
+ get_next_buf (dma_q, buf);
+ if (!*buf)
+ break;
+ out_p = videobuf_to_vmalloc(&((*buf)->vb));
+ if (!out_p)
+ return rc;
+ pos = 0;
+ }
+ }
+
+ dev->isoc_ctl.pos=pos;
+ return rc;
+}
+
+static void inline print_err_status (struct tm6000_core *dev,
+ int packet, int status)
+{
+ char *errmsg = "Unknown";
+
+ switch(status) {
+ case -ENOENT:
+ errmsg = "unlinked synchronuously";
+ break;
+ case -ECONNRESET:
+ errmsg = "unlinked asynchronuously";
+ break;
+ case -ENOSR:
+ errmsg = "Buffer error (overrun)";
+ break;
+ case -EPIPE:
+ errmsg = "Stalled (device not responding)";
+ break;
+ case -EOVERFLOW:
+ errmsg = "Babble (bad cable?)";
+ break;
+ case -EPROTO:
+ errmsg = "Bit-stuff error (bad cable?)";
+ break;
+ case -EILSEQ:
+ errmsg = "CRC/Timeout (could be anything)";
+ break;
+ case -ETIME:
+ errmsg = "Device does not respond";
+ break;
+ }
+ if (packet<0) {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "URB status %d [%s].\n",
+ status, errmsg);
+ } else {
+ dprintk(dev, V4L2_DEBUG_QUEUE, "URB packet %d, status %d [%s].\n",
+ packet, status, errmsg);
+ }
+}
+
+
+/*
+ * Controls the isoc copy of each urb packet
+ */
+static inline int tm6000_isoc_copy(struct urb *urb)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev= container_of(dma_q,struct tm6000_core,vidq);
+ struct tm6000_buffer *buf;
+ int i, len=0, rc=1;
+ int size;
+ char *outp = NULL, *p;
+ unsigned long copied;
+
+ get_next_buf(dma_q, &buf);
+ if (!buf)
+ outp = videobuf_to_vmalloc(&buf->vb);
+
+ if (!outp)
+ return 0;
+
+ size = buf->vb.size;
+
+ copied=0;
+
+ if (urb->status<0) {
+ print_err_status (dev,-1,urb->status);
+ return 0;
+ }
+
+ for (i = 0; i < urb->number_of_packets; i++) {
+ int status = urb->iso_frame_desc[i].status;
+
+ if (status<0) {
+ print_err_status (dev,i,status);
+ continue;
+ }
+
+ len=urb->iso_frame_desc[i].actual_length;
+
+// if (len>=TM6000_URB_MSG_LEN) {
+ p=urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+ if (!urb->iso_frame_desc[i].status) {
+ if ((buf->fmt->fourcc)==V4L2_PIX_FMT_TM6000) {
+ rc=copy_multiplexed(p, outp, len, urb, &buf);
+ if (rc<=0)
+ return rc;
+ } else {
+ copy_streams(p, outp, len, urb, &buf);
+ }
+ }
+ copied += len;
+ if (copied>=size)
+ break;
+// }
+ }
+ return rc;
+}
+
+/* ------------------------------------------------------------------
+ URB control
+ ------------------------------------------------------------------*/
+
+/*
+ * IRQ callback, called by URB callback
+ */
+static void tm6000_irq_callback(struct urb *urb)
+{
+ struct tm6000_dmaqueue *dma_q = urb->context;
+ struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ int i;
+
+ if (!dev)
+ return;
+
+ spin_lock(&dev->slock);
+ tm6000_isoc_copy(urb);
+ spin_unlock(&dev->slock);
+
+ /* Reset urb buffers */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ urb->iso_frame_desc[i].status = 0;
+ urb->iso_frame_desc[i].actual_length = 0;
+ }
+
+ urb->status = usb_submit_urb(urb, GFP_ATOMIC);
+ if (urb->status)
+ tm6000_err("urb resubmit failed (error=%i)\n",
+ urb->status);
+}
+
+/*
+ * Stop and Deallocate URBs
+ */
+static void tm6000_uninit_isoc(struct tm6000_core *dev)
+{
+ struct urb *urb;
+ int i;
+
+ dev->isoc_ctl.nfields = -1;
+ dev->isoc_ctl.buf = NULL;
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ urb=dev->isoc_ctl.urb[i];
+ if (urb) {
+ usb_kill_urb(urb);
+ usb_unlink_urb(urb);
+ if (dev->isoc_ctl.transfer_buffer[i]) {
+ usb_buffer_free(dev->udev,
+ urb->transfer_buffer_length,
+ dev->isoc_ctl.transfer_buffer[i],
+ urb->transfer_dma);
+ }
+ usb_free_urb(urb);
+ dev->isoc_ctl.urb[i] = NULL;
+ }
+ dev->isoc_ctl.transfer_buffer[i] = NULL;
+ }
+
+ kfree (dev->isoc_ctl.urb);
+ kfree (dev->isoc_ctl.transfer_buffer);
+
+ dev->isoc_ctl.urb=NULL;
+ dev->isoc_ctl.transfer_buffer=NULL;
+ dev->isoc_ctl.num_bufs = 0;
+
+ dev->isoc_ctl.num_bufs=0;
+}
+
+/*
+ * Allocate URBs and start IRQ
+ */
+static int tm6000_prepare_isoc(struct tm6000_core *dev, unsigned int framesize)
+{
+ struct tm6000_dmaqueue *dma_q = &dev->vidq;
+ int i, j, sb_size, pipe, size, max_packets, num_bufs = 5;
+ struct urb *urb;
+
+ /* De-allocates all pending stuff */
+ tm6000_uninit_isoc(dev);
+
+ pipe = usb_rcvisocpipe(dev->udev,
+ dev->isoc_in->desc.bEndpointAddress &
+ USB_ENDPOINT_NUMBER_MASK);
+
+ size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe));
+
+ if (size > dev->max_isoc_in)
+ size = dev->max_isoc_in;
+
+ dev->isoc_ctl.max_pkt_size = size;
+
+ max_packets = ( framesize + size - 1) / size;
+
+ if (max_packets > TM6000_MAX_ISO_PACKETS)
+ max_packets = TM6000_MAX_ISO_PACKETS;
+
+ sb_size = max_packets * size;
+
+ dev->isoc_ctl.num_bufs = num_bufs;
+
+ dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL);
+ if (!dev->isoc_ctl.urb) {
+ tm6000_err("cannot alloc memory for usb buffers\n");
+ return -ENOMEM;
+ }
+
+ dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void *)*num_bufs,
+ GFP_KERNEL);
+ if (!dev->isoc_ctl.urb) {
+ tm6000_err("cannot allocate memory for usbtransfer\n");
+ kfree(dev->isoc_ctl.urb);
+ return -ENOMEM;
+ }
+
+ dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets"
+ " (%d bytes) of %d bytes each to handle %u size\n",
+ max_packets, num_bufs, sb_size,
+ dev->max_isoc_in, size);
+
+
+ /* allocate urbs and transfer buffers */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ urb = usb_alloc_urb(max_packets, GFP_KERNEL);
+ if (!urb) {
+ tm6000_err("cannot alloc isoc_ctl.urb %i\n", i);
+ tm6000_uninit_isoc(dev);
+ usb_free_urb(urb);
+ return -ENOMEM;
+ }
+ dev->isoc_ctl.urb[i] = urb;
+
+ dev->isoc_ctl.transfer_buffer[i] = usb_buffer_alloc(dev->udev,
+ sb_size, GFP_KERNEL, &urb->transfer_dma);
+ if (!dev->isoc_ctl.transfer_buffer[i]) {
+ tm6000_err ("unable to allocate %i bytes for transfer"
+ " buffer %i%s\n",
+ sb_size, i,
+ in_interrupt()?" while in int":"");
+ tm6000_uninit_isoc(dev);
+ return -ENOMEM;
+ }
+ memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size);
+
+ usb_fill_bulk_urb(urb, dev->udev, pipe,
+ dev->isoc_ctl.transfer_buffer[i], sb_size,
+ tm6000_irq_callback, dma_q);
+ urb->interval = dev->isoc_in->desc.bInterval;
+ urb->number_of_packets = max_packets;
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+
+ for (j = 0; j < max_packets; j++) {
+ urb->iso_frame_desc[j].offset = size * j;
+ urb->iso_frame_desc[j].length = size;
+ }
+ }
+
+ return 0;
+}
+
+static int tm6000_start_thread( struct tm6000_core *dev)
+{
+ struct tm6000_dmaqueue *dma_q = &dev->vidq;
+ int i;
+
+ dma_q->frame=0;
+ dma_q->ini_jiffies=jiffies;
+
+ init_waitqueue_head(&dma_q->wq);
+
+ /* submit urbs and enables IRQ */
+ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) {
+ int rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC);
+ if (rc) {
+ tm6000_err("submit of urb %i failed (error=%i)\n", i,
+ rc);
+ tm6000_uninit_isoc(dev);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ Videobuf operations
+ ------------------------------------------------------------------*/
+
+static int
+buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+
+ *size = fh->fmt->depth * fh->width * fh->height >> 3;
+ if (0 == *count)
+ *count = TM6000_DEF_BUF;
+
+ if (*count < TM6000_MIN_BUF) {
+ *count=TM6000_MIN_BUF;
+ }
+
+ while (*size * *count > vid_limit * 1024 * 1024)
+ (*count)--;
+
+ return 0;
+}
+
+static void free_buffer(struct videobuf_queue *vq, struct tm6000_buffer *buf)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_core *dev = fh->dev;
+ unsigned long flags;
+
+ if (in_interrupt())
+ BUG();
+
+ /* We used to wait for the buffer to finish here, but this didn't work
+ because, as we were keeping the state as VIDEOBUF_QUEUED,
+ videobuf_queue_cancel marked it as finished for us.
+ (Also, it could wedge forever if the hardware was misconfigured.)
+
+ This should be safe; by the time we get here, the buffer isn't
+ queued anymore. If we ever start marking the buffers as
+ VIDEOBUF_ACTIVE, it won't be, though.
+ */
+ spin_lock_irqsave(&dev->slock, flags);
+ if (dev->isoc_ctl.buf == buf)
+ dev->isoc_ctl.buf = NULL;
+ spin_unlock_irqrestore(&dev->slock, flags);
+
+ videobuf_vmalloc_free(&buf->vb);
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+}
+
+static int
+buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb,
+ enum v4l2_field field)
+{
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_core *dev = fh->dev;
+ int rc = 0, urb_init = 0;
+
+ BUG_ON(NULL == fh->fmt);
+
+
+ /* FIXME: It assumes depth=2 */
+ /* The only currently supported format is 16 bits/pixel */
+ buf->vb.size = fh->fmt->depth*fh->width*fh->height >> 3;
+ if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
+ return -EINVAL;
+
+ if (buf->fmt != fh->fmt ||
+ buf->vb.width != fh->width ||
+ buf->vb.height != fh->height ||
+ buf->vb.field != field) {
+ buf->fmt = fh->fmt;
+ buf->vb.width = fh->width;
+ buf->vb.height = fh->height;
+ buf->vb.field = field;
+ buf->vb.state = VIDEOBUF_NEEDS_INIT;
+ }
+
+ if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
+ if (0 != (rc = videobuf_iolock(vq, &buf->vb, NULL)))
+ goto fail;
+ urb_init = 1;
+ }
+
+ if (!dev->isoc_ctl.num_bufs)
+ urb_init = 1;
+
+ if (urb_init) {
+ rc = tm6000_prepare_isoc(dev, buf->vb.size);
+ if (rc < 0)
+ goto fail;
+
+ rc = tm6000_start_thread(dev);
+ if (rc < 0)
+ goto fail;
+
+ }
+
+ buf->vb.state = VIDEOBUF_PREPARED;
+ return 0;
+
+fail:
+ free_buffer(vq, buf);
+ return rc;
+}
+
+static void
+buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+ struct tm6000_fh *fh = vq->priv_data;
+ struct tm6000_core *dev = fh->dev;
+ struct tm6000_dmaqueue *vidq = &dev->vidq;
+
+ buf->vb.state = VIDEOBUF_QUEUED;
+ list_add_tail(&buf->vb.queue, &vidq->active);
+}
+
+static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb)
+{
+ struct tm6000_buffer *buf = container_of(vb,struct tm6000_buffer,vb);
+
+ free_buffer(vq,buf);
+}
+
+static struct videobuf_queue_ops tm6000_video_qops = {
+ .buf_setup = buffer_setup,
+ .buf_prepare = buffer_prepare,
+ .buf_queue = buffer_queue,
+ .buf_release = buffer_release,
+};
+
+/* ------------------------------------------------------------------
+ IOCTL handling
+ ------------------------------------------------------------------*/
+
+static int res_get(struct tm6000_core *dev, struct tm6000_fh *fh)
+{
+ /* is it free? */
+ mutex_lock(&dev->lock);
+ if (dev->resources) {
+ /* no, someone else uses it */
+ mutex_unlock(&dev->lock);
+ return 0;
+ }
+ /* it's free, grab it */
+ dev->resources =1;
+ dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: get\n");
+ mutex_unlock(&dev->lock);
+ return 1;
+}
+
+static int res_locked(struct tm6000_core *dev)
+{
+ return (dev->resources);
+}
+
+static void res_free(struct tm6000_core *dev, struct tm6000_fh *fh)
+{
+ mutex_lock(&dev->lock);
+ dev->resources = 0;
+ dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: put\n");
+ mutex_unlock(&dev->lock);
+}
+
+/* ------------------------------------------------------------------
+ IOCTL vidioc handling
+ ------------------------------------------------------------------*/
+static int vidioc_querycap (struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ // struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+
+ strlcpy(cap->driver, "tm6000", sizeof(cap->driver));
+ strlcpy(cap->card,"Trident TVMaster TM5600/6000", sizeof(cap->card));
+ // strlcpy(cap->bus_info, dev->udev->dev.bus_id, sizeof(cap->bus_info));
+ cap->version = TM6000_VERSION;
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE |
+ V4L2_CAP_STREAMING |
+ V4L2_CAP_TUNER |
+ V4L2_CAP_READWRITE;
+ return 0;
+}
+
+static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ if (unlikely(f->index >= ARRAY_SIZE(format)))
+ return -EINVAL;
+
+ strlcpy(f->description,format[f->index].name,sizeof(f->description));
+ f->pixelformat = format[f->index].fourcc;
+ return 0;
+}
+
+static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_fh *fh=priv;
+
+ f->fmt.pix.width = fh->width;
+ f->fmt.pix.height = fh->height;
+ f->fmt.pix.field = fh->vb_vidq.field;
+ f->fmt.pix.pixelformat = fh->fmt->fourcc;
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fh->fmt->depth) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return (0);
+}
+
+static struct tm6000_fmt* format_by_fourcc(unsigned int fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(format); i++)
+ if (format[i].fourcc == fourcc)
+ return format+i;
+ return NULL;
+}
+
+static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
+ struct tm6000_fmt *fmt;
+ enum v4l2_field field;
+
+ fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ if (NULL == fmt) {
+ dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Fourcc format (0x%08x)"
+ " invalid.\n", f->fmt.pix.pixelformat);
+ return -EINVAL;
+ }
+
+ field = f->fmt.pix.field;
+
+ if (field == V4L2_FIELD_ANY) {
+// field=V4L2_FIELD_INTERLACED;
+ field=V4L2_FIELD_SEQ_TB;
+ } else if (V4L2_FIELD_INTERLACED != field) {
+ dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n");
+ return -EINVAL;
+ }
+
+ tm6000_get_std_res (dev);
+
+ f->fmt.pix.width = dev->width;
+ f->fmt.pix.height = dev->height;
+
+ f->fmt.pix.width &= ~0x01;
+
+ f->fmt.pix.field = field;
+
+ f->fmt.pix.bytesperline =
+ (f->fmt.pix.width * fmt->depth) >> 3;
+ f->fmt.pix.sizeimage =
+ f->fmt.pix.height * f->fmt.pix.bytesperline;
+
+ return 0;
+}
+
+/*FIXME: This seems to be generic enough to be at videodev2 */
+static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int ret = vidioc_try_fmt_vid_cap(file,fh,f);
+ if (ret < 0)
+ return (ret);
+
+ fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
+ fh->width = f->fmt.pix.width;
+ fh->height = f->fmt.pix.height;
+ fh->vb_vidq.field = f->fmt.pix.field;
+ fh->type = f->type;
+
+ dev->fourcc = f->fmt.pix.pixelformat;
+
+ tm6000_set_fourcc_format(dev);
+
+ return (0);
+}
+
+static int vidioc_reqbufs (struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_reqbufs(&fh->vb_vidq, p));
+}
+
+static int vidioc_querybuf (struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_querybuf(&fh->vb_vidq, p));
+}
+
+static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_qbuf(&fh->vb_vidq, p));
+}
+
+static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct tm6000_fh *fh=priv;
+
+ return (videobuf_dqbuf(&fh->vb_vidq, p,
+ file->f_flags & O_NONBLOCK));
+}
+
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf)
+{
+ struct tm6000_fh *fh=priv;
+
+ return videobuf_cgmbuf (&fh->vb_vidq, mbuf, 8);
+}
+#endif
+
+static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (i != fh->type)
+ return -EINVAL;
+
+ if (!res_get(dev,fh))
+ return -EBUSY;
+ return (videobuf_streamon(&fh->vb_vidq));
+}
+
+static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ if (i != fh->type)
+ return -EINVAL;
+
+ videobuf_streamoff(&fh->vb_vidq);
+ res_free(dev,fh);
+
+ return (0);
+}
+
+static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm)
+{
+ int rc=0;
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ rc=tm6000_set_standard (dev, norm);
+
+ fh->width = dev->width;
+ fh->height = dev->height;
+
+ if (rc<0)
+ return rc;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm);
+
+ return 0;
+}
+
+static int vidioc_enum_input (struct file *file, void *priv,
+ struct v4l2_input *inp)
+{
+ switch (inp->index) {
+ case TM6000_INPUT_TV:
+ inp->type = V4L2_INPUT_TYPE_TUNER;
+ strcpy(inp->name,"Television");
+ break;
+ case TM6000_INPUT_COMPOSITE:
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(inp->name,"Composite");
+ break;
+ case TM6000_INPUT_SVIDEO:
+ inp->type = V4L2_INPUT_TYPE_CAMERA;
+ strcpy(inp->name,"S-Video");
+ break;
+ default:
+ return -EINVAL;
+ }
+ inp->std = TM6000_STD;
+
+ return 0;
+}
+
+static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+
+ *i=dev->input;
+
+ return 0;
+}
+static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int rc=0;
+ char buf[1];
+
+ switch (i) {
+ case TM6000_INPUT_TV:
+ dev->input=i;
+ *buf=0;
+ break;
+ case TM6000_INPUT_COMPOSITE:
+ case TM6000_INPUT_SVIDEO:
+ dev->input=i;
+ *buf=1;
+ break;
+ default:
+ return -EINVAL;
+ }
+ rc=tm6000_read_write_usb (dev, USB_DIR_OUT | USB_TYPE_VENDOR,
+ REQ_03_SET_GET_MCU_PIN, 0x03, 1, buf, 1);
+
+ if (!rc) {
+ dev->input=i;
+ rc=vidioc_s_std (file, priv, &dev->vfd->current_norm);
+ }
+
+ return (rc);
+}
+
+ /* --- controls ---------------------------------------------- */
+static int vidioc_queryctrl (struct file *file, void *priv,
+ struct v4l2_queryctrl *qc)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
+ if (qc->id && qc->id == tm6000_qctrl[i].id) {
+ memcpy(qc, &(tm6000_qctrl[i]),
+ sizeof(*qc));
+ return (0);
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_g_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct tm6000_fh *fh=priv;
+ struct tm6000_core *dev = fh->dev;
+ int val;
+
+ /* FIXME: Probably, those won't work! Maybe we need shadow regs */
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ val=tm6000_get_reg (dev, REQ_07_SET_GET_AVREG, 0x08, 0);
+ break;
+ case V4L2_CID_BRIGHTNESS:
+ val=tm6000_get_reg (dev, REQ_07_SET_GET_AVREG, 0x09, 0);
+ return 0;
+ case V4L2_CID_SATURATION:
+ val=tm6000_get_reg (dev, REQ_07_SET_GET_AVREG, 0x0a, 0);
+ return 0;
+ case V4L2_CID_HUE:
+ val=tm6000_get_reg (dev, REQ_07_SET_GET_AVREG, 0x0b, 0);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ if (val<0)
+ return val;
+
+ ctrl->value=val;
+
+ return 0;
+}
+static int vidioc_s_ctrl (struct file *file, void *priv,
+ struct v4l2_control *ctrl)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+ u8 val=ctrl->value;
+
+ switch (ctrl->id) {
+ case V4L2_CID_CONTRAST:
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x08, val);
+ return 0;
+ case V4L2_CID_BRIGHTNESS:
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x09, val);
+ return 0;
+ case V4L2_CID_SATURATION:
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x0a, val);
+ return 0;
+ case V4L2_CID_HUE:
+ tm6000_set_reg (dev, REQ_07_SET_GET_AVREG, 0x0b, val);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int vidioc_g_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+ if (0 != t->index)
+ return -EINVAL;
+
+ strcpy(t->name, "Television");
+ t->type = V4L2_TUNER_ANALOG_TV;
+ t->capability = V4L2_TUNER_CAP_NORM;
+ t->rangehigh = 0xffffffffUL;
+ t->rxsubchans = V4L2_TUNER_SUB_MONO;
+
+ return 0;
+}
+
+static int vidioc_s_tuner (struct file *file, void *priv,
+ struct v4l2_tuner *t)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (UNSET == dev->tuner_type)
+ return -EINVAL;
+ if (0 != t->index)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vidioc_g_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+
+ f->type = V4L2_TUNER_ANALOG_TV;
+ f->frequency = dev->freq;
+
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f);
+
+ return 0;
+}
+
+static int vidioc_s_frequency (struct file *file, void *priv,
+ struct v4l2_frequency *f)
+{
+ struct tm6000_fh *fh =priv;
+ struct tm6000_core *dev = fh->dev;
+
+ if (unlikely(f->type != V4L2_TUNER_ANALOG_TV))
+ return -EINVAL;
+
+ if (unlikely(UNSET == dev->tuner_type))
+ return -EINVAL;
+ if (unlikely(f->tuner != 0))
+ return -EINVAL;
+
+// mutex_lock(&dev->lock);
+ dev->freq = f->frequency;
+ v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f);
+// mutex_unlock(&dev->lock);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------
+ File operations for the device
+ ------------------------------------------------------------------*/
+
+static int tm6000_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct tm6000_core *dev = video_drvdata(file);
+ struct tm6000_fh *fh;
+ enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ int i,rc;
+
+ printk(KERN_INFO "tm6000: open called (dev=%s)\n",
+ video_device_node_name(vdev));
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n",
+ video_device_node_name(vdev));
+
+
+ /* If more than one user, mutex should be added */
+ dev->users++;
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "open dev=%s type=%s users=%d\n",
+ video_device_node_name(vdev), v4l2_type_names[type],
+ dev->users);
+
+ /* allocate + initialize per filehandle data */
+ fh = kzalloc(sizeof(*fh),GFP_KERNEL);
+ if (NULL == fh) {
+ dev->users--;
+ return -ENOMEM;
+ }
+
+ file->private_data = fh;
+ fh->dev = dev;
+
+ fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ dev->fourcc = format[0].fourcc;
+
+ fh->fmt = format_by_fourcc(dev->fourcc);
+
+ tm6000_get_std_res (dev);
+
+ fh->width = dev->width;
+ fh->height = dev->height;
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, "
+ "dev->vidq=0x%08lx\n",
+ (unsigned long)fh,(unsigned long)dev,(unsigned long)&dev->vidq);
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
+ "queued=%d\n",list_empty(&dev->vidq.queued));
+ dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty "
+ "active=%d\n",list_empty(&dev->vidq.active));
+
+ /* initialize hardware on analog mode */
+ if (dev->mode!=TM6000_MODE_ANALOG) {
+ rc=tm6000_init_analog_mode (dev);
+ if (rc<0)
+ return rc;
+
+ /* Put all controls at a sane state */
+ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++)
+ qctl_regs[i] =tm6000_qctrl[i].default_value;
+
+ dev->mode=TM6000_MODE_ANALOG;
+ }
+
+ videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops,
+ NULL, &dev->slock,
+ fh->type,
+ V4L2_FIELD_INTERLACED,
+ sizeof(struct tm6000_buffer),fh);
+
+ return 0;
+}
+
+static ssize_t
+tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos)
+{
+ struct tm6000_fh *fh = file->private_data;
+
+ if (fh->type==V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+ if (res_locked(fh->dev))
+ return -EBUSY;
+
+ return videobuf_read_stream(&fh->vb_vidq, data, count, pos, 0,
+ file->f_flags & O_NONBLOCK);
+ }
+ return 0;
+}
+
+static unsigned int
+tm6000_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct tm6000_fh *fh = file->private_data;
+ struct tm6000_buffer *buf;
+
+ if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
+ return POLLERR;
+
+ if (res_get(fh->dev,fh)) {
+ /* streaming capture */
+ if (list_empty(&fh->vb_vidq.stream))
+ return POLLERR;
+ buf = list_entry(fh->vb_vidq.stream.next,struct tm6000_buffer,vb.stream);
+ } else {
+ /* read() capture */
+ return videobuf_poll_stream(file, &fh->vb_vidq,
+ wait);
+ }
+ poll_wait(file, &buf->vb.done, wait);
+ if (buf->vb.state == VIDEOBUF_DONE ||
+ buf->vb.state == VIDEOBUF_ERROR)
+ return POLLIN|POLLRDNORM;
+ return 0;
+}
+
+static int tm6000_release(struct file *file)
+{
+ struct tm6000_fh *fh = file->private_data;
+ struct tm6000_core *dev = fh->dev;
+ struct video_device *vdev = video_devdata(file);
+
+ dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: close called (dev=%s, users=%d)\n",
+ video_device_node_name(vdev), dev->users);
+
+ dev->users--;
+
+ if (!dev->users) {
+ tm6000_uninit_isoc(dev);
+ videobuf_mmap_free(&fh->vb_vidq);
+ }
+
+ kfree (fh);
+
+ return 0;
+}
+
+static int tm6000_mmap(struct file *file, struct vm_area_struct * vma)
+{
+ struct tm6000_fh *fh = file->private_data;
+ int ret;
+
+ ret=videobuf_mmap_mapper(&fh->vb_vidq, vma);
+
+ return ret;
+}
+
+static struct v4l2_file_operations tm6000_fops = {
+ .owner = THIS_MODULE,
+ .open = tm6000_open,
+ .release = tm6000_release,
+ .ioctl = video_ioctl2, /* V4L2 ioctl handler */
+ .read = tm6000_read,
+ .poll = tm6000_poll,
+ .mmap = tm6000_mmap,
+};
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+ .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
+ .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
+ .vidioc_s_std = vidioc_s_std,
+ .vidioc_enum_input = vidioc_enum_input,
+ .vidioc_g_input = vidioc_g_input,
+ .vidioc_s_input = vidioc_s_input,
+ .vidioc_queryctrl = vidioc_queryctrl,
+ .vidioc_g_ctrl = vidioc_g_ctrl,
+ .vidioc_s_ctrl = vidioc_s_ctrl,
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_g_frequency = vidioc_g_frequency,
+ .vidioc_s_frequency = vidioc_s_frequency,
+ .vidioc_streamon = vidioc_streamon,
+ .vidioc_streamoff = vidioc_streamoff,
+ .vidioc_reqbufs = vidioc_reqbufs,
+ .vidioc_querybuf = vidioc_querybuf,
+ .vidioc_qbuf = vidioc_qbuf,
+ .vidioc_dqbuf = vidioc_dqbuf,
+#ifdef CONFIG_VIDEO_V4L1_COMPAT
+ .vidiocgmbuf = vidiocgmbuf,
+#endif
+};
+
+static struct video_device tm6000_template = {
+ .name = "tm6000",
+ .fops = &tm6000_fops,
+ .ioctl_ops = &video_ioctl_ops,
+ .release = video_device_release,
+ .tvnorms = TM6000_STD,
+ .current_norm = V4L2_STD_NTSC_M,
+};
+
+/* -----------------------------------------------------------------
+ Initialization and module stuff
+ ------------------------------------------------------------------*/
+
+int tm6000_v4l2_register(struct tm6000_core *dev)
+{
+ int ret = -1;
+ struct video_device *vfd;
+
+ vfd = video_device_alloc();
+ if(!vfd) {
+ return -ENOMEM;
+ }
+ dev->vfd = vfd;
+
+ /* init video dma queues */
+ INIT_LIST_HEAD(&dev->vidq.active);
+ INIT_LIST_HEAD(&dev->vidq.queued);
+
+ memcpy (dev->vfd, &tm6000_template, sizeof(*(dev->vfd)));
+ dev->vfd->debug=tm6000_debug;
+ vfd->v4l2_dev = &dev->v4l2_dev;
+ video_set_drvdata(vfd, dev);
+
+ ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr);
+ printk(KERN_INFO "Trident TVMaster TM5600/TM6000 USB2 board (Load status: %d)\n", ret);
+ return ret;
+}
+
+int tm6000_v4l2_unregister(struct tm6000_core *dev)
+{
+ video_unregister_device(dev->vfd);
+
+ return 0;
+}
+
+int tm6000_v4l2_exit(void)
+{
+ return 0;
+}
+
+module_param(video_nr, int, 0);
+MODULE_PARM_DESC(video_nr,"Allow changing video device number");
+
+module_param_named (debug, tm6000_debug, int, 0444);
+MODULE_PARM_DESC(debug,"activates debug info");
+
+module_param(vid_limit,int,0644);
+MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
+
diff --git a/drivers/staging/tm6000/tm6000.h b/drivers/staging/tm6000/tm6000.h
new file mode 100644
index 000000000000..877cbf6eac4b
--- /dev/null
+++ b/drivers/staging/tm6000/tm6000.h
@@ -0,0 +1,278 @@
+/*
+ tm6000.h - driver for TM5600/TM6000 USB video capture devices
+
+ Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org>
+
+ Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com>
+ - DVB-T support
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation version 2
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+// Use the tm6000-hack, instead of the proper initialization code
+//#define HACK 1
+
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/videobuf-vmalloc.h>
+#include "tm6000-usb-isoc.h"
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <media/v4l2-device.h>
+
+
+#include <linux/dvb/frontend.h>
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dmxdev.h"
+
+#define TM6000_VERSION KERNEL_VERSION(0, 0, 1)
+
+/* Inputs */
+
+enum tm6000_itype {
+ TM6000_INPUT_TV = 0,
+ TM6000_INPUT_COMPOSITE,
+ TM6000_INPUT_SVIDEO,
+};
+
+enum tm6000_devtype {
+ TM6000 = 0,
+ TM5600,
+ TM6010,
+};
+
+/* ------------------------------------------------------------------
+ Basic structures
+ ------------------------------------------------------------------*/
+
+struct tm6000_fmt {
+ char *name;
+ u32 fourcc; /* v4l2 format id */
+ int depth;
+};
+
+/* buffer for one video frame */
+struct tm6000_buffer {
+ /* common v4l buffer stuff -- must be first */
+ struct videobuf_buffer vb;
+
+ struct tm6000_fmt *fmt;
+};
+
+struct tm6000_dmaqueue {
+ struct list_head active;
+ struct list_head queued;
+
+ /* thread for generating video stream*/
+ struct task_struct *kthread;
+ wait_queue_head_t wq;
+ /* Counters to control fps rate */
+ int frame;
+ int ini_jiffies;
+};
+
+/* device states */
+enum tm6000_core_state {
+ DEV_INITIALIZED = 0x01,
+ DEV_DISCONNECTED = 0x02,
+ DEV_MISCONFIGURED = 0x04,
+};
+
+/* io methods */
+enum tm6000_io_method {
+ IO_NONE,
+ IO_READ,
+ IO_MMAP,
+};
+
+enum tm6000_mode {
+ TM6000_MODE_UNKNOWN=0,
+ TM6000_MODE_ANALOG,
+ TM6000_MODE_DIGITAL,
+};
+
+struct tm6000_capabilities {
+ unsigned int has_tuner:1;
+ unsigned int has_tda9874:1;
+ unsigned int has_dvb:1;
+ unsigned int has_zl10353:1;
+ unsigned int has_eeprom:1;
+ unsigned int has_remote:1;
+};
+
+struct tm6000_dvb {
+ struct dvb_adapter adapter;
+ struct dvb_demux demux;
+ struct dvb_frontend *frontend;
+ struct dmxdev dmxdev;
+ unsigned int streams;
+ struct urb *bulk_urb;
+ struct mutex mutex;
+};
+
+struct tm6000_core {
+ /* generic device properties */
+ char name[30]; /* name (including minor) of the device */
+ int model; /* index in the device_data struct */
+ int devno; /* marks the number of this device */
+ enum tm6000_devtype dev_type; /* type of device */
+
+ v4l2_std_id norm; /* Current norm */
+ int width,height; /* Selected resolution */
+
+ enum tm6000_core_state state;
+
+ /* Device Capabilities*/
+ struct tm6000_capabilities caps;
+
+ /* Tuner configuration */
+ int tuner_type; /* type of the tuner */
+ int tuner_addr; /* tuner address */
+ int tuner_reset_gpio; /* GPIO used for tuner reset */
+
+ /* Demodulator configuration */
+ int demod_addr; /* demodulator address */
+
+ int audio_bitrate;
+ /* i2c i/o */
+ struct i2c_adapter i2c_adap;
+ struct i2c_client i2c_client;
+
+ /* video for linux */
+ int users;
+
+ /* various device info */
+ unsigned int resources;
+ struct video_device *vfd;
+ struct tm6000_dmaqueue vidq;
+ struct v4l2_device v4l2_dev;
+
+ int input;
+ int freq;
+ unsigned int fourcc;
+
+ enum tm6000_mode mode;
+
+ /* DVB-T support */
+ struct tm6000_dvb *dvb;
+
+ /* locks */
+ struct mutex lock;
+
+ /* usb transfer */
+ struct usb_device *udev; /* the usb device */
+
+ struct usb_host_endpoint *bulk_in, *bulk_out, *isoc_in, *isoc_out;
+ unsigned int max_bulk_in, max_bulk_out;
+ unsigned int max_isoc_in, max_isoc_out;
+
+ /* scaler!=0 if scaler is active*/
+ int scaler;
+
+ /* Isoc control struct */
+ struct usb_isoc_ctl isoc_ctl;
+
+ spinlock_t slock;
+};
+
+struct tm6000_fh {
+ struct tm6000_core *dev;
+
+ /* video capture */
+ struct tm6000_fmt *fmt;
+ unsigned int width,height;
+ struct videobuf_queue vb_vidq;
+
+ enum v4l2_buf_type type;
+};
+
+#define TM6000_STD V4L2_STD_PAL|V4L2_STD_PAL_N|V4L2_STD_PAL_Nc| \
+ V4L2_STD_PAL_M|V4L2_STD_PAL_60|V4L2_STD_NTSC_M| \
+ V4L2_STD_NTSC_M_JP|V4L2_STD_SECAM
+
+/* In tm6000-core.c */
+
+int tm6000_read_write_usb (struct tm6000_core *dev, u8 reqtype, u8 req,
+ u16 value, u16 index, u8 *buf, u16 len);
+int tm6000_get_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_set_reg (struct tm6000_core *dev, u8 req, u16 value, u16 index);
+int tm6000_init (struct tm6000_core *dev);
+int tm6000_init_after_firmware (struct tm6000_core *dev);
+
+int tm6000_init_analog_mode (struct tm6000_core *dev);
+int tm6000_init_digital_mode (struct tm6000_core *dev);
+int tm6000_set_audio_bitrate (struct tm6000_core *dev, int bitrate);
+
+int tm6000_dvb_register(struct tm6000_core *dev);
+void tm6000_dvb_unregister(struct tm6000_core *dev);
+
+int tm6000_v4l2_register(struct tm6000_core *dev);
+int tm6000_v4l2_unregister(struct tm6000_core *dev);
+int tm6000_v4l2_exit(void);
+void tm6000_set_fourcc_format(struct tm6000_core *dev);
+
+/* In tm6000-stds.c */
+void tm6000_get_std_res(struct tm6000_core *dev);
+int tm6000_set_standard (struct tm6000_core *dev, v4l2_std_id *norm);
+
+/* In tm6000-i2c.c */
+int tm6000_i2c_register(struct tm6000_core *dev);
+int tm6000_i2c_unregister(struct tm6000_core *dev);
+
+/* In tm6000-queue.c */
+
+int tm6000_v4l2_mmap(struct file *filp, struct vm_area_struct *vma);
+
+int tm6000_vidioc_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type i);
+int tm6000_vidioc_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type i);
+int tm6000_vidioc_reqbufs (struct file *file, void *priv,
+ struct v4l2_requestbuffers *rb);
+int tm6000_vidioc_querybuf (struct file *file, void *priv,
+ struct v4l2_buffer *b);
+int tm6000_vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *b);
+int tm6000_vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *b);
+ssize_t tm6000_v4l2_read(struct file *filp, char __user * buf, size_t count,
+ loff_t * f_pos);
+unsigned int tm6000_v4l2_poll(struct file *file,
+ struct poll_table_struct *wait);
+int tm6000_queue_init(struct tm6000_core *dev);
+
+/* In tm6000-alsa.c */
+int tm6000_audio_init(struct tm6000_core *dev, int idx);
+
+
+/* Debug stuff */
+
+extern int tm6000_debug;
+
+#define dprintk(dev, level, fmt, arg...) do {\
+ if (tm6000_debug & level) \
+ printk(KERN_INFO "(%lu) %s %s :"fmt, jiffies, \
+ dev->name, __FUNCTION__ , ##arg); } while (0)
+
+#define V4L2_DEBUG_REG 0x0004
+#define V4L2_DEBUG_I2C 0x0008
+#define V4L2_DEBUG_QUEUE 0x0010
+#define V4L2_DEBUG_ISOC 0x0020
+#define V4L2_DEBUG_RES_LOCK 0x0040 /* Resource locking */
+#define V4L2_DEBUG_OPEN 0x0080 /* video open/close debug */
+
+#define tm6000_err(fmt, arg...) do {\
+ printk(KERN_ERR "tm6000 %s :"fmt, \
+ __FUNCTION__ , ##arg); } while (0)
+
+
diff --git a/drivers/uwb/i1480/i1480-est.c b/drivers/uwb/i1480/i1480-est.c
index 7bf8c6febae7..f2eb4d8b76c9 100644
--- a/drivers/uwb/i1480/i1480-est.c
+++ b/drivers/uwb/i1480/i1480-est.c
@@ -54,7 +54,7 @@ static struct uwb_est_entry i1480_est_fd01[] = {
.size = sizeof(struct i1480_rceb) + 2 },
};
-static int i1480_est_init(void)
+static int __init i1480_est_init(void)
{
int result = uwb_est_register(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
i1480_est_fd00,
@@ -73,7 +73,7 @@ static int i1480_est_init(void)
}
module_init(i1480_est_init);
-static void i1480_est_exit(void)
+static void __exit i1480_est_exit(void)
{
uwb_est_unregister(i1480_CET_VS1, 0x00, 0x8086, 0x0c3b,
i1480_est_fd00, ARRAY_SIZE(i1480_est_fd00));
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
index aa42fcee4c4f..75164866c2d8 100644
--- a/drivers/uwb/wlp/messages.c
+++ b/drivers/uwb/wlp/messages.c
@@ -259,6 +259,63 @@ out:
}
+static ssize_t wlp_get_attribute(struct wlp *wlp, u16 type_code,
+ struct wlp_attr_hdr *attr_hdr, void *value, ssize_t value_len,
+ ssize_t buflen)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ ssize_t attr_len = sizeof(*attr_hdr) + value_len;
+ if (buflen < 0)
+ return -EINVAL;
+ if (buflen < attr_len) {
+ dev_err(dev, "WLP: Not enough space in buffer to parse"
+ " attribute field. Need %d, received %zu\n",
+ (int)attr_len, buflen);
+ return -EIO;
+ }
+ if (wlp_check_attr_hdr(wlp, attr_hdr, type_code, value_len) < 0) {
+ dev_err(dev, "WLP: Header verification failed. \n");
+ return -EINVAL;
+ }
+ memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), value_len);
+ return attr_len;
+}
+
+static ssize_t wlp_vget_attribute(struct wlp *wlp, u16 type_code,
+ struct wlp_attr_hdr *attr_hdr, void *value, ssize_t max_value_len,
+ ssize_t buflen)
+{
+ struct device *dev = &wlp->rc->uwb_dev.dev;
+ size_t len;
+ if (buflen < 0)
+ return -EINVAL;
+ if (buflen < sizeof(*attr_hdr)) {
+ dev_err(dev, "WLP: Not enough space in buffer to parse"
+ " header.\n");
+ return -EIO;
+ }
+ if (le16_to_cpu(attr_hdr->type) != type_code) {
+ dev_err(dev, "WLP: Unexpected attribute type. Got %u, "
+ "expected %u.\n", le16_to_cpu(attr_hdr->type),
+ type_code);
+ return -EINVAL;
+ }
+ len = le16_to_cpu(attr_hdr->length);
+ if (len > max_value_len) {
+ dev_err(dev, "WLP: Attribute larger than maximum "
+ "allowed. Received %zu, max is %d.\n", len,
+ (int)max_value_len);
+ return -EFBIG;
+ }
+ if (buflen < sizeof(*attr_hdr) + len) {
+ dev_err(dev, "WLP: Not enough space in buffer to parse "
+ "variable data.\n");
+ return -EIO;
+ }
+ memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), len);
+ return sizeof(*attr_hdr) + len;
+}
+
/**
* Get value of attribute from fixed size attribute field.
*
@@ -274,22 +331,8 @@ out:
ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr, \
type *value, ssize_t buflen) \
{ \
- struct device *dev = &wlp->rc->uwb_dev.dev; \
- if (buflen < 0) \
- return -EINVAL; \
- if (buflen < sizeof(*attr)) { \
- dev_err(dev, "WLP: Not enough space in buffer to parse" \
- " attribute field. Need %d, received %zu\n", \
- (int)sizeof(*attr), buflen); \
- return -EIO; \
- } \
- if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code, \
- sizeof(attr->name)) < 0) { \
- dev_err(dev, "WLP: Header verification failed. \n"); \
- return -EINVAL; \
- } \
- *value = attr->name; \
- return sizeof(*attr); \
+ return wlp_get_attribute(wlp, (type_code), &attr->hdr, \
+ value, sizeof(*value), buflen); \
}
#define wlp_get_sparse(type, type_code, name) \
@@ -313,35 +356,8 @@ static ssize_t wlp_get_##name(struct wlp *wlp, \
struct wlp_attr_##name *attr, \
type_val *value, ssize_t buflen) \
{ \
- struct device *dev = &wlp->rc->uwb_dev.dev; \
- size_t len; \
- if (buflen < 0) \
- return -EINVAL; \
- if (buflen < sizeof(*attr)) { \
- dev_err(dev, "WLP: Not enough space in buffer to parse" \
- " header.\n"); \
- return -EIO; \
- } \
- if (le16_to_cpu(attr->hdr.type) != type_code) { \
- dev_err(dev, "WLP: Unexpected attribute type. Got %u, " \
- "expected %u.\n", le16_to_cpu(attr->hdr.type), \
- type_code); \
- return -EINVAL; \
- } \
- len = le16_to_cpu(attr->hdr.length); \
- if (len > max) { \
- dev_err(dev, "WLP: Attribute larger than maximum " \
- "allowed. Received %zu, max is %d.\n", len, \
- (int)max); \
- return -EFBIG; \
- } \
- if (buflen < sizeof(*attr) + len) { \
- dev_err(dev, "WLP: Not enough space in buffer to parse "\
- "variable data.\n"); \
- return -EIO; \
- } \
- memcpy(value, (void *) attr + sizeof(*attr), len); \
- return sizeof(*attr) + len; \
+ return wlp_vget_attribute(wlp, (type_code), &attr->hdr, \
+ value, (max), buflen); \
}
wlp_get(u8, WLP_ATTR_WLP_VER, version)
diff --git a/drivers/vbus/Kconfig b/drivers/vbus/Kconfig
new file mode 100644
index 000000000000..f51cba10913e
--- /dev/null
+++ b/drivers/vbus/Kconfig
@@ -0,0 +1,25 @@
+#
+# Virtual-Bus (VBus) driver configuration
+#
+
+config VBUS_PROXY
+ bool "Virtual-Bus support"
+ select SHM_SIGNAL
+ select IOQ
+ default n
+ help
+ Adds support for a virtual-bus model drivers in a guest to connect
+ to host side virtual-bus resources. If you are using this kernel
+ in a virtualization solution which implements virtual-bus devices
+ on the backend, say Y. If unsure, say N.
+
+config VBUS_PCIBRIDGE
+ bool "PCI to Virtual-Bus bridge"
+ depends on PCI
+ depends on VBUS_PROXY
+ select IOQ
+ default n
+ help
+ Provides a way to bridge host side vbus devices via a PCI-BRIDGE
+ object. If you are running virtualization with vbus devices on the
+ host, and the vbus is exposed via PCI, say Y. Otherwise, say N.
diff --git a/drivers/vbus/Makefile b/drivers/vbus/Makefile
new file mode 100644
index 000000000000..944b7f1fec90
--- /dev/null
+++ b/drivers/vbus/Makefile
@@ -0,0 +1,6 @@
+
+vbus-proxy-objs += bus-proxy.o
+obj-$(CONFIG_VBUS_PROXY) += vbus-proxy.o
+
+vbus-pcibridge-objs += pci-bridge.o
+obj-$(CONFIG_VBUS_PCIBRIDGE) += vbus-pcibridge.o
diff --git a/drivers/vbus/bus-proxy.c b/drivers/vbus/bus-proxy.c
new file mode 100644
index 000000000000..47928423a050
--- /dev/null
+++ b/drivers/vbus/bus-proxy.c
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/vbus_driver.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+
+#define VBUS_PROXY_NAME "vbus-proxy"
+
+static struct vbus_device_proxy *to_dev(struct device *_dev)
+{
+ return _dev ? container_of(_dev, struct vbus_device_proxy, dev) : NULL;
+}
+
+static struct vbus_driver *to_drv(struct device_driver *_drv)
+{
+ return container_of(_drv, struct vbus_driver, drv);
+}
+
+/*
+ * This function is invoked whenever a new driver and/or device is added
+ * to check if there is a match
+ */
+static int vbus_dev_proxy_match(struct device *_dev, struct device_driver *_drv)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ struct vbus_driver *drv = to_drv(_drv);
+
+ return !strcmp(dev->type, drv->type);
+}
+
+static int vbus_dev_proxy_uevent(struct device *_dev, struct kobj_uevent_env *env)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+
+ if (add_uevent_var(env, "MODALIAS=vbus-proxy:%s", dev->type))
+ return -ENOMEM;
+
+ return 0;
+}
+
+/*
+ * This function is invoked after the bus infrastructure has already made a
+ * match. The device will contain a reference to the paired driver which
+ * we will extract.
+ */
+static int vbus_dev_proxy_probe(struct device *_dev)
+{
+ int ret = 0;
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ struct vbus_driver *drv = to_drv(_dev->driver);
+
+ if (drv->ops->probe)
+ ret = drv->ops->probe(dev);
+
+ return ret;
+}
+
+static struct bus_type vbus_proxy = {
+ .name = VBUS_PROXY_NAME,
+ .match = vbus_dev_proxy_match,
+ .uevent = vbus_dev_proxy_uevent,
+};
+
+static struct device vbus_proxy_rootdev = {
+ .parent = NULL,
+ .init_name = VBUS_PROXY_NAME,
+};
+
+static int __init vbus_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vbus_proxy);
+ BUG_ON(ret < 0);
+
+ ret = device_register(&vbus_proxy_rootdev);
+ BUG_ON(ret < 0);
+
+ return 0;
+}
+
+postcore_initcall(vbus_init);
+
+static void device_release(struct device *dev)
+{
+ struct vbus_device_proxy *_dev;
+
+ _dev = container_of(dev, struct vbus_device_proxy, dev);
+
+ _dev->ops->release(_dev);
+}
+
+static ssize_t _show_modalias(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "vbus-proxy:%s\n", to_dev(dev)->type);
+}
+static DEVICE_ATTR(modalias, S_IRUSR | S_IRGRP | S_IROTH, _show_modalias, NULL);
+
+int vbus_device_proxy_register(struct vbus_device_proxy *new)
+{
+ int ret;
+
+ new->dev.parent = &vbus_proxy_rootdev;
+ new->dev.bus = &vbus_proxy;
+ new->dev.release = &device_release;
+
+ ret = device_register(&new->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = device_create_file(&new->dev, &dev_attr_modalias);
+ if (ret < 0) {
+ device_unregister(&new->dev);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_register);
+
+void vbus_device_proxy_unregister(struct vbus_device_proxy *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_modalias);
+ device_unregister(&dev->dev);
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_unregister);
+
+static int match_device_id(struct device *_dev, void *data)
+{
+ struct vbus_device_proxy *dev = to_dev(_dev);
+ u64 id = *(u64 *)data;
+
+ return dev->id == id;
+}
+
+struct vbus_device_proxy *vbus_device_proxy_find(u64 id)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&vbus_proxy, NULL, &id, &match_device_id);
+
+ return to_dev(dev);
+}
+EXPORT_SYMBOL_GPL(vbus_device_proxy_find);
+
+int vbus_driver_register(struct vbus_driver *new)
+{
+ new->drv.bus = &vbus_proxy;
+ new->drv.name = new->type;
+ new->drv.owner = new->owner;
+ new->drv.probe = vbus_dev_proxy_probe;
+
+ return driver_register(&new->drv);
+}
+EXPORT_SYMBOL_GPL(vbus_driver_register);
+
+void vbus_driver_unregister(struct vbus_driver *drv)
+{
+ driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(vbus_driver_unregister);
+
+/*
+ *---------------------------------
+ * driver-side IOQ helper
+ *---------------------------------
+ */
+static void
+vbus_driver_ioq_release(struct ioq *ioq)
+{
+ kfree(ioq->head_desc);
+ kfree(ioq);
+}
+
+static struct ioq_ops vbus_driver_ioq_ops = {
+ .release = vbus_driver_ioq_release,
+};
+
+
+int vbus_driver_ioq_alloc(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio, size_t count, struct ioq **ioq)
+{
+ struct ioq *_ioq;
+ struct ioq_ring_head *head = NULL;
+ struct shm_signal *signal = NULL;
+ size_t len = IOQ_HEAD_DESC_SIZE(count);
+ int ret = -ENOMEM;
+
+ _ioq = kzalloc(sizeof(*_ioq), GFP_KERNEL);
+ if (!_ioq)
+ goto error;
+
+ head = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!head)
+ goto error;
+
+ head->magic = IOQ_RING_MAGIC;
+ head->ver = IOQ_RING_VER;
+ head->count = cpu_to_le32(count);
+
+ ret = dev->ops->shm(dev, name, id, prio, head, len,
+ &head->signal, &signal, 0);
+ if (ret < 0)
+ goto error;
+
+ ioq_init(_ioq,
+ &vbus_driver_ioq_ops,
+ ioq_locality_north,
+ head,
+ signal,
+ count);
+
+ *ioq = _ioq;
+
+ return 0;
+
+ error:
+ kfree(_ioq);
+ kfree(head);
+
+ if (signal)
+ shm_signal_put(signal);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vbus_driver_ioq_alloc);
diff --git a/drivers/vbus/pci-bridge.c b/drivers/vbus/pci-bridge.c
new file mode 100644
index 000000000000..0d513248dae6
--- /dev/null
+++ b/drivers/vbus/pci-bridge.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/ioq.h>
+#include <linux/interrupt.h>
+#include <linux/vbus_driver.h>
+#include <linux/vbus_pci.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+#define VBUS_PCI_NAME "pci-to-vbus-bridge"
+
+struct vbus_pci {
+ spinlock_t lock;
+ struct pci_dev *dev;
+ struct ioq eventq;
+ struct vbus_pci_event *ring;
+ struct vbus_pci_regs *regs;
+ struct vbus_pci_signals *signals;
+ int irq;
+ bool enabled;
+ struct {
+ struct dentry *fs;
+ int events;
+ int qnotify;
+ int qinject;
+ int notify;
+ int inject;
+ int bridgecalls;
+ int buscalls;
+ } stats;
+};
+
+static struct vbus_pci vbus_pci;
+
+struct vbus_pci_device {
+ char type[VBUS_MAX_DEVTYPE_LEN];
+ u64 handle;
+ struct list_head shms;
+ struct vbus_device_proxy vdev;
+ struct work_struct drop;
+};
+
+static DEFINE_PER_CPU(struct vbus_pci_fastcall_desc, vbus_pci_percpu_fastcall)
+____cacheline_aligned;
+
+/*
+ * -------------------
+ * common routines
+ * -------------------
+ */
+
+static int
+vbus_pci_bridgecall(unsigned long nr, void *data, unsigned long len)
+{
+ struct vbus_pci_call_desc params = {
+ .vector = nr,
+ .len = len,
+ .datap = __pa(data),
+ };
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vbus_pci.lock, flags);
+
+ memcpy_toio(&vbus_pci.regs->bridgecall, &params, sizeof(params));
+ ret = ioread32(&vbus_pci.regs->bridgecall);
+
+ spin_unlock_irqrestore(&vbus_pci.lock, flags);
+
+ vbus_pci.stats.bridgecalls++;
+
+ return ret;
+}
+
+static int
+vbus_pci_buscall(unsigned long nr, void *data, unsigned long len)
+{
+ struct vbus_pci_fastcall_desc *params;
+ int ret;
+
+ preempt_disable();
+
+ params = &get_cpu_var(vbus_pci_percpu_fastcall);
+
+ params->call.vector = nr;
+ params->call.len = len;
+ params->call.datap = __pa(data);
+
+ iowrite32(smp_processor_id(), &vbus_pci.signals->fastcall);
+
+ ret = params->result;
+
+ preempt_enable();
+
+ vbus_pci.stats.buscalls++;
+
+ return ret;
+}
+
+static struct vbus_pci_device *
+to_dev(struct vbus_device_proxy *vdev)
+{
+ return container_of(vdev, struct vbus_pci_device, vdev);
+}
+
+static void
+_signal_init(struct shm_signal *signal, struct shm_signal_desc *desc,
+ struct shm_signal_ops *ops)
+{
+ desc->magic = SHM_SIGNAL_MAGIC;
+ desc->ver = SHM_SIGNAL_VER;
+
+ shm_signal_init(signal, shm_locality_north, ops, desc);
+}
+
+/*
+ * -------------------
+ * _signal
+ * -------------------
+ */
+
+struct _signal {
+ char name[64];
+ struct vbus_pci *pcivbus;
+ struct shm_signal signal;
+ u32 handle;
+ struct rb_node node;
+ struct list_head list;
+ int irq;
+ struct irq_desc *desc;
+};
+
+static struct _signal *
+to_signal(struct shm_signal *signal)
+{
+ return container_of(signal, struct _signal, signal);
+}
+
+static int
+_signal_inject(struct shm_signal *signal)
+{
+ struct _signal *_signal = to_signal(signal);
+
+ vbus_pci.stats.inject++;
+ iowrite32(_signal->handle, &vbus_pci.signals->shmsignal);
+
+ return 0;
+}
+
+static void
+_signal_release(struct shm_signal *signal)
+{
+ struct _signal *_signal = to_signal(signal);
+
+ kfree(_signal);
+}
+
+static struct shm_signal_ops _signal_ops = {
+ .inject = _signal_inject,
+ .release = _signal_release,
+};
+
+static void shmsignal_disconnect(struct _signal *_signal);
+
+/*
+ * -------------------
+ * vbus_device_proxy routines
+ * -------------------
+ */
+
+static int
+vbus_pci_device_open(struct vbus_device_proxy *vdev, int version, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct vbus_pci_deviceopen params;
+ int ret;
+
+ if (dev->handle)
+ return -EINVAL;
+
+ params.devid = vdev->id;
+ params.version = version;
+
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVOPEN,
+ &params, sizeof(params));
+ if (ret < 0)
+ return ret;
+
+ dev->handle = params.handle;
+
+ return 0;
+}
+
+static int
+vbus_pci_device_close(struct vbus_device_proxy *vdev, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ unsigned long iflags;
+ int ret;
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+
+ while (!list_empty(&dev->shms)) {
+ struct _signal *_signal;
+
+ _signal = list_first_entry(&dev->shms, struct _signal, list);
+
+ list_del(&_signal->list);
+ shmsignal_disconnect(_signal);
+
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+ shm_signal_put(&_signal->signal);
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+ }
+
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+
+ /*
+ * The DEVICECLOSE will implicitly close all of the shm on the
+ * host-side, so there is no need to do an explicit per-shm
+ * hypercall
+ */
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVCLOSE,
+ &dev->handle, sizeof(dev->handle));
+
+ if (ret < 0)
+ printk(KERN_ERR "VBUS-PCI: Error closing device %s/%lld: %d\n",
+ vdev->type, vdev->id, ret);
+
+ dev->handle = 0;
+
+ return 0;
+}
+
+/*
+ * -------------------
+ * shmsignal interrupt routines
+ * -------------------
+ */
+
+/* We abstract these routines so that we can drop in irqchip later */
+
+static void
+shmsignal_wakeup(struct _signal *_signal)
+{
+ _shm_signal_wakeup(&_signal->signal);
+}
+
+static int
+shmsignal_connect(struct _signal *_signal)
+{
+ return 0;
+}
+
+static void
+shmsignal_disconnect(struct _signal *_signal)
+{
+
+}
+
+static int
+vbus_pci_device_shm(struct vbus_device_proxy *vdev, const char *name,
+ int id, int prio,
+ void *ptr, size_t len,
+ struct shm_signal_desc *sdesc, struct shm_signal **signal,
+ int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct _signal *_signal = NULL;
+ struct vbus_pci_deviceshm params;
+ unsigned long iflags;
+ int ret;
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ params.devh = dev->handle;
+ params.id = id;
+ params.flags = flags;
+ params.datap = (u64)__pa(ptr);
+ params.len = len;
+
+ if (signal) {
+ /*
+ * The signal descriptor must be embedded within the
+ * provided ptr
+ */
+ if (!sdesc
+ || (len < sizeof(*sdesc))
+ || ((void *)sdesc < ptr)
+ || ((void *)sdesc > (ptr + len - sizeof(*sdesc))))
+ return -EINVAL;
+
+ _signal = kzalloc(sizeof(*_signal), GFP_KERNEL);
+ if (!_signal)
+ return -ENOMEM;
+
+ _signal_init(&_signal->signal, sdesc, &_signal_ops);
+
+ /*
+ * take another reference for the host. This is dropped
+ * by a SHMCLOSE event
+ */
+ shm_signal_get(&_signal->signal);
+
+ params.signal.offset = (u64)(unsigned long)sdesc -
+ (u64)(unsigned long)ptr;
+ params.signal.prio = prio;
+ params.signal.cookie = (u64)(unsigned long)_signal;
+
+ } else
+ params.signal.offset = -1; /* yes, this is a u32, but its ok */
+
+ ret = vbus_pci_buscall(VBUS_PCI_HC_DEVSHM,
+ &params, sizeof(params));
+ if (ret < 0)
+ goto fail;
+
+ if (signal) {
+
+ BUG_ON(ret < 0);
+
+ _signal->handle = ret;
+
+ if (!name)
+ snprintf(_signal->name, sizeof(_signal->name),
+ "dev%lld-id%d", vdev->id, id);
+ else
+ snprintf(_signal->name, sizeof(_signal->name),
+ "%s", name);
+
+ shmsignal_connect(_signal);
+
+ spin_lock_irqsave(&vbus_pci.lock, iflags);
+ list_add_tail(&_signal->list, &dev->shms);
+ spin_unlock_irqrestore(&vbus_pci.lock, iflags);
+
+ shm_signal_get(&_signal->signal);
+ *signal = &_signal->signal;
+ }
+
+ return 0;
+
+fail:
+ if (_signal) {
+ /*
+ * We held two references above, so we need to drop
+ * both of them
+ */
+ shm_signal_put(&_signal->signal);
+ shm_signal_put(&_signal->signal);
+ }
+
+ return ret;
+}
+
+static int
+vbus_pci_device_call(struct vbus_device_proxy *vdev, u32 func, void *data,
+ size_t len, int flags)
+{
+ struct vbus_pci_device *dev = to_dev(vdev);
+ struct vbus_pci_devicecall params = {
+ .devh = dev->handle,
+ .func = func,
+ .datap = (u64)__pa(data),
+ .len = len,
+ .flags = flags,
+ };
+
+ if (!dev->handle)
+ return -EINVAL;
+
+ return vbus_pci_buscall(VBUS_PCI_HC_DEVCALL, &params, sizeof(params));
+}
+
+static void
+vbus_pci_device_release(struct vbus_device_proxy *vdev)
+{
+ struct vbus_pci_device *_dev = to_dev(vdev);
+
+ vbus_pci_device_close(vdev, 0);
+
+ kfree(_dev);
+}
+
+static struct vbus_device_proxy_ops vbus_pci_device_ops = {
+ .open = vbus_pci_device_open,
+ .close = vbus_pci_device_close,
+ .shm = vbus_pci_device_shm,
+ .call = vbus_pci_device_call,
+ .release = vbus_pci_device_release,
+};
+
+/*
+ * -------------------
+ * vbus events
+ * -------------------
+ */
+
+struct deferred_devadd_event {
+ struct work_struct work;
+ struct vbus_pci_add_event event;
+};
+
+static void deferred_devdrop(struct work_struct *work);
+
+static void
+deferred_devadd(struct work_struct *work)
+{
+ struct deferred_devadd_event *_event;
+ struct vbus_pci_device *new;
+ int ret;
+
+ _event = container_of(work, struct deferred_devadd_event, work);
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ printk(KERN_ERR "VBUS_PCI: Out of memory on add_event\n");
+ return;
+ }
+
+ INIT_LIST_HEAD(&new->shms);
+
+ memcpy(new->type, _event->event.type, VBUS_MAX_DEVTYPE_LEN);
+ new->vdev.type = new->type;
+ new->vdev.id = _event->event.id;
+ new->vdev.ops = &vbus_pci_device_ops;
+
+ dev_set_name(&new->vdev.dev, "%lld", _event->event.id);
+
+ INIT_WORK(&new->drop, deferred_devdrop);
+
+ ret = vbus_device_proxy_register(&new->vdev);
+ if (ret < 0)
+ panic("failed to register device %lld(%s): %d\n",
+ new->vdev.id, new->type, ret);
+
+ kfree(_event);
+}
+
+static void
+deferred_devdrop(struct work_struct *work)
+{
+ struct vbus_pci_device *dev;
+
+ dev = container_of(work, struct vbus_pci_device, drop);
+ vbus_device_proxy_unregister(&dev->vdev);
+}
+
+static void
+event_devadd(struct vbus_pci_add_event *event)
+{
+ struct deferred_devadd_event *_event;
+
+ _event = kzalloc(sizeof(*_event), GFP_ATOMIC);
+ if (!_event) {
+ printk(KERN_ERR \
+ "VBUS_PCI: Out of ATOMIC memory on add_event\n");
+ return;
+ }
+
+ INIT_WORK(&_event->work, deferred_devadd);
+ memcpy(&_event->event, event, sizeof(*event));
+
+ schedule_work(&_event->work);
+}
+
+static void
+event_devdrop(struct vbus_pci_handle_event *event)
+{
+ struct vbus_device_proxy *dev = vbus_device_proxy_find(event->handle);
+
+ if (!dev) {
+ printk(KERN_WARNING "VBUS-PCI: devdrop failed: %lld\n",
+ event->handle);
+ return;
+ }
+
+ schedule_work(&to_dev(dev)->drop);
+}
+
+static void
+event_shmsignal(struct vbus_pci_handle_event *event)
+{
+ struct _signal *_signal = (struct _signal *)(unsigned long)event->handle;
+
+ vbus_pci.stats.notify++;
+
+ shmsignal_wakeup(_signal);
+}
+
+static void
+event_shmclose(struct vbus_pci_handle_event *event)
+{
+ struct _signal *_signal = (struct _signal *)(unsigned long)event->handle;
+
+ /*
+ * This reference was taken during the DEVICESHM call
+ */
+ shm_signal_put(&_signal->signal);
+}
+
+/*
+ * -------------------
+ * eventq routines
+ * -------------------
+ */
+
+static struct ioq_notifier eventq_notifier;
+
+static int __devinit
+eventq_init(int qlen)
+{
+ struct ioq_iterator iter;
+ int ret;
+ int i;
+
+ vbus_pci.ring = kzalloc(sizeof(struct vbus_pci_event) * qlen,
+ GFP_KERNEL);
+ if (!vbus_pci.ring)
+ return -ENOMEM;
+
+ /*
+ * We want to iterate on the "valid" index. By default the iterator
+ * will not "autoupdate" which means it will not hypercall the host
+ * with our changes. This is good, because we are really just
+ * initializing stuff here anyway. Note that you can always manually
+ * signal the host with ioq_signal() if the autoupdate feature is not
+ * used.
+ */
+ ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_valid, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Seek to the tail of the valid index (which should be our first
+ * item since the queue is brand-new)
+ */
+ ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * Now populate each descriptor with an empty vbus_event and mark it
+ * valid
+ */
+ for (i = 0; i < qlen; i++) {
+ struct vbus_pci_event *event = &vbus_pci.ring[i];
+ size_t len = sizeof(*event);
+ struct ioq_ring_desc *desc = iter.desc;
+
+ BUG_ON(iter.desc->valid);
+
+ desc->cookie = (u64)(unsigned long)event;
+ desc->ptr = cpu_to_le64(__pa(event));
+ desc->len = cpu_to_le64(len); /* total length */
+ desc->valid = 1;
+
+ /*
+ * This push operation will simultaneously advance the
+ * valid-tail index and increment our position in the queue
+ * by one.
+ */
+ ret = ioq_iter_push(&iter, 0);
+ BUG_ON(ret < 0);
+ }
+
+ vbus_pci.eventq.notifier = &eventq_notifier;
+
+ /*
+ * And finally, ensure that we can receive notification
+ */
+ ioq_notify_enable(&vbus_pci.eventq, 0);
+
+ return 0;
+}
+
+/* Invoked whenever the hypervisor ioq_signal()s our eventq */
+static void
+eventq_wakeup(struct ioq_notifier *notifier)
+{
+ struct ioq_iterator iter;
+ int ret;
+
+ /* We want to iterate on the head of the in-use index */
+ ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_inuse, 0);
+ BUG_ON(ret < 0);
+
+ ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
+ BUG_ON(ret < 0);
+
+ /*
+ * The EOM is indicated by finding a packet that is still owned by
+ * the south side.
+ *
+ * FIXME: This in theory could run indefinitely if the host keeps
+ * feeding us events since there is nothing like a NAPI budget. We
+ * might need to address that
+ */
+ while (!iter.desc->sown) {
+ struct ioq_ring_desc *desc = iter.desc;
+ struct vbus_pci_event *event;
+
+ event = (struct vbus_pci_event *)(unsigned long)desc->cookie;
+
+ switch (event->eventid) {
+ case VBUS_PCI_EVENT_DEVADD:
+ event_devadd(&event->data.add);
+ break;
+ case VBUS_PCI_EVENT_DEVDROP:
+ event_devdrop(&event->data.handle);
+ break;
+ case VBUS_PCI_EVENT_SHMSIGNAL:
+ event_shmsignal(&event->data.handle);
+ break;
+ case VBUS_PCI_EVENT_SHMCLOSE:
+ event_shmclose(&event->data.handle);
+ break;
+ default:
+ printk(KERN_WARNING "VBUS_PCI: Unexpected event %d\n",
+ event->eventid);
+ break;
+ };
+
+ memset(event, 0, sizeof(*event));
+
+ /* Advance the in-use head */
+ ret = ioq_iter_pop(&iter, 0);
+ BUG_ON(ret < 0);
+
+ vbus_pci.stats.events++;
+ }
+
+ /* And let the south side know that we changed the queue */
+ ioq_signal(&vbus_pci.eventq, 0);
+}
+
+static struct ioq_notifier eventq_notifier = {
+ .signal = &eventq_wakeup,
+};
+
+/* Injected whenever the host issues an ioq_signal() on the eventq */
+static irqreturn_t
+eventq_intr(int irq, void *dev)
+{
+ vbus_pci.stats.qnotify++;
+ _shm_signal_wakeup(vbus_pci.eventq.signal);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------
+ */
+
+static int
+eventq_signal_inject(struct shm_signal *signal)
+{
+ vbus_pci.stats.qinject++;
+
+ /* The eventq uses the special-case handle=0 */
+ iowrite32(0, &vbus_pci.signals->eventq);
+
+ return 0;
+}
+
+static void
+eventq_signal_release(struct shm_signal *signal)
+{
+ kfree(signal);
+}
+
+static struct shm_signal_ops eventq_signal_ops = {
+ .inject = eventq_signal_inject,
+ .release = eventq_signal_release,
+};
+
+/*
+ * -------------------
+ */
+
+static void
+eventq_ioq_release(struct ioq *ioq)
+{
+ /* released as part of the vbus_pci object */
+}
+
+static struct ioq_ops eventq_ioq_ops = {
+ .release = eventq_ioq_release,
+};
+
+/*
+ * -------------------
+ */
+
+static void
+vbus_pci_release(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ if (vbus_pci.stats.fs)
+ debugfs_remove(vbus_pci.stats.fs);
+#endif
+
+ if (vbus_pci.irq > 0)
+ free_irq(vbus_pci.irq, NULL);
+
+ if (vbus_pci.signals)
+ pci_iounmap(vbus_pci.dev, (void *)vbus_pci.signals);
+
+ if (vbus_pci.regs)
+ pci_iounmap(vbus_pci.dev, (void *)vbus_pci.regs);
+
+ pci_release_regions(vbus_pci.dev);
+ pci_disable_device(vbus_pci.dev);
+
+ kfree(vbus_pci.eventq.head_desc);
+ kfree(vbus_pci.ring);
+
+ vbus_pci.enabled = false;
+}
+
+static int __devinit
+vbus_pci_open(void)
+{
+ struct vbus_pci_bridge_negotiate params = {
+ .magic = VBUS_PCI_ABI_MAGIC,
+ .version = VBUS_PCI_HC_VERSION,
+ .capabilities = 0,
+ };
+
+ return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_NEGOTIATE,
+ &params, sizeof(params));
+}
+
+#define QLEN 1024
+
+static int __devinit
+vbus_pci_eventq_register(void)
+{
+ struct vbus_pci_busreg params = {
+ .count = 1,
+ .eventq = {
+ {
+ .count = QLEN,
+ .ring = (u64)__pa(vbus_pci.eventq.head_desc),
+ .data = (u64)__pa(vbus_pci.ring),
+ },
+ },
+ };
+
+ return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_QREG,
+ &params, sizeof(params));
+}
+
+static int __devinit
+_ioq_init(size_t ringsize, struct ioq *ioq, struct ioq_ops *ops)
+{
+ struct shm_signal *signal = NULL;
+ struct ioq_ring_head *head = NULL;
+ size_t len = IOQ_HEAD_DESC_SIZE(ringsize);
+
+ head = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!head)
+ return -ENOMEM;
+
+ signal = kzalloc(sizeof(*signal), GFP_KERNEL);
+ if (!signal) {
+ kfree(head);
+ return -ENOMEM;
+ }
+
+ head->magic = IOQ_RING_MAGIC;
+ head->ver = IOQ_RING_VER;
+ head->count = cpu_to_le32(ringsize);
+
+ _signal_init(signal, &head->signal, &eventq_signal_ops);
+
+ ioq_init(ioq, ops, ioq_locality_north, head, signal, ringsize);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _debugfs_seq_show(struct seq_file *m, void *p)
+{
+#define P(F) \
+ seq_printf(m, " .%-30s: %d\n", #F, (int)vbus_pci.stats.F)
+
+ P(events);
+ P(qnotify);
+ P(qinject);
+ P(notify);
+ P(inject);
+ P(bridgecalls);
+ P(buscalls);
+
+#undef P
+
+ return 0;
+}
+
+static int _debugfs_fops_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _debugfs_seq_show, inode->i_private);
+}
+
+static const struct file_operations stat_fops = {
+ .open = _debugfs_fops_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+#endif
+
+static int __devinit
+vbus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int ret;
+ int cpu;
+
+ if (vbus_pci.enabled)
+ return -EEXIST; /* we only support one bridge per kernel */
+
+ if (pdev->revision != VBUS_PCI_ABI_VERSION) {
+ printk(KERN_DEBUG "VBUS_PCI: expected ABI version %d, got %d\n",
+ VBUS_PCI_ABI_VERSION,
+ pdev->revision);
+ return -ENODEV;
+ }
+
+ vbus_pci.dev = pdev;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = pci_request_regions(pdev, VBUS_PCI_NAME);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Could not init BARs: %d\n", ret);
+ goto out_fail;
+ }
+
+ vbus_pci.regs = pci_iomap(pdev, 0, sizeof(struct vbus_pci_regs));
+ if (!vbus_pci.regs) {
+ printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
+ goto out_fail;
+ }
+
+ vbus_pci.signals = pci_iomap(pdev, 1, sizeof(struct vbus_pci_signals));
+ if (!vbus_pci.signals) {
+ printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
+ goto out_fail;
+ }
+
+ ret = vbus_pci_open();
+ if (ret < 0) {
+ printk(KERN_DEBUG "VBUS_PCI: Could not register with host: %d\n",
+ ret);
+ goto out_fail;
+ }
+
+ /*
+ * Allocate an IOQ to use for host-2-guest event notification
+ */
+ ret = _ioq_init(QLEN, &vbus_pci.eventq, &eventq_ioq_ops);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not init eventq: %d\n", ret);
+ goto out_fail;
+ }
+
+ ret = eventq_init(QLEN);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not setup ring: %d\n", ret);
+ goto out_fail;
+ }
+
+ ret = pci_enable_msi(pdev);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Cound not enable MSI: %d\n", ret);
+ goto out_fail;
+ }
+
+ vbus_pci.irq = pdev->irq;
+
+ ret = request_irq(pdev->irq, eventq_intr, 0, "vbus", NULL);
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Failed to register IRQ %d\n: %d",
+ pdev->irq, ret);
+ goto out_fail;
+ }
+
+ /*
+ * Add one fastcall vector per cpu so that we can do lockless
+ * hypercalls
+ */
+ for_each_possible_cpu(cpu) {
+ struct vbus_pci_fastcall_desc *desc =
+ &per_cpu(vbus_pci_percpu_fastcall, cpu);
+ struct vbus_pci_call_desc params = {
+ .vector = cpu,
+ .len = sizeof(*desc),
+ .datap = __pa(desc),
+ };
+
+ ret = vbus_pci_bridgecall(VBUS_PCI_BRIDGE_FASTCALL_ADD,
+ &params, sizeof(params));
+ if (ret < 0) {
+ printk(KERN_ERR \
+ "VBUS_PCI: Failed to register cpu:%d\n: %d",
+ cpu, ret);
+ goto out_fail;
+ }
+ }
+
+ /*
+ * Finally register our queue on the host to start receiving events
+ */
+ ret = vbus_pci_eventq_register();
+ if (ret < 0) {
+ printk(KERN_ERR "VBUS_PCI: Could not register with host: %d\n",
+ ret);
+ goto out_fail;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ vbus_pci.stats.fs = debugfs_create_file(VBUS_PCI_NAME, S_IRUGO,
+ NULL, NULL, &stat_fops);
+ if (IS_ERR(vbus_pci.stats.fs)) {
+ ret = PTR_ERR(vbus_pci.stats.fs);
+ printk(KERN_ERR "VBUS_PCI: error creating stats-fs: %d\n", ret);
+ goto out_fail;
+ }
+#endif
+
+ vbus_pci.enabled = true;
+
+ printk(KERN_INFO "Virtual-Bus: Copyright (c) 2009, " \
+ "Gregory Haskins <ghaskins@novell.com>\n");
+
+ return 0;
+
+ out_fail:
+ vbus_pci_release();
+
+ return ret;
+}
+
+static void __devexit
+vbus_pci_remove(struct pci_dev *pdev)
+{
+ vbus_pci_release();
+}
+
+static DEFINE_PCI_DEVICE_TABLE(vbus_pci_tbl) = {
+ { PCI_DEVICE(0x11da, 0x2000) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, vbus_pci_tbl);
+
+static struct pci_driver vbus_pci_driver = {
+ .name = VBUS_PCI_NAME,
+ .id_table = vbus_pci_tbl,
+ .probe = vbus_pci_probe,
+ .remove = vbus_pci_remove,
+};
+
+static int __init
+vbus_pci_init(void)
+{
+ memset(&vbus_pci, 0, sizeof(vbus_pci));
+ spin_lock_init(&vbus_pci.lock);
+
+ return pci_register_driver(&vbus_pci_driver);
+}
+
+static void __exit
+vbus_pci_exit(void)
+{
+ pci_unregister_driver(&vbus_pci_driver);
+}
+
+module_init(vbus_pci_init);
+module_exit(vbus_pci_exit);
+
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
new file mode 100644
index 000000000000..b8f705cca438
--- /dev/null
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -0,0 +1,304 @@
+/*
+ * Backlight driver for Marvell Semiconductor 88PM8606
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/mfd/88pm860x.h>
+
+#define MAX_BRIGHTNESS (0xFF)
+#define MIN_BRIGHTNESS (0)
+
+#define CURRENT_MASK (0x1F << 1)
+
+struct pm860x_backlight_data {
+ struct pm860x_chip *chip;
+ struct i2c_client *i2c;
+ int current_brightness;
+ int port;
+ int pwm;
+ int iset;
+};
+
+static inline int wled_a(int port)
+{
+ int ret;
+
+ ret = ((port - PM8606_BACKLIGHT1) << 1) + 2;
+ return ret;
+}
+
+static inline int wled_b(int port)
+{
+ int ret;
+
+ ret = ((port - PM8606_BACKLIGHT1) << 1) + 3;
+ return ret;
+}
+
+/* WLED2 & WLED3 share the same IDC */
+static inline int wled_idc(int port)
+{
+ int ret;
+
+ switch (port) {
+ case PM8606_BACKLIGHT1:
+ case PM8606_BACKLIGHT2:
+ ret = ((port - PM8606_BACKLIGHT1) << 1) + 3;
+ break;
+ case PM8606_BACKLIGHT3:
+ default:
+ ret = ((port - PM8606_BACKLIGHT2) << 1) + 3;
+ break;
+ }
+ return ret;
+}
+
+static int pm860x_backlight_set(struct backlight_device *bl, int brightness)
+{
+ struct pm860x_backlight_data *data = bl_get_data(bl);
+ struct pm860x_chip *chip = data->chip;
+ unsigned char value;
+ int ret;
+
+ if (brightness > MAX_BRIGHTNESS)
+ value = MAX_BRIGHTNESS;
+ else
+ value = brightness;
+
+ ret = pm860x_reg_write(data->i2c, wled_a(data->port), value);
+ if (ret < 0)
+ goto out;
+
+ if ((data->current_brightness == 0) && brightness) {
+ if (data->iset) {
+ ret = pm860x_set_bits(data->i2c, wled_idc(data->port),
+ CURRENT_MASK, data->iset);
+ if (ret < 0)
+ goto out;
+ }
+ if (data->pwm) {
+ ret = pm860x_set_bits(data->i2c, PM8606_PWM,
+ PM8606_PWM_FREQ_MASK, data->pwm);
+ if (ret < 0)
+ goto out;
+ }
+ if (brightness == MAX_BRIGHTNESS) {
+ /* set WLED_ON bit as 100% */
+ ret = pm860x_set_bits(data->i2c, wled_b(data->port),
+ PM8606_WLED_ON, PM8606_WLED_ON);
+ }
+ } else {
+ if (brightness == MAX_BRIGHTNESS) {
+ /* set WLED_ON bit as 100% */
+ ret = pm860x_set_bits(data->i2c, wled_b(data->port),
+ PM8606_WLED_ON, PM8606_WLED_ON);
+ } else {
+ /* clear WLED_ON bit since it's not 100% */
+ ret = pm860x_set_bits(data->i2c, wled_b(data->port),
+ PM8606_WLED_ON, 0);
+ }
+ }
+ if (ret < 0)
+ goto out;
+
+ dev_dbg(chip->dev, "set brightness %d\n", value);
+ data->current_brightness = value;
+ return 0;
+out:
+ dev_dbg(chip->dev, "set brightness %d failure with return "
+ "value:%d\n", value, ret);
+ return ret;
+}
+
+static int pm860x_backlight_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.state & BL_CORE_SUSPENDED)
+ brightness = 0;
+
+ return pm860x_backlight_set(bl, brightness);
+}
+
+static int pm860x_backlight_get_brightness(struct backlight_device *bl)
+{
+ struct pm860x_backlight_data *data = bl_get_data(bl);
+ struct pm860x_chip *chip = data->chip;
+ int ret;
+
+ ret = pm860x_reg_read(data->i2c, wled_a(data->port));
+ if (ret < 0)
+ goto out;
+ data->current_brightness = ret;
+ dev_dbg(chip->dev, "get brightness %d\n", data->current_brightness);
+ return data->current_brightness;
+out:
+ return -EINVAL;
+}
+
+static struct backlight_ops pm860x_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = pm860x_backlight_update_status,
+ .get_brightness = pm860x_backlight_get_brightness,
+};
+
+static int __check_device(struct pm860x_backlight_pdata *pdata, char *name)
+{
+ struct pm860x_backlight_pdata *p = pdata;
+ int ret = -EINVAL;
+
+ while (p && p->id) {
+ if ((p->id != PM8606_ID_BACKLIGHT) || (p->flags < 0))
+ break;
+
+ if (!strncmp(name, pm860x_backlight_name[p->flags],
+ MFD_NAME_SIZE)) {
+ ret = (int)p->flags;
+ break;
+ }
+ p++;
+ }
+ return ret;
+}
+
+static int pm860x_backlight_probe(struct platform_device *pdev)
+{
+ struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm860x_platform_data *pm860x_pdata;
+ struct pm860x_backlight_pdata *pdata = NULL;
+ struct pm860x_backlight_data *data;
+ struct backlight_device *bl;
+ struct resource *res;
+ unsigned char value;
+ char name[MFD_NAME_SIZE];
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No I/O resource!\n");
+ return -EINVAL;
+ }
+
+ if (pdev->dev.parent->platform_data) {
+ pm860x_pdata = pdev->dev.parent->platform_data;
+ pdata = pm860x_pdata->backlight;
+ }
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "platform data isn't assigned to "
+ "backlight\n");
+ return -EINVAL;
+ }
+
+ data = kzalloc(sizeof(struct pm860x_backlight_data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ strncpy(name, res->name, MFD_NAME_SIZE);
+ data->chip = chip;
+ data->i2c = (chip->id == CHIP_PM8606) ? chip->client \
+ : chip->companion;
+ data->current_brightness = MAX_BRIGHTNESS;
+ data->pwm = pdata->pwm;
+ data->iset = pdata->iset;
+ data->port = __check_device(pdata, name);
+ if (data->port < 0) {
+ dev_err(&pdev->dev, "wrong platform data is assigned");
+ return -EINVAL;
+ }
+
+ bl = backlight_device_register(name, &pdev->dev, data,
+ &pm860x_backlight_ops);
+ if (IS_ERR(bl)) {
+ dev_err(&pdev->dev, "failed to register backlight\n");
+ kfree(data);
+ return PTR_ERR(bl);
+ }
+ bl->props.max_brightness = MAX_BRIGHTNESS;
+ bl->props.brightness = MAX_BRIGHTNESS;
+
+ platform_set_drvdata(pdev, bl);
+
+ /* Enable reference VSYS */
+ ret = pm860x_reg_read(data->i2c, PM8606_VSYS);
+ if (ret < 0)
+ goto out;
+ if ((ret & PM8606_VSYS_EN) == 0) {
+ value = ret | PM8606_VSYS_EN;
+ ret = pm860x_reg_write(data->i2c, PM8606_VSYS, value);
+ if (ret < 0)
+ goto out;
+ }
+ /* Enable reference OSC */
+ ret = pm860x_reg_read(data->i2c, PM8606_MISC);
+ if (ret < 0)
+ goto out;
+ if ((ret & PM8606_MISC_OSC_EN) == 0) {
+ value = ret | PM8606_MISC_OSC_EN;
+ ret = pm860x_reg_write(data->i2c, PM8606_MISC, value);
+ if (ret < 0)
+ goto out;
+ }
+ /* read current backlight */
+ ret = pm860x_backlight_get_brightness(bl);
+ if (ret < 0)
+ goto out;
+
+ backlight_update_status(bl);
+ return 0;
+out:
+ kfree(data);
+ return ret;
+}
+
+static int pm860x_backlight_remove(struct platform_device *pdev)
+{
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct pm860x_backlight_data *data = bl_get_data(bl);
+
+ backlight_device_unregister(bl);
+ kfree(data);
+ return 0;
+}
+
+static struct platform_driver pm860x_backlight_driver = {
+ .driver = {
+ .name = "88pm860x-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm860x_backlight_probe,
+ .remove = pm860x_backlight_remove,
+};
+
+static int __init pm860x_backlight_init(void)
+{
+ return platform_driver_register(&pm860x_backlight_driver);
+}
+module_init(pm860x_backlight_init);
+
+static void __exit pm860x_backlight_exit(void)
+{
+ platform_driver_unregister(&pm860x_backlight_driver);
+}
+module_exit(pm860x_backlight_exit);
+
+MODULE_DESCRIPTION("Backlight Driver for Marvell Semiconductor 88PM8606");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:88pm860x-backlight");
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 09bfa9662e4d..0c77fc610212 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -212,6 +212,13 @@ config BACKLIGHT_DA903X
If you have a LCD backlight connected to the WLED output of DA9030
or DA9034 WLED output, say Y here to enable this driver.
+config BACKLIGHT_MAX8925
+ tristate "Backlight driver for MAX8925"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_MAX8925
+ help
+ If you have a LCD backlight connected to the WLED output of MAX8925
+ WLED output, say Y here to enable this driver.
+
config BACKLIGHT_MBP_NVIDIA
tristate "MacBook Pro Nvidia Backlight Driver"
depends on BACKLIGHT_CLASS_DEVICE && X86
@@ -262,3 +269,9 @@ config BACKLIGHT_ADP5520
To compile this driver as a module, choose M here: the module will
be called adp5520_bl.
+config BACKLIGHT_88PM860X
+ tristate "Backlight Driver for 88PM8606 using WLED"
+ depends on BACKLIGHT_CLASS_DEVICE && MFD_88PM860X
+ help
+ Say Y to enable the backlight driver for Marvell 88PM8606.
+
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 9a405548874c..6c704d41462d 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -22,10 +22,12 @@ obj-$(CONFIG_BACKLIGHT_PROGEAR) += progear_bl.o
obj-$(CONFIG_BACKLIGHT_CARILLO_RANCH) += cr_bllcd.o
obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o
obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
+obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o
obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
+obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
new file mode 100644
index 000000000000..c267069a52a3
--- /dev/null
+++ b/drivers/video/backlight/max8925_bl.c
@@ -0,0 +1,200 @@
+/*
+ * Backlight driver for Maxim MAX8925
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/fb.h>
+#include <linux/i2c.h>
+#include <linux/backlight.h>
+#include <linux/mfd/max8925.h>
+
+#define MAX_BRIGHTNESS (0xff)
+#define MIN_BRIGHTNESS (0)
+
+#define LWX_FREQ(x) (((x - 601) / 100) & 0x7)
+
+struct max8925_backlight_data {
+ struct max8925_chip *chip;
+
+ int current_brightness;
+};
+
+static int max8925_backlight_set(struct backlight_device *bl, int brightness)
+{
+ struct max8925_backlight_data *data = bl_get_data(bl);
+ struct max8925_chip *chip = data->chip;
+ unsigned char value;
+ int ret;
+
+ if (brightness > MAX_BRIGHTNESS)
+ value = MAX_BRIGHTNESS;
+ else
+ value = brightness;
+
+ ret = max8925_reg_write(chip->i2c, MAX8925_WLED_CNTL, value);
+ if (ret < 0)
+ goto out;
+
+ if (!data->current_brightness && brightness)
+ /* enable WLED output */
+ ret = max8925_set_bits(chip->i2c, MAX8925_WLED_MODE_CNTL, 1, 1);
+ else if (!brightness)
+ /* disable WLED output */
+ ret = max8925_set_bits(chip->i2c, MAX8925_WLED_MODE_CNTL, 1, 0);
+ if (ret < 0)
+ goto out;
+ dev_dbg(chip->dev, "set brightness %d\n", value);
+ data->current_brightness = value;
+ return 0;
+out:
+ dev_dbg(chip->dev, "set brightness %d failure with return value:%d\n",
+ value, ret);
+ return ret;
+}
+
+static int max8925_backlight_update_status(struct backlight_device *bl)
+{
+ int brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.fb_blank != FB_BLANK_UNBLANK)
+ brightness = 0;
+
+ if (bl->props.state & BL_CORE_SUSPENDED)
+ brightness = 0;
+
+ return max8925_backlight_set(bl, brightness);
+}
+
+static int max8925_backlight_get_brightness(struct backlight_device *bl)
+{
+ struct max8925_backlight_data *data = bl_get_data(bl);
+ struct max8925_chip *chip = data->chip;
+ int ret;
+
+ ret = max8925_reg_read(chip->i2c, MAX8925_WLED_CNTL);
+ if (ret < 0)
+ return -EINVAL;
+ data->current_brightness = ret;
+ dev_dbg(chip->dev, "get brightness %d\n", data->current_brightness);
+ return ret;
+}
+
+static struct backlight_ops max8925_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .update_status = max8925_backlight_update_status,
+ .get_brightness = max8925_backlight_get_brightness,
+};
+
+static int __devinit max8925_backlight_probe(struct platform_device *pdev)
+{
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct max8925_platform_data *max8925_pdata;
+ struct max8925_backlight_pdata *pdata = NULL;
+ struct max8925_backlight_data *data;
+ struct backlight_device *bl;
+ struct resource *res;
+ char name[MAX8925_NAME_SIZE];
+ unsigned char value;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "No I/O resource!\n");
+ return -EINVAL;
+ }
+
+ if (pdev->dev.parent->platform_data) {
+ max8925_pdata = pdev->dev.parent->platform_data;
+ pdata = max8925_pdata->backlight;
+ }
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "platform data isn't assigned to "
+ "backlight\n");
+ return -EINVAL;
+ }
+
+ data = kzalloc(sizeof(struct max8925_backlight_data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+ strncpy(name, res->name, MAX8925_NAME_SIZE);
+ data->chip = chip;
+ data->current_brightness = 0;
+
+ bl = backlight_device_register(name, &pdev->dev, data,
+ &max8925_backlight_ops);
+ if (IS_ERR(bl)) {
+ dev_err(&pdev->dev, "failed to register backlight\n");
+ kfree(data);
+ return PTR_ERR(bl);
+ }
+ bl->props.max_brightness = MAX_BRIGHTNESS;
+ bl->props.brightness = MAX_BRIGHTNESS;
+
+ platform_set_drvdata(pdev, bl);
+
+ value = 0;
+ if (pdata->lxw_scl)
+ value |= (1 << 7);
+ if (pdata->lxw_freq)
+ value |= (LWX_FREQ(pdata->lxw_freq) << 4);
+ if (pdata->dual_string)
+ value |= (1 << 1);
+ ret = max8925_set_bits(chip->i2c, MAX8925_WLED_MODE_CNTL, 0xfe, value);
+ if (ret < 0)
+ goto out;
+
+ backlight_update_status(bl);
+ return 0;
+out:
+ kfree(data);
+ return ret;
+}
+
+static int __devexit max8925_backlight_remove(struct platform_device *pdev)
+{
+ struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct max8925_backlight_data *data = bl_get_data(bl);
+
+ backlight_device_unregister(bl);
+ kfree(data);
+ return 0;
+}
+
+static struct platform_driver max8925_backlight_driver = {
+ .driver = {
+ .name = "max8925-backlight",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8925_backlight_probe,
+ .remove = __devexit_p(max8925_backlight_remove),
+};
+
+static int __init max8925_backlight_init(void)
+{
+ return platform_driver_register(&max8925_backlight_driver);
+}
+module_init(max8925_backlight_init);
+
+static void __exit max8925_backlight_exit(void)
+{
+ platform_driver_unregister(&max8925_backlight_driver);
+};
+module_exit(max8925_backlight_exit);
+
+MODULE_DESCRIPTION("Backlight Driver for Maxim MAX8925");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8925-backlight");
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 409ca9643528..a3a7f8938175 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -139,8 +139,6 @@ static int omapbl_probe(struct platform_device *pdev)
if (!pdata)
return -ENXIO;
- omapbl_ops.check_fb = pdata->check_fb;
-
bl = kzalloc(sizeof(struct omap_backlight), GFP_KERNEL);
if (unlikely(!bl))
return -ENOMEM;
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3681c6a88212..6f2ed5a15e0c 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -75,6 +75,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/crc32.h> /* For counting font checksums */
+#include <linux/kgdb.h>
#include <asm/fb.h>
#include <asm/irq.h>
#include <asm/system.h>
@@ -2318,6 +2319,12 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
}
}
+ if (in_dbg_master()) {
+ if (info->fbops->fb_blank)
+ info->fbops->fb_blank(blank, info);
+ return 0;
+ }
+
if (!fbcon_is_inactive(vc, info)) {
if (ops->blank_state != blank) {
ops->blank_state = blank;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 01f77bcc68f9..afea9abbd678 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -693,7 +693,7 @@ static void __devinit setup_memc(struct fb_info *fbi)
unsigned long tmp;
int i;
- /* FIXME: use platfrom specific parameters */
+ /* FIXME: use platform specific parameters */
/* setup SDRAM controller */
write_reg_dly((LMCFG_LMC_DS | LMCFG_LMC_TS | LMCFG_LMD_TS |
LMCFG_LMA_TS),
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index c7c6455f1fa8..e192b058a688 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -189,11 +189,6 @@ static struct {
struct omapfb_color_key color_key;
} dispc;
-static struct platform_device omapdss_device = {
- .name = "omapdss",
- .id = -1,
-};
-
static void enable_lcd_clocks(int enable);
static void inline dispc_write_reg(int idx, u32 val)
@@ -920,20 +915,20 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
static int get_dss_clocks(void)
{
- dispc.dss_ick = clk_get(&omapdss_device.dev, "ick");
+ dispc.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick");
if (IS_ERR(dispc.dss_ick)) {
dev_err(dispc.fbdev->dev, "can't get ick\n");
return PTR_ERR(dispc.dss_ick);
}
- dispc.dss1_fck = clk_get(&omapdss_device.dev, "dss1_fck");
+ dispc.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck");
if (IS_ERR(dispc.dss1_fck)) {
dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
clk_put(dispc.dss_ick);
return PTR_ERR(dispc.dss1_fck);
}
- dispc.dss_54m_fck = clk_get(&omapdss_device.dev, "tv_fck");
+ dispc.dss_54m_fck = clk_get(&dispc.fbdev->dssdev->dev, "tv_fck");
if (IS_ERR(dispc.dss_54m_fck)) {
dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
clk_put(dispc.dss_ick);
@@ -1385,12 +1380,6 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
int skip_init = 0;
int i;
- r = platform_device_register(&omapdss_device);
- if (r) {
- dev_err(fbdev->dev, "can't register omapdss device\n");
- return r;
- }
-
memset(&dispc, 0, sizeof(dispc));
dispc.base = ioremap(DISPC_BASE, SZ_1K);
@@ -1534,7 +1523,6 @@ static void omap_dispc_cleanup(void)
free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
put_dss_clocks();
iounmap(dispc.base);
- platform_device_unregister(&omapdss_device);
}
const struct lcd_ctrl omap2_int_ctrl = {
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index a9007c5d1fad..4802419da83b 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -115,12 +115,12 @@ struct platform_driver htcherald_panel_driver = {
},
};
-static int htcherald_panel_drv_init(void)
+static int __init htcherald_panel_drv_init(void)
{
return platform_driver_register(&htcherald_panel_driver);
}
-static void htcherald_panel_drv_cleanup(void)
+static void __exit htcherald_panel_drv_cleanup(void)
{
platform_driver_unregister(&htcherald_panel_driver);
}
diff --git a/drivers/video/omap/omapfb.h b/drivers/video/omap/omapfb.h
index 46e4714014e8..af3c9e571ec3 100644
--- a/drivers/video/omap/omapfb.h
+++ b/drivers/video/omap/omapfb.h
@@ -203,6 +203,8 @@ struct omapfb_device {
struct omapfb_mem_desc mem_desc;
struct fb_info *fb_info[OMAPFB_PLANE_NUM];
+
+ struct platform_device *dssdev; /* dummy dev for clocks */
};
#ifdef CONFIG_ARCH_OMAP1
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index c7f59a5ccdbc..2c4f470fa086 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -83,6 +83,19 @@ static struct caps_table_struct color_caps[] = {
{ 1 << OMAPFB_COLOR_YUY422, "YUY422", },
};
+static void omapdss_release(struct device *dev)
+{
+}
+
+/* dummy device for clocks */
+static struct platform_device omapdss_device = {
+ .name = "omapdss",
+ .id = -1,
+ .dev = {
+ .release = omapdss_release,
+ },
+};
+
/*
* ---------------------------------------------------------------------------
* LCD panel
@@ -1700,6 +1713,7 @@ static int omapfb_do_probe(struct platform_device *pdev,
fbdev->dev = &pdev->dev;
fbdev->panel = panel;
+ fbdev->dssdev = &omapdss_device;
platform_set_drvdata(pdev, fbdev);
mutex_init(&fbdev->rqueue_mutex);
@@ -1814,8 +1828,16 @@ cleanup:
static int omapfb_probe(struct platform_device *pdev)
{
+ int r;
+
BUG_ON(fbdev_pdev != NULL);
+ r = platform_device_register(&omapdss_device);
+ if (r) {
+ dev_err(&pdev->dev, "can't register omapdss device\n");
+ return r;
+ }
+
/* Delay actual initialization until the LCD is registered */
fbdev_pdev = pdev;
if (fbdev_panel != NULL)
@@ -1843,6 +1865,9 @@ static int omapfb_remove(struct platform_device *pdev)
fbdev->state = OMAPFB_DISABLED;
omapfb_free_resources(fbdev, saved_state);
+ platform_device_unregister(&omapdss_device);
+ fbdev->dssdev = NULL;
+
return 0;
}
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index fed7b1bda19c..1162603c72e5 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -83,13 +83,13 @@ static inline u32 rfbi_read_reg(int idx)
static int rfbi_get_clocks(void)
{
- rfbi.dss_ick = clk_get(rfbi.fbdev->dev, "ick");
+ rfbi.dss_ick = clk_get(&dispc.fbdev->dssdev->dev, "ick");
if (IS_ERR(rfbi.dss_ick)) {
dev_err(rfbi.fbdev->dev, "can't get ick\n");
return PTR_ERR(rfbi.dss_ick);
}
- rfbi.dss1_fck = clk_get(rfbi.fbdev->dev, "dss1_fck");
+ rfbi.dss1_fck = clk_get(&dispc.fbdev->dssdev->dev, "dss1_fck");
if (IS_ERR(rfbi.dss1_fck)) {
dev_err(rfbi.fbdev->dev, "can't get dss1_fck\n");
clk_put(rfbi.dss_ick);
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 71d8dec30635..c63ce767b277 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -25,6 +25,13 @@ config OMAP2_DSS_DEBUG_SUPPORT
This enables debug messages. You need to enable printing
with 'debug' module parameter.
+config OMAP2_DSS_COLLECT_IRQ_STATS
+ bool "Collect DSS IRQ statistics"
+ depends on OMAP2_DSS_DEBUG_SUPPORT
+ default n
+ help
+ Collect DSS IRQ statistics, printable via debugfs
+
config OMAP2_DSS_RFBI
bool "RFBI support"
default n
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 29497a0c9a91..82918eec6d2e 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -124,6 +124,7 @@ static void restore_all_ctx(void)
dss_clk_disable_all_no_ctx();
}
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
/* CLOCKS */
static void core_dump_clocks(struct seq_file *s)
{
@@ -149,6 +150,7 @@ static void core_dump_clocks(struct seq_file *s)
clocks[i]->usecount);
}
}
+#endif /* defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) */
static int dss_get_clock(struct clk **clock, const char *clk_name)
{
@@ -395,6 +397,14 @@ static int dss_initialize_debugfs(void)
debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
&dss_debug_dump_clocks, &dss_debug_fops);
+ debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
+ &dispc_dump_irqs, &dss_debug_fops);
+
+#ifdef CONFIG_OMAP2_DSS_DSI
+ debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir,
+ &dsi_dump_irqs, &dss_debug_fops);
+#endif
+
debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
&dss_dump_regs, &dss_debug_fops);
debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 6dabf4b2f005..de8bfbac9e26 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -148,6 +148,12 @@ static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
DISPC_VID_ATTRIBUTES(0),
DISPC_VID_ATTRIBUTES(1) };
+struct dispc_irq_stats {
+ unsigned long last_reset;
+ unsigned irq_count;
+ unsigned irqs[32];
+};
+
static struct {
void __iomem *base;
@@ -160,6 +166,11 @@ static struct {
struct work_struct error_work;
u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dispc_irq_stats irq_stats;
+#endif
} dispc;
static void _omap_dispc_set_irqs(void);
@@ -1443,7 +1454,10 @@ static unsigned long calc_fclk_five_taps(u16 width, u16 height,
do_div(tmp, 2 * out_height * ppl);
fclk = tmp;
- if (height > 2 * out_height && ppl != out_width) {
+ if (height > 2 * out_height) {
+ if (ppl == out_width)
+ return 0;
+
tmp = pclk * (height - 2 * out_height) * out_width;
do_div(tmp, 2 * out_height * (ppl - out_width));
fclk = max(fclk, (u32) tmp);
@@ -1623,7 +1637,7 @@ static int _dispc_setup_plane(enum omap_plane plane,
DSSDBG("required fclk rate = %lu Hz\n", fclk);
DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
- if (fclk > dispc_fclk_rate()) {
+ if (!fclk || fclk > dispc_fclk_rate()) {
DSSERR("failed to set up scaling, "
"required fclk rate = %lu Hz, "
"current fclk rate = %lu Hz\n",
@@ -2247,6 +2261,50 @@ void dispc_dump_clocks(struct seq_file *s)
enable_clocks(0);
}
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+void dispc_dump_irqs(struct seq_file *s)
+{
+ unsigned long flags;
+ struct dispc_irq_stats stats;
+
+ spin_lock_irqsave(&dispc.irq_stats_lock, flags);
+
+ stats = dispc.irq_stats;
+ memset(&dispc.irq_stats, 0, sizeof(dispc.irq_stats));
+ dispc.irq_stats.last_reset = jiffies;
+
+ spin_unlock_irqrestore(&dispc.irq_stats_lock, flags);
+
+ seq_printf(s, "period %u ms\n",
+ jiffies_to_msecs(jiffies - stats.last_reset));
+
+ seq_printf(s, "irqs %d\n", stats.irq_count);
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, stats.irqs[ffs(DISPC_IRQ_##x)-1]);
+
+ PIS(FRAMEDONE);
+ PIS(VSYNC);
+ PIS(EVSYNC_EVEN);
+ PIS(EVSYNC_ODD);
+ PIS(ACBIAS_COUNT_STAT);
+ PIS(PROG_LINE_NUM);
+ PIS(GFX_FIFO_UNDERFLOW);
+ PIS(GFX_END_WIN);
+ PIS(PAL_GAMMA_MASK);
+ PIS(OCP_ERR);
+ PIS(VID1_FIFO_UNDERFLOW);
+ PIS(VID1_END_WIN);
+ PIS(VID2_FIFO_UNDERFLOW);
+ PIS(VID2_END_WIN);
+ PIS(SYNC_LOST);
+ PIS(SYNC_LOST_DIGIT);
+ PIS(WAKEUP);
+#undef PIS
+}
+#else
+void dispc_dump_irqs(struct seq_file *s) { }
+#endif
+
void dispc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
@@ -2665,6 +2723,13 @@ void dispc_irq_handler(void)
irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock(&dispc.irq_stats_lock);
+ dispc.irq_stats.irq_count++;
+ dss_collect_irq_stats(irqstatus, dispc.irq_stats.irqs);
+ spin_unlock(&dispc.irq_stats_lock);
+#endif
+
#ifdef DEBUG
if (dss_debug)
print_irq_status(irqstatus);
@@ -3012,6 +3077,11 @@ int dispc_init(void)
spin_lock_init(&dispc.irq_lock);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dispc.irq_stats_lock);
+ dispc.irq_stats.last_reset = jiffies;
+#endif
+
INIT_WORK(&dispc.error_work, dispc_error_worker);
dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 5936487b5def..6122178f5f85 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -204,6 +204,14 @@ struct dsi_update_region {
struct omap_dss_device *device;
};
+struct dsi_irq_stats {
+ unsigned long last_reset;
+ unsigned irq_count;
+ unsigned dsi_irqs[32];
+ unsigned vc_irqs[4][32];
+ unsigned cio_irqs[32];
+};
+
static struct
{
void __iomem *base;
@@ -258,6 +266,11 @@ static struct
#endif
int debug_read;
int debug_write;
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spinlock_t irq_stats_lock;
+ struct dsi_irq_stats irq_stats;
+#endif
} dsi;
#ifdef DEBUG
@@ -528,6 +541,12 @@ void dsi_irq_handler(void)
irqstatus = dsi_read_reg(DSI_IRQSTATUS);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock(&dsi.irq_stats_lock);
+ dsi.irq_stats.irq_count++;
+ dss_collect_irq_stats(irqstatus, dsi.irq_stats.dsi_irqs);
+#endif
+
if (irqstatus & DSI_IRQ_ERROR_MASK) {
DSSERR("DSI error, irqstatus %x\n", irqstatus);
print_irq_status(irqstatus);
@@ -549,6 +568,10 @@ void dsi_irq_handler(void)
vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ dss_collect_irq_stats(vcstatus, dsi.irq_stats.vc_irqs[i]);
+#endif
+
if (vcstatus & DSI_VC_IRQ_BTA)
complete(&dsi.bta_completion);
@@ -568,6 +591,10 @@ void dsi_irq_handler(void)
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ dss_collect_irq_stats(ciostatus, dsi.irq_stats.cio_irqs);
+#endif
+
dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
@@ -579,6 +606,10 @@ void dsi_irq_handler(void)
dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
/* flush posted write */
dsi_read_reg(DSI_IRQSTATUS);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_unlock(&dsi.irq_stats_lock);
+#endif
}
@@ -797,12 +828,12 @@ static int dsi_pll_power(enum dsi_pll_power_state state)
/* PLL_PWR_STATUS */
while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
- udelay(1);
- if (t++ > 1000) {
+ if (++t > 1000) {
DSSERR("Failed to set DSI PLL power mode to %d\n",
state);
return -ENODEV;
}
+ udelay(1);
}
return 0;
@@ -1226,6 +1257,95 @@ void dsi_dump_clocks(struct seq_file *s)
enable_clocks(0);
}
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+void dsi_dump_irqs(struct seq_file *s)
+{
+ unsigned long flags;
+ struct dsi_irq_stats stats;
+
+ spin_lock_irqsave(&dsi.irq_stats_lock, flags);
+
+ stats = dsi.irq_stats;
+ memset(&dsi.irq_stats, 0, sizeof(dsi.irq_stats));
+ dsi.irq_stats.last_reset = jiffies;
+
+ spin_unlock_irqrestore(&dsi.irq_stats_lock, flags);
+
+ seq_printf(s, "period %u ms\n",
+ jiffies_to_msecs(jiffies - stats.last_reset));
+
+ seq_printf(s, "irqs %d\n", stats.irq_count);
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
+
+ seq_printf(s, "-- DSI interrupts --\n");
+ PIS(VC0);
+ PIS(VC1);
+ PIS(VC2);
+ PIS(VC3);
+ PIS(WAKEUP);
+ PIS(RESYNC);
+ PIS(PLL_LOCK);
+ PIS(PLL_UNLOCK);
+ PIS(PLL_RECALL);
+ PIS(COMPLEXIO_ERR);
+ PIS(HS_TX_TIMEOUT);
+ PIS(LP_RX_TIMEOUT);
+ PIS(TE_TRIGGER);
+ PIS(ACK_TRIGGER);
+ PIS(SYNC_LOST);
+ PIS(LDO_POWER_GOOD);
+ PIS(TA_TIMEOUT);
+#undef PIS
+
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
+ stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+ stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+ stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+ stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+
+ seq_printf(s, "-- VC interrupts --\n");
+ PIS(CS);
+ PIS(ECC_CORR);
+ PIS(PACKET_SENT);
+ PIS(FIFO_TX_OVF);
+ PIS(FIFO_RX_OVF);
+ PIS(BTA);
+ PIS(ECC_NO_CORR);
+ PIS(FIFO_TX_UDF);
+ PIS(PP_BUSY_CHANGE);
+#undef PIS
+
+#define PIS(x) \
+ seq_printf(s, "%-20s %10d\n", #x, \
+ stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+
+ seq_printf(s, "-- CIO interrupts --\n");
+ PIS(ERRSYNCESC1);
+ PIS(ERRSYNCESC2);
+ PIS(ERRSYNCESC3);
+ PIS(ERRESC1);
+ PIS(ERRESC2);
+ PIS(ERRESC3);
+ PIS(ERRCONTROL1);
+ PIS(ERRCONTROL2);
+ PIS(ERRCONTROL3);
+ PIS(STATEULPS1);
+ PIS(STATEULPS2);
+ PIS(STATEULPS3);
+ PIS(ERRCONTENTIONLP0_1);
+ PIS(ERRCONTENTIONLP1_1);
+ PIS(ERRCONTENTIONLP0_2);
+ PIS(ERRCONTENTIONLP1_2);
+ PIS(ERRCONTENTIONLP0_3);
+ PIS(ERRCONTENTIONLP1_3);
+ PIS(ULPSACTIVENOT_ALL0);
+ PIS(ULPSACTIVENOT_ALL1);
+#undef PIS
+}
+#endif
+
void dsi_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
@@ -1321,12 +1441,12 @@ static int dsi_complexio_power(enum dsi_complexio_power_state state)
/* PWR_STATUS */
while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
- udelay(1);
- if (t++ > 1000) {
+ if (++t > 1000) {
DSSERR("failed to set complexio power state to "
"%d\n", state);
return -ENODEV;
}
+ udelay(1);
}
return 0;
@@ -1526,10 +1646,10 @@ static void dsi_complexio_uninit(void)
static int _dsi_wait_reset(void)
{
- int i = 0;
+ int t = 0;
while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
- if (i++ > 5) {
+ if (++t > 5) {
DSSERR("soft reset failed\n");
return -ENODEV;
}
@@ -1999,7 +2119,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
return -EINVAL;
}
- data_id = data_type | channel << 6;
+ data_id = data_type | dsi.vc[channel].dest_per << 6;
r = (data_id << 0) | (data << 8) | (ecc << 24);
@@ -2011,7 +2131,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
int dsi_vc_send_null(int channel)
{
u8 nullpkg[] = {0, 0, 0, 0};
- return dsi_vc_send_long(0, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
+ return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
}
EXPORT_SYMBOL(dsi_vc_send_null);
@@ -2058,7 +2178,7 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
int r;
if (dsi.debug_read)
- DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd);
+ DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %x)\n", channel, dcs_cmd);
r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
if (r)
@@ -2586,7 +2706,6 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
/* using fifo not empty */
/* TX_FIFO_NOT_EMPTY */
while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
- udelay(1);
fifo_stalls++;
if (fifo_stalls > 0xfffff) {
DSSERR("fifo stalls overflow, pixels left %d\n",
@@ -2594,6 +2713,7 @@ static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
dsi_if_enable(0);
return -EIO;
}
+ udelay(1);
}
#elif 1
/* using fifo emptiness */
@@ -2812,11 +2932,15 @@ static int dsi_set_update_mode(struct omap_dss_device *dssdev,
static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
{
- int r;
- r = dssdev->driver->enable_te(dssdev, enable);
- /* XXX for some reason, DSI TE breaks if we don't wait here.
- * Panel bug? Needs more studying */
- msleep(100);
+ int r = 0;
+
+ if (dssdev->driver->enable_te) {
+ r = dssdev->driver->enable_te(dssdev, enable);
+ /* XXX for some reason, DSI TE breaks if we don't wait here.
+ * Panel bug? Needs more studying */
+ msleep(100);
+ }
+
return r;
}
@@ -3637,6 +3761,11 @@ int dsi_init(struct platform_device *pdev)
spin_lock_init(&dsi.errors_lock);
dsi.errors = 0;
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ spin_lock_init(&dsi.irq_stats_lock);
+ dsi.irq_stats.last_reset = jiffies;
+#endif
+
init_completion(&dsi.bta_completion);
init_completion(&dsi.update_completion);
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 9b05ee65a15d..0a26b7d84d41 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -467,14 +467,14 @@ static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
static int _omap_dss_wait_reset(void)
{
- unsigned timeout = 1000;
+ int t = 0;
while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
- udelay(1);
- if (!--timeout) {
+ if (++t > 1000) {
DSSERR("soft reset failed\n");
return -ENODEV;
}
+ udelay(1);
}
return 0;
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 8da5ac42151b..2bcb1245d6c2 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -240,6 +240,7 @@ int dsi_init(struct platform_device *pdev);
void dsi_exit(void);
void dsi_dump_clocks(struct seq_file *s);
+void dsi_dump_irqs(struct seq_file *s);
void dsi_dump_regs(struct seq_file *s);
void dsi_save_context(void);
@@ -268,6 +269,7 @@ int dpi_init_display(struct omap_dss_device *dssdev);
int dispc_init(void);
void dispc_exit(void);
void dispc_dump_clocks(struct seq_file *s);
+void dispc_dump_irqs(struct seq_file *s);
void dispc_dump_regs(struct seq_file *s);
void dispc_irq_handler(void);
void dispc_fake_vsync_irq(void);
@@ -367,4 +369,16 @@ void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
unsigned long rfbi_get_max_tx_rate(void);
int rfbi_init_display(struct omap_dss_device *display);
+
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
+{
+ int b;
+ for (b = 0; b < 32; ++b) {
+ if (irqstatus & (1 << b))
+ irq_arr[b]++;
+ }
+}
+#endif
+
#endif
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index d0b3006ad8a5..b936495c065d 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -120,7 +120,7 @@ static struct {
struct omap_dss_device *dssdev[2];
- struct kfifo *cmd_fifo;
+ struct kfifo cmd_fifo;
spinlock_t cmd_lock;
struct completion cmd_done;
atomic_t cmd_fifo_full;
@@ -1011,20 +1011,20 @@ static void process_cmd_fifo(void)
return;
while (true) {
- spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+ spin_lock_irqsave(&rfbi.cmd_lock, flags);
- len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p,
+ len = kfifo_out(&rfbi.cmd_fifo, (unsigned char *)&p,
sizeof(struct update_param));
if (len == 0) {
DSSDBG("nothing more in fifo\n");
atomic_set(&rfbi.cmd_pending, 0);
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
break;
}
/* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
BUG_ON(len != sizeof(struct update_param));
BUG_ON(p.rfbi_module > 1);
@@ -1052,25 +1052,25 @@ static void rfbi_push_cmd(struct update_param *p)
unsigned long flags;
int available;
- spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
+ spin_lock_irqsave(&rfbi.cmd_lock, flags);
available = RFBI_CMD_FIFO_LEN_BYTES -
- __kfifo_len(rfbi.cmd_fifo);
+ kfifo_len(&rfbi.cmd_fifo);
/* DSSDBG("%d bytes left in fifo\n", available); */
if (available < sizeof(struct update_param)) {
DSSDBG("Going to wait because FIFO FULL..\n");
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
atomic_inc(&rfbi.cmd_fifo_full);
wait_for_completion(&rfbi.cmd_done);
/*DSSDBG("Woke up because fifo not full anymore\n");*/
continue;
}
- ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p,
+ ret = kfifo_in(&rfbi.cmd_fifo, (unsigned char *)p,
sizeof(struct update_param));
/* DSSDBG("pushed %d bytes\n", ret);*/
- spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
+ spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
BUG_ON(ret != sizeof(struct update_param));
@@ -1155,12 +1155,12 @@ int rfbi_init(void)
{
u32 rev;
u32 l;
+ int r;
spin_lock_init(&rfbi.cmd_lock);
- rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL,
- &rfbi.cmd_lock);
- if (IS_ERR(rfbi.cmd_fifo))
- return -ENOMEM;
+ r = kfifo_alloc(&rfbi.cmd_fifo, RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL);
+ if (r)
+ return r;
init_completion(&rfbi.cmd_done);
atomic_set(&rfbi.cmd_fifo_full, 0);
@@ -1196,7 +1196,7 @@ void rfbi_exit(void)
{
DSSDBG("rfbi_exit\n");
- kfifo_free(rfbi.cmd_fifo);
+ kfifo_free(&rfbi.cmd_fifo);
iounmap(rfbi.base);
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index ef299839858a..d17caef6915a 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1311,6 +1311,7 @@ static void omapfb_free_fbmem(struct fb_info *fbi)
if (rg->vrfb.vaddr[0]) {
iounmap(rg->vrfb.vaddr[0]);
omap_vrfb_release_ctx(&rg->vrfb);
+ rg->vrfb.vaddr[0] = NULL;
}
}
@@ -2114,6 +2115,11 @@ static int omapfb_probe(struct platform_device *pdev)
dssdev = NULL;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+ if (!dssdev->driver) {
+ dev_err(&pdev->dev, "no driver for display\n");
+ r = -EINVAL;
+ goto cleanup;
+ }
fbdev->displays[fbdev->num_displays++] = dssdev;
}
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 415858b421b3..825b665245bb 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1221,9 +1221,9 @@ static void setup_smart_timing(struct pxafb_info *fbi,
static int pxafb_smart_thread(void *arg)
{
struct pxafb_info *fbi = arg;
- struct pxafb_mach_info *inf;
+ struct pxafb_mach_info *inf = fbi->dev->platform_data;
- if (!fbi || !fbi->dev->platform_data->smart_update) {
+ if (!inf->smart_update) {
pr_err("%s: not properly initialized, thread terminated\n",
__func__);
return -EINVAL;
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 609d0a521ca2..79840f11fecb 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -1102,7 +1102,7 @@ static void sst_set_vidmod_ics(struct fb_info *info, const int bpp)
* detect dac type
* prerequisite : write to FbiInitx enabled, video and fbi and pci fifo reset,
* dram refresh disabled, FbiInit remaped.
- * TODO: mmh.. maybe i shoud put the "prerequisite" in the func ...
+ * TODO: mmh.. maybe i should put the "prerequisite" in the func ...
*/
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index 980548390048..3ee5e63cfa4f 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -1571,8 +1571,8 @@ out_err_iobase:
if (default_par->mtrr_handle >= 0)
mtrr_del(default_par->mtrr_handle, info->fix.smem_start,
info->fix.smem_len);
- release_mem_region(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
+ release_region(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
out_err_screenbase:
if (info->screen_base)
iounmap(info->screen_base);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 9dd588042880..f95be86595a0 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -28,7 +28,7 @@
struct virtio_balloon
{
struct virtio_device *vdev;
- struct virtqueue *inflate_vq, *deflate_vq;
+ struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
/* Where the ballooning thread waits for config to change. */
wait_queue_head_t config_change;
@@ -49,6 +49,10 @@ struct virtio_balloon
/* The array of pfns we tell the Host about. */
unsigned int num_pfns;
u32 pfns[256];
+
+ /* Memory statistics */
+ int need_stats_update;
+ struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
};
static struct virtio_device_id id_table[] = {
@@ -154,6 +158,72 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
}
}
+static inline void update_stat(struct virtio_balloon *vb, int idx,
+ u16 tag, u64 val)
+{
+ BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
+ vb->stats[idx].tag = tag;
+ vb->stats[idx].val = val;
+}
+
+#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)
+
+static void update_balloon_stats(struct virtio_balloon *vb)
+{
+ unsigned long events[NR_VM_EVENT_ITEMS];
+ struct sysinfo i;
+ int idx = 0;
+
+ all_vm_events(events);
+ si_meminfo(&i);
+
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
+ pages_to_bytes(events[PSWPIN]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
+ pages_to_bytes(events[PSWPOUT]));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
+ pages_to_bytes(i.freeram));
+ update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
+ pages_to_bytes(i.totalram));
+}
+
+/*
+ * While most virtqueues communicate guest-initiated requests to the hypervisor,
+ * the stats queue operates in reverse. The driver initializes the virtqueue
+ * with a single buffer. From that point forward, all conversations consist of
+ * a hypervisor request (a call to this function) which directs us to refill
+ * the virtqueue with a fresh stats buffer. Since stats collection can sleep,
+ * we notify our kthread which does the actual work via stats_handle_request().
+ */
+static void stats_request(struct virtqueue *vq)
+{
+ struct virtio_balloon *vb;
+ unsigned int len;
+
+ vb = vq->vq_ops->get_buf(vq, &len);
+ if (!vb)
+ return;
+ vb->need_stats_update = 1;
+ wake_up(&vb->config_change);
+}
+
+static void stats_handle_request(struct virtio_balloon *vb)
+{
+ struct virtqueue *vq;
+ struct scatterlist sg;
+
+ vb->need_stats_update = 0;
+ update_balloon_stats(vb);
+
+ vq = vb->stats_vq;
+ sg_init_one(&sg, vb->stats, sizeof(vb->stats));
+ if (vq->vq_ops->add_buf(vq, &sg, 1, 0, vb) < 0)
+ BUG();
+ vq->vq_ops->kick(vq);
+}
+
static void virtballoon_changed(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
@@ -190,8 +260,11 @@ static int balloon(void *_vballoon)
try_to_freeze();
wait_event_interruptible(vb->config_change,
(diff = towards_target(vb)) != 0
+ || vb->need_stats_update
|| kthread_should_stop()
|| freezing(current));
+ if (vb->need_stats_update)
+ stats_handle_request(vb);
if (diff > 0)
fill_balloon(vb, diff);
else if (diff < 0)
@@ -204,10 +277,10 @@ static int balloon(void *_vballoon)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
- struct virtqueue *vqs[2];
- vq_callback_t *callbacks[] = { balloon_ack, balloon_ack };
- const char *names[] = { "inflate", "deflate" };
- int err;
+ struct virtqueue *vqs[3];
+ vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
+ const char *names[] = { "inflate", "deflate", "stats" };
+ int err, nvqs;
vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
if (!vb) {
@@ -220,13 +293,29 @@ static int virtballoon_probe(struct virtio_device *vdev)
init_waitqueue_head(&vb->config_change);
vb->vdev = vdev;
- /* We expect two virtqueues. */
- err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
+ /* We expect two virtqueues: inflate and deflate,
+ * and optionally stat. */
+ nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
+ err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
if (err)
goto out_free_vb;
vb->inflate_vq = vqs[0];
vb->deflate_vq = vqs[1];
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
+ struct scatterlist sg;
+ vb->stats_vq = vqs[2];
+
+ /*
+ * Prime this virtqueue with one buffer so the hypervisor can
+ * use it to signal us later.
+ */
+ sg_init_one(&sg, vb->stats, sizeof vb->stats);
+ if (vb->stats_vq->vq_ops->add_buf(vb->stats_vq,
+ &sg, 1, 0, vb) < 0)
+ BUG();
+ vb->stats_vq->vq_ops->kick(vb->stats_vq);
+ }
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
@@ -264,7 +353,10 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
-static unsigned int features[] = { VIRTIO_BALLOON_F_MUST_TELL_HOST };
+static unsigned int features[] = {
+ VIRTIO_BALLOON_F_MUST_TELL_HOST,
+ VIRTIO_BALLOON_F_STATS_VQ,
+};
static struct virtio_driver virtio_balloon = {
.feature_table = features,
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index acc7e3b7fe17..ad5897dc4495 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -986,7 +986,7 @@ int w1_process(void *data)
return 0;
}
-static int w1_init(void)
+static int __init w1_init(void)
{
int retval;
@@ -1034,7 +1034,7 @@ err_out_exit_init:
return retval;
}
-static void w1_fini(void)
+static void __exit w1_fini(void)
{
struct w1_master *dev;
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index 4f4b35a20d84..3c79dc587958 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
+#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 430a5848a9a5..c7a9479934af 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -96,9 +96,6 @@ static void wdt_enable(void)
{
spin_lock(&io_lock);
- if (wdt_clk)
- clk_set_rate(wdt_clk, 1);
-
/* stop counter, initiate counter reset */
__raw_writel(RESET_COUNT, WDTIM_CTRL(wdt_base));
/*wait for reset to complete. 100% guarantee event */
@@ -125,19 +122,25 @@ static void wdt_disable(void)
spin_lock(&io_lock);
__raw_writel(0, WDTIM_CTRL(wdt_base)); /*stop counter */
- if (wdt_clk)
- clk_set_rate(wdt_clk, 0);
spin_unlock(&io_lock);
}
static int pnx4008_wdt_open(struct inode *inode, struct file *file)
{
+ int ret;
+
if (test_and_set_bit(WDT_IN_USE, &wdt_status))
return -EBUSY;
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
+ ret = clk_enable(wdt_clk);
+ if (ret) {
+ clear_bit(WDT_IN_USE, &wdt_status);
+ return ret;
+ }
+
wdt_enable();
return nonseekable_open(inode, file);
@@ -225,6 +228,7 @@ static int pnx4008_wdt_release(struct inode *inode, struct file *file)
printk(KERN_WARNING "WATCHDOG: Device closed unexpectdly\n");
wdt_disable();
+ clk_disable(wdt_clk);
clear_bit(WDT_IN_USE, &wdt_status);
clear_bit(WDT_OK_TO_CLOSE, &wdt_status);
@@ -273,25 +277,33 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev)
}
wdt_base = (void __iomem *)IO_ADDRESS(res->start);
- wdt_clk = clk_get(&pdev->dev, "wdt_ck");
+ wdt_clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(wdt_clk)) {
ret = PTR_ERR(wdt_clk);
release_resource(wdt_mem);
kfree(wdt_mem);
goto out;
- } else
- clk_set_rate(wdt_clk, 1);
+ }
+
+ ret = clk_enable(wdt_clk);
+ if (ret) {
+ release_resource(wdt_mem);
+ kfree(wdt_mem);
+ goto out;
+ }
ret = misc_register(&pnx4008_wdt_miscdev);
if (ret < 0) {
printk(KERN_ERR MODULE_NAME "cannot register misc device\n");
release_resource(wdt_mem);
kfree(wdt_mem);
- clk_set_rate(wdt_clk, 0);
+ clk_disable(wdt_clk);
+ clk_put(wdt_clk);
} else {
boot_status = (__raw_readl(WDTIM_RES(wdt_base)) & WDOG_RESET) ?
WDIOF_CARDRESET : 0;
wdt_disable(); /*disable for now */
+ clk_disable(wdt_clk);
set_bit(WDT_DEVICE_INITED, &wdt_status);
}
@@ -302,11 +314,10 @@ out:
static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&pnx4008_wdt_miscdev);
- if (wdt_clk) {
- clk_set_rate(wdt_clk, 0);
- clk_put(wdt_clk);
- wdt_clk = NULL;
- }
+
+ clk_disable(wdt_clk);
+ clk_put(wdt_clk);
+
if (wdt_mem) {
release_resource(wdt_mem);
kfree(wdt_mem);
diff --git a/firmware/Makefile b/firmware/Makefile
index 6d5c3abd06be..1c00d05578f7 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -69,7 +69,8 @@ fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \
cis/DP83903.cis cis/NE2K.cis \
- cis/tamarack.cis cis/PE-200.cis
+ cis/tamarack.cis cis/PE-200.cis \
+ cis/PE520.cis
fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis
fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis
fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
diff --git a/firmware/WHENCE b/firmware/WHENCE
index 34b5d0a036db..ac174feda7cb 100644
--- a/firmware/WHENCE
+++ b/firmware/WHENCE
@@ -601,6 +601,7 @@ File: cis/LA-PCM.cis
cis/NE2K.cis
cis/tamarack.cis
cis/PE-200.cis
+ cis/PE520.cis
Licence: GPL
diff --git a/firmware/cis/PE520.cis.ihex b/firmware/cis/PE520.cis.ihex
new file mode 100644
index 000000000000..97a745b5496e
--- /dev/null
+++ b/firmware/cis/PE520.cis.ihex
@@ -0,0 +1,9 @@
+:1000000001030000FF152304014B544900504535FE
+:10001000323020504C55530050434D434941204508
+:10002000746865726E65740000FF20046101100041
+:10003000210206001A050101D00F0B1B09C101198D
+:0A00400001556530FFFF1400FF00BA
+:00000001FF
+#
+# Replacement CIS for PE520 ethernet card
+#
diff --git a/fs/Kconfig b/fs/Kconfig
index 64d44efad7a5..5f85b5947613 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -177,6 +177,7 @@ source "fs/efs/Kconfig"
source "fs/jffs2/Kconfig"
# UBIFS File system configuration
source "fs/ubifs/Kconfig"
+source "fs/logfs/Kconfig"
source "fs/cramfs/Kconfig"
source "fs/squashfs/Kconfig"
source "fs/freevxfs/Kconfig"
@@ -234,6 +235,7 @@ config NFS_COMMON
source "net/sunrpc/Kconfig"
source "fs/smbfs/Kconfig"
+source "fs/ceph/Kconfig"
source "fs/cifs/Kconfig"
source "fs/ncpfs/Kconfig"
source "fs/coda/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index af6d04700d9c..97f340f14ba2 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_NTFS_FS) += ntfs/
obj-$(CONFIG_UFS_FS) += ufs/
obj-$(CONFIG_EFS_FS) += efs/
obj-$(CONFIG_JFFS2_FS) += jffs2/
+obj-$(CONFIG_LOGFS) += logfs/
obj-$(CONFIG_UBIFS_FS) += ubifs/
obj-$(CONFIG_AFFS_FS) += affs/
obj-$(CONFIG_ROMFS_FS) += romfs/
@@ -124,3 +125,4 @@ obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
obj-$(CONFIG_GFS2_FS) += gfs2/
obj-$(CONFIG_EXOFS_FS) += exofs/
+obj-$(CONFIG_CEPH_FS) += ceph/
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 7dc85997e96c..c57d9ce5ff7e 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -171,6 +171,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
#ifdef ELF_FDPIC_PLAT_INIT
unsigned long dynaddr;
#endif
+#ifndef CONFIG_MMU
+ unsigned long stack_prot;
+#endif
struct file *interpreter = NULL; /* to shut gcc up */
char *interpreter_name = NULL;
int executable_stack;
@@ -316,6 +319,8 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
* defunct, deceased, etc. after this point we have to exit via
* error_kill */
set_personality(PER_LINUX_FDPIC);
+ if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
set_binfmt(&elf_fdpic_format);
current->mm->start_code = 0;
@@ -377,9 +382,13 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
if (stack_size < PAGE_SIZE * 2)
stack_size = PAGE_SIZE * 2;
+ stack_prot = PROT_READ | PROT_WRITE;
+ if (executable_stack == EXSTACK_ENABLE_X ||
+ (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
+ stack_prot |= PROT_EXEC;
+
down_write(&current->mm->mmap_sem);
- current->mm->start_brk = do_mmap(NULL, 0, stack_size,
- PROT_READ | PROT_WRITE | PROT_EXEC,
+ current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
MAP_PRIVATE | MAP_ANONYMOUS |
MAP_UNINITIALIZED | MAP_GROWSDOWN,
0);
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index d4a00ea1054c..52261ae9364b 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -355,7 +355,7 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
- (int) r,(int)(start_brk-start_code),(int)text_len);
+ (int) r,(int)(start_brk-start_data+text_len),(int)text_len);
goto failed;
}
diff --git a/fs/buffer.c b/fs/buffer.c
index 6fa530256bfd..1d920bab5e70 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2893,7 +2893,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
/*
* The page straddles i_size. It must be zeroed out on each and every
- * writepage invokation because it may be mmapped. "A file is mapped
+ * writepage invocation because it may be mmapped. "A file is mapped
* in multiples of the page size. For a file that is not a multiple of
* the page size, the remaining memory is zeroed when mapped, and
* writes to that region are not written out to the file."
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
new file mode 100644
index 000000000000..bc1fbd956187
--- /dev/null
+++ b/fs/ceph/Kconfig
@@ -0,0 +1,26 @@
+config CEPH_FS
+ tristate "Ceph distributed file system (EXPERIMENTAL)"
+ depends on INET && EXPERIMENTAL
+ select LIBCRC32C
+ help
+ Choose Y or M here to include support for mounting the
+ experimental Ceph distributed file system. Ceph is an extremely
+ scalable file system designed to provide high performance,
+ reliable access to petabytes of storage.
+
+ More information at http://ceph.newdream.net/.
+
+ If unsure, say N.
+
+config CEPH_FS_PRETTYDEBUG
+ bool "Include file:line in ceph debug output"
+ depends on CEPH_FS
+ default n
+ help
+ If you say Y here, debug output will include a filename and
+ line to aid debugging. This icnreases kernel size and slows
+ execution slightly when debug call sites are enabled (e.g.,
+ via CONFIG_DYNAMIC_DEBUG).
+
+ If unsure, say N.
+
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
new file mode 100644
index 000000000000..827629c85768
--- /dev/null
+++ b/fs/ceph/Makefile
@@ -0,0 +1,37 @@
+#
+# Makefile for CEPH filesystem.
+#
+
+ifneq ($(KERNELRELEASE),)
+
+obj-$(CONFIG_CEPH_FS) += ceph.o
+
+ceph-objs := super.o inode.o dir.o file.o addr.o ioctl.o \
+ export.o caps.o snap.o xattr.o \
+ messenger.o msgpool.o buffer.o \
+ mds_client.o mdsmap.o \
+ mon_client.o \
+ osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
+ debugfs.o \
+ auth.o auth_none.o \
+ ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o
+
+else
+#Otherwise we were called directly from the command
+# line; invoke the kernel build system.
+
+KERNELDIR ?= /lib/modules/$(shell uname -r)/build
+PWD := $(shell pwd)
+
+default: all
+
+all:
+ $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules
+
+modules_install:
+ $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_FS=m modules_install
+
+clean:
+ $(MAKE) -C $(KERNELDIR) M=$(PWD) clean
+
+endif
diff --git a/fs/ceph/README b/fs/ceph/README
new file mode 100644
index 000000000000..18352fab37c0
--- /dev/null
+++ b/fs/ceph/README
@@ -0,0 +1,20 @@
+#
+# The following files are shared by (and manually synchronized
+# between) the Ceph userland and kernel client.
+#
+# userland kernel
+src/include/ceph_fs.h fs/ceph/ceph_fs.h
+src/include/ceph_fs.cc fs/ceph/ceph_fs.c
+src/include/msgr.h fs/ceph/msgr.h
+src/include/rados.h fs/ceph/rados.h
+src/include/ceph_strings.cc fs/ceph/ceph_strings.c
+src/include/ceph_frag.h fs/ceph/ceph_frag.h
+src/include/ceph_frag.cc fs/ceph/ceph_frag.c
+src/include/ceph_hash.h fs/ceph/ceph_hash.h
+src/include/ceph_hash.cc fs/ceph/ceph_hash.c
+src/crush/crush.c fs/ceph/crush/crush.c
+src/crush/crush.h fs/ceph/crush/crush.h
+src/crush/mapper.c fs/ceph/crush/mapper.c
+src/crush/mapper.h fs/ceph/crush/mapper.h
+src/crush/hash.h fs/ceph/crush/hash.h
+src/crush/hash.c fs/ceph/crush/hash.c
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
new file mode 100644
index 000000000000..eab46b0a7aa1
--- /dev/null
+++ b/fs/ceph/addr.c
@@ -0,0 +1,1114 @@
+#include "ceph_debug.h"
+
+#include <linux/backing-dev.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/writeback.h> /* generic_writepages */
+#include <linux/pagevec.h>
+#include <linux/task_io_accounting_ops.h>
+
+#include "super.h"
+#include "osd_client.h"
+
+/*
+ * Ceph address space ops.
+ *
+ * There are a few funny things going on here.
+ *
+ * The page->private field is used to reference a struct
+ * ceph_snap_context for _every_ dirty page. This indicates which
+ * snapshot the page was logically dirtied in, and thus which snap
+ * context needs to be associated with the osd write during writeback.
+ *
+ * Similarly, struct ceph_inode_info maintains a set of counters to
+ * count dirty pages on the inode. In the absense of snapshots,
+ * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
+ *
+ * When a snapshot is taken (that is, when the client receives
+ * notification that a snapshot was taken), each inode with caps and
+ * with dirty pages (dirty pages implies there is a cap) gets a new
+ * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
+ * order, new snaps go to the tail). The i_wrbuffer_ref_head count is
+ * moved to capsnap->dirty. (Unless a sync write is currently in
+ * progress. In that case, the capsnap is said to be "pending", new
+ * writes cannot start, and the capsnap isn't "finalized" until the
+ * write completes (or fails) and a final size/mtime for the inode for
+ * that snap can be settled upon.) i_wrbuffer_ref_head is reset to 0.
+ *
+ * On writeback, we must submit writes to the osd IN SNAP ORDER. So,
+ * we look for the first capsnap in i_cap_snaps and write out pages in
+ * that snap context _only_. Then we move on to the next capsnap,
+ * eventually reaching the "live" or "head" context (i.e., pages that
+ * are not yet snapped) and are writing the most recently dirtied
+ * pages.
+ *
+ * Invalidate and so forth must take care to ensure the dirty page
+ * accounting is preserved.
+ */
+
+
+/*
+ * Dirty a page. Optimistically adjust accounting, on the assumption
+ * that we won't race with invalidate. If we do, readjust.
+ */
+static int ceph_set_page_dirty(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ struct inode *inode;
+ struct ceph_inode_info *ci;
+ int undo = 0;
+ struct ceph_snap_context *snapc;
+
+ if (unlikely(!mapping))
+ return !TestSetPageDirty(page);
+
+ if (TestSetPageDirty(page)) {
+ dout("%p set_page_dirty %p idx %lu -- already dirty\n",
+ mapping->host, page, page->index);
+ return 0;
+ }
+
+ inode = mapping->host;
+ ci = ceph_inode(inode);
+
+ /*
+ * Note that we're grabbing a snapc ref here without holding
+ * any locks!
+ */
+ snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
+
+ /* dirty the head */
+ spin_lock(&inode->i_lock);
+ if (ci->i_wrbuffer_ref_head == 0)
+ ci->i_head_snapc = ceph_get_snap_context(snapc);
+ ++ci->i_wrbuffer_ref_head;
+ if (ci->i_wrbuffer_ref == 0)
+ igrab(inode);
+ ++ci->i_wrbuffer_ref;
+ dout("%p set_page_dirty %p idx %lu head %d/%d -> %d/%d "
+ "snapc %p seq %lld (%d snaps)\n",
+ mapping->host, page, page->index,
+ ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
+ ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
+ snapc, snapc->seq, snapc->num_snaps);
+ spin_unlock(&inode->i_lock);
+
+ /* now adjust page */
+ spin_lock_irq(&mapping->tree_lock);
+ if (page->mapping) { /* Race with truncate? */
+ WARN_ON_ONCE(!PageUptodate(page));
+
+ if (mapping_cap_account_dirty(mapping)) {
+ __inc_zone_page_state(page, NR_FILE_DIRTY);
+ __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTY);
+ task_io_account_write(PAGE_CACHE_SIZE);
+ }
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+
+ /*
+ * Reference snap context in page->private. Also set
+ * PagePrivate so that we get invalidatepage callback.
+ */
+ page->private = (unsigned long)snapc;
+ SetPagePrivate(page);
+ } else {
+ dout("ANON set_page_dirty %p (raced truncate?)\n", page);
+ undo = 1;
+ }
+
+ spin_unlock_irq(&mapping->tree_lock);
+
+ if (undo)
+ /* whoops, we failed to dirty the page */
+ ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
+
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+
+ BUG_ON(!PageDirty(page));
+ return 1;
+}
+
+/*
+ * If we are truncating the full page (i.e. offset == 0), adjust the
+ * dirty page counters appropriately. Only called if there is private
+ * data on the page.
+ */
+static void ceph_invalidatepage(struct page *page, unsigned long offset)
+{
+ struct inode *inode = page->mapping->host;
+ struct ceph_inode_info *ci;
+ struct ceph_snap_context *snapc = (void *)page->private;
+
+ BUG_ON(!PageLocked(page));
+ BUG_ON(!page->private);
+ BUG_ON(!PagePrivate(page));
+ BUG_ON(!page->mapping);
+
+ /*
+ * We can get non-dirty pages here due to races between
+ * set_page_dirty and truncate_complete_page; just spit out a
+ * warning, in case we end up with accounting problems later.
+ */
+ if (!PageDirty(page))
+ pr_err("%p invalidatepage %p page not dirty\n", inode, page);
+
+ if (offset == 0)
+ ClearPageChecked(page);
+
+ ci = ceph_inode(inode);
+ if (offset == 0) {
+ dout("%p invalidatepage %p idx %lu full dirty page %lu\n",
+ inode, page, page->index, offset);
+ ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
+ ceph_put_snap_context(snapc);
+ page->private = 0;
+ ClearPagePrivate(page);
+ } else {
+ dout("%p invalidatepage %p idx %lu partial dirty page\n",
+ inode, page, page->index);
+ }
+}
+
+/* just a sanity check */
+static int ceph_releasepage(struct page *page, gfp_t g)
+{
+ struct inode *inode = page->mapping ? page->mapping->host : NULL;
+ dout("%p releasepage %p idx %lu\n", inode, page, page->index);
+ WARN_ON(PageDirty(page));
+ WARN_ON(page->private);
+ WARN_ON(PagePrivate(page));
+ return 0;
+}
+
+/*
+ * read a single page, without unlocking it.
+ */
+static int readpage_nounlock(struct file *filp, struct page *page)
+{
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+ int err = 0;
+ u64 len = PAGE_CACHE_SIZE;
+
+ dout("readpage inode %p file %p page %p index %lu\n",
+ inode, filp, page, page->index);
+ err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
+ page->index << PAGE_CACHE_SHIFT, &len,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ &page, 1);
+ if (err == -ENOENT)
+ err = 0;
+ if (err < 0) {
+ SetPageError(page);
+ goto out;
+ } else if (err < PAGE_CACHE_SIZE) {
+ /* zero fill remainder of page */
+ zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ }
+ SetPageUptodate(page);
+
+out:
+ return err < 0 ? err : 0;
+}
+
+static int ceph_readpage(struct file *filp, struct page *page)
+{
+ int r = readpage_nounlock(filp, page);
+ unlock_page(page);
+ return r;
+}
+
+/*
+ * Build a vector of contiguous pages from the provided page list.
+ */
+static struct page **page_vector_from_list(struct list_head *page_list,
+ unsigned *nr_pages)
+{
+ struct page **pages;
+ struct page *page;
+ int next_index, contig_pages = 0;
+
+ /* build page vector */
+ pages = kmalloc(sizeof(*pages) * *nr_pages, GFP_NOFS);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ BUG_ON(list_empty(page_list));
+ next_index = list_entry(page_list->prev, struct page, lru)->index;
+ list_for_each_entry_reverse(page, page_list, lru) {
+ if (page->index == next_index) {
+ dout("readpages page %d %p\n", contig_pages, page);
+ pages[contig_pages] = page;
+ contig_pages++;
+ next_index++;
+ } else {
+ break;
+ }
+ }
+ *nr_pages = contig_pages;
+ return pages;
+}
+
+/*
+ * Read multiple pages. Leave pages we don't read + unlock in page_list;
+ * the caller (VM) cleans them up.
+ */
+static int ceph_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *page_list, unsigned nr_pages)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+ int rc = 0;
+ struct page **pages;
+ struct pagevec pvec;
+ loff_t offset;
+ u64 len;
+
+ dout("readpages %p file %p nr_pages %d\n",
+ inode, file, nr_pages);
+
+ pages = page_vector_from_list(page_list, &nr_pages);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ /* guess read extent */
+ offset = pages[0]->index << PAGE_CACHE_SHIFT;
+ len = nr_pages << PAGE_CACHE_SHIFT;
+ rc = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
+ offset, &len,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ pages, nr_pages);
+ if (rc == -ENOENT)
+ rc = 0;
+ if (rc < 0)
+ goto out;
+
+ /* set uptodate and add to lru in pagevec-sized chunks */
+ pagevec_init(&pvec, 0);
+ for (; !list_empty(page_list) && len > 0;
+ rc -= PAGE_CACHE_SIZE, len -= PAGE_CACHE_SIZE) {
+ struct page *page =
+ list_entry(page_list->prev, struct page, lru);
+
+ list_del(&page->lru);
+
+ if (rc < (int)PAGE_CACHE_SIZE) {
+ /* zero (remainder of) page */
+ int s = rc < 0 ? 0 : rc;
+ zero_user_segment(page, s, PAGE_CACHE_SIZE);
+ }
+
+ if (add_to_page_cache(page, mapping, page->index, GFP_NOFS)) {
+ page_cache_release(page);
+ dout("readpages %p add_to_page_cache failed %p\n",
+ inode, page);
+ continue;
+ }
+ dout("readpages %p adding %p idx %lu\n", inode, page,
+ page->index);
+ flush_dcache_page(page);
+ SetPageUptodate(page);
+ unlock_page(page);
+ if (pagevec_add(&pvec, page) == 0)
+ pagevec_lru_add_file(&pvec); /* add to lru */
+ }
+ pagevec_lru_add_file(&pvec);
+ rc = 0;
+
+out:
+ kfree(pages);
+ return rc;
+}
+
+/*
+ * Get ref for the oldest snapc for an inode with dirty data... that is, the
+ * only snap context we are allowed to write back.
+ *
+ * Caller holds i_lock.
+ */
+static struct ceph_snap_context *__get_oldest_context(struct inode *inode,
+ u64 *snap_size)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_snap_context *snapc = NULL;
+ struct ceph_cap_snap *capsnap = NULL;
+
+ list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
+ dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
+ capsnap->context, capsnap->dirty_pages);
+ if (capsnap->dirty_pages) {
+ snapc = ceph_get_snap_context(capsnap->context);
+ if (snap_size)
+ *snap_size = capsnap->size;
+ break;
+ }
+ }
+ if (!snapc && ci->i_snap_realm) {
+ snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
+ dout(" head snapc %p has %d dirty pages\n",
+ snapc, ci->i_wrbuffer_ref_head);
+ }
+ return snapc;
+}
+
+static struct ceph_snap_context *get_oldest_context(struct inode *inode,
+ u64 *snap_size)
+{
+ struct ceph_snap_context *snapc = NULL;
+
+ spin_lock(&inode->i_lock);
+ snapc = __get_oldest_context(inode, snap_size);
+ spin_unlock(&inode->i_lock);
+ return snapc;
+}
+
+/*
+ * Write a single page, but leave the page locked.
+ *
+ * If we get a write error, set the page error bit, but still adjust the
+ * dirty page accounting (i.e., page is no longer dirty).
+ */
+static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
+{
+ struct inode *inode;
+ struct ceph_inode_info *ci;
+ struct ceph_osd_client *osdc;
+ loff_t page_off = page->index << PAGE_CACHE_SHIFT;
+ int len = PAGE_CACHE_SIZE;
+ loff_t i_size;
+ int err = 0;
+ struct ceph_snap_context *snapc;
+ u64 snap_size = 0;
+
+ dout("writepage %p idx %lu\n", page, page->index);
+
+ if (!page->mapping || !page->mapping->host) {
+ dout("writepage %p - no mapping\n", page);
+ return -EFAULT;
+ }
+ inode = page->mapping->host;
+ ci = ceph_inode(inode);
+ osdc = &ceph_inode_to_client(inode)->osdc;
+
+ /* verify this is a writeable snap context */
+ snapc = (void *)page->private;
+ if (snapc == NULL) {
+ dout("writepage %p page %p not dirty?\n", inode, page);
+ goto out;
+ }
+ if (snapc != get_oldest_context(inode, &snap_size)) {
+ dout("writepage %p page %p snapc %p not writeable - noop\n",
+ inode, page, (void *)page->private);
+ /* we should only noop if called by kswapd */
+ WARN_ON((current->flags & PF_MEMALLOC) == 0);
+ goto out;
+ }
+
+ /* is this a partial page at end of file? */
+ if (snap_size)
+ i_size = snap_size;
+ else
+ i_size = i_size_read(inode);
+ if (i_size < page_off + len)
+ len = i_size - page_off;
+
+ dout("writepage %p page %p index %lu on %llu~%u\n",
+ inode, page, page->index, page_off, len);
+
+ set_page_writeback(page);
+ err = ceph_osdc_writepages(osdc, ceph_vino(inode),
+ &ci->i_layout, snapc,
+ page_off, len,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ &inode->i_mtime,
+ &page, 1, 0, 0, true);
+ if (err < 0) {
+ dout("writepage setting page/mapping error %d %p\n", err, page);
+ SetPageError(page);
+ mapping_set_error(&inode->i_data, err);
+ if (wbc)
+ wbc->pages_skipped++;
+ } else {
+ dout("writepage cleaned page %p\n", page);
+ err = 0; /* vfs expects us to return 0 */
+ }
+ page->private = 0;
+ ClearPagePrivate(page);
+ end_page_writeback(page);
+ ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
+ ceph_put_snap_context(snapc);
+out:
+ return err;
+}
+
+static int ceph_writepage(struct page *page, struct writeback_control *wbc)
+{
+ int err = writepage_nounlock(page, wbc);
+ unlock_page(page);
+ return err;
+}
+
+
+/*
+ * lame release_pages helper. release_pages() isn't exported to
+ * modules.
+ */
+static void ceph_release_pages(struct page **pages, int num)
+{
+ struct pagevec pvec;
+ int i;
+
+ pagevec_init(&pvec, 0);
+ for (i = 0; i < num; i++) {
+ if (pagevec_add(&pvec, pages[i]) == 0)
+ pagevec_release(&pvec);
+ }
+ pagevec_release(&pvec);
+}
+
+
+/*
+ * async writeback completion handler.
+ *
+ * If we get an error, set the mapping error bit, but not the individual
+ * page error bits.
+ */
+static void writepages_finish(struct ceph_osd_request *req,
+ struct ceph_msg *msg)
+{
+ struct inode *inode = req->r_inode;
+ struct ceph_osd_reply_head *replyhead;
+ struct ceph_osd_op *op;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ unsigned wrote;
+ loff_t offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
+ struct page *page;
+ int i;
+ struct ceph_snap_context *snapc = req->r_snapc;
+ struct address_space *mapping = inode->i_mapping;
+ struct writeback_control *wbc = req->r_wbc;
+ __s32 rc = -EIO;
+ u64 bytes = 0;
+
+ /* parse reply */
+ replyhead = msg->front.iov_base;
+ WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
+ op = (void *)(replyhead + 1);
+ rc = le32_to_cpu(replyhead->result);
+ bytes = le64_to_cpu(op->extent.length);
+
+ if (rc >= 0) {
+ wrote = (bytes + (offset & ~PAGE_CACHE_MASK) + ~PAGE_CACHE_MASK)
+ >> PAGE_CACHE_SHIFT;
+ WARN_ON(wrote != req->r_num_pages);
+ } else {
+ wrote = 0;
+ mapping_set_error(mapping, rc);
+ }
+ dout("writepages_finish %p rc %d bytes %llu wrote %d (pages)\n",
+ inode, rc, bytes, wrote);
+
+ /* clean all pages */
+ for (i = 0; i < req->r_num_pages; i++) {
+ page = req->r_pages[i];
+ BUG_ON(!page);
+ WARN_ON(!PageUptodate(page));
+
+ if (i >= wrote) {
+ dout("inode %p skipping page %p\n", inode, page);
+ wbc->pages_skipped++;
+ }
+ page->private = 0;
+ ClearPagePrivate(page);
+ ceph_put_snap_context(snapc);
+ dout("unlocking %d %p\n", i, page);
+ end_page_writeback(page);
+ unlock_page(page);
+ }
+ dout("%p wrote+cleaned %d pages\n", inode, wrote);
+ ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc);
+
+ ceph_release_pages(req->r_pages, req->r_num_pages);
+ if (req->r_pages_from_pool)
+ mempool_free(req->r_pages,
+ ceph_client(inode->i_sb)->wb_pagevec_pool);
+ else
+ kfree(req->r_pages);
+ ceph_osdc_put_request(req);
+}
+
+/*
+ * allocate a page vec, either directly, or if necessary, via a the
+ * mempool. we avoid the mempool if we can because req->r_num_pages
+ * may be less than the maximum write size.
+ */
+static void alloc_page_vec(struct ceph_client *client,
+ struct ceph_osd_request *req)
+{
+ req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
+ GFP_NOFS);
+ if (!req->r_pages) {
+ req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS);
+ req->r_pages_from_pool = 1;
+ WARN_ON(!req->r_pages);
+ }
+}
+
+/*
+ * initiate async writeback
+ */
+static int ceph_writepages_start(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ struct backing_dev_info *bdi = mapping->backing_dev_info;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *client = ceph_inode_to_client(inode);
+ pgoff_t index, start, end;
+ int range_whole = 0;
+ int should_loop = 1;
+ pgoff_t max_pages = 0, max_pages_ever = 0;
+ struct ceph_snap_context *snapc = NULL, *last_snapc = NULL;
+ struct pagevec pvec;
+ int done = 0;
+ int rc = 0;
+ unsigned wsize = 1 << inode->i_blkbits;
+ struct ceph_osd_request *req = NULL;
+ int do_sync;
+ u64 snap_size = 0;
+
+ /*
+ * Include a 'sync' in the OSD request if this is a data
+ * integrity write (e.g., O_SYNC write or fsync()), or if our
+ * cap is being revoked.
+ */
+ do_sync = wbc->sync_mode == WB_SYNC_ALL;
+ if (ceph_caps_revoking(ci, CEPH_CAP_FILE_BUFFER))
+ do_sync = 1;
+ dout("writepages_start %p dosync=%d (mode=%s)\n",
+ inode, do_sync,
+ wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
+ (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
+
+ client = ceph_inode_to_client(inode);
+ if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
+ pr_warning("writepage_start %p on forced umount\n", inode);
+ return -EIO; /* we're in a forced umount, don't write! */
+ }
+ if (client->mount_args->wsize && client->mount_args->wsize < wsize)
+ wsize = client->mount_args->wsize;
+ if (wsize < PAGE_CACHE_SIZE)
+ wsize = PAGE_CACHE_SIZE;
+ max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
+
+ pagevec_init(&pvec, 0);
+
+ /* ?? */
+ if (wbc->nonblocking && bdi_write_congested(bdi)) {
+ dout(" writepages congested\n");
+ wbc->encountered_congestion = 1;
+ goto out_final;
+ }
+
+ /* where to start/end? */
+ if (wbc->range_cyclic) {
+ start = mapping->writeback_index; /* Start from prev offset */
+ end = -1;
+ dout(" cyclic, start at %lu\n", start);
+ } else {
+ start = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end = wbc->range_end >> PAGE_CACHE_SHIFT;
+ if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
+ range_whole = 1;
+ should_loop = 0;
+ dout(" not cyclic, %lu to %lu\n", start, end);
+ }
+ index = start;
+
+retry:
+ /* find oldest snap context with dirty data */
+ ceph_put_snap_context(snapc);
+ snapc = get_oldest_context(inode, &snap_size);
+ if (!snapc) {
+ /* hmm, why does writepages get called when there
+ is no dirty data? */
+ dout(" no snap context with dirty data?\n");
+ goto out;
+ }
+ dout(" oldest snapc is %p seq %lld (%d snaps)\n",
+ snapc, snapc->seq, snapc->num_snaps);
+ if (last_snapc && snapc != last_snapc) {
+ /* if we switched to a newer snapc, restart our scan at the
+ * start of the original file range. */
+ dout(" snapc differs from last pass, restarting at %lu\n",
+ index);
+ index = start;
+ }
+ last_snapc = snapc;
+
+ while (!done && index <= end) {
+ unsigned i;
+ int first;
+ pgoff_t next;
+ int pvec_pages, locked_pages;
+ struct page *page;
+ int want;
+ u64 offset, len;
+ struct ceph_osd_request_head *reqhead;
+ struct ceph_osd_op *op;
+
+ next = 0;
+ locked_pages = 0;
+ max_pages = max_pages_ever;
+
+get_more_pages:
+ first = -1;
+ want = min(end - index,
+ min((pgoff_t)PAGEVEC_SIZE,
+ max_pages - (pgoff_t)locked_pages) - 1)
+ + 1;
+ pvec_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ want);
+ dout("pagevec_lookup_tag got %d\n", pvec_pages);
+ if (!pvec_pages && !locked_pages)
+ break;
+ for (i = 0; i < pvec_pages && locked_pages < max_pages; i++) {
+ page = pvec.pages[i];
+ dout("? %p idx %lu\n", page, page->index);
+ if (locked_pages == 0)
+ lock_page(page); /* first page */
+ else if (!trylock_page(page))
+ break;
+
+ /* only dirty pages, or our accounting breaks */
+ if (unlikely(!PageDirty(page)) ||
+ unlikely(page->mapping != mapping)) {
+ dout("!dirty or !mapping %p\n", page);
+ unlock_page(page);
+ break;
+ }
+ if (!wbc->range_cyclic && page->index > end) {
+ dout("end of range %p\n", page);
+ done = 1;
+ unlock_page(page);
+ break;
+ }
+ if (next && (page->index != next)) {
+ dout("not consecutive %p\n", page);
+ unlock_page(page);
+ break;
+ }
+ if (wbc->sync_mode != WB_SYNC_NONE) {
+ dout("waiting on writeback %p\n", page);
+ wait_on_page_writeback(page);
+ }
+ if ((snap_size && page_offset(page) > snap_size) ||
+ (!snap_size &&
+ page_offset(page) > i_size_read(inode))) {
+ dout("%p page eof %llu\n", page, snap_size ?
+ snap_size : i_size_read(inode));
+ done = 1;
+ unlock_page(page);
+ break;
+ }
+ if (PageWriteback(page)) {
+ dout("%p under writeback\n", page);
+ unlock_page(page);
+ break;
+ }
+
+ /* only if matching snap context */
+ if (snapc != (void *)page->private) {
+ dout("page snapc %p != oldest %p\n",
+ (void *)page->private, snapc);
+ unlock_page(page);
+ if (!locked_pages)
+ continue; /* keep looking for snap */
+ break;
+ }
+
+ if (!clear_page_dirty_for_io(page)) {
+ dout("%p !clear_page_dirty_for_io\n", page);
+ unlock_page(page);
+ break;
+ }
+
+ /* ok */
+ if (locked_pages == 0) {
+ /* prepare async write request */
+ offset = page->index << PAGE_CACHE_SHIFT;
+ len = wsize;
+ req = ceph_osdc_new_request(&client->osdc,
+ &ci->i_layout,
+ ceph_vino(inode),
+ offset, &len,
+ CEPH_OSD_OP_WRITE,
+ CEPH_OSD_FLAG_WRITE |
+ CEPH_OSD_FLAG_ONDISK,
+ snapc, do_sync,
+ ci->i_truncate_seq,
+ ci->i_truncate_size,
+ &inode->i_mtime, true, 1);
+ max_pages = req->r_num_pages;
+
+ alloc_page_vec(client, req);
+ req->r_callback = writepages_finish;
+ req->r_inode = inode;
+ req->r_wbc = wbc;
+ }
+
+ /* note position of first page in pvec */
+ if (first < 0)
+ first = i;
+ dout("%p will write page %p idx %lu\n",
+ inode, page, page->index);
+ set_page_writeback(page);
+ req->r_pages[locked_pages] = page;
+ locked_pages++;
+ next = page->index + 1;
+ }
+
+ /* did we get anything? */
+ if (!locked_pages)
+ goto release_pvec_pages;
+ if (i) {
+ int j;
+ BUG_ON(!locked_pages || first < 0);
+
+ if (pvec_pages && i == pvec_pages &&
+ locked_pages < max_pages) {
+ dout("reached end pvec, trying for more\n");
+ pagevec_reinit(&pvec);
+ goto get_more_pages;
+ }
+
+ /* shift unused pages over in the pvec... we
+ * will need to release them below. */
+ for (j = i; j < pvec_pages; j++) {
+ dout(" pvec leftover page %p\n",
+ pvec.pages[j]);
+ pvec.pages[j-i+first] = pvec.pages[j];
+ }
+ pvec.nr -= i-first;
+ }
+
+ /* submit the write */
+ offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT;
+ len = min((snap_size ? snap_size : i_size_read(inode)) - offset,
+ (u64)locked_pages << PAGE_CACHE_SHIFT);
+ dout("writepages got %d pages at %llu~%llu\n",
+ locked_pages, offset, len);
+
+ /* revise final length, page count */
+ req->r_num_pages = locked_pages;
+ reqhead = req->r_request->front.iov_base;
+ op = (void *)(reqhead + 1);
+ op->extent.length = cpu_to_le64(len);
+ op->payload_len = cpu_to_le32(len);
+ req->r_request->hdr.data_len = cpu_to_le32(len);
+
+ ceph_osdc_start_request(&client->osdc, req, true);
+ req = NULL;
+
+ /* continue? */
+ index = next;
+ wbc->nr_to_write -= locked_pages;
+ if (wbc->nr_to_write <= 0)
+ done = 1;
+
+release_pvec_pages:
+ dout("pagevec_release on %d pages (%p)\n", (int)pvec.nr,
+ pvec.nr ? pvec.pages[0] : NULL);
+ pagevec_release(&pvec);
+
+ if (locked_pages && !done)
+ goto retry;
+ }
+
+ if (should_loop && !done) {
+ /* more to do; loop back to beginning of file */
+ dout("writepages looping back to beginning of file\n");
+ should_loop = 0;
+ index = 0;
+ goto retry;
+ }
+
+ if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
+ mapping->writeback_index = index;
+
+out:
+ if (req)
+ ceph_osdc_put_request(req);
+ if (rc > 0)
+ rc = 0; /* vfs expects us to return 0 */
+ ceph_put_snap_context(snapc);
+ dout("writepages done, rc = %d\n", rc);
+out_final:
+ return rc;
+}
+
+
+
+/*
+ * See if a given @snapc is either writeable, or already written.
+ */
+static int context_is_writeable_or_written(struct inode *inode,
+ struct ceph_snap_context *snapc)
+{
+ struct ceph_snap_context *oldest = get_oldest_context(inode, NULL);
+ return !oldest || snapc->seq <= oldest->seq;
+}
+
+/*
+ * We are only allowed to write into/dirty the page if the page is
+ * clean, or already dirty within the same snap context.
+ */
+static int ceph_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ struct page *page;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ loff_t page_off = pos & PAGE_CACHE_MASK;
+ int pos_in_page = pos & ~PAGE_CACHE_MASK;
+ int end_in_page = pos_in_page + len;
+ loff_t i_size;
+ struct ceph_snap_context *snapc;
+ int r;
+
+ /* get a page*/
+retry:
+ page = grab_cache_page_write_begin(mapping, index, 0);
+ if (!page)
+ return -ENOMEM;
+ *pagep = page;
+
+ dout("write_begin file %p inode %p page %p %d~%d\n", file,
+ inode, page, (int)pos, (int)len);
+
+retry_locked:
+ /* writepages currently holds page lock, but if we change that later, */
+ wait_on_page_writeback(page);
+
+ /* check snap context */
+ BUG_ON(!ci->i_snap_realm);
+ down_read(&mdsc->snap_rwsem);
+ BUG_ON(!ci->i_snap_realm->cached_context);
+ if (page->private &&
+ (void *)page->private != ci->i_snap_realm->cached_context) {
+ /*
+ * this page is already dirty in another (older) snap
+ * context! is it writeable now?
+ */
+ snapc = get_oldest_context(inode, NULL);
+ up_read(&mdsc->snap_rwsem);
+
+ if (snapc != (void *)page->private) {
+ dout(" page %p snapc %p not current or oldest\n",
+ page, (void *)page->private);
+ /*
+ * queue for writeback, and wait for snapc to
+ * be writeable or written
+ */
+ snapc = ceph_get_snap_context((void *)page->private);
+ unlock_page(page);
+ if (ceph_queue_writeback(inode))
+ igrab(inode);
+ wait_event_interruptible(ci->i_cap_wq,
+ context_is_writeable_or_written(inode, snapc));
+ ceph_put_snap_context(snapc);
+ goto retry;
+ }
+
+ /* yay, writeable, do it now (without dropping page lock) */
+ dout(" page %p snapc %p not current, but oldest\n",
+ page, snapc);
+ if (!clear_page_dirty_for_io(page))
+ goto retry_locked;
+ r = writepage_nounlock(page, NULL);
+ if (r < 0)
+ goto fail_nosnap;
+ goto retry_locked;
+ }
+
+ if (PageUptodate(page)) {
+ dout(" page %p already uptodate\n", page);
+ return 0;
+ }
+
+ /* full page? */
+ if (pos_in_page == 0 && len == PAGE_CACHE_SIZE)
+ return 0;
+
+ /* past end of file? */
+ i_size = inode->i_size; /* caller holds i_mutex */
+
+ if (i_size + len > inode->i_sb->s_maxbytes) {
+ /* file is too big */
+ r = -EINVAL;
+ goto fail;
+ }
+
+ if (page_off >= i_size ||
+ (pos_in_page == 0 && (pos+len) >= i_size &&
+ end_in_page - pos_in_page != PAGE_CACHE_SIZE)) {
+ dout(" zeroing %p 0 - %d and %d - %d\n",
+ page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE);
+ zero_user_segments(page,
+ 0, pos_in_page,
+ end_in_page, PAGE_CACHE_SIZE);
+ return 0;
+ }
+
+ /* we need to read it. */
+ up_read(&mdsc->snap_rwsem);
+ r = readpage_nounlock(file, page);
+ if (r < 0)
+ goto fail_nosnap;
+ goto retry_locked;
+
+fail:
+ up_read(&mdsc->snap_rwsem);
+fail_nosnap:
+ unlock_page(page);
+ return r;
+}
+
+/*
+ * we don't do anything in here that simple_write_end doesn't do
+ * except adjust dirty page accounting and drop read lock on
+ * mdsc->snap_rwsem.
+ */
+static int ceph_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ int check_cap = 0;
+
+ dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
+ inode, page, (int)pos, (int)copied, (int)len);
+
+ /* zero the stale part of the page if we did a short copy */
+ if (copied < len)
+ zero_user_segment(page, from+copied, len);
+
+ /* did file size increase? */
+ /* (no need for i_size_read(); we caller holds i_mutex */
+ if (pos+copied > inode->i_size)
+ check_cap = ceph_inode_set_size(inode, pos+copied);
+
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
+
+ set_page_dirty(page);
+
+ unlock_page(page);
+ up_read(&mdsc->snap_rwsem);
+ page_cache_release(page);
+
+ if (check_cap)
+ ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
+
+ return copied;
+}
+
+/*
+ * we set .direct_IO to indicate direct io is supported, but since we
+ * intercept O_DIRECT reads and writes early, this function should
+ * never get called.
+ */
+static ssize_t ceph_direct_io(int rw, struct kiocb *iocb,
+ const struct iovec *iov,
+ loff_t pos, unsigned long nr_segs)
+{
+ WARN_ON(1);
+ return -EINVAL;
+}
+
+const struct address_space_operations ceph_aops = {
+ .readpage = ceph_readpage,
+ .readpages = ceph_readpages,
+ .writepage = ceph_writepage,
+ .writepages = ceph_writepages_start,
+ .write_begin = ceph_write_begin,
+ .write_end = ceph_write_end,
+ .set_page_dirty = ceph_set_page_dirty,
+ .invalidatepage = ceph_invalidatepage,
+ .releasepage = ceph_releasepage,
+ .direct_IO = ceph_direct_io,
+};
+
+
+/*
+ * vm ops
+ */
+
+/*
+ * Reuse write_begin here for simplicity.
+ */
+static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct inode *inode = vma->vm_file->f_dentry->d_inode;
+ struct page *page = vmf->page;
+ struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ loff_t off = page->index << PAGE_CACHE_SHIFT;
+ loff_t size, len;
+ struct page *locked_page = NULL;
+ void *fsdata = NULL;
+ int ret;
+
+ size = i_size_read(inode);
+ if (off + PAGE_CACHE_SIZE <= size)
+ len = PAGE_CACHE_SIZE;
+ else
+ len = size & ~PAGE_CACHE_MASK;
+
+ dout("page_mkwrite %p %llu~%llu page %p idx %lu\n", inode,
+ off, len, page, page->index);
+ ret = ceph_write_begin(vma->vm_file, inode->i_mapping, off, len, 0,
+ &locked_page, &fsdata);
+ WARN_ON(page != locked_page);
+ if (!ret) {
+ /*
+ * doing the following, instead of calling
+ * ceph_write_end. Note that we keep the
+ * page locked
+ */
+ set_page_dirty(page);
+ up_read(&mdsc->snap_rwsem);
+ page_cache_release(page);
+ ret = VM_FAULT_LOCKED;
+ } else {
+ ret = VM_FAULT_SIGBUS;
+ }
+ dout("page_mkwrite %p %llu~%llu = %d\n", inode, off, len, ret);
+ return ret;
+}
+
+static struct vm_operations_struct ceph_vmops = {
+ .fault = filemap_fault,
+ .page_mkwrite = ceph_page_mkwrite,
+};
+
+int ceph_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct address_space *mapping = file->f_mapping;
+
+ if (!mapping->a_ops->readpage)
+ return -ENOEXEC;
+ file_accessed(file);
+ vma->vm_ops = &ceph_vmops;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+ return 0;
+}
diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c
new file mode 100644
index 000000000000..32f2e2a021ab
--- /dev/null
+++ b/fs/ceph/auth.c
@@ -0,0 +1,225 @@
+#include "ceph_debug.h"
+
+#include <linux/module.h>
+#include <linux/err.h>
+
+#include "types.h"
+#include "auth_none.h"
+#include "decode.h"
+#include "super.h"
+
+#include "messenger.h"
+
+/*
+ * get protocol handler
+ */
+static u32 supported_protocols[] = {
+ CEPH_AUTH_NONE
+};
+
+int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol)
+{
+ switch (protocol) {
+ case CEPH_AUTH_NONE:
+ return ceph_auth_none_init(ac);
+ default:
+ return -ENOENT;
+ }
+}
+
+/*
+ * setup, teardown.
+ */
+struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret)
+{
+ struct ceph_auth_client *ac;
+ int ret;
+
+ dout("auth_init name '%s' secret '%s'\n", name, secret);
+
+ ret = -ENOMEM;
+ ac = kzalloc(sizeof(*ac), GFP_NOFS);
+ if (!ac)
+ goto out;
+
+ ac->negotiating = true;
+ if (name)
+ ac->name = name;
+ else
+ ac->name = CEPH_AUTH_NAME_DEFAULT;
+ dout("auth_init name %s secret %s\n", ac->name, secret);
+ ac->secret = secret;
+ return ac;
+
+out:
+ return ERR_PTR(ret);
+}
+
+void ceph_auth_destroy(struct ceph_auth_client *ac)
+{
+ dout("auth_destroy %p\n", ac);
+ if (ac->ops)
+ ac->ops->destroy(ac);
+ kfree(ac);
+}
+
+/*
+ * Reset occurs when reconnecting to the monitor.
+ */
+void ceph_auth_reset(struct ceph_auth_client *ac)
+{
+ dout("auth_reset %p\n", ac);
+ if (ac->ops && !ac->negotiating)
+ ac->ops->reset(ac);
+ ac->negotiating = true;
+}
+
+int ceph_entity_name_encode(const char *name, void **p, void *end)
+{
+ int len = strlen(name);
+
+ if (*p + 2*sizeof(u32) + len > end)
+ return -ERANGE;
+ ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT);
+ ceph_encode_32(p, len);
+ ceph_encode_copy(p, name, len);
+ return 0;
+}
+
+/*
+ * Initiate protocol negotiation with monitor. Include entity name
+ * and list supported protocols.
+ */
+int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len)
+{
+ struct ceph_mon_request_header *monhdr = buf;
+ void *p = monhdr + 1, *end = buf + len, *lenp;
+ int i, num;
+ int ret;
+
+ dout("auth_build_hello\n");
+ monhdr->have_version = 0;
+ monhdr->session_mon = cpu_to_le16(-1);
+ monhdr->session_mon_tid = 0;
+
+ ceph_encode_32(&p, 0); /* no protocol, yet */
+
+ lenp = p;
+ p += sizeof(u32);
+
+ num = ARRAY_SIZE(supported_protocols);
+ ceph_encode_32(&p, num);
+ for (i = 0; i < num; i++)
+ ceph_encode_32(&p, supported_protocols[i]);
+
+ ret = ceph_entity_name_encode(ac->name, &p, end);
+ if (ret < 0)
+ return ret;
+ ceph_decode_need(&p, end, sizeof(u64), bad);
+ ceph_encode_64(&p, ac->global_id);
+
+ ceph_encode_32(&lenp, p - lenp - sizeof(u32));
+ return p - buf;
+
+bad:
+ return -ERANGE;
+}
+
+/*
+ * Handle auth message from monitor.
+ */
+int ceph_handle_auth_reply(struct ceph_auth_client *ac,
+ void *buf, size_t len,
+ void *reply_buf, size_t reply_len)
+{
+ void *p = buf;
+ void *end = buf + len;
+ int protocol;
+ s32 result;
+ u64 global_id;
+ void *payload, *payload_end;
+ int payload_len;
+ char *result_msg;
+ int result_msg_len;
+ int ret = -EINVAL;
+
+ dout("handle_auth_reply %p %p\n", p, end);
+ ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad);
+ protocol = ceph_decode_32(&p);
+ result = ceph_decode_32(&p);
+ global_id = ceph_decode_64(&p);
+ payload_len = ceph_decode_32(&p);
+ payload = p;
+ p += payload_len;
+ ceph_decode_need(&p, end, sizeof(u32), bad);
+ result_msg_len = ceph_decode_32(&p);
+ result_msg = p;
+ p += result_msg_len;
+ if (p != end)
+ goto bad;
+
+ dout(" result %d '%.*s' gid %llu len %d\n", result, result_msg_len,
+ result_msg, global_id, payload_len);
+
+ payload_end = payload + payload_len;
+
+ if (global_id && ac->global_id != global_id) {
+ dout(" set global_id %lld -> %lld\n", ac->global_id, global_id);
+ ac->global_id = global_id;
+ }
+
+ if (ac->negotiating) {
+ /* server does not support our protocols? */
+ if (!protocol && result < 0) {
+ ret = result;
+ goto out;
+ }
+ /* set up (new) protocol handler? */
+ if (ac->protocol && ac->protocol != protocol) {
+ ac->ops->destroy(ac);
+ ac->protocol = 0;
+ ac->ops = NULL;
+ }
+ if (ac->protocol != protocol) {
+ ret = ceph_auth_init_protocol(ac, protocol);
+ if (ret) {
+ pr_err("error %d on auth protocol %d init\n",
+ ret, protocol);
+ goto out;
+ }
+ }
+ }
+
+ ret = ac->ops->handle_reply(ac, result, payload, payload_end);
+ if (ret == -EAGAIN) {
+ struct ceph_mon_request_header *monhdr = reply_buf;
+ void *p = reply_buf + 1;
+ void *end = reply_buf + reply_len;
+
+ monhdr->have_version = 0;
+ monhdr->session_mon = cpu_to_le16(-1);
+ monhdr->session_mon_tid = 0;
+
+ ceph_encode_32(&p, ac->protocol);
+
+ ret = ac->ops->build_request(ac, p + sizeof(u32), end);
+ if (ret < 0) {
+ pr_err("error %d building request\n", ret);
+ goto out;
+ }
+ dout(" built request %d bytes\n", ret);
+ ceph_encode_32(&p, ret);
+ return p + ret - reply_buf;
+ } else if (ret) {
+ pr_err("authentication error %d\n", ret);
+ return ret;
+ }
+ return 0;
+
+bad:
+ pr_err("failed to decode auth msg\n");
+out:
+ return ret;
+}
+
+
diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h
new file mode 100644
index 000000000000..4d8cdf6bb3b6
--- /dev/null
+++ b/fs/ceph/auth.h
@@ -0,0 +1,77 @@
+#ifndef _FS_CEPH_AUTH_H
+#define _FS_CEPH_AUTH_H
+
+#include "types.h"
+#include "buffer.h"
+
+/*
+ * Abstract interface for communicating with the authenticate module.
+ * There is some handshake that takes place between us and the monitor
+ * to acquire the necessary keys. These are used to generate an
+ * 'authorizer' that we use when connecting to a service (mds, osd).
+ */
+
+struct ceph_auth_client;
+struct ceph_authorizer;
+
+struct ceph_auth_client_ops {
+ /*
+ * true if we are authenticated and can connect to
+ * services.
+ */
+ int (*is_authenticated)(struct ceph_auth_client *ac);
+
+ /*
+ * build requests and process replies during monitor
+ * handshake. if handle_reply returns -EAGAIN, we build
+ * another request.
+ */
+ int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
+ int (*handle_reply)(struct ceph_auth_client *ac, int result,
+ void *buf, void *end);
+
+ /*
+ * Create authorizer for connecting to a service, and verify
+ * the response to authenticate the service.
+ */
+ int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
+ struct ceph_authorizer **a,
+ void **buf, size_t *len,
+ void **reply_buf, size_t *reply_len);
+ int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a, size_t len);
+ void (*destroy_authorizer)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a);
+
+ /* reset when we (re)connect to a monitor */
+ void (*reset)(struct ceph_auth_client *ac);
+
+ void (*destroy)(struct ceph_auth_client *ac);
+};
+
+struct ceph_auth_client {
+ u32 protocol; /* CEPH_AUTH_* */
+ void *private; /* for use by protocol implementation */
+ const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */
+
+ bool negotiating; /* true if negotiating protocol */
+ const char *name; /* entity name */
+ u64 global_id; /* our unique id in system */
+ const char *secret; /* our secret key */
+ unsigned want_keys; /* which services we want */
+};
+
+extern struct ceph_auth_client *ceph_auth_init(const char *name,
+ const char *secret);
+extern void ceph_auth_destroy(struct ceph_auth_client *ac);
+
+extern void ceph_auth_reset(struct ceph_auth_client *ac);
+
+extern int ceph_auth_build_hello(struct ceph_auth_client *ac,
+ void *buf, size_t len);
+extern int ceph_handle_auth_reply(struct ceph_auth_client *ac,
+ void *buf, size_t len,
+ void *reply_buf, size_t reply_len);
+extern int ceph_entity_name_encode(const char *name, void **p, void *end);
+
+#endif
diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c
new file mode 100644
index 000000000000..631017eb7117
--- /dev/null
+++ b/fs/ceph/auth_none.c
@@ -0,0 +1,120 @@
+
+#include "ceph_debug.h"
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/random.h>
+
+#include "auth_none.h"
+#include "auth.h"
+#include "decode.h"
+
+static void reset(struct ceph_auth_client *ac)
+{
+ struct ceph_auth_none_info *xi = ac->private;
+
+ xi->starting = true;
+ xi->built_authorizer = false;
+}
+
+static void destroy(struct ceph_auth_client *ac)
+{
+ kfree(ac->private);
+ ac->private = NULL;
+}
+
+static int is_authenticated(struct ceph_auth_client *ac)
+{
+ struct ceph_auth_none_info *xi = ac->private;
+
+ return !xi->starting;
+}
+
+/*
+ * the generic auth code decode the global_id, and we carry no actual
+ * authenticate state, so nothing happens here.
+ */
+static int handle_reply(struct ceph_auth_client *ac, int result,
+ void *buf, void *end)
+{
+ struct ceph_auth_none_info *xi = ac->private;
+
+ xi->starting = false;
+ return result;
+}
+
+/*
+ * build an 'authorizer' with our entity_name and global_id. we can
+ * reuse a single static copy since it is identical for all services
+ * we connect to.
+ */
+static int ceph_auth_none_create_authorizer(
+ struct ceph_auth_client *ac, int peer_type,
+ struct ceph_authorizer **a,
+ void **buf, size_t *len,
+ void **reply_buf, size_t *reply_len)
+{
+ struct ceph_auth_none_info *ai = ac->private;
+ struct ceph_none_authorizer *au = &ai->au;
+ void *p, *end;
+ int ret;
+
+ if (!ai->built_authorizer) {
+ p = au->buf;
+ end = p + sizeof(au->buf);
+ ret = ceph_entity_name_encode(ac->name, &p, end - 8);
+ if (ret < 0)
+ goto bad;
+ ceph_decode_need(&p, end, sizeof(u64), bad2);
+ ceph_encode_64(&p, ac->global_id);
+ au->buf_len = p - (void *)au->buf;
+ ai->built_authorizer = true;
+ dout("built authorizer len %d\n", au->buf_len);
+ }
+
+ *a = (struct ceph_authorizer *)au;
+ *buf = au->buf;
+ *len = au->buf_len;
+ *reply_buf = au->reply_buf;
+ *reply_len = sizeof(au->reply_buf);
+ return 0;
+
+bad2:
+ ret = -ERANGE;
+bad:
+ return ret;
+}
+
+static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a)
+{
+ /* nothing to do */
+}
+
+static const struct ceph_auth_client_ops ceph_auth_none_ops = {
+ .reset = reset,
+ .destroy = destroy,
+ .is_authenticated = is_authenticated,
+ .handle_reply = handle_reply,
+ .create_authorizer = ceph_auth_none_create_authorizer,
+ .destroy_authorizer = ceph_auth_none_destroy_authorizer,
+};
+
+int ceph_auth_none_init(struct ceph_auth_client *ac)
+{
+ struct ceph_auth_none_info *xi;
+
+ dout("ceph_auth_none_init %p\n", ac);
+ xi = kzalloc(sizeof(*xi), GFP_NOFS);
+ if (!xi)
+ return -ENOMEM;
+
+ xi->starting = true;
+ xi->built_authorizer = false;
+
+ ac->protocol = CEPH_AUTH_NONE;
+ ac->private = xi;
+ ac->ops = &ceph_auth_none_ops;
+ return 0;
+}
+
diff --git a/fs/ceph/auth_none.h b/fs/ceph/auth_none.h
new file mode 100644
index 000000000000..56c05533a31c
--- /dev/null
+++ b/fs/ceph/auth_none.h
@@ -0,0 +1,28 @@
+#ifndef _FS_CEPH_AUTH_NONE_H
+#define _FS_CEPH_AUTH_NONE_H
+
+#include "auth.h"
+
+/*
+ * null security mode.
+ *
+ * we use a single static authorizer that simply encodes our entity name
+ * and global id.
+ */
+
+struct ceph_none_authorizer {
+ char buf[128];
+ int buf_len;
+ char reply_buf[0];
+};
+
+struct ceph_auth_none_info {
+ bool starting;
+ bool built_authorizer;
+ struct ceph_none_authorizer au; /* we only need one; it's static */
+};
+
+extern int ceph_auth_none_init(struct ceph_auth_client *ac);
+
+#endif
+
diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c
new file mode 100644
index 000000000000..2576bd452cb8
--- /dev/null
+++ b/fs/ceph/buffer.c
@@ -0,0 +1,61 @@
+
+#include "ceph_debug.h"
+#include "buffer.h"
+
+struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
+{
+ struct ceph_buffer *b;
+
+ b = kmalloc(sizeof(*b), gfp);
+ if (!b)
+ return NULL;
+
+ b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
+ if (b->vec.iov_base) {
+ b->is_vmalloc = false;
+ } else {
+ b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
+ if (!b->vec.iov_base) {
+ kfree(b);
+ return NULL;
+ }
+ b->is_vmalloc = true;
+ }
+
+ kref_init(&b->kref);
+ b->alloc_len = len;
+ b->vec.iov_len = len;
+ dout("buffer_new %p\n", b);
+ return b;
+}
+
+void ceph_buffer_release(struct kref *kref)
+{
+ struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
+
+ dout("buffer_release %p\n", b);
+ if (b->vec.iov_base) {
+ if (b->is_vmalloc)
+ vfree(b->vec.iov_base);
+ else
+ kfree(b->vec.iov_base);
+ }
+ kfree(b);
+}
+
+int ceph_buffer_alloc(struct ceph_buffer *b, int len, gfp_t gfp)
+{
+ b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
+ if (b->vec.iov_base) {
+ b->is_vmalloc = false;
+ } else {
+ b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
+ b->is_vmalloc = true;
+ }
+ if (!b->vec.iov_base)
+ return -ENOMEM;
+ b->alloc_len = len;
+ b->vec.iov_len = len;
+ return 0;
+}
+
diff --git a/fs/ceph/buffer.h b/fs/ceph/buffer.h
new file mode 100644
index 000000000000..47b9514c5bbd
--- /dev/null
+++ b/fs/ceph/buffer.h
@@ -0,0 +1,37 @@
+#ifndef __FS_CEPH_BUFFER_H
+#define __FS_CEPH_BUFFER_H
+
+#include <linux/kref.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/types.h>
+#include <linux/uio.h>
+
+/*
+ * a simple reference counted buffer.
+ *
+ * use kmalloc for small sizes (<= one page), vmalloc for larger
+ * sizes.
+ */
+struct ceph_buffer {
+ struct kref kref;
+ struct kvec vec;
+ size_t alloc_len;
+ bool is_vmalloc;
+};
+
+extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp);
+extern void ceph_buffer_release(struct kref *kref);
+
+static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
+{
+ kref_get(&b->kref);
+ return b;
+}
+
+static inline void ceph_buffer_put(struct ceph_buffer *b)
+{
+ kref_put(&b->kref, ceph_buffer_release);
+}
+
+#endif
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
new file mode 100644
index 000000000000..9b9ce143ac1f
--- /dev/null
+++ b/fs/ceph/caps.c
@@ -0,0 +1,2863 @@
+#include "ceph_debug.h"
+
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/wait.h>
+
+#include "super.h"
+#include "decode.h"
+#include "messenger.h"
+
+/*
+ * Capability management
+ *
+ * The Ceph metadata servers control client access to inode metadata
+ * and file data by issuing capabilities, granting clients permission
+ * to read and/or write both inode field and file data to OSDs
+ * (storage nodes). Each capability consists of a set of bits
+ * indicating which operations are allowed.
+ *
+ * If the client holds a *_SHARED cap, the client has a coherent value
+ * that can be safely read from the cached inode.
+ *
+ * In the case of a *_EXCL (exclusive) or FILE_WR capabilities, the
+ * client is allowed to change inode attributes (e.g., file size,
+ * mtime), note its dirty state in the ceph_cap, and asynchronously
+ * flush that metadata change to the MDS.
+ *
+ * In the event of a conflicting operation (perhaps by another
+ * client), the MDS will revoke the conflicting client capabilities.
+ *
+ * In order for a client to cache an inode, it must hold a capability
+ * with at least one MDS server. When inodes are released, release
+ * notifications are batched and periodically sent en masse to the MDS
+ * cluster to release server state.
+ */
+
+
+/*
+ * Generate readable cap strings for debugging output.
+ */
+#define MAX_CAP_STR 20
+static char cap_str[MAX_CAP_STR][40];
+static DEFINE_SPINLOCK(cap_str_lock);
+static int last_cap_str;
+
+static char *gcap_string(char *s, int c)
+{
+ if (c & CEPH_CAP_GSHARED)
+ *s++ = 's';
+ if (c & CEPH_CAP_GEXCL)
+ *s++ = 'x';
+ if (c & CEPH_CAP_GCACHE)
+ *s++ = 'c';
+ if (c & CEPH_CAP_GRD)
+ *s++ = 'r';
+ if (c & CEPH_CAP_GWR)
+ *s++ = 'w';
+ if (c & CEPH_CAP_GBUFFER)
+ *s++ = 'b';
+ if (c & CEPH_CAP_GLAZYIO)
+ *s++ = 'l';
+ return s;
+}
+
+const char *ceph_cap_string(int caps)
+{
+ int i;
+ char *s;
+ int c;
+
+ spin_lock(&cap_str_lock);
+ i = last_cap_str++;
+ if (last_cap_str == MAX_CAP_STR)
+ last_cap_str = 0;
+ spin_unlock(&cap_str_lock);
+
+ s = cap_str[i];
+
+ if (caps & CEPH_CAP_PIN)
+ *s++ = 'p';
+
+ c = (caps >> CEPH_CAP_SAUTH) & 3;
+ if (c) {
+ *s++ = 'A';
+ s = gcap_string(s, c);
+ }
+
+ c = (caps >> CEPH_CAP_SLINK) & 3;
+ if (c) {
+ *s++ = 'L';
+ s = gcap_string(s, c);
+ }
+
+ c = (caps >> CEPH_CAP_SXATTR) & 3;
+ if (c) {
+ *s++ = 'X';
+ s = gcap_string(s, c);
+ }
+
+ c = caps >> CEPH_CAP_SFILE;
+ if (c) {
+ *s++ = 'F';
+ s = gcap_string(s, c);
+ }
+
+ if (s == cap_str[i])
+ *s++ = '-';
+ *s = 0;
+ return cap_str[i];
+}
+
+/*
+ * Cap reservations
+ *
+ * Maintain a global pool of preallocated struct ceph_caps, referenced
+ * by struct ceph_caps_reservations. This ensures that we preallocate
+ * memory needed to successfully process an MDS response. (If an MDS
+ * sends us cap information and we fail to process it, we will have
+ * problems due to the client and MDS being out of sync.)
+ *
+ * Reservations are 'owned' by a ceph_cap_reservation context.
+ */
+static spinlock_t caps_list_lock;
+static struct list_head caps_list; /* unused (reserved or unreserved) */
+static int caps_total_count; /* total caps allocated */
+static int caps_use_count; /* in use */
+static int caps_reserve_count; /* unused, reserved */
+static int caps_avail_count; /* unused, unreserved */
+
+void __init ceph_caps_init(void)
+{
+ INIT_LIST_HEAD(&caps_list);
+ spin_lock_init(&caps_list_lock);
+}
+
+void ceph_caps_finalize(void)
+{
+ struct ceph_cap *cap;
+
+ spin_lock(&caps_list_lock);
+ while (!list_empty(&caps_list)) {
+ cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
+ list_del(&cap->caps_item);
+ kmem_cache_free(ceph_cap_cachep, cap);
+ }
+ caps_total_count = 0;
+ caps_avail_count = 0;
+ caps_use_count = 0;
+ caps_reserve_count = 0;
+ spin_unlock(&caps_list_lock);
+}
+
+int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need)
+{
+ int i;
+ struct ceph_cap *cap;
+ int have;
+ int alloc = 0;
+ LIST_HEAD(newcaps);
+ int ret = 0;
+
+ dout("reserve caps ctx=%p need=%d\n", ctx, need);
+
+ /* first reserve any caps that are already allocated */
+ spin_lock(&caps_list_lock);
+ if (caps_avail_count >= need)
+ have = need;
+ else
+ have = caps_avail_count;
+ caps_avail_count -= have;
+ caps_reserve_count += have;
+ BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
+ caps_avail_count);
+ spin_unlock(&caps_list_lock);
+
+ for (i = have; i < need; i++) {
+ cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
+ if (!cap) {
+ ret = -ENOMEM;
+ goto out_alloc_count;
+ }
+ list_add(&cap->caps_item, &newcaps);
+ alloc++;
+ }
+ BUG_ON(have + alloc != need);
+
+ spin_lock(&caps_list_lock);
+ caps_total_count += alloc;
+ caps_reserve_count += alloc;
+ list_splice(&newcaps, &caps_list);
+
+ BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
+ caps_avail_count);
+ spin_unlock(&caps_list_lock);
+
+ ctx->count = need;
+ dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
+ ctx, caps_total_count, caps_use_count, caps_reserve_count,
+ caps_avail_count);
+ return 0;
+
+out_alloc_count:
+ /* we didn't manage to reserve as much as we needed */
+ pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
+ ctx, need, have);
+ return ret;
+}
+
+int ceph_unreserve_caps(struct ceph_cap_reservation *ctx)
+{
+ dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
+ if (ctx->count) {
+ spin_lock(&caps_list_lock);
+ BUG_ON(caps_reserve_count < ctx->count);
+ caps_reserve_count -= ctx->count;
+ caps_avail_count += ctx->count;
+ ctx->count = 0;
+ dout("unreserve caps %d = %d used + %d resv + %d avail\n",
+ caps_total_count, caps_use_count, caps_reserve_count,
+ caps_avail_count);
+ BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
+ caps_avail_count);
+ spin_unlock(&caps_list_lock);
+ }
+ return 0;
+}
+
+static struct ceph_cap *get_cap(struct ceph_cap_reservation *ctx)
+{
+ struct ceph_cap *cap = NULL;
+
+ /* temporary, until we do something about cap import/export */
+ if (!ctx)
+ return kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
+
+ spin_lock(&caps_list_lock);
+ dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
+ ctx, ctx->count, caps_total_count, caps_use_count,
+ caps_reserve_count, caps_avail_count);
+ BUG_ON(!ctx->count);
+ BUG_ON(ctx->count > caps_reserve_count);
+ BUG_ON(list_empty(&caps_list));
+
+ ctx->count--;
+ caps_reserve_count--;
+ caps_use_count++;
+
+ cap = list_first_entry(&caps_list, struct ceph_cap, caps_item);
+ list_del(&cap->caps_item);
+
+ BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
+ caps_avail_count);
+ spin_unlock(&caps_list_lock);
+ return cap;
+}
+
+static void put_cap(struct ceph_cap *cap,
+ struct ceph_cap_reservation *ctx)
+{
+ spin_lock(&caps_list_lock);
+ dout("put_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
+ ctx, ctx ? ctx->count : 0, caps_total_count, caps_use_count,
+ caps_reserve_count, caps_avail_count);
+ caps_use_count--;
+ /*
+ * Keep some preallocated caps around, at least enough to do a
+ * readdir (which needs to preallocate lots of them), to avoid
+ * lots of free/alloc churn.
+ */
+ if (caps_avail_count >= caps_reserve_count +
+ ceph_client(cap->ci->vfs_inode.i_sb)->mount_args->max_readdir) {
+ caps_total_count--;
+ kmem_cache_free(ceph_cap_cachep, cap);
+ } else {
+ if (ctx) {
+ ctx->count++;
+ caps_reserve_count++;
+ } else {
+ caps_avail_count++;
+ }
+ list_add(&cap->caps_item, &caps_list);
+ }
+
+ BUG_ON(caps_total_count != caps_use_count + caps_reserve_count +
+ caps_avail_count);
+ spin_unlock(&caps_list_lock);
+}
+
+void ceph_reservation_status(struct ceph_client *client,
+ int *total, int *avail, int *used, int *reserved)
+{
+ if (total)
+ *total = caps_total_count;
+ if (avail)
+ *avail = caps_avail_count;
+ if (used)
+ *used = caps_use_count;
+ if (reserved)
+ *reserved = caps_reserve_count;
+}
+
+/*
+ * Find ceph_cap for given mds, if any.
+ *
+ * Called with i_lock held.
+ */
+static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
+{
+ struct ceph_cap *cap;
+ struct rb_node *n = ci->i_caps.rb_node;
+
+ while (n) {
+ cap = rb_entry(n, struct ceph_cap, ci_node);
+ if (mds < cap->mds)
+ n = n->rb_left;
+ else if (mds > cap->mds)
+ n = n->rb_right;
+ else
+ return cap;
+ }
+ return NULL;
+}
+
+/*
+ * Return id of any MDS with a cap, preferably FILE_WR|WRBUFFER|EXCL, else
+ * -1.
+ */
+static int __ceph_get_cap_mds(struct ceph_inode_info *ci, u32 *mseq)
+{
+ struct ceph_cap *cap;
+ int mds = -1;
+ struct rb_node *p;
+
+ /* prefer mds with WR|WRBUFFER|EXCL caps */
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ cap = rb_entry(p, struct ceph_cap, ci_node);
+ mds = cap->mds;
+ if (mseq)
+ *mseq = cap->mseq;
+ if (cap->issued & (CEPH_CAP_FILE_WR |
+ CEPH_CAP_FILE_BUFFER |
+ CEPH_CAP_FILE_EXCL))
+ break;
+ }
+ return mds;
+}
+
+int ceph_get_cap_mds(struct inode *inode)
+{
+ int mds;
+ spin_lock(&inode->i_lock);
+ mds = __ceph_get_cap_mds(ceph_inode(inode), NULL);
+ spin_unlock(&inode->i_lock);
+ return mds;
+}
+
+/*
+ * Called under i_lock.
+ */
+static void __insert_cap_node(struct ceph_inode_info *ci,
+ struct ceph_cap *new)
+{
+ struct rb_node **p = &ci->i_caps.rb_node;
+ struct rb_node *parent = NULL;
+ struct ceph_cap *cap = NULL;
+
+ while (*p) {
+ parent = *p;
+ cap = rb_entry(parent, struct ceph_cap, ci_node);
+ if (new->mds < cap->mds)
+ p = &(*p)->rb_left;
+ else if (new->mds > cap->mds)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&new->ci_node, parent, p);
+ rb_insert_color(&new->ci_node, &ci->i_caps);
+}
+
+/*
+ * (re)set cap hold timeouts, which control the delayed release
+ * of unused caps back to the MDS. Should be called on cap use.
+ */
+static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
+ struct ceph_inode_info *ci)
+{
+ struct ceph_mount_args *ma = mdsc->client->mount_args;
+
+ ci->i_hold_caps_min = round_jiffies(jiffies +
+ ma->caps_wanted_delay_min * HZ);
+ ci->i_hold_caps_max = round_jiffies(jiffies +
+ ma->caps_wanted_delay_max * HZ);
+ dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
+ ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
+}
+
+/*
+ * (Re)queue cap at the end of the delayed cap release list.
+ *
+ * If I_FLUSH is set, leave the inode at the front of the list.
+ *
+ * Caller holds i_lock
+ * -> we take mdsc->cap_delay_lock
+ */
+static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
+ struct ceph_inode_info *ci)
+{
+ __cap_set_timeouts(mdsc, ci);
+ dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
+ ci->i_ceph_flags, ci->i_hold_caps_max);
+ if (!mdsc->stopping) {
+ spin_lock(&mdsc->cap_delay_lock);
+ if (!list_empty(&ci->i_cap_delay_list)) {
+ if (ci->i_ceph_flags & CEPH_I_FLUSH)
+ goto no_change;
+ list_del_init(&ci->i_cap_delay_list);
+ }
+ list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
+no_change:
+ spin_unlock(&mdsc->cap_delay_lock);
+ }
+}
+
+/*
+ * Queue an inode for immediate writeback. Mark inode with I_FLUSH,
+ * indicating we should send a cap message to flush dirty metadata
+ * asap, and move to the front of the delayed cap list.
+ */
+static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
+ struct ceph_inode_info *ci)
+{
+ dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
+ spin_lock(&mdsc->cap_delay_lock);
+ ci->i_ceph_flags |= CEPH_I_FLUSH;
+ if (!list_empty(&ci->i_cap_delay_list))
+ list_del_init(&ci->i_cap_delay_list);
+ list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
+ spin_unlock(&mdsc->cap_delay_lock);
+}
+
+/*
+ * Cancel delayed work on cap.
+ *
+ * Caller must hold i_lock.
+ */
+static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
+ struct ceph_inode_info *ci)
+{
+ dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
+ if (list_empty(&ci->i_cap_delay_list))
+ return;
+ spin_lock(&mdsc->cap_delay_lock);
+ list_del_init(&ci->i_cap_delay_list);
+ spin_unlock(&mdsc->cap_delay_lock);
+}
+
+/*
+ * Common issue checks for add_cap, handle_cap_grant.
+ */
+static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
+ unsigned issued)
+{
+ unsigned had = __ceph_caps_issued(ci, NULL);
+
+ /*
+ * Each time we receive FILE_CACHE anew, we increment
+ * i_rdcache_gen.
+ */
+ if ((issued & CEPH_CAP_FILE_CACHE) &&
+ (had & CEPH_CAP_FILE_CACHE) == 0)
+ ci->i_rdcache_gen++;
+
+ /*
+ * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
+ * don't know what happened to this directory while we didn't
+ * have the cap.
+ */
+ if ((issued & CEPH_CAP_FILE_SHARED) &&
+ (had & CEPH_CAP_FILE_SHARED) == 0) {
+ ci->i_shared_gen++;
+ if (S_ISDIR(ci->vfs_inode.i_mode)) {
+ dout(" marking %p NOT complete\n", &ci->vfs_inode);
+ ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+ }
+ }
+}
+
+/*
+ * Add a capability under the given MDS session.
+ *
+ * Caller should hold session snap_rwsem (read) and s_mutex.
+ *
+ * @fmode is the open file mode, if we are opening a file, otherwise
+ * it is < 0. (This is so we can atomically add the cap and add an
+ * open file reference to it.)
+ */
+int ceph_add_cap(struct inode *inode,
+ struct ceph_mds_session *session, u64 cap_id,
+ int fmode, unsigned issued, unsigned wanted,
+ unsigned seq, unsigned mseq, u64 realmino, int flags,
+ struct ceph_cap_reservation *caps_reservation)
+{
+ struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap *new_cap = NULL;
+ struct ceph_cap *cap;
+ int mds = session->s_mds;
+ int actual_wanted;
+
+ dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
+ session->s_mds, cap_id, ceph_cap_string(issued), seq);
+
+ /*
+ * If we are opening the file, include file mode wanted bits
+ * in wanted.
+ */
+ if (fmode >= 0)
+ wanted |= ceph_caps_for_mode(fmode);
+
+retry:
+ spin_lock(&inode->i_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (!cap) {
+ if (new_cap) {
+ cap = new_cap;
+ new_cap = NULL;
+ } else {
+ spin_unlock(&inode->i_lock);
+ new_cap = get_cap(caps_reservation);
+ if (new_cap == NULL)
+ return -ENOMEM;
+ goto retry;
+ }
+
+ cap->issued = 0;
+ cap->implemented = 0;
+ cap->mds = mds;
+ cap->mds_wanted = 0;
+
+ cap->ci = ci;
+ __insert_cap_node(ci, cap);
+
+ /* clear out old exporting info? (i.e. on cap import) */
+ if (ci->i_cap_exporting_mds == mds) {
+ ci->i_cap_exporting_issued = 0;
+ ci->i_cap_exporting_mseq = 0;
+ ci->i_cap_exporting_mds = -1;
+ }
+
+ /* add to session cap list */
+ cap->session = session;
+ spin_lock(&session->s_cap_lock);
+ list_add_tail(&cap->session_caps, &session->s_caps);
+ session->s_nr_caps++;
+ spin_unlock(&session->s_cap_lock);
+ }
+
+ if (!ci->i_snap_realm) {
+ /*
+ * add this inode to the appropriate snap realm
+ */
+ struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
+ realmino);
+ if (realm) {
+ ceph_get_snap_realm(mdsc, realm);
+ spin_lock(&realm->inodes_with_caps_lock);
+ ci->i_snap_realm = realm;
+ list_add(&ci->i_snap_realm_item,
+ &realm->inodes_with_caps);
+ spin_unlock(&realm->inodes_with_caps_lock);
+ } else {
+ pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
+ realmino);
+ }
+ }
+
+ __check_cap_issue(ci, cap, issued);
+
+ /*
+ * If we are issued caps we don't want, or the mds' wanted
+ * value appears to be off, queue a check so we'll release
+ * later and/or update the mds wanted value.
+ */
+ actual_wanted = __ceph_caps_wanted(ci);
+ if ((wanted & ~actual_wanted) ||
+ (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
+ dout(" issued %s, mds wanted %s, actual %s, queueing\n",
+ ceph_cap_string(issued), ceph_cap_string(wanted),
+ ceph_cap_string(actual_wanted));
+ __cap_delay_requeue(mdsc, ci);
+ }
+
+ if (flags & CEPH_CAP_FLAG_AUTH)
+ ci->i_auth_cap = cap;
+ else if (ci->i_auth_cap == cap)
+ ci->i_auth_cap = NULL;
+
+ dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
+ inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
+ ceph_cap_string(issued|cap->issued), seq, mds);
+ cap->cap_id = cap_id;
+ cap->issued = issued;
+ cap->implemented |= issued;
+ cap->mds_wanted |= wanted;
+ cap->seq = seq;
+ cap->issue_seq = seq;
+ cap->mseq = mseq;
+ cap->cap_gen = session->s_cap_gen;
+
+ if (fmode >= 0)
+ __ceph_get_fmode(ci, fmode);
+ spin_unlock(&inode->i_lock);
+ wake_up(&ci->i_cap_wq);
+ return 0;
+}
+
+/*
+ * Return true if cap has not timed out and belongs to the current
+ * generation of the MDS session (i.e. has not gone 'stale' due to
+ * us losing touch with the mds).
+ */
+static int __cap_is_valid(struct ceph_cap *cap)
+{
+ unsigned long ttl;
+ u32 gen;
+
+ spin_lock(&cap->session->s_cap_lock);
+ gen = cap->session->s_cap_gen;
+ ttl = cap->session->s_cap_ttl;
+ spin_unlock(&cap->session->s_cap_lock);
+
+ if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
+ dout("__cap_is_valid %p cap %p issued %s "
+ "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
+ cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Return set of valid cap bits issued to us. Note that caps time
+ * out, and may be invalidated in bulk if the client session times out
+ * and session->s_cap_gen is bumped.
+ */
+int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
+{
+ int have = ci->i_snap_caps;
+ struct ceph_cap *cap;
+ struct rb_node *p;
+
+ if (implemented)
+ *implemented = 0;
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ cap = rb_entry(p, struct ceph_cap, ci_node);
+ if (!__cap_is_valid(cap))
+ continue;
+ dout("__ceph_caps_issued %p cap %p issued %s\n",
+ &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
+ have |= cap->issued;
+ if (implemented)
+ *implemented |= cap->implemented;
+ }
+ return have;
+}
+
+/*
+ * Get cap bits issued by caps other than @ocap
+ */
+int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
+{
+ int have = ci->i_snap_caps;
+ struct ceph_cap *cap;
+ struct rb_node *p;
+
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ cap = rb_entry(p, struct ceph_cap, ci_node);
+ if (cap == ocap)
+ continue;
+ if (!__cap_is_valid(cap))
+ continue;
+ have |= cap->issued;
+ }
+ return have;
+}
+
+/*
+ * Move a cap to the end of the LRU (oldest caps at list head, newest
+ * at list tail).
+ */
+static void __touch_cap(struct ceph_cap *cap)
+{
+ struct ceph_mds_session *s = cap->session;
+
+ dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
+ s->s_mds);
+ spin_lock(&s->s_cap_lock);
+ list_move_tail(&cap->session_caps, &s->s_caps);
+ spin_unlock(&s->s_cap_lock);
+}
+
+/*
+ * Check if we hold the given mask. If so, move the cap(s) to the
+ * front of their respective LRUs. (This is the preferred way for
+ * callers to check for caps they want.)
+ */
+int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
+{
+ struct ceph_cap *cap;
+ struct rb_node *p;
+ int have = ci->i_snap_caps;
+
+ if ((have & mask) == mask) {
+ dout("__ceph_caps_issued_mask %p snap issued %s"
+ " (mask %s)\n", &ci->vfs_inode,
+ ceph_cap_string(have),
+ ceph_cap_string(mask));
+ return 1;
+ }
+
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ cap = rb_entry(p, struct ceph_cap, ci_node);
+ if (!__cap_is_valid(cap))
+ continue;
+ if ((cap->issued & mask) == mask) {
+ dout("__ceph_caps_issued_mask %p cap %p issued %s"
+ " (mask %s)\n", &ci->vfs_inode, cap,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(mask));
+ if (touch)
+ __touch_cap(cap);
+ return 1;
+ }
+
+ /* does a combination of caps satisfy mask? */
+ have |= cap->issued;
+ if ((have & mask) == mask) {
+ dout("__ceph_caps_issued_mask %p combo issued %s"
+ " (mask %s)\n", &ci->vfs_inode,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(mask));
+ if (touch) {
+ struct rb_node *q;
+
+ /* touch this + preceeding caps */
+ __touch_cap(cap);
+ for (q = rb_first(&ci->i_caps); q != p;
+ q = rb_next(q)) {
+ cap = rb_entry(q, struct ceph_cap,
+ ci_node);
+ if (!__cap_is_valid(cap))
+ continue;
+ __touch_cap(cap);
+ }
+ }
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Return true if mask caps are currently being revoked by an MDS.
+ */
+int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
+{
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap *cap;
+ struct rb_node *p;
+ int ret = 0;
+
+ spin_lock(&inode->i_lock);
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ cap = rb_entry(p, struct ceph_cap, ci_node);
+ if (__cap_is_valid(cap) &&
+ (cap->implemented & ~cap->issued & mask)) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock(&inode->i_lock);
+ dout("ceph_caps_revoking %p %s = %d\n", inode,
+ ceph_cap_string(mask), ret);
+ return ret;
+}
+
+int __ceph_caps_used(struct ceph_inode_info *ci)
+{
+ int used = 0;
+ if (ci->i_pin_ref)
+ used |= CEPH_CAP_PIN;
+ if (ci->i_rd_ref)
+ used |= CEPH_CAP_FILE_RD;
+ if (ci->i_rdcache_ref || ci->i_rdcache_gen)
+ used |= CEPH_CAP_FILE_CACHE;
+ if (ci->i_wr_ref)
+ used |= CEPH_CAP_FILE_WR;
+ if (ci->i_wrbuffer_ref)
+ used |= CEPH_CAP_FILE_BUFFER;
+ return used;
+}
+
+/*
+ * wanted, by virtue of open file modes
+ */
+int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
+{
+ int want = 0;
+ int mode;
+ for (mode = 0; mode < 4; mode++)
+ if (ci->i_nr_by_mode[mode])
+ want |= ceph_caps_for_mode(mode);
+ return want;
+}
+
+/*
+ * Return caps we have registered with the MDS(s) as 'wanted'.
+ */
+int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
+{
+ struct ceph_cap *cap;
+ struct rb_node *p;
+ int mds_wanted = 0;
+
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ cap = rb_entry(p, struct ceph_cap, ci_node);
+ if (!__cap_is_valid(cap))
+ continue;
+ mds_wanted |= cap->mds_wanted;
+ }
+ return mds_wanted;
+}
+
+/*
+ * called under i_lock
+ */
+static int __ceph_is_any_caps(struct ceph_inode_info *ci)
+{
+ return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
+}
+
+/*
+ * caller should hold i_lock, and session s_mutex.
+ * returns true if this is the last cap. if so, caller should iput.
+ */
+void __ceph_remove_cap(struct ceph_cap *cap,
+ struct ceph_cap_reservation *ctx)
+{
+ struct ceph_mds_session *session = cap->session;
+ struct ceph_inode_info *ci = cap->ci;
+ struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+
+ dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
+
+ /* remove from session list */
+ spin_lock(&session->s_cap_lock);
+ list_del_init(&cap->session_caps);
+ session->s_nr_caps--;
+ spin_unlock(&session->s_cap_lock);
+
+ /* remove from inode list */
+ rb_erase(&cap->ci_node, &ci->i_caps);
+ cap->session = NULL;
+ if (ci->i_auth_cap == cap)
+ ci->i_auth_cap = NULL;
+
+ put_cap(cap, ctx);
+
+ if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
+ struct ceph_snap_realm *realm = ci->i_snap_realm;
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_del_init(&ci->i_snap_realm_item);
+ ci->i_snap_realm_counter++;
+ ci->i_snap_realm = NULL;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ ceph_put_snap_realm(mdsc, realm);
+ }
+ if (!__ceph_is_any_real_caps(ci))
+ __cap_delay_cancel(mdsc, ci);
+}
+
+/*
+ * Build and send a cap message to the given MDS.
+ *
+ * Caller should be holding s_mutex.
+ */
+static int send_cap_msg(struct ceph_mds_session *session,
+ u64 ino, u64 cid, int op,
+ int caps, int wanted, int dirty,
+ u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
+ u64 size, u64 max_size,
+ struct timespec *mtime, struct timespec *atime,
+ u64 time_warp_seq,
+ uid_t uid, gid_t gid, mode_t mode,
+ u64 xattr_version,
+ struct ceph_buffer *xattrs_buf,
+ u64 follows)
+{
+ struct ceph_mds_caps *fc;
+ struct ceph_msg *msg;
+
+ dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
+ " seq %u/%u mseq %u follows %lld size %llu/%llu"
+ " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
+ cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
+ ceph_cap_string(dirty),
+ seq, issue_seq, mseq, follows, size, max_size,
+ xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
+
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), 0, 0, NULL);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ fc = msg->front.iov_base;
+
+ memset(fc, 0, sizeof(*fc));
+
+ fc->cap_id = cpu_to_le64(cid);
+ fc->op = cpu_to_le32(op);
+ fc->seq = cpu_to_le32(seq);
+ fc->client_tid = cpu_to_le64(flush_tid);
+ fc->issue_seq = cpu_to_le32(issue_seq);
+ fc->migrate_seq = cpu_to_le32(mseq);
+ fc->caps = cpu_to_le32(caps);
+ fc->wanted = cpu_to_le32(wanted);
+ fc->dirty = cpu_to_le32(dirty);
+ fc->ino = cpu_to_le64(ino);
+ fc->snap_follows = cpu_to_le64(follows);
+
+ fc->size = cpu_to_le64(size);
+ fc->max_size = cpu_to_le64(max_size);
+ if (mtime)
+ ceph_encode_timespec(&fc->mtime, mtime);
+ if (atime)
+ ceph_encode_timespec(&fc->atime, atime);
+ fc->time_warp_seq = cpu_to_le32(time_warp_seq);
+
+ fc->uid = cpu_to_le32(uid);
+ fc->gid = cpu_to_le32(gid);
+ fc->mode = cpu_to_le32(mode);
+
+ fc->xattr_version = cpu_to_le64(xattr_version);
+ if (xattrs_buf) {
+ msg->middle = ceph_buffer_get(xattrs_buf);
+ fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
+ msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
+ }
+
+ ceph_con_send(&session->s_con, msg);
+ return 0;
+}
+
+/*
+ * Queue cap releases when an inode is dropped from our
+ * cache.
+ */
+void ceph_queue_caps_release(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct rb_node *p;
+
+ spin_lock(&inode->i_lock);
+ p = rb_first(&ci->i_caps);
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+ struct ceph_mds_session *session = cap->session;
+ struct ceph_msg *msg;
+ struct ceph_mds_cap_release *head;
+ struct ceph_mds_cap_item *item;
+
+ spin_lock(&session->s_cap_lock);
+ BUG_ON(!session->s_num_cap_releases);
+ msg = list_first_entry(&session->s_cap_releases,
+ struct ceph_msg, list_head);
+
+ dout(" adding %p release to mds%d msg %p (%d left)\n",
+ inode, session->s_mds, msg, session->s_num_cap_releases);
+
+ BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
+ head = msg->front.iov_base;
+ head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
+ item = msg->front.iov_base + msg->front.iov_len;
+ item->ino = cpu_to_le64(ceph_ino(inode));
+ item->cap_id = cpu_to_le64(cap->cap_id);
+ item->migrate_seq = cpu_to_le32(cap->mseq);
+ item->seq = cpu_to_le32(cap->issue_seq);
+
+ session->s_num_cap_releases--;
+
+ msg->front.iov_len += sizeof(*item);
+ if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
+ dout(" release msg %p full\n", msg);
+ list_move_tail(&msg->list_head,
+ &session->s_cap_releases_done);
+ } else {
+ dout(" release msg %p at %d/%d (%d)\n", msg,
+ (int)le32_to_cpu(head->num),
+ (int)CEPH_CAPS_PER_RELEASE,
+ (int)msg->front.iov_len);
+ }
+ spin_unlock(&session->s_cap_lock);
+ p = rb_next(p);
+ __ceph_remove_cap(cap, NULL);
+
+ }
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Send a cap msg on the given inode. Update our caps state, then
+ * drop i_lock and send the message.
+ *
+ * Make note of max_size reported/requested from mds, revoked caps
+ * that have now been implemented.
+ *
+ * Make half-hearted attempt ot to invalidate page cache if we are
+ * dropping RDCACHE. Note that this will leave behind locked pages
+ * that we'll then need to deal with elsewhere.
+ *
+ * Return non-zero if delayed release, or we experienced an error
+ * such that the caller should requeue + retry later.
+ *
+ * called with i_lock, then drops it.
+ * caller should hold snap_rwsem (read), s_mutex.
+ */
+static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+ int op, int used, int want, int retain, int flushing,
+ unsigned *pflush_tid)
+ __releases(cap->ci->vfs_inode->i_lock)
+{
+ struct ceph_inode_info *ci = cap->ci;
+ struct inode *inode = &ci->vfs_inode;
+ u64 cap_id = cap->cap_id;
+ int held = cap->issued | cap->implemented;
+ int revoking = cap->implemented & ~cap->issued;
+ int dropping = cap->issued & ~retain;
+ int keep;
+ u64 seq, issue_seq, mseq, time_warp_seq, follows;
+ u64 size, max_size;
+ struct timespec mtime, atime;
+ int wake = 0;
+ mode_t mode;
+ uid_t uid;
+ gid_t gid;
+ struct ceph_mds_session *session;
+ u64 xattr_version = 0;
+ int delayed = 0;
+ u64 flush_tid = 0;
+ int i;
+ int ret;
+
+ dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
+ inode, cap, cap->session,
+ ceph_cap_string(held), ceph_cap_string(held & retain),
+ ceph_cap_string(revoking));
+ BUG_ON((retain & CEPH_CAP_PIN) == 0);
+
+ session = cap->session;
+
+ /* don't release wanted unless we've waited a bit. */
+ if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
+ time_before(jiffies, ci->i_hold_caps_min)) {
+ dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->issued & retain),
+ ceph_cap_string(cap->mds_wanted),
+ ceph_cap_string(want));
+ want |= cap->mds_wanted;
+ retain |= cap->issued;
+ delayed = 1;
+ }
+ ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
+
+ cap->issued &= retain; /* drop bits we don't want */
+ if (cap->implemented & ~cap->issued) {
+ /*
+ * Wake up any waiters on wanted -> needed transition.
+ * This is due to the weird transition from buffered
+ * to sync IO... we need to flush dirty pages _before_
+ * allowing sync writes to avoid reordering.
+ */
+ wake = 1;
+ }
+ cap->implemented &= cap->issued | used;
+ cap->mds_wanted = want;
+
+ if (flushing) {
+ /*
+ * assign a tid for flush operations so we can avoid
+ * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
+ * clean type races. track latest tid for every bit
+ * so we can handle flush AxFw, flush Fw, and have the
+ * first ack clean Ax.
+ */
+ flush_tid = ++ci->i_cap_flush_last_tid;
+ if (pflush_tid)
+ *pflush_tid = flush_tid;
+ dout(" cap_flush_tid %d\n", (int)flush_tid);
+ for (i = 0; i < CEPH_CAP_BITS; i++)
+ if (flushing & (1 << i))
+ ci->i_cap_flush_tid[i] = flush_tid;
+ }
+
+ keep = cap->implemented;
+ seq = cap->seq;
+ issue_seq = cap->issue_seq;
+ mseq = cap->mseq;
+ size = inode->i_size;
+ ci->i_reported_size = size;
+ max_size = ci->i_wanted_max_size;
+ ci->i_requested_max_size = max_size;
+ mtime = inode->i_mtime;
+ atime = inode->i_atime;
+ time_warp_seq = ci->i_time_warp_seq;
+ follows = ci->i_snap_realm->cached_context->seq;
+ uid = inode->i_uid;
+ gid = inode->i_gid;
+ mode = inode->i_mode;
+
+ if (dropping & CEPH_CAP_XATTR_EXCL) {
+ __ceph_build_xattrs_blob(ci);
+ xattr_version = ci->i_xattrs.version + 1;
+ }
+
+ spin_unlock(&inode->i_lock);
+
+ if (dropping & CEPH_CAP_FILE_CACHE) {
+ /* invalidate what we can */
+ dout("invalidating pages on %p\n", inode);
+ invalidate_mapping_pages(&inode->i_data, 0, -1);
+ }
+
+ ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
+ op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
+ size, max_size, &mtime, &atime, time_warp_seq,
+ uid, gid, mode,
+ xattr_version,
+ (flushing & CEPH_CAP_XATTR_EXCL) ? ci->i_xattrs.blob : NULL,
+ follows);
+ if (ret < 0) {
+ dout("error sending cap msg, must requeue %p\n", inode);
+ delayed = 1;
+ }
+
+ if (wake)
+ wake_up(&ci->i_cap_wq);
+
+ return delayed;
+}
+
+/*
+ * When a snapshot is taken, clients accumulate dirty metadata on
+ * inodes with capabilities in ceph_cap_snaps to describe the file
+ * state at the time the snapshot was taken. This must be flushed
+ * asynchronously back to the MDS once sync writes complete and dirty
+ * data is written out.
+ *
+ * Called under i_lock. Takes s_mutex as needed.
+ */
+void __ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession)
+{
+ struct inode *inode = &ci->vfs_inode;
+ int mds;
+ struct ceph_cap_snap *capsnap;
+ u32 mseq;
+ struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
+ session->s_mutex */
+ u64 next_follows = 0; /* keep track of how far we've gotten through the
+ i_cap_snaps list, and skip these entries next time
+ around to avoid an infinite loop */
+
+ if (psession)
+ session = *psession;
+
+ dout("__flush_snaps %p\n", inode);
+retry:
+ list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
+ /* avoid an infiniute loop after retry */
+ if (capsnap->follows < next_follows)
+ continue;
+ /*
+ * we need to wait for sync writes to complete and for dirty
+ * pages to be written out.
+ */
+ if (capsnap->dirty_pages || capsnap->writing)
+ continue;
+
+ /* pick mds, take s_mutex */
+ mds = __ceph_get_cap_mds(ci, &mseq);
+ if (session && session->s_mds != mds) {
+ dout("oops, wrong session %p mutex\n", session);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ session = NULL;
+ }
+ if (!session) {
+ spin_unlock(&inode->i_lock);
+ mutex_lock(&mdsc->mutex);
+ session = __ceph_lookup_mds_session(mdsc, mds);
+ mutex_unlock(&mdsc->mutex);
+ if (session) {
+ dout("inverting session/ino locks on %p\n",
+ session);
+ mutex_lock(&session->s_mutex);
+ }
+ /*
+ * if session == NULL, we raced against a cap
+ * deletion. retry, and we'll get a better
+ * @mds value next time.
+ */
+ spin_lock(&inode->i_lock);
+ goto retry;
+ }
+
+ capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
+ atomic_inc(&capsnap->nref);
+ if (!list_empty(&capsnap->flushing_item))
+ list_del_init(&capsnap->flushing_item);
+ list_add_tail(&capsnap->flushing_item,
+ &session->s_cap_snaps_flushing);
+ spin_unlock(&inode->i_lock);
+
+ dout("flush_snaps %p cap_snap %p follows %lld size %llu\n",
+ inode, capsnap, next_follows, capsnap->size);
+ send_cap_msg(session, ceph_vino(inode).ino, 0,
+ CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
+ capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
+ capsnap->size, 0,
+ &capsnap->mtime, &capsnap->atime,
+ capsnap->time_warp_seq,
+ capsnap->uid, capsnap->gid, capsnap->mode,
+ 0, NULL,
+ capsnap->follows);
+
+ next_follows = capsnap->follows + 1;
+ ceph_put_cap_snap(capsnap);
+
+ spin_lock(&inode->i_lock);
+ goto retry;
+ }
+
+ /* we flushed them all; remove this inode from the queue */
+ spin_lock(&mdsc->snap_flush_lock);
+ list_del_init(&ci->i_snap_flush_item);
+ spin_unlock(&mdsc->snap_flush_lock);
+
+ if (psession)
+ *psession = session;
+ else if (session) {
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ }
+}
+
+static void ceph_flush_snaps(struct ceph_inode_info *ci)
+{
+ struct inode *inode = &ci->vfs_inode;
+
+ spin_lock(&inode->i_lock);
+ __ceph_flush_snaps(ci, NULL);
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Mark caps dirty. If inode is newly dirty, add to the global dirty
+ * list.
+ */
+void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
+{
+ struct ceph_mds_client *mdsc = &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+ struct inode *inode = &ci->vfs_inode;
+ int was = ci->i_dirty_caps;
+ int dirty = 0;
+
+ dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
+ ceph_cap_string(mask), ceph_cap_string(was),
+ ceph_cap_string(was | mask));
+ ci->i_dirty_caps |= mask;
+ if (was == 0) {
+ dout(" inode %p now dirty\n", &ci->vfs_inode);
+ BUG_ON(!list_empty(&ci->i_dirty_item));
+ spin_lock(&mdsc->cap_dirty_lock);
+ list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
+ spin_unlock(&mdsc->cap_dirty_lock);
+ if (ci->i_flushing_caps == 0) {
+ igrab(inode);
+ dirty |= I_DIRTY_SYNC;
+ }
+ }
+ BUG_ON(list_empty(&ci->i_dirty_item));
+ if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
+ (mask & CEPH_CAP_FILE_BUFFER))
+ dirty |= I_DIRTY_DATASYNC;
+ if (dirty)
+ __mark_inode_dirty(inode, dirty);
+ __cap_delay_requeue(mdsc, ci);
+}
+
+/*
+ * Add dirty inode to the flushing list. Assigned a seq number so we
+ * can wait for caps to flush without starving.
+ *
+ * Called under i_lock.
+ */
+static int __mark_caps_flushing(struct inode *inode,
+ struct ceph_mds_session *session)
+{
+ struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int flushing;
+
+ BUG_ON(ci->i_dirty_caps == 0);
+ BUG_ON(list_empty(&ci->i_dirty_item));
+
+ flushing = ci->i_dirty_caps;
+ dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
+ ceph_cap_string(flushing),
+ ceph_cap_string(ci->i_flushing_caps),
+ ceph_cap_string(ci->i_flushing_caps | flushing));
+ ci->i_flushing_caps |= flushing;
+ ci->i_dirty_caps = 0;
+ dout(" inode %p now !dirty\n", inode);
+
+ spin_lock(&mdsc->cap_dirty_lock);
+ list_del_init(&ci->i_dirty_item);
+
+ ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
+ if (list_empty(&ci->i_flushing_item)) {
+ list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
+ mdsc->num_cap_flushing++;
+ dout(" inode %p now flushing seq %lld\n", inode,
+ ci->i_cap_flush_seq);
+ } else {
+ list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
+ dout(" inode %p now flushing (more) seq %lld\n", inode,
+ ci->i_cap_flush_seq);
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+
+ return flushing;
+}
+
+/*
+ * Swiss army knife function to examine currently used and wanted
+ * versus held caps. Release, flush, ack revoked caps to mds as
+ * appropriate.
+ *
+ * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
+ * cap release further.
+ * CHECK_CAPS_AUTHONLY - we should only check the auth cap
+ * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
+ * further delay.
+ */
+void ceph_check_caps(struct ceph_inode_info *ci, int flags,
+ struct ceph_mds_session *session)
+{
+ struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap *cap;
+ int file_wanted, used;
+ int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
+ int drop_session_lock = session ? 0 : 1;
+ int want, retain, revoking, flushing = 0;
+ int mds = -1; /* keep track of how far we've gone through i_caps list
+ to avoid an infinite loop on retry */
+ struct rb_node *p;
+ int tried_invalidate = 0;
+ int delayed = 0, sent = 0, force_requeue = 0, num;
+ int is_delayed = flags & CHECK_CAPS_NODELAY;
+
+ /* if we are unmounting, flush any unused caps immediately. */
+ if (mdsc->stopping)
+ is_delayed = 1;
+
+ spin_lock(&inode->i_lock);
+
+ if (ci->i_ceph_flags & CEPH_I_FLUSH)
+ flags |= CHECK_CAPS_FLUSH;
+
+ /* flush snaps first time around only */
+ if (!list_empty(&ci->i_cap_snaps))
+ __ceph_flush_snaps(ci, &session);
+ goto retry_locked;
+retry:
+ spin_lock(&inode->i_lock);
+retry_locked:
+ file_wanted = __ceph_caps_file_wanted(ci);
+ used = __ceph_caps_used(ci);
+ want = file_wanted | used;
+
+ retain = want | CEPH_CAP_PIN;
+ if (!mdsc->stopping && inode->i_nlink > 0) {
+ if (want) {
+ retain |= CEPH_CAP_ANY; /* be greedy */
+ } else {
+ retain |= CEPH_CAP_ANY_SHARED;
+ /*
+ * keep RD only if we didn't have the file open RW,
+ * because then the mds would revoke it anyway to
+ * journal max_size=0.
+ */
+ if (ci->i_max_size == 0)
+ retain |= CEPH_CAP_ANY_RD;
+ }
+ }
+
+ dout("check_caps %p file_want %s used %s dirty %s flushing %s"
+ " issued %s retain %s %s%s%s\n", inode,
+ ceph_cap_string(file_wanted),
+ ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
+ ceph_cap_string(ci->i_flushing_caps),
+ ceph_cap_string(__ceph_caps_issued(ci, NULL)),
+ ceph_cap_string(retain),
+ (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
+ (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
+ (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
+
+ /*
+ * If we no longer need to hold onto old our caps, and we may
+ * have cached pages, but don't want them, then try to invalidate.
+ * If we fail, it's because pages are locked.... try again later.
+ */
+ if ((!is_delayed || mdsc->stopping) &&
+ ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
+ ci->i_rdcache_gen && /* may have cached pages */
+ file_wanted == 0 && /* no open files */
+ !ci->i_truncate_pending &&
+ !tried_invalidate) {
+ u32 invalidating_gen = ci->i_rdcache_gen;
+ int ret;
+
+ dout("check_caps trying to invalidate on %p\n", inode);
+ spin_unlock(&inode->i_lock);
+ ret = invalidate_mapping_pages(&inode->i_data, 0, -1);
+ spin_lock(&inode->i_lock);
+ if (ret == 0 && invalidating_gen == ci->i_rdcache_gen) {
+ /* success. */
+ ci->i_rdcache_gen = 0;
+ ci->i_rdcache_revoking = 0;
+ } else {
+ dout("check_caps failed to invalidate pages\n");
+ /* we failed to invalidate pages. check these
+ caps again later. */
+ force_requeue = 1;
+ __cap_set_timeouts(mdsc, ci);
+ }
+ tried_invalidate = 1;
+ goto retry_locked;
+ }
+
+ num = 0;
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ cap = rb_entry(p, struct ceph_cap, ci_node);
+ num++;
+
+ /* avoid looping forever */
+ if (mds >= cap->mds ||
+ ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
+ continue;
+
+ /* NOTE: no side-effects allowed, until we take s_mutex */
+
+ revoking = cap->implemented & ~cap->issued;
+ if (revoking)
+ dout("mds%d revoking %s\n", cap->mds,
+ ceph_cap_string(revoking));
+
+ if (cap == ci->i_auth_cap &&
+ (cap->issued & CEPH_CAP_FILE_WR)) {
+ /* request larger max_size from MDS? */
+ if (ci->i_wanted_max_size > ci->i_max_size &&
+ ci->i_wanted_max_size > ci->i_requested_max_size) {
+ dout("requesting new max_size\n");
+ goto ack;
+ }
+
+ /* approaching file_max? */
+ if ((inode->i_size << 1) >= ci->i_max_size &&
+ (ci->i_reported_size << 1) < ci->i_max_size) {
+ dout("i_size approaching max_size\n");
+ goto ack;
+ }
+ }
+ /* flush anything dirty? */
+ if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
+ ci->i_dirty_caps) {
+ dout("flushing dirty caps\n");
+ goto ack;
+ }
+
+ /* completed revocation? going down and there are no caps? */
+ if (revoking && (revoking & used) == 0) {
+ dout("completed revocation of %s\n",
+ ceph_cap_string(cap->implemented & ~cap->issued));
+ goto ack;
+ }
+
+ /* want more caps from mds? */
+ if (want & ~(cap->mds_wanted | cap->issued))
+ goto ack;
+
+ /* things we might delay */
+ if ((cap->issued & ~retain) == 0 &&
+ cap->mds_wanted == want)
+ continue; /* nope, all good */
+
+ if (is_delayed)
+ goto ack;
+
+ /* delay? */
+ if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
+ time_before(jiffies, ci->i_hold_caps_max)) {
+ dout(" delaying issued %s -> %s, wanted %s -> %s\n",
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->issued & retain),
+ ceph_cap_string(cap->mds_wanted),
+ ceph_cap_string(want));
+ delayed++;
+ continue;
+ }
+
+ack:
+ if (session && session != cap->session) {
+ dout("oops, wrong session %p mutex\n", session);
+ mutex_unlock(&session->s_mutex);
+ session = NULL;
+ }
+ if (!session) {
+ session = cap->session;
+ if (mutex_trylock(&session->s_mutex) == 0) {
+ dout("inverting session/ino locks on %p\n",
+ session);
+ spin_unlock(&inode->i_lock);
+ if (took_snap_rwsem) {
+ up_read(&mdsc->snap_rwsem);
+ took_snap_rwsem = 0;
+ }
+ mutex_lock(&session->s_mutex);
+ goto retry;
+ }
+ }
+ /* take snap_rwsem after session mutex */
+ if (!took_snap_rwsem) {
+ if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
+ dout("inverting snap/in locks on %p\n",
+ inode);
+ spin_unlock(&inode->i_lock);
+ down_read(&mdsc->snap_rwsem);
+ took_snap_rwsem = 1;
+ goto retry;
+ }
+ took_snap_rwsem = 1;
+ }
+
+ if (cap == ci->i_auth_cap && ci->i_dirty_caps)
+ flushing = __mark_caps_flushing(inode, session);
+
+ mds = cap->mds; /* remember mds, so we don't repeat */
+ sent++;
+
+ /* __send_cap drops i_lock */
+ delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
+ retain, flushing, NULL);
+ goto retry; /* retake i_lock and restart our cap scan. */
+ }
+
+ /*
+ * Reschedule delayed caps release if we delayed anything,
+ * otherwise cancel.
+ */
+ if (delayed && is_delayed)
+ force_requeue = 1; /* __send_cap delayed release; requeue */
+ if (!delayed && !is_delayed)
+ __cap_delay_cancel(mdsc, ci);
+ else if (!is_delayed || force_requeue)
+ __cap_delay_requeue(mdsc, ci);
+
+ spin_unlock(&inode->i_lock);
+
+ if (session && drop_session_lock)
+ mutex_unlock(&session->s_mutex);
+ if (took_snap_rwsem)
+ up_read(&mdsc->snap_rwsem);
+}
+
+/*
+ * Try to flush dirty caps back to the auth mds.
+ */
+static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
+ unsigned *flush_tid)
+{
+ struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int unlock_session = session ? 0 : 1;
+ int flushing = 0;
+
+retry:
+ spin_lock(&inode->i_lock);
+ if (ci->i_dirty_caps && ci->i_auth_cap) {
+ struct ceph_cap *cap = ci->i_auth_cap;
+ int used = __ceph_caps_used(ci);
+ int want = __ceph_caps_wanted(ci);
+ int delayed;
+
+ if (!session) {
+ spin_unlock(&inode->i_lock);
+ session = cap->session;
+ mutex_lock(&session->s_mutex);
+ goto retry;
+ }
+ BUG_ON(session != cap->session);
+ if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
+ goto out;
+
+ flushing = __mark_caps_flushing(inode, session);
+
+ /* __send_cap drops i_lock */
+ delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
+ cap->issued | cap->implemented, flushing,
+ flush_tid);
+ if (!delayed)
+ goto out_unlocked;
+
+ spin_lock(&inode->i_lock);
+ __cap_delay_requeue(mdsc, ci);
+ }
+out:
+ spin_unlock(&inode->i_lock);
+out_unlocked:
+ if (session && unlock_session)
+ mutex_unlock(&session->s_mutex);
+ return flushing;
+}
+
+/*
+ * Return true if we've flushed caps through the given flush_tid.
+ */
+static int caps_are_flushed(struct inode *inode, unsigned tid)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int dirty, i, ret = 1;
+
+ spin_lock(&inode->i_lock);
+ dirty = __ceph_caps_dirty(ci);
+ for (i = 0; i < CEPH_CAP_BITS; i++)
+ if ((ci->i_flushing_caps & (1 << i)) &&
+ ci->i_cap_flush_tid[i] <= tid) {
+ /* still flushing this bit */
+ ret = 0;
+ break;
+ }
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
+
+/*
+ * Wait on any unsafe replies for the given inode. First wait on the
+ * newest request, and make that the upper bound. Then, if there are
+ * more requests, keep waiting on the oldest as long as it is still older
+ * than the original request.
+ */
+static void sync_write_wait(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct list_head *head = &ci->i_unsafe_writes;
+ struct ceph_osd_request *req;
+ u64 last_tid;
+
+ spin_lock(&ci->i_unsafe_lock);
+ if (list_empty(head))
+ goto out;
+
+ /* set upper bound as _last_ entry in chain */
+ req = list_entry(head->prev, struct ceph_osd_request,
+ r_unsafe_item);
+ last_tid = req->r_tid;
+
+ do {
+ ceph_osdc_get_request(req);
+ spin_unlock(&ci->i_unsafe_lock);
+ dout("sync_write_wait on tid %llu (until %llu)\n",
+ req->r_tid, last_tid);
+ wait_for_completion(&req->r_safe_completion);
+ spin_lock(&ci->i_unsafe_lock);
+ ceph_osdc_put_request(req);
+
+ /*
+ * from here on look at first entry in chain, since we
+ * only want to wait for anything older than last_tid
+ */
+ if (list_empty(head))
+ break;
+ req = list_entry(head->next, struct ceph_osd_request,
+ r_unsafe_item);
+ } while (req->r_tid < last_tid);
+out:
+ spin_unlock(&ci->i_unsafe_lock);
+}
+
+int ceph_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ unsigned flush_tid;
+ int ret;
+ int dirty;
+
+ dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
+ sync_write_wait(inode);
+
+ ret = filemap_write_and_wait(inode->i_mapping);
+ if (ret < 0)
+ return ret;
+
+ dirty = try_flush_caps(inode, NULL, &flush_tid);
+ dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
+
+ /*
+ * only wait on non-file metadata writeback (the mds
+ * can recover size and mtime, so we don't need to
+ * wait for that)
+ */
+ if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
+ dout("fsync waiting for flush_tid %u\n", flush_tid);
+ ret = wait_event_interruptible(ci->i_cap_wq,
+ caps_are_flushed(inode, flush_tid));
+ }
+
+ dout("fsync %p%s done\n", inode, datasync ? " datasync" : "");
+ return ret;
+}
+
+/*
+ * Flush any dirty caps back to the mds. If we aren't asked to wait,
+ * queue inode for flush but don't do so immediately, because we can
+ * get by with fewer MDS messages if we wait for data writeback to
+ * complete first.
+ */
+int ceph_write_inode(struct inode *inode, int wait)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ unsigned flush_tid;
+ int err = 0;
+ int dirty;
+
+ dout("write_inode %p wait=%d\n", inode, wait);
+ if (wait) {
+ dirty = try_flush_caps(inode, NULL, &flush_tid);
+ if (dirty)
+ err = wait_event_interruptible(ci->i_cap_wq,
+ caps_are_flushed(inode, flush_tid));
+ } else {
+ struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+
+ spin_lock(&inode->i_lock);
+ if (__ceph_caps_dirty(ci))
+ __cap_delay_requeue_front(mdsc, ci);
+ spin_unlock(&inode->i_lock);
+ }
+ return err;
+}
+
+/*
+ * After a recovering MDS goes active, we need to resend any caps
+ * we were flushing.
+ *
+ * Caller holds session->s_mutex.
+ */
+static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_cap_snap *capsnap;
+
+ dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
+ list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
+ flushing_item) {
+ struct ceph_inode_info *ci = capsnap->ci;
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap *cap;
+
+ spin_lock(&inode->i_lock);
+ cap = ci->i_auth_cap;
+ if (cap && cap->session == session) {
+ dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
+ cap, capsnap);
+ __ceph_flush_snaps(ci, &session);
+ } else {
+ pr_err("%p auth cap %p not mds%d ???\n", inode,
+ cap, session->s_mds);
+ spin_unlock(&inode->i_lock);
+ }
+ }
+}
+
+void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_inode_info *ci;
+
+ kick_flushing_capsnaps(mdsc, session);
+
+ dout("kick_flushing_caps mds%d\n", session->s_mds);
+ list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap *cap;
+ int delayed = 0;
+
+ spin_lock(&inode->i_lock);
+ cap = ci->i_auth_cap;
+ if (cap && cap->session == session) {
+ dout("kick_flushing_caps %p cap %p %s\n", inode,
+ cap, ceph_cap_string(ci->i_flushing_caps));
+ delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+ __ceph_caps_used(ci),
+ __ceph_caps_wanted(ci),
+ cap->issued | cap->implemented,
+ ci->i_flushing_caps, NULL);
+ if (delayed) {
+ spin_lock(&inode->i_lock);
+ __cap_delay_requeue(mdsc, ci);
+ spin_unlock(&inode->i_lock);
+ }
+ } else {
+ pr_err("%p auth cap %p not mds%d ???\n", inode,
+ cap, session->s_mds);
+ spin_unlock(&inode->i_lock);
+ }
+ }
+}
+
+
+/*
+ * Take references to capabilities we hold, so that we don't release
+ * them to the MDS prematurely.
+ *
+ * Protected by i_lock.
+ */
+static void __take_cap_refs(struct ceph_inode_info *ci, int got)
+{
+ if (got & CEPH_CAP_PIN)
+ ci->i_pin_ref++;
+ if (got & CEPH_CAP_FILE_RD)
+ ci->i_rd_ref++;
+ if (got & CEPH_CAP_FILE_CACHE)
+ ci->i_rdcache_ref++;
+ if (got & CEPH_CAP_FILE_WR)
+ ci->i_wr_ref++;
+ if (got & CEPH_CAP_FILE_BUFFER) {
+ if (ci->i_wrbuffer_ref == 0)
+ igrab(&ci->vfs_inode);
+ ci->i_wrbuffer_ref++;
+ dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
+ &ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
+ }
+}
+
+/*
+ * Try to grab cap references. Specify those refs we @want, and the
+ * minimal set we @need. Also include the larger offset we are writing
+ * to (when applicable), and check against max_size here as well.
+ * Note that caller is responsible for ensuring max_size increases are
+ * requested from the MDS.
+ */
+static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
+ int *got, loff_t endoff, int *check_max, int *err)
+{
+ struct inode *inode = &ci->vfs_inode;
+ int ret = 0;
+ int have, implemented;
+
+ dout("get_cap_refs %p need %s want %s\n", inode,
+ ceph_cap_string(need), ceph_cap_string(want));
+ spin_lock(&inode->i_lock);
+
+ /* make sure we _have_ some caps! */
+ if (!__ceph_is_any_caps(ci)) {
+ dout("get_cap_refs %p no real caps\n", inode);
+ *err = -EBADF;
+ ret = 1;
+ goto out;
+ }
+
+ if (need & CEPH_CAP_FILE_WR) {
+ if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
+ dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
+ inode, endoff, ci->i_max_size);
+ if (endoff > ci->i_wanted_max_size) {
+ *check_max = 1;
+ ret = 1;
+ }
+ goto out;
+ }
+ /*
+ * If a sync write is in progress, we must wait, so that we
+ * can get a final snapshot value for size+mtime.
+ */
+ if (__ceph_have_pending_cap_snap(ci)) {
+ dout("get_cap_refs %p cap_snap_pending\n", inode);
+ goto out;
+ }
+ }
+ have = __ceph_caps_issued(ci, &implemented);
+
+ /*
+ * disallow writes while a truncate is pending
+ */
+ if (ci->i_truncate_pending)
+ have &= ~CEPH_CAP_FILE_WR;
+
+ if ((have & need) == need) {
+ /*
+ * Look at (implemented & ~have & not) so that we keep waiting
+ * on transition from wanted -> needed caps. This is needed
+ * for WRBUFFER|WR -> WR to avoid a new WR sync write from
+ * going before a prior buffered writeback happens.
+ */
+ int not = want & ~(have & need);
+ int revoking = implemented & ~have;
+ dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
+ inode, ceph_cap_string(have), ceph_cap_string(not),
+ ceph_cap_string(revoking));
+ if ((revoking & not) == 0) {
+ *got = need | (have & want);
+ __take_cap_refs(ci, *got);
+ ret = 1;
+ }
+ } else {
+ dout("get_cap_refs %p have %s needed %s\n", inode,
+ ceph_cap_string(have), ceph_cap_string(need));
+ }
+out:
+ spin_unlock(&inode->i_lock);
+ dout("get_cap_refs %p ret %d got %s\n", inode,
+ ret, ceph_cap_string(*got));
+ return ret;
+}
+
+/*
+ * Check the offset we are writing up to against our current
+ * max_size. If necessary, tell the MDS we want to write to
+ * a larger offset.
+ */
+static void check_max_size(struct inode *inode, loff_t endoff)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int check = 0;
+
+ /* do we need to explicitly request a larger max_size? */
+ spin_lock(&inode->i_lock);
+ if ((endoff >= ci->i_max_size ||
+ endoff > (inode->i_size << 1)) &&
+ endoff > ci->i_wanted_max_size) {
+ dout("write %p at large endoff %llu, req max_size\n",
+ inode, endoff);
+ ci->i_wanted_max_size = endoff;
+ check = 1;
+ }
+ spin_unlock(&inode->i_lock);
+ if (check)
+ ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+}
+
+/*
+ * Wait for caps, and take cap references. If we can't get a WR cap
+ * due to a small max_size, make sure we check_max_size (and possibly
+ * ask the mds) so we don't get hung up indefinitely.
+ */
+int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, int *got,
+ loff_t endoff)
+{
+ int check_max, ret, err;
+
+retry:
+ if (endoff > 0)
+ check_max_size(&ci->vfs_inode, endoff);
+ check_max = 0;
+ err = 0;
+ ret = wait_event_interruptible(ci->i_cap_wq,
+ try_get_cap_refs(ci, need, want,
+ got, endoff,
+ &check_max, &err));
+ if (err)
+ ret = err;
+ if (check_max)
+ goto retry;
+ return ret;
+}
+
+/*
+ * Take cap refs. Caller must already know we hold at least one ref
+ * on the caps in question or we don't know this is safe.
+ */
+void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
+{
+ spin_lock(&ci->vfs_inode.i_lock);
+ __take_cap_refs(ci, caps);
+ spin_unlock(&ci->vfs_inode.i_lock);
+}
+
+/*
+ * Release cap refs.
+ *
+ * If we released the last ref on any given cap, call ceph_check_caps
+ * to release (or schedule a release).
+ *
+ * If we are releasing a WR cap (from a sync write), finalize any affected
+ * cap_snap, and wake up any waiters.
+ */
+void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
+{
+ struct inode *inode = &ci->vfs_inode;
+ int last = 0, put = 0, flushsnaps = 0, wake = 0;
+ struct ceph_cap_snap *capsnap;
+
+ spin_lock(&inode->i_lock);
+ if (had & CEPH_CAP_PIN)
+ --ci->i_pin_ref;
+ if (had & CEPH_CAP_FILE_RD)
+ if (--ci->i_rd_ref == 0)
+ last++;
+ if (had & CEPH_CAP_FILE_CACHE)
+ if (--ci->i_rdcache_ref == 0)
+ last++;
+ if (had & CEPH_CAP_FILE_BUFFER) {
+ if (--ci->i_wrbuffer_ref == 0) {
+ last++;
+ put++;
+ }
+ dout("put_cap_refs %p wrbuffer %d -> %d (?)\n",
+ inode, ci->i_wrbuffer_ref+1, ci->i_wrbuffer_ref);
+ }
+ if (had & CEPH_CAP_FILE_WR)
+ if (--ci->i_wr_ref == 0) {
+ last++;
+ if (!list_empty(&ci->i_cap_snaps)) {
+ capsnap = list_first_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap,
+ ci_item);
+ if (capsnap->writing) {
+ capsnap->writing = 0;
+ flushsnaps =
+ __ceph_finish_cap_snap(ci,
+ capsnap);
+ wake = 1;
+ }
+ }
+ }
+ spin_unlock(&inode->i_lock);
+
+ dout("put_cap_refs %p had %s %s\n", inode, ceph_cap_string(had),
+ last ? "last" : "");
+
+ if (last && !flushsnaps)
+ ceph_check_caps(ci, 0, NULL);
+ else if (flushsnaps)
+ ceph_flush_snaps(ci);
+ if (wake)
+ wake_up(&ci->i_cap_wq);
+ if (put)
+ iput(inode);
+}
+
+/*
+ * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
+ * context. Adjust per-snap dirty page accounting as appropriate.
+ * Once all dirty data for a cap_snap is flushed, flush snapped file
+ * metadata back to the MDS. If we dropped the last ref, call
+ * ceph_check_caps.
+ */
+void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
+ struct ceph_snap_context *snapc)
+{
+ struct inode *inode = &ci->vfs_inode;
+ int last = 0;
+ int last_snap = 0;
+ int found = 0;
+ struct ceph_cap_snap *capsnap = NULL;
+
+ spin_lock(&inode->i_lock);
+ ci->i_wrbuffer_ref -= nr;
+ last = !ci->i_wrbuffer_ref;
+
+ if (ci->i_head_snapc == snapc) {
+ ci->i_wrbuffer_ref_head -= nr;
+ if (!ci->i_wrbuffer_ref_head) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
+ dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
+ inode,
+ ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
+ ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
+ last ? " LAST" : "");
+ } else {
+ list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
+ if (capsnap->context == snapc) {
+ found = 1;
+ capsnap->dirty_pages -= nr;
+ last_snap = !capsnap->dirty_pages;
+ break;
+ }
+ }
+ BUG_ON(!found);
+ dout("put_wrbuffer_cap_refs on %p cap_snap %p "
+ " snap %lld %d/%d -> %d/%d %s%s\n",
+ inode, capsnap, capsnap->context->seq,
+ ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
+ ci->i_wrbuffer_ref, capsnap->dirty_pages,
+ last ? " (wrbuffer last)" : "",
+ last_snap ? " (capsnap last)" : "");
+ }
+
+ spin_unlock(&inode->i_lock);
+
+ if (last) {
+ ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+ iput(inode);
+ } else if (last_snap) {
+ ceph_flush_snaps(ci);
+ wake_up(&ci->i_cap_wq);
+ }
+}
+
+/*
+ * Handle a cap GRANT message from the MDS. (Note that a GRANT may
+ * actually be a revocation if it specifies a smaller cap set.)
+ *
+ * caller holds s_mutex.
+ * return value:
+ * 0 - ok
+ * 1 - check_caps on auth cap only (writeback)
+ * 2 - check_caps (ack revoke)
+ */
+static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
+ struct ceph_mds_session *session,
+ struct ceph_cap *cap,
+ struct ceph_buffer *xattr_buf)
+ __releases(inode->i_lock)
+
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int mds = session->s_mds;
+ int seq = le32_to_cpu(grant->seq);
+ int newcaps = le32_to_cpu(grant->caps);
+ int issued, implemented, used, wanted, dirty;
+ u64 size = le64_to_cpu(grant->size);
+ u64 max_size = le64_to_cpu(grant->max_size);
+ struct timespec mtime, atime, ctime;
+ int reply = 0;
+ int wake = 0;
+ int writeback = 0;
+ int revoked_rdcache = 0;
+ int invalidate_async = 0;
+ int tried_invalidate = 0;
+ int ret;
+
+ dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
+ inode, cap, mds, seq, ceph_cap_string(newcaps));
+ dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
+ inode->i_size);
+
+ /*
+ * If CACHE is being revoked, and we have no dirty buffers,
+ * try to invalidate (once). (If there are dirty buffers, we
+ * will invalidate _after_ writeback.)
+ */
+restart:
+ if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
+ !ci->i_wrbuffer_ref && !tried_invalidate) {
+ dout("CACHE invalidation\n");
+ spin_unlock(&inode->i_lock);
+ tried_invalidate = 1;
+
+ ret = invalidate_mapping_pages(&inode->i_data, 0, -1);
+ spin_lock(&inode->i_lock);
+ if (ret < 0) {
+ /* there were locked pages.. invalidate later
+ in a separate thread. */
+ if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
+ invalidate_async = 1;
+ ci->i_rdcache_revoking = ci->i_rdcache_gen;
+ }
+ } else {
+ /* we successfully invalidated those pages */
+ revoked_rdcache = 1;
+ ci->i_rdcache_gen = 0;
+ ci->i_rdcache_revoking = 0;
+ }
+ goto restart;
+ }
+
+ /* side effects now are allowed */
+
+ issued = __ceph_caps_issued(ci, &implemented);
+ issued |= implemented | __ceph_caps_dirty(ci);
+
+ cap->cap_gen = session->s_cap_gen;
+
+ __check_cap_issue(ci, cap, newcaps);
+
+ if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
+ inode->i_mode = le32_to_cpu(grant->mode);
+ inode->i_uid = le32_to_cpu(grant->uid);
+ inode->i_gid = le32_to_cpu(grant->gid);
+ dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
+ inode->i_uid, inode->i_gid);
+ }
+
+ if ((issued & CEPH_CAP_LINK_EXCL) == 0)
+ inode->i_nlink = le32_to_cpu(grant->nlink);
+
+ if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
+ int len = le32_to_cpu(grant->xattr_len);
+ u64 version = le64_to_cpu(grant->xattr_version);
+
+ if (version > ci->i_xattrs.version) {
+ dout(" got new xattrs v%llu on %p len %d\n",
+ version, inode, len);
+ if (ci->i_xattrs.blob)
+ ceph_buffer_put(ci->i_xattrs.blob);
+ ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
+ ci->i_xattrs.version = version;
+ }
+ }
+
+ /* size/ctime/mtime/atime? */
+ ceph_fill_file_size(inode, issued,
+ le32_to_cpu(grant->truncate_seq),
+ le64_to_cpu(grant->truncate_size), size);
+ ceph_decode_timespec(&mtime, &grant->mtime);
+ ceph_decode_timespec(&atime, &grant->atime);
+ ceph_decode_timespec(&ctime, &grant->ctime);
+ ceph_fill_file_time(inode, issued,
+ le32_to_cpu(grant->time_warp_seq), &ctime, &mtime,
+ &atime);
+
+ /* max size increase? */
+ if (max_size != ci->i_max_size) {
+ dout("max_size %lld -> %llu\n", ci->i_max_size, max_size);
+ ci->i_max_size = max_size;
+ if (max_size >= ci->i_wanted_max_size) {
+ ci->i_wanted_max_size = 0; /* reset */
+ ci->i_requested_max_size = 0;
+ }
+ wake = 1;
+ }
+
+ /* check cap bits */
+ wanted = __ceph_caps_wanted(ci);
+ used = __ceph_caps_used(ci);
+ dirty = __ceph_caps_dirty(ci);
+ dout(" my wanted = %s, used = %s, dirty %s\n",
+ ceph_cap_string(wanted),
+ ceph_cap_string(used),
+ ceph_cap_string(dirty));
+ if (wanted != le32_to_cpu(grant->wanted)) {
+ dout("mds wanted %s -> %s\n",
+ ceph_cap_string(le32_to_cpu(grant->wanted)),
+ ceph_cap_string(wanted));
+ grant->wanted = cpu_to_le32(wanted);
+ }
+
+ cap->seq = seq;
+
+ /* file layout may have changed */
+ ci->i_layout = grant->layout;
+
+ /* revocation, grant, or no-op? */
+ if (cap->issued & ~newcaps) {
+ dout("revocation: %s -> %s\n", ceph_cap_string(cap->issued),
+ ceph_cap_string(newcaps));
+ if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
+ writeback = 1; /* will delay ack */
+ else if (dirty & ~newcaps)
+ reply = 1; /* initiate writeback in check_caps */
+ else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
+ revoked_rdcache)
+ reply = 2; /* send revoke ack in check_caps */
+ cap->issued = newcaps;
+ } else if (cap->issued == newcaps) {
+ dout("caps unchanged: %s -> %s\n",
+ ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
+ } else {
+ dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
+ ceph_cap_string(newcaps));
+ cap->issued = newcaps;
+ cap->implemented |= newcaps; /* add bits only, to
+ * avoid stepping on a
+ * pending revocation */
+ wake = 1;
+ }
+
+ spin_unlock(&inode->i_lock);
+ if (writeback) {
+ /*
+ * queue inode for writeback: we can't actually call
+ * filemap_write_and_wait, etc. from message handler
+ * context.
+ */
+ dout("queueing %p for writeback\n", inode);
+ if (ceph_queue_writeback(inode))
+ igrab(inode);
+ }
+ if (invalidate_async) {
+ dout("queueing %p for page invalidation\n", inode);
+ if (ceph_queue_page_invalidation(inode))
+ igrab(inode);
+ }
+ if (wake)
+ wake_up(&ci->i_cap_wq);
+ return reply;
+}
+
+/*
+ * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
+ * MDS has been safely committed.
+ */
+static void handle_cap_flush_ack(struct inode *inode,
+ struct ceph_mds_caps *m,
+ struct ceph_mds_session *session,
+ struct ceph_cap *cap)
+ __releases(inode->i_lock)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+ unsigned seq = le32_to_cpu(m->seq);
+ int dirty = le32_to_cpu(m->dirty);
+ int cleaned = 0;
+ u64 flush_tid = le64_to_cpu(m->client_tid);
+ int drop = 0;
+ int i;
+
+ for (i = 0; i < CEPH_CAP_BITS; i++)
+ if ((dirty & (1 << i)) &&
+ flush_tid == ci->i_cap_flush_tid[i])
+ cleaned |= 1 << i;
+
+ dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
+ " flushing %s -> %s\n",
+ inode, session->s_mds, seq, ceph_cap_string(dirty),
+ ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
+ ceph_cap_string(ci->i_flushing_caps & ~cleaned));
+
+ if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned))
+ goto out;
+
+ ci->i_flushing_caps &= ~cleaned;
+
+ spin_lock(&mdsc->cap_dirty_lock);
+ if (ci->i_flushing_caps == 0) {
+ list_del_init(&ci->i_flushing_item);
+ if (!list_empty(&session->s_cap_flushing))
+ dout(" mds%d still flushing cap on %p\n",
+ session->s_mds,
+ &list_entry(session->s_cap_flushing.next,
+ struct ceph_inode_info,
+ i_flushing_item)->vfs_inode);
+ mdsc->num_cap_flushing--;
+ wake_up(&mdsc->cap_flushing_wq);
+ dout(" inode %p now !flushing\n", inode);
+
+ if (ci->i_dirty_caps == 0) {
+ dout(" inode %p now clean\n", inode);
+ BUG_ON(!list_empty(&ci->i_dirty_item));
+ drop = 1;
+ } else {
+ BUG_ON(list_empty(&ci->i_dirty_item));
+ }
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+ wake_up(&ci->i_cap_wq);
+
+out:
+ spin_unlock(&inode->i_lock);
+ if (drop)
+ iput(inode);
+}
+
+/*
+ * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
+ * throw away our cap_snap.
+ *
+ * Caller hold s_mutex.
+ */
+static void handle_cap_flushsnap_ack(struct inode *inode,
+ struct ceph_mds_caps *m,
+ struct ceph_mds_session *session)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 follows = le64_to_cpu(m->snap_follows);
+ u64 flush_tid = le64_to_cpu(m->client_tid);
+ struct ceph_cap_snap *capsnap;
+ int drop = 0;
+
+ dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
+ inode, ci, session->s_mds, follows);
+
+ spin_lock(&inode->i_lock);
+ list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
+ if (capsnap->follows == follows) {
+ if (capsnap->flush_tid != flush_tid) {
+ dout(" cap_snap %p follows %lld tid %lld !="
+ " %lld\n", capsnap, follows,
+ flush_tid, capsnap->flush_tid);
+ break;
+ }
+ WARN_ON(capsnap->dirty_pages || capsnap->writing);
+ dout(" removing cap_snap %p follows %lld\n",
+ capsnap, follows);
+ ceph_put_snap_context(capsnap->context);
+ list_del(&capsnap->ci_item);
+ list_del(&capsnap->flushing_item);
+ ceph_put_cap_snap(capsnap);
+ drop = 1;
+ break;
+ } else {
+ dout(" skipping cap_snap %p follows %lld\n",
+ capsnap, capsnap->follows);
+ }
+ }
+ spin_unlock(&inode->i_lock);
+ if (drop)
+ iput(inode);
+}
+
+/*
+ * Handle TRUNC from MDS, indicating file truncation.
+ *
+ * caller hold s_mutex.
+ */
+static void handle_cap_trunc(struct inode *inode,
+ struct ceph_mds_caps *trunc,
+ struct ceph_mds_session *session)
+ __releases(inode->i_lock)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int mds = session->s_mds;
+ int seq = le32_to_cpu(trunc->seq);
+ u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
+ u64 truncate_size = le64_to_cpu(trunc->truncate_size);
+ u64 size = le64_to_cpu(trunc->size);
+ int implemented = 0;
+ int dirty = __ceph_caps_dirty(ci);
+ int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
+ int queue_trunc = 0;
+
+ issued |= implemented | dirty;
+
+ dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
+ inode, mds, seq, truncate_size, truncate_seq);
+ queue_trunc = ceph_fill_file_size(inode, issued,
+ truncate_seq, truncate_size, size);
+ spin_unlock(&inode->i_lock);
+
+ if (queue_trunc)
+ if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
+ &ci->i_vmtruncate_work))
+ igrab(inode);
+}
+
+/*
+ * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
+ * different one. If we are the most recent migration we've seen (as
+ * indicated by mseq), make note of the migrating cap bits for the
+ * duration (until we see the corresponding IMPORT).
+ *
+ * caller holds s_mutex
+ */
+static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
+ struct ceph_mds_session *session)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int mds = session->s_mds;
+ unsigned mseq = le32_to_cpu(ex->migrate_seq);
+ struct ceph_cap *cap = NULL, *t;
+ struct rb_node *p;
+ int remember = 1;
+
+ dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
+ inode, ci, mds, mseq);
+
+ spin_lock(&inode->i_lock);
+
+ /* make sure we haven't seen a higher mseq */
+ for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
+ t = rb_entry(p, struct ceph_cap, ci_node);
+ if (ceph_seq_cmp(t->mseq, mseq) > 0) {
+ dout(" higher mseq on cap from mds%d\n",
+ t->session->s_mds);
+ remember = 0;
+ }
+ if (t->session->s_mds == mds)
+ cap = t;
+ }
+
+ if (cap) {
+ if (remember) {
+ /* make note */
+ ci->i_cap_exporting_mds = mds;
+ ci->i_cap_exporting_mseq = mseq;
+ ci->i_cap_exporting_issued = cap->issued;
+ }
+ __ceph_remove_cap(cap, NULL);
+ } else {
+ WARN_ON(!cap);
+ }
+
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Handle cap IMPORT. If there are temp bits from an older EXPORT,
+ * clean them up.
+ *
+ * caller holds s_mutex.
+ */
+static void handle_cap_import(struct ceph_mds_client *mdsc,
+ struct inode *inode, struct ceph_mds_caps *im,
+ struct ceph_mds_session *session,
+ void *snaptrace, int snaptrace_len)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int mds = session->s_mds;
+ unsigned issued = le32_to_cpu(im->caps);
+ unsigned wanted = le32_to_cpu(im->wanted);
+ unsigned seq = le32_to_cpu(im->seq);
+ unsigned mseq = le32_to_cpu(im->migrate_seq);
+ u64 realmino = le64_to_cpu(im->realm);
+ u64 cap_id = le64_to_cpu(im->cap_id);
+
+ if (ci->i_cap_exporting_mds >= 0 &&
+ ceph_seq_cmp(ci->i_cap_exporting_mseq, mseq) < 0) {
+ dout("handle_cap_import inode %p ci %p mds%d mseq %d"
+ " - cleared exporting from mds%d\n",
+ inode, ci, mds, mseq,
+ ci->i_cap_exporting_mds);
+ ci->i_cap_exporting_issued = 0;
+ ci->i_cap_exporting_mseq = 0;
+ ci->i_cap_exporting_mds = -1;
+ } else {
+ dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
+ inode, ci, mds, mseq);
+ }
+
+ down_write(&mdsc->snap_rwsem);
+ ceph_update_snap_trace(mdsc, snaptrace, snaptrace+snaptrace_len,
+ false);
+ downgrade_write(&mdsc->snap_rwsem);
+ ceph_add_cap(inode, session, cap_id, -1,
+ issued, wanted, seq, mseq, realmino, CEPH_CAP_FLAG_AUTH,
+ NULL /* no caps context */);
+ try_flush_caps(inode, session, NULL);
+ up_read(&mdsc->snap_rwsem);
+}
+
+/*
+ * Handle a caps message from the MDS.
+ *
+ * Identify the appropriate session, inode, and call the right handler
+ * based on the cap op.
+ */
+void ceph_handle_caps(struct ceph_mds_session *session,
+ struct ceph_msg *msg)
+{
+ struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct super_block *sb = mdsc->client->sb;
+ struct inode *inode;
+ struct ceph_cap *cap;
+ struct ceph_mds_caps *h;
+ int mds = le64_to_cpu(msg->hdr.src.name.num);
+ int op;
+ u32 seq;
+ struct ceph_vino vino;
+ u64 cap_id;
+ u64 size, max_size;
+ int check_caps = 0;
+ int r;
+
+ dout("handle_caps from mds%d\n", mds);
+
+ /* decode */
+ if (msg->front.iov_len < sizeof(*h))
+ goto bad;
+ h = msg->front.iov_base;
+ op = le32_to_cpu(h->op);
+ vino.ino = le64_to_cpu(h->ino);
+ vino.snap = CEPH_NOSNAP;
+ cap_id = le64_to_cpu(h->cap_id);
+ seq = le32_to_cpu(h->seq);
+ size = le64_to_cpu(h->size);
+ max_size = le64_to_cpu(h->max_size);
+
+ mutex_lock(&session->s_mutex);
+ session->s_seq++;
+ dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
+ (unsigned)seq);
+
+ /* lookup ino */
+ inode = ceph_find_inode(sb, vino);
+ dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
+ vino.snap, inode);
+ if (!inode) {
+ dout(" i don't have ino %llx\n", vino.ino);
+ goto done;
+ }
+
+ /* these will work even if we don't have a cap yet */
+ switch (op) {
+ case CEPH_CAP_OP_FLUSHSNAP_ACK:
+ handle_cap_flushsnap_ack(inode, h, session);
+ goto done;
+
+ case CEPH_CAP_OP_EXPORT:
+ handle_cap_export(inode, h, session);
+ goto done;
+
+ case CEPH_CAP_OP_IMPORT:
+ handle_cap_import(mdsc, inode, h, session,
+ msg->middle,
+ le32_to_cpu(h->snap_trace_len));
+ check_caps = 1; /* we may have sent a RELEASE to the old auth */
+ goto done;
+ }
+
+ /* the rest require a cap */
+ spin_lock(&inode->i_lock);
+ cap = __get_cap_for_mds(ceph_inode(inode), mds);
+ if (!cap) {
+ dout("no cap on %p ino %llx.%llx from mds%d, releasing\n",
+ inode, ceph_ino(inode), ceph_snap(inode), mds);
+ spin_unlock(&inode->i_lock);
+ goto done;
+ }
+
+ /* note that each of these drops i_lock for us */
+ switch (op) {
+ case CEPH_CAP_OP_REVOKE:
+ case CEPH_CAP_OP_GRANT:
+ r = handle_cap_grant(inode, h, session, cap, msg->middle);
+ if (r == 1)
+ ceph_check_caps(ceph_inode(inode),
+ CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
+ session);
+ else if (r == 2)
+ ceph_check_caps(ceph_inode(inode),
+ CHECK_CAPS_NODELAY,
+ session);
+ break;
+
+ case CEPH_CAP_OP_FLUSH_ACK:
+ handle_cap_flush_ack(inode, h, session, cap);
+ break;
+
+ case CEPH_CAP_OP_TRUNC:
+ handle_cap_trunc(inode, h, session);
+ break;
+
+ default:
+ spin_unlock(&inode->i_lock);
+ pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
+ ceph_cap_op_name(op));
+ }
+
+done:
+ mutex_unlock(&session->s_mutex);
+
+ if (check_caps)
+ ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL);
+ if (inode)
+ iput(inode);
+ return;
+
+bad:
+ pr_err("ceph_handle_caps: corrupt message\n");
+ return;
+}
+
+/*
+ * Delayed work handler to process end of delayed cap release LRU list.
+ */
+void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
+{
+ struct ceph_inode_info *ci;
+ int flags = CHECK_CAPS_NODELAY;
+
+ dout("check_delayed_caps\n");
+ while (1) {
+ spin_lock(&mdsc->cap_delay_lock);
+ if (list_empty(&mdsc->cap_delay_list))
+ break;
+ ci = list_first_entry(&mdsc->cap_delay_list,
+ struct ceph_inode_info,
+ i_cap_delay_list);
+ if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
+ time_before(jiffies, ci->i_hold_caps_max))
+ break;
+ list_del_init(&ci->i_cap_delay_list);
+ spin_unlock(&mdsc->cap_delay_lock);
+ dout("check_delayed_caps on %p\n", &ci->vfs_inode);
+ ceph_check_caps(ci, flags, NULL);
+ }
+ spin_unlock(&mdsc->cap_delay_lock);
+}
+
+/*
+ * Flush all dirty caps to the mds
+ */
+void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
+{
+ struct ceph_inode_info *ci;
+ struct inode *inode;
+
+ dout("flush_dirty_caps\n");
+ spin_lock(&mdsc->cap_dirty_lock);
+ while (!list_empty(&mdsc->cap_dirty)) {
+ ci = list_first_entry(&mdsc->cap_dirty,
+ struct ceph_inode_info,
+ i_dirty_item);
+ inode = igrab(&ci->vfs_inode);
+ spin_unlock(&mdsc->cap_dirty_lock);
+ if (inode) {
+ ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
+ NULL);
+ iput(inode);
+ }
+ spin_lock(&mdsc->cap_dirty_lock);
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+}
+
+/*
+ * Drop open file reference. If we were the last open file,
+ * we may need to release capabilities to the MDS (or schedule
+ * their delayed release).
+ */
+void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
+{
+ struct inode *inode = &ci->vfs_inode;
+ int last = 0;
+
+ spin_lock(&inode->i_lock);
+ dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
+ ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
+ BUG_ON(ci->i_nr_by_mode[fmode] == 0);
+ if (--ci->i_nr_by_mode[fmode] == 0)
+ last++;
+ spin_unlock(&inode->i_lock);
+
+ if (last && ci->i_vino.snap == CEPH_NOSNAP)
+ ceph_check_caps(ci, 0, NULL);
+}
+
+/*
+ * Helpers for embedding cap and dentry lease releases into mds
+ * requests.
+ *
+ * @force is used by dentry_release (below) to force inclusion of a
+ * record for the directory inode, even when there aren't any caps to
+ * drop.
+ */
+int ceph_encode_inode_release(void **p, struct inode *inode,
+ int mds, int drop, int unless, int force)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap *cap;
+ struct ceph_mds_request_release *rel = *p;
+ int ret = 0;
+
+ dout("encode_inode_release %p mds%d drop %s unless %s\n", inode,
+ mds, ceph_cap_string(drop), ceph_cap_string(unless));
+
+ spin_lock(&inode->i_lock);
+ cap = __get_cap_for_mds(ci, mds);
+ if (cap && __cap_is_valid(cap)) {
+ if (force ||
+ ((cap->issued & drop) &&
+ (cap->issued & unless) == 0)) {
+ if ((cap->issued & drop) &&
+ (cap->issued & unless) == 0) {
+ dout("encode_inode_release %p cap %p %s -> "
+ "%s\n", inode, cap,
+ ceph_cap_string(cap->issued),
+ ceph_cap_string(cap->issued & ~drop));
+ cap->issued &= ~drop;
+ cap->implemented &= ~drop;
+ if (ci->i_ceph_flags & CEPH_I_NODELAY) {
+ int wanted = __ceph_caps_wanted(ci);
+ dout(" wanted %s -> %s (act %s)\n",
+ ceph_cap_string(cap->mds_wanted),
+ ceph_cap_string(cap->mds_wanted &
+ ~wanted),
+ ceph_cap_string(wanted));
+ cap->mds_wanted &= wanted;
+ }
+ } else {
+ dout("encode_inode_release %p cap %p %s"
+ " (force)\n", inode, cap,
+ ceph_cap_string(cap->issued));
+ }
+
+ rel->ino = cpu_to_le64(ceph_ino(inode));
+ rel->cap_id = cpu_to_le64(cap->cap_id);
+ rel->seq = cpu_to_le32(cap->seq);
+ rel->issue_seq = cpu_to_le32(cap->issue_seq),
+ rel->mseq = cpu_to_le32(cap->mseq);
+ rel->caps = cpu_to_le32(cap->issued);
+ rel->wanted = cpu_to_le32(cap->mds_wanted);
+ rel->dname_len = 0;
+ rel->dname_seq = 0;
+ *p += sizeof(*rel);
+ ret = 1;
+ } else {
+ dout("encode_inode_release %p cap %p %s\n",
+ inode, cap, ceph_cap_string(cap->issued));
+ }
+ }
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
+
+int ceph_encode_dentry_release(void **p, struct dentry *dentry,
+ int mds, int drop, int unless)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct ceph_mds_request_release *rel = *p;
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ int force = 0;
+ int ret;
+
+ /*
+ * force an record for the directory caps if we have a dentry lease.
+ * this is racy (can't take i_lock and d_lock together), but it
+ * doesn't have to be perfect; the mds will revoke anything we don't
+ * release.
+ */
+ spin_lock(&dentry->d_lock);
+ if (di->lease_session && di->lease_session->s_mds == mds)
+ force = 1;
+ spin_unlock(&dentry->d_lock);
+
+ ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
+
+ spin_lock(&dentry->d_lock);
+ if (ret && di->lease_session && di->lease_session->s_mds == mds) {
+ dout("encode_dentry_release %p mds%d seq %d\n",
+ dentry, mds, (int)di->lease_seq);
+ rel->dname_len = cpu_to_le32(dentry->d_name.len);
+ memcpy(*p, dentry->d_name.name, dentry->d_name.len);
+ *p += dentry->d_name.len;
+ rel->dname_seq = cpu_to_le32(di->lease_seq);
+ }
+ spin_unlock(&dentry->d_lock);
+ return ret;
+}
diff --git a/fs/ceph/ceph_debug.h b/fs/ceph/ceph_debug.h
new file mode 100644
index 000000000000..1818c2305610
--- /dev/null
+++ b/fs/ceph/ceph_debug.h
@@ -0,0 +1,37 @@
+#ifndef _FS_CEPH_DEBUG_H
+#define _FS_CEPH_DEBUG_H
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#ifdef CONFIG_CEPH_FS_PRETTYDEBUG
+
+/*
+ * wrap pr_debug to include a filename:lineno prefix on each line.
+ * this incurs some overhead (kernel size and execution time) due to
+ * the extra function call at each call site.
+ */
+
+# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+extern const char *ceph_file_part(const char *s, int len);
+# define dout(fmt, ...) \
+ pr_debug(" %12.12s:%-4d : " fmt, \
+ ceph_file_part(__FILE__, sizeof(__FILE__)), \
+ __LINE__, ##__VA_ARGS__)
+# else
+/* faux printk call just to see any compiler warnings. */
+# define dout(fmt, ...) do { \
+ if (0) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+ } while (0)
+# endif
+
+#else
+
+/*
+ * or, just wrap pr_debug
+ */
+# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
+
+#endif
+
+#endif
diff --git a/fs/ceph/ceph_frag.c b/fs/ceph/ceph_frag.c
new file mode 100644
index 000000000000..ab6cf35c4091
--- /dev/null
+++ b/fs/ceph/ceph_frag.c
@@ -0,0 +1,21 @@
+/*
+ * Ceph 'frag' type
+ */
+#include "types.h"
+
+int ceph_frag_compare(__u32 a, __u32 b)
+{
+ unsigned va = ceph_frag_value(a);
+ unsigned vb = ceph_frag_value(b);
+ if (va < vb)
+ return -1;
+ if (va > vb)
+ return 1;
+ va = ceph_frag_bits(a);
+ vb = ceph_frag_bits(b);
+ if (va < vb)
+ return -1;
+ if (va > vb)
+ return 1;
+ return 0;
+}
diff --git a/fs/ceph/ceph_frag.h b/fs/ceph/ceph_frag.h
new file mode 100644
index 000000000000..793f50cb7c22
--- /dev/null
+++ b/fs/ceph/ceph_frag.h
@@ -0,0 +1,109 @@
+#ifndef _FS_CEPH_FRAG_H
+#define _FS_CEPH_FRAG_H
+
+/*
+ * "Frags" are a way to describe a subset of a 32-bit number space,
+ * using a mask and a value to match against that mask. Any given frag
+ * (subset of the number space) can be partitioned into 2^n sub-frags.
+ *
+ * Frags are encoded into a 32-bit word:
+ * 8 upper bits = "bits"
+ * 24 lower bits = "value"
+ * (We could go to 5+27 bits, but who cares.)
+ *
+ * We use the _most_ significant bits of the 24 bit value. This makes
+ * values logically sort.
+ *
+ * Unfortunately, because the "bits" field is still in the high bits, we
+ * can't sort encoded frags numerically. However, it does allow you
+ * to feed encoded frags as values into frag_contains_value.
+ */
+static inline __u32 ceph_frag_make(__u32 b, __u32 v)
+{
+ return (b << 24) |
+ (v & (0xffffffu << (24-b)) & 0xffffffu);
+}
+static inline __u32 ceph_frag_bits(__u32 f)
+{
+ return f >> 24;
+}
+static inline __u32 ceph_frag_value(__u32 f)
+{
+ return f & 0xffffffu;
+}
+static inline __u32 ceph_frag_mask(__u32 f)
+{
+ return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu;
+}
+static inline __u32 ceph_frag_mask_shift(__u32 f)
+{
+ return 24 - ceph_frag_bits(f);
+}
+
+static inline int ceph_frag_contains_value(__u32 f, __u32 v)
+{
+ return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
+}
+static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
+{
+ /* is sub as specific as us, and contained by us? */
+ return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
+ (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
+}
+
+static inline __u32 ceph_frag_parent(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f) - 1,
+ ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
+}
+static inline int ceph_frag_is_left_child(__u32 f)
+{
+ return ceph_frag_bits(f) > 0 &&
+ (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
+}
+static inline int ceph_frag_is_right_child(__u32 f)
+{
+ return ceph_frag_bits(f) > 0 &&
+ (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1;
+}
+static inline __u32 ceph_frag_sibling(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f),
+ ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
+}
+static inline __u32 ceph_frag_left_child(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
+}
+static inline __u32 ceph_frag_right_child(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f)+1,
+ ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
+}
+static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
+{
+ int newbits = ceph_frag_bits(f) + by;
+ return ceph_frag_make(newbits,
+ ceph_frag_value(f) | (i << (24 - newbits)));
+}
+static inline int ceph_frag_is_leftmost(__u32 f)
+{
+ return ceph_frag_value(f) == 0;
+}
+static inline int ceph_frag_is_rightmost(__u32 f)
+{
+ return ceph_frag_value(f) == ceph_frag_mask(f);
+}
+static inline __u32 ceph_frag_next(__u32 f)
+{
+ return ceph_frag_make(ceph_frag_bits(f),
+ ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f)));
+}
+
+/*
+ * comparator to sort frags logically, as when traversing the
+ * number space in ascending order...
+ */
+int ceph_frag_compare(__u32 a, __u32 b);
+
+#endif
diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c
new file mode 100644
index 000000000000..79d76bc4303f
--- /dev/null
+++ b/fs/ceph/ceph_fs.c
@@ -0,0 +1,74 @@
+/*
+ * Some non-inline ceph helpers
+ */
+#include "types.h"
+
+/*
+ * return true if @layout appears to be valid
+ */
+int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
+{
+ __u32 su = le32_to_cpu(layout->fl_stripe_unit);
+ __u32 sc = le32_to_cpu(layout->fl_stripe_count);
+ __u32 os = le32_to_cpu(layout->fl_object_size);
+
+ /* stripe unit, object size must be non-zero, 64k increment */
+ if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
+ return 0;
+ if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
+ return 0;
+ /* object size must be a multiple of stripe unit */
+ if (os < su || os % su)
+ return 0;
+ /* stripe count must be non-zero */
+ if (!sc)
+ return 0;
+ return 1;
+}
+
+
+int ceph_flags_to_mode(int flags)
+{
+#ifdef O_DIRECTORY /* fixme */
+ if ((flags & O_DIRECTORY) == O_DIRECTORY)
+ return CEPH_FILE_MODE_PIN;
+#endif
+#ifdef O_LAZY
+ if (flags & O_LAZY)
+ return CEPH_FILE_MODE_LAZY;
+#endif
+ if ((flags & O_APPEND) == O_APPEND)
+ flags |= O_WRONLY;
+
+ flags &= O_ACCMODE;
+ if ((flags & O_RDWR) == O_RDWR)
+ return CEPH_FILE_MODE_RDWR;
+ if ((flags & O_WRONLY) == O_WRONLY)
+ return CEPH_FILE_MODE_WR;
+ return CEPH_FILE_MODE_RD;
+}
+
+int ceph_caps_for_mode(int mode)
+{
+ switch (mode) {
+ case CEPH_FILE_MODE_PIN:
+ return CEPH_CAP_PIN;
+ case CEPH_FILE_MODE_RD:
+ return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
+ case CEPH_FILE_MODE_RDWR:
+ return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
+ CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE |
+ CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
+ CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
+ case CEPH_FILE_MODE_WR:
+ return CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
+ CEPH_CAP_FILE_EXCL |
+ CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
+ CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
+ CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
+ }
+ return 0;
+}
diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h
new file mode 100644
index 000000000000..e2fd0247827e
--- /dev/null
+++ b/fs/ceph/ceph_fs.h
@@ -0,0 +1,648 @@
+/*
+ * ceph_fs.h - Ceph constants and data types to share between kernel and
+ * user space.
+ *
+ * Most types in this file are defined as little-endian, and are
+ * primarily intended to describe data structures that pass over the
+ * wire or that are stored on disk.
+ *
+ * LGPL2
+ */
+
+#ifndef _FS_CEPH_CEPH_FS_H
+#define _FS_CEPH_CEPH_FS_H
+
+#include "msgr.h"
+#include "rados.h"
+
+/*
+ * Ceph release version
+ */
+#define CEPH_VERSION_MAJOR 0
+#define CEPH_VERSION_MINOR 18
+#define CEPH_VERSION_PATCH 0
+
+#define _CEPH_STRINGIFY(x) #x
+#define CEPH_STRINGIFY(x) _CEPH_STRINGIFY(x)
+#define CEPH_MAKE_VERSION(x, y, z) CEPH_STRINGIFY(x) "." CEPH_STRINGIFY(y) \
+ "." CEPH_STRINGIFY(z)
+#define CEPH_VERSION CEPH_MAKE_VERSION(CEPH_VERSION_MAJOR, \
+ CEPH_VERSION_MINOR, CEPH_VERSION_PATCH)
+
+/*
+ * subprotocol versions. when specific messages types or high-level
+ * protocols change, bump the affected components. we keep rev
+ * internal cluster protocols separately from the public,
+ * client-facing protocol.
+ */
+#define CEPH_OSD_PROTOCOL 7 /* cluster internal */
+#define CEPH_MDS_PROTOCOL 9 /* cluster internal */
+#define CEPH_MON_PROTOCOL 5 /* cluster internal */
+#define CEPH_OSDC_PROTOCOL 22 /* server/client */
+#define CEPH_MDSC_PROTOCOL 30 /* server/client */
+#define CEPH_MONC_PROTOCOL 15 /* server/client */
+
+
+#define CEPH_INO_ROOT 1
+#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
+
+/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
+#define CEPH_MAX_MON 31
+
+
+
+/*
+ * ceph_file_layout - describe data layout for a file/inode
+ */
+struct ceph_file_layout {
+ /* file -> object mapping */
+ __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple
+ of page size. */
+ __le32 fl_stripe_count; /* over this many objects */
+ __le32 fl_object_size; /* until objects are this big, then move to
+ new objects */
+ __le32 fl_cas_hash; /* 0 = none; 1 = sha256 */
+
+ /* pg -> disk layout */
+ __le32 fl_object_stripe_unit; /* for per-object parity, if any */
+
+ /* object -> pg layout */
+ __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
+ __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
+} __attribute__ ((packed));
+
+#define CEPH_MIN_STRIPE_UNIT 65536
+
+int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
+
+
+/* crypto algorithms */
+#define CEPH_CRYPTO_NONE 0x0
+#define CEPH_CRYPTO_AES 0x1
+
+/* security/authentication protocols */
+#define CEPH_AUTH_UNKNOWN 0x0
+#define CEPH_AUTH_NONE 0x1
+#define CEPH_AUTH_CEPHX 0x2
+
+
+/*********************************************
+ * message layer
+ */
+
+/*
+ * message types
+ */
+
+/* misc */
+#define CEPH_MSG_SHUTDOWN 1
+#define CEPH_MSG_PING 2
+
+/* client <-> monitor */
+#define CEPH_MSG_MON_MAP 4
+#define CEPH_MSG_MON_GET_MAP 5
+#define CEPH_MSG_STATFS 13
+#define CEPH_MSG_STATFS_REPLY 14
+#define CEPH_MSG_MON_SUBSCRIBE 15
+#define CEPH_MSG_MON_SUBSCRIBE_ACK 16
+#define CEPH_MSG_AUTH 17
+#define CEPH_MSG_AUTH_REPLY 18
+
+/* client <-> mds */
+#define CEPH_MSG_MDS_MAP 21
+
+#define CEPH_MSG_CLIENT_SESSION 22
+#define CEPH_MSG_CLIENT_RECONNECT 23
+
+#define CEPH_MSG_CLIENT_REQUEST 24
+#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
+#define CEPH_MSG_CLIENT_REPLY 26
+#define CEPH_MSG_CLIENT_CAPS 0x310
+#define CEPH_MSG_CLIENT_LEASE 0x311
+#define CEPH_MSG_CLIENT_SNAP 0x312
+#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
+
+/* osd */
+#define CEPH_MSG_OSD_MAP 41
+#define CEPH_MSG_OSD_OP 42
+#define CEPH_MSG_OSD_OPREPLY 43
+
+struct ceph_mon_request_header {
+ __le64 have_version;
+ __le16 session_mon;
+ __le64 session_mon_tid;
+} __attribute__ ((packed));
+
+struct ceph_mon_statfs {
+ struct ceph_mon_request_header monhdr;
+ struct ceph_fsid fsid;
+ __le64 tid;
+} __attribute__ ((packed));
+
+struct ceph_statfs {
+ __le64 kb, kb_used, kb_avail;
+ __le64 num_objects;
+} __attribute__ ((packed));
+
+struct ceph_mon_statfs_reply {
+ struct ceph_fsid fsid;
+ __le64 tid;
+ __le64 version;
+ struct ceph_statfs st;
+} __attribute__ ((packed));
+
+struct ceph_osd_getmap {
+ struct ceph_mon_request_header monhdr;
+ struct ceph_fsid fsid;
+ __le32 start;
+} __attribute__ ((packed));
+
+struct ceph_mds_getmap {
+ struct ceph_mon_request_header monhdr;
+ struct ceph_fsid fsid;
+} __attribute__ ((packed));
+
+struct ceph_client_mount {
+ struct ceph_mon_request_header monhdr;
+} __attribute__ ((packed));
+
+struct ceph_mon_subscribe_item {
+ __le64 have_version; __le64 have;
+ __u8 onetime;
+} __attribute__ ((packed));
+
+struct ceph_mon_subscribe_ack {
+ __le32 duration; /* seconds */
+ struct ceph_fsid fsid;
+} __attribute__ ((packed));
+
+/*
+ * mds states
+ * > 0 -> in
+ * <= 0 -> out
+ */
+#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */
+#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees.
+ empty log. */
+#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */
+#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */
+#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */
+#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */
+#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
+
+#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */
+#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed
+ operations (import, rename, etc.) */
+#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */
+#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */
+#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */
+#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */
+#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */
+
+extern const char *ceph_mds_state_name(int s);
+
+
+/*
+ * metadata lock types.
+ * - these are bitmasks.. we can compose them
+ * - they also define the lock ordering by the MDS
+ * - a few of these are internal to the mds
+ */
+#define CEPH_LOCK_DN 1
+#define CEPH_LOCK_ISNAP 2
+#define CEPH_LOCK_IVERSION 4 /* mds internal */
+#define CEPH_LOCK_IFILE 8 /* mds internal */
+#define CEPH_LOCK_IAUTH 32
+#define CEPH_LOCK_ILINK 64
+#define CEPH_LOCK_IDFT 128 /* dir frag tree */
+#define CEPH_LOCK_INEST 256 /* mds internal */
+#define CEPH_LOCK_IXATTR 512
+#define CEPH_LOCK_INO 2048 /* immutable inode bits; not a lock */
+
+/* client_session ops */
+enum {
+ CEPH_SESSION_REQUEST_OPEN,
+ CEPH_SESSION_OPEN,
+ CEPH_SESSION_REQUEST_CLOSE,
+ CEPH_SESSION_CLOSE,
+ CEPH_SESSION_REQUEST_RENEWCAPS,
+ CEPH_SESSION_RENEWCAPS,
+ CEPH_SESSION_STALE,
+ CEPH_SESSION_RECALL_STATE,
+};
+
+extern const char *ceph_session_op_name(int op);
+
+struct ceph_mds_session_head {
+ __le32 op;
+ __le64 seq;
+ struct ceph_timespec stamp;
+ __le32 max_caps, max_leases;
+} __attribute__ ((packed));
+
+/* client_request */
+/*
+ * metadata ops.
+ * & 0x001000 -> write op
+ * & 0x010000 -> follow symlink (e.g. stat(), not lstat()).
+ & & 0x100000 -> use weird ino/path trace
+ */
+#define CEPH_MDS_OP_WRITE 0x001000
+enum {
+ CEPH_MDS_OP_LOOKUP = 0x00100,
+ CEPH_MDS_OP_GETATTR = 0x00101,
+ CEPH_MDS_OP_LOOKUPHASH = 0x00102,
+ CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
+
+ CEPH_MDS_OP_SETXATTR = 0x01105,
+ CEPH_MDS_OP_RMXATTR = 0x01106,
+ CEPH_MDS_OP_SETLAYOUT = 0x01107,
+ CEPH_MDS_OP_SETATTR = 0x01108,
+
+ CEPH_MDS_OP_MKNOD = 0x01201,
+ CEPH_MDS_OP_LINK = 0x01202,
+ CEPH_MDS_OP_UNLINK = 0x01203,
+ CEPH_MDS_OP_RENAME = 0x01204,
+ CEPH_MDS_OP_MKDIR = 0x01220,
+ CEPH_MDS_OP_RMDIR = 0x01221,
+ CEPH_MDS_OP_SYMLINK = 0x01222,
+
+ CEPH_MDS_OP_CREATE = 0x00301,
+ CEPH_MDS_OP_OPEN = 0x00302,
+ CEPH_MDS_OP_READDIR = 0x00305,
+
+ CEPH_MDS_OP_LOOKUPSNAP = 0x00400,
+ CEPH_MDS_OP_MKSNAP = 0x01400,
+ CEPH_MDS_OP_RMSNAP = 0x01401,
+ CEPH_MDS_OP_LSSNAP = 0x00402,
+};
+
+extern const char *ceph_mds_op_name(int op);
+
+
+#define CEPH_SETATTR_MODE 1
+#define CEPH_SETATTR_UID 2
+#define CEPH_SETATTR_GID 4
+#define CEPH_SETATTR_MTIME 8
+#define CEPH_SETATTR_ATIME 16
+#define CEPH_SETATTR_SIZE 32
+#define CEPH_SETATTR_CTIME 64
+
+union ceph_mds_request_args {
+ struct {
+ __le32 mask; /* CEPH_CAP_* */
+ } __attribute__ ((packed)) getattr;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ } __attribute__ ((packed)) setattr;
+ struct {
+ __le32 frag; /* which dir fragment */
+ __le32 max_entries; /* how many dentries to grab */
+ } __attribute__ ((packed)) readdir;
+ struct {
+ __le32 mode;
+ __le32 rdev;
+ } __attribute__ ((packed)) mknod;
+ struct {
+ __le32 mode;
+ } __attribute__ ((packed)) mkdir;
+ struct {
+ __le32 flags;
+ __le32 mode;
+ __le32 stripe_unit; /* layout for newly created file */
+ __le32 stripe_count; /* ... */
+ __le32 object_size;
+ __le32 file_replication;
+ __le32 preferred;
+ } __attribute__ ((packed)) open;
+ struct {
+ __le32 flags;
+ } __attribute__ ((packed)) setxattr;
+ struct {
+ struct ceph_file_layout layout;
+ } __attribute__ ((packed)) setlayout;
+} __attribute__ ((packed));
+
+#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
+#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
+
+struct ceph_mds_request_head {
+ __le64 tid, oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* count retry, fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args args;
+} __attribute__ ((packed));
+
+/* cap/lease release record */
+struct ceph_mds_request_release {
+ __le64 ino, cap_id; /* ino and unique cap id */
+ __le32 caps, wanted; /* new issued, wanted */
+ __le32 seq, issue_seq, mseq;
+ __le32 dname_seq; /* if releasing a dentry lease, a */
+ __le32 dname_len; /* string follows. */
+} __attribute__ ((packed));
+
+/* client reply */
+struct ceph_mds_reply_head {
+ __le64 tid;
+ __le32 op;
+ __le32 result;
+ __le32 mdsmap_epoch;
+ __u8 safe; /* true if committed to disk */
+ __u8 is_dentry, is_target; /* true if dentry, target inode records
+ are included with reply */
+} __attribute__ ((packed));
+
+/* one for each node split */
+struct ceph_frag_tree_split {
+ __le32 frag; /* this frag splits... */
+ __le32 by; /* ...by this many bits */
+} __attribute__ ((packed));
+
+struct ceph_frag_tree_head {
+ __le32 nsplits; /* num ceph_frag_tree_split records */
+ struct ceph_frag_tree_split splits[];
+} __attribute__ ((packed));
+
+/* capability issue, for bundling with mds reply */
+struct ceph_mds_reply_cap {
+ __le32 caps, wanted; /* caps issued, wanted */
+ __le64 cap_id;
+ __le32 seq, mseq;
+ __le64 realm; /* snap realm */
+ __u8 flags; /* CEPH_CAP_FLAG_* */
+} __attribute__ ((packed));
+
+#define CEPH_CAP_FLAG_AUTH 1 /* cap is issued by auth mds */
+
+/* inode record, for bundling with mds reply */
+struct ceph_mds_reply_inode {
+ __le64 ino;
+ __le64 snapid;
+ __le32 rdev;
+ __le64 version; /* inode version */
+ __le64 xattr_version; /* version for xattr blob */
+ struct ceph_mds_reply_cap cap; /* caps issued for this inode */
+ struct ceph_file_layout layout;
+ struct ceph_timespec ctime, mtime, atime;
+ __le32 time_warp_seq;
+ __le64 size, max_size, truncate_size;
+ __le32 truncate_seq;
+ __le32 mode, uid, gid;
+ __le32 nlink;
+ __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */
+ struct ceph_timespec rctime;
+ struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */
+} __attribute__ ((packed));
+/* followed by frag array, then symlink string, then xattr blob */
+
+/* reply_lease follows dname, and reply_inode */
+struct ceph_mds_reply_lease {
+ __le16 mask; /* lease type(s) */
+ __le32 duration_ms; /* lease duration */
+ __le32 seq;
+} __attribute__ ((packed));
+
+struct ceph_mds_reply_dirfrag {
+ __le32 frag; /* fragment */
+ __le32 auth; /* auth mds, if this is a delegation point */
+ __le32 ndist; /* number of mds' this is replicated on */
+ __le32 dist[];
+} __attribute__ ((packed));
+
+/* file access modes */
+#define CEPH_FILE_MODE_PIN 0
+#define CEPH_FILE_MODE_RD 1
+#define CEPH_FILE_MODE_WR 2
+#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
+#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
+#define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. mostly */
+
+int ceph_flags_to_mode(int flags);
+
+
+/* capability bits */
+#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
+
+/* generic cap bits */
+#define CEPH_CAP_GSHARED 1 /* client can reads */
+#define CEPH_CAP_GEXCL 2 /* client can read and update */
+#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */
+#define CEPH_CAP_GRD 8 /* (file) client can read */
+#define CEPH_CAP_GWR 16 /* (file) client can write */
+#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */
+#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */
+#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */
+
+/* per-lock shift */
+#define CEPH_CAP_SAUTH 2
+#define CEPH_CAP_SLINK 4
+#define CEPH_CAP_SXATTR 6
+#define CEPH_CAP_SFILE 8 /* goes at the end (uses >2 cap bits) */
+
+#define CEPH_CAP_BITS 16
+
+/* composed values */
+#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH)
+#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH)
+#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK)
+#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK)
+#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR)
+#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR)
+#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE)
+#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE)
+
+/* cap masks (for getattr) */
+#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN
+#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */
+#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN
+#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED
+#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED
+#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED
+#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED
+#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED
+#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED
+#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED
+#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */
+#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED
+#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \
+ CEPH_CAP_AUTH_SHARED | \
+ CEPH_CAP_LINK_SHARED | \
+ CEPH_CAP_FILE_SHARED | \
+ CEPH_CAP_XATTR_SHARED)
+
+#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
+ CEPH_CAP_LINK_SHARED | \
+ CEPH_CAP_XATTR_SHARED | \
+ CEPH_CAP_FILE_SHARED)
+#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \
+ CEPH_CAP_FILE_CACHE)
+
+#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \
+ CEPH_CAP_LINK_EXCL | \
+ CEPH_CAP_XATTR_EXCL | \
+ CEPH_CAP_FILE_EXCL)
+#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \
+ CEPH_CAP_FILE_EXCL)
+#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
+#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
+ CEPH_CAP_ANY_FILE_WR | CEPH_CAP_PIN)
+
+#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
+ CEPH_LOCK_IXATTR)
+
+int ceph_caps_for_mode(int mode);
+
+enum {
+ CEPH_CAP_OP_GRANT, /* mds->client grant */
+ CEPH_CAP_OP_REVOKE, /* mds->client revoke */
+ CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */
+ CEPH_CAP_OP_EXPORT, /* mds has exported the cap */
+ CEPH_CAP_OP_IMPORT, /* mds has imported the cap */
+ CEPH_CAP_OP_UPDATE, /* client->mds update */
+ CEPH_CAP_OP_DROP, /* client->mds drop cap bits */
+ CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */
+ CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */
+ CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */
+ CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */
+ CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */
+ CEPH_CAP_OP_RENEW, /* client->mds renewal request */
+};
+
+extern const char *ceph_cap_op_name(int op);
+
+/*
+ * caps message, used for capability callbacks, acks, requests, etc.
+ */
+struct ceph_mds_caps {
+ __le32 op; /* CEPH_CAP_OP_* */
+ __le64 ino, realm;
+ __le64 cap_id;
+ __le32 seq, issue_seq;
+ __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */
+ __le32 migrate_seq;
+ __le64 snap_follows;
+ __le32 snap_trace_len;
+ __le64 client_tid; /* for FLUSH(SNAP) -> FLUSH(SNAP)_ACK */
+
+ /* authlock */
+ __le32 uid, gid, mode;
+
+ /* linklock */
+ __le32 nlink;
+
+ /* xattrlock */
+ __le32 xattr_len;
+ __le64 xattr_version;
+
+ /* filelock */
+ __le64 size, max_size, truncate_size;
+ __le32 truncate_seq;
+ struct ceph_timespec mtime, atime, ctime;
+ struct ceph_file_layout layout;
+ __le32 time_warp_seq;
+} __attribute__ ((packed));
+
+/* cap release msg head */
+struct ceph_mds_cap_release {
+ __le32 num; /* number of cap_items that follow */
+} __attribute__ ((packed));
+
+struct ceph_mds_cap_item {
+ __le64 ino;
+ __le64 cap_id;
+ __le32 migrate_seq, seq;
+} __attribute__ ((packed));
+
+#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */
+#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */
+#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */
+#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */
+
+extern const char *ceph_lease_op_name(int o);
+
+/* lease msg header */
+struct ceph_mds_lease {
+ __u8 action; /* CEPH_MDS_LEASE_* */
+ __le16 mask; /* which lease */
+ __le64 ino;
+ __le64 first, last; /* snap range */
+ __le32 seq;
+ __le32 duration_ms; /* duration of renewal */
+} __attribute__ ((packed));
+/* followed by a __le32+string for dname */
+
+/* client reconnect */
+struct ceph_mds_cap_reconnect {
+ __le64 cap_id;
+ __le32 wanted;
+ __le32 issued;
+ __le64 size;
+ struct ceph_timespec mtime, atime;
+ __le64 snaprealm;
+ __le64 pathbase; /* base ino for our path to this ino */
+} __attribute__ ((packed));
+/* followed by encoded string */
+
+struct ceph_mds_snaprealm_reconnect {
+ __le64 ino; /* snap realm base */
+ __le64 seq; /* snap seq for this snap realm */
+ __le64 parent; /* parent realm */
+} __attribute__ ((packed));
+
+/*
+ * snaps
+ */
+enum {
+ CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */
+ CEPH_SNAP_OP_CREATE,
+ CEPH_SNAP_OP_DESTROY,
+ CEPH_SNAP_OP_SPLIT,
+};
+
+extern const char *ceph_snap_op_name(int o);
+
+/* snap msg header */
+struct ceph_mds_snap_head {
+ __le32 op; /* CEPH_SNAP_OP_* */
+ __le64 split; /* ino to split off, if any */
+ __le32 num_split_inos; /* # inos belonging to new child realm */
+ __le32 num_split_realms; /* # child realms udner new child realm */
+ __le32 trace_len; /* size of snap trace blob */
+} __attribute__ ((packed));
+/* followed by split ino list, then split realms, then the trace blob */
+
+/*
+ * encode info about a snaprealm, as viewed by a client
+ */
+struct ceph_mds_snap_realm {
+ __le64 ino; /* ino */
+ __le64 created; /* snap: when created */
+ __le64 parent; /* ino: parent realm */
+ __le64 parent_since; /* snap: same parent since */
+ __le64 seq; /* snap: version */
+ __le32 num_snaps;
+ __le32 num_prior_parent_snaps;
+} __attribute__ ((packed));
+/* followed by my snap list, then prior parent snap list */
+
+#endif
diff --git a/fs/ceph/ceph_hash.c b/fs/ceph/ceph_hash.c
new file mode 100644
index 000000000000..bd570015d147
--- /dev/null
+++ b/fs/ceph/ceph_hash.c
@@ -0,0 +1,118 @@
+
+#include "types.h"
+
+/*
+ * Robert Jenkin's hash function.
+ * http://burtleburtle.net/bob/hash/evahash.html
+ * This is in the public domain.
+ */
+#define mix(a, b, c) \
+ do { \
+ a = a - b; a = a - c; a = a ^ (c >> 13); \
+ b = b - c; b = b - a; b = b ^ (a << 8); \
+ c = c - a; c = c - b; c = c ^ (b >> 13); \
+ a = a - b; a = a - c; a = a ^ (c >> 12); \
+ b = b - c; b = b - a; b = b ^ (a << 16); \
+ c = c - a; c = c - b; c = c ^ (b >> 5); \
+ a = a - b; a = a - c; a = a ^ (c >> 3); \
+ b = b - c; b = b - a; b = b ^ (a << 10); \
+ c = c - a; c = c - b; c = c ^ (b >> 15); \
+ } while (0)
+
+unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
+{
+ const unsigned char *k = (const unsigned char *)str;
+ __u32 a, b, c; /* the internal state */
+ __u32 len; /* how many key bytes still need mixing */
+
+ /* Set up the internal state */
+ len = length;
+ a = 0x9e3779b9; /* the golden ratio; an arbitrary value */
+ b = a;
+ c = 0; /* variable initialization of internal state */
+
+ /* handle most of the key */
+ while (len >= 12) {
+ a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) +
+ ((__u32)k[3] << 24));
+ b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) +
+ ((__u32)k[7] << 24));
+ c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) +
+ ((__u32)k[11] << 24));
+ mix(a, b, c);
+ k = k + 12;
+ len = len - 12;
+ }
+
+ /* handle the last 11 bytes */
+ c = c + length;
+ switch (len) { /* all the case statements fall through */
+ case 11:
+ c = c + ((__u32)k[10] << 24);
+ case 10:
+ c = c + ((__u32)k[9] << 16);
+ case 9:
+ c = c + ((__u32)k[8] << 8);
+ /* the first byte of c is reserved for the length */
+ case 8:
+ b = b + ((__u32)k[7] << 24);
+ case 7:
+ b = b + ((__u32)k[6] << 16);
+ case 6:
+ b = b + ((__u32)k[5] << 8);
+ case 5:
+ b = b + k[4];
+ case 4:
+ a = a + ((__u32)k[3] << 24);
+ case 3:
+ a = a + ((__u32)k[2] << 16);
+ case 2:
+ a = a + ((__u32)k[1] << 8);
+ case 1:
+ a = a + k[0];
+ /* case 0: nothing left to add */
+ }
+ mix(a, b, c);
+
+ return c;
+}
+
+/*
+ * linux dcache hash
+ */
+unsigned ceph_str_hash_linux(const char *str, unsigned length)
+{
+ unsigned long hash = 0;
+ unsigned char c;
+
+ while (length--) {
+ c = *str++;
+ hash = (hash + (c << 4) + (c >> 4)) * 11;
+ }
+ return hash;
+}
+
+
+unsigned ceph_str_hash(int type, const char *s, unsigned len)
+{
+ switch (type) {
+ case CEPH_STR_HASH_LINUX:
+ return ceph_str_hash_linux(s, len);
+ case CEPH_STR_HASH_RJENKINS:
+ return ceph_str_hash_rjenkins(s, len);
+ default:
+ return -1;
+ }
+}
+
+const char *ceph_str_hash_name(int type)
+{
+ switch (type) {
+ case CEPH_STR_HASH_LINUX:
+ return "linux";
+ case CEPH_STR_HASH_RJENKINS:
+ return "rjenkins";
+ default:
+ return "unknown";
+ }
+}
diff --git a/fs/ceph/ceph_hash.h b/fs/ceph/ceph_hash.h
new file mode 100644
index 000000000000..5ac470c433c9
--- /dev/null
+++ b/fs/ceph/ceph_hash.h
@@ -0,0 +1,13 @@
+#ifndef _FS_CEPH_HASH_H
+#define _FS_CEPH_HASH_H
+
+#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */
+#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */
+
+extern unsigned ceph_str_hash_linux(const char *s, unsigned len);
+extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len);
+
+extern unsigned ceph_str_hash(int type, const char *s, unsigned len);
+extern const char *ceph_str_hash_name(int type);
+
+#endif
diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/ceph_strings.c
new file mode 100644
index 000000000000..8e4be6a80c62
--- /dev/null
+++ b/fs/ceph/ceph_strings.c
@@ -0,0 +1,176 @@
+/*
+ * Ceph string constants
+ */
+#include "types.h"
+
+const char *ceph_entity_type_name(int type)
+{
+ switch (type) {
+ case CEPH_ENTITY_TYPE_MDS: return "mds";
+ case CEPH_ENTITY_TYPE_OSD: return "osd";
+ case CEPH_ENTITY_TYPE_MON: return "mon";
+ case CEPH_ENTITY_TYPE_CLIENT: return "client";
+ case CEPH_ENTITY_TYPE_ADMIN: return "admin";
+ case CEPH_ENTITY_TYPE_AUTH: return "auth";
+ default: return "unknown";
+ }
+}
+
+const char *ceph_osd_op_name(int op)
+{
+ switch (op) {
+ case CEPH_OSD_OP_READ: return "read";
+ case CEPH_OSD_OP_STAT: return "stat";
+
+ case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
+
+ case CEPH_OSD_OP_WRITE: return "write";
+ case CEPH_OSD_OP_DELETE: return "delete";
+ case CEPH_OSD_OP_TRUNCATE: return "truncate";
+ case CEPH_OSD_OP_ZERO: return "zero";
+ case CEPH_OSD_OP_WRITEFULL: return "writefull";
+
+ case CEPH_OSD_OP_APPEND: return "append";
+ case CEPH_OSD_OP_STARTSYNC: return "startsync";
+ case CEPH_OSD_OP_SETTRUNC: return "settrunc";
+ case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
+
+ case CEPH_OSD_OP_TMAPUP: return "tmapup";
+ case CEPH_OSD_OP_TMAPGET: return "tmapget";
+ case CEPH_OSD_OP_TMAPPUT: return "tmapput";
+
+ case CEPH_OSD_OP_GETXATTR: return "getxattr";
+ case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
+ case CEPH_OSD_OP_SETXATTR: return "setxattr";
+ case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
+ case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
+ case CEPH_OSD_OP_RMXATTR: return "rmxattr";
+
+ case CEPH_OSD_OP_PULL: return "pull";
+ case CEPH_OSD_OP_PUSH: return "push";
+ case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
+ case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
+ case CEPH_OSD_OP_SCRUB: return "scrub";
+
+ case CEPH_OSD_OP_WRLOCK: return "wrlock";
+ case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
+ case CEPH_OSD_OP_RDLOCK: return "rdlock";
+ case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
+ case CEPH_OSD_OP_UPLOCK: return "uplock";
+ case CEPH_OSD_OP_DNLOCK: return "dnlock";
+
+ case CEPH_OSD_OP_CALL: return "call";
+
+ case CEPH_OSD_OP_PGLS: return "pgls";
+ }
+ return "???";
+}
+
+const char *ceph_mds_state_name(int s)
+{
+ switch (s) {
+ /* down and out */
+ case CEPH_MDS_STATE_DNE: return "down:dne";
+ case CEPH_MDS_STATE_STOPPED: return "down:stopped";
+ /* up and out */
+ case CEPH_MDS_STATE_BOOT: return "up:boot";
+ case CEPH_MDS_STATE_STANDBY: return "up:standby";
+ case CEPH_MDS_STATE_STANDBY_REPLAY: return "up:standby-replay";
+ case CEPH_MDS_STATE_CREATING: return "up:creating";
+ case CEPH_MDS_STATE_STARTING: return "up:starting";
+ /* up and in */
+ case CEPH_MDS_STATE_REPLAY: return "up:replay";
+ case CEPH_MDS_STATE_RESOLVE: return "up:resolve";
+ case CEPH_MDS_STATE_RECONNECT: return "up:reconnect";
+ case CEPH_MDS_STATE_REJOIN: return "up:rejoin";
+ case CEPH_MDS_STATE_CLIENTREPLAY: return "up:clientreplay";
+ case CEPH_MDS_STATE_ACTIVE: return "up:active";
+ case CEPH_MDS_STATE_STOPPING: return "up:stopping";
+ }
+ return "???";
+}
+
+const char *ceph_session_op_name(int op)
+{
+ switch (op) {
+ case CEPH_SESSION_REQUEST_OPEN: return "request_open";
+ case CEPH_SESSION_OPEN: return "open";
+ case CEPH_SESSION_REQUEST_CLOSE: return "request_close";
+ case CEPH_SESSION_CLOSE: return "close";
+ case CEPH_SESSION_REQUEST_RENEWCAPS: return "request_renewcaps";
+ case CEPH_SESSION_RENEWCAPS: return "renewcaps";
+ case CEPH_SESSION_STALE: return "stale";
+ case CEPH_SESSION_RECALL_STATE: return "recall_state";
+ }
+ return "???";
+}
+
+const char *ceph_mds_op_name(int op)
+{
+ switch (op) {
+ case CEPH_MDS_OP_LOOKUP: return "lookup";
+ case CEPH_MDS_OP_LOOKUPHASH: return "lookuphash";
+ case CEPH_MDS_OP_LOOKUPPARENT: return "lookupparent";
+ case CEPH_MDS_OP_GETATTR: return "getattr";
+ case CEPH_MDS_OP_SETXATTR: return "setxattr";
+ case CEPH_MDS_OP_SETATTR: return "setattr";
+ case CEPH_MDS_OP_RMXATTR: return "rmxattr";
+ case CEPH_MDS_OP_READDIR: return "readdir";
+ case CEPH_MDS_OP_MKNOD: return "mknod";
+ case CEPH_MDS_OP_LINK: return "link";
+ case CEPH_MDS_OP_UNLINK: return "unlink";
+ case CEPH_MDS_OP_RENAME: return "rename";
+ case CEPH_MDS_OP_MKDIR: return "mkdir";
+ case CEPH_MDS_OP_RMDIR: return "rmdir";
+ case CEPH_MDS_OP_SYMLINK: return "symlink";
+ case CEPH_MDS_OP_CREATE: return "create";
+ case CEPH_MDS_OP_OPEN: return "open";
+ case CEPH_MDS_OP_LOOKUPSNAP: return "lookupsnap";
+ case CEPH_MDS_OP_LSSNAP: return "lssnap";
+ case CEPH_MDS_OP_MKSNAP: return "mksnap";
+ case CEPH_MDS_OP_RMSNAP: return "rmsnap";
+ }
+ return "???";
+}
+
+const char *ceph_cap_op_name(int op)
+{
+ switch (op) {
+ case CEPH_CAP_OP_GRANT: return "grant";
+ case CEPH_CAP_OP_REVOKE: return "revoke";
+ case CEPH_CAP_OP_TRUNC: return "trunc";
+ case CEPH_CAP_OP_EXPORT: return "export";
+ case CEPH_CAP_OP_IMPORT: return "import";
+ case CEPH_CAP_OP_UPDATE: return "update";
+ case CEPH_CAP_OP_DROP: return "drop";
+ case CEPH_CAP_OP_FLUSH: return "flush";
+ case CEPH_CAP_OP_FLUSH_ACK: return "flush_ack";
+ case CEPH_CAP_OP_FLUSHSNAP: return "flushsnap";
+ case CEPH_CAP_OP_FLUSHSNAP_ACK: return "flushsnap_ack";
+ case CEPH_CAP_OP_RELEASE: return "release";
+ case CEPH_CAP_OP_RENEW: return "renew";
+ }
+ return "???";
+}
+
+const char *ceph_lease_op_name(int o)
+{
+ switch (o) {
+ case CEPH_MDS_LEASE_REVOKE: return "revoke";
+ case CEPH_MDS_LEASE_RELEASE: return "release";
+ case CEPH_MDS_LEASE_RENEW: return "renew";
+ case CEPH_MDS_LEASE_REVOKE_ACK: return "revoke_ack";
+ }
+ return "???";
+}
+
+const char *ceph_snap_op_name(int o)
+{
+ switch (o) {
+ case CEPH_SNAP_OP_UPDATE: return "update";
+ case CEPH_SNAP_OP_CREATE: return "create";
+ case CEPH_SNAP_OP_DESTROY: return "destroy";
+ case CEPH_SNAP_OP_SPLIT: return "split";
+ }
+ return "???";
+}
diff --git a/fs/ceph/crush/crush.c b/fs/ceph/crush/crush.c
new file mode 100644
index 000000000000..fabd302e5779
--- /dev/null
+++ b/fs/ceph/crush/crush.c
@@ -0,0 +1,151 @@
+
+#ifdef __KERNEL__
+# include <linux/slab.h>
+#else
+# include <stdlib.h>
+# include <assert.h>
+# define kfree(x) do { if (x) free(x); } while (0)
+# define BUG_ON(x) assert(!(x))
+#endif
+
+#include "crush.h"
+
+const char *crush_bucket_alg_name(int alg)
+{
+ switch (alg) {
+ case CRUSH_BUCKET_UNIFORM: return "uniform";
+ case CRUSH_BUCKET_LIST: return "list";
+ case CRUSH_BUCKET_TREE: return "tree";
+ case CRUSH_BUCKET_STRAW: return "straw";
+ default: return "unknown";
+ }
+}
+
+/**
+ * crush_get_bucket_item_weight - Get weight of an item in given bucket
+ * @b: bucket pointer
+ * @p: item index in bucket
+ */
+int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
+{
+ if (p >= b->size)
+ return 0;
+
+ switch (b->alg) {
+ case CRUSH_BUCKET_UNIFORM:
+ return ((struct crush_bucket_uniform *)b)->item_weight;
+ case CRUSH_BUCKET_LIST:
+ return ((struct crush_bucket_list *)b)->item_weights[p];
+ case CRUSH_BUCKET_TREE:
+ if (p & 1)
+ return ((struct crush_bucket_tree *)b)->node_weights[p];
+ return 0;
+ case CRUSH_BUCKET_STRAW:
+ return ((struct crush_bucket_straw *)b)->item_weights[p];
+ }
+ return 0;
+}
+
+/**
+ * crush_calc_parents - Calculate parent vectors for the given crush map.
+ * @map: crush_map pointer
+ */
+void crush_calc_parents(struct crush_map *map)
+{
+ int i, b, c;
+
+ for (b = 0; b < map->max_buckets; b++) {
+ if (map->buckets[b] == NULL)
+ continue;
+ for (i = 0; i < map->buckets[b]->size; i++) {
+ c = map->buckets[b]->items[i];
+ BUG_ON(c >= map->max_devices ||
+ c < -map->max_buckets);
+ if (c >= 0)
+ map->device_parents[c] = map->buckets[b]->id;
+ else
+ map->bucket_parents[-1-c] = map->buckets[b]->id;
+ }
+ }
+}
+
+void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
+{
+ kfree(b->h.perm);
+ kfree(b->h.items);
+ kfree(b);
+}
+
+void crush_destroy_bucket_list(struct crush_bucket_list *b)
+{
+ kfree(b->item_weights);
+ kfree(b->sum_weights);
+ kfree(b->h.perm);
+ kfree(b->h.items);
+ kfree(b);
+}
+
+void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
+{
+ kfree(b->node_weights);
+ kfree(b);
+}
+
+void crush_destroy_bucket_straw(struct crush_bucket_straw *b)
+{
+ kfree(b->straws);
+ kfree(b->item_weights);
+ kfree(b->h.perm);
+ kfree(b->h.items);
+ kfree(b);
+}
+
+void crush_destroy_bucket(struct crush_bucket *b)
+{
+ switch (b->alg) {
+ case CRUSH_BUCKET_UNIFORM:
+ crush_destroy_bucket_uniform((struct crush_bucket_uniform *)b);
+ break;
+ case CRUSH_BUCKET_LIST:
+ crush_destroy_bucket_list((struct crush_bucket_list *)b);
+ break;
+ case CRUSH_BUCKET_TREE:
+ crush_destroy_bucket_tree((struct crush_bucket_tree *)b);
+ break;
+ case CRUSH_BUCKET_STRAW:
+ crush_destroy_bucket_straw((struct crush_bucket_straw *)b);
+ break;
+ }
+}
+
+/**
+ * crush_destroy - Destroy a crush_map
+ * @map: crush_map pointer
+ */
+void crush_destroy(struct crush_map *map)
+{
+ int b;
+
+ /* buckets */
+ if (map->buckets) {
+ for (b = 0; b < map->max_buckets; b++) {
+ if (map->buckets[b] == NULL)
+ continue;
+ crush_destroy_bucket(map->buckets[b]);
+ }
+ kfree(map->buckets);
+ }
+
+ /* rules */
+ if (map->rules) {
+ for (b = 0; b < map->max_rules; b++)
+ kfree(map->rules[b]);
+ kfree(map->rules);
+ }
+
+ kfree(map->bucket_parents);
+ kfree(map->device_parents);
+ kfree(map);
+}
+
+
diff --git a/fs/ceph/crush/crush.h b/fs/ceph/crush/crush.h
new file mode 100644
index 000000000000..dcd7e7523700
--- /dev/null
+++ b/fs/ceph/crush/crush.h
@@ -0,0 +1,180 @@
+#ifndef _CRUSH_CRUSH_H
+#define _CRUSH_CRUSH_H
+
+#include <linux/types.h>
+
+/*
+ * CRUSH is a pseudo-random data distribution algorithm that
+ * efficiently distributes input values (typically, data objects)
+ * across a heterogeneous, structured storage cluster.
+ *
+ * The algorithm was originally described in detail in this paper
+ * (although the algorithm has evolved somewhat since then):
+ *
+ * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+ *
+ * LGPL2
+ */
+
+
+#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
+
+
+#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
+#define CRUSH_MAX_SET 10 /* max size of a mapping result */
+
+
+/*
+ * CRUSH uses user-defined "rules" to describe how inputs should be
+ * mapped to devices. A rule consists of sequence of steps to perform
+ * to generate the set of output devices.
+ */
+struct crush_rule_step {
+ __u32 op;
+ __s32 arg1;
+ __s32 arg2;
+};
+
+/* step op codes */
+enum {
+ CRUSH_RULE_NOOP = 0,
+ CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */
+ CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
+ /* arg2 = type */
+ CRUSH_RULE_CHOOSE_INDEP = 3, /* same */
+ CRUSH_RULE_EMIT = 4, /* no args */
+ CRUSH_RULE_CHOOSE_LEAF_FIRSTN = 6,
+ CRUSH_RULE_CHOOSE_LEAF_INDEP = 7,
+};
+
+/*
+ * for specifying choose num (arg1) relative to the max parameter
+ * passed to do_rule
+ */
+#define CRUSH_CHOOSE_N 0
+#define CRUSH_CHOOSE_N_MINUS(x) (-(x))
+
+/*
+ * The rule mask is used to describe what the rule is intended for.
+ * Given a ruleset and size of output set, we search through the
+ * rule list for a matching rule_mask.
+ */
+struct crush_rule_mask {
+ __u8 ruleset;
+ __u8 type;
+ __u8 min_size;
+ __u8 max_size;
+};
+
+struct crush_rule {
+ __u32 len;
+ struct crush_rule_mask mask;
+ struct crush_rule_step steps[0];
+};
+
+#define crush_rule_size(len) (sizeof(struct crush_rule) + \
+ (len)*sizeof(struct crush_rule_step))
+
+
+
+/*
+ * A bucket is a named container of other items (either devices or
+ * other buckets). Items within a bucket are chosen using one of a
+ * few different algorithms. The table summarizes how the speed of
+ * each option measures up against mapping stability when items are
+ * added or removed.
+ *
+ * Bucket Alg Speed Additions Removals
+ * ------------------------------------------------
+ * uniform O(1) poor poor
+ * list O(n) optimal poor
+ * tree O(log n) good good
+ * straw O(n) optimal optimal
+ */
+enum {
+ CRUSH_BUCKET_UNIFORM = 1,
+ CRUSH_BUCKET_LIST = 2,
+ CRUSH_BUCKET_TREE = 3,
+ CRUSH_BUCKET_STRAW = 4
+};
+extern const char *crush_bucket_alg_name(int alg);
+
+struct crush_bucket {
+ __s32 id; /* this'll be negative */
+ __u16 type; /* non-zero; type=0 is reserved for devices */
+ __u8 alg; /* one of CRUSH_BUCKET_* */
+ __u8 hash; /* which hash function to use, CRUSH_HASH_* */
+ __u32 weight; /* 16-bit fixed point */
+ __u32 size; /* num items */
+ __s32 *items;
+
+ /*
+ * cached random permutation: used for uniform bucket and for
+ * the linear search fallback for the other bucket types.
+ */
+ __u32 perm_x; /* @x for which *perm is defined */
+ __u32 perm_n; /* num elements of *perm that are permuted/defined */
+ __u32 *perm;
+};
+
+struct crush_bucket_uniform {
+ struct crush_bucket h;
+ __u32 item_weight; /* 16-bit fixed point; all items equally weighted */
+};
+
+struct crush_bucket_list {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+ __u32 *sum_weights; /* 16-bit fixed point. element i is sum
+ of weights 0..i, inclusive */
+};
+
+struct crush_bucket_tree {
+ struct crush_bucket h; /* note: h.size is _tree_ size, not number of
+ actual items */
+ __u8 num_nodes;
+ __u32 *node_weights;
+};
+
+struct crush_bucket_straw {
+ struct crush_bucket h;
+ __u32 *item_weights; /* 16-bit fixed point */
+ __u32 *straws; /* 16-bit fixed point */
+};
+
+
+
+/*
+ * CRUSH map includes all buckets, rules, etc.
+ */
+struct crush_map {
+ struct crush_bucket **buckets;
+ struct crush_rule **rules;
+
+ /*
+ * Parent pointers to identify the parent bucket a device or
+ * bucket in the hierarchy. If an item appears more than
+ * once, this is the _last_ time it appeared (where buckets
+ * are processed in bucket id order, from -1 on down to
+ * -max_buckets.
+ */
+ __u32 *bucket_parents;
+ __u32 *device_parents;
+
+ __s32 max_buckets;
+ __u32 max_rules;
+ __s32 max_devices;
+};
+
+
+/* crush.c */
+extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
+extern void crush_calc_parents(struct crush_map *map);
+extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
+extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
+extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
+extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
+extern void crush_destroy_bucket(struct crush_bucket *b);
+extern void crush_destroy(struct crush_map *map);
+
+#endif
diff --git a/fs/ceph/crush/hash.c b/fs/ceph/crush/hash.c
new file mode 100644
index 000000000000..5873aed694bf
--- /dev/null
+++ b/fs/ceph/crush/hash.c
@@ -0,0 +1,149 @@
+
+#include <linux/types.h>
+#include "hash.h"
+
+/*
+ * Robert Jenkins' function for mixing 32-bit values
+ * http://burtleburtle.net/bob/hash/evahash.html
+ * a, b = random bits, c = input and output
+ */
+#define crush_hashmix(a, b, c) do { \
+ a = a-b; a = a-c; a = a^(c>>13); \
+ b = b-c; b = b-a; b = b^(a<<8); \
+ c = c-a; c = c-b; c = c^(b>>13); \
+ a = a-b; a = a-c; a = a^(c>>12); \
+ b = b-c; b = b-a; b = b^(a<<16); \
+ c = c-a; c = c-b; c = c^(b>>5); \
+ a = a-b; a = a-c; a = a^(c>>3); \
+ b = b-c; b = b-a; b = b^(a<<10); \
+ c = c-a; c = c-b; c = c^(b>>15); \
+ } while (0)
+
+#define crush_hash_seed 1315423911
+
+static __u32 crush_hash32_rjenkins1(__u32 a)
+{
+ __u32 hash = crush_hash_seed ^ a;
+ __u32 b = a;
+ __u32 x = 231232;
+ __u32 y = 1232;
+ crush_hashmix(b, x, hash);
+ crush_hashmix(y, a, hash);
+ return hash;
+}
+
+static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b)
+{
+ __u32 hash = crush_hash_seed ^ a ^ b;
+ __u32 x = 231232;
+ __u32 y = 1232;
+ crush_hashmix(a, b, hash);
+ crush_hashmix(x, a, hash);
+ crush_hashmix(b, y, hash);
+ return hash;
+}
+
+static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c)
+{
+ __u32 hash = crush_hash_seed ^ a ^ b ^ c;
+ __u32 x = 231232;
+ __u32 y = 1232;
+ crush_hashmix(a, b, hash);
+ crush_hashmix(c, x, hash);
+ crush_hashmix(y, a, hash);
+ crush_hashmix(b, x, hash);
+ crush_hashmix(y, c, hash);
+ return hash;
+}
+
+static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d)
+{
+ __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d;
+ __u32 x = 231232;
+ __u32 y = 1232;
+ crush_hashmix(a, b, hash);
+ crush_hashmix(c, d, hash);
+ crush_hashmix(a, x, hash);
+ crush_hashmix(y, b, hash);
+ crush_hashmix(c, x, hash);
+ crush_hashmix(y, d, hash);
+ return hash;
+}
+
+static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d,
+ __u32 e)
+{
+ __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e;
+ __u32 x = 231232;
+ __u32 y = 1232;
+ crush_hashmix(a, b, hash);
+ crush_hashmix(c, d, hash);
+ crush_hashmix(e, x, hash);
+ crush_hashmix(y, a, hash);
+ crush_hashmix(b, x, hash);
+ crush_hashmix(y, c, hash);
+ crush_hashmix(d, x, hash);
+ crush_hashmix(y, e, hash);
+ return hash;
+}
+
+
+__u32 crush_hash32(int type, __u32 a)
+{
+ switch (type) {
+ case CRUSH_HASH_RJENKINS1:
+ return crush_hash32_rjenkins1(a);
+ default:
+ return 0;
+ }
+}
+
+__u32 crush_hash32_2(int type, __u32 a, __u32 b)
+{
+ switch (type) {
+ case CRUSH_HASH_RJENKINS1:
+ return crush_hash32_rjenkins1_2(a, b);
+ default:
+ return 0;
+ }
+}
+
+__u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c)
+{
+ switch (type) {
+ case CRUSH_HASH_RJENKINS1:
+ return crush_hash32_rjenkins1_3(a, b, c);
+ default:
+ return 0;
+ }
+}
+
+__u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d)
+{
+ switch (type) {
+ case CRUSH_HASH_RJENKINS1:
+ return crush_hash32_rjenkins1_4(a, b, c, d);
+ default:
+ return 0;
+ }
+}
+
+__u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e)
+{
+ switch (type) {
+ case CRUSH_HASH_RJENKINS1:
+ return crush_hash32_rjenkins1_5(a, b, c, d, e);
+ default:
+ return 0;
+ }
+}
+
+const char *crush_hash_name(int type)
+{
+ switch (type) {
+ case CRUSH_HASH_RJENKINS1:
+ return "rjenkins1";
+ default:
+ return "unknown";
+ }
+}
diff --git a/fs/ceph/crush/hash.h b/fs/ceph/crush/hash.h
new file mode 100644
index 000000000000..ff48e110e4bb
--- /dev/null
+++ b/fs/ceph/crush/hash.h
@@ -0,0 +1,17 @@
+#ifndef _CRUSH_HASH_H
+#define _CRUSH_HASH_H
+
+#define CRUSH_HASH_RJENKINS1 0
+
+#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
+
+extern const char *crush_hash_name(int type);
+
+extern __u32 crush_hash32(int type, __u32 a);
+extern __u32 crush_hash32_2(int type, __u32 a, __u32 b);
+extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c);
+extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d);
+extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d,
+ __u32 e);
+
+#endif
diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c
new file mode 100644
index 000000000000..9ba54efb6543
--- /dev/null
+++ b/fs/ceph/crush/mapper.c
@@ -0,0 +1,596 @@
+
+#ifdef __KERNEL__
+# include <linux/string.h>
+# include <linux/slab.h>
+# include <linux/bug.h>
+# include <linux/kernel.h>
+# ifndef dprintk
+# define dprintk(args...)
+# endif
+#else
+# include <string.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <assert.h>
+# define BUG_ON(x) assert(!(x))
+# define dprintk(args...) /* printf(args) */
+# define kmalloc(x, f) malloc(x)
+# define kfree(x) free(x)
+#endif
+
+#include "crush.h"
+#include "hash.h"
+
+/*
+ * Implement the core CRUSH mapping algorithm.
+ */
+
+/**
+ * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
+ * @map: the crush_map
+ * @ruleset: the storage ruleset id (user defined)
+ * @type: storage ruleset type (user defined)
+ * @size: output set size
+ */
+int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
+{
+ int i;
+
+ for (i = 0; i < map->max_rules; i++) {
+ if (map->rules[i] &&
+ map->rules[i]->mask.ruleset == ruleset &&
+ map->rules[i]->mask.type == type &&
+ map->rules[i]->mask.min_size <= size &&
+ map->rules[i]->mask.max_size >= size)
+ return i;
+ }
+ return -1;
+}
+
+
+/*
+ * bucket choose methods
+ *
+ * For each bucket algorithm, we have a "choose" method that, given a
+ * crush input @x and replica position (usually, position in output set) @r,
+ * will produce an item in the bucket.
+ */
+
+/*
+ * Choose based on a random permutation of the bucket.
+ *
+ * We used to use some prime number arithmetic to do this, but it
+ * wasn't very random, and had some other bad behaviors. Instead, we
+ * calculate an actual random permutation of the bucket members.
+ * Since this is expensive, we optimize for the r=0 case, which
+ * captures the vast majority of calls.
+ */
+static int bucket_perm_choose(struct crush_bucket *bucket,
+ int x, int r)
+{
+ unsigned pr = r % bucket->size;
+ unsigned i, s;
+
+ /* start a new permutation if @x has changed */
+ if (bucket->perm_x != x || bucket->perm_n == 0) {
+ dprintk("bucket %d new x=%d\n", bucket->id, x);
+ bucket->perm_x = x;
+
+ /* optimize common r=0 case */
+ if (pr == 0) {
+ s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
+ bucket->size;
+ bucket->perm[0] = s;
+ bucket->perm_n = 0xffff; /* magic value, see below */
+ goto out;
+ }
+
+ for (i = 0; i < bucket->size; i++)
+ bucket->perm[i] = i;
+ bucket->perm_n = 0;
+ } else if (bucket->perm_n == 0xffff) {
+ /* clean up after the r=0 case above */
+ for (i = 1; i < bucket->size; i++)
+ bucket->perm[i] = i;
+ bucket->perm[bucket->perm[0]] = 0;
+ bucket->perm_n = 1;
+ }
+
+ /* calculate permutation up to pr */
+ for (i = 0; i < bucket->perm_n; i++)
+ dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
+ while (bucket->perm_n <= pr) {
+ unsigned p = bucket->perm_n;
+ /* no point in swapping the final entry */
+ if (p < bucket->size - 1) {
+ i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
+ (bucket->size - p);
+ if (i) {
+ unsigned t = bucket->perm[p + i];
+ bucket->perm[p + i] = bucket->perm[p];
+ bucket->perm[p] = t;
+ }
+ dprintk(" perm_choose swap %d with %d\n", p, p+i);
+ }
+ bucket->perm_n++;
+ }
+ for (i = 0; i < bucket->size; i++)
+ dprintk(" perm_choose %d: %d\n", i, bucket->perm[i]);
+
+ s = bucket->perm[pr];
+out:
+ dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
+ bucket->size, x, r, pr, s);
+ return bucket->items[s];
+}
+
+/* uniform */
+static int bucket_uniform_choose(struct crush_bucket_uniform *bucket,
+ int x, int r)
+{
+ return bucket_perm_choose(&bucket->h, x, r);
+}
+
+/* list */
+static int bucket_list_choose(struct crush_bucket_list *bucket,
+ int x, int r)
+{
+ int i;
+
+ for (i = bucket->h.size-1; i >= 0; i--) {
+ __u64 w = crush_hash32_4(bucket->h.hash,x, bucket->h.items[i],
+ r, bucket->h.id);
+ w &= 0xffff;
+ dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
+ "sw %x rand %llx",
+ i, x, r, bucket->h.items[i], bucket->item_weights[i],
+ bucket->sum_weights[i], w);
+ w *= bucket->sum_weights[i];
+ w = w >> 16;
+ /*dprintk(" scaled %llx\n", w);*/
+ if (w < bucket->item_weights[i])
+ return bucket->h.items[i];
+ }
+
+ BUG_ON(1);
+ return 0;
+}
+
+
+/* (binary) tree */
+static int height(int n)
+{
+ int h = 0;
+ while ((n & 1) == 0) {
+ h++;
+ n = n >> 1;
+ }
+ return h;
+}
+
+static int left(int x)
+{
+ int h = height(x);
+ return x - (1 << (h-1));
+}
+
+static int right(int x)
+{
+ int h = height(x);
+ return x + (1 << (h-1));
+}
+
+static int terminal(int x)
+{
+ return x & 1;
+}
+
+static int bucket_tree_choose(struct crush_bucket_tree *bucket,
+ int x, int r)
+{
+ int n, l;
+ __u32 w;
+ __u64 t;
+
+ /* start at root */
+ n = bucket->num_nodes >> 1;
+
+ while (!terminal(n)) {
+ /* pick point in [0, w) */
+ w = bucket->node_weights[n];
+ t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
+ bucket->h.id) * (__u64)w;
+ t = t >> 32;
+
+ /* descend to the left or right? */
+ l = left(n);
+ if (t < bucket->node_weights[l])
+ n = l;
+ else
+ n = right(n);
+ }
+
+ return bucket->h.items[n >> 1];
+}
+
+
+/* straw */
+
+static int bucket_straw_choose(struct crush_bucket_straw *bucket,
+ int x, int r)
+{
+ int i;
+ int high = 0;
+ __u64 high_draw = 0;
+ __u64 draw;
+
+ for (i = 0; i < bucket->h.size; i++) {
+ draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
+ draw &= 0xffff;
+ draw *= bucket->straws[i];
+ if (i == 0 || draw > high_draw) {
+ high = i;
+ high_draw = draw;
+ }
+ }
+ return bucket->h.items[high];
+}
+
+static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
+{
+ dprintk("choose %d x=%d r=%d\n", in->id, x, r);
+ switch (in->alg) {
+ case CRUSH_BUCKET_UNIFORM:
+ return bucket_uniform_choose((struct crush_bucket_uniform *)in,
+ x, r);
+ case CRUSH_BUCKET_LIST:
+ return bucket_list_choose((struct crush_bucket_list *)in,
+ x, r);
+ case CRUSH_BUCKET_TREE:
+ return bucket_tree_choose((struct crush_bucket_tree *)in,
+ x, r);
+ case CRUSH_BUCKET_STRAW:
+ return bucket_straw_choose((struct crush_bucket_straw *)in,
+ x, r);
+ default:
+ BUG_ON(1);
+ return in->items[0];
+ }
+}
+
+/*
+ * true if device is marked "out" (failed, fully offloaded)
+ * of the cluster
+ */
+static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
+{
+ if (weight[item] >= 0x1000)
+ return 0;
+ if (weight[item] == 0)
+ return 1;
+ if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
+ < weight[item])
+ return 0;
+ return 1;
+}
+
+/**
+ * crush_choose - choose numrep distinct items of given type
+ * @map: the crush_map
+ * @bucket: the bucket we are choose an item from
+ * @x: crush input value
+ * @numrep: the number of items to choose
+ * @type: the type of item to choose
+ * @out: pointer to output vector
+ * @outpos: our position in that vector
+ * @firstn: true if choosing "first n" items, false if choosing "indep"
+ * @recurse_to_leaf: true if we want one device under each item of given type
+ * @out2: second output vector for leaf items (if @recurse_to_leaf)
+ */
+static int crush_choose(struct crush_map *map,
+ struct crush_bucket *bucket,
+ __u32 *weight,
+ int x, int numrep, int type,
+ int *out, int outpos,
+ int firstn, int recurse_to_leaf,
+ int *out2)
+{
+ int rep;
+ int ftotal, flocal;
+ int retry_descent, retry_bucket, skip_rep;
+ struct crush_bucket *in = bucket;
+ int r;
+ int i;
+ int item = 0;
+ int itemtype;
+ int collide, reject;
+ const int orig_tries = 5; /* attempts before we fall back to search */
+ dprintk("choose bucket %d x %d outpos %d\n", bucket->id, x, outpos);
+
+ for (rep = outpos; rep < numrep; rep++) {
+ /* keep trying until we get a non-out, non-colliding item */
+ ftotal = 0;
+ skip_rep = 0;
+ do {
+ retry_descent = 0;
+ in = bucket; /* initial bucket */
+
+ /* choose through intervening buckets */
+ flocal = 0;
+ do {
+ collide = 0;
+ retry_bucket = 0;
+ r = rep;
+ if (in->alg == CRUSH_BUCKET_UNIFORM) {
+ /* be careful */
+ if (firstn || numrep >= in->size)
+ /* r' = r + f_total */
+ r += ftotal;
+ else if (in->size % numrep == 0)
+ /* r'=r+(n+1)*f_local */
+ r += (numrep+1) *
+ (flocal+ftotal);
+ else
+ /* r' = r + n*f_local */
+ r += numrep * (flocal+ftotal);
+ } else {
+ if (firstn)
+ /* r' = r + f_total */
+ r += ftotal;
+ else
+ /* r' = r + n*f_local */
+ r += numrep * (flocal+ftotal);
+ }
+
+ /* bucket choose */
+ if (in->size == 0) {
+ reject = 1;
+ goto reject;
+ }
+ if (flocal >= (in->size>>1) &&
+ flocal > orig_tries)
+ item = bucket_perm_choose(in, x, r);
+ else
+ item = crush_bucket_choose(in, x, r);
+ BUG_ON(item >= map->max_devices);
+
+ /* desired type? */
+ if (item < 0)
+ itemtype = map->buckets[-1-item]->type;
+ else
+ itemtype = 0;
+ dprintk(" item %d type %d\n", item, itemtype);
+
+ /* keep going? */
+ if (itemtype != type) {
+ BUG_ON(item >= 0 ||
+ (-1-item) >= map->max_buckets);
+ in = map->buckets[-1-item];
+ continue;
+ }
+
+ /* collision? */
+ for (i = 0; i < outpos; i++) {
+ if (out[i] == item) {
+ collide = 1;
+ break;
+ }
+ }
+
+ if (recurse_to_leaf &&
+ item < 0 &&
+ crush_choose(map, map->buckets[-1-item],
+ weight,
+ x, outpos+1, 0,
+ out2, outpos,
+ firstn, 0, NULL) <= outpos) {
+ reject = 1;
+ } else {
+ /* out? */
+ if (itemtype == 0)
+ reject = is_out(map, weight,
+ item, x);
+ else
+ reject = 0;
+ }
+
+reject:
+ if (reject || collide) {
+ ftotal++;
+ flocal++;
+
+ if (collide && flocal < 3)
+ /* retry locally a few times */
+ retry_bucket = 1;
+ else if (flocal < in->size + orig_tries)
+ /* exhaustive bucket search */
+ retry_bucket = 1;
+ else if (ftotal < 20)
+ /* then retry descent */
+ retry_descent = 1;
+ else
+ /* else give up */
+ skip_rep = 1;
+ dprintk(" reject %d collide %d "
+ "ftotal %d flocal %d\n",
+ reject, collide, ftotal,
+ flocal);
+ }
+ } while (retry_bucket);
+ } while (retry_descent);
+
+ if (skip_rep) {
+ dprintk("skip rep\n");
+ continue;
+ }
+
+ dprintk("choose got %d\n", item);
+ out[outpos] = item;
+ outpos++;
+ }
+
+ dprintk("choose returns %d\n", outpos);
+ return outpos;
+}
+
+
+/**
+ * crush_do_rule - calculate a mapping with the given input and rule
+ * @map: the crush_map
+ * @ruleno: the rule id
+ * @x: hash input
+ * @result: pointer to result vector
+ * @result_max: maximum result size
+ * @force: force initial replica choice; -1 for none
+ */
+int crush_do_rule(struct crush_map *map,
+ int ruleno, int x, int *result, int result_max,
+ int force, __u32 *weight)
+{
+ int result_len;
+ int force_context[CRUSH_MAX_DEPTH];
+ int force_pos = -1;
+ int a[CRUSH_MAX_SET];
+ int b[CRUSH_MAX_SET];
+ int c[CRUSH_MAX_SET];
+ int recurse_to_leaf;
+ int *w;
+ int wsize = 0;
+ int *o;
+ int osize;
+ int *tmp;
+ struct crush_rule *rule;
+ int step;
+ int i, j;
+ int numrep;
+ int firstn;
+ int rc = -1;
+
+ BUG_ON(ruleno >= map->max_rules);
+
+ rule = map->rules[ruleno];
+ result_len = 0;
+ w = a;
+ o = b;
+
+ /*
+ * determine hierarchical context of force, if any. note
+ * that this may or may not correspond to the specific types
+ * referenced by the crush rule.
+ */
+ if (force >= 0) {
+ if (force >= map->max_devices ||
+ map->device_parents[force] == 0) {
+ /*dprintk("CRUSH: forcefed device dne\n");*/
+ rc = -1; /* force fed device dne */
+ goto out;
+ }
+ if (!is_out(map, weight, force, x)) {
+ while (1) {
+ force_context[++force_pos] = force;
+ if (force >= 0)
+ force = map->device_parents[force];
+ else
+ force = map->bucket_parents[-1-force];
+ if (force == 0)
+ break;
+ }
+ }
+ }
+
+ for (step = 0; step < rule->len; step++) {
+ firstn = 0;
+ switch (rule->steps[step].op) {
+ case CRUSH_RULE_TAKE:
+ w[0] = rule->steps[step].arg1;
+ if (force_pos >= 0) {
+ BUG_ON(force_context[force_pos] != w[0]);
+ force_pos--;
+ }
+ wsize = 1;
+ break;
+
+ case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
+ case CRUSH_RULE_CHOOSE_FIRSTN:
+ firstn = 1;
+ case CRUSH_RULE_CHOOSE_LEAF_INDEP:
+ case CRUSH_RULE_CHOOSE_INDEP:
+ BUG_ON(wsize == 0);
+
+ recurse_to_leaf =
+ rule->steps[step].op ==
+ CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
+ rule->steps[step].op ==
+ CRUSH_RULE_CHOOSE_LEAF_INDEP;
+
+ /* reset output */
+ osize = 0;
+
+ for (i = 0; i < wsize; i++) {
+ /*
+ * see CRUSH_N, CRUSH_N_MINUS macros.
+ * basically, numrep <= 0 means relative to
+ * the provided result_max
+ */
+ numrep = rule->steps[step].arg1;
+ if (numrep <= 0) {
+ numrep += result_max;
+ if (numrep <= 0)
+ continue;
+ }
+ j = 0;
+ if (osize == 0 && force_pos >= 0) {
+ /* skip any intermediate types */
+ while (force_pos &&
+ force_context[force_pos] < 0 &&
+ rule->steps[step].arg2 !=
+ map->buckets[-1 -
+ force_context[force_pos]]->type)
+ force_pos--;
+ o[osize] = force_context[force_pos];
+ if (recurse_to_leaf)
+ c[osize] = force_context[0];
+ j++;
+ force_pos--;
+ }
+ osize += crush_choose(map,
+ map->buckets[-1-w[i]],
+ weight,
+ x, numrep,
+ rule->steps[step].arg2,
+ o+osize, j,
+ firstn,
+ recurse_to_leaf, c+osize);
+ }
+
+ if (recurse_to_leaf)
+ /* copy final _leaf_ values to output set */
+ memcpy(o, c, osize*sizeof(*o));
+
+ /* swap t and w arrays */
+ tmp = o;
+ o = w;
+ w = tmp;
+ wsize = osize;
+ break;
+
+
+ case CRUSH_RULE_EMIT:
+ for (i = 0; i < wsize && result_len < result_max; i++) {
+ result[result_len] = w[i];
+ result_len++;
+ }
+ wsize = 0;
+ break;
+
+ default:
+ BUG_ON(1);
+ }
+ }
+ rc = result_len;
+
+out:
+ return rc;
+}
+
+
diff --git a/fs/ceph/crush/mapper.h b/fs/ceph/crush/mapper.h
new file mode 100644
index 000000000000..98e90046fd9f
--- /dev/null
+++ b/fs/ceph/crush/mapper.h
@@ -0,0 +1,20 @@
+#ifndef _CRUSH_MAPPER_H
+#define _CRUSH_MAPPER_H
+
+/*
+ * CRUSH functions for find rules and then mapping an input to an
+ * output set.
+ *
+ * LGPL2
+ */
+
+#include "crush.h"
+
+extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
+extern int crush_do_rule(struct crush_map *map,
+ int ruleno,
+ int x, int *result, int result_max,
+ int forcefeed, /* -1 for none */
+ __u32 *weights);
+
+#endif
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
new file mode 100644
index 000000000000..b90fc3e1ff70
--- /dev/null
+++ b/fs/ceph/debugfs.c
@@ -0,0 +1,450 @@
+#include "ceph_debug.h"
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "super.h"
+#include "mds_client.h"
+#include "mon_client.h"
+#include "auth.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+/*
+ * Implement /sys/kernel/debug/ceph fun
+ *
+ * /sys/kernel/debug/ceph/client* - an instance of the ceph client
+ * .../osdmap - current osdmap
+ * .../mdsmap - current mdsmap
+ * .../monmap - current monmap
+ * .../osdc - active osd requests
+ * .../mdsc - active mds requests
+ * .../monc - mon client state
+ * .../dentry_lru - dump contents of dentry lru
+ * .../caps - expose cap (reservation) stats
+ */
+
+static struct dentry *ceph_debugfs_dir;
+
+static int monmap_show(struct seq_file *s, void *p)
+{
+ int i;
+ struct ceph_client *client = s->private;
+
+ if (client->monc.monmap == NULL)
+ return 0;
+
+ seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
+ for (i = 0; i < client->monc.monmap->num_mon; i++) {
+ struct ceph_entity_inst *inst =
+ &client->monc.monmap->mon_inst[i];
+
+ seq_printf(s, "\t%s%lld\t%s\n",
+ ENTITY_NAME(inst->name),
+ pr_addr(&inst->addr.in_addr));
+ }
+ return 0;
+}
+
+static int mdsmap_show(struct seq_file *s, void *p)
+{
+ int i;
+ struct ceph_client *client = s->private;
+
+ if (client->mdsc.mdsmap == NULL)
+ return 0;
+ seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch);
+ seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root);
+ seq_printf(s, "session_timeout %d\n",
+ client->mdsc.mdsmap->m_session_timeout);
+ seq_printf(s, "session_autoclose %d\n",
+ client->mdsc.mdsmap->m_session_autoclose);
+ for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) {
+ struct ceph_entity_addr *addr =
+ &client->mdsc.mdsmap->m_info[i].addr;
+ int state = client->mdsc.mdsmap->m_info[i].state;
+
+ seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr),
+ ceph_mds_state_name(state));
+ }
+ return 0;
+}
+
+static int osdmap_show(struct seq_file *s, void *p)
+{
+ int i;
+ struct ceph_client *client = s->private;
+
+ if (client->osdc.osdmap == NULL)
+ return 0;
+ seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
+ seq_printf(s, "flags%s%s\n",
+ (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
+ " NEARFULL" : "",
+ (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
+ " FULL" : "");
+ for (i = 0; i < client->osdc.osdmap->num_pools; i++) {
+ struct ceph_pg_pool_info *pool =
+ &client->osdc.osdmap->pg_pool[i];
+ seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
+ i, pool->v.pg_num, pool->pg_num_mask,
+ pool->v.lpg_num, pool->lpg_num_mask);
+ }
+ for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
+ struct ceph_entity_addr *addr =
+ &client->osdc.osdmap->osd_addr[i];
+ int state = client->osdc.osdmap->osd_state[i];
+ char sb[64];
+
+ seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
+ i, pr_addr(&addr->in_addr),
+ ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
+ ceph_osdmap_state_str(sb, sizeof(sb), state));
+ }
+ return 0;
+}
+
+static int monc_show(struct seq_file *s, void *p)
+{
+ struct ceph_client *client = s->private;
+ struct ceph_mon_statfs_request *req;
+ u64 nexttid = 0;
+ int got;
+ struct ceph_mon_client *monc = &client->monc;
+
+ mutex_lock(&monc->mutex);
+
+ if (monc->have_mdsmap)
+ seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
+ if (monc->have_osdmap)
+ seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
+ if (monc->want_next_osdmap)
+ seq_printf(s, "want next osdmap\n");
+
+ while (nexttid < monc->last_tid) {
+ got = radix_tree_gang_lookup(&monc->statfs_request_tree,
+ (void **)&req, nexttid, 1);
+ if (got == 0)
+ break;
+ nexttid = req->tid + 1;
+
+ seq_printf(s, "%lld statfs\n", req->tid);
+ }
+ mutex_unlock(&monc->mutex);
+
+ return 0;
+}
+
+static int mdsc_show(struct seq_file *s, void *p)
+{
+ struct ceph_client *client = s->private;
+ struct ceph_mds_request *req;
+ u64 nexttid = 0;
+ int got;
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ int pathlen;
+ u64 pathbase;
+ char *path;
+
+ mutex_lock(&mdsc->mutex);
+ while (nexttid < mdsc->last_tid) {
+ got = radix_tree_gang_lookup(&mdsc->request_tree,
+ (void **)&req, nexttid, 1);
+ if (got == 0)
+ break;
+ nexttid = req->r_tid + 1;
+
+ if (req->r_request)
+ seq_printf(s, "%lld\tmds%d\t", req->r_tid, req->r_mds);
+ else
+ seq_printf(s, "%lld\t(no request)\t", req->r_tid);
+
+ seq_printf(s, "%s", ceph_mds_op_name(req->r_op));
+
+ if (req->r_got_unsafe)
+ seq_printf(s, "\t(unsafe)");
+ else
+ seq_printf(s, "\t");
+
+ if (req->r_inode) {
+ seq_printf(s, " #%llx", ceph_ino(req->r_inode));
+ } else if (req->r_dentry) {
+ path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
+ &pathbase, 0);
+ spin_lock(&req->r_dentry->d_lock);
+ seq_printf(s, " #%llx/%.*s (%s)",
+ ceph_ino(req->r_dentry->d_parent->d_inode),
+ req->r_dentry->d_name.len,
+ req->r_dentry->d_name.name,
+ path ? path : "");
+ spin_unlock(&req->r_dentry->d_lock);
+ kfree(path);
+ } else if (req->r_path1) {
+ seq_printf(s, " #%llx/%s", req->r_ino1.ino,
+ req->r_path1);
+ }
+
+ if (req->r_old_dentry) {
+ path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
+ &pathbase, 0);
+ spin_lock(&req->r_old_dentry->d_lock);
+ seq_printf(s, " #%llx/%.*s (%s)",
+ ceph_ino(req->r_old_dentry->d_parent->d_inode),
+ req->r_old_dentry->d_name.len,
+ req->r_old_dentry->d_name.name,
+ path ? path : "");
+ spin_unlock(&req->r_old_dentry->d_lock);
+ kfree(path);
+ } else if (req->r_path2) {
+ if (req->r_ino2.ino)
+ seq_printf(s, " #%llx/%s", req->r_ino2.ino,
+ req->r_path2);
+ else
+ seq_printf(s, " %s", req->r_path2);
+ }
+
+ seq_printf(s, "\n");
+ }
+ mutex_unlock(&mdsc->mutex);
+
+ return 0;
+}
+
+static int osdc_show(struct seq_file *s, void *pp)
+{
+ struct ceph_client *client = s->private;
+ struct ceph_osd_client *osdc = &client->osdc;
+ struct rb_node *p;
+
+ mutex_lock(&osdc->request_mutex);
+ for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+ struct ceph_osd_request *req;
+ struct ceph_osd_request_head *head;
+ struct ceph_osd_op *op;
+ int num_ops;
+ int opcode, olen;
+ int i;
+
+ req = rb_entry(p, struct ceph_osd_request, r_node);
+
+ seq_printf(s, "%lld\tosd%d\t", req->r_tid,
+ req->r_osd ? req->r_osd->o_osd : -1);
+
+ head = req->r_request->front.iov_base;
+ op = (void *)(head + 1);
+
+ num_ops = le16_to_cpu(head->num_ops);
+ olen = le32_to_cpu(head->object_len);
+ seq_printf(s, "%.*s", olen,
+ (const char *)(head->ops + num_ops));
+
+ if (req->r_reassert_version.epoch)
+ seq_printf(s, "\t%u'%llu",
+ (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
+ le64_to_cpu(req->r_reassert_version.version));
+ else
+ seq_printf(s, "\t");
+
+ for (i = 0; i < num_ops; i++) {
+ opcode = le16_to_cpu(op->op);
+ seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
+ op++;
+ }
+
+ seq_printf(s, "\n");
+ }
+ mutex_unlock(&osdc->request_mutex);
+ return 0;
+}
+
+static int caps_show(struct seq_file *s, void *p)
+{
+ struct ceph_client *client = p;
+ int total, avail, used, reserved;
+
+ ceph_reservation_status(client, &total, &avail, &used, &reserved);
+ seq_printf(s, "total\t\t%d\n"
+ "avail\t\t%d\n"
+ "used\t\t%d\n"
+ "reserved\t%d\n",
+ total, avail, used, reserved);
+ return 0;
+}
+
+static int dentry_lru_show(struct seq_file *s, void *ptr)
+{
+ struct ceph_client *client = s->private;
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_dentry_info *di;
+
+ spin_lock(&mdsc->dentry_lru_lock);
+ list_for_each_entry(di, &mdsc->dentry_lru, lru) {
+ struct dentry *dentry = di->dentry;
+ seq_printf(s, "%p %p\t%.*s\n",
+ di, dentry, dentry->d_name.len, dentry->d_name.name);
+ }
+ spin_unlock(&mdsc->dentry_lru_lock);
+
+ return 0;
+}
+
+#define DEFINE_SHOW_FUNC(name) \
+static int name##_open(struct inode *inode, struct file *file) \
+{ \
+ struct seq_file *sf; \
+ int ret; \
+ \
+ ret = single_open(file, name, NULL); \
+ sf = file->private_data; \
+ sf->private = inode->i_private; \
+ return ret; \
+} \
+ \
+static const struct file_operations name##_fops = { \
+ .open = name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+};
+
+DEFINE_SHOW_FUNC(monmap_show)
+DEFINE_SHOW_FUNC(mdsmap_show)
+DEFINE_SHOW_FUNC(osdmap_show)
+DEFINE_SHOW_FUNC(monc_show)
+DEFINE_SHOW_FUNC(mdsc_show)
+DEFINE_SHOW_FUNC(osdc_show)
+DEFINE_SHOW_FUNC(dentry_lru_show)
+DEFINE_SHOW_FUNC(caps_show)
+
+int __init ceph_debugfs_init(void)
+{
+ ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
+ if (!ceph_debugfs_dir)
+ return -ENOMEM;
+ return 0;
+}
+
+void ceph_debugfs_cleanup(void)
+{
+ debugfs_remove(ceph_debugfs_dir);
+}
+
+int ceph_debugfs_client_init(struct ceph_client *client)
+{
+ int ret = 0;
+ char name[80];
+
+ snprintf(name, sizeof(name), FSID_FORMAT ".client%lld",
+ PR_FSID(&client->fsid), client->monc.auth->global_id);
+
+ client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
+ if (!client->debugfs_dir)
+ goto out;
+
+ client->monc.debugfs_file = debugfs_create_file("monc",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &monc_show_fops);
+ if (!client->monc.debugfs_file)
+ goto out;
+
+ client->mdsc.debugfs_file = debugfs_create_file("mdsc",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &mdsc_show_fops);
+ if (!client->mdsc.debugfs_file)
+ goto out;
+
+ client->osdc.debugfs_file = debugfs_create_file("osdc",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &osdc_show_fops);
+ if (!client->osdc.debugfs_file)
+ goto out;
+
+ client->debugfs_monmap = debugfs_create_file("monmap",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &monmap_show_fops);
+ if (!client->debugfs_monmap)
+ goto out;
+
+ client->debugfs_mdsmap = debugfs_create_file("mdsmap",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &mdsmap_show_fops);
+ if (!client->debugfs_mdsmap)
+ goto out;
+
+ client->debugfs_osdmap = debugfs_create_file("osdmap",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &osdmap_show_fops);
+ if (!client->debugfs_osdmap)
+ goto out;
+
+ client->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
+ 0600,
+ client->debugfs_dir,
+ client,
+ &dentry_lru_show_fops);
+ if (!client->debugfs_dentry_lru)
+ goto out;
+
+ client->debugfs_caps = debugfs_create_file("caps",
+ 0400,
+ client->debugfs_dir,
+ client,
+ &caps_show_fops);
+ if (!client->debugfs_caps)
+ goto out;
+
+ return 0;
+
+out:
+ ceph_debugfs_client_cleanup(client);
+ return ret;
+}
+
+void ceph_debugfs_client_cleanup(struct ceph_client *client)
+{
+ debugfs_remove(client->debugfs_caps);
+ debugfs_remove(client->debugfs_dentry_lru);
+ debugfs_remove(client->debugfs_osdmap);
+ debugfs_remove(client->debugfs_mdsmap);
+ debugfs_remove(client->debugfs_monmap);
+ debugfs_remove(client->osdc.debugfs_file);
+ debugfs_remove(client->mdsc.debugfs_file);
+ debugfs_remove(client->monc.debugfs_file);
+ debugfs_remove(client->debugfs_dir);
+}
+
+#else // CONFIG_DEBUG_FS
+
+int __init ceph_debugfs_init(void)
+{
+ return 0;
+}
+
+void ceph_debugfs_cleanup(void)
+{
+}
+
+int ceph_debugfs_client_init(struct ceph_client *client)
+{
+ return 0;
+}
+
+void ceph_debugfs_client_cleanup(struct ceph_client *client)
+{
+}
+
+#endif // CONFIG_DEBUG_FS
diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h
new file mode 100644
index 000000000000..10de84896244
--- /dev/null
+++ b/fs/ceph/decode.h
@@ -0,0 +1,159 @@
+#ifndef __CEPH_DECODE_H
+#define __CEPH_DECODE_H
+
+#include <asm/unaligned.h>
+
+#include "types.h"
+
+/*
+ * in all cases,
+ * void **p pointer to position pointer
+ * void *end pointer to end of buffer (last byte + 1)
+ */
+
+static inline u64 ceph_decode_64(void **p)
+{
+ u64 v = get_unaligned_le64(*p);
+ *p += sizeof(u64);
+ return v;
+}
+static inline u32 ceph_decode_32(void **p)
+{
+ u32 v = get_unaligned_le32(*p);
+ *p += sizeof(u32);
+ return v;
+}
+static inline u16 ceph_decode_16(void **p)
+{
+ u16 v = get_unaligned_le16(*p);
+ *p += sizeof(u16);
+ return v;
+}
+static inline u8 ceph_decode_8(void **p)
+{
+ u8 v = *(u8 *)*p;
+ (*p)++;
+ return v;
+}
+static inline void ceph_decode_copy(void **p, void *pv, size_t n)
+{
+ memcpy(pv, *p, n);
+ *p += n;
+}
+
+/*
+ * bounds check input.
+ */
+#define ceph_decode_need(p, end, n, bad) \
+ do { \
+ if (unlikely(*(p) + (n) > (end))) \
+ goto bad; \
+ } while (0)
+
+#define ceph_decode_64_safe(p, end, v, bad) \
+ do { \
+ ceph_decode_need(p, end, sizeof(u64), bad); \
+ v = ceph_decode_64(p); \
+ } while (0)
+#define ceph_decode_32_safe(p, end, v, bad) \
+ do { \
+ ceph_decode_need(p, end, sizeof(u32), bad); \
+ v = ceph_decode_32(p); \
+ } while (0)
+#define ceph_decode_16_safe(p, end, v, bad) \
+ do { \
+ ceph_decode_need(p, end, sizeof(u16), bad); \
+ v = ceph_decode_16(p); \
+ } while (0)
+
+#define ceph_decode_copy_safe(p, end, pv, n, bad) \
+ do { \
+ ceph_decode_need(p, end, n, bad); \
+ ceph_decode_copy(p, pv, n); \
+ } while (0)
+
+/*
+ * struct ceph_timespec <-> struct timespec
+ */
+static inline void ceph_decode_timespec(struct timespec *ts,
+ const struct ceph_timespec *tv)
+{
+ ts->tv_sec = le32_to_cpu(tv->tv_sec);
+ ts->tv_nsec = le32_to_cpu(tv->tv_nsec);
+}
+static inline void ceph_encode_timespec(struct ceph_timespec *tv,
+ const struct timespec *ts)
+{
+ tv->tv_sec = cpu_to_le32(ts->tv_sec);
+ tv->tv_nsec = cpu_to_le32(ts->tv_nsec);
+}
+
+/*
+ * sockaddr_storage <-> ceph_sockaddr
+ */
+static inline void ceph_encode_addr(struct ceph_entity_addr *a)
+{
+ a->in_addr.ss_family = htons(a->in_addr.ss_family);
+}
+static inline void ceph_decode_addr(struct ceph_entity_addr *a)
+{
+ a->in_addr.ss_family = ntohs(a->in_addr.ss_family);
+ WARN_ON(a->in_addr.ss_family == 512);
+}
+
+/*
+ * encoders
+ */
+static inline void ceph_encode_64(void **p, u64 v)
+{
+ put_unaligned_le64(v, (__le64 *)*p);
+ *p += sizeof(u64);
+}
+static inline void ceph_encode_32(void **p, u32 v)
+{
+ put_unaligned_le32(v, (__le32 *)*p);
+ *p += sizeof(u32);
+}
+static inline void ceph_encode_16(void **p, u16 v)
+{
+ put_unaligned_le16(v, (__le16 *)*p);
+ *p += sizeof(u16);
+}
+static inline void ceph_encode_8(void **p, u8 v)
+{
+ *(u8 *)*p = v;
+ (*p)++;
+}
+static inline void ceph_encode_copy(void **p, const void *s, int len)
+{
+ memcpy(*p, s, len);
+ *p += len;
+}
+
+/*
+ * filepath, string encoders
+ */
+static inline void ceph_encode_filepath(void **p, void *end,
+ u64 ino, const char *path)
+{
+ u32 len = path ? strlen(path) : 0;
+ BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end);
+ ceph_encode_64(p, ino);
+ ceph_encode_32(p, len);
+ if (len)
+ memcpy(*p, path, len);
+ *p += len;
+}
+
+static inline void ceph_encode_string(void **p, void *end,
+ const char *s, u32 len)
+{
+ BUG_ON(*p + sizeof(len) + len > end);
+ ceph_encode_32(p, len);
+ if (len)
+ memcpy(*p, s, len);
+ *p += len;
+}
+
+
+#endif
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
new file mode 100644
index 000000000000..89ce3ba4a614
--- /dev/null
+++ b/fs/ceph/dir.c
@@ -0,0 +1,1222 @@
+#include "ceph_debug.h"
+
+#include <linux/spinlock.h>
+#include <linux/fs_struct.h>
+#include <linux/namei.h>
+#include <linux/sched.h>
+
+#include "super.h"
+
+/*
+ * Directory operations: readdir, lookup, create, link, unlink,
+ * rename, etc.
+ */
+
+/*
+ * Ceph MDS operations are specified in terms of a base ino and
+ * relative path. Thus, the client can specify an operation on a
+ * specific inode (e.g., a getattr due to fstat(2)), or as a path
+ * relative to, say, the root directory.
+ *
+ * Normally, we limit ourselves to strict inode ops (no path component)
+ * or dentry operations (a single path component relative to an ino). The
+ * exception to this is open_root_dentry(), which will open the mount
+ * point by name.
+ */
+
+const struct inode_operations ceph_dir_iops;
+const struct file_operations ceph_dir_fops;
+struct dentry_operations ceph_dentry_ops;
+
+/*
+ * Initialize ceph dentry state.
+ */
+int ceph_init_dentry(struct dentry *dentry)
+{
+ struct ceph_dentry_info *di;
+
+ if (dentry->d_fsdata)
+ return 0;
+
+ if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
+ dentry->d_op = &ceph_dentry_ops;
+ else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
+ dentry->d_op = &ceph_snapdir_dentry_ops;
+ else
+ dentry->d_op = &ceph_snap_dentry_ops;
+
+ di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS);
+ if (!di)
+ return -ENOMEM; /* oh well */
+
+ spin_lock(&dentry->d_lock);
+ if (dentry->d_fsdata) /* lost a race */
+ goto out_unlock;
+ di->dentry = dentry;
+ di->lease_session = NULL;
+ dentry->d_fsdata = di;
+ dentry->d_time = jiffies;
+ ceph_dentry_lru_add(dentry);
+out_unlock:
+ spin_unlock(&dentry->d_lock);
+ return 0;
+}
+
+
+
+/*
+ * for readdir, we encode the directory frag and offset within that
+ * frag into f_pos.
+ */
+static unsigned fpos_frag(loff_t p)
+{
+ return p >> 32;
+}
+static unsigned fpos_off(loff_t p)
+{
+ return p & 0xffffffff;
+}
+
+/*
+ * When possible, we try to satisfy a readdir by peeking at the
+ * dcache. We make this work by carefully ordering dentries on
+ * d_u.d_child when we initially get results back from the MDS, and
+ * falling back to a "normal" sync readdir if any dentries in the dir
+ * are dropped.
+ *
+ * I_COMPLETE tells indicates we have all dentries in the dir. It is
+ * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
+ * the MDS if/when the directory is modified).
+ */
+static int __dcache_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
+{
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct ceph_file_info *fi = filp->private_data;
+ struct dentry *parent = filp->f_dentry;
+ struct inode *dir = parent->d_inode;
+ struct list_head *p;
+ struct dentry *dentry, *last;
+ struct ceph_dentry_info *di;
+ int err = 0;
+
+ /* claim ref on last dentry we returned */
+ last = fi->dentry;
+ fi->dentry = NULL;
+
+ dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
+ last);
+
+ spin_lock(&dcache_lock);
+
+ /* start at beginning? */
+ if (filp->f_pos == 2 || (last &&
+ filp->f_pos < ceph_dentry(last)->offset)) {
+ if (list_empty(&parent->d_subdirs))
+ goto out_unlock;
+ p = parent->d_subdirs.prev;
+ dout(" initial p %p/%p\n", p->prev, p->next);
+ } else {
+ p = last->d_u.d_child.prev;
+ }
+
+more:
+ dentry = list_entry(p, struct dentry, d_u.d_child);
+ di = ceph_dentry(dentry);
+ while (1) {
+ dout(" p %p/%p d_subdirs %p/%p\n", p->prev, p->next,
+ parent->d_subdirs.prev, parent->d_subdirs.next);
+ if (p == &parent->d_subdirs) {
+ fi->at_end = 1;
+ goto out_unlock;
+ }
+ if (!d_unhashed(dentry) && dentry->d_inode &&
+ ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
+ ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
+ filp->f_pos <= di->offset)
+ break;
+ dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
+ dentry->d_name.len, dentry->d_name.name, di->offset,
+ filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
+ !dentry->d_inode ? " null" : "");
+ p = p->prev;
+ dentry = list_entry(p, struct dentry, d_u.d_child);
+ di = ceph_dentry(dentry);
+ }
+
+ atomic_inc(&dentry->d_count);
+ spin_unlock(&dcache_lock);
+ spin_unlock(&inode->i_lock);
+
+ dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
+ dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+ filp->f_pos = di->offset;
+ err = filldir(dirent, dentry->d_name.name,
+ dentry->d_name.len, di->offset,
+ dentry->d_inode->i_ino,
+ dentry->d_inode->i_mode >> 12);
+
+ if (last) {
+ if (err < 0) {
+ /* remember our position */
+ fi->dentry = last;
+ fi->next_offset = di->offset;
+ } else {
+ dput(last);
+ }
+ last = NULL;
+ }
+
+ spin_lock(&inode->i_lock);
+ spin_lock(&dcache_lock);
+
+ if (err < 0)
+ goto out_unlock;
+
+ last = dentry;
+
+ p = p->prev;
+ filp->f_pos++;
+
+ /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
+ if ((ceph_inode(dir)->i_ceph_flags & CEPH_I_COMPLETE))
+ goto more;
+ dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
+ err = -EAGAIN;
+
+out_unlock:
+ spin_unlock(&dcache_lock);
+
+ if (last) {
+ spin_unlock(&inode->i_lock);
+ dput(last);
+ spin_lock(&inode->i_lock);
+ }
+
+ return err;
+}
+
+/*
+ * make note of the last dentry we read, so we can
+ * continue at the same lexicographical point,
+ * regardless of what dir changes take place on the
+ * server.
+ */
+static int note_last_dentry(struct ceph_file_info *fi, const char *name,
+ int len)
+{
+ kfree(fi->last_name);
+ fi->last_name = kmalloc(len+1, GFP_NOFS);
+ if (!fi->last_name)
+ return -ENOMEM;
+ memcpy(fi->last_name, name, len);
+ fi->last_name[len] = 0;
+ dout("note_last_dentry '%s'\n", fi->last_name);
+ return 0;
+}
+
+static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+ struct ceph_file_info *fi = filp->private_data;
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *client = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ unsigned frag = fpos_frag(filp->f_pos);
+ int off = fpos_off(filp->f_pos);
+ int err;
+ u32 ftype;
+ struct ceph_mds_reply_info_parsed *rinfo;
+ const int max_entries = client->mount_args->max_readdir;
+
+ dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
+ if (fi->at_end)
+ return 0;
+
+ /* always start with . and .. */
+ if (filp->f_pos == 0) {
+ /* note dir version at start of readdir so we can tell
+ * if any dentries get dropped */
+ fi->dir_release_count = ci->i_release_count;
+
+ dout("readdir off 0 -> '.'\n");
+ if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
+ inode->i_ino, inode->i_mode >> 12) < 0)
+ return 0;
+ filp->f_pos = 1;
+ off = 1;
+ }
+ if (filp->f_pos == 1) {
+ dout("readdir off 1 -> '..'\n");
+ if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
+ filp->f_dentry->d_parent->d_inode->i_ino,
+ inode->i_mode >> 12) < 0)
+ return 0;
+ filp->f_pos = 2;
+ off = 2;
+ }
+
+ /* can we use the dcache? */
+ spin_lock(&inode->i_lock);
+ if ((filp->f_pos == 2 || fi->dentry) &&
+ !ceph_test_opt(client, NOASYNCREADDIR) &&
+ (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
+ __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
+ err = __dcache_readdir(filp, dirent, filldir);
+ if (err != -EAGAIN) {
+ spin_unlock(&inode->i_lock);
+ return err;
+ }
+ }
+ spin_unlock(&inode->i_lock);
+ if (fi->dentry) {
+ err = note_last_dentry(fi, fi->dentry->d_name.name,
+ fi->dentry->d_name.len);
+ if (err)
+ return err;
+ dput(fi->dentry);
+ fi->dentry = NULL;
+ }
+
+ /* proceed with a normal readdir */
+
+more:
+ /* do we have the correct frag content buffered? */
+ if (fi->frag != frag || fi->last_readdir == NULL) {
+ struct ceph_mds_request *req;
+ int op = ceph_snap(inode) == CEPH_SNAPDIR ?
+ CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
+
+ /* discard old result, if any */
+ if (fi->last_readdir)
+ ceph_mdsc_put_request(fi->last_readdir);
+
+ /* requery frag tree, as the frag topology may have changed */
+ frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
+
+ dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
+ ceph_vinop(inode), frag, fi->last_name);
+ req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_inode = igrab(inode);
+ req->r_dentry = dget(filp->f_dentry);
+ /* hints to request -> mds selection code */
+ req->r_direct_mode = USE_AUTH_MDS;
+ req->r_direct_hash = ceph_frag_value(frag);
+ req->r_direct_is_hash = true;
+ req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
+ req->r_readdir_offset = fi->next_offset;
+ req->r_args.readdir.frag = cpu_to_le32(frag);
+ req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
+ req->r_num_caps = max_entries;
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (err < 0) {
+ ceph_mdsc_put_request(req);
+ return err;
+ }
+ dout("readdir got and parsed readdir result=%d"
+ " on frag %x, end=%d, complete=%d\n", err, frag,
+ (int)req->r_reply_info.dir_end,
+ (int)req->r_reply_info.dir_complete);
+
+ if (!req->r_did_prepopulate) {
+ dout("readdir !did_prepopulate");
+ fi->dir_release_count--; /* preclude I_COMPLETE */
+ }
+
+ /* note next offset and last dentry name */
+ fi->offset = fi->next_offset;
+ fi->last_readdir = req;
+
+ if (req->r_reply_info.dir_end) {
+ kfree(fi->last_name);
+ fi->last_name = NULL;
+ fi->next_offset = 0;
+ } else {
+ rinfo = &req->r_reply_info;
+ err = note_last_dentry(fi,
+ rinfo->dir_dname[rinfo->dir_nr-1],
+ rinfo->dir_dname_len[rinfo->dir_nr-1]);
+ if (err)
+ return err;
+ fi->next_offset += rinfo->dir_nr;
+ }
+ }
+
+ rinfo = &fi->last_readdir->r_reply_info;
+ dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
+ rinfo->dir_nr, off, fi->offset);
+ while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
+ u64 pos = ceph_make_fpos(frag, off);
+ struct ceph_mds_reply_inode *in =
+ rinfo->dir_in[off - fi->offset].in;
+ dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
+ off, off - fi->offset, rinfo->dir_nr, pos,
+ rinfo->dir_dname_len[off - fi->offset],
+ rinfo->dir_dname[off - fi->offset], in);
+ BUG_ON(!in);
+ ftype = le32_to_cpu(in->mode) >> 12;
+ if (filldir(dirent,
+ rinfo->dir_dname[off - fi->offset],
+ rinfo->dir_dname_len[off - fi->offset],
+ pos,
+ le64_to_cpu(in->ino),
+ ftype) < 0) {
+ dout("filldir stopping us...\n");
+ return 0;
+ }
+ off++;
+ filp->f_pos = pos + 1;
+ }
+
+ if (fi->last_name) {
+ ceph_mdsc_put_request(fi->last_readdir);
+ fi->last_readdir = NULL;
+ goto more;
+ }
+
+ /* more frags? */
+ if (!ceph_frag_is_rightmost(frag)) {
+ frag = ceph_frag_next(frag);
+ off = 0;
+ filp->f_pos = ceph_make_fpos(frag, off);
+ dout("readdir next frag is %x\n", frag);
+ goto more;
+ }
+ fi->at_end = 1;
+
+ /*
+ * if dir_release_count still matches the dir, no dentries
+ * were released during the whole readdir, and we should have
+ * the complete dir contents in our cache.
+ */
+ spin_lock(&inode->i_lock);
+ if (ci->i_release_count == fi->dir_release_count) {
+ dout(" marking %p complete\n", inode);
+ ci->i_ceph_flags |= CEPH_I_COMPLETE;
+ ci->i_max_offset = filp->f_pos;
+ }
+ spin_unlock(&inode->i_lock);
+
+ dout("readdir %p filp %p done.\n", inode, filp);
+ return 0;
+}
+
+static void reset_readdir(struct ceph_file_info *fi)
+{
+ if (fi->last_readdir) {
+ ceph_mdsc_put_request(fi->last_readdir);
+ fi->last_readdir = NULL;
+ }
+ kfree(fi->last_name);
+ fi->next_offset = 2; /* compensate for . and .. */
+ if (fi->dentry) {
+ dput(fi->dentry);
+ fi->dentry = NULL;
+ }
+ fi->at_end = 0;
+}
+
+static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ceph_file_info *fi = file->private_data;
+ struct inode *inode = file->f_mapping->host;
+ loff_t old_offset = offset;
+ loff_t retval;
+
+ mutex_lock(&inode->i_mutex);
+ switch (origin) {
+ case SEEK_END:
+ offset += inode->i_size + 2; /* FIXME */
+ break;
+ case SEEK_CUR:
+ offset += file->f_pos;
+ }
+ retval = -EINVAL;
+ if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ fi->at_end = 0;
+ }
+ retval = offset;
+
+ /*
+ * discard buffered readdir content on seekdir(0), or
+ * seek to new frag, or seek prior to current chunk.
+ */
+ if (offset == 0 ||
+ fpos_frag(offset) != fpos_frag(old_offset) ||
+ fpos_off(offset) < fi->offset) {
+ dout("dir_llseek dropping %p content\n", file);
+ reset_readdir(fi);
+ }
+
+ /* bump dir_release_count if we did a forward seek */
+ if (offset > old_offset)
+ fi->dir_release_count--;
+ }
+ mutex_unlock(&inode->i_mutex);
+ return retval;
+}
+
+/*
+ * Process result of a lookup/open request.
+ *
+ * Mainly, make sure we return the final req->r_dentry (if it already
+ * existed) in place of the original VFS-provided dentry when they
+ * differ.
+ *
+ * Gracefully handle the case where the MDS replies with -ENOENT and
+ * no trace (which it may do, at its discretion, e.g., if it doesn't
+ * care to issue a lease on the negative dentry).
+ */
+struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
+ struct dentry *dentry, int err)
+{
+ struct ceph_client *client = ceph_client(dentry->d_sb);
+ struct inode *parent = dentry->d_parent->d_inode;
+
+ /* .snap dir? */
+ if (err == -ENOENT &&
+ ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
+ strcmp(dentry->d_name.name,
+ client->mount_args->snapdir_name) == 0) {
+ struct inode *inode = ceph_get_snapdir(parent);
+ dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
+ dentry, dentry->d_name.len, dentry->d_name.name, inode);
+ d_add(dentry, inode);
+ err = 0;
+ }
+
+ if (err == -ENOENT) {
+ /* no trace? */
+ err = 0;
+ if (!req->r_reply_info.head->is_dentry) {
+ dout("ENOENT and no trace, dentry %p inode %p\n",
+ dentry, dentry->d_inode);
+ if (dentry->d_inode) {
+ d_drop(dentry);
+ err = -ENOENT;
+ } else {
+ d_add(dentry, NULL);
+ }
+ }
+ }
+ if (err)
+ dentry = ERR_PTR(err);
+ else if (dentry != req->r_dentry)
+ dentry = dget(req->r_dentry); /* we got spliced */
+ else
+ dentry = NULL;
+ return dentry;
+}
+
+static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
+{
+ return ceph_ino(inode) == CEPH_INO_ROOT &&
+ strncmp(dentry->d_name.name, ".ceph", 5) == 0;
+}
+
+/*
+ * Look up a single dir entry. If there is a lookup intent, inform
+ * the MDS so that it gets our 'caps wanted' value in a single op.
+ */
+static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int op;
+ int err;
+
+ dout("lookup %p dentry %p '%.*s'\n",
+ dir, dentry, dentry->d_name.len, dentry->d_name.name);
+
+ if (dentry->d_name.len > NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ err = ceph_init_dentry(dentry);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ /* open (but not create!) intent? */
+ if (nd &&
+ (nd->flags & LOOKUP_OPEN) &&
+ (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
+ !(nd->intent.open.flags & O_CREAT)) {
+ int mode = nd->intent.open.create_mode & ~current->fs->umask;
+ return ceph_lookup_open(dir, dentry, nd, mode, 1);
+ }
+
+ /* can we conclude ENOENT locally? */
+ if (dentry->d_inode == NULL) {
+ struct ceph_inode_info *ci = ceph_inode(dir);
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+
+ spin_lock(&dir->i_lock);
+ dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
+ if (strncmp(dentry->d_name.name,
+ client->mount_args->snapdir_name,
+ dentry->d_name.len) &&
+ !is_root_ceph_dentry(dir, dentry) &&
+ (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
+ (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
+ di->offset = ci->i_max_offset++;
+ spin_unlock(&dir->i_lock);
+ dout(" dir %p complete, -ENOENT\n", dir);
+ d_add(dentry, NULL);
+ di->lease_shared_gen = ci->i_shared_gen;
+ return NULL;
+ }
+ spin_unlock(&dir->i_lock);
+ }
+
+ op = ceph_snap(dir) == CEPH_SNAPDIR ?
+ CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
+ req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
+ if (IS_ERR(req))
+ return ERR_PTR(PTR_ERR(req));
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
+ /* we only need inode linkage */
+ req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
+ req->r_locked_dir = dir;
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ dentry = ceph_finish_lookup(req, dentry, err);
+ ceph_mdsc_put_request(req); /* will dput(dentry) */
+ dout("lookup result=%p\n", dentry);
+ return dentry;
+}
+
+/*
+ * If we do a create but get no trace back from the MDS, follow up with
+ * a lookup (the VFS expects us to link up the provided dentry).
+ */
+int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
+{
+ struct dentry *result = ceph_lookup(dir, dentry, NULL);
+
+ if (result && !IS_ERR(result)) {
+ /*
+ * We created the item, then did a lookup, and found
+ * it was already linked to another inode we already
+ * had in our cache (and thus got spliced). Link our
+ * dentry to that inode, but don't hash it, just in
+ * case the VFS wants to dereference it.
+ */
+ BUG_ON(!result->d_inode);
+ d_instantiate(dentry, result->d_inode);
+ return 0;
+ }
+ return PTR_ERR(result);
+}
+
+static int ceph_mknod(struct inode *dir, struct dentry *dentry,
+ int mode, dev_t rdev)
+{
+ struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int err;
+
+ if (ceph_snap(dir) != CEPH_NOSNAP)
+ return -EROFS;
+
+ dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
+ dir, dentry, mode, rdev);
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
+ if (IS_ERR(req)) {
+ d_drop(dentry);
+ return PTR_ERR(req);
+ }
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
+ req->r_locked_dir = dir;
+ req->r_args.mknod.mode = cpu_to_le32(mode);
+ req->r_args.mknod.rdev = cpu_to_le32(rdev);
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+ err = ceph_mdsc_do_request(mdsc, dir, req);
+ if (!err && !req->r_reply_info.head->is_dentry)
+ err = ceph_handle_notrace_create(dir, dentry);
+ ceph_mdsc_put_request(req);
+ if (err)
+ d_drop(dentry);
+ return err;
+}
+
+static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
+{
+ dout("create in dir %p dentry %p name '%.*s'\n",
+ dir, dentry, dentry->d_name.len, dentry->d_name.name);
+
+ if (ceph_snap(dir) != CEPH_NOSNAP)
+ return -EROFS;
+
+ if (nd) {
+ BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
+ dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
+ /* hrm, what should i do here if we get aliased? */
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ return 0;
+ }
+
+ /* fall back to mknod */
+ return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
+}
+
+static int ceph_symlink(struct inode *dir, struct dentry *dentry,
+ const char *dest)
+{
+ struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int err;
+
+ if (ceph_snap(dir) != CEPH_NOSNAP)
+ return -EROFS;
+
+ dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
+ if (IS_ERR(req)) {
+ d_drop(dentry);
+ return PTR_ERR(req);
+ }
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
+ req->r_path2 = kstrdup(dest, GFP_NOFS);
+ req->r_locked_dir = dir;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+ err = ceph_mdsc_do_request(mdsc, dir, req);
+ if (!err && !req->r_reply_info.head->is_dentry)
+ err = ceph_handle_notrace_create(dir, dentry);
+ ceph_mdsc_put_request(req);
+ if (err)
+ d_drop(dentry);
+ return err;
+}
+
+static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int err = -EROFS;
+ int op;
+
+ if (ceph_snap(dir) == CEPH_SNAPDIR) {
+ /* mkdir .snap/foo is a MKSNAP */
+ op = CEPH_MDS_OP_MKSNAP;
+ dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
+ dentry->d_name.len, dentry->d_name.name, dentry);
+ } else if (ceph_snap(dir) == CEPH_NOSNAP) {
+ dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
+ op = CEPH_MDS_OP_MKDIR;
+ } else {
+ goto out;
+ }
+ req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
+ req->r_locked_dir = dir;
+ req->r_args.mkdir.mode = cpu_to_le32(mode);
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+ err = ceph_mdsc_do_request(mdsc, dir, req);
+ if (!err && !req->r_reply_info.head->is_dentry)
+ err = ceph_handle_notrace_create(dir, dentry);
+ ceph_mdsc_put_request(req);
+out:
+ if (err < 0)
+ d_drop(dentry);
+ return err;
+}
+
+static int ceph_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int err;
+
+ if (ceph_snap(dir) != CEPH_NOSNAP)
+ return -EROFS;
+
+ dout("link in dir %p old_dentry %p dentry %p\n", dir,
+ old_dentry, dentry);
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
+ if (IS_ERR(req)) {
+ d_drop(dentry);
+ return PTR_ERR(req);
+ }
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
+ req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
+ req->r_locked_dir = dir;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+ err = ceph_mdsc_do_request(mdsc, dir, req);
+ if (err)
+ d_drop(dentry);
+ else if (!req->r_reply_info.head->is_dentry)
+ d_instantiate(dentry, igrab(old_dentry->d_inode));
+ ceph_mdsc_put_request(req);
+ return err;
+}
+
+/*
+ * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
+ * looks like the link count will hit 0, drop any other caps (other
+ * than PIN) we don't specifically want (due to the file still being
+ * open).
+ */
+static int drop_caps_for_unlink(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
+
+ spin_lock(&inode->i_lock);
+ if (inode->i_nlink == 1) {
+ drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
+ ci->i_ceph_flags |= CEPH_I_NODELAY;
+ }
+ spin_unlock(&inode->i_lock);
+ return drop;
+}
+
+/*
+ * rmdir and unlink are differ only by the metadata op code
+ */
+static int ceph_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct inode *inode = dentry->d_inode;
+ struct ceph_mds_request *req;
+ int err = -EROFS;
+ int op;
+
+ if (ceph_snap(dir) == CEPH_SNAPDIR) {
+ /* rmdir .snap/foo is RMSNAP */
+ dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
+ dentry->d_name.name, dentry);
+ op = CEPH_MDS_OP_RMSNAP;
+ } else if (ceph_snap(dir) == CEPH_NOSNAP) {
+ dout("unlink/rmdir dir %p dn %p inode %p\n",
+ dir, dentry, inode);
+ op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
+ CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
+ } else
+ goto out;
+ req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
+ req->r_locked_dir = dir;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+ req->r_inode_drop = drop_caps_for_unlink(inode);
+ err = ceph_mdsc_do_request(mdsc, dir, req);
+ if (!err && !req->r_reply_info.head->is_dentry)
+ d_delete(dentry);
+ ceph_mdsc_put_request(req);
+out:
+ return err;
+}
+
+static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int err;
+
+ if (ceph_snap(old_dir) != ceph_snap(new_dir))
+ return -EXDEV;
+ if (ceph_snap(old_dir) != CEPH_NOSNAP ||
+ ceph_snap(new_dir) != CEPH_NOSNAP)
+ return -EROFS;
+ dout("rename dir %p dentry %p to dir %p dentry %p\n",
+ old_dir, old_dentry, new_dir, new_dentry);
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_dentry = dget(new_dentry);
+ req->r_num_caps = 2;
+ req->r_old_dentry = dget(old_dentry);
+ req->r_locked_dir = new_dir;
+ req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+ /* release LINK_RDCACHE on source inode (mds will lock it) */
+ req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
+ if (new_dentry->d_inode)
+ req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
+ err = ceph_mdsc_do_request(mdsc, old_dir, req);
+ if (!err && !req->r_reply_info.head->is_dentry) {
+ /*
+ * Normally d_move() is done by fill_trace (called by
+ * do_request, above). If there is no trace, we need
+ * to do it here.
+ */
+ d_move(old_dentry, new_dentry);
+ }
+ ceph_mdsc_put_request(req);
+ return err;
+}
+
+
+/*
+ * Check if dentry lease is valid. If not, delete the lease. Try to
+ * renew if the least is more than half up.
+ */
+static int dentry_lease_is_valid(struct dentry *dentry)
+{
+ struct ceph_dentry_info *di;
+ struct ceph_mds_session *s;
+ int valid = 0;
+ u32 gen;
+ unsigned long ttl;
+ struct ceph_mds_session *session = NULL;
+ struct inode *dir = NULL;
+ u32 seq = 0;
+
+ spin_lock(&dentry->d_lock);
+ di = ceph_dentry(dentry);
+ if (di && di->lease_session) {
+ s = di->lease_session;
+ spin_lock(&s->s_cap_lock);
+ gen = s->s_cap_gen;
+ ttl = s->s_cap_ttl;
+ spin_unlock(&s->s_cap_lock);
+
+ if (di->lease_gen == gen &&
+ time_before(jiffies, dentry->d_time) &&
+ time_before(jiffies, ttl)) {
+ valid = 1;
+ if (di->lease_renew_after &&
+ time_after(jiffies, di->lease_renew_after)) {
+ /* we should renew */
+ dir = dentry->d_parent->d_inode;
+ session = ceph_get_mds_session(s);
+ seq = di->lease_seq;
+ di->lease_renew_after = 0;
+ di->lease_renew_from = jiffies;
+ }
+ } else {
+ __ceph_mdsc_drop_dentry_lease(dentry);
+ }
+ }
+ spin_unlock(&dentry->d_lock);
+
+ if (session) {
+ ceph_mdsc_lease_send_msg(session, dir, dentry,
+ CEPH_MDS_LEASE_RENEW, seq);
+ ceph_put_mds_session(session);
+ }
+ dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
+ return valid;
+}
+
+/*
+ * Check if directory-wide content lease/cap is valid.
+ */
+static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
+{
+ struct ceph_inode_info *ci = ceph_inode(dir);
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ int valid = 0;
+
+ spin_lock(&dir->i_lock);
+ if (ci->i_shared_gen == di->lease_shared_gen)
+ valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
+ spin_unlock(&dir->i_lock);
+ dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
+ dir, (unsigned)ci->i_shared_gen, dentry,
+ (unsigned)di->lease_shared_gen, valid);
+ return valid;
+}
+
+/*
+ * Check if cached dentry can be trusted.
+ */
+static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *dir = dentry->d_parent->d_inode;
+
+ dout("d_revalidate %p '%.*s' inode %p\n", dentry,
+ dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+
+ /* always trust cached snapped dentries, snapdir dentry */
+ if (ceph_snap(dir) != CEPH_NOSNAP) {
+ dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
+ dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
+ goto out_touch;
+ }
+ if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
+ goto out_touch;
+
+ if (dentry_lease_is_valid(dentry) ||
+ dir_lease_is_valid(dir, dentry))
+ goto out_touch;
+
+ dout("d_revalidate %p invalid\n", dentry);
+ d_drop(dentry);
+ return 0;
+out_touch:
+ ceph_dentry_lru_touch(dentry);
+ return 1;
+}
+
+/*
+ * When a dentry is released, clear the dir I_COMPLETE if it was part
+ * of the current dir gen.
+ */
+static void ceph_dentry_release(struct dentry *dentry)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ struct inode *parent_inode = dentry->d_parent->d_inode;
+
+ if (parent_inode) {
+ struct ceph_inode_info *ci = ceph_inode(parent_inode);
+
+ spin_lock(&parent_inode->i_lock);
+ if (ci->i_shared_gen == di->lease_shared_gen) {
+ dout(" clearing %p complete (d_release)\n",
+ parent_inode);
+ ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+ ci->i_release_count++;
+ }
+ spin_unlock(&parent_inode->i_lock);
+ }
+ if (di) {
+ ceph_dentry_lru_del(dentry);
+ if (di->lease_session)
+ ceph_put_mds_session(di->lease_session);
+ kmem_cache_free(ceph_dentry_cachep, di);
+ dentry->d_fsdata = NULL;
+ }
+}
+
+static int ceph_snapdir_d_revalidate(struct dentry *dentry,
+ struct nameidata *nd)
+{
+ /*
+ * Eventually, we'll want to revalidate snapped metadata
+ * too... probably...
+ */
+ return 1;
+}
+
+
+
+/*
+ * read() on a dir. This weird interface hack only works if mounted
+ * with '-o dirstat'.
+ */
+static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
+ loff_t *ppos)
+{
+ struct ceph_file_info *cf = file->private_data;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int left;
+
+ if (!ceph_test_opt(ceph_client(inode->i_sb), DIRSTAT))
+ return -EISDIR;
+
+ if (!cf->dir_info) {
+ cf->dir_info = kmalloc(1024, GFP_NOFS);
+ if (!cf->dir_info)
+ return -ENOMEM;
+ cf->dir_info_len =
+ sprintf(cf->dir_info,
+ "entries: %20lld\n"
+ " files: %20lld\n"
+ " subdirs: %20lld\n"
+ "rentries: %20lld\n"
+ " rfiles: %20lld\n"
+ " rsubdirs: %20lld\n"
+ "rbytes: %20lld\n"
+ "rctime: %10ld.%09ld\n",
+ ci->i_files + ci->i_subdirs,
+ ci->i_files,
+ ci->i_subdirs,
+ ci->i_rfiles + ci->i_rsubdirs,
+ ci->i_rfiles,
+ ci->i_rsubdirs,
+ ci->i_rbytes,
+ (long)ci->i_rctime.tv_sec,
+ (long)ci->i_rctime.tv_nsec);
+ }
+
+ if (*ppos >= cf->dir_info_len)
+ return 0;
+ size = min_t(unsigned, size, cf->dir_info_len-*ppos);
+ left = copy_to_user(buf, cf->dir_info + *ppos, size);
+ if (left == size)
+ return -EFAULT;
+ *ppos += (size - left);
+ return size - left;
+}
+
+/*
+ * an fsync() on a dir will wait for any uncommitted directory
+ * operations to commit.
+ */
+static int ceph_dir_fsync(struct file *file, struct dentry *dentry,
+ int datasync)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct list_head *head = &ci->i_unsafe_dirops;
+ struct ceph_mds_request *req;
+ u64 last_tid;
+ int ret = 0;
+
+ dout("dir_fsync %p\n", inode);
+ spin_lock(&ci->i_unsafe_lock);
+ if (list_empty(head))
+ goto out;
+
+ req = list_entry(head->prev,
+ struct ceph_mds_request, r_unsafe_dir_item);
+ last_tid = req->r_tid;
+
+ do {
+ ceph_mdsc_get_request(req);
+ spin_unlock(&ci->i_unsafe_lock);
+ dout("dir_fsync %p wait on tid %llu (until %llu)\n",
+ inode, req->r_tid, last_tid);
+ if (req->r_timeout) {
+ ret = wait_for_completion_timeout(
+ &req->r_safe_completion, req->r_timeout);
+ if (ret > 0)
+ ret = 0;
+ else if (ret == 0)
+ ret = -EIO; /* timed out */
+ } else {
+ wait_for_completion(&req->r_safe_completion);
+ }
+ spin_lock(&ci->i_unsafe_lock);
+ ceph_mdsc_put_request(req);
+
+ if (ret || list_empty(head))
+ break;
+ req = list_entry(head->next,
+ struct ceph_mds_request, r_unsafe_dir_item);
+ } while (req->r_tid < last_tid);
+out:
+ spin_unlock(&ci->i_unsafe_lock);
+ return ret;
+}
+
+/*
+ * We maintain a private dentry LRU.
+ *
+ * FIXME: this needs to be changed to a per-mds lru to be useful.
+ */
+void ceph_dentry_lru_add(struct dentry *dn)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dn);
+ struct ceph_mds_client *mdsc;
+ dout("dentry_lru_add %p %p\t%.*s\n",
+ di, dn, dn->d_name.len, dn->d_name.name);
+
+ if (di) {
+ mdsc = &ceph_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_lru_lock);
+ list_add_tail(&di->lru, &mdsc->dentry_lru);
+ mdsc->num_dentry++;
+ spin_unlock(&mdsc->dentry_lru_lock);
+ }
+}
+
+void ceph_dentry_lru_touch(struct dentry *dn)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dn);
+ struct ceph_mds_client *mdsc;
+ dout("dentry_lru_touch %p %p\t%.*s\n",
+ di, dn, dn->d_name.len, dn->d_name.name);
+
+ if (di) {
+ mdsc = &ceph_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_lru_lock);
+ list_move_tail(&di->lru, &mdsc->dentry_lru);
+ spin_unlock(&mdsc->dentry_lru_lock);
+ }
+}
+
+void ceph_dentry_lru_del(struct dentry *dn)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dn);
+ struct ceph_mds_client *mdsc;
+
+ dout("dentry_lru_del %p %p\t%.*s\n",
+ di, dn, dn->d_name.len, dn->d_name.name);
+ if (di) {
+ mdsc = &ceph_client(dn->d_sb)->mdsc;
+ spin_lock(&mdsc->dentry_lru_lock);
+ list_del_init(&di->lru);
+ mdsc->num_dentry--;
+ spin_unlock(&mdsc->dentry_lru_lock);
+ }
+}
+
+const struct file_operations ceph_dir_fops = {
+ .read = ceph_read_dir,
+ .readdir = ceph_readdir,
+ .llseek = ceph_dir_llseek,
+ .open = ceph_open,
+ .release = ceph_release,
+ .unlocked_ioctl = ceph_ioctl,
+ .fsync = ceph_dir_fsync,
+};
+
+const struct inode_operations ceph_dir_iops = {
+ .lookup = ceph_lookup,
+ .permission = ceph_permission,
+ .getattr = ceph_getattr,
+ .setattr = ceph_setattr,
+ .setxattr = ceph_setxattr,
+ .getxattr = ceph_getxattr,
+ .listxattr = ceph_listxattr,
+ .removexattr = ceph_removexattr,
+ .mknod = ceph_mknod,
+ .symlink = ceph_symlink,
+ .mkdir = ceph_mkdir,
+ .link = ceph_link,
+ .unlink = ceph_unlink,
+ .rmdir = ceph_unlink,
+ .rename = ceph_rename,
+ .create = ceph_create,
+};
+
+struct dentry_operations ceph_dentry_ops = {
+ .d_revalidate = ceph_d_revalidate,
+ .d_release = ceph_dentry_release,
+};
+
+struct dentry_operations ceph_snapdir_dentry_ops = {
+ .d_revalidate = ceph_snapdir_d_revalidate,
+};
+
+struct dentry_operations ceph_snap_dentry_ops = {
+};
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
new file mode 100644
index 000000000000..fc68e39cbad6
--- /dev/null
+++ b/fs/ceph/export.c
@@ -0,0 +1,223 @@
+#include "ceph_debug.h"
+
+#include <linux/exportfs.h>
+#include <asm/unaligned.h>
+
+#include "super.h"
+
+/*
+ * NFS export support
+ *
+ * NFS re-export of a ceph mount is, at present, only semireliable.
+ * The basic issue is that the Ceph architectures doesn't lend itself
+ * well to generating filehandles that will remain valid forever.
+ *
+ * So, we do our best. If you're lucky, your inode will be in the
+ * client's cache. If it's not, and you have a connectable fh, then
+ * the MDS server may be able to find it for you. Otherwise, you get
+ * ESTALE.
+ *
+ * There are ways to this more reliable, but in the non-connectable fh
+ * case, we won't every work perfectly, and in the connectable case,
+ * some changes are needed on the MDS side to work better.
+ */
+
+/*
+ * Basic fh
+ */
+struct ceph_nfs_fh {
+ u64 ino;
+} __attribute__ ((packed));
+
+/*
+ * Larger 'connectable' fh that includes parent ino and name hash.
+ * Use this whenever possible, as it works more reliably.
+ */
+struct ceph_nfs_confh {
+ u64 ino, parent_ino;
+ u32 parent_name_hash;
+} __attribute__ ((packed));
+
+static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
+ int connectable)
+{
+ struct ceph_nfs_fh *fh = (void *)rawfh;
+ struct ceph_nfs_confh *cfh = (void *)rawfh;
+ struct dentry *parent = dentry->d_parent;
+ struct inode *inode = dentry->d_inode;
+ int type;
+
+ /* don't re-export snaps */
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return -EINVAL;
+
+ if (*max_len >= sizeof(*cfh)) {
+ dout("encode_fh %p connectable\n", dentry);
+ cfh->ino = ceph_ino(dentry->d_inode);
+ cfh->parent_ino = ceph_ino(parent->d_inode);
+ cfh->parent_name_hash = parent->d_name.hash;
+ *max_len = sizeof(*cfh);
+ type = 2;
+ } else if (*max_len > sizeof(*fh)) {
+ if (connectable)
+ return -ENOSPC;
+ dout("encode_fh %p\n", dentry);
+ fh->ino = ceph_ino(dentry->d_inode);
+ *max_len = sizeof(*fh);
+ type = 1;
+ } else {
+ return -ENOSPC;
+ }
+ return type;
+}
+
+/*
+ * convert regular fh to dentry
+ *
+ * FIXME: we should try harder by querying the mds for the ino.
+ */
+static struct dentry *__fh_to_dentry(struct super_block *sb,
+ struct ceph_nfs_fh *fh)
+{
+ struct inode *inode;
+ struct dentry *dentry;
+ struct ceph_vino vino;
+ int err;
+
+ dout("__fh_to_dentry %llx\n", fh->ino);
+ vino.ino = fh->ino;
+ vino.snap = CEPH_NOSNAP;
+ inode = ceph_find_inode(sb, vino);
+ if (!inode)
+ return ERR_PTR(-ESTALE);
+
+ dentry = d_obtain_alias(inode);
+ if (!dentry) {
+ pr_err("fh_to_dentry %llx -- inode %p but ENOMEM\n",
+ fh->ino, inode);
+ iput(inode);
+ return ERR_PTR(-ENOMEM);
+ }
+ err = ceph_init_dentry(dentry);
+
+ if (err < 0) {
+ iput(inode);
+ return ERR_PTR(err);
+ }
+ dout("__fh_to_dentry %llx %p dentry %p\n", fh->ino, inode, dentry);
+ return dentry;
+}
+
+/*
+ * convert connectable fh to dentry
+ */
+static struct dentry *__cfh_to_dentry(struct super_block *sb,
+ struct ceph_nfs_confh *cfh)
+{
+ struct ceph_mds_client *mdsc = &ceph_client(sb)->mdsc;
+ struct inode *inode;
+ struct dentry *dentry;
+ struct ceph_vino vino;
+ int err;
+
+ dout("__cfh_to_dentry %llx (%llx/%x)\n",
+ cfh->ino, cfh->parent_ino, cfh->parent_name_hash);
+
+ vino.ino = cfh->ino;
+ vino.snap = CEPH_NOSNAP;
+ inode = ceph_find_inode(sb, vino);
+ if (!inode) {
+ struct ceph_mds_request *req;
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPHASH,
+ USE_ANY_MDS);
+ if (IS_ERR(req))
+ return ERR_PTR(PTR_ERR(req));
+
+ req->r_ino1 = vino;
+ req->r_ino2.ino = cfh->parent_ino;
+ req->r_ino2.snap = CEPH_NOSNAP;
+ req->r_path2 = kmalloc(16, GFP_NOFS);
+ snprintf(req->r_path2, 16, "%d", cfh->parent_name_hash);
+ req->r_num_caps = 1;
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ ceph_mdsc_put_request(req);
+ inode = ceph_find_inode(sb, vino);
+ if (!inode)
+ return ERR_PTR(err ? err : -ESTALE);
+ }
+
+ dentry = d_obtain_alias(inode);
+ if (!dentry) {
+ pr_err("cfh_to_dentry %llx -- inode %p but ENOMEM\n",
+ cfh->ino, inode);
+ iput(inode);
+ return ERR_PTR(-ENOMEM);
+ }
+ err = ceph_init_dentry(dentry);
+ if (err < 0) {
+ iput(inode);
+ return ERR_PTR(err);
+ }
+ dout("__cfh_to_dentry %llx %p dentry %p\n", cfh->ino, inode, dentry);
+ return dentry;
+}
+
+static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ int fh_len, int fh_type)
+{
+ if (fh_type == 1)
+ return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw);
+ else
+ return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw);
+}
+
+/*
+ * get parent, if possible.
+ *
+ * FIXME: we could do better by querying the mds to discover the
+ * parent.
+ */
+static struct dentry *ceph_fh_to_parent(struct super_block *sb,
+ struct fid *fid,
+ int fh_len, int fh_type)
+{
+ struct ceph_nfs_confh *cfh = (void *)fid->raw;
+ struct ceph_vino vino;
+ struct inode *inode;
+ struct dentry *dentry;
+ int err;
+
+ if (fh_type == 1)
+ return ERR_PTR(-ESTALE);
+
+ pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino,
+ cfh->parent_name_hash);
+
+ vino.ino = cfh->ino;
+ vino.snap = CEPH_NOSNAP;
+ inode = ceph_find_inode(sb, vino);
+ if (!inode)
+ return ERR_PTR(-ESTALE);
+
+ dentry = d_obtain_alias(inode);
+ if (!dentry) {
+ pr_err("fh_to_parent %llx -- inode %p but ENOMEM\n",
+ cfh->ino, inode);
+ iput(inode);
+ return ERR_PTR(-ENOMEM);
+ }
+ err = ceph_init_dentry(dentry);
+ if (err < 0) {
+ iput(inode);
+ return ERR_PTR(err);
+ }
+ dout("fh_to_parent %llx %p dentry %p\n", cfh->ino, inode, dentry);
+ return dentry;
+}
+
+const struct export_operations ceph_export_ops = {
+ .encode_fh = ceph_encode_fh,
+ .fh_to_dentry = ceph_fh_to_dentry,
+ .fh_to_parent = ceph_fh_to_parent,
+};
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
new file mode 100644
index 000000000000..fc8aff4767d3
--- /dev/null
+++ b/fs/ceph/file.c
@@ -0,0 +1,904 @@
+#include "ceph_debug.h"
+
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/writeback.h>
+
+#include "super.h"
+#include "mds_client.h"
+
+/*
+ * Ceph file operations
+ *
+ * Implement basic open/close functionality, and implement
+ * read/write.
+ *
+ * We implement three modes of file I/O:
+ * - buffered uses the generic_file_aio_{read,write} helpers
+ *
+ * - synchronous is used when there is multi-client read/write
+ * sharing, avoids the page cache, and synchronously waits for an
+ * ack from the OSD.
+ *
+ * - direct io takes the variant of the sync path that references
+ * user pages directly.
+ *
+ * fsync() flushes and waits on dirty pages, but just queues metadata
+ * for writeback: since the MDS can recover size and mtime there is no
+ * need to wait for MDS acknowledgement.
+ */
+
+
+/*
+ * Prepare an open request. Preallocate ceph_cap to avoid an
+ * inopportune ENOMEM later.
+ */
+static struct ceph_mds_request *
+prepare_open_request(struct super_block *sb, int flags, int create_mode)
+{
+ struct ceph_client *client = ceph_sb_to_client(sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int want_auth = USE_ANY_MDS;
+ int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
+
+ if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
+ want_auth = USE_AUTH_MDS;
+
+ req = ceph_mdsc_create_request(mdsc, op, want_auth);
+ if (IS_ERR(req))
+ goto out;
+ req->r_fmode = ceph_flags_to_mode(flags);
+ req->r_args.open.flags = cpu_to_le32(flags);
+ req->r_args.open.mode = cpu_to_le32(create_mode);
+ req->r_args.open.preferred = cpu_to_le32(-1);
+out:
+ return req;
+}
+
+/*
+ * initialize private struct file data.
+ * if we fail, clean up by dropping fmode reference on the ceph_inode
+ */
+static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
+{
+ struct ceph_file_info *cf;
+ int ret = 0;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ dout("init_file %p %p 0%o (regular)\n", inode, file,
+ inode->i_mode);
+ cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO);
+ if (cf == NULL) {
+ ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
+ return -ENOMEM;
+ }
+ cf->fmode = fmode;
+ cf->next_offset = 2;
+ file->private_data = cf;
+ BUG_ON(inode->i_fop->release != ceph_release);
+ break;
+
+ case S_IFLNK:
+ dout("init_file %p %p 0%o (symlink)\n", inode, file,
+ inode->i_mode);
+ ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
+ break;
+
+ default:
+ dout("init_file %p %p 0%o (special)\n", inode, file,
+ inode->i_mode);
+ /*
+ * we need to drop the open ref now, since we don't
+ * have .release set to ceph_release.
+ */
+ ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
+ BUG_ON(inode->i_fop->release == ceph_release);
+
+ /* call the proper open fop */
+ ret = inode->i_fop->open(inode, file);
+ }
+ return ret;
+}
+
+/*
+ * If the filp already has private_data, that means the file was
+ * already opened by intent during lookup, and we do nothing.
+ *
+ * If we already have the requisite capabilities, we can satisfy
+ * the open request locally (no need to request new caps from the
+ * MDS). We do, however, need to inform the MDS (asynchronously)
+ * if our wanted caps set expands.
+ */
+int ceph_open(struct inode *inode, struct file *file)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_file_info *cf = file->private_data;
+ struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
+ int err;
+ int flags, fmode, wanted;
+
+ if (cf) {
+ dout("open file %p is already opened\n", file);
+ return 0;
+ }
+
+ /* filter out O_CREAT|O_EXCL; vfs did that already. yuck. */
+ flags = file->f_flags & ~(O_CREAT|O_EXCL);
+ if (S_ISDIR(inode->i_mode))
+ flags = O_DIRECTORY; /* mds likes to know */
+
+ dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
+ ceph_vinop(inode), file, flags, file->f_flags);
+ fmode = ceph_flags_to_mode(flags);
+ wanted = ceph_caps_for_mode(fmode);
+
+ /* snapped files are read-only */
+ if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
+ return -EROFS;
+
+ /* trivially open snapdir */
+ if (ceph_snap(inode) == CEPH_SNAPDIR) {
+ spin_lock(&inode->i_lock);
+ __ceph_get_fmode(ci, fmode);
+ spin_unlock(&inode->i_lock);
+ return ceph_init_file(inode, file, fmode);
+ }
+
+ /*
+ * No need to block if we have any caps. Update wanted set
+ * asynchronously.
+ */
+ spin_lock(&inode->i_lock);
+ if (__ceph_is_any_real_caps(ci)) {
+ int mds_wanted = __ceph_caps_mds_wanted(ci);
+ int issued = __ceph_caps_issued(ci, NULL);
+
+ dout("open %p fmode %d want %s issued %s using existing\n",
+ inode, fmode, ceph_cap_string(wanted),
+ ceph_cap_string(issued));
+ __ceph_get_fmode(ci, fmode);
+ spin_unlock(&inode->i_lock);
+
+ /* adjust wanted? */
+ if ((issued & wanted) != wanted &&
+ (mds_wanted & wanted) != wanted &&
+ ceph_snap(inode) != CEPH_SNAPDIR)
+ ceph_check_caps(ci, 0, NULL);
+
+ return ceph_init_file(inode, file, fmode);
+ } else if (ceph_snap(inode) != CEPH_NOSNAP &&
+ (ci->i_snap_caps & wanted) == wanted) {
+ __ceph_get_fmode(ci, fmode);
+ spin_unlock(&inode->i_lock);
+ return ceph_init_file(inode, file, fmode);
+ }
+ spin_unlock(&inode->i_lock);
+
+ dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
+ req = prepare_open_request(inode->i_sb, flags, 0);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+ req->r_inode = igrab(inode);
+ req->r_num_caps = 1;
+ err = ceph_mdsc_do_request(mdsc, parent_inode, req);
+ if (!err)
+ err = ceph_init_file(inode, file, req->r_fmode);
+ ceph_mdsc_put_request(req);
+ dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
+out:
+ return err;
+}
+
+
+/*
+ * Do a lookup + open with a single request.
+ *
+ * If this succeeds, but some subsequent check in the vfs
+ * may_open() fails, the struct *file gets cleaned up (i.e.
+ * ceph_release gets called). So fear not!
+ */
+/*
+ * flags
+ * path_lookup_open -> LOOKUP_OPEN
+ * path_lookup_create -> LOOKUP_OPEN|LOOKUP_CREATE
+ */
+struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd, int mode,
+ int locked_dir)
+{
+ struct ceph_client *client = ceph_sb_to_client(dir->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct file *file = nd->intent.open.file;
+ struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
+ struct ceph_mds_request *req;
+ int err;
+ int flags = nd->intent.open.flags - 1; /* silly vfs! */
+
+ dout("ceph_lookup_open dentry %p '%.*s' flags %d mode 0%o\n",
+ dentry, dentry->d_name.len, dentry->d_name.name, flags, mode);
+
+ /* do the open */
+ req = prepare_open_request(dir->i_sb, flags, mode);
+ if (IS_ERR(req))
+ return ERR_PTR(PTR_ERR(req));
+ req->r_dentry = dget(dentry);
+ req->r_num_caps = 2;
+ if (flags & O_CREAT) {
+ req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
+ req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
+ }
+ req->r_locked_dir = dir; /* caller holds dir->i_mutex */
+ err = ceph_mdsc_do_request(mdsc, parent_inode, req);
+ dentry = ceph_finish_lookup(req, dentry, err);
+ if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
+ err = ceph_handle_notrace_create(dir, dentry);
+ if (!err)
+ err = ceph_init_file(req->r_dentry->d_inode, file,
+ req->r_fmode);
+ ceph_mdsc_put_request(req);
+ dout("ceph_lookup_open result=%p\n", dentry);
+ return dentry;
+}
+
+int ceph_release(struct inode *inode, struct file *file)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_file_info *cf = file->private_data;
+
+ dout("release inode %p file %p\n", inode, file);
+ ceph_put_fmode(ci, cf->fmode);
+ if (cf->last_readdir)
+ ceph_mdsc_put_request(cf->last_readdir);
+ kfree(cf->last_name);
+ kfree(cf->dir_info);
+ dput(cf->dentry);
+ kmem_cache_free(ceph_file_cachep, cf);
+ return 0;
+}
+
+/*
+ * build a vector of user pages
+ */
+static struct page **get_direct_page_vector(const char __user *data,
+ int num_pages,
+ loff_t off, size_t len)
+{
+ struct page **pages;
+ int rc;
+
+ pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ down_read(&current->mm->mmap_sem);
+ rc = get_user_pages(current, current->mm, (unsigned long)data,
+ num_pages, 0, 0, pages, NULL);
+ up_read(&current->mm->mmap_sem);
+ if (rc < 0)
+ goto fail;
+ return pages;
+
+fail:
+ kfree(pages);
+ return ERR_PTR(rc);
+}
+
+static void put_page_vector(struct page **pages, int num_pages)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++)
+ put_page(pages[i]);
+ kfree(pages);
+}
+
+void ceph_release_page_vector(struct page **pages, int num_pages)
+{
+ int i;
+
+ for (i = 0; i < num_pages; i++)
+ __free_pages(pages[i], 0);
+ kfree(pages);
+}
+
+/*
+ * allocate a vector new pages
+ */
+static struct page **alloc_page_vector(int num_pages)
+{
+ struct page **pages;
+ int i;
+
+ pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; i < num_pages; i++) {
+ pages[i] = alloc_page(GFP_NOFS);
+ if (pages[i] == NULL) {
+ ceph_release_page_vector(pages, i);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+ return pages;
+}
+
+/*
+ * copy user data into a page vector
+ */
+static int copy_user_to_page_vector(struct page **pages,
+ const char __user *data,
+ loff_t off, size_t len)
+{
+ int i = 0;
+ int po = off & ~PAGE_CACHE_MASK;
+ int left = len;
+ int l, bad;
+
+ while (left > 0) {
+ l = min_t(int, PAGE_CACHE_SIZE-po, left);
+ bad = copy_from_user(page_address(pages[i]) + po, data, l);
+ if (bad == l)
+ return -EFAULT;
+ data += l - bad;
+ left -= l - bad;
+ if (po) {
+ po += l - bad;
+ if (po == PAGE_CACHE_SIZE)
+ po = 0;
+ }
+ }
+ return len;
+}
+
+/*
+ * copy user data from a page vector into a user pointer
+ */
+static int copy_page_vector_to_user(struct page **pages, char __user *data,
+ loff_t off, size_t len)
+{
+ int i = 0;
+ int po = off & ~PAGE_CACHE_MASK;
+ int left = len;
+ int l, bad;
+
+ while (left > 0) {
+ l = min_t(int, left, PAGE_CACHE_SIZE-po);
+ bad = copy_to_user(data, page_address(pages[i]) + po, l);
+ if (bad == l)
+ return -EFAULT;
+ data += l - bad;
+ left -= l - bad;
+ if (po) {
+ po += l - bad;
+ if (po == PAGE_CACHE_SIZE)
+ po = 0;
+ }
+ i++;
+ }
+ return len;
+}
+
+/*
+ * Zero an extent within a page vector. Offset is relative to the
+ * start of the first page.
+ */
+static void zero_page_vector_range(int off, int len, struct page **pages)
+{
+ int i = off >> PAGE_CACHE_SHIFT;
+
+ dout("zero_page_vector_page %u~%u\n", off, len);
+ BUG_ON(len < PAGE_CACHE_SIZE);
+
+ /* leading partial page? */
+ if (off & ~PAGE_CACHE_MASK) {
+ dout("zeroing %d %p head from %d\n", i, pages[i],
+ (int)(off & ~PAGE_CACHE_MASK));
+ zero_user_segment(pages[i], off & ~PAGE_CACHE_MASK,
+ PAGE_CACHE_SIZE);
+ off += PAGE_CACHE_SIZE;
+ off &= PAGE_CACHE_MASK;
+ i++;
+ }
+ while (len >= PAGE_CACHE_SIZE) {
+ dout("zeroing %d %p\n", i, pages[i]);
+ zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
+ off += PAGE_CACHE_SIZE;
+ len -= PAGE_CACHE_SIZE;
+ i++;
+ }
+ /* trailing partial page? */
+ if (len) {
+ dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
+ zero_user_segment(pages[i], 0, len);
+ }
+}
+
+
+/*
+ * Read a range of bytes striped over one or more objects. Iterate over
+ * objects we stripe over. (That's not atomic, but good enough for now.)
+ *
+ * If we get a short result from the OSD, check against i_size; we need to
+ * only return a short read to the caller if we hit EOF.
+ */
+static int striped_read(struct inode *inode,
+ u64 off, u64 len,
+ struct page **pages, int num_pages)
+{
+ struct ceph_client *client = ceph_inode_to_client(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 pos, this_len;
+ int page_off = off & ~PAGE_CACHE_SIZE; /* first byte's offset in page */
+ int left, pages_left;
+ int read;
+ struct page **page_pos;
+ int ret;
+ bool hit_stripe, was_short;
+
+ /*
+ * we may need to do multiple reads. not atomic, unfortunately.
+ */
+ pos = off;
+ left = len;
+ page_pos = pages;
+ pages_left = num_pages;
+ read = 0;
+
+more:
+ this_len = left;
+ ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode),
+ &ci->i_layout, pos, &this_len,
+ ci->i_truncate_seq,
+ ci->i_truncate_size,
+ page_pos, pages_left);
+ hit_stripe = this_len < left;
+ was_short = ret >= 0 && ret < this_len;
+ if (ret == -ENOENT)
+ ret = 0;
+ dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
+ ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
+
+ if (ret > 0) {
+ int didpages =
+ ((pos & ~PAGE_CACHE_MASK) + ret) >> PAGE_CACHE_SHIFT;
+
+ if (read < pos - off) {
+ dout(" zero gap %llu to %llu\n", off + read, pos);
+ zero_page_vector_range(page_off + read,
+ pos - off - read, pages);
+ }
+ pos += ret;
+ read = pos - off;
+ left -= ret;
+ page_pos += didpages;
+ pages_left -= didpages;
+
+ /* hit stripe? */
+ if (left && hit_stripe)
+ goto more;
+ }
+
+ if (was_short) {
+ /* was original extent fully inside i_size? */
+ if (pos + left <= inode->i_size) {
+ dout("zero tail\n");
+ zero_page_vector_range(page_off + read, len - read,
+ pages);
+ goto out;
+ }
+
+ /* check i_size */
+ ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
+ if (ret < 0)
+ goto out;
+
+ /* hit EOF? */
+ if (pos >= inode->i_size)
+ goto out;
+
+ goto more;
+ }
+
+out:
+ if (ret >= 0)
+ ret = read;
+ dout("striped_read returns %d\n", ret);
+ return ret;
+}
+
+/*
+ * Completely synchronous read and write methods. Direct from __user
+ * buffer to osd, or directly to user pages (if O_DIRECT).
+ *
+ * If the read spans object boundary, just do multiple reads.
+ */
+static ssize_t ceph_sync_read(struct file *file, char __user *data,
+ unsigned len, loff_t *poff)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct page **pages;
+ u64 off = *poff;
+ int num_pages = calc_pages_for(off, len);
+ int ret;
+
+ dout("sync_read on file %p %llu~%u %s\n", file, off, len,
+ (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+
+ if (file->f_flags & O_DIRECT) {
+ pages = get_direct_page_vector(data, num_pages, off, len);
+
+ /*
+ * flush any page cache pages in this range. this
+ * will make concurrent normal and O_DIRECT io slow,
+ * but it will at least behave sensibly when they are
+ * in sequence.
+ */
+ filemap_write_and_wait(inode->i_mapping);
+ } else {
+ pages = alloc_page_vector(num_pages);
+ }
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+
+ ret = striped_read(inode, off, len, pages, num_pages);
+
+ if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
+ ret = copy_page_vector_to_user(pages, data, off, ret);
+ if (ret >= 0)
+ *poff = off + ret;
+
+ if (file->f_flags & O_DIRECT)
+ put_page_vector(pages, num_pages);
+ else
+ ceph_release_page_vector(pages, num_pages);
+ dout("sync_read result %d\n", ret);
+ return ret;
+}
+
+/*
+ * Write commit callback, called if we requested both an ACK and
+ * ONDISK commit reply from the OSD.
+ */
+static void sync_write_commit(struct ceph_osd_request *req,
+ struct ceph_msg *msg)
+{
+ struct ceph_inode_info *ci = ceph_inode(req->r_inode);
+
+ dout("sync_write_commit %p tid %llu\n", req, req->r_tid);
+ spin_lock(&ci->i_unsafe_lock);
+ list_del_init(&req->r_unsafe_item);
+ spin_unlock(&ci->i_unsafe_lock);
+ ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
+}
+
+/*
+ * Synchronous write, straight from __user pointer or user pages (if
+ * O_DIRECT).
+ *
+ * If write spans object boundary, just do multiple writes. (For a
+ * correct atomic write, we should e.g. take write locks on all
+ * objects, rollback on failure, etc.)
+ */
+static ssize_t ceph_sync_write(struct file *file, const char __user *data,
+ size_t left, loff_t *offset)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_client *client = ceph_inode_to_client(inode);
+ struct ceph_osd_request *req;
+ struct page **pages;
+ int num_pages;
+ long long unsigned pos;
+ u64 len;
+ int written = 0;
+ int flags;
+ int do_sync = 0;
+ int check_caps = 0;
+ int ret;
+ struct timespec mtime = CURRENT_TIME;
+
+ if (ceph_snap(file->f_dentry->d_inode) != CEPH_NOSNAP)
+ return -EROFS;
+
+ dout("sync_write on file %p %lld~%u %s\n", file, *offset,
+ (unsigned)left, (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+
+ if (file->f_flags & O_APPEND)
+ pos = i_size_read(inode);
+ else
+ pos = *offset;
+
+ flags = CEPH_OSD_FLAG_ORDERSNAP |
+ CEPH_OSD_FLAG_ONDISK |
+ CEPH_OSD_FLAG_WRITE;
+ if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0)
+ flags |= CEPH_OSD_FLAG_ACK;
+ else
+ do_sync = 1;
+
+ /*
+ * we may need to do multiple writes here if we span an object
+ * boundary. this isn't atomic, unfortunately. :(
+ */
+more:
+ len = left;
+ req = ceph_osdc_new_request(&client->osdc, &ci->i_layout,
+ ceph_vino(inode), pos, &len,
+ CEPH_OSD_OP_WRITE, flags,
+ ci->i_snap_realm->cached_context,
+ do_sync,
+ ci->i_truncate_seq, ci->i_truncate_size,
+ &mtime, false, 2);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ num_pages = calc_pages_for(pos, len);
+
+ if (file->f_flags & O_DIRECT) {
+ pages = get_direct_page_vector(data, num_pages, pos, len);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out;
+ }
+
+ /*
+ * throw out any page cache pages in this range. this
+ * may block.
+ */
+ truncate_inode_pages_range(inode->i_mapping, pos, pos+len);
+ } else {
+ pages = alloc_page_vector(num_pages);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out;
+ }
+ ret = copy_user_to_page_vector(pages, data, pos, len);
+ if (ret < 0) {
+ ceph_release_page_vector(pages, num_pages);
+ goto out;
+ }
+
+ if ((file->f_flags & O_SYNC) == 0) {
+ /* get a second commit callback */
+ req->r_safe_callback = sync_write_commit;
+ req->r_own_pages = 1;
+ }
+ }
+ req->r_pages = pages;
+ req->r_num_pages = num_pages;
+ req->r_inode = inode;
+
+ ret = ceph_osdc_start_request(&client->osdc, req, false);
+ if (!ret) {
+ if (req->r_safe_callback) {
+ /*
+ * Add to inode unsafe list only after we
+ * start_request so that a tid has been assigned.
+ */
+ spin_lock(&ci->i_unsafe_lock);
+ list_add(&ci->i_unsafe_writes, &req->r_unsafe_item);
+ spin_unlock(&ci->i_unsafe_lock);
+ ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
+ }
+ ret = ceph_osdc_wait_request(&client->osdc, req);
+ }
+
+ if (file->f_flags & O_DIRECT)
+ put_page_vector(pages, num_pages);
+ else if (file->f_flags & O_SYNC)
+ ceph_release_page_vector(pages, num_pages);
+
+out:
+ ceph_osdc_put_request(req);
+ if (ret == 0) {
+ pos += len;
+ written += len;
+ left -= len;
+ if (left)
+ goto more;
+
+ ret = written;
+ *offset = pos;
+ if (pos > i_size_read(inode))
+ check_caps = ceph_inode_set_size(inode, pos);
+ if (check_caps)
+ ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY,
+ NULL);
+ }
+ return ret;
+}
+
+/*
+ * Wrap generic_file_aio_read with checks for cap bits on the inode.
+ * Atomically grab references, so that those bits are not released
+ * back to the MDS mid-read.
+ *
+ * Hmm, the sync read case isn't actually async... should it be?
+ */
+static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *filp = iocb->ki_filp;
+ loff_t *ppos = &iocb->ki_pos;
+ size_t len = iov->iov_len;
+ struct inode *inode = filp->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ ssize_t ret;
+ int got = 0;
+
+ dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
+ inode, ceph_vinop(inode), pos, (unsigned)len, inode);
+ __ceph_do_pending_vmtruncate(inode);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
+ &got, -1);
+ if (ret < 0)
+ goto out;
+ dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, (unsigned)len,
+ ceph_cap_string(got));
+
+ if ((got & CEPH_CAP_FILE_CACHE) == 0 ||
+ (iocb->ki_filp->f_flags & O_DIRECT) ||
+ (inode->i_sb->s_flags & MS_SYNCHRONOUS))
+ /* hmm, this isn't really async... */
+ ret = ceph_sync_read(filp, iov->iov_base, len, ppos);
+ else
+ ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+out:
+ dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
+ inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
+ ceph_put_cap_refs(ci, got);
+ return ret;
+}
+
+/*
+ * Take cap references to avoid releasing caps to MDS mid-write.
+ *
+ * If we are synchronous, and write with an old snap context, the OSD
+ * may return EOLDSNAPC. In that case, retry the write.. _after_
+ * dropping our cap refs and allowing the pending snap to logically
+ * complete _before_ this write occurs.
+ *
+ * If we are near ENOSPC, write synchronously.
+ */
+static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
+ loff_t endoff = pos + iov->iov_len;
+ int got = 0;
+ int ret;
+
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return -EROFS;
+
+retry_snap:
+ if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL))
+ return -ENOSPC;
+ __ceph_do_pending_vmtruncate(inode);
+ dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n",
+ inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
+ inode->i_size);
+ ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
+ &got, endoff);
+ if (ret < 0)
+ goto out;
+
+ dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
+ ceph_cap_string(got));
+
+ if ((got & CEPH_CAP_FILE_BUFFER) == 0 ||
+ (iocb->ki_filp->f_flags & O_DIRECT) ||
+ (inode->i_sb->s_flags & MS_SYNCHRONOUS)) {
+ ret = ceph_sync_write(file, iov->iov_base, iov->iov_len,
+ &iocb->ki_pos);
+ } else {
+ ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
+
+ if ((ret >= 0 || ret == -EIOCBQUEUED) &&
+ ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host)
+ || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)))
+ ret = vfs_fsync_range(file, file->f_path.dentry,
+ pos, pos + ret - 1, 1);
+ }
+ if (ret >= 0) {
+ spin_lock(&inode->i_lock);
+ __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
+ spin_unlock(&inode->i_lock);
+ }
+
+out:
+ dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
+ inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len,
+ ceph_cap_string(got));
+ ceph_put_cap_refs(ci, got);
+
+ if (ret == -EOLDSNAPC) {
+ dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
+ inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len);
+ goto retry_snap;
+ }
+
+ return ret;
+}
+
+/*
+ * llseek. be sure to verify file size on SEEK_END.
+ */
+static loff_t ceph_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct inode *inode = file->f_mapping->host;
+ int ret;
+
+ mutex_lock(&inode->i_mutex);
+ __ceph_do_pending_vmtruncate(inode);
+ switch (origin) {
+ case SEEK_END:
+ ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
+ if (ret < 0) {
+ offset = ret;
+ goto out;
+ }
+ offset += inode->i_size;
+ break;
+ case SEEK_CUR:
+ /*
+ * Here we special-case the lseek(fd, 0, SEEK_CUR)
+ * position-querying operation. Avoid rewriting the "same"
+ * f_pos value back to the file because a concurrent read(),
+ * write() or lseek() might have altered it
+ */
+ if (offset == 0) {
+ offset = file->f_pos;
+ goto out;
+ }
+ offset += file->f_pos;
+ break;
+ }
+
+ if (offset < 0 || offset > inode->i_sb->s_maxbytes) {
+ offset = -EINVAL;
+ goto out;
+ }
+
+ /* Special lock needed here? */
+ if (offset != file->f_pos) {
+ file->f_pos = offset;
+ file->f_version = 0;
+ }
+
+out:
+ mutex_unlock(&inode->i_mutex);
+ return offset;
+}
+
+const struct file_operations ceph_file_fops = {
+ .open = ceph_open,
+ .release = ceph_release,
+ .llseek = ceph_llseek,
+ .read = do_sync_read,
+ .write = do_sync_write,
+ .aio_read = ceph_aio_read,
+ .aio_write = ceph_aio_write,
+ .mmap = ceph_mmap,
+ .fsync = ceph_fsync,
+ .splice_read = generic_file_splice_read,
+ .splice_write = generic_file_splice_write,
+ .unlocked_ioctl = ceph_ioctl,
+ .compat_ioctl = ceph_ioctl,
+};
+
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
new file mode 100644
index 000000000000..db684686f48a
--- /dev/null
+++ b/fs/ceph/inode.c
@@ -0,0 +1,1627 @@
+#include "ceph_debug.h"
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/smp_lock.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/namei.h>
+#include <linux/writeback.h>
+#include <linux/vmalloc.h>
+
+#include "super.h"
+#include "decode.h"
+
+/*
+ * Ceph inode operations
+ *
+ * Implement basic inode helpers (get, alloc) and inode ops (getattr,
+ * setattr, etc.), xattr helpers, and helpers for assimilating
+ * metadata returned by the MDS into our cache.
+ *
+ * Also define helpers for doing asynchronous writeback, invalidation,
+ * and truncation for the benefit of those who can't afford to block
+ * (typically because they are in the message handler path).
+ */
+
+static const struct inode_operations ceph_symlink_iops;
+
+static void ceph_inode_invalidate_pages(struct work_struct *work);
+
+/*
+ * find or create an inode, given the ceph ino number
+ */
+struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
+{
+ struct inode *inode;
+ ino_t t = ceph_vino_to_ino(vino);
+
+ inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
+ if (inode == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (inode->i_state & I_NEW) {
+ dout("get_inode created new inode %p %llx.%llx ino %llx\n",
+ inode, ceph_vinop(inode), (u64)inode->i_ino);
+ unlock_new_inode(inode);
+ }
+
+ dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
+ vino.snap, inode);
+ return inode;
+}
+
+/*
+ * get/constuct snapdir inode for a given directory
+ */
+struct inode *ceph_get_snapdir(struct inode *parent)
+{
+ struct ceph_vino vino = {
+ .ino = ceph_ino(parent),
+ .snap = CEPH_SNAPDIR,
+ };
+ struct inode *inode = ceph_get_inode(parent->i_sb, vino);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ BUG_ON(!S_ISDIR(parent->i_mode));
+ if (IS_ERR(inode))
+ return ERR_PTR(PTR_ERR(inode));
+ inode->i_mode = parent->i_mode;
+ inode->i_uid = parent->i_uid;
+ inode->i_gid = parent->i_gid;
+ inode->i_op = &ceph_dir_iops;
+ inode->i_fop = &ceph_dir_fops;
+ ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
+ ci->i_rbytes = 0;
+ return inode;
+}
+
+const struct inode_operations ceph_file_iops = {
+ .permission = ceph_permission,
+ .setattr = ceph_setattr,
+ .getattr = ceph_getattr,
+ .setxattr = ceph_setxattr,
+ .getxattr = ceph_getxattr,
+ .listxattr = ceph_listxattr,
+ .removexattr = ceph_removexattr,
+};
+
+
+/*
+ * We use a 'frag tree' to keep track of the MDS's directory fragments
+ * for a given inode (usually there is just a single fragment). We
+ * need to know when a child frag is delegated to a new MDS, or when
+ * it is flagged as replicated, so we can direct our requests
+ * accordingly.
+ */
+
+/*
+ * find/create a frag in the tree
+ */
+static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
+ u32 f)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ceph_inode_frag *frag;
+ int c;
+
+ p = &ci->i_fragtree.rb_node;
+ while (*p) {
+ parent = *p;
+ frag = rb_entry(parent, struct ceph_inode_frag, node);
+ c = ceph_frag_compare(f, frag->frag);
+ if (c < 0)
+ p = &(*p)->rb_left;
+ else if (c > 0)
+ p = &(*p)->rb_right;
+ else
+ return frag;
+ }
+
+ frag = kmalloc(sizeof(*frag), GFP_NOFS);
+ if (!frag) {
+ pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
+ "frag %x\n", &ci->vfs_inode,
+ ceph_vinop(&ci->vfs_inode), f);
+ return ERR_PTR(-ENOMEM);
+ }
+ frag->frag = f;
+ frag->split_by = 0;
+ frag->mds = -1;
+ frag->ndist = 0;
+
+ rb_link_node(&frag->node, parent, p);
+ rb_insert_color(&frag->node, &ci->i_fragtree);
+
+ dout("get_or_create_frag added %llx.%llx frag %x\n",
+ ceph_vinop(&ci->vfs_inode), f);
+ return frag;
+}
+
+/*
+ * find a specific frag @f
+ */
+struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
+{
+ struct rb_node *n = ci->i_fragtree.rb_node;
+
+ while (n) {
+ struct ceph_inode_frag *frag =
+ rb_entry(n, struct ceph_inode_frag, node);
+ int c = ceph_frag_compare(f, frag->frag);
+ if (c < 0)
+ n = n->rb_left;
+ else if (c > 0)
+ n = n->rb_right;
+ else
+ return frag;
+ }
+ return NULL;
+}
+
+/*
+ * Choose frag containing the given value @v. If @pfrag is
+ * specified, copy the frag delegation info to the caller if
+ * it is present.
+ */
+u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
+ struct ceph_inode_frag *pfrag,
+ int *found)
+{
+ u32 t = ceph_frag_make(0, 0);
+ struct ceph_inode_frag *frag;
+ unsigned nway, i;
+ u32 n;
+
+ if (found)
+ *found = 0;
+
+ mutex_lock(&ci->i_fragtree_mutex);
+ while (1) {
+ WARN_ON(!ceph_frag_contains_value(t, v));
+ frag = __ceph_find_frag(ci, t);
+ if (!frag)
+ break; /* t is a leaf */
+ if (frag->split_by == 0) {
+ if (pfrag)
+ memcpy(pfrag, frag, sizeof(*pfrag));
+ if (found)
+ *found = 1;
+ break;
+ }
+
+ /* choose child */
+ nway = 1 << frag->split_by;
+ dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
+ frag->split_by, nway);
+ for (i = 0; i < nway; i++) {
+ n = ceph_frag_make_child(t, frag->split_by, i);
+ if (ceph_frag_contains_value(n, v)) {
+ t = n;
+ break;
+ }
+ }
+ BUG_ON(i == nway);
+ }
+ dout("choose_frag(%x) = %x\n", v, t);
+
+ mutex_unlock(&ci->i_fragtree_mutex);
+ return t;
+}
+
+/*
+ * Process dirfrag (delegation) info from the mds. Include leaf
+ * fragment in tree ONLY if ndist > 0. Otherwise, only
+ * branches/splits are included in i_fragtree)
+ */
+static int ceph_fill_dirfrag(struct inode *inode,
+ struct ceph_mds_reply_dirfrag *dirinfo)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_inode_frag *frag;
+ u32 id = le32_to_cpu(dirinfo->frag);
+ int mds = le32_to_cpu(dirinfo->auth);
+ int ndist = le32_to_cpu(dirinfo->ndist);
+ int i;
+ int err = 0;
+
+ mutex_lock(&ci->i_fragtree_mutex);
+ if (ndist == 0) {
+ /* no delegation info needed. */
+ frag = __ceph_find_frag(ci, id);
+ if (!frag)
+ goto out;
+ if (frag->split_by == 0) {
+ /* tree leaf, remove */
+ dout("fill_dirfrag removed %llx.%llx frag %x"
+ " (no ref)\n", ceph_vinop(inode), id);
+ rb_erase(&frag->node, &ci->i_fragtree);
+ kfree(frag);
+ } else {
+ /* tree branch, keep and clear */
+ dout("fill_dirfrag cleared %llx.%llx frag %x"
+ " referral\n", ceph_vinop(inode), id);
+ frag->mds = -1;
+ frag->ndist = 0;
+ }
+ goto out;
+ }
+
+
+ /* find/add this frag to store mds delegation info */
+ frag = __get_or_create_frag(ci, id);
+ if (IS_ERR(frag)) {
+ /* this is not the end of the world; we can continue
+ with bad/inaccurate delegation info */
+ pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
+ ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
+ err = -ENOMEM;
+ goto out;
+ }
+
+ frag->mds = mds;
+ frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
+ for (i = 0; i < frag->ndist; i++)
+ frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
+ dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
+ ceph_vinop(inode), frag->frag, frag->ndist);
+
+out:
+ mutex_unlock(&ci->i_fragtree_mutex);
+ return err;
+}
+
+
+/*
+ * initialize a newly allocated inode.
+ */
+struct inode *ceph_alloc_inode(struct super_block *sb)
+{
+ struct ceph_inode_info *ci;
+ int i;
+
+ ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
+ if (!ci)
+ return NULL;
+
+ dout("alloc_inode %p\n", &ci->vfs_inode);
+
+ ci->i_version = 0;
+ ci->i_time_warp_seq = 0;
+ ci->i_ceph_flags = 0;
+ ci->i_release_count = 0;
+ ci->i_symlink = NULL;
+
+ ci->i_fragtree = RB_ROOT;
+ mutex_init(&ci->i_fragtree_mutex);
+
+ ci->i_xattrs.blob = NULL;
+ ci->i_xattrs.prealloc_blob = NULL;
+ ci->i_xattrs.dirty = false;
+ ci->i_xattrs.index = RB_ROOT;
+ ci->i_xattrs.count = 0;
+ ci->i_xattrs.names_size = 0;
+ ci->i_xattrs.vals_size = 0;
+ ci->i_xattrs.version = 0;
+ ci->i_xattrs.index_version = 0;
+
+ ci->i_caps = RB_ROOT;
+ ci->i_auth_cap = NULL;
+ ci->i_dirty_caps = 0;
+ ci->i_flushing_caps = 0;
+ INIT_LIST_HEAD(&ci->i_dirty_item);
+ INIT_LIST_HEAD(&ci->i_flushing_item);
+ ci->i_cap_flush_seq = 0;
+ ci->i_cap_flush_last_tid = 0;
+ memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
+ init_waitqueue_head(&ci->i_cap_wq);
+ ci->i_hold_caps_min = 0;
+ ci->i_hold_caps_max = 0;
+ INIT_LIST_HEAD(&ci->i_cap_delay_list);
+ ci->i_cap_exporting_mds = 0;
+ ci->i_cap_exporting_mseq = 0;
+ ci->i_cap_exporting_issued = 0;
+ INIT_LIST_HEAD(&ci->i_cap_snaps);
+ ci->i_head_snapc = NULL;
+ ci->i_snap_caps = 0;
+
+ for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
+ ci->i_nr_by_mode[i] = 0;
+
+ ci->i_truncate_seq = 0;
+ ci->i_truncate_size = 0;
+ ci->i_truncate_pending = 0;
+
+ ci->i_max_size = 0;
+ ci->i_reported_size = 0;
+ ci->i_wanted_max_size = 0;
+ ci->i_requested_max_size = 0;
+
+ ci->i_pin_ref = 0;
+ ci->i_rd_ref = 0;
+ ci->i_rdcache_ref = 0;
+ ci->i_wr_ref = 0;
+ ci->i_wrbuffer_ref = 0;
+ ci->i_wrbuffer_ref_head = 0;
+ ci->i_shared_gen = 0;
+ ci->i_rdcache_gen = 0;
+ ci->i_rdcache_revoking = 0;
+
+ INIT_LIST_HEAD(&ci->i_unsafe_writes);
+ INIT_LIST_HEAD(&ci->i_unsafe_dirops);
+ spin_lock_init(&ci->i_unsafe_lock);
+
+ ci->i_snap_realm = NULL;
+ INIT_LIST_HEAD(&ci->i_snap_realm_item);
+ INIT_LIST_HEAD(&ci->i_snap_flush_item);
+
+ INIT_WORK(&ci->i_wb_work, ceph_inode_writeback);
+ INIT_WORK(&ci->i_pg_inv_work, ceph_inode_invalidate_pages);
+
+ INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);
+
+ return &ci->vfs_inode;
+}
+
+void ceph_destroy_inode(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_inode_frag *frag;
+ struct rb_node *n;
+
+ dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+
+ ceph_queue_caps_release(inode);
+
+ kfree(ci->i_symlink);
+ while ((n = rb_first(&ci->i_fragtree)) != NULL) {
+ frag = rb_entry(n, struct ceph_inode_frag, node);
+ rb_erase(n, &ci->i_fragtree);
+ kfree(frag);
+ }
+
+ __ceph_destroy_xattrs(ci);
+ if (ci->i_xattrs.blob)
+ ceph_buffer_put(ci->i_xattrs.blob);
+ if (ci->i_xattrs.prealloc_blob)
+ ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+
+ kmem_cache_free(ceph_inode_cachep, ci);
+}
+
+
+/*
+ * Helpers to fill in size, ctime, mtime, and atime. We have to be
+ * careful because either the client or MDS may have more up to date
+ * info, depending on which capabilities are held, and whether
+ * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime
+ * and size are monotonically increasing, except when utimes() or
+ * truncate() increments the corresponding _seq values.)
+ */
+int ceph_fill_file_size(struct inode *inode, int issued,
+ u32 truncate_seq, u64 truncate_size, u64 size)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int queue_trunc = 0;
+
+ if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
+ (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
+ dout("size %lld -> %llu\n", inode->i_size, size);
+ inode->i_size = size;
+ inode->i_blocks = (size + (1<<9) - 1) >> 9;
+ ci->i_reported_size = size;
+ if (truncate_seq != ci->i_truncate_seq) {
+ dout("truncate_seq %u -> %u\n",
+ ci->i_truncate_seq, truncate_seq);
+ ci->i_truncate_seq = truncate_seq;
+ if (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
+ CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
+ CEPH_CAP_FILE_EXCL)) {
+ ci->i_truncate_pending++;
+ queue_trunc = 1;
+ }
+ }
+ }
+ if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
+ ci->i_truncate_size != truncate_size) {
+ dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
+ truncate_size);
+ ci->i_truncate_size = truncate_size;
+ }
+ return queue_trunc;
+}
+
+void ceph_fill_file_time(struct inode *inode, int issued,
+ u64 time_warp_seq, struct timespec *ctime,
+ struct timespec *mtime, struct timespec *atime)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int warn = 0;
+
+ if (issued & (CEPH_CAP_FILE_EXCL|
+ CEPH_CAP_FILE_WR|
+ CEPH_CAP_FILE_BUFFER)) {
+ if (timespec_compare(ctime, &inode->i_ctime) > 0) {
+ dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
+ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
+ ctime->tv_sec, ctime->tv_nsec);
+ inode->i_ctime = *ctime;
+ }
+ if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
+ /* the MDS did a utimes() */
+ dout("mtime %ld.%09ld -> %ld.%09ld "
+ "tw %d -> %d\n",
+ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ mtime->tv_sec, mtime->tv_nsec,
+ ci->i_time_warp_seq, (int)time_warp_seq);
+
+ inode->i_mtime = *mtime;
+ inode->i_atime = *atime;
+ ci->i_time_warp_seq = time_warp_seq;
+ } else if (time_warp_seq == ci->i_time_warp_seq) {
+ /* nobody did utimes(); take the max */
+ if (timespec_compare(mtime, &inode->i_mtime) > 0) {
+ dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
+ inode->i_mtime.tv_sec,
+ inode->i_mtime.tv_nsec,
+ mtime->tv_sec, mtime->tv_nsec);
+ inode->i_mtime = *mtime;
+ }
+ if (timespec_compare(atime, &inode->i_atime) > 0) {
+ dout("atime %ld.%09ld -> %ld.%09ld inc\n",
+ inode->i_atime.tv_sec,
+ inode->i_atime.tv_nsec,
+ atime->tv_sec, atime->tv_nsec);
+ inode->i_atime = *atime;
+ }
+ } else if (issued & CEPH_CAP_FILE_EXCL) {
+ /* we did a utimes(); ignore mds values */
+ } else {
+ warn = 1;
+ }
+ } else {
+ /* we have no write caps; whatever the MDS says is true */
+ if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
+ inode->i_ctime = *ctime;
+ inode->i_mtime = *mtime;
+ inode->i_atime = *atime;
+ ci->i_time_warp_seq = time_warp_seq;
+ } else {
+ warn = 1;
+ }
+ }
+ if (warn) /* time_warp_seq shouldn't go backwards */
+ dout("%p mds time_warp_seq %llu < %u\n",
+ inode, time_warp_seq, ci->i_time_warp_seq);
+}
+
+/*
+ * Populate an inode based on info from mds. May be called on new or
+ * existing inodes.
+ */
+static int fill_inode(struct inode *inode,
+ struct ceph_mds_reply_info_in *iinfo,
+ struct ceph_mds_reply_dirfrag *dirinfo,
+ struct ceph_mds_session *session,
+ unsigned long ttl_from, int cap_fmode,
+ struct ceph_cap_reservation *caps_reservation)
+{
+ struct ceph_mds_reply_inode *info = iinfo->in;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int i;
+ int issued, implemented;
+ struct timespec mtime, atime, ctime;
+ u32 nsplits;
+ struct ceph_buffer *xattr_blob = NULL;
+ int err = 0;
+ int queue_trunc = 0;
+
+ dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
+ inode, ceph_vinop(inode), le64_to_cpu(info->version),
+ ci->i_version);
+
+ /*
+ * prealloc xattr data, if it looks like we'll need it. only
+ * if len > 4 (meaning there are actually xattrs; the first 4
+ * bytes are the xattr count).
+ */
+ if (iinfo->xattr_len > 4) {
+ xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
+ if (!xattr_blob)
+ pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
+ iinfo->xattr_len);
+ }
+
+ spin_lock(&inode->i_lock);
+
+ /*
+ * provided version will be odd if inode value is projected,
+ * even if stable. skip the update if we have a newer info
+ * (e.g., due to inode info racing form multiple MDSs), or if
+ * we are getting projected (unstable) inode info.
+ */
+ if (le64_to_cpu(info->version) > 0 &&
+ (ci->i_version & ~1) > le64_to_cpu(info->version))
+ goto no_change;
+
+ issued = __ceph_caps_issued(ci, &implemented);
+ issued |= implemented | __ceph_caps_dirty(ci);
+
+ /* update inode */
+ ci->i_version = le64_to_cpu(info->version);
+ inode->i_version++;
+ inode->i_rdev = le32_to_cpu(info->rdev);
+
+ if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
+ inode->i_mode = le32_to_cpu(info->mode);
+ inode->i_uid = le32_to_cpu(info->uid);
+ inode->i_gid = le32_to_cpu(info->gid);
+ dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
+ inode->i_uid, inode->i_gid);
+ }
+
+ if ((issued & CEPH_CAP_LINK_EXCL) == 0)
+ inode->i_nlink = le32_to_cpu(info->nlink);
+
+ /* be careful with mtime, atime, size */
+ ceph_decode_timespec(&atime, &info->atime);
+ ceph_decode_timespec(&mtime, &info->mtime);
+ ceph_decode_timespec(&ctime, &info->ctime);
+ queue_trunc = ceph_fill_file_size(inode, issued,
+ le32_to_cpu(info->truncate_seq),
+ le64_to_cpu(info->truncate_size),
+ le64_to_cpu(info->size));
+ ceph_fill_file_time(inode, issued,
+ le32_to_cpu(info->time_warp_seq),
+ &ctime, &mtime, &atime);
+
+ ci->i_max_size = le64_to_cpu(info->max_size);
+ ci->i_layout = info->layout;
+ inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
+
+ /* xattrs */
+ /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
+ if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
+ le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
+ if (ci->i_xattrs.blob)
+ ceph_buffer_put(ci->i_xattrs.blob);
+ ci->i_xattrs.blob = xattr_blob;
+ if (xattr_blob)
+ memcpy(ci->i_xattrs.blob->vec.iov_base,
+ iinfo->xattr_data, iinfo->xattr_len);
+ ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
+ }
+
+ inode->i_mapping->a_ops = &ceph_aops;
+ inode->i_mapping->backing_dev_info =
+ &ceph_client(inode->i_sb)->backing_dev_info;
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFIFO:
+ case S_IFBLK:
+ case S_IFCHR:
+ case S_IFSOCK:
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ inode->i_op = &ceph_file_iops;
+ break;
+ case S_IFREG:
+ inode->i_op = &ceph_file_iops;
+ inode->i_fop = &ceph_file_fops;
+ break;
+ case S_IFLNK:
+ inode->i_op = &ceph_symlink_iops;
+ if (!ci->i_symlink) {
+ int symlen = iinfo->symlink_len;
+ char *sym;
+
+ BUG_ON(symlen != inode->i_size);
+ spin_unlock(&inode->i_lock);
+
+ err = -ENOMEM;
+ sym = kmalloc(symlen+1, GFP_NOFS);
+ if (!sym)
+ goto out;
+ memcpy(sym, iinfo->symlink, symlen);
+ sym[symlen] = 0;
+
+ spin_lock(&inode->i_lock);
+ if (!ci->i_symlink)
+ ci->i_symlink = sym;
+ else
+ kfree(sym); /* lost a race */
+ }
+ break;
+ case S_IFDIR:
+ inode->i_op = &ceph_dir_iops;
+ inode->i_fop = &ceph_dir_fops;
+
+ ci->i_files = le64_to_cpu(info->files);
+ ci->i_subdirs = le64_to_cpu(info->subdirs);
+ ci->i_rbytes = le64_to_cpu(info->rbytes);
+ ci->i_rfiles = le64_to_cpu(info->rfiles);
+ ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
+ ceph_decode_timespec(&ci->i_rctime, &info->rctime);
+
+ /* set dir completion flag? */
+ if (ci->i_files == 0 && ci->i_subdirs == 0 &&
+ ceph_snap(inode) == CEPH_NOSNAP &&
+ (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) {
+ dout(" marking %p complete (empty)\n", inode);
+ ci->i_ceph_flags |= CEPH_I_COMPLETE;
+ ci->i_max_offset = 2;
+ }
+
+ /* it may be better to set st_size in getattr instead? */
+ if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES))
+ inode->i_size = ci->i_rbytes;
+ break;
+ default:
+ pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
+ ceph_vinop(inode), inode->i_mode);
+ }
+
+no_change:
+ spin_unlock(&inode->i_lock);
+
+ /* queue truncate if we saw i_size decrease */
+ if (queue_trunc)
+ if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
+ &ci->i_vmtruncate_work))
+ igrab(inode);
+
+ /* populate frag tree */
+ /* FIXME: move me up, if/when version reflects fragtree changes */
+ nsplits = le32_to_cpu(info->fragtree.nsplits);
+ mutex_lock(&ci->i_fragtree_mutex);
+ for (i = 0; i < nsplits; i++) {
+ u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
+ struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);
+
+ if (IS_ERR(frag))
+ continue;
+ frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
+ dout(" frag %x split by %d\n", frag->frag, frag->split_by);
+ }
+ mutex_unlock(&ci->i_fragtree_mutex);
+
+ /* were we issued a capability? */
+ if (info->cap.caps) {
+ if (ceph_snap(inode) == CEPH_NOSNAP) {
+ ceph_add_cap(inode, session,
+ le64_to_cpu(info->cap.cap_id),
+ cap_fmode,
+ le32_to_cpu(info->cap.caps),
+ le32_to_cpu(info->cap.wanted),
+ le32_to_cpu(info->cap.seq),
+ le32_to_cpu(info->cap.mseq),
+ le64_to_cpu(info->cap.realm),
+ info->cap.flags,
+ caps_reservation);
+ } else {
+ spin_lock(&inode->i_lock);
+ dout(" %p got snap_caps %s\n", inode,
+ ceph_cap_string(le32_to_cpu(info->cap.caps)));
+ ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
+ if (cap_fmode >= 0)
+ __ceph_get_fmode(ci, cap_fmode);
+ spin_unlock(&inode->i_lock);
+ }
+ }
+
+ /* update delegation info? */
+ if (dirinfo)
+ ceph_fill_dirfrag(inode, dirinfo);
+
+ err = 0;
+
+out:
+ if (xattr_blob)
+ ceph_buffer_put(xattr_blob);
+ return err;
+}
+
+/*
+ * caller should hold session s_mutex.
+ */
+static void update_dentry_lease(struct dentry *dentry,
+ struct ceph_mds_reply_lease *lease,
+ struct ceph_mds_session *session,
+ unsigned long from_time)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+ long unsigned duration = le32_to_cpu(lease->duration_ms);
+ long unsigned ttl = from_time + (duration * HZ) / 1000;
+ long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
+ struct inode *dir;
+
+ /* only track leases on regular dentries */
+ if (dentry->d_op != &ceph_dentry_ops)
+ return;
+
+ spin_lock(&dentry->d_lock);
+ dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
+ dentry, le16_to_cpu(lease->mask), duration, ttl);
+
+ /* make lease_rdcache_gen match directory */
+ dir = dentry->d_parent->d_inode;
+ di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;
+
+ if (lease->mask == 0)
+ goto out_unlock;
+
+ if (di->lease_gen == session->s_cap_gen &&
+ time_before(ttl, dentry->d_time))
+ goto out_unlock; /* we already have a newer lease. */
+
+ if (di->lease_session && di->lease_session != session)
+ goto out_unlock;
+
+ ceph_dentry_lru_touch(dentry);
+
+ if (!di->lease_session)
+ di->lease_session = ceph_get_mds_session(session);
+ di->lease_gen = session->s_cap_gen;
+ di->lease_seq = le32_to_cpu(lease->seq);
+ di->lease_renew_after = half_ttl;
+ di->lease_renew_from = 0;
+ dentry->d_time = ttl;
+out_unlock:
+ spin_unlock(&dentry->d_lock);
+ return;
+}
+
+/*
+ * splice a dentry to an inode.
+ * caller must hold directory i_mutex for this to be safe.
+ *
+ * we will only rehash the resulting dentry if @prehash is
+ * true; @prehash will be set to false (for the benefit of
+ * the caller) if we fail.
+ */
+static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
+ bool *prehash)
+{
+ struct dentry *realdn;
+
+ /* dn must be unhashed */
+ if (!d_unhashed(dn))
+ d_drop(dn);
+ realdn = d_materialise_unique(dn, in);
+ if (IS_ERR(realdn)) {
+ pr_err("splice_dentry error %p inode %p ino %llx.%llx\n",
+ dn, in, ceph_vinop(in));
+ if (prehash)
+ *prehash = false; /* don't rehash on error */
+ dn = realdn; /* note realdn contains the error */
+ goto out;
+ } else if (realdn) {
+ dout("dn %p (%d) spliced with %p (%d) "
+ "inode %p ino %llx.%llx\n",
+ dn, atomic_read(&dn->d_count),
+ realdn, atomic_read(&realdn->d_count),
+ realdn->d_inode, ceph_vinop(realdn->d_inode));
+ dput(dn);
+ dn = realdn;
+ } else {
+ BUG_ON(!ceph_dentry(dn));
+
+ dout("dn %p attached to %p ino %llx.%llx\n",
+ dn, dn->d_inode, ceph_vinop(dn->d_inode));
+ }
+ if ((!prehash || *prehash) && d_unhashed(dn))
+ d_rehash(dn);
+out:
+ return dn;
+}
+
+/*
+ * Incorporate results into the local cache. This is either just
+ * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
+ * after a lookup).
+ *
+ * A reply may contain
+ * a directory inode along with a dentry.
+ * and/or a target inode
+ *
+ * Called with snap_rwsem (read).
+ */
+int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
+ struct ceph_mds_session *session)
+{
+ struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct inode *in = NULL;
+ struct ceph_mds_reply_inode *ininfo;
+ struct ceph_vino vino;
+ int i = 0;
+ int err = 0;
+
+ dout("fill_trace %p is_dentry %d is_target %d\n", req,
+ rinfo->head->is_dentry, rinfo->head->is_target);
+
+#if 0
+ /*
+ * Debugging hook:
+ *
+ * If we resend completed ops to a recovering mds, we get no
+ * trace. Since that is very rare, pretend this is the case
+ * to ensure the 'no trace' handlers in the callers behave.
+ *
+ * Fill in inodes unconditionally to avoid breaking cap
+ * invariants.
+ */
+ if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
+ pr_info("fill_trace faking empty trace on %lld %s\n",
+ req->r_tid, ceph_mds_op_name(rinfo->head->op));
+ if (rinfo->head->is_dentry) {
+ rinfo->head->is_dentry = 0;
+ err = fill_inode(req->r_locked_dir,
+ &rinfo->diri, rinfo->dirfrag,
+ session, req->r_request_started, -1);
+ }
+ if (rinfo->head->is_target) {
+ rinfo->head->is_target = 0;
+ ininfo = rinfo->targeti.in;
+ vino.ino = le64_to_cpu(ininfo->ino);
+ vino.snap = le64_to_cpu(ininfo->snapid);
+ in = ceph_get_inode(sb, vino);
+ err = fill_inode(in, &rinfo->targeti, NULL,
+ session, req->r_request_started,
+ req->r_fmode);
+ iput(in);
+ }
+ }
+#endif
+
+ if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
+ dout("fill_trace reply is empty!\n");
+ if (rinfo->head->result == 0 && req->r_locked_dir) {
+ struct ceph_inode_info *ci =
+ ceph_inode(req->r_locked_dir);
+ dout(" clearing %p complete (empty trace)\n",
+ req->r_locked_dir);
+ ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
+ ci->i_release_count++;
+ }
+ return 0;
+ }
+
+ if (rinfo->head->is_dentry) {
+ /*
+ * lookup link rename : null -> possibly existing inode
+ * mknod symlink mkdir : null -> new inode
+ * unlink : linked -> null
+ */
+ struct inode *dir = req->r_locked_dir;
+ struct dentry *dn = req->r_dentry;
+ bool have_dir_cap, have_lease;
+
+ BUG_ON(!dn);
+ BUG_ON(!dir);
+ BUG_ON(dn->d_parent->d_inode != dir);
+ BUG_ON(ceph_ino(dir) !=
+ le64_to_cpu(rinfo->diri.in->ino));
+ BUG_ON(ceph_snap(dir) !=
+ le64_to_cpu(rinfo->diri.in->snapid));
+
+ err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
+ session, req->r_request_started, -1,
+ &req->r_caps_reservation);
+ if (err < 0)
+ return err;
+
+ /* do we have a lease on the whole dir? */
+ have_dir_cap =
+ (le32_to_cpu(rinfo->diri.in->cap.caps) &
+ CEPH_CAP_FILE_SHARED);
+
+ /* do we have a dn lease? */
+ have_lease = have_dir_cap ||
+ (le16_to_cpu(rinfo->dlease->mask) &
+ CEPH_LOCK_DN);
+
+ if (!have_lease)
+ dout("fill_trace no dentry lease or dir cap\n");
+
+ /* rename? */
+ if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
+ dout(" src %p '%.*s' dst %p '%.*s'\n",
+ req->r_old_dentry,
+ req->r_old_dentry->d_name.len,
+ req->r_old_dentry->d_name.name,
+ dn, dn->d_name.len, dn->d_name.name);
+ dout("fill_trace doing d_move %p -> %p\n",
+ req->r_old_dentry, dn);
+ d_move(req->r_old_dentry, dn);
+ dout(" src %p '%.*s' dst %p '%.*s'\n",
+ req->r_old_dentry,
+ req->r_old_dentry->d_name.len,
+ req->r_old_dentry->d_name.name,
+ dn, dn->d_name.len, dn->d_name.name);
+ /* take overwritten dentry's readdir offset */
+ ceph_dentry(req->r_old_dentry)->offset =
+ ceph_dentry(dn)->offset;
+ dn = req->r_old_dentry; /* use old_dentry */
+ in = dn->d_inode;
+ }
+
+ /* null dentry? */
+ if (!rinfo->head->is_target) {
+ dout("fill_trace null dentry\n");
+ if (dn->d_inode) {
+ dout("d_delete %p\n", dn);
+ d_delete(dn);
+ } else {
+ dout("d_instantiate %p NULL\n", dn);
+ d_instantiate(dn, NULL);
+ if (have_lease && d_unhashed(dn))
+ d_rehash(dn);
+ update_dentry_lease(dn, rinfo->dlease,
+ session,
+ req->r_request_started);
+ }
+ goto done;
+ }
+
+ /* attach proper inode */
+ ininfo = rinfo->targeti.in;
+ vino.ino = le64_to_cpu(ininfo->ino);
+ vino.snap = le64_to_cpu(ininfo->snapid);
+ if (!dn->d_inode) {
+ in = ceph_get_inode(sb, vino);
+ if (IS_ERR(in)) {
+ pr_err("fill_trace bad get_inode "
+ "%llx.%llx\n", vino.ino, vino.snap);
+ err = PTR_ERR(in);
+ d_delete(dn);
+ goto done;
+ }
+ dn = splice_dentry(dn, in, &have_lease);
+ if (IS_ERR(dn)) {
+ err = PTR_ERR(dn);
+ goto done;
+ }
+ req->r_dentry = dn; /* may have spliced */
+ igrab(in);
+ } else if (ceph_ino(in) == vino.ino &&
+ ceph_snap(in) == vino.snap) {
+ igrab(in);
+ } else {
+ dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
+ dn, in, ceph_ino(in), ceph_snap(in),
+ vino.ino, vino.snap);
+ have_lease = false;
+ in = NULL;
+ }
+
+ if (have_lease)
+ update_dentry_lease(dn, rinfo->dlease, session,
+ req->r_request_started);
+ dout(" final dn %p\n", dn);
+ i++;
+ } else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+ req->r_op == CEPH_MDS_OP_MKSNAP) {
+ struct dentry *dn = req->r_dentry;
+
+ /* fill out a snapdir LOOKUPSNAP dentry */
+ BUG_ON(!dn);
+ BUG_ON(!req->r_locked_dir);
+ BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
+ ininfo = rinfo->targeti.in;
+ vino.ino = le64_to_cpu(ininfo->ino);
+ vino.snap = le64_to_cpu(ininfo->snapid);
+ in = ceph_get_inode(sb, vino);
+ if (IS_ERR(in)) {
+ pr_err("fill_inode get_inode badness %llx.%llx\n",
+ vino.ino, vino.snap);
+ err = PTR_ERR(in);
+ d_delete(dn);
+ goto done;
+ }
+ dout(" linking snapped dir %p to dn %p\n", in, dn);
+ dn = splice_dentry(dn, in, NULL);
+ if (IS_ERR(dn)) {
+ err = PTR_ERR(dn);
+ goto done;
+ }
+ req->r_dentry = dn; /* may have spliced */
+ igrab(in);
+ rinfo->head->is_dentry = 1; /* fool notrace handlers */
+ }
+
+ if (rinfo->head->is_target) {
+ vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
+ vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
+
+ if (in == NULL || ceph_ino(in) != vino.ino ||
+ ceph_snap(in) != vino.snap) {
+ in = ceph_get_inode(sb, vino);
+ if (IS_ERR(in)) {
+ err = PTR_ERR(in);
+ goto done;
+ }
+ }
+ req->r_target_inode = in;
+
+ err = fill_inode(in,
+ &rinfo->targeti, NULL,
+ session, req->r_request_started,
+ (le32_to_cpu(rinfo->head->result) == 0) ?
+ req->r_fmode : -1,
+ &req->r_caps_reservation);
+ if (err < 0) {
+ pr_err("fill_inode badness %p %llx.%llx\n",
+ in, ceph_vinop(in));
+ goto done;
+ }
+ }
+
+done:
+ dout("fill_trace done err=%d\n", err);
+ return err;
+}
+
+/*
+ * Prepopulate our cache with readdir results, leases, etc.
+ */
+int ceph_readdir_prepopulate(struct ceph_mds_request *req,
+ struct ceph_mds_session *session)
+{
+ struct dentry *parent = req->r_dentry;
+ struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+ struct qstr dname;
+ struct dentry *dn;
+ struct inode *in;
+ int err = 0, i;
+ struct inode *snapdir = NULL;
+ struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
+ u64 frag = le32_to_cpu(rhead->args.readdir.frag);
+ struct ceph_dentry_info *di;
+
+ if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
+ snapdir = ceph_get_snapdir(parent->d_inode);
+ parent = d_find_alias(snapdir);
+ dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
+ rinfo->dir_nr, parent);
+ } else {
+ dout("readdir_prepopulate %d items under dn %p\n",
+ rinfo->dir_nr, parent);
+ if (rinfo->dir_dir)
+ ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
+ }
+
+ for (i = 0; i < rinfo->dir_nr; i++) {
+ struct ceph_vino vino;
+
+ dname.name = rinfo->dir_dname[i];
+ dname.len = rinfo->dir_dname_len[i];
+ dname.hash = full_name_hash(dname.name, dname.len);
+
+ vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
+ vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);
+
+retry_lookup:
+ dn = d_lookup(parent, &dname);
+ dout("d_lookup on parent=%p name=%.*s got %p\n",
+ parent, dname.len, dname.name, dn);
+
+ if (!dn) {
+ dn = d_alloc(parent, &dname);
+ dout("d_alloc %p '%.*s' = %p\n", parent,
+ dname.len, dname.name, dn);
+ if (dn == NULL) {
+ dout("d_alloc badness\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ err = ceph_init_dentry(dn);
+ if (err < 0)
+ goto out;
+ } else if (dn->d_inode &&
+ (ceph_ino(dn->d_inode) != vino.ino ||
+ ceph_snap(dn->d_inode) != vino.snap)) {
+ dout(" dn %p points to wrong inode %p\n",
+ dn, dn->d_inode);
+ d_delete(dn);
+ dput(dn);
+ goto retry_lookup;
+ } else {
+ /* reorder parent's d_subdirs */
+ spin_lock(&dcache_lock);
+ spin_lock(&dn->d_lock);
+ list_move(&dn->d_u.d_child, &parent->d_subdirs);
+ spin_unlock(&dn->d_lock);
+ spin_unlock(&dcache_lock);
+ }
+
+ di = dn->d_fsdata;
+ di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
+
+ /* inode */
+ if (dn->d_inode) {
+ in = dn->d_inode;
+ } else {
+ in = ceph_get_inode(parent->d_sb, vino);
+ if (in == NULL) {
+ dout("new_inode badness\n");
+ d_delete(dn);
+ dput(dn);
+ err = -ENOMEM;
+ goto out;
+ }
+ dn = splice_dentry(dn, in, NULL);
+ }
+
+ if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
+ req->r_request_started, -1,
+ &req->r_caps_reservation) < 0) {
+ pr_err("fill_inode badness on %p\n", in);
+ dput(dn);
+ continue;
+ }
+ update_dentry_lease(dn, rinfo->dir_dlease[i],
+ req->r_session, req->r_request_started);
+ dput(dn);
+ }
+ req->r_did_prepopulate = true;
+
+out:
+ if (snapdir) {
+ iput(snapdir);
+ dput(parent);
+ }
+ dout("readdir_prepopulate done\n");
+ return err;
+}
+
+int ceph_inode_set_size(struct inode *inode, loff_t size)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int ret = 0;
+
+ spin_lock(&inode->i_lock);
+ dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
+ inode->i_size = size;
+ inode->i_blocks = (size + (1 << 9) - 1) >> 9;
+
+ /* tell the MDS if we are approaching max_size */
+ if ((size << 1) >= ci->i_max_size &&
+ (ci->i_reported_size << 1) < ci->i_max_size)
+ ret = 1;
+
+ spin_unlock(&inode->i_lock);
+ return ret;
+}
+
+/*
+ * Write back inode data in a worker thread. (This can't be done
+ * in the message handler context.)
+ */
+void ceph_inode_writeback(struct work_struct *work)
+{
+ struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
+ i_wb_work);
+ struct inode *inode = &ci->vfs_inode;
+
+ dout("writeback %p\n", inode);
+ filemap_fdatawrite(&inode->i_data);
+ iput(inode);
+}
+
+/*
+ * Invalidate inode pages in a worker thread. (This can't be done
+ * in the message handler context.)
+ */
+static void ceph_inode_invalidate_pages(struct work_struct *work)
+{
+ struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
+ i_pg_inv_work);
+ struct inode *inode = &ci->vfs_inode;
+ u32 orig_gen;
+ int check = 0;
+
+ spin_lock(&inode->i_lock);
+ dout("invalidate_pages %p gen %d revoking %d\n", inode,
+ ci->i_rdcache_gen, ci->i_rdcache_revoking);
+ if (ci->i_rdcache_gen == 0 ||
+ ci->i_rdcache_revoking != ci->i_rdcache_gen) {
+ BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
+ /* nevermind! */
+ ci->i_rdcache_revoking = 0;
+ spin_unlock(&inode->i_lock);
+ goto out;
+ }
+ orig_gen = ci->i_rdcache_gen;
+ spin_unlock(&inode->i_lock);
+
+ truncate_inode_pages(&inode->i_data, 0);
+
+ spin_lock(&inode->i_lock);
+ if (orig_gen == ci->i_rdcache_gen) {
+ dout("invalidate_pages %p gen %d successful\n", inode,
+ ci->i_rdcache_gen);
+ ci->i_rdcache_gen = 0;
+ ci->i_rdcache_revoking = 0;
+ check = 1;
+ } else {
+ dout("invalidate_pages %p gen %d raced, gen now %d\n",
+ inode, orig_gen, ci->i_rdcache_gen);
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (check)
+ ceph_check_caps(ci, 0, NULL);
+out:
+ iput(inode);
+}
+
+
+/*
+ * called by trunc_wq; take i_mutex ourselves
+ *
+ * We also truncate in a separate thread as well.
+ */
+void ceph_vmtruncate_work(struct work_struct *work)
+{
+ struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
+ i_vmtruncate_work);
+ struct inode *inode = &ci->vfs_inode;
+
+ dout("vmtruncate_work %p\n", inode);
+ mutex_lock(&inode->i_mutex);
+ __ceph_do_pending_vmtruncate(inode);
+ mutex_unlock(&inode->i_mutex);
+ iput(inode);
+}
+
+/*
+ * called with i_mutex held.
+ *
+ * Make sure any pending truncation is applied before doing anything
+ * that may depend on it.
+ */
+void __ceph_do_pending_vmtruncate(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ u64 to;
+ int wrbuffer_refs, wake = 0;
+
+retry:
+ spin_lock(&inode->i_lock);
+ if (ci->i_truncate_pending == 0) {
+ dout("__do_pending_vmtruncate %p none pending\n", inode);
+ spin_unlock(&inode->i_lock);
+ return;
+ }
+
+ /*
+ * make sure any dirty snapped pages are flushed before we
+ * possibly truncate them.. so write AND block!
+ */
+ if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
+ dout("__do_pending_vmtruncate %p flushing snaps first\n",
+ inode);
+ spin_unlock(&inode->i_lock);
+ filemap_write_and_wait_range(&inode->i_data, 0,
+ inode->i_sb->s_maxbytes);
+ goto retry;
+ }
+
+ to = ci->i_truncate_size;
+ wrbuffer_refs = ci->i_wrbuffer_ref;
+ dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
+ ci->i_truncate_pending, to);
+ spin_unlock(&inode->i_lock);
+
+ truncate_inode_pages(inode->i_mapping, to);
+
+ spin_lock(&inode->i_lock);
+ ci->i_truncate_pending--;
+ if (ci->i_truncate_pending == 0)
+ wake = 1;
+ spin_unlock(&inode->i_lock);
+
+ if (wrbuffer_refs == 0)
+ ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+ if (wake)
+ wake_up(&ci->i_cap_wq);
+}
+
+
+/*
+ * symlinks
+ */
+static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
+ nd_set_link(nd, ci->i_symlink);
+ return NULL;
+}
+
+static const struct inode_operations ceph_symlink_iops = {
+ .readlink = generic_readlink,
+ .follow_link = ceph_sym_follow_link,
+};
+
+/*
+ * setattr
+ */
+int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct inode *parent_inode = dentry->d_parent->d_inode;
+ const unsigned int ia_valid = attr->ia_valid;
+ struct ceph_mds_request *req;
+ struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc;
+ int issued;
+ int release = 0, dirtied = 0;
+ int mask = 0;
+ int err = 0;
+ int queue_trunc = 0;
+
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return -EROFS;
+
+ __ceph_do_pending_vmtruncate(inode);
+
+ err = inode_change_ok(inode, attr);
+ if (err != 0)
+ return err;
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
+ USE_AUTH_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ spin_lock(&inode->i_lock);
+ issued = __ceph_caps_issued(ci, NULL);
+ dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
+
+ if (ia_valid & ATTR_UID) {
+ dout("setattr %p uid %d -> %d\n", inode,
+ inode->i_uid, attr->ia_uid);
+ if (issued & CEPH_CAP_AUTH_EXCL) {
+ inode->i_uid = attr->ia_uid;
+ dirtied |= CEPH_CAP_AUTH_EXCL;
+ } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
+ attr->ia_uid != inode->i_uid) {
+ req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
+ mask |= CEPH_SETATTR_UID;
+ release |= CEPH_CAP_AUTH_SHARED;
+ }
+ }
+ if (ia_valid & ATTR_GID) {
+ dout("setattr %p gid %d -> %d\n", inode,
+ inode->i_gid, attr->ia_gid);
+ if (issued & CEPH_CAP_AUTH_EXCL) {
+ inode->i_gid = attr->ia_gid;
+ dirtied |= CEPH_CAP_AUTH_EXCL;
+ } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
+ attr->ia_gid != inode->i_gid) {
+ req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
+ mask |= CEPH_SETATTR_GID;
+ release |= CEPH_CAP_AUTH_SHARED;
+ }
+ }
+ if (ia_valid & ATTR_MODE) {
+ dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
+ attr->ia_mode);
+ if (issued & CEPH_CAP_AUTH_EXCL) {
+ inode->i_mode = attr->ia_mode;
+ dirtied |= CEPH_CAP_AUTH_EXCL;
+ } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
+ attr->ia_mode != inode->i_mode) {
+ req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
+ mask |= CEPH_SETATTR_MODE;
+ release |= CEPH_CAP_AUTH_SHARED;
+ }
+ }
+
+ if (ia_valid & ATTR_ATIME) {
+ dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
+ inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
+ attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
+ if (issued & CEPH_CAP_FILE_EXCL) {
+ ci->i_time_warp_seq++;
+ inode->i_atime = attr->ia_atime;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ } else if ((issued & CEPH_CAP_FILE_WR) &&
+ timespec_compare(&inode->i_atime,
+ &attr->ia_atime) < 0) {
+ inode->i_atime = attr->ia_atime;
+ dirtied |= CEPH_CAP_FILE_WR;
+ } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
+ !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
+ ceph_encode_timespec(&req->r_args.setattr.atime,
+ &attr->ia_atime);
+ mask |= CEPH_SETATTR_ATIME;
+ release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
+ CEPH_CAP_FILE_WR;
+ }
+ }
+ if (ia_valid & ATTR_MTIME) {
+ dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
+ inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
+ attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
+ if (issued & CEPH_CAP_FILE_EXCL) {
+ ci->i_time_warp_seq++;
+ inode->i_mtime = attr->ia_mtime;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ } else if ((issued & CEPH_CAP_FILE_WR) &&
+ timespec_compare(&inode->i_mtime,
+ &attr->ia_mtime) < 0) {
+ inode->i_mtime = attr->ia_mtime;
+ dirtied |= CEPH_CAP_FILE_WR;
+ } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
+ !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
+ ceph_encode_timespec(&req->r_args.setattr.mtime,
+ &attr->ia_mtime);
+ mask |= CEPH_SETATTR_MTIME;
+ release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
+ CEPH_CAP_FILE_WR;
+ }
+ }
+ if (ia_valid & ATTR_SIZE) {
+ dout("setattr %p size %lld -> %lld\n", inode,
+ inode->i_size, attr->ia_size);
+ if (attr->ia_size > inode->i_sb->s_maxbytes) {
+ err = -EINVAL;
+ goto out;
+ }
+ if ((issued & CEPH_CAP_FILE_EXCL) &&
+ attr->ia_size > inode->i_size) {
+ inode->i_size = attr->ia_size;
+ if (attr->ia_size < inode->i_size) {
+ ci->i_truncate_size = attr->ia_size;
+ ci->i_truncate_pending++;
+ queue_trunc = 1;
+ }
+ inode->i_blocks =
+ (attr->ia_size + (1 << 9) - 1) >> 9;
+ inode->i_ctime = attr->ia_ctime;
+ ci->i_reported_size = attr->ia_size;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
+ attr->ia_size != inode->i_size) {
+ req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
+ req->r_args.setattr.old_size =
+ cpu_to_le64(inode->i_size);
+ mask |= CEPH_SETATTR_SIZE;
+ release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
+ CEPH_CAP_FILE_WR;
+ }
+ }
+
+ /* these do nothing */
+ if (ia_valid & ATTR_CTIME) {
+ bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
+ ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
+ dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
+ inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
+ attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
+ only ? "ctime only" : "ignored");
+ inode->i_ctime = attr->ia_ctime;
+ if (only) {
+ /*
+ * if kernel wants to dirty ctime but nothing else,
+ * we need to choose a cap to dirty under, or do
+ * a almost-no-op setattr
+ */
+ if (issued & CEPH_CAP_AUTH_EXCL)
+ dirtied |= CEPH_CAP_AUTH_EXCL;
+ else if (issued & CEPH_CAP_FILE_EXCL)
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ else if (issued & CEPH_CAP_XATTR_EXCL)
+ dirtied |= CEPH_CAP_XATTR_EXCL;
+ else
+ mask |= CEPH_SETATTR_CTIME;
+ }
+ }
+ if (ia_valid & ATTR_FILE)
+ dout("setattr %p ATTR_FILE ... hrm!\n", inode);
+
+ if (dirtied) {
+ __ceph_mark_dirty_caps(ci, dirtied);
+ inode->i_ctime = CURRENT_TIME;
+ }
+
+ release &= issued;
+ spin_unlock(&inode->i_lock);
+
+ if (queue_trunc)
+ __ceph_do_pending_vmtruncate(inode);
+
+ if (mask) {
+ req->r_inode = igrab(inode);
+ req->r_inode_drop = release;
+ req->r_args.setattr.mask = cpu_to_le32(mask);
+ req->r_num_caps = 1;
+ err = ceph_mdsc_do_request(mdsc, parent_inode, req);
+ }
+ dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
+ ceph_cap_string(dirtied), mask);
+
+ ceph_mdsc_put_request(req);
+ __ceph_do_pending_vmtruncate(inode);
+ return err;
+out:
+ spin_unlock(&inode->i_lock);
+ ceph_mdsc_put_request(req);
+ return err;
+}
+
+/*
+ * Verify that we have a lease on the given mask. If not,
+ * do a getattr against an mds.
+ */
+int ceph_do_getattr(struct inode *inode, int mask)
+{
+ struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req;
+ int err;
+
+ if (ceph_snap(inode) == CEPH_SNAPDIR) {
+ dout("do_getattr inode %p SNAPDIR\n", inode);
+ return 0;
+ }
+
+ dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
+ if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
+ return 0;
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_inode = igrab(inode);
+ req->r_num_caps = 1;
+ req->r_args.getattr.mask = cpu_to_le32(mask);
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ ceph_mdsc_put_request(req);
+ dout("do_getattr result=%d\n", err);
+ return err;
+}
+
+
+/*
+ * Check inode permissions. We verify we have a valid value for
+ * the AUTH cap, then call the generic handler.
+ */
+int ceph_permission(struct inode *inode, int mask)
+{
+ int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);
+
+ if (!err)
+ err = generic_permission(inode, mask, NULL);
+ return err;
+}
+
+/*
+ * Get all attributes. Hopefully somedata we'll have a statlite()
+ * and can limit the fields we require to be accurate.
+ */
+int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int err;
+
+ err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
+ if (!err) {
+ generic_fillattr(inode, stat);
+ stat->ino = inode->i_ino;
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ stat->dev = ceph_snap(inode);
+ else
+ stat->dev = 0;
+ if (S_ISDIR(inode->i_mode)) {
+ stat->size = ci->i_rbytes;
+ stat->blocks = 0;
+ stat->blksize = 65536;
+ }
+ }
+ return err;
+}
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
new file mode 100644
index 000000000000..8a5bcae62846
--- /dev/null
+++ b/fs/ceph/ioctl.c
@@ -0,0 +1,160 @@
+#include <linux/in.h>
+
+#include "ioctl.h"
+#include "super.h"
+#include "ceph_debug.h"
+
+
+/*
+ * ioctls
+ */
+
+/*
+ * get and set the file layout
+ */
+static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
+{
+ struct ceph_inode_info *ci = ceph_inode(file->f_dentry->d_inode);
+ struct ceph_ioctl_layout l;
+ int err;
+
+ err = ceph_do_getattr(file->f_dentry->d_inode, CEPH_STAT_CAP_LAYOUT);
+ if (!err) {
+ l.stripe_unit = ceph_file_layout_su(ci->i_layout);
+ l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
+ l.object_size = ceph_file_layout_object_size(ci->i_layout);
+ l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
+ l.preferred_osd =
+ (s32)le32_to_cpu(ci->i_layout.fl_pg_preferred);
+ if (copy_to_user(arg, &l, sizeof(l)))
+ return -EFAULT;
+ }
+
+ return err;
+}
+
+static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
+ struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_ioctl_layout l;
+ int err, i;
+
+ /* copy and validate */
+ if (copy_from_user(&l, arg, sizeof(l)))
+ return -EFAULT;
+
+ if ((l.object_size & ~PAGE_MASK) ||
+ (l.stripe_unit & ~PAGE_MASK) ||
+ !l.stripe_unit ||
+ (l.object_size &&
+ (unsigned)l.object_size % (unsigned)l.stripe_unit))
+ return -EINVAL;
+
+ /* make sure it's a valid data pool */
+ if (l.data_pool > 0) {
+ mutex_lock(&mdsc->mutex);
+ err = -EINVAL;
+ for (i = 0; i < mdsc->mdsmap->m_num_data_pg_pools; i++)
+ if (mdsc->mdsmap->m_data_pg_pools[i] == l.data_pool) {
+ err = 0;
+ break;
+ }
+ mutex_unlock(&mdsc->mutex);
+ if (err)
+ return err;
+ }
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT,
+ USE_AUTH_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_inode = igrab(inode);
+ req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
+
+ req->r_args.setlayout.layout.fl_stripe_unit =
+ cpu_to_le32(l.stripe_unit);
+ req->r_args.setlayout.layout.fl_stripe_count =
+ cpu_to_le32(l.stripe_count);
+ req->r_args.setlayout.layout.fl_object_size =
+ cpu_to_le32(l.object_size);
+ req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool);
+ req->r_args.setlayout.layout.fl_pg_preferred =
+ cpu_to_le32(l.preferred_osd);
+
+ err = ceph_mdsc_do_request(mdsc, parent_inode, req);
+ ceph_mdsc_put_request(req);
+ return err;
+}
+
+/*
+ * Return object name, size/offset information, and location (OSD
+ * number, network address) for a given file offset.
+ */
+static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
+{
+ struct ceph_ioctl_dataloc dl;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_osd_client *osdc = &ceph_client(inode->i_sb)->osdc;
+ u64 len = 1, olen;
+ u64 tmp;
+ struct ceph_object_layout ol;
+ struct ceph_pg pgid;
+
+ /* copy and validate */
+ if (copy_from_user(&dl, arg, sizeof(dl)))
+ return -EFAULT;
+
+ down_read(&osdc->map_sem);
+ ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, &len,
+ &dl.object_no, &dl.object_offset, &olen);
+ dl.file_offset -= dl.object_offset;
+ dl.object_size = ceph_file_layout_object_size(ci->i_layout);
+ dl.block_size = ceph_file_layout_su(ci->i_layout);
+
+ /* block_offset = object_offset % block_size */
+ tmp = dl.object_offset;
+ dl.block_offset = do_div(tmp, dl.block_size);
+
+ snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
+ ceph_ino(inode), dl.object_no);
+ ceph_calc_object_layout(&ol, dl.object_name, &ci->i_layout,
+ osdc->osdmap);
+
+ pgid = ol.ol_pgid;
+ dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid);
+ if (dl.osd >= 0) {
+ struct ceph_entity_addr *a =
+ ceph_osd_addr(osdc->osdmap, dl.osd);
+ if (a)
+ memcpy(&dl.osd_addr, &a->in_addr, sizeof(dl.osd_addr));
+ } else {
+ memset(&dl.osd_addr, 0, sizeof(dl.osd_addr));
+ }
+ up_read(&osdc->map_sem);
+
+ /* send result back to user */
+ if (copy_to_user(arg, &dl, sizeof(dl)))
+ return -EFAULT;
+
+ return 0;
+}
+
+long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ dout("ioctl file %p cmd %u arg %lu\n", file, cmd, arg);
+ switch (cmd) {
+ case CEPH_IOC_GET_LAYOUT:
+ return ceph_ioctl_get_layout(file, (void __user *)arg);
+
+ case CEPH_IOC_SET_LAYOUT:
+ return ceph_ioctl_set_layout(file, (void __user *)arg);
+
+ case CEPH_IOC_GET_DATALOC:
+ return ceph_ioctl_get_dataloc(file, (void __user *)arg);
+ }
+ return -ENOTTY;
+}
diff --git a/fs/ceph/ioctl.h b/fs/ceph/ioctl.h
new file mode 100644
index 000000000000..25e4f1a9d059
--- /dev/null
+++ b/fs/ceph/ioctl.h
@@ -0,0 +1,40 @@
+#ifndef FS_CEPH_IOCTL_H
+#define FS_CEPH_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define CEPH_IOCTL_MAGIC 0x97
+
+/* just use u64 to align sanely on all archs */
+struct ceph_ioctl_layout {
+ __u64 stripe_unit, stripe_count, object_size;
+ __u64 data_pool;
+ __s64 preferred_osd;
+};
+
+#define CEPH_IOC_GET_LAYOUT _IOR(CEPH_IOCTL_MAGIC, 1, \
+ struct ceph_ioctl_layout)
+#define CEPH_IOC_SET_LAYOUT _IOW(CEPH_IOCTL_MAGIC, 2, \
+ struct ceph_ioctl_layout)
+
+/*
+ * Extract identity, address of the OSD and object storing a given
+ * file offset.
+ */
+struct ceph_ioctl_dataloc {
+ __u64 file_offset; /* in+out: file offset */
+ __u64 object_offset; /* out: offset in object */
+ __u64 object_no; /* out: object # */
+ __u64 object_size; /* out: object size */
+ char object_name[64]; /* out: object name */
+ __u64 block_offset; /* out: offset in block */
+ __u64 block_size; /* out: block length */
+ __s64 osd; /* out: osd # */
+ struct sockaddr_storage osd_addr; /* out: osd address */
+};
+
+#define CEPH_IOC_GET_DATALOC _IOWR(CEPH_IOCTL_MAGIC, 3, \
+ struct ceph_ioctl_dataloc)
+
+#endif
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
new file mode 100644
index 000000000000..739093f281d0
--- /dev/null
+++ b/fs/ceph/mds_client.c
@@ -0,0 +1,2975 @@
+#include "ceph_debug.h"
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+
+#include "mds_client.h"
+#include "mon_client.h"
+#include "super.h"
+#include "messenger.h"
+#include "decode.h"
+#include "auth.h"
+
+/*
+ * A cluster of MDS (metadata server) daemons is responsible for
+ * managing the file system namespace (the directory hierarchy and
+ * inodes) and for coordinating shared access to storage. Metadata is
+ * partitioning hierarchically across a number of servers, and that
+ * partition varies over time as the cluster adjusts the distribution
+ * in order to balance load.
+ *
+ * The MDS client is primarily responsible to managing synchronous
+ * metadata requests for operations like open, unlink, and so forth.
+ * If there is a MDS failure, we find out about it when we (possibly
+ * request and) receive a new MDS map, and can resubmit affected
+ * requests.
+ *
+ * For the most part, though, we take advantage of a lossless
+ * communications channel to the MDS, and do not need to worry about
+ * timing out or resubmitting requests.
+ *
+ * We maintain a stateful "session" with each MDS we interact with.
+ * Within each session, we sent periodic heartbeat messages to ensure
+ * any capabilities or leases we have been issues remain valid. If
+ * the session times out and goes stale, our leases and capabilities
+ * are no longer valid.
+ */
+
+static void __wake_requests(struct ceph_mds_client *mdsc,
+ struct list_head *head);
+
+const static struct ceph_connection_operations mds_con_ops;
+
+
+/*
+ * mds reply parsing
+ */
+
+/*
+ * parse individual inode info
+ */
+static int parse_reply_info_in(void **p, void *end,
+ struct ceph_mds_reply_info_in *info)
+{
+ int err = -EIO;
+
+ info->in = *p;
+ *p += sizeof(struct ceph_mds_reply_inode) +
+ sizeof(*info->in->fragtree.splits) *
+ le32_to_cpu(info->in->fragtree.nsplits);
+
+ ceph_decode_32_safe(p, end, info->symlink_len, bad);
+ ceph_decode_need(p, end, info->symlink_len, bad);
+ info->symlink = *p;
+ *p += info->symlink_len;
+
+ ceph_decode_32_safe(p, end, info->xattr_len, bad);
+ ceph_decode_need(p, end, info->xattr_len, bad);
+ info->xattr_data = *p;
+ *p += info->xattr_len;
+ return 0;
+bad:
+ return err;
+}
+
+/*
+ * parse a normal reply, which may contain a (dir+)dentry and/or a
+ * target inode.
+ */
+static int parse_reply_info_trace(void **p, void *end,
+ struct ceph_mds_reply_info_parsed *info)
+{
+ int err;
+
+ if (info->head->is_dentry) {
+ err = parse_reply_info_in(p, end, &info->diri);
+ if (err < 0)
+ goto out_bad;
+
+ if (unlikely(*p + sizeof(*info->dirfrag) > end))
+ goto bad;
+ info->dirfrag = *p;
+ *p += sizeof(*info->dirfrag) +
+ sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
+ if (unlikely(*p > end))
+ goto bad;
+
+ ceph_decode_32_safe(p, end, info->dname_len, bad);
+ ceph_decode_need(p, end, info->dname_len, bad);
+ info->dname = *p;
+ *p += info->dname_len;
+ info->dlease = *p;
+ *p += sizeof(*info->dlease);
+ }
+
+ if (info->head->is_target) {
+ err = parse_reply_info_in(p, end, &info->targeti);
+ if (err < 0)
+ goto out_bad;
+ }
+
+ if (unlikely(*p != end))
+ goto bad;
+ return 0;
+
+bad:
+ err = -EIO;
+out_bad:
+ pr_err("problem parsing mds trace %d\n", err);
+ return err;
+}
+
+/*
+ * parse readdir results
+ */
+static int parse_reply_info_dir(void **p, void *end,
+ struct ceph_mds_reply_info_parsed *info)
+{
+ u32 num, i = 0;
+ int err;
+
+ info->dir_dir = *p;
+ if (*p + sizeof(*info->dir_dir) > end)
+ goto bad;
+ *p += sizeof(*info->dir_dir) +
+ sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
+ if (*p > end)
+ goto bad;
+
+ ceph_decode_need(p, end, sizeof(num) + 2, bad);
+ num = ceph_decode_32(p);
+ info->dir_end = ceph_decode_8(p);
+ info->dir_complete = ceph_decode_8(p);
+ if (num == 0)
+ goto done;
+
+ /* alloc large array */
+ info->dir_nr = num;
+ info->dir_in = kcalloc(num, sizeof(*info->dir_in) +
+ sizeof(*info->dir_dname) +
+ sizeof(*info->dir_dname_len) +
+ sizeof(*info->dir_dlease),
+ GFP_NOFS);
+ if (info->dir_in == NULL) {
+ err = -ENOMEM;
+ goto out_bad;
+ }
+ info->dir_dname = (void *)(info->dir_in + num);
+ info->dir_dname_len = (void *)(info->dir_dname + num);
+ info->dir_dlease = (void *)(info->dir_dname_len + num);
+
+ while (num) {
+ /* dentry */
+ ceph_decode_need(p, end, sizeof(u32)*2, bad);
+ info->dir_dname_len[i] = ceph_decode_32(p);
+ ceph_decode_need(p, end, info->dir_dname_len[i], bad);
+ info->dir_dname[i] = *p;
+ *p += info->dir_dname_len[i];
+ dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
+ info->dir_dname[i]);
+ info->dir_dlease[i] = *p;
+ *p += sizeof(struct ceph_mds_reply_lease);
+
+ /* inode */
+ err = parse_reply_info_in(p, end, &info->dir_in[i]);
+ if (err < 0)
+ goto out_bad;
+ i++;
+ num--;
+ }
+
+done:
+ if (*p != end)
+ goto bad;
+ return 0;
+
+bad:
+ err = -EIO;
+out_bad:
+ pr_err("problem parsing dir contents %d\n", err);
+ return err;
+}
+
+/*
+ * parse entire mds reply
+ */
+static int parse_reply_info(struct ceph_msg *msg,
+ struct ceph_mds_reply_info_parsed *info)
+{
+ void *p, *end;
+ u32 len;
+ int err;
+
+ info->head = msg->front.iov_base;
+ p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
+ end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
+
+ /* trace */
+ ceph_decode_32_safe(&p, end, len, bad);
+ if (len > 0) {
+ err = parse_reply_info_trace(&p, p+len, info);
+ if (err < 0)
+ goto out_bad;
+ }
+
+ /* dir content */
+ ceph_decode_32_safe(&p, end, len, bad);
+ if (len > 0) {
+ err = parse_reply_info_dir(&p, p+len, info);
+ if (err < 0)
+ goto out_bad;
+ }
+
+ /* snap blob */
+ ceph_decode_32_safe(&p, end, len, bad);
+ info->snapblob_len = len;
+ info->snapblob = p;
+ p += len;
+
+ if (p != end)
+ goto bad;
+ return 0;
+
+bad:
+ err = -EIO;
+out_bad:
+ pr_err("mds parse_reply err %d\n", err);
+ return err;
+}
+
+static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
+{
+ kfree(info->dir_in);
+}
+
+
+/*
+ * sessions
+ */
+static const char *session_state_name(int s)
+{
+ switch (s) {
+ case CEPH_MDS_SESSION_NEW: return "new";
+ case CEPH_MDS_SESSION_OPENING: return "opening";
+ case CEPH_MDS_SESSION_OPEN: return "open";
+ case CEPH_MDS_SESSION_HUNG: return "hung";
+ case CEPH_MDS_SESSION_CLOSING: return "closing";
+ case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
+ default: return "???";
+ }
+}
+
+static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
+{
+ if (atomic_inc_not_zero(&s->s_ref)) {
+ dout("mdsc get_session %p %d -> %d\n", s,
+ atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
+ return s;
+ } else {
+ dout("mdsc get_session %p 0 -- FAIL", s);
+ return NULL;
+ }
+}
+
+void ceph_put_mds_session(struct ceph_mds_session *s)
+{
+ dout("mdsc put_session %p %d -> %d\n", s,
+ atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
+ if (atomic_dec_and_test(&s->s_ref)) {
+ if (s->s_authorizer)
+ s->s_mdsc->client->monc.auth->ops->destroy_authorizer(
+ s->s_mdsc->client->monc.auth, s->s_authorizer);
+ kfree(s);
+ }
+}
+
+/*
+ * called under mdsc->mutex
+ */
+struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
+ int mds)
+{
+ struct ceph_mds_session *session;
+
+ if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
+ return NULL;
+ session = mdsc->sessions[mds];
+ dout("lookup_mds_session %p %d\n", session,
+ atomic_read(&session->s_ref));
+ get_session(session);
+ return session;
+}
+
+static bool __have_session(struct ceph_mds_client *mdsc, int mds)
+{
+ if (mds >= mdsc->max_sessions)
+ return false;
+ return mdsc->sessions[mds];
+}
+
+/*
+ * create+register a new session for given mds.
+ * called under mdsc->mutex.
+ */
+static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
+ int mds)
+{
+ struct ceph_mds_session *s;
+
+ s = kzalloc(sizeof(*s), GFP_NOFS);
+ s->s_mdsc = mdsc;
+ s->s_mds = mds;
+ s->s_state = CEPH_MDS_SESSION_NEW;
+ s->s_ttl = 0;
+ s->s_seq = 0;
+ mutex_init(&s->s_mutex);
+
+ ceph_con_init(mdsc->client->msgr, &s->s_con);
+ s->s_con.private = s;
+ s->s_con.ops = &mds_con_ops;
+ s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
+ s->s_con.peer_name.num = cpu_to_le64(mds);
+
+ spin_lock_init(&s->s_cap_lock);
+ s->s_cap_gen = 0;
+ s->s_cap_ttl = 0;
+ s->s_renew_requested = 0;
+ s->s_renew_seq = 0;
+ INIT_LIST_HEAD(&s->s_caps);
+ s->s_nr_caps = 0;
+ atomic_set(&s->s_ref, 1);
+ INIT_LIST_HEAD(&s->s_waiting);
+ INIT_LIST_HEAD(&s->s_unsafe);
+ s->s_num_cap_releases = 0;
+ INIT_LIST_HEAD(&s->s_cap_releases);
+ INIT_LIST_HEAD(&s->s_cap_releases_done);
+ INIT_LIST_HEAD(&s->s_cap_flushing);
+ INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
+
+ dout("register_session mds%d\n", mds);
+ if (mds >= mdsc->max_sessions) {
+ int newmax = 1 << get_count_order(mds+1);
+ struct ceph_mds_session **sa;
+
+ dout("register_session realloc to %d\n", newmax);
+ sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
+ if (sa == NULL)
+ goto fail_realloc;
+ if (mdsc->sessions) {
+ memcpy(sa, mdsc->sessions,
+ mdsc->max_sessions * sizeof(void *));
+ kfree(mdsc->sessions);
+ }
+ mdsc->sessions = sa;
+ mdsc->max_sessions = newmax;
+ }
+ mdsc->sessions[mds] = s;
+ atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */
+
+ ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
+
+ return s;
+
+fail_realloc:
+ kfree(s);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * called under mdsc->mutex
+ */
+static void unregister_session(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *s)
+{
+ dout("unregister_session mds%d %p\n", s->s_mds, s);
+ mdsc->sessions[s->s_mds] = NULL;
+ ceph_con_close(&s->s_con);
+ ceph_put_mds_session(s);
+}
+
+/*
+ * drop session refs in request.
+ *
+ * should be last request ref, or hold mdsc->mutex
+ */
+static void put_request_session(struct ceph_mds_request *req)
+{
+ if (req->r_session) {
+ ceph_put_mds_session(req->r_session);
+ req->r_session = NULL;
+ }
+}
+
+void ceph_mdsc_release_request(struct kref *kref)
+{
+ struct ceph_mds_request *req = container_of(kref,
+ struct ceph_mds_request,
+ r_kref);
+ if (req->r_request)
+ ceph_msg_put(req->r_request);
+ if (req->r_reply) {
+ ceph_msg_put(req->r_reply);
+ destroy_reply_info(&req->r_reply_info);
+ }
+ if (req->r_inode) {
+ ceph_put_cap_refs(ceph_inode(req->r_inode),
+ CEPH_CAP_PIN);
+ iput(req->r_inode);
+ }
+ if (req->r_locked_dir)
+ ceph_put_cap_refs(ceph_inode(req->r_locked_dir),
+ CEPH_CAP_PIN);
+ if (req->r_target_inode)
+ iput(req->r_target_inode);
+ if (req->r_dentry)
+ dput(req->r_dentry);
+ if (req->r_old_dentry) {
+ ceph_put_cap_refs(
+ ceph_inode(req->r_old_dentry->d_parent->d_inode),
+ CEPH_CAP_PIN);
+ dput(req->r_old_dentry);
+ }
+ kfree(req->r_path1);
+ kfree(req->r_path2);
+ put_request_session(req);
+ ceph_unreserve_caps(&req->r_caps_reservation);
+ kfree(req);
+}
+
+/*
+ * lookup session, bump ref if found.
+ *
+ * called under mdsc->mutex.
+ */
+static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
+ u64 tid)
+{
+ struct ceph_mds_request *req;
+ req = radix_tree_lookup(&mdsc->request_tree, tid);
+ if (req)
+ ceph_mdsc_get_request(req);
+ return req;
+}
+
+/*
+ * Register an in-flight request, and assign a tid. Link to directory
+ * are modifying (if any).
+ *
+ * Called under mdsc->mutex.
+ */
+static void __register_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req,
+ struct inode *dir)
+{
+ req->r_tid = ++mdsc->last_tid;
+ if (req->r_num_caps)
+ ceph_reserve_caps(&req->r_caps_reservation, req->r_num_caps);
+ dout("__register_request %p tid %lld\n", req, req->r_tid);
+ ceph_mdsc_get_request(req);
+ radix_tree_insert(&mdsc->request_tree, req->r_tid, (void *)req);
+
+ if (dir) {
+ struct ceph_inode_info *ci = ceph_inode(dir);
+
+ spin_lock(&ci->i_unsafe_lock);
+ req->r_unsafe_dir = dir;
+ list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
+ spin_unlock(&ci->i_unsafe_lock);
+ }
+}
+
+static void __unregister_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+ radix_tree_delete(&mdsc->request_tree, req->r_tid);
+ ceph_mdsc_put_request(req);
+
+ if (req->r_unsafe_dir) {
+ struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
+
+ spin_lock(&ci->i_unsafe_lock);
+ list_del_init(&req->r_unsafe_dir_item);
+ spin_unlock(&ci->i_unsafe_lock);
+ }
+}
+
+/*
+ * Choose mds to send request to next. If there is a hint set in the
+ * request (e.g., due to a prior forward hint from the mds), use that.
+ * Otherwise, consult frag tree and/or caps to identify the
+ * appropriate mds. If all else fails, choose randomly.
+ *
+ * Called under mdsc->mutex.
+ */
+static int __choose_mds(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ struct inode *inode;
+ struct ceph_inode_info *ci;
+ struct ceph_cap *cap;
+ int mode = req->r_direct_mode;
+ int mds = -1;
+ u32 hash = req->r_direct_hash;
+ bool is_hash = req->r_direct_is_hash;
+
+ /*
+ * is there a specific mds we should try? ignore hint if we have
+ * no session and the mds is not up (active or recovering).
+ */
+ if (req->r_resend_mds >= 0 &&
+ (__have_session(mdsc, req->r_resend_mds) ||
+ ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
+ dout("choose_mds using resend_mds mds%d\n",
+ req->r_resend_mds);
+ return req->r_resend_mds;
+ }
+
+ if (mode == USE_RANDOM_MDS)
+ goto random;
+
+ inode = NULL;
+ if (req->r_inode) {
+ inode = req->r_inode;
+ } else if (req->r_dentry) {
+ if (req->r_dentry->d_inode) {
+ inode = req->r_dentry->d_inode;
+ } else {
+ inode = req->r_dentry->d_parent->d_inode;
+ hash = req->r_dentry->d_name.hash;
+ is_hash = true;
+ }
+ }
+ dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
+ (int)hash, mode);
+ if (!inode)
+ goto random;
+ ci = ceph_inode(inode);
+
+ if (is_hash && S_ISDIR(inode->i_mode)) {
+ struct ceph_inode_frag frag;
+ int found;
+
+ ceph_choose_frag(ci, hash, &frag, &found);
+ if (found) {
+ if (mode == USE_ANY_MDS && frag.ndist > 0) {
+ u8 r;
+
+ /* choose a random replica */
+ get_random_bytes(&r, 1);
+ r %= frag.ndist;
+ mds = frag.dist[r];
+ dout("choose_mds %p %llx.%llx "
+ "frag %u mds%d (%d/%d)\n",
+ inode, ceph_vinop(inode),
+ frag.frag, frag.mds,
+ (int)r, frag.ndist);
+ return mds;
+ }
+
+ /* since this file/dir wasn't known to be
+ * replicated, then we want to look for the
+ * authoritative mds. */
+ mode = USE_AUTH_MDS;
+ if (frag.mds >= 0) {
+ /* choose auth mds */
+ mds = frag.mds;
+ dout("choose_mds %p %llx.%llx "
+ "frag %u mds%d (auth)\n",
+ inode, ceph_vinop(inode), frag.frag, mds);
+ return mds;
+ }
+ }
+ }
+
+ spin_lock(&inode->i_lock);
+ cap = NULL;
+ if (mode == USE_AUTH_MDS)
+ cap = ci->i_auth_cap;
+ if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
+ cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
+ if (!cap) {
+ spin_unlock(&inode->i_lock);
+ goto random;
+ }
+ mds = cap->session->s_mds;
+ dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
+ inode, ceph_vinop(inode), mds,
+ cap == ci->i_auth_cap ? "auth " : "", cap);
+ spin_unlock(&inode->i_lock);
+ return mds;
+
+random:
+ mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
+ dout("choose_mds chose random mds%d\n", mds);
+ return mds;
+}
+
+
+/*
+ * session messages
+ */
+static struct ceph_msg *create_session_msg(u32 op, u64 seq)
+{
+ struct ceph_msg *msg;
+ struct ceph_mds_session_head *h;
+
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), 0, 0, NULL);
+ if (IS_ERR(msg)) {
+ pr_err("create_session_msg ENOMEM creating msg\n");
+ return ERR_PTR(PTR_ERR(msg));
+ }
+ h = msg->front.iov_base;
+ h->op = cpu_to_le32(op);
+ h->seq = cpu_to_le64(seq);
+ return msg;
+}
+
+/*
+ * send session open request.
+ *
+ * called under mdsc->mutex
+ */
+static int __open_session(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_msg *msg;
+ int mstate;
+ int mds = session->s_mds;
+ int err = 0;
+
+ /* wait for mds to go active? */
+ mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
+ dout("open_session to mds%d (%s)\n", mds,
+ ceph_mds_state_name(mstate));
+ session->s_state = CEPH_MDS_SESSION_OPENING;
+ session->s_renew_requested = jiffies;
+
+ /* send connect message */
+ msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto out;
+ }
+ ceph_con_send(&session->s_con, msg);
+
+out:
+ return 0;
+}
+
+/*
+ * session caps
+ */
+
+/*
+ * Free preallocated cap messages assigned to this session
+ */
+static void cleanup_cap_releases(struct ceph_mds_session *session)
+{
+ struct ceph_msg *msg;
+
+ spin_lock(&session->s_cap_lock);
+ while (!list_empty(&session->s_cap_releases)) {
+ msg = list_first_entry(&session->s_cap_releases,
+ struct ceph_msg, list_head);
+ list_del_init(&msg->list_head);
+ ceph_msg_put(msg);
+ }
+ while (!list_empty(&session->s_cap_releases_done)) {
+ msg = list_first_entry(&session->s_cap_releases_done,
+ struct ceph_msg, list_head);
+ list_del_init(&msg->list_head);
+ ceph_msg_put(msg);
+ }
+ spin_unlock(&session->s_cap_lock);
+}
+
+/*
+ * Helper to safely iterate over all caps associated with a session.
+ *
+ * caller must hold session s_mutex
+ */
+static int iterate_session_caps(struct ceph_mds_session *session,
+ int (*cb)(struct inode *, struct ceph_cap *,
+ void *), void *arg)
+{
+ struct ceph_cap *cap, *ncap;
+ struct inode *inode;
+ int ret;
+
+ dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
+ spin_lock(&session->s_cap_lock);
+ list_for_each_entry_safe(cap, ncap, &session->s_caps, session_caps) {
+ inode = igrab(&cap->ci->vfs_inode);
+ if (!inode)
+ continue;
+ spin_unlock(&session->s_cap_lock);
+ ret = cb(inode, cap, arg);
+ iput(inode);
+ if (ret < 0)
+ return ret;
+ spin_lock(&session->s_cap_lock);
+ }
+ spin_unlock(&session->s_cap_lock);
+
+ return 0;
+}
+
+static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ void *arg)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ dout("removing cap %p, ci is %p, inode is %p\n",
+ cap, ci, &ci->vfs_inode);
+ ceph_remove_cap(cap);
+ return 0;
+}
+
+/*
+ * caller must hold session s_mutex
+ */
+static void remove_session_caps(struct ceph_mds_session *session)
+{
+ dout("remove_session_caps on %p\n", session);
+ iterate_session_caps(session, remove_session_caps_cb, NULL);
+ BUG_ON(session->s_nr_caps > 0);
+ cleanup_cap_releases(session);
+}
+
+/*
+ * wake up any threads waiting on this session's caps. if the cap is
+ * old (didn't get renewed on the client reconnect), remove it now.
+ *
+ * caller must hold s_mutex.
+ */
+static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
+ void *arg)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ wake_up(&ci->i_cap_wq);
+ if (arg) {
+ spin_lock(&inode->i_lock);
+ ci->i_wanted_max_size = 0;
+ ci->i_requested_max_size = 0;
+ spin_unlock(&inode->i_lock);
+ }
+ return 0;
+}
+
+static void wake_up_session_caps(struct ceph_mds_session *session,
+ int reconnect)
+{
+ dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
+ iterate_session_caps(session, wake_up_session_cb,
+ (void *)(unsigned long)reconnect);
+}
+
+/*
+ * Send periodic message to MDS renewing all currently held caps. The
+ * ack will reset the expiration for all caps from this session.
+ *
+ * caller holds s_mutex
+ */
+static int send_renew_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_msg *msg;
+ int state;
+
+ if (time_after_eq(jiffies, session->s_cap_ttl) &&
+ time_after_eq(session->s_cap_ttl, session->s_renew_requested))
+ pr_info("mds%d caps stale\n", session->s_mds);
+
+ /* do not try to renew caps until a recovering mds has reconnected
+ * with its clients. */
+ state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
+ if (state < CEPH_MDS_STATE_RECONNECT) {
+ dout("send_renew_caps ignoring mds%d (%s)\n",
+ session->s_mds, ceph_mds_state_name(state));
+ return 0;
+ }
+
+ dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
+ ceph_mds_state_name(state));
+ session->s_renew_requested = jiffies;
+ msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
+ ++session->s_renew_seq);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+ ceph_con_send(&session->s_con, msg);
+ return 0;
+}
+
+/*
+ * Note new cap ttl, and any transition from stale -> not stale (fresh?).
+ *
+ * Called under session->s_mutex
+ */
+static void renewed_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session, int is_renew)
+{
+ int was_stale;
+ int wake = 0;
+
+ spin_lock(&session->s_cap_lock);
+ was_stale = is_renew && (session->s_cap_ttl == 0 ||
+ time_after_eq(jiffies, session->s_cap_ttl));
+
+ session->s_cap_ttl = session->s_renew_requested +
+ mdsc->mdsmap->m_session_timeout*HZ;
+
+ if (was_stale) {
+ if (time_before(jiffies, session->s_cap_ttl)) {
+ pr_info("mds%d caps renewed\n", session->s_mds);
+ wake = 1;
+ } else {
+ pr_info("mds%d caps still stale\n", session->s_mds);
+ }
+ }
+ dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
+ session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
+ time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
+ spin_unlock(&session->s_cap_lock);
+
+ if (wake)
+ wake_up_session_caps(session, 0);
+}
+
+/*
+ * send a session close request
+ */
+static int request_close_session(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_msg *msg;
+ int err = 0;
+
+ dout("request_close_session mds%d state %s seq %lld\n",
+ session->s_mds, session_state_name(session->s_state),
+ session->s_seq);
+ msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
+ if (IS_ERR(msg))
+ err = PTR_ERR(msg);
+ else
+ ceph_con_send(&session->s_con, msg);
+ return err;
+}
+
+/*
+ * Called with s_mutex held.
+ */
+static int __close_session(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
+ return 0;
+ session->s_state = CEPH_MDS_SESSION_CLOSING;
+ return request_close_session(mdsc, session);
+}
+
+/*
+ * Trim old(er) caps.
+ *
+ * Because we can't cache an inode without one or more caps, we do
+ * this indirectly: if a cap is unused, we prune its aliases, at which
+ * point the inode will hopefully get dropped to.
+ *
+ * Yes, this is a bit sloppy. Our only real goal here is to respond to
+ * memory pressure from the MDS, though, so it needn't be perfect.
+ */
+static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
+{
+ struct ceph_mds_session *session = arg;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int used, oissued, mine;
+
+ if (session->s_trim_caps <= 0)
+ return -1;
+
+ spin_lock(&inode->i_lock);
+ mine = cap->issued | cap->implemented;
+ used = __ceph_caps_used(ci);
+ oissued = __ceph_caps_issued_other(ci, cap);
+
+ dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n",
+ inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
+ ceph_cap_string(used));
+ if (ci->i_dirty_caps)
+ goto out; /* dirty caps */
+ if ((used & ~oissued) & mine)
+ goto out; /* we need these caps */
+
+ session->s_trim_caps--;
+ if (oissued) {
+ /* we aren't the only cap.. just remove us */
+ __ceph_remove_cap(cap, NULL);
+ } else {
+ /* try to drop referring dentries */
+ spin_unlock(&inode->i_lock);
+ d_prune_aliases(inode);
+ dout("trim_caps_cb %p cap %p pruned, count now %d\n",
+ inode, cap, atomic_read(&inode->i_count));
+ return 0;
+ }
+
+out:
+ spin_unlock(&inode->i_lock);
+ return 0;
+}
+
+/*
+ * Trim session cap count down to some max number.
+ */
+static int trim_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ int max_caps)
+{
+ int trim_caps = session->s_nr_caps - max_caps;
+
+ dout("trim_caps mds%d start: %d / %d, trim %d\n",
+ session->s_mds, session->s_nr_caps, max_caps, trim_caps);
+ if (trim_caps > 0) {
+ session->s_trim_caps = trim_caps;
+ iterate_session_caps(session, trim_caps_cb, session);
+ dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
+ session->s_mds, session->s_nr_caps, max_caps,
+ trim_caps - session->s_trim_caps);
+ }
+ return 0;
+}
+
+/*
+ * Allocate cap_release messages. If there is a partially full message
+ * in the queue, try to allocate enough to cover it's remainder, so that
+ * we can send it immediately.
+ *
+ * Called under s_mutex.
+ */
+static int add_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ int extra)
+{
+ struct ceph_msg *msg;
+ struct ceph_mds_cap_release *head;
+ int err = -ENOMEM;
+
+ if (extra < 0)
+ extra = mdsc->client->mount_args->cap_release_safety;
+
+ spin_lock(&session->s_cap_lock);
+
+ if (!list_empty(&session->s_cap_releases)) {
+ msg = list_first_entry(&session->s_cap_releases,
+ struct ceph_msg,
+ list_head);
+ head = msg->front.iov_base;
+ extra += CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
+ }
+
+ while (session->s_num_cap_releases < session->s_nr_caps + extra) {
+ spin_unlock(&session->s_cap_lock);
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
+ 0, 0, NULL);
+ if (!msg)
+ goto out_unlocked;
+ dout("add_cap_releases %p msg %p now %d\n", session, msg,
+ (int)msg->front.iov_len);
+ head = msg->front.iov_base;
+ head->num = cpu_to_le32(0);
+ msg->front.iov_len = sizeof(*head);
+ spin_lock(&session->s_cap_lock);
+ list_add(&msg->list_head, &session->s_cap_releases);
+ session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
+ }
+
+ if (!list_empty(&session->s_cap_releases)) {
+ msg = list_first_entry(&session->s_cap_releases,
+ struct ceph_msg,
+ list_head);
+ head = msg->front.iov_base;
+ if (head->num) {
+ dout(" queueing non-full %p (%d)\n", msg,
+ le32_to_cpu(head->num));
+ list_move_tail(&msg->list_head,
+ &session->s_cap_releases_done);
+ session->s_num_cap_releases -=
+ CEPH_CAPS_PER_RELEASE - le32_to_cpu(head->num);
+ }
+ }
+ err = 0;
+ spin_unlock(&session->s_cap_lock);
+out_unlocked:
+ return err;
+}
+
+/*
+ * flush all dirty inode data to disk.
+ *
+ * returns true if we've flushed through want_flush_seq
+ */
+static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
+{
+ int mds, ret = 1;
+
+ dout("check_cap_flush want %lld\n", want_flush_seq);
+ mutex_lock(&mdsc->mutex);
+ for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
+ struct ceph_mds_session *session = mdsc->sessions[mds];
+
+ if (!session)
+ continue;
+ get_session(session);
+ mutex_unlock(&mdsc->mutex);
+
+ mutex_lock(&session->s_mutex);
+ if (!list_empty(&session->s_cap_flushing)) {
+ struct ceph_inode_info *ci =
+ list_entry(session->s_cap_flushing.next,
+ struct ceph_inode_info,
+ i_flushing_item);
+ struct inode *inode = &ci->vfs_inode;
+
+ spin_lock(&inode->i_lock);
+ if (ci->i_cap_flush_seq <= want_flush_seq) {
+ dout("check_cap_flush still flushing %p "
+ "seq %lld <= %lld to mds%d\n", inode,
+ ci->i_cap_flush_seq, want_flush_seq,
+ session->s_mds);
+ ret = 0;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+
+ if (!ret)
+ return ret;
+ mutex_lock(&mdsc->mutex);
+ }
+
+ mutex_unlock(&mdsc->mutex);
+ dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
+ return ret;
+}
+
+/*
+ * called under s_mutex
+ */
+static void send_cap_releases(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_msg *msg;
+
+ dout("send_cap_releases mds%d\n", session->s_mds);
+ while (1) {
+ spin_lock(&session->s_cap_lock);
+ if (list_empty(&session->s_cap_releases_done))
+ break;
+ msg = list_first_entry(&session->s_cap_releases_done,
+ struct ceph_msg, list_head);
+ list_del_init(&msg->list_head);
+ spin_unlock(&session->s_cap_lock);
+ msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+ dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+ ceph_con_send(&session->s_con, msg);
+ }
+ spin_unlock(&session->s_cap_lock);
+}
+
+/*
+ * requests
+ */
+
+/*
+ * Create an mds request.
+ */
+struct ceph_mds_request *
+ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
+{
+ struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);
+
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->r_started = jiffies;
+ req->r_resend_mds = -1;
+ INIT_LIST_HEAD(&req->r_unsafe_dir_item);
+ req->r_fmode = -1;
+ kref_init(&req->r_kref);
+ INIT_LIST_HEAD(&req->r_wait);
+ init_completion(&req->r_completion);
+ init_completion(&req->r_safe_completion);
+ INIT_LIST_HEAD(&req->r_unsafe_item);
+
+ req->r_op = op;
+ req->r_direct_mode = mode;
+ return req;
+}
+
+/*
+ * return oldest (lowest) tid in request tree, 0 if none.
+ *
+ * called under mdsc->mutex.
+ */
+static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
+{
+ struct ceph_mds_request *first;
+ if (radix_tree_gang_lookup(&mdsc->request_tree,
+ (void **)&first, 0, 1) <= 0)
+ return 0;
+ return first->r_tid;
+}
+
+/*
+ * Build a dentry's path. Allocate on heap; caller must kfree. Based
+ * on build_path_from_dentry in fs/cifs/dir.c.
+ *
+ * If @stop_on_nosnap, generate path relative to the first non-snapped
+ * inode.
+ *
+ * Encode hidden .snap dirs as a double /, i.e.
+ * foo/.snap/bar -> foo//bar
+ */
+char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
+ int stop_on_nosnap)
+{
+ struct dentry *temp;
+ char *path;
+ int len, pos;
+
+ if (dentry == NULL)
+ return ERR_PTR(-EINVAL);
+
+retry:
+ len = 0;
+ for (temp = dentry; !IS_ROOT(temp);) {
+ struct inode *inode = temp->d_inode;
+ if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
+ len++; /* slash only */
+ else if (stop_on_nosnap && inode &&
+ ceph_snap(inode) == CEPH_NOSNAP)
+ break;
+ else
+ len += 1 + temp->d_name.len;
+ temp = temp->d_parent;
+ if (temp == NULL) {
+ pr_err("build_path_dentry corrupt dentry %p\n", dentry);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ if (len)
+ len--; /* no leading '/' */
+
+ path = kmalloc(len+1, GFP_NOFS);
+ if (path == NULL)
+ return ERR_PTR(-ENOMEM);
+ pos = len;
+ path[pos] = 0; /* trailing null */
+ for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
+ struct inode *inode = temp->d_inode;
+
+ if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
+ dout("build_path_dentry path+%d: %p SNAPDIR\n",
+ pos, temp);
+ } else if (stop_on_nosnap && inode &&
+ ceph_snap(inode) == CEPH_NOSNAP) {
+ break;
+ } else {
+ pos -= temp->d_name.len;
+ if (pos < 0)
+ break;
+ strncpy(path + pos, temp->d_name.name,
+ temp->d_name.len);
+ dout("build_path_dentry path+%d: %p '%.*s'\n",
+ pos, temp, temp->d_name.len, path + pos);
+ }
+ if (pos)
+ path[--pos] = '/';
+ temp = temp->d_parent;
+ if (temp == NULL) {
+ pr_err("build_path_dentry corrupt dentry\n");
+ kfree(path);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ if (pos != 0) {
+ pr_err("build_path_dentry did not end path lookup where "
+ "expected, namelen is %d, pos is %d\n", len, pos);
+ /* presumably this is only possible if racing with a
+ rename of one of the parent directories (we can not
+ lock the dentries above us to prevent this, but
+ retrying should be harmless) */
+ kfree(path);
+ goto retry;
+ }
+
+ *base = ceph_ino(temp->d_inode);
+ *plen = len;
+ dout("build_path_dentry on %p %d built %llx '%.*s'\n",
+ dentry, atomic_read(&dentry->d_count), *base, len, path);
+ return path;
+}
+
+static int build_dentry_path(struct dentry *dentry,
+ const char **ppath, int *ppathlen, u64 *pino,
+ int *pfreepath)
+{
+ char *path;
+
+ if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
+ *pino = ceph_ino(dentry->d_parent->d_inode);
+ *ppath = dentry->d_name.name;
+ *ppathlen = dentry->d_name.len;
+ return 0;
+ }
+ path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+ *pfreepath = 1;
+ return 0;
+}
+
+static int build_inode_path(struct inode *inode,
+ const char **ppath, int *ppathlen, u64 *pino,
+ int *pfreepath)
+{
+ struct dentry *dentry;
+ char *path;
+
+ if (ceph_snap(inode) == CEPH_NOSNAP) {
+ *pino = ceph_ino(inode);
+ *ppathlen = 0;
+ return 0;
+ }
+ dentry = d_find_alias(inode);
+ path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+ dput(dentry);
+ if (IS_ERR(path))
+ return PTR_ERR(path);
+ *ppath = path;
+ *pfreepath = 1;
+ return 0;
+}
+
+/*
+ * request arguments may be specified via an inode *, a dentry *, or
+ * an explicit ino+path.
+ */
+static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
+ const char *rpath, u64 rino,
+ const char **ppath, int *pathlen,
+ u64 *ino, int *freepath)
+{
+ int r = 0;
+
+ if (rinode) {
+ r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
+ dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+ ceph_snap(rinode));
+ } else if (rdentry) {
+ r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
+ dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
+ *ppath);
+ } else if (rpath) {
+ *ino = rino;
+ *ppath = rpath;
+ *pathlen = strlen(rpath);
+ dout(" path %.*s\n", *pathlen, rpath);
+ }
+
+ return r;
+}
+
+/*
+ * called under mdsc->mutex
+ */
+static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req,
+ int mds)
+{
+ struct ceph_msg *msg;
+ struct ceph_mds_request_head *head;
+ const char *path1 = NULL;
+ const char *path2 = NULL;
+ u64 ino1 = 0, ino2 = 0;
+ int pathlen1 = 0, pathlen2 = 0;
+ int freepath1 = 0, freepath2 = 0;
+ int len;
+ u16 releases;
+ void *p, *end;
+ int ret;
+
+ ret = set_request_path_attr(req->r_inode, req->r_dentry,
+ req->r_path1, req->r_ino1.ino,
+ &path1, &pathlen1, &ino1, &freepath1);
+ if (ret < 0) {
+ msg = ERR_PTR(ret);
+ goto out;
+ }
+
+ ret = set_request_path_attr(NULL, req->r_old_dentry,
+ req->r_path2, req->r_ino2.ino,
+ &path2, &pathlen2, &ino2, &freepath2);
+ if (ret < 0) {
+ msg = ERR_PTR(ret);
+ goto out_free1;
+ }
+
+ len = sizeof(*head) +
+ pathlen1 + pathlen2 + 2*(sizeof(u32) + sizeof(u64));
+
+ /* calculate (max) length for cap releases */
+ len += sizeof(struct ceph_mds_request_release) *
+ (!!req->r_inode_drop + !!req->r_dentry_drop +
+ !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
+ if (req->r_dentry_drop)
+ len += req->r_dentry->d_name.len;
+ if (req->r_old_dentry_drop)
+ len += req->r_old_dentry->d_name.len;
+
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, 0, 0, NULL);
+ if (IS_ERR(msg))
+ goto out_free2;
+
+ head = msg->front.iov_base;
+ p = msg->front.iov_base + sizeof(*head);
+ end = msg->front.iov_base + msg->front.iov_len;
+
+ head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
+ head->op = cpu_to_le32(req->r_op);
+ head->caller_uid = cpu_to_le32(current_fsuid());
+ head->caller_gid = cpu_to_le32(current_fsgid());
+ head->args = req->r_args;
+
+ ceph_encode_filepath(&p, end, ino1, path1);
+ ceph_encode_filepath(&p, end, ino2, path2);
+
+ /* cap releases */
+ releases = 0;
+ if (req->r_inode_drop)
+ releases += ceph_encode_inode_release(&p,
+ req->r_inode ? req->r_inode : req->r_dentry->d_inode,
+ mds, req->r_inode_drop, req->r_inode_unless, 0);
+ if (req->r_dentry_drop)
+ releases += ceph_encode_dentry_release(&p, req->r_dentry,
+ mds, req->r_dentry_drop, req->r_dentry_unless);
+ if (req->r_old_dentry_drop)
+ releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
+ mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
+ if (req->r_old_inode_drop)
+ releases += ceph_encode_inode_release(&p,
+ req->r_old_dentry->d_inode,
+ mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
+ head->num_releases = cpu_to_le16(releases);
+
+ BUG_ON(p > end);
+ msg->front.iov_len = p - msg->front.iov_base;
+ msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+
+ msg->pages = req->r_pages;
+ msg->nr_pages = req->r_num_pages;
+ msg->hdr.data_len = cpu_to_le32(req->r_data_len);
+ msg->hdr.data_off = cpu_to_le16(0);
+
+out_free2:
+ if (freepath2)
+ kfree((char *)path2);
+out_free1:
+ if (freepath1)
+ kfree((char *)path1);
+out:
+ return msg;
+}
+
+/*
+ * called under mdsc->mutex if error, under no mutex if
+ * success.
+ */
+static void complete_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ if (req->r_callback)
+ req->r_callback(mdsc, req);
+ else
+ complete(&req->r_completion);
+}
+
+/*
+ * called under mdsc->mutex
+ */
+static int __prepare_send_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req,
+ int mds)
+{
+ struct ceph_mds_request_head *rhead;
+ struct ceph_msg *msg;
+ int flags = 0;
+
+ req->r_mds = mds;
+ req->r_attempts++;
+ dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
+ req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
+
+ if (req->r_request) {
+ ceph_msg_put(req->r_request);
+ req->r_request = NULL;
+ }
+ msg = create_request_message(mdsc, req, mds);
+ if (IS_ERR(msg)) {
+ req->r_reply = ERR_PTR(PTR_ERR(msg));
+ complete_request(mdsc, req);
+ return -PTR_ERR(msg);
+ }
+ req->r_request = msg;
+
+ rhead = msg->front.iov_base;
+ rhead->tid = cpu_to_le64(req->r_tid);
+ rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
+ if (req->r_got_unsafe)
+ flags |= CEPH_MDS_FLAG_REPLAY;
+ if (req->r_locked_dir)
+ flags |= CEPH_MDS_FLAG_WANT_DENTRY;
+ rhead->flags = cpu_to_le32(flags);
+ rhead->num_fwd = req->r_num_fwd;
+ rhead->num_retry = req->r_attempts - 1;
+
+ dout(" r_locked_dir = %p\n", req->r_locked_dir);
+
+ if (req->r_target_inode && req->r_got_unsafe)
+ rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
+ else
+ rhead->ino = 0;
+ return 0;
+}
+
+/*
+ * send request, or put it on the appropriate wait list.
+ */
+static int __do_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ struct ceph_mds_session *session = NULL;
+ int mds = -1;
+ int err = -EAGAIN;
+
+ if (req->r_reply)
+ goto out;
+
+ if (req->r_timeout &&
+ time_after_eq(jiffies, req->r_started + req->r_timeout)) {
+ dout("do_request timed out\n");
+ err = -EIO;
+ goto finish;
+ }
+
+ mds = __choose_mds(mdsc, req);
+ if (mds < 0 ||
+ ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
+ dout("do_request no mds or not active, waiting for map\n");
+ list_add(&req->r_wait, &mdsc->waiting_for_map);
+ goto out;
+ }
+
+ /* get, open session */
+ session = __ceph_lookup_mds_session(mdsc, mds);
+ if (!session)
+ session = register_session(mdsc, mds);
+ dout("do_request mds%d session %p state %s\n", mds, session,
+ session_state_name(session->s_state));
+ if (session->s_state != CEPH_MDS_SESSION_OPEN &&
+ session->s_state != CEPH_MDS_SESSION_HUNG) {
+ if (session->s_state == CEPH_MDS_SESSION_NEW ||
+ session->s_state == CEPH_MDS_SESSION_CLOSING)
+ __open_session(mdsc, session);
+ list_add(&req->r_wait, &session->s_waiting);
+ goto out_session;
+ }
+
+ /* send request */
+ req->r_session = get_session(session);
+ req->r_resend_mds = -1; /* forget any previous mds hint */
+
+ if (req->r_request_started == 0) /* note request start time */
+ req->r_request_started = jiffies;
+
+ err = __prepare_send_request(mdsc, req, mds);
+ if (!err) {
+ ceph_msg_get(req->r_request);
+ ceph_con_send(&session->s_con, req->r_request);
+ }
+
+out_session:
+ ceph_put_mds_session(session);
+out:
+ return err;
+
+finish:
+ req->r_reply = ERR_PTR(err);
+ complete_request(mdsc, req);
+ goto out;
+}
+
+/*
+ * called under mdsc->mutex
+ */
+static void __wake_requests(struct ceph_mds_client *mdsc,
+ struct list_head *head)
+{
+ struct ceph_mds_request *req, *nreq;
+
+ list_for_each_entry_safe(req, nreq, head, r_wait) {
+ list_del_init(&req->r_wait);
+ __do_request(mdsc, req);
+ }
+}
+
+/*
+ * Wake up threads with requests pending for @mds, so that they can
+ * resubmit their requests to a possibly different mds. If @all is set,
+ * wake up if their requests has been forwarded to @mds, too.
+ */
+static void kick_requests(struct ceph_mds_client *mdsc, int mds, int all)
+{
+ struct ceph_mds_request *reqs[10];
+ u64 nexttid = 0;
+ int i, got;
+
+ dout("kick_requests mds%d\n", mds);
+ while (nexttid <= mdsc->last_tid) {
+ got = radix_tree_gang_lookup(&mdsc->request_tree,
+ (void **)&reqs, nexttid, 10);
+ if (got == 0)
+ break;
+ nexttid = reqs[got-1]->r_tid + 1;
+ for (i = 0; i < got; i++) {
+ if (reqs[i]->r_got_unsafe)
+ continue;
+ if (reqs[i]->r_session &&
+ reqs[i]->r_session->s_mds == mds) {
+ dout(" kicking tid %llu\n", reqs[i]->r_tid);
+ put_request_session(reqs[i]);
+ __do_request(mdsc, reqs[i]);
+ }
+ }
+ }
+}
+
+void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req)
+{
+ dout("submit_request on %p\n", req);
+ mutex_lock(&mdsc->mutex);
+ __register_request(mdsc, req, NULL);
+ __do_request(mdsc, req);
+ mutex_unlock(&mdsc->mutex);
+}
+
+/*
+ * Synchrously perform an mds request. Take care of all of the
+ * session setup, forwarding, retry details.
+ */
+int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
+ struct inode *dir,
+ struct ceph_mds_request *req)
+{
+ int err;
+
+ dout("do_request on %p\n", req);
+
+ /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
+ if (req->r_inode)
+ ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
+ if (req->r_locked_dir)
+ ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
+ if (req->r_old_dentry)
+ ceph_get_cap_refs(
+ ceph_inode(req->r_old_dentry->d_parent->d_inode),
+ CEPH_CAP_PIN);
+
+ /* issue */
+ mutex_lock(&mdsc->mutex);
+ __register_request(mdsc, req, dir);
+ __do_request(mdsc, req);
+
+ /* wait */
+ if (!req->r_reply) {
+ mutex_unlock(&mdsc->mutex);
+ if (req->r_timeout) {
+ err = wait_for_completion_timeout(&req->r_completion,
+ req->r_timeout);
+ if (err > 0)
+ err = 0;
+ else if (err == 0)
+ req->r_reply = ERR_PTR(-EIO);
+ } else {
+ wait_for_completion(&req->r_completion);
+ }
+ mutex_lock(&mdsc->mutex);
+ }
+
+ if (IS_ERR(req->r_reply)) {
+ err = PTR_ERR(req->r_reply);
+ req->r_reply = NULL;
+
+ /* clean up */
+ __unregister_request(mdsc, req);
+ if (!list_empty(&req->r_unsafe_item))
+ list_del_init(&req->r_unsafe_item);
+ complete(&req->r_safe_completion);
+ } else if (req->r_err) {
+ err = req->r_err;
+ } else {
+ err = le32_to_cpu(req->r_reply_info.head->result);
+ }
+ mutex_unlock(&mdsc->mutex);
+
+ dout("do_request %p done, result %d\n", req, err);
+ return err;
+}
+
+/*
+ * Handle mds reply.
+ *
+ * We take the session mutex and parse and process the reply immediately.
+ * This preserves the logical ordering of replies, capabilities, etc., sent
+ * by the MDS as they are applied to our local cache.
+ */
+static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
+{
+ struct ceph_mds_client *mdsc = session->s_mdsc;
+ struct ceph_mds_request *req;
+ struct ceph_mds_reply_head *head = msg->front.iov_base;
+ struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */
+ u64 tid;
+ int err, result;
+ int mds;
+
+ if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
+ return;
+ if (msg->front.iov_len < sizeof(*head)) {
+ pr_err("mdsc_handle_reply got corrupt (short) reply\n");
+ return;
+ }
+
+ /* get request, session */
+ tid = le64_to_cpu(head->tid);
+ mutex_lock(&mdsc->mutex);
+ req = __lookup_request(mdsc, tid);
+ if (!req) {
+ dout("handle_reply on unknown tid %llu\n", tid);
+ mutex_unlock(&mdsc->mutex);
+ return;
+ }
+ dout("handle_reply %p\n", req);
+ mds = le64_to_cpu(msg->hdr.src.name.num);
+
+ /* correct session? */
+ if (!req->r_session && req->r_session != session) {
+ pr_err("mdsc_handle_reply got %llu on session mds%d"
+ " not mds%d\n", tid, session->s_mds,
+ req->r_session ? req->r_session->s_mds : -1);
+ mutex_unlock(&mdsc->mutex);
+ goto out;
+ }
+
+ /* dup? */
+ if ((req->r_got_unsafe && !head->safe) ||
+ (req->r_got_safe && head->safe)) {
+ pr_warning("got a dup %s reply on %llu from mds%d\n",
+ head->safe ? "safe" : "unsafe", tid, mds);
+ mutex_unlock(&mdsc->mutex);
+ goto out;
+ }
+
+ result = le32_to_cpu(head->result);
+
+ /*
+ * Tolerate 2 consecutive ESTALEs from the same mds.
+ * FIXME: we should be looking at the cap migrate_seq.
+ */
+ if (result == -ESTALE) {
+ req->r_direct_mode = USE_AUTH_MDS;
+ req->r_num_stale++;
+ if (req->r_num_stale <= 2) {
+ __do_request(mdsc, req);
+ mutex_unlock(&mdsc->mutex);
+ goto out;
+ }
+ } else {
+ req->r_num_stale = 0;
+ }
+
+ if (head->safe) {
+ req->r_got_safe = true;
+ __unregister_request(mdsc, req);
+ complete(&req->r_safe_completion);
+
+ if (req->r_got_unsafe) {
+ /*
+ * We already handled the unsafe response, now do the
+ * cleanup. No need to examine the response; the MDS
+ * doesn't include any result info in the safe
+ * response. And even if it did, there is nothing
+ * useful we could do with a revised return value.
+ */
+ dout("got safe reply %llu, mds%d\n", tid, mds);
+ list_del_init(&req->r_unsafe_item);
+
+ /* last unsafe request during umount? */
+ if (mdsc->stopping && !__get_oldest_tid(mdsc))
+ complete(&mdsc->safe_umount_waiters);
+ mutex_unlock(&mdsc->mutex);
+ goto out;
+ }
+ }
+
+ BUG_ON(req->r_reply);
+
+ if (!head->safe) {
+ req->r_got_unsafe = true;
+ list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
+ }
+
+ dout("handle_reply tid %lld result %d\n", tid, result);
+ rinfo = &req->r_reply_info;
+ err = parse_reply_info(msg, rinfo);
+ mutex_unlock(&mdsc->mutex);
+
+ mutex_lock(&session->s_mutex);
+ if (err < 0) {
+ pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds);
+ goto out_err;
+ }
+
+ /* snap trace */
+ if (rinfo->snapblob_len) {
+ down_write(&mdsc->snap_rwsem);
+ ceph_update_snap_trace(mdsc, rinfo->snapblob,
+ rinfo->snapblob + rinfo->snapblob_len,
+ le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
+ downgrade_write(&mdsc->snap_rwsem);
+ } else {
+ down_read(&mdsc->snap_rwsem);
+ }
+
+ /* insert trace into our cache */
+ err = ceph_fill_trace(mdsc->client->sb, req, req->r_session);
+ if (err == 0) {
+ if (result == 0 && rinfo->dir_nr)
+ ceph_readdir_prepopulate(req, req->r_session);
+ ceph_unreserve_caps(&req->r_caps_reservation);
+ }
+
+ up_read(&mdsc->snap_rwsem);
+out_err:
+ if (err) {
+ req->r_err = err;
+ } else {
+ req->r_reply = msg;
+ ceph_msg_get(msg);
+ }
+
+ add_cap_releases(mdsc, req->r_session, -1);
+ mutex_unlock(&session->s_mutex);
+
+ /* kick calling process */
+ complete_request(mdsc, req);
+out:
+ ceph_mdsc_put_request(req);
+ return;
+}
+
+
+
+/*
+ * handle mds notification that our request has been forwarded.
+ */
+static void handle_forward(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+{
+ struct ceph_mds_request *req;
+ u64 tid;
+ u32 next_mds;
+ u32 fwd_seq;
+ u8 must_resend;
+ int err = -EINVAL;
+ void *p = msg->front.iov_base;
+ void *end = p + msg->front.iov_len;
+ int from_mds, state;
+
+ if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
+ goto bad;
+ from_mds = le64_to_cpu(msg->hdr.src.name.num);
+
+ ceph_decode_need(&p, end, sizeof(u64)+2*sizeof(u32), bad);
+ tid = ceph_decode_64(&p);
+ next_mds = ceph_decode_32(&p);
+ fwd_seq = ceph_decode_32(&p);
+ must_resend = ceph_decode_8(&p);
+
+ WARN_ON(must_resend); /* shouldn't happen. */
+
+ mutex_lock(&mdsc->mutex);
+ req = __lookup_request(mdsc, tid);
+ if (!req) {
+ dout("forward %llu dne\n", tid);
+ goto out; /* dup reply? */
+ }
+
+ state = mdsc->sessions[next_mds]->s_state;
+ if (fwd_seq <= req->r_num_fwd) {
+ dout("forward %llu to mds%d - old seq %d <= %d\n",
+ tid, next_mds, req->r_num_fwd, fwd_seq);
+ } else {
+ /* resend. forward race not possible; mds would drop */
+ dout("forward %llu to mds%d (we resend)\n", tid, next_mds);
+ req->r_num_fwd = fwd_seq;
+ req->r_resend_mds = next_mds;
+ put_request_session(req);
+ __do_request(mdsc, req);
+ }
+ ceph_mdsc_put_request(req);
+out:
+ mutex_unlock(&mdsc->mutex);
+ return;
+
+bad:
+ pr_err("mdsc_handle_forward decode error err=%d\n", err);
+}
+
+/*
+ * handle a mds session control message
+ */
+static void handle_session(struct ceph_mds_session *session,
+ struct ceph_msg *msg)
+{
+ struct ceph_mds_client *mdsc = session->s_mdsc;
+ u32 op;
+ u64 seq;
+ int mds;
+ struct ceph_mds_session_head *h = msg->front.iov_base;
+ int wake = 0;
+
+ if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
+ return;
+ mds = le64_to_cpu(msg->hdr.src.name.num);
+
+ /* decode */
+ if (msg->front.iov_len != sizeof(*h))
+ goto bad;
+ op = le32_to_cpu(h->op);
+ seq = le64_to_cpu(h->seq);
+
+ mutex_lock(&mdsc->mutex);
+ /* FIXME: this ttl calculation is generous */
+ session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
+ mutex_unlock(&mdsc->mutex);
+
+ mutex_lock(&session->s_mutex);
+
+ dout("handle_session mds%d %s %p state %s seq %llu\n",
+ mds, ceph_session_op_name(op), session,
+ session_state_name(session->s_state), seq);
+
+ if (session->s_state == CEPH_MDS_SESSION_HUNG) {
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+ pr_info("mds%d came back\n", session->s_mds);
+ }
+
+ switch (op) {
+ case CEPH_SESSION_OPEN:
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+ renewed_caps(mdsc, session, 0);
+ wake = 1;
+ if (mdsc->stopping)
+ __close_session(mdsc, session);
+ break;
+
+ case CEPH_SESSION_RENEWCAPS:
+ if (session->s_renew_seq == seq)
+ renewed_caps(mdsc, session, 1);
+ break;
+
+ case CEPH_SESSION_CLOSE:
+ unregister_session(mdsc, session);
+ remove_session_caps(session);
+ wake = 1; /* for good measure */
+ complete(&mdsc->session_close_waiters);
+ kick_requests(mdsc, mds, 0); /* cur only */
+ break;
+
+ case CEPH_SESSION_STALE:
+ pr_info("mds%d caps went stale, renewing\n",
+ session->s_mds);
+ spin_lock(&session->s_cap_lock);
+ session->s_cap_gen++;
+ session->s_cap_ttl = 0;
+ spin_unlock(&session->s_cap_lock);
+ send_renew_caps(mdsc, session);
+ break;
+
+ case CEPH_SESSION_RECALL_STATE:
+ trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
+ break;
+
+ default:
+ pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
+ WARN_ON(1);
+ }
+
+ mutex_unlock(&session->s_mutex);
+ if (wake) {
+ mutex_lock(&mdsc->mutex);
+ __wake_requests(mdsc, &session->s_waiting);
+ mutex_unlock(&mdsc->mutex);
+ }
+ return;
+
+bad:
+ pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
+ (int)msg->front.iov_len);
+ return;
+}
+
+
+/*
+ * called under session->mutex.
+ */
+static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session)
+{
+ struct ceph_mds_request *req, *nreq;
+ int err;
+
+ dout("replay_unsafe_requests mds%d\n", session->s_mds);
+
+ mutex_lock(&mdsc->mutex);
+ list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
+ err = __prepare_send_request(mdsc, req, session->s_mds);
+ if (!err) {
+ ceph_msg_get(req->r_request);
+ ceph_con_send(&session->s_con, req->r_request);
+ }
+ }
+ mutex_unlock(&mdsc->mutex);
+}
+
+/*
+ * Encode information about a cap for a reconnect with the MDS.
+ */
+struct encode_caps_data {
+ void **pp;
+ void *end;
+ int *num_caps;
+};
+
+static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
+ void *arg)
+{
+ struct ceph_mds_cap_reconnect *rec;
+ struct ceph_inode_info *ci;
+ struct encode_caps_data *data = (struct encode_caps_data *)arg;
+ void *p = *(data->pp);
+ void *end = data->end;
+ char *path;
+ int pathlen, err;
+ u64 pathbase;
+ struct dentry *dentry;
+
+ ci = cap->ci;
+
+ dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
+ inode, ceph_vinop(inode), cap, cap->cap_id,
+ ceph_cap_string(cap->issued));
+ ceph_decode_need(&p, end, sizeof(u64), needmore);
+ ceph_encode_64(&p, ceph_ino(inode));
+
+ dentry = d_find_alias(inode);
+ if (dentry) {
+ path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
+ if (IS_ERR(path)) {
+ err = PTR_ERR(path);
+ BUG_ON(err);
+ }
+ } else {
+ path = NULL;
+ pathlen = 0;
+ }
+ ceph_decode_need(&p, end, pathlen+4, needmore);
+ ceph_encode_string(&p, end, path, pathlen);
+
+ ceph_decode_need(&p, end, sizeof(*rec), needmore);
+ rec = p;
+ p += sizeof(*rec);
+ BUG_ON(p > end);
+ spin_lock(&inode->i_lock);
+ cap->seq = 0; /* reset cap seq */
+ cap->issue_seq = 0; /* and issue_seq */
+ rec->cap_id = cpu_to_le64(cap->cap_id);
+ rec->pathbase = cpu_to_le64(pathbase);
+ rec->wanted = cpu_to_le32(__ceph_caps_wanted(ci));
+ rec->issued = cpu_to_le32(cap->issued);
+ rec->size = cpu_to_le64(inode->i_size);
+ ceph_encode_timespec(&rec->mtime, &inode->i_mtime);
+ ceph_encode_timespec(&rec->atime, &inode->i_atime);
+ rec->snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
+ spin_unlock(&inode->i_lock);
+
+ kfree(path);
+ dput(dentry);
+ (*data->num_caps)++;
+ *(data->pp) = p;
+ return 0;
+needmore:
+ return -ENOSPC;
+}
+
+
+/*
+ * If an MDS fails and recovers, clients need to reconnect in order to
+ * reestablish shared state. This includes all caps issued through
+ * this session _and_ the snap_realm hierarchy. Because it's not
+ * clear which snap realms the mds cares about, we send everything we
+ * know about.. that ensures we'll then get any new info the
+ * recovering MDS might have.
+ *
+ * This is a relatively heavyweight operation, but it's rare.
+ *
+ * called with mdsc->mutex held.
+ */
+static void send_mds_reconnect(struct ceph_mds_client *mdsc, int mds)
+{
+ struct ceph_mds_session *session;
+ struct ceph_msg *reply;
+ int newlen, len = 4 + 1;
+ void *p, *end;
+ int err;
+ int num_caps, num_realms = 0;
+ int got;
+ u64 next_snap_ino = 0;
+ __le32 *pnum_caps, *pnum_realms;
+ struct encode_caps_data iter_args;
+
+ pr_info("reconnect to recovering mds%d\n", mds);
+
+ /* find session */
+ session = __ceph_lookup_mds_session(mdsc, mds);
+ mutex_unlock(&mdsc->mutex); /* drop lock for duration */
+
+ if (session) {
+ mutex_lock(&session->s_mutex);
+
+ session->s_state = CEPH_MDS_SESSION_RECONNECTING;
+ session->s_seq = 0;
+
+ ceph_con_open(&session->s_con,
+ ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
+
+ /* replay unsafe requests */
+ replay_unsafe_requests(mdsc, session);
+
+ /* estimate needed space */
+ len += session->s_nr_caps *
+ (100+sizeof(struct ceph_mds_cap_reconnect));
+ pr_info("estimating i need %d bytes for %d caps\n",
+ len, session->s_nr_caps);
+ } else {
+ dout("no session for mds%d, will send short reconnect\n",
+ mds);
+ }
+
+ down_read(&mdsc->snap_rwsem);
+
+retry:
+ /* build reply */
+ reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, len, 0, 0, NULL);
+ if (IS_ERR(reply)) {
+ err = PTR_ERR(reply);
+ pr_err("send_mds_reconnect ENOMEM on %d for mds%d\n",
+ len, mds);
+ goto out;
+ }
+ p = reply->front.iov_base;
+ end = p + len;
+
+ if (!session) {
+ ceph_encode_8(&p, 1); /* session was closed */
+ ceph_encode_32(&p, 0);
+ goto send;
+ }
+ dout("session %p state %s\n", session,
+ session_state_name(session->s_state));
+
+ /* traverse this session's caps */
+ ceph_encode_8(&p, 0);
+ pnum_caps = p;
+ ceph_encode_32(&p, session->s_nr_caps);
+ num_caps = 0;
+
+ iter_args.pp = &p;
+ iter_args.end = end;
+ iter_args.num_caps = &num_caps;
+ err = iterate_session_caps(session, encode_caps_cb, &iter_args);
+ if (err == -ENOSPC)
+ goto needmore;
+ if (err < 0)
+ goto out;
+ *pnum_caps = cpu_to_le32(num_caps);
+
+ /*
+ * snaprealms. we provide mds with the ino, seq (version), and
+ * parent for all of our realms. If the mds has any newer info,
+ * it will tell us.
+ */
+ next_snap_ino = 0;
+ /* save some space for the snaprealm count */
+ pnum_realms = p;
+ ceph_decode_need(&p, end, sizeof(*pnum_realms), needmore);
+ p += sizeof(*pnum_realms);
+ num_realms = 0;
+ while (1) {
+ struct ceph_snap_realm *realm;
+ struct ceph_mds_snaprealm_reconnect *sr_rec;
+ got = radix_tree_gang_lookup(&mdsc->snap_realms,
+ (void **)&realm, next_snap_ino, 1);
+ if (!got)
+ break;
+
+ dout(" adding snap realm %llx seq %lld parent %llx\n",
+ realm->ino, realm->seq, realm->parent_ino);
+ ceph_decode_need(&p, end, sizeof(*sr_rec), needmore);
+ sr_rec = p;
+ sr_rec->ino = cpu_to_le64(realm->ino);
+ sr_rec->seq = cpu_to_le64(realm->seq);
+ sr_rec->parent = cpu_to_le64(realm->parent_ino);
+ p += sizeof(*sr_rec);
+ num_realms++;
+ next_snap_ino = realm->ino + 1;
+ }
+ *pnum_realms = cpu_to_le32(num_realms);
+
+send:
+ reply->front.iov_len = p - reply->front.iov_base;
+ reply->hdr.front_len = cpu_to_le32(reply->front.iov_len);
+ dout("final len was %u (guessed %d)\n",
+ (unsigned)reply->front.iov_len, len);
+ ceph_con_send(&session->s_con, reply);
+
+ if (session) {
+ session->s_state = CEPH_MDS_SESSION_OPEN;
+ __wake_requests(mdsc, &session->s_waiting);
+ }
+
+out:
+ up_read(&mdsc->snap_rwsem);
+ if (session) {
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ }
+ mutex_lock(&mdsc->mutex);
+ return;
+
+needmore:
+ /*
+ * we need a larger buffer. this doesn't very accurately
+ * factor in snap realms, but it's safe.
+ */
+ num_caps += num_realms;
+ newlen = len * ((100 * (session->s_nr_caps+3)) / (num_caps + 1)) / 100;
+ pr_info("i guessed %d, and did %d of %d caps, retrying with %d\n",
+ len, num_caps, session->s_nr_caps, newlen);
+ len = newlen;
+ ceph_msg_put(reply);
+ goto retry;
+}
+
+
+/*
+ * compare old and new mdsmaps, kicking requests
+ * and closing out old connections as necessary
+ *
+ * called under mdsc->mutex.
+ */
+static void check_new_map(struct ceph_mds_client *mdsc,
+ struct ceph_mdsmap *newmap,
+ struct ceph_mdsmap *oldmap)
+{
+ int i;
+ int oldstate, newstate;
+ struct ceph_mds_session *s;
+
+ dout("check_new_map new %u old %u\n",
+ newmap->m_epoch, oldmap->m_epoch);
+
+ for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
+ if (mdsc->sessions[i] == NULL)
+ continue;
+ s = mdsc->sessions[i];
+ oldstate = ceph_mdsmap_get_state(oldmap, i);
+ newstate = ceph_mdsmap_get_state(newmap, i);
+
+ dout("check_new_map mds%d state %s -> %s (session %s)\n",
+ i, ceph_mds_state_name(oldstate),
+ ceph_mds_state_name(newstate),
+ session_state_name(s->s_state));
+
+ if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
+ ceph_mdsmap_get_addr(newmap, i),
+ sizeof(struct ceph_entity_addr))) {
+ if (s->s_state == CEPH_MDS_SESSION_OPENING) {
+ /* the session never opened, just close it
+ * out now */
+ __wake_requests(mdsc, &s->s_waiting);
+ unregister_session(mdsc, s);
+ } else {
+ /* just close it */
+ mutex_unlock(&mdsc->mutex);
+ mutex_lock(&s->s_mutex);
+ mutex_lock(&mdsc->mutex);
+ ceph_con_close(&s->s_con);
+ mutex_unlock(&s->s_mutex);
+ s->s_state = CEPH_MDS_SESSION_RESTARTING;
+ }
+
+ /* kick any requests waiting on the recovering mds */
+ kick_requests(mdsc, i, 1);
+ } else if (oldstate == newstate) {
+ continue; /* nothing new with this mds */
+ }
+
+ /*
+ * send reconnect?
+ */
+ if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
+ newstate >= CEPH_MDS_STATE_RECONNECT)
+ send_mds_reconnect(mdsc, i);
+
+ /*
+ * kick requests on any mds that has gone active.
+ *
+ * kick requests on cur or forwarder: we may have sent
+ * the request to mds1, mds1 told us it forwarded it
+ * to mds2, but then we learn mds1 failed and can't be
+ * sure it successfully forwarded our request before
+ * it died.
+ */
+ if (oldstate < CEPH_MDS_STATE_ACTIVE &&
+ newstate >= CEPH_MDS_STATE_ACTIVE) {
+ pr_info("mds%d reconnect completed\n", s->s_mds);
+ kick_requests(mdsc, i, 1);
+ ceph_kick_flushing_caps(mdsc, s);
+ wake_up_session_caps(s, 1);
+ }
+ }
+}
+
+
+
+/*
+ * leases
+ */
+
+/*
+ * caller must hold session s_mutex, dentry->d_lock
+ */
+void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
+{
+ struct ceph_dentry_info *di = ceph_dentry(dentry);
+
+ ceph_put_mds_session(di->lease_session);
+ di->lease_session = NULL;
+}
+
+static void handle_lease(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+{
+ struct super_block *sb = mdsc->client->sb;
+ struct inode *inode;
+ struct ceph_mds_session *session;
+ struct ceph_inode_info *ci;
+ struct dentry *parent, *dentry;
+ struct ceph_dentry_info *di;
+ int mds;
+ struct ceph_mds_lease *h = msg->front.iov_base;
+ struct ceph_vino vino;
+ int mask;
+ struct qstr dname;
+ int release = 0;
+
+ if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
+ return;
+ mds = le64_to_cpu(msg->hdr.src.name.num);
+ dout("handle_lease from mds%d\n", mds);
+
+ /* decode */
+ if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
+ goto bad;
+ vino.ino = le64_to_cpu(h->ino);
+ vino.snap = CEPH_NOSNAP;
+ mask = le16_to_cpu(h->mask);
+ dname.name = (void *)h + sizeof(*h) + sizeof(u32);
+ dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
+ if (dname.len != get_unaligned_le32(h+1))
+ goto bad;
+
+ /* find session */
+ mutex_lock(&mdsc->mutex);
+ session = __ceph_lookup_mds_session(mdsc, mds);
+ mutex_unlock(&mdsc->mutex);
+ if (!session) {
+ pr_err("handle_lease got lease but no session mds%d\n", mds);
+ return;
+ }
+
+ mutex_lock(&session->s_mutex);
+ session->s_seq++;
+
+ /* lookup inode */
+ inode = ceph_find_inode(sb, vino);
+ dout("handle_lease '%s', mask %d, ino %llx %p\n",
+ ceph_lease_op_name(h->action), mask, vino.ino, inode);
+ if (inode == NULL) {
+ dout("handle_lease no inode %llx\n", vino.ino);
+ goto release;
+ }
+ ci = ceph_inode(inode);
+
+ /* dentry */
+ parent = d_find_alias(inode);
+ if (!parent) {
+ dout("no parent dentry on inode %p\n", inode);
+ WARN_ON(1);
+ goto release; /* hrm... */
+ }
+ dname.hash = full_name_hash(dname.name, dname.len);
+ dentry = d_lookup(parent, &dname);
+ dput(parent);
+ if (!dentry)
+ goto release;
+
+ spin_lock(&dentry->d_lock);
+ di = ceph_dentry(dentry);
+ switch (h->action) {
+ case CEPH_MDS_LEASE_REVOKE:
+ if (di && di->lease_session == session) {
+ h->seq = cpu_to_le32(di->lease_seq);
+ __ceph_mdsc_drop_dentry_lease(dentry);
+ }
+ release = 1;
+ break;
+
+ case CEPH_MDS_LEASE_RENEW:
+ if (di && di->lease_session == session &&
+ di->lease_gen == session->s_cap_gen &&
+ di->lease_renew_from &&
+ di->lease_renew_after == 0) {
+ unsigned long duration =
+ le32_to_cpu(h->duration_ms) * HZ / 1000;
+
+ di->lease_seq = le32_to_cpu(h->seq);
+ dentry->d_time = di->lease_renew_from + duration;
+ di->lease_renew_after = di->lease_renew_from +
+ (duration >> 1);
+ di->lease_renew_from = 0;
+ }
+ break;
+ }
+ spin_unlock(&dentry->d_lock);
+ dput(dentry);
+
+ if (!release)
+ goto out;
+
+release:
+ /* let's just reuse the same message */
+ h->action = CEPH_MDS_LEASE_REVOKE_ACK;
+ ceph_msg_get(msg);
+ ceph_con_send(&session->s_con, msg);
+
+out:
+ iput(inode);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ return;
+
+bad:
+ pr_err("corrupt lease message\n");
+}
+
+void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
+ struct inode *inode,
+ struct dentry *dentry, char action,
+ u32 seq)
+{
+ struct ceph_msg *msg;
+ struct ceph_mds_lease *lease;
+ int len = sizeof(*lease) + sizeof(u32);
+ int dnamelen = 0;
+
+ dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
+ inode, dentry, ceph_lease_op_name(action), session->s_mds);
+ dnamelen = dentry->d_name.len;
+ len += dnamelen;
+
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, 0, 0, NULL);
+ if (IS_ERR(msg))
+ return;
+ lease = msg->front.iov_base;
+ lease->action = action;
+ lease->mask = cpu_to_le16(CEPH_LOCK_DN);
+ lease->ino = cpu_to_le64(ceph_vino(inode).ino);
+ lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
+ lease->seq = cpu_to_le32(seq);
+ put_unaligned_le32(dnamelen, lease + 1);
+ memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);
+
+ /*
+ * if this is a preemptive lease RELEASE, no need to
+ * flush request stream, since the actual request will
+ * soon follow.
+ */
+ msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
+
+ ceph_con_send(&session->s_con, msg);
+}
+
+/*
+ * Preemptively release a lease we expect to invalidate anyway.
+ * Pass @inode always, @dentry is optional.
+ */
+void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
+ struct dentry *dentry, int mask)
+{
+ struct ceph_dentry_info *di;
+ struct ceph_mds_session *session;
+ u32 seq;
+
+ BUG_ON(inode == NULL);
+ BUG_ON(dentry == NULL);
+ BUG_ON(mask != CEPH_LOCK_DN);
+
+ /* is dentry lease valid? */
+ spin_lock(&dentry->d_lock);
+ di = ceph_dentry(dentry);
+ if (!di || !di->lease_session ||
+ di->lease_session->s_mds < 0 ||
+ di->lease_gen != di->lease_session->s_cap_gen ||
+ !time_before(jiffies, dentry->d_time)) {
+ dout("lease_release inode %p dentry %p -- "
+ "no lease on %d\n",
+ inode, dentry, mask);
+ spin_unlock(&dentry->d_lock);
+ return;
+ }
+
+ /* we do have a lease on this dentry; note mds and seq */
+ session = ceph_get_mds_session(di->lease_session);
+ seq = di->lease_seq;
+ __ceph_mdsc_drop_dentry_lease(dentry);
+ spin_unlock(&dentry->d_lock);
+
+ dout("lease_release inode %p dentry %p mask %d to mds%d\n",
+ inode, dentry, mask, session->s_mds);
+ ceph_mdsc_lease_send_msg(session, inode, dentry,
+ CEPH_MDS_LEASE_RELEASE, seq);
+ ceph_put_mds_session(session);
+}
+
+/*
+ * drop all leases (and dentry refs) in preparation for umount
+ */
+static void drop_leases(struct ceph_mds_client *mdsc)
+{
+ int i;
+
+ dout("drop_leases\n");
+ mutex_lock(&mdsc->mutex);
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
+ if (!s)
+ continue;
+ mutex_unlock(&mdsc->mutex);
+ mutex_lock(&s->s_mutex);
+ mutex_unlock(&s->s_mutex);
+ ceph_put_mds_session(s);
+ mutex_lock(&mdsc->mutex);
+ }
+ mutex_unlock(&mdsc->mutex);
+}
+
+
+
+/*
+ * delayed work -- periodically trim expired leases, renew caps with mds
+ */
+static void schedule_delayed(struct ceph_mds_client *mdsc)
+{
+ int delay = 5;
+ unsigned hz = round_jiffies_relative(HZ * delay);
+ schedule_delayed_work(&mdsc->delayed_work, hz);
+}
+
+static void delayed_work(struct work_struct *work)
+{
+ int i;
+ struct ceph_mds_client *mdsc =
+ container_of(work, struct ceph_mds_client, delayed_work.work);
+ int renew_interval;
+ int renew_caps;
+
+ dout("mdsc delayed_work\n");
+ ceph_check_delayed_caps(mdsc);
+
+ mutex_lock(&mdsc->mutex);
+ renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
+ renew_caps = time_after_eq(jiffies, HZ*renew_interval +
+ mdsc->last_renew_caps);
+ if (renew_caps)
+ mdsc->last_renew_caps = jiffies;
+
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
+ if (s == NULL)
+ continue;
+ if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
+ dout("resending session close request for mds%d\n",
+ s->s_mds);
+ request_close_session(mdsc, s);
+ ceph_put_mds_session(s);
+ continue;
+ }
+ if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
+ if (s->s_state == CEPH_MDS_SESSION_OPEN) {
+ s->s_state = CEPH_MDS_SESSION_HUNG;
+ pr_info("mds%d hung\n", s->s_mds);
+ }
+ }
+ if (s->s_state < CEPH_MDS_SESSION_OPEN) {
+ /* this mds is failed or recovering, just wait */
+ ceph_put_mds_session(s);
+ continue;
+ }
+ mutex_unlock(&mdsc->mutex);
+
+ mutex_lock(&s->s_mutex);
+ if (renew_caps)
+ send_renew_caps(mdsc, s);
+ else
+ ceph_con_keepalive(&s->s_con);
+ add_cap_releases(mdsc, s, -1);
+ send_cap_releases(mdsc, s);
+ mutex_unlock(&s->s_mutex);
+ ceph_put_mds_session(s);
+
+ mutex_lock(&mdsc->mutex);
+ }
+ mutex_unlock(&mdsc->mutex);
+
+ schedule_delayed(mdsc);
+}
+
+
+int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
+{
+ mdsc->client = client;
+ mutex_init(&mdsc->mutex);
+ mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
+ init_completion(&mdsc->safe_umount_waiters);
+ init_completion(&mdsc->session_close_waiters);
+ INIT_LIST_HEAD(&mdsc->waiting_for_map);
+ mdsc->sessions = NULL;
+ mdsc->max_sessions = 0;
+ mdsc->stopping = 0;
+ init_rwsem(&mdsc->snap_rwsem);
+ INIT_RADIX_TREE(&mdsc->snap_realms, GFP_NOFS);
+ INIT_LIST_HEAD(&mdsc->snap_empty);
+ spin_lock_init(&mdsc->snap_empty_lock);
+ mdsc->last_tid = 0;
+ INIT_RADIX_TREE(&mdsc->request_tree, GFP_NOFS);
+ INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
+ mdsc->last_renew_caps = jiffies;
+ INIT_LIST_HEAD(&mdsc->cap_delay_list);
+ spin_lock_init(&mdsc->cap_delay_lock);
+ INIT_LIST_HEAD(&mdsc->snap_flush_list);
+ spin_lock_init(&mdsc->snap_flush_lock);
+ mdsc->cap_flush_seq = 0;
+ INIT_LIST_HEAD(&mdsc->cap_dirty);
+ mdsc->num_cap_flushing = 0;
+ spin_lock_init(&mdsc->cap_dirty_lock);
+ init_waitqueue_head(&mdsc->cap_flushing_wq);
+ spin_lock_init(&mdsc->dentry_lru_lock);
+ INIT_LIST_HEAD(&mdsc->dentry_lru);
+ return 0;
+}
+
+/*
+ * Wait for safe replies on open mds requests. If we time out, drop
+ * all requests from the tree to avoid dangling dentry refs.
+ */
+static void wait_requests(struct ceph_mds_client *mdsc)
+{
+ struct ceph_mds_request *req;
+ struct ceph_client *client = mdsc->client;
+
+ mutex_lock(&mdsc->mutex);
+ if (__get_oldest_tid(mdsc)) {
+ mutex_unlock(&mdsc->mutex);
+ dout("wait_requests waiting for requests\n");
+ wait_for_completion_timeout(&mdsc->safe_umount_waiters,
+ client->mount_args->mount_timeout * HZ);
+ mutex_lock(&mdsc->mutex);
+
+ /* tear down remaining requests */
+ while (radix_tree_gang_lookup(&mdsc->request_tree,
+ (void **)&req, 0, 1)) {
+ dout("wait_requests timed out on tid %llu\n",
+ req->r_tid);
+ radix_tree_delete(&mdsc->request_tree, req->r_tid);
+ ceph_mdsc_put_request(req);
+ }
+ }
+ mutex_unlock(&mdsc->mutex);
+ dout("wait_requests done\n");
+}
+
+/*
+ * called before mount is ro, and before dentries are torn down.
+ * (hmm, does this still race with new lookups?)
+ */
+void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
+{
+ dout("pre_umount\n");
+ mdsc->stopping = 1;
+
+ drop_leases(mdsc);
+ ceph_flush_dirty_caps(mdsc);
+ wait_requests(mdsc);
+}
+
+/*
+ * wait for all write mds requests to flush.
+ */
+static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
+{
+ struct ceph_mds_request *req;
+ u64 next_tid = 0;
+ int got;
+
+ mutex_lock(&mdsc->mutex);
+ dout("wait_unsafe_requests want %lld\n", want_tid);
+ while (1) {
+ got = radix_tree_gang_lookup(&mdsc->request_tree, (void **)&req,
+ next_tid, 1);
+ if (!got)
+ break;
+ if (req->r_tid > want_tid)
+ break;
+
+ next_tid = req->r_tid + 1;
+ if ((req->r_op & CEPH_MDS_OP_WRITE) == 0)
+ continue; /* not a write op */
+
+ ceph_mdsc_get_request(req);
+ mutex_unlock(&mdsc->mutex);
+ dout("wait_unsafe_requests wait on %llu (want %llu)\n",
+ req->r_tid, want_tid);
+ wait_for_completion(&req->r_safe_completion);
+ mutex_lock(&mdsc->mutex);
+ ceph_mdsc_put_request(req);
+ }
+ mutex_unlock(&mdsc->mutex);
+ dout("wait_unsafe_requests done\n");
+}
+
+void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
+{
+ u64 want_tid, want_flush;
+
+ dout("sync\n");
+ mutex_lock(&mdsc->mutex);
+ want_tid = mdsc->last_tid;
+ want_flush = mdsc->cap_flush_seq;
+ mutex_unlock(&mdsc->mutex);
+ dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
+
+ ceph_flush_dirty_caps(mdsc);
+
+ wait_unsafe_requests(mdsc, want_tid);
+ wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
+}
+
+
+/*
+ * called after sb is ro.
+ */
+void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
+{
+ struct ceph_mds_session *session;
+ int i;
+ int n;
+ struct ceph_client *client = mdsc->client;
+ unsigned long started, timeout = client->mount_args->mount_timeout * HZ;
+
+ dout("close_sessions\n");
+
+ mutex_lock(&mdsc->mutex);
+
+ /* close sessions */
+ started = jiffies;
+ while (time_before(jiffies, started + timeout)) {
+ dout("closing sessions\n");
+ n = 0;
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ session = __ceph_lookup_mds_session(mdsc, i);
+ if (!session)
+ continue;
+ mutex_unlock(&mdsc->mutex);
+ mutex_lock(&session->s_mutex);
+ __close_session(mdsc, session);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ mutex_lock(&mdsc->mutex);
+ n++;
+ }
+ if (n == 0)
+ break;
+
+ if (client->mount_state == CEPH_MOUNT_SHUTDOWN)
+ break;
+
+ dout("waiting for sessions to close\n");
+ mutex_unlock(&mdsc->mutex);
+ wait_for_completion_timeout(&mdsc->session_close_waiters,
+ timeout);
+ mutex_lock(&mdsc->mutex);
+ }
+
+ /* tear down remaining sessions */
+ for (i = 0; i < mdsc->max_sessions; i++) {
+ if (mdsc->sessions[i]) {
+ session = get_session(mdsc->sessions[i]);
+ unregister_session(mdsc, session);
+ mutex_unlock(&mdsc->mutex);
+ mutex_lock(&session->s_mutex);
+ remove_session_caps(session);
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ mutex_lock(&mdsc->mutex);
+ }
+ }
+
+ WARN_ON(!list_empty(&mdsc->cap_delay_list));
+
+ mutex_unlock(&mdsc->mutex);
+
+ ceph_cleanup_empty_realms(mdsc);
+
+ cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
+
+ dout("stopped\n");
+}
+
+void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
+{
+ dout("stop\n");
+ cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
+ if (mdsc->mdsmap)
+ ceph_mdsmap_destroy(mdsc->mdsmap);
+ kfree(mdsc->sessions);
+}
+
+
+/*
+ * handle mds map update.
+ */
+void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+{
+ u32 epoch;
+ u32 maplen;
+ void *p = msg->front.iov_base;
+ void *end = p + msg->front.iov_len;
+ struct ceph_mdsmap *newmap, *oldmap;
+ struct ceph_fsid fsid;
+ int err = -EINVAL;
+
+ ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
+ ceph_decode_copy(&p, &fsid, sizeof(fsid));
+ if (ceph_check_fsid(mdsc->client, &fsid) < 0)
+ return;
+ epoch = ceph_decode_32(&p);
+ maplen = ceph_decode_32(&p);
+ dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
+
+ /* do we need it? */
+ ceph_monc_got_mdsmap(&mdsc->client->monc, epoch);
+ mutex_lock(&mdsc->mutex);
+ if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
+ dout("handle_map epoch %u <= our %u\n",
+ epoch, mdsc->mdsmap->m_epoch);
+ mutex_unlock(&mdsc->mutex);
+ return;
+ }
+
+ newmap = ceph_mdsmap_decode(&p, end);
+ if (IS_ERR(newmap)) {
+ err = PTR_ERR(newmap);
+ goto bad_unlock;
+ }
+
+ /* swap into place */
+ if (mdsc->mdsmap) {
+ oldmap = mdsc->mdsmap;
+ mdsc->mdsmap = newmap;
+ check_new_map(mdsc, newmap, oldmap);
+ ceph_mdsmap_destroy(oldmap);
+ } else {
+ mdsc->mdsmap = newmap; /* first mds map */
+ }
+ mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
+
+ __wake_requests(mdsc, &mdsc->waiting_for_map);
+
+ mutex_unlock(&mdsc->mutex);
+ schedule_delayed(mdsc);
+ return;
+
+bad_unlock:
+ mutex_unlock(&mdsc->mutex);
+bad:
+ pr_err("error decoding mdsmap %d\n", err);
+ return;
+}
+
+static struct ceph_connection *con_get(struct ceph_connection *con)
+{
+ struct ceph_mds_session *s = con->private;
+
+ if (get_session(s)) {
+ dout("mdsc con_get %p %d -> %d\n", s,
+ atomic_read(&s->s_ref) - 1, atomic_read(&s->s_ref));
+ return con;
+ }
+ dout("mdsc con_get %p FAIL\n", s);
+ return NULL;
+}
+
+static void con_put(struct ceph_connection *con)
+{
+ struct ceph_mds_session *s = con->private;
+
+ dout("mdsc con_put %p %d -> %d\n", s, atomic_read(&s->s_ref),
+ atomic_read(&s->s_ref) - 1);
+ ceph_put_mds_session(s);
+}
+
+/*
+ * if the client is unresponsive for long enough, the mds will kill
+ * the session entirely.
+ */
+static void peer_reset(struct ceph_connection *con)
+{
+ struct ceph_mds_session *s = con->private;
+
+ pr_err("mds%d gave us the boot. IMPLEMENT RECONNECT.\n",
+ s->s_mds);
+}
+
+static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ int type = le16_to_cpu(msg->hdr.type);
+
+ switch (type) {
+ case CEPH_MSG_MDS_MAP:
+ ceph_mdsc_handle_map(mdsc, msg);
+ break;
+ case CEPH_MSG_CLIENT_SESSION:
+ handle_session(s, msg);
+ break;
+ case CEPH_MSG_CLIENT_REPLY:
+ handle_reply(s, msg);
+ break;
+ case CEPH_MSG_CLIENT_REQUEST_FORWARD:
+ handle_forward(mdsc, msg);
+ break;
+ case CEPH_MSG_CLIENT_CAPS:
+ ceph_handle_caps(s, msg);
+ break;
+ case CEPH_MSG_CLIENT_SNAP:
+ ceph_handle_snap(mdsc, msg);
+ break;
+ case CEPH_MSG_CLIENT_LEASE:
+ handle_lease(mdsc, msg);
+ break;
+
+ default:
+ pr_err("received unknown message type %d %s\n", type,
+ ceph_msg_type_name(type));
+ }
+ ceph_msg_put(msg);
+}
+
+/*
+ * authentication
+ */
+static int get_authorizer(struct ceph_connection *con,
+ void **buf, int *len, int *proto,
+ void **reply_buf, int *reply_len, int force_new)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_auth_client *ac = mdsc->client->monc.auth;
+ int ret = 0;
+
+ if (force_new && s->s_authorizer) {
+ ac->ops->destroy_authorizer(ac, s->s_authorizer);
+ s->s_authorizer = NULL;
+ }
+ if (s->s_authorizer == NULL) {
+ if (ac->ops->create_authorizer) {
+ ret = ac->ops->create_authorizer(
+ ac, CEPH_ENTITY_TYPE_MDS,
+ &s->s_authorizer,
+ &s->s_authorizer_buf,
+ &s->s_authorizer_buf_len,
+ &s->s_authorizer_reply_buf,
+ &s->s_authorizer_reply_buf_len);
+ if (ret)
+ return ret;
+ }
+ }
+
+ *proto = ac->protocol;
+ *buf = s->s_authorizer_buf;
+ *len = s->s_authorizer_buf_len;
+ *reply_buf = s->s_authorizer_reply_buf;
+ *reply_len = s->s_authorizer_reply_buf_len;
+ return 0;
+}
+
+
+static int verify_authorizer_reply(struct ceph_connection *con, int len)
+{
+ struct ceph_mds_session *s = con->private;
+ struct ceph_mds_client *mdsc = s->s_mdsc;
+ struct ceph_auth_client *ac = mdsc->client->monc.auth;
+
+ return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
+}
+
+const static struct ceph_connection_operations mds_con_ops = {
+ .get = con_get,
+ .put = con_put,
+ .dispatch = dispatch,
+ .get_authorizer = get_authorizer,
+ .verify_authorizer_reply = verify_authorizer_reply,
+ .peer_reset = peer_reset,
+ .alloc_msg = ceph_alloc_msg,
+ .alloc_middle = ceph_alloc_middle,
+};
+
+
+
+
+/* eof */
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
new file mode 100644
index 000000000000..41af5ca316e6
--- /dev/null
+++ b/fs/ceph/mds_client.h
@@ -0,0 +1,332 @@
+#ifndef _FS_CEPH_MDS_CLIENT_H
+#define _FS_CEPH_MDS_CLIENT_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+
+#include "types.h"
+#include "messenger.h"
+#include "mdsmap.h"
+
+/*
+ * Some lock dependencies:
+ *
+ * session->s_mutex
+ * mdsc->mutex
+ *
+ * mdsc->snap_rwsem
+ *
+ * inode->i_lock
+ * mdsc->snap_flush_lock
+ * mdsc->cap_delay_lock
+ *
+ */
+
+struct ceph_client;
+struct ceph_cap;
+
+/*
+ * parsed info about a single inode. pointers are into the encoded
+ * on-wire structures within the mds reply message payload.
+ */
+struct ceph_mds_reply_info_in {
+ struct ceph_mds_reply_inode *in;
+ u32 symlink_len;
+ char *symlink;
+ u32 xattr_len;
+ char *xattr_data;
+};
+
+/*
+ * parsed info about an mds reply, including information about the
+ * target inode and/or its parent directory and dentry, and directory
+ * contents (for readdir results).
+ */
+struct ceph_mds_reply_info_parsed {
+ struct ceph_mds_reply_head *head;
+
+ struct ceph_mds_reply_info_in diri, targeti;
+ struct ceph_mds_reply_dirfrag *dirfrag;
+ char *dname;
+ u32 dname_len;
+ struct ceph_mds_reply_lease *dlease;
+
+ struct ceph_mds_reply_dirfrag *dir_dir;
+ int dir_nr;
+ char **dir_dname;
+ u32 *dir_dname_len;
+ struct ceph_mds_reply_lease **dir_dlease;
+ struct ceph_mds_reply_info_in *dir_in;
+ u8 dir_complete, dir_end;
+
+ /* encoded blob describing snapshot contexts for certain
+ operations (e.g., open) */
+ void *snapblob;
+ int snapblob_len;
+};
+
+
+/*
+ * cap releases are batched and sent to the MDS en masse.
+ */
+#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \
+ sizeof(struct ceph_mds_cap_release)) / \
+ sizeof(struct ceph_mds_cap_item))
+
+
+/*
+ * state associated with each MDS<->client session
+ */
+enum {
+ CEPH_MDS_SESSION_NEW = 1,
+ CEPH_MDS_SESSION_OPENING = 2,
+ CEPH_MDS_SESSION_OPEN = 3,
+ CEPH_MDS_SESSION_HUNG = 4,
+ CEPH_MDS_SESSION_CLOSING = 5,
+ CEPH_MDS_SESSION_RESTARTING = 6,
+ CEPH_MDS_SESSION_RECONNECTING = 7,
+};
+
+struct ceph_mds_session {
+ struct ceph_mds_client *s_mdsc;
+ int s_mds;
+ int s_state;
+ unsigned long s_ttl; /* time until mds kills us */
+ u64 s_seq; /* incoming msg seq # */
+ struct mutex s_mutex; /* serialize session messages */
+
+ struct ceph_connection s_con;
+
+ struct ceph_authorizer *s_authorizer;
+ void *s_authorizer_buf, *s_authorizer_reply_buf;
+ size_t s_authorizer_buf_len, s_authorizer_reply_buf_len;
+
+ /* protected by s_cap_lock */
+ spinlock_t s_cap_lock;
+ u32 s_cap_gen; /* inc each time we get mds stale msg */
+ unsigned long s_cap_ttl; /* when session caps expire */
+ struct list_head s_caps; /* all caps issued by this session */
+ int s_nr_caps, s_trim_caps;
+ int s_num_cap_releases;
+ struct list_head s_cap_releases; /* waiting cap_release messages */
+ struct list_head s_cap_releases_done; /* ready to send */
+
+ /* protected by mutex */
+ struct list_head s_cap_flushing; /* inodes w/ flushing caps */
+ struct list_head s_cap_snaps_flushing;
+ unsigned long s_renew_requested; /* last time we sent a renew req */
+ u64 s_renew_seq;
+
+ atomic_t s_ref;
+ struct list_head s_waiting; /* waiting requests */
+ struct list_head s_unsafe; /* unsafe requests */
+};
+
+/*
+ * modes of choosing which MDS to send a request to
+ */
+enum {
+ USE_ANY_MDS,
+ USE_RANDOM_MDS,
+ USE_AUTH_MDS, /* prefer authoritative mds for this metadata item */
+};
+
+struct ceph_mds_request;
+struct ceph_mds_client;
+
+/*
+ * request completion callback
+ */
+typedef void (*ceph_mds_request_callback_t) (struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req);
+
+/*
+ * an in-flight mds request
+ */
+struct ceph_mds_request {
+ u64 r_tid; /* transaction id */
+
+ int r_op; /* mds op code */
+ int r_mds;
+
+ /* operation on what? */
+ struct inode *r_inode; /* arg1 */
+ struct dentry *r_dentry; /* arg1 */
+ struct dentry *r_old_dentry; /* arg2: rename from or link from */
+ char *r_path1, *r_path2;
+ struct ceph_vino r_ino1, r_ino2;
+
+ struct inode *r_locked_dir; /* dir (if any) i_mutex locked by vfs */
+ struct inode *r_target_inode; /* resulting inode */
+
+ union ceph_mds_request_args r_args;
+ int r_fmode; /* file mode, if expecting cap */
+
+ /* for choosing which mds to send this request to */
+ int r_direct_mode;
+ u32 r_direct_hash; /* choose dir frag based on this dentry hash */
+ bool r_direct_is_hash; /* true if r_direct_hash is valid */
+
+ /* data payload is used for xattr ops */
+ struct page **r_pages;
+ int r_num_pages;
+ int r_data_len;
+
+ /* what caps shall we drop? */
+ int r_inode_drop, r_inode_unless;
+ int r_dentry_drop, r_dentry_unless;
+ int r_old_dentry_drop, r_old_dentry_unless;
+ struct inode *r_old_inode;
+ int r_old_inode_drop, r_old_inode_unless;
+
+ struct ceph_msg *r_request; /* original request */
+ struct ceph_msg *r_reply;
+ struct ceph_mds_reply_info_parsed r_reply_info;
+ int r_err;
+
+ unsigned long r_timeout; /* optional. jiffies */
+ unsigned long r_started; /* start time to measure timeout against */
+ unsigned long r_request_started; /* start time for mds request only,
+ used to measure lease durations */
+
+ /* link unsafe requests to parent directory, for fsync */
+ struct inode *r_unsafe_dir;
+ struct list_head r_unsafe_dir_item;
+
+ struct ceph_mds_session *r_session;
+
+ int r_attempts; /* resend attempts */
+ int r_num_fwd; /* number of forward attempts */
+ int r_num_stale;
+ int r_resend_mds; /* mds to resend to next, if any*/
+
+ struct kref r_kref;
+ struct list_head r_wait;
+ struct completion r_completion;
+ struct completion r_safe_completion;
+ ceph_mds_request_callback_t r_callback;
+ struct list_head r_unsafe_item; /* per-session unsafe list item */
+ bool r_got_unsafe, r_got_safe;
+
+ bool r_did_prepopulate;
+ u32 r_readdir_offset;
+
+ struct ceph_cap_reservation r_caps_reservation;
+ int r_num_caps;
+};
+
+/*
+ * mds client state
+ */
+struct ceph_mds_client {
+ struct ceph_client *client;
+ struct mutex mutex; /* all nested structures */
+
+ struct ceph_mdsmap *mdsmap;
+ struct completion safe_umount_waiters, session_close_waiters;
+ struct list_head waiting_for_map;
+
+ struct ceph_mds_session **sessions; /* NULL for mds if no session */
+ int max_sessions; /* len of s_mds_sessions */
+ int stopping; /* true if shutting down */
+
+ /*
+ * snap_rwsem will cover cap linkage into snaprealms, and
+ * realm snap contexts. (later, we can do per-realm snap
+ * contexts locks..) the empty list contains realms with no
+ * references (implying they contain no inodes with caps) that
+ * should be destroyed.
+ */
+ struct rw_semaphore snap_rwsem;
+ struct radix_tree_root snap_realms;
+ struct list_head snap_empty;
+ spinlock_t snap_empty_lock; /* protect snap_empty */
+
+ u64 last_tid; /* most recent mds request */
+ struct radix_tree_root request_tree; /* pending mds requests */
+ struct delayed_work delayed_work; /* delayed work */
+ unsigned long last_renew_caps; /* last time we renewed our caps */
+ struct list_head cap_delay_list; /* caps with delayed release */
+ spinlock_t cap_delay_lock; /* protects cap_delay_list */
+ struct list_head snap_flush_list; /* cap_snaps ready to flush */
+ spinlock_t snap_flush_lock;
+
+ u64 cap_flush_seq;
+ struct list_head cap_dirty; /* inodes with dirty caps */
+ int num_cap_flushing; /* # caps we are flushing */
+ spinlock_t cap_dirty_lock; /* protects above items */
+ wait_queue_head_t cap_flushing_wq;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_file;
+#endif
+
+ spinlock_t dentry_lru_lock;
+ struct list_head dentry_lru;
+ int num_dentry;
+};
+
+extern const char *ceph_mds_op_name(int op);
+
+extern struct ceph_mds_session *
+__ceph_lookup_mds_session(struct ceph_mds_client *, int mds);
+
+static inline struct ceph_mds_session *
+ceph_get_mds_session(struct ceph_mds_session *s)
+{
+ atomic_inc(&s->s_ref);
+ return s;
+}
+
+extern void ceph_put_mds_session(struct ceph_mds_session *s);
+
+extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg, int mds);
+
+extern int ceph_mdsc_init(struct ceph_mds_client *mdsc,
+ struct ceph_client *client);
+extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
+extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc);
+
+extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
+
+extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
+ struct inode *inode,
+ struct dentry *dn, int mask);
+
+extern struct ceph_mds_request *
+ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode);
+extern void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req);
+extern int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
+ struct inode *dir,
+ struct ceph_mds_request *req);
+static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
+{
+ kref_get(&req->r_kref);
+}
+extern void ceph_mdsc_release_request(struct kref *kref);
+static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
+{
+ kref_put(&req->r_kref, ceph_mdsc_release_request);
+}
+
+extern void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc);
+
+extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
+ int stop_on_nosnap);
+
+extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
+extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
+ struct inode *inode,
+ struct dentry *dentry, char action,
+ u32 seq);
+
+extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg);
+
+#endif
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
new file mode 100644
index 000000000000..cad8d25861e5
--- /dev/null
+++ b/fs/ceph/mdsmap.c
@@ -0,0 +1,170 @@
+#include "ceph_debug.h"
+
+#include <linux/bug.h>
+#include <linux/err.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "mdsmap.h"
+#include "messenger.h"
+#include "decode.h"
+
+#include "super.h"
+
+
+/*
+ * choose a random mds that is "up" (i.e. has a state > 0), or -1.
+ */
+int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m)
+{
+ int n = 0;
+ int i;
+ char r;
+
+ /* count */
+ for (i = 0; i < m->m_max_mds; i++)
+ if (m->m_info[i].state > 0)
+ n++;
+ if (n == 0)
+ return -1;
+
+ /* pick */
+ get_random_bytes(&r, 1);
+ n = r % n;
+ i = 0;
+ for (i = 0; n > 0; i++, n--)
+ while (m->m_info[i].state <= 0)
+ i++;
+
+ return i;
+}
+
+/*
+ * Decode an MDS map
+ *
+ * Ignore any fields we don't care about (there are quite a few of
+ * them).
+ */
+struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
+{
+ struct ceph_mdsmap *m;
+ int i, j, n;
+ int err = -EINVAL;
+ u16 version;
+
+ m = kzalloc(sizeof(*m), GFP_NOFS);
+ if (m == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ceph_decode_16_safe(p, end, version, bad);
+
+ ceph_decode_need(p, end, 8*sizeof(u32) + sizeof(u64), bad);
+ m->m_epoch = ceph_decode_32(p);
+ m->m_client_epoch = ceph_decode_32(p);
+ m->m_last_failure = ceph_decode_32(p);
+ m->m_root = ceph_decode_32(p);
+ m->m_session_timeout = ceph_decode_32(p);
+ m->m_session_autoclose = ceph_decode_32(p);
+ m->m_max_file_size = ceph_decode_64(p);
+ m->m_max_mds = ceph_decode_32(p);
+
+ m->m_info = kcalloc(m->m_max_mds, sizeof(*m->m_info), GFP_NOFS);
+ if (m->m_info == NULL)
+ goto badmem;
+
+ /* pick out active nodes from mds_info (state > 0) */
+ n = ceph_decode_32(p);
+ for (i = 0; i < n; i++) {
+ u64 global_id;
+ u32 namelen;
+ s32 mds, inc, state;
+ u64 state_seq;
+ u8 infoversion;
+ struct ceph_entity_addr addr;
+ u32 num_export_targets;
+ void *pexport_targets = NULL;
+
+ ceph_decode_need(p, end, sizeof(u64)*2 + 1 + sizeof(u32), bad);
+ global_id = ceph_decode_64(p);
+ infoversion = ceph_decode_8(p);
+ *p += sizeof(u64);
+ namelen = ceph_decode_32(p); /* skip mds name */
+ *p += namelen;
+
+ ceph_decode_need(p, end,
+ 4*sizeof(u32) + sizeof(u64) +
+ sizeof(addr) + sizeof(struct ceph_timespec),
+ bad);
+ mds = ceph_decode_32(p);
+ inc = ceph_decode_32(p);
+ state = ceph_decode_32(p);
+ state_seq = ceph_decode_64(p);
+ ceph_decode_copy(p, &addr, sizeof(addr));
+ ceph_decode_addr(&addr);
+ *p += sizeof(struct ceph_timespec);
+ *p += sizeof(u32);
+ ceph_decode_32_safe(p, end, namelen, bad);
+ *p += namelen;
+ if (infoversion >= 2) {
+ ceph_decode_32_safe(p, end, num_export_targets, bad);
+ pexport_targets = *p;
+ *p += num_export_targets * sizeof(u32);
+ } else {
+ num_export_targets = 0;
+ }
+
+ dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
+ i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr),
+ ceph_mds_state_name(state));
+ if (mds >= 0 && mds < m->m_max_mds && state > 0) {
+ m->m_info[mds].global_id = global_id;
+ m->m_info[mds].state = state;
+ m->m_info[mds].addr = addr;
+ m->m_info[mds].num_export_targets = num_export_targets;
+ if (num_export_targets) {
+ m->m_info[mds].export_targets =
+ kcalloc(num_export_targets, sizeof(u32),
+ GFP_NOFS);
+ for (j = 0; j < num_export_targets; j++)
+ m->m_info[mds].export_targets[j] =
+ ceph_decode_32(&pexport_targets);
+ } else {
+ m->m_info[mds].export_targets = NULL;
+ }
+ }
+ }
+
+ /* pg_pools */
+ ceph_decode_32_safe(p, end, n, bad);
+ m->m_num_data_pg_pools = n;
+ m->m_data_pg_pools = kcalloc(n, sizeof(u32), GFP_NOFS);
+ if (!m->m_data_pg_pools)
+ goto badmem;
+ ceph_decode_need(p, end, sizeof(u32)*(n+1), bad);
+ for (i = 0; i < n; i++)
+ m->m_data_pg_pools[i] = ceph_decode_32(p);
+ m->m_cas_pg_pool = ceph_decode_32(p);
+
+ /* ok, we don't care about the rest. */
+ dout("mdsmap_decode success epoch %u\n", m->m_epoch);
+ return m;
+
+badmem:
+ err = -ENOMEM;
+bad:
+ pr_err("corrupt mdsmap\n");
+ ceph_mdsmap_destroy(m);
+ return ERR_PTR(-EINVAL);
+}
+
+void ceph_mdsmap_destroy(struct ceph_mdsmap *m)
+{
+ int i;
+
+ for (i = 0; i < m->m_max_mds; i++)
+ kfree(m->m_info[i].export_targets);
+ kfree(m->m_info);
+ kfree(m->m_data_pg_pools);
+ kfree(m);
+}
diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h
new file mode 100644
index 000000000000..eacc131aa5cb
--- /dev/null
+++ b/fs/ceph/mdsmap.h
@@ -0,0 +1,54 @@
+#ifndef _FS_CEPH_MDSMAP_H
+#define _FS_CEPH_MDSMAP_H
+
+#include "types.h"
+
+/*
+ * mds map - describe servers in the mds cluster.
+ *
+ * we limit fields to those the client actually xcares about
+ */
+struct ceph_mds_info {
+ u64 global_id;
+ struct ceph_entity_addr addr;
+ s32 state;
+ int num_export_targets;
+ u32 *export_targets;
+};
+
+struct ceph_mdsmap {
+ u32 m_epoch, m_client_epoch, m_last_failure;
+ u32 m_root;
+ u32 m_session_timeout; /* seconds */
+ u32 m_session_autoclose; /* seconds */
+ u64 m_max_file_size;
+ u32 m_max_mds; /* size of m_addr, m_state arrays */
+ struct ceph_mds_info *m_info;
+
+ /* which object pools file data can be stored in */
+ int m_num_data_pg_pools;
+ u32 *m_data_pg_pools;
+ u32 m_cas_pg_pool;
+};
+
+static inline struct ceph_entity_addr *
+ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
+{
+ if (w >= m->m_max_mds)
+ return NULL;
+ return &m->m_info[w].addr;
+}
+
+static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
+{
+ BUG_ON(w < 0);
+ if (w >= m->m_max_mds)
+ return CEPH_MDS_STATE_DNE;
+ return m->m_info[w].state;
+}
+
+extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
+extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
+extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+
+#endif
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
new file mode 100644
index 000000000000..b0571b01b19f
--- /dev/null
+++ b/fs/ceph/messenger.c
@@ -0,0 +1,2092 @@
+#include "ceph_debug.h"
+
+#include <linux/crc32c.h>
+#include <linux/ctype.h>
+#include <linux/highmem.h>
+#include <linux/inet.h>
+#include <linux/kthread.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/string.h>
+#include <net/tcp.h>
+
+#include "super.h"
+#include "messenger.h"
+#include "decode.h"
+
+/*
+ * Ceph uses the messenger to exchange ceph_msg messages with other
+ * hosts in the system. The messenger provides ordered and reliable
+ * delivery. We tolerate TCP disconnects by reconnecting (with
+ * exponential backoff) in the case of a fault (disconnection, bad
+ * crc, protocol error). Acks allow sent messages to be discarded by
+ * the sender.
+ */
+
+/* static tag bytes (protocol control messages) */
+static char tag_msg = CEPH_MSGR_TAG_MSG;
+static char tag_ack = CEPH_MSGR_TAG_ACK;
+static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
+
+
+static void queue_con(struct ceph_connection *con);
+static void con_work(struct work_struct *);
+static void ceph_fault(struct ceph_connection *con);
+
+const char *ceph_name_type_str(int t)
+{
+ switch (t) {
+ case CEPH_ENTITY_TYPE_MON: return "mon";
+ case CEPH_ENTITY_TYPE_MDS: return "mds";
+ case CEPH_ENTITY_TYPE_OSD: return "osd";
+ case CEPH_ENTITY_TYPE_CLIENT: return "client";
+ case CEPH_ENTITY_TYPE_ADMIN: return "admin";
+ default: return "???";
+ }
+}
+
+/*
+ * nicely render a sockaddr as a string.
+ */
+#define MAX_ADDR_STR 20
+static char addr_str[MAX_ADDR_STR][40];
+static DEFINE_SPINLOCK(addr_str_lock);
+static int last_addr_str;
+
+const char *pr_addr(const struct sockaddr_storage *ss)
+{
+ int i;
+ char *s;
+ struct sockaddr_in *in4 = (void *)ss;
+ unsigned char *quad = (void *)&in4->sin_addr.s_addr;
+ struct sockaddr_in6 *in6 = (void *)ss;
+
+ spin_lock(&addr_str_lock);
+ i = last_addr_str++;
+ if (last_addr_str == MAX_ADDR_STR)
+ last_addr_str = 0;
+ spin_unlock(&addr_str_lock);
+ s = addr_str[i];
+
+ switch (ss->ss_family) {
+ case AF_INET:
+ sprintf(s, "%u.%u.%u.%u:%u",
+ (unsigned int)quad[0],
+ (unsigned int)quad[1],
+ (unsigned int)quad[2],
+ (unsigned int)quad[3],
+ (unsigned int)ntohs(in4->sin_port));
+ break;
+
+ case AF_INET6:
+ sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
+ in6->sin6_addr.s6_addr16[0],
+ in6->sin6_addr.s6_addr16[1],
+ in6->sin6_addr.s6_addr16[2],
+ in6->sin6_addr.s6_addr16[3],
+ in6->sin6_addr.s6_addr16[4],
+ in6->sin6_addr.s6_addr16[5],
+ in6->sin6_addr.s6_addr16[6],
+ in6->sin6_addr.s6_addr16[7],
+ (unsigned int)ntohs(in6->sin6_port));
+ break;
+
+ default:
+ sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
+ }
+
+ return s;
+}
+
+static void encode_my_addr(struct ceph_messenger *msgr)
+{
+ memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
+ ceph_encode_addr(&msgr->my_enc_addr);
+}
+
+/*
+ * work queue for all reading and writing to/from the socket.
+ */
+struct workqueue_struct *ceph_msgr_wq;
+
+int __init ceph_msgr_init(void)
+{
+ ceph_msgr_wq = create_workqueue("ceph-msgr");
+ if (IS_ERR(ceph_msgr_wq)) {
+ int ret = PTR_ERR(ceph_msgr_wq);
+ pr_err("msgr_init failed to create workqueue: %d\n", ret);
+ ceph_msgr_wq = NULL;
+ return ret;
+ }
+ return 0;
+}
+
+void ceph_msgr_exit(void)
+{
+ destroy_workqueue(ceph_msgr_wq);
+}
+
+/*
+ * socket callback functions
+ */
+
+/* data available on socket, or listen socket received a connect */
+static void ceph_data_ready(struct sock *sk, int count_unused)
+{
+ struct ceph_connection *con =
+ (struct ceph_connection *)sk->sk_user_data;
+ if (sk->sk_state != TCP_CLOSE_WAIT) {
+ dout("ceph_data_ready on %p state = %lu, queueing work\n",
+ con, con->state);
+ queue_con(con);
+ }
+}
+
+/* socket has buffer space for writing */
+static void ceph_write_space(struct sock *sk)
+{
+ struct ceph_connection *con =
+ (struct ceph_connection *)sk->sk_user_data;
+
+ /* only queue to workqueue if there is data we want to write. */
+ if (test_bit(WRITE_PENDING, &con->state)) {
+ dout("ceph_write_space %p queueing write work\n", con);
+ queue_con(con);
+ } else {
+ dout("ceph_write_space %p nothing to write\n", con);
+ }
+
+ /* since we have our own write_space, clear the SOCK_NOSPACE flag */
+ clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+}
+
+/* socket's state has changed */
+static void ceph_state_change(struct sock *sk)
+{
+ struct ceph_connection *con =
+ (struct ceph_connection *)sk->sk_user_data;
+
+ dout("ceph_state_change %p state = %lu sk_state = %u\n",
+ con, con->state, sk->sk_state);
+
+ if (test_bit(CLOSED, &con->state))
+ return;
+
+ switch (sk->sk_state) {
+ case TCP_CLOSE:
+ dout("ceph_state_change TCP_CLOSE\n");
+ case TCP_CLOSE_WAIT:
+ dout("ceph_state_change TCP_CLOSE_WAIT\n");
+ if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
+ if (test_bit(CONNECTING, &con->state))
+ con->error_msg = "connection failed";
+ else
+ con->error_msg = "socket closed";
+ queue_con(con);
+ }
+ break;
+ case TCP_ESTABLISHED:
+ dout("ceph_state_change TCP_ESTABLISHED\n");
+ queue_con(con);
+ break;
+ }
+}
+
+/*
+ * set up socket callbacks
+ */
+static void set_sock_callbacks(struct socket *sock,
+ struct ceph_connection *con)
+{
+ struct sock *sk = sock->sk;
+ sk->sk_user_data = (void *)con;
+ sk->sk_data_ready = ceph_data_ready;
+ sk->sk_write_space = ceph_write_space;
+ sk->sk_state_change = ceph_state_change;
+}
+
+
+/*
+ * socket helpers
+ */
+
+/*
+ * initiate connection to a remote socket.
+ */
+static struct socket *ceph_tcp_connect(struct ceph_connection *con)
+{
+ struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
+ struct socket *sock;
+ int ret;
+
+ BUG_ON(con->sock);
+ ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret)
+ return ERR_PTR(ret);
+ con->sock = sock;
+ sock->sk->sk_allocation = GFP_NOFS;
+
+ set_sock_callbacks(sock, con);
+
+ dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
+
+ ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
+ if (ret == -EINPROGRESS) {
+ dout("connect %s EINPROGRESS sk_state = %u\n",
+ pr_addr(&con->peer_addr.in_addr),
+ sock->sk->sk_state);
+ ret = 0;
+ }
+ if (ret < 0) {
+ pr_err("connect %s error %d\n",
+ pr_addr(&con->peer_addr.in_addr), ret);
+ sock_release(sock);
+ con->sock = NULL;
+ con->error_msg = "connect error";
+ }
+
+ if (ret < 0)
+ return ERR_PTR(ret);
+ return sock;
+}
+
+static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
+{
+ struct kvec iov = {buf, len};
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+
+ return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
+}
+
+/*
+ * write something. @more is true if caller will be sending more data
+ * shortly.
+ */
+static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
+ size_t kvlen, size_t len, int more)
+{
+ struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
+
+ if (more)
+ msg.msg_flags |= MSG_MORE;
+ else
+ msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
+
+ return kernel_sendmsg(sock, &msg, iov, kvlen, len);
+}
+
+
+/*
+ * Shutdown/close the socket for the given connection.
+ */
+static int con_close_socket(struct ceph_connection *con)
+{
+ int rc;
+
+ dout("con_close_socket on %p sock %p\n", con, con->sock);
+ if (!con->sock)
+ return 0;
+ set_bit(SOCK_CLOSED, &con->state);
+ rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
+ sock_release(con->sock);
+ con->sock = NULL;
+ clear_bit(SOCK_CLOSED, &con->state);
+ return rc;
+}
+
+/*
+ * Reset a connection. Discard all incoming and outgoing messages
+ * and clear *_seq state.
+ */
+static void ceph_msg_remove(struct ceph_msg *msg)
+{
+ list_del_init(&msg->list_head);
+ ceph_msg_put(msg);
+}
+static void ceph_msg_remove_list(struct list_head *head)
+{
+ while (!list_empty(head)) {
+ struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
+ list_head);
+ ceph_msg_remove(msg);
+ }
+}
+
+static void reset_connection(struct ceph_connection *con)
+{
+ /* reset connection, out_queue, msg_ and connect_seq */
+ /* discard existing out_queue and msg_seq */
+ mutex_lock(&con->out_mutex);
+ ceph_msg_remove_list(&con->out_queue);
+ ceph_msg_remove_list(&con->out_sent);
+
+ con->connect_seq = 0;
+ con->out_seq = 0;
+ con->out_msg = NULL;
+ con->in_seq = 0;
+ mutex_unlock(&con->out_mutex);
+}
+
+/*
+ * mark a peer down. drop any open connections.
+ */
+void ceph_con_close(struct ceph_connection *con)
+{
+ dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
+ set_bit(CLOSED, &con->state); /* in case there's queued work */
+ clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
+ reset_connection(con);
+ queue_con(con);
+}
+
+/*
+ * Reopen a closed connection, with a new peer address.
+ */
+void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
+{
+ dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
+ set_bit(OPENING, &con->state);
+ clear_bit(CLOSED, &con->state);
+ memcpy(&con->peer_addr, addr, sizeof(*addr));
+ con->delay = 0; /* reset backoff memory */
+ queue_con(con);
+}
+
+/*
+ * generic get/put
+ */
+struct ceph_connection *ceph_con_get(struct ceph_connection *con)
+{
+ dout("con_get %p nref = %d -> %d\n", con,
+ atomic_read(&con->nref), atomic_read(&con->nref) + 1);
+ if (atomic_inc_not_zero(&con->nref))
+ return con;
+ return NULL;
+}
+
+void ceph_con_put(struct ceph_connection *con)
+{
+ dout("con_put %p nref = %d -> %d\n", con,
+ atomic_read(&con->nref), atomic_read(&con->nref) - 1);
+ BUG_ON(atomic_read(&con->nref) == 0);
+ if (atomic_dec_and_test(&con->nref)) {
+ BUG_ON(con->sock);
+ kfree(con);
+ }
+}
+
+/*
+ * initialize a new connection.
+ */
+void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
+{
+ dout("con_init %p\n", con);
+ memset(con, 0, sizeof(*con));
+ atomic_set(&con->nref, 1);
+ con->msgr = msgr;
+ mutex_init(&con->out_mutex);
+ INIT_LIST_HEAD(&con->out_queue);
+ INIT_LIST_HEAD(&con->out_sent);
+ INIT_DELAYED_WORK(&con->work, con_work);
+}
+
+
+/*
+ * We maintain a global counter to order connection attempts. Get
+ * a unique seq greater than @gt.
+ */
+static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
+{
+ u32 ret;
+
+ spin_lock(&msgr->global_seq_lock);
+ if (msgr->global_seq < gt)
+ msgr->global_seq = gt;
+ ret = ++msgr->global_seq;
+ spin_unlock(&msgr->global_seq_lock);
+ return ret;
+}
+
+
+/*
+ * Prepare footer for currently outgoing message, and finish things
+ * off. Assumes out_kvec* are already valid.. we just add on to the end.
+ */
+static void prepare_write_message_footer(struct ceph_connection *con, int v)
+{
+ struct ceph_msg *m = con->out_msg;
+
+ dout("prepare_write_message_footer %p\n", con);
+ con->out_kvec_is_msg = true;
+ con->out_kvec[v].iov_base = &m->footer;
+ con->out_kvec[v].iov_len = sizeof(m->footer);
+ con->out_kvec_bytes += sizeof(m->footer);
+ con->out_kvec_left++;
+ con->out_more = m->more_to_follow;
+ con->out_msg = NULL; /* we're done with this one */
+}
+
+/*
+ * Prepare headers for the next outgoing message.
+ */
+static void prepare_write_message(struct ceph_connection *con)
+{
+ struct ceph_msg *m;
+ int v = 0;
+
+ con->out_kvec_bytes = 0;
+ con->out_kvec_is_msg = true;
+
+ /* Sneak an ack in there first? If we can get it into the same
+ * TCP packet that's a good thing. */
+ if (con->in_seq > con->in_seq_acked) {
+ con->in_seq_acked = con->in_seq;
+ con->out_kvec[v].iov_base = &tag_ack;
+ con->out_kvec[v++].iov_len = 1;
+ con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
+ con->out_kvec[v].iov_base = &con->out_temp_ack;
+ con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
+ con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
+ }
+
+ /* move message to sending/sent list */
+ m = list_first_entry(&con->out_queue,
+ struct ceph_msg, list_head);
+ list_move_tail(&m->list_head, &con->out_sent);
+ con->out_msg = m; /* we don't bother taking a reference here. */
+
+ m->hdr.seq = cpu_to_le64(++con->out_seq);
+
+ dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
+ m, con->out_seq, le16_to_cpu(m->hdr.type),
+ le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
+ le32_to_cpu(m->hdr.data_len),
+ m->nr_pages);
+ BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
+
+ /* tag + hdr + front + middle */
+ con->out_kvec[v].iov_base = &tag_msg;
+ con->out_kvec[v++].iov_len = 1;
+ con->out_kvec[v].iov_base = &m->hdr;
+ con->out_kvec[v++].iov_len = sizeof(m->hdr);
+ con->out_kvec[v++] = m->front;
+ if (m->middle)
+ con->out_kvec[v++] = m->middle->vec;
+ con->out_kvec_left = v;
+ con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
+ (m->middle ? m->middle->vec.iov_len : 0);
+ con->out_kvec_cur = con->out_kvec;
+
+ /* fill in crc (except data pages), footer */
+ con->out_msg->hdr.crc =
+ cpu_to_le32(crc32c(0, (void *)&m->hdr,
+ sizeof(m->hdr) - sizeof(m->hdr.crc)));
+ con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
+ con->out_msg->footer.front_crc =
+ cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
+ if (m->middle)
+ con->out_msg->footer.middle_crc =
+ cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
+ m->middle->vec.iov_len));
+ else
+ con->out_msg->footer.middle_crc = 0;
+ con->out_msg->footer.data_crc = 0;
+ dout("prepare_write_message front_crc %u data_crc %u\n",
+ le32_to_cpu(con->out_msg->footer.front_crc),
+ le32_to_cpu(con->out_msg->footer.middle_crc));
+
+ /* is there a data payload? */
+ if (le32_to_cpu(m->hdr.data_len) > 0) {
+ /* initialize page iterator */
+ con->out_msg_pos.page = 0;
+ con->out_msg_pos.page_pos =
+ le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
+ con->out_msg_pos.data_pos = 0;
+ con->out_msg_pos.did_page_crc = 0;
+ con->out_more = 1; /* data + footer will follow */
+ } else {
+ /* no, queue up footer too and be done */
+ prepare_write_message_footer(con, v);
+ }
+
+ set_bit(WRITE_PENDING, &con->state);
+}
+
+/*
+ * Prepare an ack.
+ */
+static void prepare_write_ack(struct ceph_connection *con)
+{
+ dout("prepare_write_ack %p %llu -> %llu\n", con,
+ con->in_seq_acked, con->in_seq);
+ con->in_seq_acked = con->in_seq;
+
+ con->out_kvec[0].iov_base = &tag_ack;
+ con->out_kvec[0].iov_len = 1;
+ con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
+ con->out_kvec[1].iov_base = &con->out_temp_ack;
+ con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
+ con->out_kvec_left = 2;
+ con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
+ con->out_kvec_cur = con->out_kvec;
+ con->out_more = 1; /* more will follow.. eventually.. */
+ set_bit(WRITE_PENDING, &con->state);
+}
+
+/*
+ * Prepare to write keepalive byte.
+ */
+static void prepare_write_keepalive(struct ceph_connection *con)
+{
+ dout("prepare_write_keepalive %p\n", con);
+ con->out_kvec[0].iov_base = &tag_keepalive;
+ con->out_kvec[0].iov_len = 1;
+ con->out_kvec_left = 1;
+ con->out_kvec_bytes = 1;
+ con->out_kvec_cur = con->out_kvec;
+ set_bit(WRITE_PENDING, &con->state);
+}
+
+/*
+ * Connection negotiation.
+ */
+
+static void prepare_connect_authorizer(struct ceph_connection *con)
+{
+ void *auth_buf;
+ int auth_len = 0;
+ int auth_protocol = 0;
+
+ if (con->ops->get_authorizer)
+ con->ops->get_authorizer(con, &auth_buf, &auth_len,
+ &auth_protocol, &con->auth_reply_buf,
+ &con->auth_reply_buf_len,
+ con->auth_retry);
+
+ con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
+ con->out_connect.authorizer_len = cpu_to_le32(auth_len);
+
+ con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
+ con->out_kvec[con->out_kvec_left].iov_len = auth_len;
+ con->out_kvec_left++;
+ con->out_kvec_bytes += auth_len;
+}
+
+/*
+ * We connected to a peer and are saying hello.
+ */
+static void prepare_write_banner(struct ceph_messenger *msgr,
+ struct ceph_connection *con)
+{
+ int len = strlen(CEPH_BANNER);
+
+ con->out_kvec[0].iov_base = CEPH_BANNER;
+ con->out_kvec[0].iov_len = len;
+ con->out_kvec[1].iov_base = &msgr->my_enc_addr;
+ con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
+ con->out_kvec_left = 2;
+ con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
+ con->out_kvec_cur = con->out_kvec;
+ con->out_more = 0;
+ set_bit(WRITE_PENDING, &con->state);
+}
+
+static void prepare_write_connect(struct ceph_messenger *msgr,
+ struct ceph_connection *con,
+ int after_banner)
+{
+ unsigned global_seq = get_global_seq(con->msgr, 0);
+ int proto;
+
+ switch (con->peer_name.type) {
+ case CEPH_ENTITY_TYPE_MON:
+ proto = CEPH_MONC_PROTOCOL;
+ break;
+ case CEPH_ENTITY_TYPE_OSD:
+ proto = CEPH_OSDC_PROTOCOL;
+ break;
+ case CEPH_ENTITY_TYPE_MDS:
+ proto = CEPH_MDSC_PROTOCOL;
+ break;
+ default:
+ BUG();
+ }
+
+ dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
+ con->connect_seq, global_seq, proto);
+
+ con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
+ con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
+ con->out_connect.global_seq = cpu_to_le32(global_seq);
+ con->out_connect.protocol_version = cpu_to_le32(proto);
+ con->out_connect.flags = 0;
+ if (test_bit(LOSSYTX, &con->state))
+ con->out_connect.flags = CEPH_MSG_CONNECT_LOSSY;
+
+ if (!after_banner) {
+ con->out_kvec_left = 0;
+ con->out_kvec_bytes = 0;
+ }
+ con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
+ con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
+ con->out_kvec_left++;
+ con->out_kvec_bytes += sizeof(con->out_connect);
+ con->out_kvec_cur = con->out_kvec;
+ con->out_more = 0;
+ set_bit(WRITE_PENDING, &con->state);
+
+ prepare_connect_authorizer(con);
+}
+
+
+/*
+ * write as much of pending kvecs to the socket as we can.
+ * 1 -> done
+ * 0 -> socket full, but more to do
+ * <0 -> error
+ */
+static int write_partial_kvec(struct ceph_connection *con)
+{
+ int ret;
+
+ dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
+ while (con->out_kvec_bytes > 0) {
+ ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
+ con->out_kvec_left, con->out_kvec_bytes,
+ con->out_more);
+ if (ret <= 0)
+ goto out;
+ con->out_kvec_bytes -= ret;
+ if (con->out_kvec_bytes == 0)
+ break; /* done */
+ while (ret > 0) {
+ if (ret >= con->out_kvec_cur->iov_len) {
+ ret -= con->out_kvec_cur->iov_len;
+ con->out_kvec_cur++;
+ con->out_kvec_left--;
+ } else {
+ con->out_kvec_cur->iov_len -= ret;
+ con->out_kvec_cur->iov_base += ret;
+ ret = 0;
+ break;
+ }
+ }
+ }
+ con->out_kvec_left = 0;
+ con->out_kvec_is_msg = false;
+ ret = 1;
+out:
+ dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
+ con->out_kvec_bytes, con->out_kvec_left, ret);
+ return ret; /* done! */
+}
+
+/*
+ * Write as much message data payload as we can. If we finish, queue
+ * up the footer.
+ * 1 -> done, footer is now queued in out_kvec[].
+ * 0 -> socket full, but more to do
+ * <0 -> error
+ */
+static int write_partial_msg_pages(struct ceph_connection *con)
+{
+ struct ceph_msg *msg = con->out_msg;
+ unsigned data_len = le32_to_cpu(msg->hdr.data_len);
+ size_t len;
+ int crc = con->msgr->nocrc;
+ int ret;
+
+ dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
+ con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
+ con->out_msg_pos.page_pos);
+
+ while (con->out_msg_pos.page < con->out_msg->nr_pages) {
+ struct page *page = NULL;
+ void *kaddr = NULL;
+
+ /*
+ * if we are calculating the data crc (the default), we need
+ * to map the page. if our pages[] has been revoked, use the
+ * zero page.
+ */
+ if (msg->pages) {
+ page = msg->pages[con->out_msg_pos.page];
+ if (crc)
+ kaddr = kmap(page);
+ } else {
+ page = con->msgr->zero_page;
+ if (crc)
+ kaddr = page_address(con->msgr->zero_page);
+ }
+ len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
+ (int)(data_len - con->out_msg_pos.data_pos));
+ if (crc && !con->out_msg_pos.did_page_crc) {
+ void *base = kaddr + con->out_msg_pos.page_pos;
+ u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
+
+ BUG_ON(kaddr == NULL);
+ con->out_msg->footer.data_crc =
+ cpu_to_le32(crc32c(tmpcrc, base, len));
+ con->out_msg_pos.did_page_crc = 1;
+ }
+
+ ret = kernel_sendpage(con->sock, page,
+ con->out_msg_pos.page_pos, len,
+ MSG_DONTWAIT | MSG_NOSIGNAL |
+ MSG_MORE);
+
+ if (crc && msg->pages)
+ kunmap(page);
+
+ if (ret <= 0)
+ goto out;
+
+ con->out_msg_pos.data_pos += ret;
+ con->out_msg_pos.page_pos += ret;
+ if (ret == len) {
+ con->out_msg_pos.page_pos = 0;
+ con->out_msg_pos.page++;
+ con->out_msg_pos.did_page_crc = 0;
+ }
+ }
+
+ dout("write_partial_msg_pages %p msg %p done\n", con, msg);
+
+ /* prepare and queue up footer, too */
+ if (!crc)
+ con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
+ con->out_kvec_bytes = 0;
+ con->out_kvec_left = 0;
+ con->out_kvec_cur = con->out_kvec;
+ prepare_write_message_footer(con, 0);
+ ret = 1;
+out:
+ return ret;
+}
+
+/*
+ * write some zeros
+ */
+static int write_partial_skip(struct ceph_connection *con)
+{
+ int ret;
+
+ while (con->out_skip > 0) {
+ struct kvec iov = {
+ .iov_base = page_address(con->msgr->zero_page),
+ .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
+ };
+
+ ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
+ if (ret <= 0)
+ goto out;
+ con->out_skip -= ret;
+ }
+ ret = 1;
+out:
+ return ret;
+}
+
+/*
+ * Prepare to read connection handshake, or an ack.
+ */
+static void prepare_read_banner(struct ceph_connection *con)
+{
+ dout("prepare_read_banner %p\n", con);
+ con->in_base_pos = 0;
+}
+
+static void prepare_read_connect(struct ceph_connection *con)
+{
+ dout("prepare_read_connect %p\n", con);
+ con->in_base_pos = 0;
+}
+
+static void prepare_read_connect_retry(struct ceph_connection *con)
+{
+ dout("prepare_read_connect_retry %p\n", con);
+ con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr)
+ + sizeof(con->peer_addr_for_me);
+}
+
+static void prepare_read_ack(struct ceph_connection *con)
+{
+ dout("prepare_read_ack %p\n", con);
+ con->in_base_pos = 0;
+}
+
+static void prepare_read_tag(struct ceph_connection *con)
+{
+ dout("prepare_read_tag %p\n", con);
+ con->in_base_pos = 0;
+ con->in_tag = CEPH_MSGR_TAG_READY;
+}
+
+/*
+ * Prepare to read a message.
+ */
+static int prepare_read_message(struct ceph_connection *con)
+{
+ dout("prepare_read_message %p\n", con);
+ BUG_ON(con->in_msg != NULL);
+ con->in_base_pos = 0;
+ con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
+ return 0;
+}
+
+
+static int read_partial(struct ceph_connection *con,
+ int *to, int size, void *object)
+{
+ *to += size;
+ while (con->in_base_pos < *to) {
+ int left = *to - con->in_base_pos;
+ int have = size - left;
+ int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
+ if (ret <= 0)
+ return ret;
+ con->in_base_pos += ret;
+ }
+ return 1;
+}
+
+
+/*
+ * Read all or part of the connect-side handshake on a new connection
+ */
+static int read_partial_banner(struct ceph_connection *con)
+{
+ int ret, to = 0;
+
+ dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
+
+ /* peer's banner */
+ ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
+ if (ret <= 0)
+ goto out;
+ ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
+ &con->actual_peer_addr);
+ if (ret <= 0)
+ goto out;
+ ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
+ &con->peer_addr_for_me);
+ if (ret <= 0)
+ goto out;
+out:
+ return ret;
+}
+
+static int read_partial_connect(struct ceph_connection *con)
+{
+ int ret, to = 0;
+
+ dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
+
+ ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
+ if (ret <= 0)
+ goto out;
+ ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
+ con->auth_reply_buf);
+ if (ret <= 0)
+ goto out;
+
+ dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
+ con, (int)con->in_reply.tag,
+ le32_to_cpu(con->in_reply.connect_seq),
+ le32_to_cpu(con->in_reply.global_seq));
+out:
+ return ret;
+
+}
+
+/*
+ * Verify the hello banner looks okay.
+ */
+static int verify_hello(struct ceph_connection *con)
+{
+ if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
+ pr_err("connect to %s got bad banner\n",
+ pr_addr(&con->peer_addr.in_addr));
+ con->error_msg = "protocol error, bad banner";
+ return -1;
+ }
+ return 0;
+}
+
+static bool addr_is_blank(struct sockaddr_storage *ss)
+{
+ switch (ss->ss_family) {
+ case AF_INET:
+ return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
+ case AF_INET6:
+ return
+ ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
+ ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
+ ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
+ ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
+ }
+ return false;
+}
+
+static int addr_port(struct sockaddr_storage *ss)
+{
+ switch (ss->ss_family) {
+ case AF_INET:
+ return ntohs(((struct sockaddr_in *)ss)->sin_port);
+ case AF_INET6:
+ return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
+ }
+ return 0;
+}
+
+static void addr_set_port(struct sockaddr_storage *ss, int p)
+{
+ switch (ss->ss_family) {
+ case AF_INET:
+ ((struct sockaddr_in *)ss)->sin_port = htons(p);
+ case AF_INET6:
+ ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
+ }
+}
+
+/*
+ * Parse an ip[:port] list into an addr array. Use the default
+ * monitor port if a port isn't specified.
+ */
+int ceph_parse_ips(const char *c, const char *end,
+ struct ceph_entity_addr *addr,
+ int max_count, int *count)
+{
+ int i;
+ const char *p = c;
+
+ dout("parse_ips on '%.*s'\n", (int)(end-c), c);
+ for (i = 0; i < max_count; i++) {
+ const char *ipend;
+ struct sockaddr_storage *ss = &addr[i].in_addr;
+ struct sockaddr_in *in4 = (void *)ss;
+ struct sockaddr_in6 *in6 = (void *)ss;
+ int port;
+
+ memset(ss, 0, sizeof(*ss));
+ if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
+ ',', &ipend)) {
+ ss->ss_family = AF_INET;
+ } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
+ ',', &ipend)) {
+ ss->ss_family = AF_INET6;
+ } else {
+ goto bad;
+ }
+ p = ipend;
+
+ /* port? */
+ if (p < end && *p == ':') {
+ port = 0;
+ p++;
+ while (p < end && *p >= '0' && *p <= '9') {
+ port = (port * 10) + (*p - '0');
+ p++;
+ }
+ if (port > 65535 || port == 0)
+ goto bad;
+ } else {
+ port = CEPH_MON_PORT;
+ }
+
+ addr_set_port(ss, port);
+
+ dout("parse_ips got %s\n", pr_addr(ss));
+
+ if (p == end)
+ break;
+ if (*p != ',')
+ goto bad;
+ p++;
+ }
+
+ if (p != end)
+ goto bad;
+
+ if (count)
+ *count = i + 1;
+ return 0;
+
+bad:
+ pr_err("parse_ips bad ip '%s'\n", c);
+ return -EINVAL;
+}
+
+static int process_banner(struct ceph_connection *con)
+{
+ dout("process_banner on %p\n", con);
+
+ if (verify_hello(con) < 0)
+ return -1;
+
+ ceph_decode_addr(&con->actual_peer_addr);
+ ceph_decode_addr(&con->peer_addr_for_me);
+
+ /*
+ * Make sure the other end is who we wanted. note that the other
+ * end may not yet know their ip address, so if it's 0.0.0.0, give
+ * them the benefit of the doubt.
+ */
+ if (!ceph_entity_addr_is_local(&con->peer_addr,
+ &con->actual_peer_addr) &&
+ !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
+ con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
+ pr_err("wrong peer, want %s/%d, "
+ "got %s/%d, wtf\n",
+ pr_addr(&con->peer_addr.in_addr),
+ con->peer_addr.nonce,
+ pr_addr(&con->actual_peer_addr.in_addr),
+ con->actual_peer_addr.nonce);
+ con->error_msg = "protocol error, wrong peer";
+ return -1;
+ }
+
+ /*
+ * did we learn our address?
+ */
+ if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
+ int port = addr_port(&con->msgr->inst.addr.in_addr);
+
+ memcpy(&con->msgr->inst.addr.in_addr,
+ &con->peer_addr_for_me.in_addr,
+ sizeof(con->peer_addr_for_me.in_addr));
+ addr_set_port(&con->msgr->inst.addr.in_addr, port);
+ encode_my_addr(con->msgr);
+ dout("process_banner learned my addr is %s\n",
+ pr_addr(&con->msgr->inst.addr.in_addr));
+ }
+
+ set_bit(NEGOTIATING, &con->state);
+ prepare_read_connect(con);
+ return 0;
+}
+
+static int process_connect(struct ceph_connection *con)
+{
+ dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
+
+ switch (con->in_reply.tag) {
+ case CEPH_MSGR_TAG_BADPROTOVER:
+ dout("process_connect got BADPROTOVER my %d != their %d\n",
+ le32_to_cpu(con->out_connect.protocol_version),
+ le32_to_cpu(con->in_reply.protocol_version));
+ pr_err("%s%lld %s protocol version mismatch,"
+ " my %d != server's %d\n",
+ ENTITY_NAME(con->peer_name),
+ pr_addr(&con->peer_addr.in_addr),
+ le32_to_cpu(con->out_connect.protocol_version),
+ le32_to_cpu(con->in_reply.protocol_version));
+ con->error_msg = "protocol version mismatch";
+ if (con->ops->bad_proto)
+ con->ops->bad_proto(con);
+ reset_connection(con);
+ set_bit(CLOSED, &con->state); /* in case there's queued work */
+ return -1;
+
+ case CEPH_MSGR_TAG_BADAUTHORIZER:
+ con->auth_retry++;
+ dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
+ con->auth_retry);
+ if (con->auth_retry == 2) {
+ con->error_msg = "connect authorization failure";
+ reset_connection(con);
+ set_bit(CLOSED, &con->state);
+ return -1;
+ }
+ con->auth_retry = 1;
+ prepare_write_connect(con->msgr, con, 0);
+ prepare_read_connect_retry(con);
+ break;
+
+ case CEPH_MSGR_TAG_RESETSESSION:
+ /*
+ * If we connected with a large connect_seq but the peer
+ * has no record of a session with us (no connection, or
+ * connect_seq == 0), they will send RESETSESION to indicate
+ * that they must have reset their session, and may have
+ * dropped messages.
+ */
+ dout("process_connect got RESET peer seq %u\n",
+ le32_to_cpu(con->in_connect.connect_seq));
+ pr_err("%s%lld %s connection reset\n",
+ ENTITY_NAME(con->peer_name),
+ pr_addr(&con->peer_addr.in_addr));
+ reset_connection(con);
+ prepare_write_connect(con->msgr, con, 0);
+ prepare_read_connect(con);
+
+ /* Tell ceph about it. */
+ pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
+ if (con->ops->peer_reset)
+ con->ops->peer_reset(con);
+ break;
+
+ case CEPH_MSGR_TAG_RETRY_SESSION:
+ /*
+ * If we sent a smaller connect_seq than the peer has, try
+ * again with a larger value.
+ */
+ dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
+ le32_to_cpu(con->out_connect.connect_seq),
+ le32_to_cpu(con->in_connect.connect_seq));
+ con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
+ prepare_write_connect(con->msgr, con, 0);
+ prepare_read_connect(con);
+ break;
+
+ case CEPH_MSGR_TAG_RETRY_GLOBAL:
+ /*
+ * If we sent a smaller global_seq than the peer has, try
+ * again with a larger value.
+ */
+ dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
+ con->peer_global_seq,
+ le32_to_cpu(con->in_connect.global_seq));
+ get_global_seq(con->msgr,
+ le32_to_cpu(con->in_connect.global_seq));
+ prepare_write_connect(con->msgr, con, 0);
+ prepare_read_connect(con);
+ break;
+
+ case CEPH_MSGR_TAG_READY:
+ clear_bit(CONNECTING, &con->state);
+ con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
+ con->connect_seq++;
+ dout("process_connect got READY gseq %d cseq %d (%d)\n",
+ con->peer_global_seq,
+ le32_to_cpu(con->in_reply.connect_seq),
+ con->connect_seq);
+ WARN_ON(con->connect_seq !=
+ le32_to_cpu(con->in_reply.connect_seq));
+ prepare_read_tag(con);
+ break;
+
+ case CEPH_MSGR_TAG_WAIT:
+ /*
+ * If there is a connection race (we are opening
+ * connections to each other), one of us may just have
+ * to WAIT. This shouldn't happen if we are the
+ * client.
+ */
+ pr_err("process_connect peer connecting WAIT\n");
+
+ default:
+ pr_err("connect protocol error, will retry\n");
+ con->error_msg = "protocol error, garbage tag during connect";
+ return -1;
+ }
+ return 0;
+}
+
+
+/*
+ * read (part of) an ack
+ */
+static int read_partial_ack(struct ceph_connection *con)
+{
+ int to = 0;
+
+ return read_partial(con, &to, sizeof(con->in_temp_ack),
+ &con->in_temp_ack);
+}
+
+
+/*
+ * We can finally discard anything that's been acked.
+ */
+static void process_ack(struct ceph_connection *con)
+{
+ struct ceph_msg *m;
+ u64 ack = le64_to_cpu(con->in_temp_ack);
+ u64 seq;
+
+ mutex_lock(&con->out_mutex);
+ while (!list_empty(&con->out_sent)) {
+ m = list_first_entry(&con->out_sent, struct ceph_msg,
+ list_head);
+ seq = le64_to_cpu(m->hdr.seq);
+ if (seq > ack)
+ break;
+ dout("got ack for seq %llu type %d at %p\n", seq,
+ le16_to_cpu(m->hdr.type), m);
+ ceph_msg_remove(m);
+ }
+ mutex_unlock(&con->out_mutex);
+ prepare_read_tag(con);
+}
+
+
+
+
+
+
+/*
+ * read (part of) a message.
+ */
+static int read_partial_message(struct ceph_connection *con)
+{
+ struct ceph_msg *m = con->in_msg;
+ void *p;
+ int ret;
+ int to, want, left;
+ unsigned front_len, middle_len, data_len, data_off;
+ int datacrc = con->msgr->nocrc;
+
+ dout("read_partial_message con %p msg %p\n", con, m);
+
+ /* header */
+ while (con->in_base_pos < sizeof(con->in_hdr)) {
+ left = sizeof(con->in_hdr) - con->in_base_pos;
+ ret = ceph_tcp_recvmsg(con->sock,
+ (char *)&con->in_hdr + con->in_base_pos,
+ left);
+ if (ret <= 0)
+ return ret;
+ con->in_base_pos += ret;
+ if (con->in_base_pos == sizeof(con->in_hdr)) {
+ u32 crc = crc32c(0, (void *)&con->in_hdr,
+ sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
+ if (crc != le32_to_cpu(con->in_hdr.crc)) {
+ pr_err("read_partial_message bad hdr "
+ " crc %u != expected %u\n",
+ crc, con->in_hdr.crc);
+ return -EBADMSG;
+ }
+ }
+ }
+
+ front_len = le32_to_cpu(con->in_hdr.front_len);
+ if (front_len > CEPH_MSG_MAX_FRONT_LEN)
+ return -EIO;
+ middle_len = le32_to_cpu(con->in_hdr.middle_len);
+ if (middle_len > CEPH_MSG_MAX_DATA_LEN)
+ return -EIO;
+ data_len = le32_to_cpu(con->in_hdr.data_len);
+ if (data_len > CEPH_MSG_MAX_DATA_LEN)
+ return -EIO;
+
+ /* allocate message? */
+ if (!con->in_msg) {
+ dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
+ con->in_hdr.front_len, con->in_hdr.data_len);
+ con->in_msg = con->ops->alloc_msg(con, &con->in_hdr);
+ if (!con->in_msg) {
+ /* skip this message */
+ dout("alloc_msg returned NULL, skipping message\n");
+ con->in_base_pos = -front_len - middle_len - data_len -
+ sizeof(m->footer);
+ con->in_tag = CEPH_MSGR_TAG_READY;
+ return 0;
+ }
+ if (IS_ERR(con->in_msg)) {
+ ret = PTR_ERR(con->in_msg);
+ con->in_msg = NULL;
+ con->error_msg = "out of memory for incoming message";
+ return ret;
+ }
+ m = con->in_msg;
+ m->front.iov_len = 0; /* haven't read it yet */
+ memcpy(&m->hdr, &con->in_hdr, sizeof(con->in_hdr));
+ }
+
+ /* front */
+ while (m->front.iov_len < front_len) {
+ BUG_ON(m->front.iov_base == NULL);
+ left = front_len - m->front.iov_len;
+ ret = ceph_tcp_recvmsg(con->sock, (char *)m->front.iov_base +
+ m->front.iov_len, left);
+ if (ret <= 0)
+ return ret;
+ m->front.iov_len += ret;
+ if (m->front.iov_len == front_len)
+ con->in_front_crc = crc32c(0, m->front.iov_base,
+ m->front.iov_len);
+ }
+
+ /* middle */
+ while (middle_len > 0 && (!m->middle ||
+ m->middle->vec.iov_len < middle_len)) {
+ if (m->middle == NULL) {
+ ret = -EOPNOTSUPP;
+ if (con->ops->alloc_middle)
+ ret = con->ops->alloc_middle(con, m);
+ if (ret < 0) {
+ dout("alloc_middle failed, skipping payload\n");
+ con->in_base_pos = -middle_len - data_len
+ - sizeof(m->footer);
+ ceph_msg_put(con->in_msg);
+ con->in_msg = NULL;
+ con->in_tag = CEPH_MSGR_TAG_READY;
+ return 0;
+ }
+ m->middle->vec.iov_len = 0;
+ }
+ left = middle_len - m->middle->vec.iov_len;
+ ret = ceph_tcp_recvmsg(con->sock,
+ (char *)m->middle->vec.iov_base +
+ m->middle->vec.iov_len, left);
+ if (ret <= 0)
+ return ret;
+ m->middle->vec.iov_len += ret;
+ if (m->middle->vec.iov_len == middle_len)
+ con->in_middle_crc = crc32c(0, m->middle->vec.iov_base,
+ m->middle->vec.iov_len);
+ }
+
+ /* (page) data */
+ data_off = le16_to_cpu(m->hdr.data_off);
+ if (data_len == 0)
+ goto no_data;
+
+ if (m->nr_pages == 0) {
+ con->in_msg_pos.page = 0;
+ con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
+ con->in_msg_pos.data_pos = 0;
+ /* find pages for data payload */
+ want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
+ ret = -1;
+ if (con->ops->prepare_pages)
+ ret = con->ops->prepare_pages(con, m, want);
+ if (ret < 0) {
+ dout("%p prepare_pages failed, skipping payload\n", m);
+ con->in_base_pos = -data_len - sizeof(m->footer);
+ ceph_msg_put(con->in_msg);
+ con->in_msg = NULL;
+ con->in_tag = CEPH_MSGR_TAG_READY;
+ return 0;
+ }
+ BUG_ON(m->nr_pages < want);
+ }
+ while (con->in_msg_pos.data_pos < data_len) {
+ left = min((int)(data_len - con->in_msg_pos.data_pos),
+ (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
+ BUG_ON(m->pages == NULL);
+ p = kmap(m->pages[con->in_msg_pos.page]);
+ ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
+ left);
+ if (ret > 0 && datacrc)
+ con->in_data_crc =
+ crc32c(con->in_data_crc,
+ p + con->in_msg_pos.page_pos, ret);
+ kunmap(m->pages[con->in_msg_pos.page]);
+ if (ret <= 0)
+ return ret;
+ con->in_msg_pos.data_pos += ret;
+ con->in_msg_pos.page_pos += ret;
+ if (con->in_msg_pos.page_pos == PAGE_SIZE) {
+ con->in_msg_pos.page_pos = 0;
+ con->in_msg_pos.page++;
+ }
+ }
+
+no_data:
+ /* footer */
+ to = sizeof(m->hdr) + sizeof(m->footer);
+ while (con->in_base_pos < to) {
+ left = to - con->in_base_pos;
+ ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
+ (con->in_base_pos - sizeof(m->hdr)),
+ left);
+ if (ret <= 0)
+ return ret;
+ con->in_base_pos += ret;
+ }
+ dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
+ m, front_len, m->footer.front_crc, middle_len,
+ m->footer.middle_crc, data_len, m->footer.data_crc);
+
+ /* crc ok? */
+ if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
+ pr_err("read_partial_message %p front crc %u != exp. %u\n",
+ m, con->in_front_crc, m->footer.front_crc);
+ return -EBADMSG;
+ }
+ if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
+ pr_err("read_partial_message %p middle crc %u != exp %u\n",
+ m, con->in_middle_crc, m->footer.middle_crc);
+ return -EBADMSG;
+ }
+ if (datacrc &&
+ (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
+ con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
+ pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
+ con->in_data_crc, le32_to_cpu(m->footer.data_crc));
+ return -EBADMSG;
+ }
+
+ return 1; /* done! */
+}
+
+/*
+ * Process message. This happens in the worker thread. The callback should
+ * be careful not to do anything that waits on other incoming messages or it
+ * may deadlock.
+ */
+static void process_message(struct ceph_connection *con)
+{
+ struct ceph_msg *msg = con->in_msg;
+
+ con->in_msg = NULL;
+
+ /* if first message, set peer_name */
+ if (con->peer_name.type == 0)
+ con->peer_name = msg->hdr.src.name;
+
+ mutex_lock(&con->out_mutex);
+ con->in_seq++;
+ mutex_unlock(&con->out_mutex);
+
+ dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
+ msg, le64_to_cpu(msg->hdr.seq),
+ ENTITY_NAME(msg->hdr.src.name),
+ le16_to_cpu(msg->hdr.type),
+ ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
+ le32_to_cpu(msg->hdr.front_len),
+ le32_to_cpu(msg->hdr.data_len),
+ con->in_front_crc, con->in_middle_crc, con->in_data_crc);
+ con->ops->dispatch(con, msg);
+ prepare_read_tag(con);
+}
+
+
+/*
+ * Write something to the socket. Called in a worker thread when the
+ * socket appears to be writeable and we have something ready to send.
+ */
+static int try_write(struct ceph_connection *con)
+{
+ struct ceph_messenger *msgr = con->msgr;
+ int ret = 1;
+
+ dout("try_write start %p state %lu nref %d\n", con, con->state,
+ atomic_read(&con->nref));
+
+ mutex_lock(&con->out_mutex);
+more:
+ dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
+
+ /* open the socket first? */
+ if (con->sock == NULL) {
+ /*
+ * if we were STANDBY and are reconnecting _this_
+ * connection, bump connect_seq now. Always bump
+ * global_seq.
+ */
+ if (test_and_clear_bit(STANDBY, &con->state))
+ con->connect_seq++;
+
+ prepare_write_banner(msgr, con);
+ prepare_write_connect(msgr, con, 1);
+ prepare_read_banner(con);
+ set_bit(CONNECTING, &con->state);
+ clear_bit(NEGOTIATING, &con->state);
+
+ con->in_tag = CEPH_MSGR_TAG_READY;
+ dout("try_write initiating connect on %p new state %lu\n",
+ con, con->state);
+ con->sock = ceph_tcp_connect(con);
+ if (IS_ERR(con->sock)) {
+ con->sock = NULL;
+ con->error_msg = "connect error";
+ ret = -1;
+ goto out;
+ }
+ }
+
+more_kvec:
+ /* kvec data queued? */
+ if (con->out_skip) {
+ ret = write_partial_skip(con);
+ if (ret <= 0)
+ goto done;
+ if (ret < 0) {
+ dout("try_write write_partial_skip err %d\n", ret);
+ goto done;
+ }
+ }
+ if (con->out_kvec_left) {
+ ret = write_partial_kvec(con);
+ if (ret <= 0)
+ goto done;
+ if (ret < 0) {
+ dout("try_write write_partial_kvec err %d\n", ret);
+ goto done;
+ }
+ }
+
+ /* msg pages? */
+ if (con->out_msg) {
+ ret = write_partial_msg_pages(con);
+ if (ret == 1)
+ goto more_kvec; /* we need to send the footer, too! */
+ if (ret == 0)
+ goto done;
+ if (ret < 0) {
+ dout("try_write write_partial_msg_pages err %d\n",
+ ret);
+ goto done;
+ }
+ }
+
+ if (!test_bit(CONNECTING, &con->state)) {
+ /* is anything else pending? */
+ if (!list_empty(&con->out_queue)) {
+ prepare_write_message(con);
+ goto more;
+ }
+ if (con->in_seq > con->in_seq_acked) {
+ prepare_write_ack(con);
+ goto more;
+ }
+ if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
+ prepare_write_keepalive(con);
+ goto more;
+ }
+ }
+
+ /* Nothing to do! */
+ clear_bit(WRITE_PENDING, &con->state);
+ dout("try_write nothing else to write.\n");
+done:
+ ret = 0;
+out:
+ mutex_unlock(&con->out_mutex);
+ dout("try_write done on %p\n", con);
+ return ret;
+}
+
+
+
+/*
+ * Read what we can from the socket.
+ */
+static int try_read(struct ceph_connection *con)
+{
+ struct ceph_messenger *msgr;
+ int ret = -1;
+
+ if (!con->sock)
+ return 0;
+
+ if (test_bit(STANDBY, &con->state))
+ return 0;
+
+ dout("try_read start on %p\n", con);
+ msgr = con->msgr;
+
+more:
+ dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
+ con->in_base_pos);
+ if (test_bit(CONNECTING, &con->state)) {
+ if (!test_bit(NEGOTIATING, &con->state)) {
+ dout("try_read connecting\n");
+ ret = read_partial_banner(con);
+ if (ret <= 0)
+ goto done;
+ if (process_banner(con) < 0) {
+ ret = -1;
+ goto out;
+ }
+ }
+ ret = read_partial_connect(con);
+ if (ret <= 0)
+ goto done;
+ if (process_connect(con) < 0) {
+ ret = -1;
+ goto out;
+ }
+ goto more;
+ }
+
+ if (con->in_base_pos < 0) {
+ /*
+ * skipping + discarding content.
+ *
+ * FIXME: there must be a better way to do this!
+ */
+ static char buf[1024];
+ int skip = min(1024, -con->in_base_pos);
+ dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
+ ret = ceph_tcp_recvmsg(con->sock, buf, skip);
+ if (ret <= 0)
+ goto done;
+ con->in_base_pos += ret;
+ if (con->in_base_pos)
+ goto more;
+ }
+ if (con->in_tag == CEPH_MSGR_TAG_READY) {
+ /*
+ * what's next?
+ */
+ ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
+ if (ret <= 0)
+ goto done;
+ dout("try_read got tag %d\n", (int)con->in_tag);
+ switch (con->in_tag) {
+ case CEPH_MSGR_TAG_MSG:
+ prepare_read_message(con);
+ break;
+ case CEPH_MSGR_TAG_ACK:
+ prepare_read_ack(con);
+ break;
+ case CEPH_MSGR_TAG_CLOSE:
+ set_bit(CLOSED, &con->state); /* fixme */
+ goto done;
+ default:
+ goto bad_tag;
+ }
+ }
+ if (con->in_tag == CEPH_MSGR_TAG_MSG) {
+ ret = read_partial_message(con);
+ if (ret <= 0) {
+ switch (ret) {
+ case -EBADMSG:
+ con->error_msg = "bad crc";
+ ret = -EIO;
+ goto out;
+ case -EIO:
+ con->error_msg = "io error";
+ goto out;
+ default:
+ goto done;
+ }
+ }
+ if (con->in_tag == CEPH_MSGR_TAG_READY)
+ goto more;
+ process_message(con);
+ goto more;
+ }
+ if (con->in_tag == CEPH_MSGR_TAG_ACK) {
+ ret = read_partial_ack(con);
+ if (ret <= 0)
+ goto done;
+ process_ack(con);
+ goto more;
+ }
+
+done:
+ ret = 0;
+out:
+ dout("try_read done on %p\n", con);
+ return ret;
+
+bad_tag:
+ pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
+ con->error_msg = "protocol error, garbage tag";
+ ret = -1;
+ goto out;
+}
+
+
+/*
+ * Atomically queue work on a connection. Bump @con reference to
+ * avoid races with connection teardown.
+ *
+ * There is some trickery going on with QUEUED and BUSY because we
+ * only want a _single_ thread operating on each connection at any
+ * point in time, but we want to use all available CPUs.
+ *
+ * The worker thread only proceeds if it can atomically set BUSY. It
+ * clears QUEUED and does it's thing. When it thinks it's done, it
+ * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
+ * (tries again to set BUSY).
+ *
+ * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
+ * try to queue work. If that fails (work is already queued, or BUSY)
+ * we give up (work also already being done or is queued) but leave QUEUED
+ * set so that the worker thread will loop if necessary.
+ */
+static void queue_con(struct ceph_connection *con)
+{
+ if (test_bit(DEAD, &con->state)) {
+ dout("queue_con %p ignoring: DEAD\n",
+ con);
+ return;
+ }
+
+ if (!con->ops->get(con)) {
+ dout("queue_con %p ref count 0\n", con);
+ return;
+ }
+
+ set_bit(QUEUED, &con->state);
+ if (test_bit(BUSY, &con->state)) {
+ dout("queue_con %p - already BUSY\n", con);
+ con->ops->put(con);
+ } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
+ dout("queue_con %p - already queued\n", con);
+ con->ops->put(con);
+ } else {
+ dout("queue_con %p\n", con);
+ }
+}
+
+/*
+ * Do some work on a connection. Drop a connection ref when we're done.
+ */
+static void con_work(struct work_struct *work)
+{
+ struct ceph_connection *con = container_of(work, struct ceph_connection,
+ work.work);
+ int backoff = 0;
+
+more:
+ if (test_and_set_bit(BUSY, &con->state) != 0) {
+ dout("con_work %p BUSY already set\n", con);
+ goto out;
+ }
+ dout("con_work %p start, clearing QUEUED\n", con);
+ clear_bit(QUEUED, &con->state);
+
+ if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
+ dout("con_work CLOSED\n");
+ con_close_socket(con);
+ goto done;
+ }
+ if (test_and_clear_bit(OPENING, &con->state)) {
+ /* reopen w/ new peer */
+ dout("con_work OPENING\n");
+ con_close_socket(con);
+ }
+
+ if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
+ try_read(con) < 0 ||
+ try_write(con) < 0) {
+ backoff = 1;
+ ceph_fault(con); /* error/fault path */
+ }
+
+done:
+ clear_bit(BUSY, &con->state);
+ dout("con->state=%lu\n", con->state);
+ if (test_bit(QUEUED, &con->state)) {
+ if (!backoff) {
+ dout("con_work %p QUEUED reset, looping\n", con);
+ goto more;
+ }
+ dout("con_work %p QUEUED reset, but just faulted\n", con);
+ clear_bit(QUEUED, &con->state);
+ }
+ dout("con_work %p done\n", con);
+
+out:
+ con->ops->put(con);
+}
+
+
+/*
+ * Generic error/fault handler. A retry mechanism is used with
+ * exponential backoff
+ */
+static void ceph_fault(struct ceph_connection *con)
+{
+ pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
+ pr_addr(&con->peer_addr.in_addr), con->error_msg);
+ dout("fault %p state %lu to peer %s\n",
+ con, con->state, pr_addr(&con->peer_addr.in_addr));
+
+ if (test_bit(LOSSYTX, &con->state)) {
+ dout("fault on LOSSYTX channel\n");
+ goto out;
+ }
+
+ clear_bit(BUSY, &con->state); /* to avoid an improbable race */
+
+ con_close_socket(con);
+ con->in_msg = NULL;
+
+ /* If there are no messages in the queue, place the connection
+ * in a STANDBY state (i.e., don't try to reconnect just yet). */
+ mutex_lock(&con->out_mutex);
+ if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
+ dout("fault setting STANDBY\n");
+ set_bit(STANDBY, &con->state);
+ mutex_unlock(&con->out_mutex);
+ goto out;
+ }
+
+ /* Requeue anything that hasn't been acked, and retry after a
+ * delay. */
+ list_splice_init(&con->out_sent, &con->out_queue);
+ mutex_unlock(&con->out_mutex);
+
+ if (con->delay == 0)
+ con->delay = BASE_DELAY_INTERVAL;
+ else if (con->delay < MAX_DELAY_INTERVAL)
+ con->delay *= 2;
+
+ /* explicitly schedule work to try to reconnect again later. */
+ dout("fault queueing %p delay %lu\n", con, con->delay);
+ con->ops->get(con);
+ if (queue_delayed_work(ceph_msgr_wq, &con->work,
+ round_jiffies_relative(con->delay)) == 0)
+ con->ops->put(con);
+
+out:
+ if (con->ops->fault)
+ con->ops->fault(con);
+}
+
+
+
+/*
+ * create a new messenger instance
+ */
+struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
+{
+ struct ceph_messenger *msgr;
+
+ msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
+ if (msgr == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&msgr->global_seq_lock);
+
+ /* the zero page is needed if a request is "canceled" while the message
+ * is being written over the socket */
+ msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!msgr->zero_page) {
+ kfree(msgr);
+ return ERR_PTR(-ENOMEM);
+ }
+ kmap(msgr->zero_page);
+
+ if (myaddr)
+ msgr->inst.addr = *myaddr;
+
+ /* select a random nonce */
+ get_random_bytes(&msgr->inst.addr.nonce,
+ sizeof(msgr->inst.addr.nonce));
+ encode_my_addr(msgr);
+
+ dout("messenger_create %p\n", msgr);
+ return msgr;
+}
+
+void ceph_messenger_destroy(struct ceph_messenger *msgr)
+{
+ dout("destroy %p\n", msgr);
+ kunmap(msgr->zero_page);
+ __free_page(msgr->zero_page);
+ kfree(msgr);
+ dout("destroyed messenger %p\n", msgr);
+}
+
+/*
+ * Queue up an outgoing message on the given connection.
+ */
+void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ if (test_bit(CLOSED, &con->state)) {
+ dout("con_send %p closed, dropping %p\n", con, msg);
+ ceph_msg_put(msg);
+ return;
+ }
+
+ /* set src+dst */
+ msg->hdr.src.name = con->msgr->inst.name;
+ msg->hdr.src.addr = con->msgr->my_enc_addr;
+ msg->hdr.orig_src = msg->hdr.src;
+ msg->hdr.dst_erank = con->peer_addr.erank;
+
+ /* queue */
+ mutex_lock(&con->out_mutex);
+ BUG_ON(!list_empty(&msg->list_head));
+ list_add_tail(&msg->list_head, &con->out_queue);
+ dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
+ ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
+ ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
+ le32_to_cpu(msg->hdr.front_len),
+ le32_to_cpu(msg->hdr.middle_len),
+ le32_to_cpu(msg->hdr.data_len));
+ mutex_unlock(&con->out_mutex);
+
+ /* if there wasn't anything waiting to send before, queue
+ * new work */
+ if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
+ queue_con(con);
+}
+
+/*
+ * Revoke a message that was previously queued for send
+ */
+void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ mutex_lock(&con->out_mutex);
+ if (!list_empty(&msg->list_head)) {
+ dout("con_revoke %p msg %p\n", con, msg);
+ list_del_init(&msg->list_head);
+ ceph_msg_put(msg);
+ msg->hdr.seq = 0;
+ if (con->out_msg == msg)
+ con->out_msg = NULL;
+ if (con->out_kvec_is_msg) {
+ con->out_skip = con->out_kvec_bytes;
+ con->out_kvec_is_msg = false;
+ }
+ } else {
+ dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
+ }
+ mutex_unlock(&con->out_mutex);
+}
+
+/*
+ * Queue a keepalive byte to ensure the tcp connection is alive.
+ */
+void ceph_con_keepalive(struct ceph_connection *con)
+{
+ if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
+ test_and_set_bit(WRITE_PENDING, &con->state) == 0)
+ queue_con(con);
+}
+
+
+/*
+ * construct a new message with given type, size
+ * the new msg has a ref count of 1.
+ */
+struct ceph_msg *ceph_msg_new(int type, int front_len,
+ int page_len, int page_off, struct page **pages)
+{
+ struct ceph_msg *m;
+
+ m = kmalloc(sizeof(*m), GFP_NOFS);
+ if (m == NULL)
+ goto out;
+ kref_init(&m->kref);
+ INIT_LIST_HEAD(&m->list_head);
+
+ m->hdr.type = cpu_to_le16(type);
+ m->hdr.front_len = cpu_to_le32(front_len);
+ m->hdr.middle_len = 0;
+ m->hdr.data_len = cpu_to_le32(page_len);
+ m->hdr.data_off = cpu_to_le16(page_off);
+ m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
+ m->footer.front_crc = 0;
+ m->footer.middle_crc = 0;
+ m->footer.data_crc = 0;
+ m->front_max = front_len;
+ m->front_is_vmalloc = false;
+ m->more_to_follow = false;
+ m->pool = NULL;
+
+ /* front */
+ if (front_len) {
+ if (front_len > PAGE_CACHE_SIZE) {
+ m->front.iov_base = __vmalloc(front_len, GFP_NOFS,
+ PAGE_KERNEL);
+ m->front_is_vmalloc = true;
+ } else {
+ m->front.iov_base = kmalloc(front_len, GFP_NOFS);
+ }
+ if (m->front.iov_base == NULL) {
+ pr_err("msg_new can't allocate %d bytes\n",
+ front_len);
+ goto out2;
+ }
+ } else {
+ m->front.iov_base = NULL;
+ }
+ m->front.iov_len = front_len;
+
+ /* middle */
+ m->middle = NULL;
+
+ /* data */
+ m->nr_pages = calc_pages_for(page_off, page_len);
+ m->pages = pages;
+
+ dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
+ m->nr_pages);
+ return m;
+
+out2:
+ ceph_msg_put(m);
+out:
+ pr_err("msg_new can't create type %d len %d\n", type, front_len);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * Generic message allocator, for incoming messages.
+ */
+struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
+ struct ceph_msg_header *hdr)
+{
+ int type = le16_to_cpu(hdr->type);
+ int front_len = le32_to_cpu(hdr->front_len);
+ struct ceph_msg *msg = ceph_msg_new(type, front_len, 0, 0, NULL);
+
+ if (!msg) {
+ pr_err("unable to allocate msg type %d len %d\n",
+ type, front_len);
+ return ERR_PTR(-ENOMEM);
+ }
+ return msg;
+}
+
+/*
+ * Allocate "middle" portion of a message, if it is needed and wasn't
+ * allocated by alloc_msg. This allows us to read a small fixed-size
+ * per-type header in the front and then gracefully fail (i.e.,
+ * propagate the error to the caller based on info in the front) when
+ * the middle is too large.
+ */
+int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ int type = le16_to_cpu(msg->hdr.type);
+ int middle_len = le32_to_cpu(msg->hdr.middle_len);
+
+ dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
+ ceph_msg_type_name(type), middle_len);
+ BUG_ON(!middle_len);
+ BUG_ON(msg->middle);
+
+ msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
+ if (!msg->middle)
+ return -ENOMEM;
+ return 0;
+}
+
+
+/*
+ * Free a generically kmalloc'd message.
+ */
+void ceph_msg_kfree(struct ceph_msg *m)
+{
+ dout("msg_kfree %p\n", m);
+ if (m->front_is_vmalloc)
+ vfree(m->front.iov_base);
+ else
+ kfree(m->front.iov_base);
+ kfree(m);
+}
+
+/*
+ * Drop a msg ref. Destroy as needed.
+ */
+void ceph_msg_last_put(struct kref *kref)
+{
+ struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
+
+ dout("ceph_msg_put last one on %p\n", m);
+ WARN_ON(!list_empty(&m->list_head));
+
+ /* drop middle, data, if any */
+ if (m->middle) {
+ ceph_buffer_put(m->middle);
+ m->middle = NULL;
+ }
+ m->nr_pages = 0;
+ m->pages = NULL;
+
+ if (m->pool)
+ ceph_msgpool_put(m->pool, m);
+ else
+ ceph_msg_kfree(m);
+}
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
new file mode 100644
index 000000000000..981b7c08ad82
--- /dev/null
+++ b/fs/ceph/messenger.h
@@ -0,0 +1,256 @@
+#ifndef __FS_CEPH_MESSENGER_H
+#define __FS_CEPH_MESSENGER_H
+
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/net.h>
+#include <linux/radix-tree.h>
+#include <linux/uio.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+
+#include "types.h"
+#include "buffer.h"
+
+struct ceph_msg;
+struct ceph_connection;
+
+extern struct workqueue_struct *ceph_msgr_wq; /* receive work queue */
+
+/*
+ * Ceph defines these callbacks for handling connection events.
+ */
+struct ceph_connection_operations {
+ struct ceph_connection *(*get)(struct ceph_connection *);
+ void (*put)(struct ceph_connection *);
+
+ /* handle an incoming message. */
+ void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
+
+ /* authorize an outgoing connection */
+ int (*get_authorizer) (struct ceph_connection *con,
+ void **buf, int *len, int *proto,
+ void **reply_buf, int *reply_len, int force_new);
+ int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
+
+ /* protocol version mismatch */
+ void (*bad_proto) (struct ceph_connection *con);
+
+ /* there was some error on the socket (disconnect, whatever) */
+ void (*fault) (struct ceph_connection *con);
+
+ /* a remote host as terminated a message exchange session, and messages
+ * we sent (or they tried to send us) may be lost. */
+ void (*peer_reset) (struct ceph_connection *con);
+
+ struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
+ struct ceph_msg_header *hdr);
+ int (*alloc_middle) (struct ceph_connection *con,
+ struct ceph_msg *msg);
+ /* an incoming message has a data payload; tell me what pages I
+ * should read the data into. */
+ int (*prepare_pages) (struct ceph_connection *con, struct ceph_msg *m,
+ int want);
+};
+
+extern const char *ceph_name_type_str(int t);
+
+/* use format string %s%d */
+#define ENTITY_NAME(n) ceph_name_type_str((n).type), le64_to_cpu((n).num)
+
+struct ceph_messenger {
+ struct ceph_entity_inst inst; /* my name+address */
+ struct ceph_entity_addr my_enc_addr;
+ struct page *zero_page; /* used in certain error cases */
+
+ bool nocrc;
+
+ /*
+ * the global_seq counts connections i (attempt to) initiate
+ * in order to disambiguate certain connect race conditions.
+ */
+ u32 global_seq;
+ spinlock_t global_seq_lock;
+};
+
+/*
+ * a single message. it contains a header (src, dest, message type, etc.),
+ * footer (crc values, mainly), a "front" message body, and possibly a
+ * data payload (stored in some number of pages).
+ */
+struct ceph_msg {
+ struct ceph_msg_header hdr; /* header */
+ struct ceph_msg_footer footer; /* footer */
+ struct kvec front; /* unaligned blobs of message */
+ struct ceph_buffer *middle;
+ struct page **pages; /* data payload. NOT OWNER. */
+ unsigned nr_pages; /* size of page array */
+ struct list_head list_head;
+ struct kref kref;
+ bool front_is_vmalloc;
+ bool more_to_follow;
+ int front_max;
+
+ struct ceph_msgpool *pool;
+};
+
+struct ceph_msg_pos {
+ int page, page_pos; /* which page; offset in page */
+ int data_pos; /* offset in data payload */
+ int did_page_crc; /* true if we've calculated crc for current page */
+};
+
+/* ceph connection fault delay defaults, for exponential backoff */
+#define BASE_DELAY_INTERVAL (HZ/2)
+#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
+
+/*
+ * ceph_connection state bit flags
+ *
+ * QUEUED and BUSY are used together to ensure that only a single
+ * thread is currently opening, reading or writing data to the socket.
+ */
+#define LOSSYTX 0 /* we can close channel or drop messages on errors */
+#define CONNECTING 1
+#define NEGOTIATING 2
+#define KEEPALIVE_PENDING 3
+#define WRITE_PENDING 4 /* we have data ready to send */
+#define QUEUED 5 /* there is work queued on this connection */
+#define BUSY 6 /* work is being done */
+#define STANDBY 8 /* no outgoing messages, socket closed. we keep
+ * the ceph_connection around to maintain shared
+ * state with the peer. */
+#define CLOSED 10 /* we've closed the connection */
+#define SOCK_CLOSED 11 /* socket state changed to closed */
+#define REGISTERED 12 /* connection appears in con_tree */
+#define OPENING 13 /* open connection w/ (possibly new) peer */
+#define DEAD 14 /* dead, about to kfree */
+
+/*
+ * A single connection with another host.
+ *
+ * We maintain a queue of outgoing messages, and some session state to
+ * ensure that we can preserve the lossless, ordered delivery of
+ * messages in the case of a TCP disconnect.
+ */
+struct ceph_connection {
+ void *private;
+ atomic_t nref;
+
+ const struct ceph_connection_operations *ops;
+
+ struct ceph_messenger *msgr;
+ struct socket *sock;
+ unsigned long state; /* connection state (see flags above) */
+ const char *error_msg; /* error message, if any */
+
+ struct ceph_entity_addr peer_addr; /* peer address */
+ struct ceph_entity_name peer_name; /* peer name */
+ struct ceph_entity_addr peer_addr_for_me;
+ u32 connect_seq; /* identify the most recent connection
+ attempt for this connection, client */
+ u32 peer_global_seq; /* peer's global seq for this connection */
+
+ int auth_retry; /* true if we need a newer authorizer */
+ void *auth_reply_buf; /* where to put the authorizer reply */
+ int auth_reply_buf_len;
+
+ /* out queue */
+ struct mutex out_mutex;
+ struct list_head out_queue;
+ struct list_head out_sent; /* sending or sent but unacked */
+ u64 out_seq; /* last message queued for send */
+ u64 out_seq_sent; /* last message sent */
+ bool out_keepalive_pending;
+
+ u64 in_seq, in_seq_acked; /* last message received, acked */
+
+ /* connection negotiation temps */
+ char in_banner[CEPH_BANNER_MAX_LEN];
+ union {
+ struct { /* outgoing connection */
+ struct ceph_msg_connect out_connect;
+ struct ceph_msg_connect_reply in_reply;
+ };
+ struct { /* incoming */
+ struct ceph_msg_connect in_connect;
+ struct ceph_msg_connect_reply out_reply;
+ };
+ };
+ struct ceph_entity_addr actual_peer_addr;
+
+ /* message out temps */
+ struct ceph_msg *out_msg; /* sending message (== tail of
+ out_sent) */
+ struct ceph_msg_pos out_msg_pos;
+
+ struct kvec out_kvec[8], /* sending header/footer data */
+ *out_kvec_cur;
+ int out_kvec_left; /* kvec's left in out_kvec */
+ int out_skip; /* skip this many bytes */
+ int out_kvec_bytes; /* total bytes left */
+ bool out_kvec_is_msg; /* kvec refers to out_msg */
+ int out_more; /* there is more data after the kvecs */
+ __le64 out_temp_ack; /* for writing an ack */
+
+ /* message in temps */
+ struct ceph_msg_header in_hdr;
+ struct ceph_msg *in_msg;
+ struct ceph_msg_pos in_msg_pos;
+ u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
+
+ char in_tag; /* protocol control byte */
+ int in_base_pos; /* bytes read */
+ __le64 in_temp_ack; /* for reading an ack */
+
+ struct delayed_work work; /* send|recv work */
+ unsigned long delay; /* current delay interval */
+};
+
+
+extern const char *pr_addr(const struct sockaddr_storage *ss);
+extern int ceph_parse_ips(const char *c, const char *end,
+ struct ceph_entity_addr *addr,
+ int max_count, int *count);
+
+
+extern int ceph_msgr_init(void);
+extern void ceph_msgr_exit(void);
+
+extern struct ceph_messenger *ceph_messenger_create(
+ struct ceph_entity_addr *myaddr);
+extern void ceph_messenger_destroy(struct ceph_messenger *);
+
+extern void ceph_con_init(struct ceph_messenger *msgr,
+ struct ceph_connection *con);
+extern void ceph_con_open(struct ceph_connection *con,
+ struct ceph_entity_addr *addr);
+extern void ceph_con_close(struct ceph_connection *con);
+extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
+extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg);
+extern void ceph_con_keepalive(struct ceph_connection *con);
+extern struct ceph_connection *ceph_con_get(struct ceph_connection *con);
+extern void ceph_con_put(struct ceph_connection *con);
+
+extern struct ceph_msg *ceph_msg_new(int type, int front_len,
+ int page_len, int page_off,
+ struct page **pages);
+extern void ceph_msg_kfree(struct ceph_msg *m);
+
+extern struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
+ struct ceph_msg_header *hdr);
+extern int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg);
+
+
+static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
+{
+ kref_get(&msg->kref);
+ return msg;
+}
+extern void ceph_msg_last_put(struct kref *kref);
+static inline void ceph_msg_put(struct ceph_msg *msg)
+{
+ kref_put(&msg->kref, ceph_msg_last_put);
+}
+
+#endif
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
new file mode 100644
index 000000000000..1dd0dc258c50
--- /dev/null
+++ b/fs/ceph/mon_client.c
@@ -0,0 +1,751 @@
+#include "ceph_debug.h"
+
+#include <linux/types.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+
+#include "mon_client.h"
+#include "super.h"
+#include "auth.h"
+#include "decode.h"
+
+/*
+ * Interact with Ceph monitor cluster. Handle requests for new map
+ * versions, and periodically resend as needed. Also implement
+ * statfs() and umount().
+ *
+ * A small cluster of Ceph "monitors" are responsible for managing critical
+ * cluster configuration and state information. An odd number (e.g., 3, 5)
+ * of cmon daemons use a modified version of the Paxos part-time parliament
+ * algorithm to manage the MDS map (mds cluster membership), OSD map, and
+ * list of clients who have mounted the file system.
+ *
+ * We maintain an open, active session with a monitor at all times in order to
+ * receive timely MDSMap updates. We periodically send a keepalive byte on the
+ * TCP socket to ensure we detect a failure. If the connection does break, we
+ * randomly hunt for a new monitor. Once the connection is reestablished, we
+ * resend any outstanding requests.
+ */
+
+const static struct ceph_connection_operations mon_con_ops;
+
+/*
+ * Decode a monmap blob (e.g., during mount).
+ */
+struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
+{
+ struct ceph_monmap *m = NULL;
+ int i, err = -EINVAL;
+ struct ceph_fsid fsid;
+ u32 epoch, num_mon;
+ u16 version;
+ u32 len;
+
+ ceph_decode_32_safe(&p, end, len, bad);
+ ceph_decode_need(&p, end, len, bad);
+
+ dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
+
+ ceph_decode_16_safe(&p, end, version, bad);
+
+ ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
+ ceph_decode_copy(&p, &fsid, sizeof(fsid));
+ epoch = ceph_decode_32(&p);
+
+ num_mon = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);
+
+ if (num_mon >= CEPH_MAX_MON)
+ goto bad;
+ m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
+ if (m == NULL)
+ return ERR_PTR(-ENOMEM);
+ m->fsid = fsid;
+ m->epoch = epoch;
+ m->num_mon = num_mon;
+ ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
+ for (i = 0; i < num_mon; i++)
+ ceph_decode_addr(&m->mon_inst[i].addr);
+
+ dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
+ m->num_mon);
+ for (i = 0; i < m->num_mon; i++)
+ dout("monmap_decode mon%d is %s\n", i,
+ pr_addr(&m->mon_inst[i].addr.in_addr));
+ return m;
+
+bad:
+ dout("monmap_decode failed with %d\n", err);
+ kfree(m);
+ return ERR_PTR(err);
+}
+
+/*
+ * return true if *addr is included in the monmap.
+ */
+int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
+{
+ int i;
+
+ for (i = 0; i < m->num_mon; i++)
+ if (ceph_entity_addr_equal(addr, &m->mon_inst[i].addr))
+ return 1;
+ return 0;
+}
+
+/*
+ * Close monitor session, if any.
+ */
+static void __close_session(struct ceph_mon_client *monc)
+{
+ if (monc->con) {
+ dout("__close_session closing mon%d\n", monc->cur_mon);
+ ceph_con_revoke(monc->con, monc->m_auth);
+ ceph_con_close(monc->con);
+ monc->cur_mon = -1;
+ ceph_auth_reset(monc->auth);
+ }
+}
+
+/*
+ * Open a session with a (new) monitor.
+ */
+static int __open_session(struct ceph_mon_client *monc)
+{
+ char r;
+ int ret;
+
+ if (monc->cur_mon < 0) {
+ get_random_bytes(&r, 1);
+ monc->cur_mon = r % monc->monmap->num_mon;
+ dout("open_session num=%d r=%d -> mon%d\n",
+ monc->monmap->num_mon, r, monc->cur_mon);
+ monc->sub_sent = 0;
+ monc->sub_renew_after = jiffies; /* i.e., expired */
+ monc->want_next_osdmap = !!monc->want_next_osdmap;
+
+ dout("open_session mon%d opening\n", monc->cur_mon);
+ monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
+ monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
+ ceph_con_open(monc->con,
+ &monc->monmap->mon_inst[monc->cur_mon].addr);
+
+ /* initiatiate authentication handshake */
+ ret = ceph_auth_build_hello(monc->auth,
+ monc->m_auth->front.iov_base,
+ monc->m_auth->front_max);
+ monc->m_auth->front.iov_len = ret;
+ monc->m_auth->hdr.front_len = cpu_to_le32(ret);
+ ceph_msg_get(monc->m_auth); /* keep our ref */
+ ceph_con_send(monc->con, monc->m_auth);
+ } else {
+ dout("open_session mon%d already open\n", monc->cur_mon);
+ }
+ return 0;
+}
+
+static bool __sub_expired(struct ceph_mon_client *monc)
+{
+ return time_after_eq(jiffies, monc->sub_renew_after);
+}
+
+/*
+ * Reschedule delayed work timer.
+ */
+static void __schedule_delayed(struct ceph_mon_client *monc)
+{
+ unsigned delay;
+
+ if (monc->cur_mon < 0 || __sub_expired(monc))
+ delay = 10 * HZ;
+ else
+ delay = 20 * HZ;
+ dout("__schedule_delayed after %u\n", delay);
+ schedule_delayed_work(&monc->delayed_work, delay);
+}
+
+/*
+ * Send subscribe request for mdsmap and/or osdmap.
+ */
+static void __send_subscribe(struct ceph_mon_client *monc)
+{
+ dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
+ (unsigned)monc->sub_sent, __sub_expired(monc),
+ monc->want_next_osdmap);
+ if ((__sub_expired(monc) && !monc->sub_sent) ||
+ monc->want_next_osdmap == 1) {
+ struct ceph_msg *msg;
+ struct ceph_mon_subscribe_item *i;
+ void *p, *end;
+
+ msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, 0, 0, NULL);
+ if (!msg)
+ return;
+
+ p = msg->front.iov_base;
+ end = p + msg->front.iov_len;
+
+ dout("__send_subscribe to 'mdsmap' %u+\n",
+ (unsigned)monc->have_mdsmap);
+ if (monc->want_next_osdmap) {
+ dout("__send_subscribe to 'osdmap' %u\n",
+ (unsigned)monc->have_osdmap);
+ ceph_encode_32(&p, 3);
+ ceph_encode_string(&p, end, "osdmap", 6);
+ i = p;
+ i->have = cpu_to_le64(monc->have_osdmap);
+ i->onetime = 1;
+ p += sizeof(*i);
+ monc->want_next_osdmap = 2; /* requested */
+ } else {
+ ceph_encode_32(&p, 2);
+ }
+ ceph_encode_string(&p, end, "mdsmap", 6);
+ i = p;
+ i->have = cpu_to_le64(monc->have_mdsmap);
+ i->onetime = 0;
+ p += sizeof(*i);
+ ceph_encode_string(&p, end, "monmap", 6);
+ i = p;
+ i->have = 0;
+ i->onetime = 0;
+ p += sizeof(*i);
+
+ msg->front.iov_len = p - msg->front.iov_base;
+ msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
+ ceph_con_send(monc->con, msg);
+
+ monc->sub_sent = jiffies | 1; /* never 0 */
+ }
+}
+
+static void handle_subscribe_ack(struct ceph_mon_client *monc,
+ struct ceph_msg *msg)
+{
+ unsigned seconds;
+ struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
+
+ if (msg->front.iov_len < sizeof(*h))
+ goto bad;
+ seconds = le32_to_cpu(h->duration);
+
+ mutex_lock(&monc->mutex);
+ if (monc->hunting) {
+ pr_info("mon%d %s session established\n",
+ monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
+ monc->hunting = false;
+ }
+ dout("handle_subscribe_ack after %d seconds\n", seconds);
+ monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
+ monc->sub_sent = 0;
+ mutex_unlock(&monc->mutex);
+ return;
+bad:
+ pr_err("got corrupt subscribe-ack msg\n");
+}
+
+/*
+ * Keep track of which maps we have
+ */
+int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
+{
+ mutex_lock(&monc->mutex);
+ monc->have_mdsmap = got;
+ mutex_unlock(&monc->mutex);
+ return 0;
+}
+
+int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
+{
+ mutex_lock(&monc->mutex);
+ monc->have_osdmap = got;
+ monc->want_next_osdmap = 0;
+ mutex_unlock(&monc->mutex);
+ return 0;
+}
+
+/*
+ * Register interest in the next osdmap
+ */
+void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
+{
+ dout("request_next_osdmap have %u\n", monc->have_osdmap);
+ mutex_lock(&monc->mutex);
+ if (!monc->want_next_osdmap)
+ monc->want_next_osdmap = 1;
+ if (monc->want_next_osdmap < 2)
+ __send_subscribe(monc);
+ mutex_unlock(&monc->mutex);
+}
+
+/*
+ *
+ */
+int ceph_monc_open_session(struct ceph_mon_client *monc)
+{
+ if (!monc->con) {
+ monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
+ if (!monc->con)
+ return -ENOMEM;
+ ceph_con_init(monc->client->msgr, monc->con);
+ monc->con->private = monc;
+ monc->con->ops = &mon_con_ops;
+ }
+
+ mutex_lock(&monc->mutex);
+ __open_session(monc);
+ __schedule_delayed(monc);
+ mutex_unlock(&monc->mutex);
+ return 0;
+}
+
+/*
+ * The monitor responds with mount ack indicate mount success. The
+ * included client ticket allows the client to talk to MDSs and OSDs.
+ */
+static void ceph_monc_handle_map(struct ceph_mon_client *monc,
+ struct ceph_msg *msg)
+{
+ struct ceph_client *client = monc->client;
+ struct ceph_monmap *monmap = NULL, *old = monc->monmap;
+ void *p, *end;
+
+ mutex_lock(&monc->mutex);
+
+ dout("handle_monmap\n");
+ p = msg->front.iov_base;
+ end = p + msg->front.iov_len;
+
+ monmap = ceph_monmap_decode(p, end);
+ if (IS_ERR(monmap)) {
+ pr_err("problem decoding monmap, %d\n",
+ (int)PTR_ERR(monmap));
+ return;
+ }
+
+ if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
+ kfree(monmap);
+ return;
+ }
+
+ client->monc.monmap = monmap;
+ kfree(old);
+
+ mutex_unlock(&monc->mutex);
+ wake_up(&client->mount_wq);
+}
+
+/*
+ * statfs
+ */
+static void handle_statfs_reply(struct ceph_mon_client *monc,
+ struct ceph_msg *msg)
+{
+ struct ceph_mon_statfs_request *req;
+ struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
+ u64 tid;
+
+ if (msg->front.iov_len != sizeof(*reply))
+ goto bad;
+ tid = le64_to_cpu(reply->tid);
+ dout("handle_statfs_reply %p tid %llu\n", msg, tid);
+
+ mutex_lock(&monc->mutex);
+ req = radix_tree_lookup(&monc->statfs_request_tree, tid);
+ if (req) {
+ *req->buf = reply->st;
+ req->result = 0;
+ }
+ mutex_unlock(&monc->mutex);
+ if (req)
+ complete(&req->completion);
+ return;
+
+bad:
+ pr_err("corrupt statfs reply, no tid\n");
+}
+
+/*
+ * (re)send a statfs request
+ */
+static int send_statfs(struct ceph_mon_client *monc,
+ struct ceph_mon_statfs_request *req)
+{
+ struct ceph_msg *msg;
+ struct ceph_mon_statfs *h;
+
+ dout("send_statfs tid %llu\n", req->tid);
+ msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+ req->request = msg;
+ h = msg->front.iov_base;
+ h->monhdr.have_version = 0;
+ h->monhdr.session_mon = cpu_to_le16(-1);
+ h->monhdr.session_mon_tid = 0;
+ h->fsid = monc->monmap->fsid;
+ h->tid = cpu_to_le64(req->tid);
+ ceph_con_send(monc->con, msg);
+ return 0;
+}
+
+/*
+ * Do a synchronous statfs().
+ */
+int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
+{
+ struct ceph_mon_statfs_request req;
+ int err;
+
+ req.buf = buf;
+ init_completion(&req.completion);
+
+ /* allocate memory for reply */
+ err = ceph_msgpool_resv(&monc->msgpool_statfs_reply, 1);
+ if (err)
+ return err;
+
+ /* register request */
+ mutex_lock(&monc->mutex);
+ req.tid = ++monc->last_tid;
+ req.last_attempt = jiffies;
+ req.delay = BASE_DELAY_INTERVAL;
+ if (radix_tree_insert(&monc->statfs_request_tree, req.tid, &req) < 0) {
+ mutex_unlock(&monc->mutex);
+ pr_err("ENOMEM in do_statfs\n");
+ return -ENOMEM;
+ }
+ monc->num_statfs_requests++;
+ mutex_unlock(&monc->mutex);
+
+ /* send request and wait */
+ err = send_statfs(monc, &req);
+ if (!err)
+ err = wait_for_completion_interruptible(&req.completion);
+
+ mutex_lock(&monc->mutex);
+ radix_tree_delete(&monc->statfs_request_tree, req.tid);
+ monc->num_statfs_requests--;
+ ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1);
+ mutex_unlock(&monc->mutex);
+
+ if (!err)
+ err = req.result;
+ return err;
+}
+
+/*
+ * Resend pending statfs requests.
+ */
+static void __resend_statfs(struct ceph_mon_client *monc)
+{
+ u64 next_tid = 0;
+ int got;
+ int did = 0;
+ struct ceph_mon_statfs_request *req;
+
+ while (1) {
+ got = radix_tree_gang_lookup(&monc->statfs_request_tree,
+ (void **)&req,
+ next_tid, 1);
+ if (got == 0)
+ break;
+ did++;
+ next_tid = req->tid + 1;
+
+ send_statfs(monc, req);
+ }
+}
+
+/*
+ * Delayed work. If we haven't mounted yet, retry. Otherwise,
+ * renew/retry subscription as needed (in case it is timing out, or we
+ * got an ENOMEM). And keep the monitor connection alive.
+ */
+static void delayed_work(struct work_struct *work)
+{
+ struct ceph_mon_client *monc =
+ container_of(work, struct ceph_mon_client, delayed_work.work);
+
+ dout("monc delayed_work\n");
+ mutex_lock(&monc->mutex);
+ if (monc->hunting) {
+ __close_session(monc);
+ __open_session(monc); /* continue hunting */
+ } else {
+ ceph_con_keepalive(monc->con);
+ if (monc->auth->ops->is_authenticated(monc->auth))
+ __send_subscribe(monc);
+ }
+ __schedule_delayed(monc);
+ mutex_unlock(&monc->mutex);
+}
+
+/*
+ * On startup, we build a temporary monmap populated with the IPs
+ * provided by mount(2).
+ */
+static int build_initial_monmap(struct ceph_mon_client *monc)
+{
+ struct ceph_mount_args *args = monc->client->mount_args;
+ struct ceph_entity_addr *mon_addr = args->mon_addr;
+ int num_mon = args->num_mon;
+ int i;
+
+ /* build initial monmap */
+ monc->monmap = kzalloc(sizeof(*monc->monmap) +
+ num_mon*sizeof(monc->monmap->mon_inst[0]),
+ GFP_KERNEL);
+ if (!monc->monmap)
+ return -ENOMEM;
+ for (i = 0; i < num_mon; i++) {
+ monc->monmap->mon_inst[i].addr = mon_addr[i];
+ monc->monmap->mon_inst[i].addr.erank = 0;
+ monc->monmap->mon_inst[i].addr.nonce = 0;
+ monc->monmap->mon_inst[i].name.type =
+ CEPH_ENTITY_TYPE_MON;
+ monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
+ }
+ monc->monmap->num_mon = num_mon;
+ monc->have_fsid = false;
+
+ /* release addr memory */
+ kfree(args->mon_addr);
+ args->mon_addr = NULL;
+ args->num_mon = 0;
+ return 0;
+}
+
+int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
+{
+ int err = 0;
+
+ dout("init\n");
+ memset(monc, 0, sizeof(*monc));
+ monc->client = cl;
+ monc->monmap = NULL;
+ mutex_init(&monc->mutex);
+
+ err = build_initial_monmap(monc);
+ if (err)
+ goto out;
+
+ monc->con = NULL;
+
+ /* authentication */
+ monc->auth = ceph_auth_init(cl->mount_args->name,
+ cl->mount_args->secret);
+ if (IS_ERR(monc->auth))
+ return PTR_ERR(monc->auth);
+ monc->auth->want_keys =
+ CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
+ CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
+
+ /* msg pools */
+ err = ceph_msgpool_init(&monc->msgpool_subscribe_ack,
+ sizeof(struct ceph_mon_subscribe_ack), 1, false);
+ if (err < 0)
+ goto out_monmap;
+ err = ceph_msgpool_init(&monc->msgpool_statfs_reply,
+ sizeof(struct ceph_mon_statfs_reply), 0, false);
+ if (err < 0)
+ goto out_pool1;
+ err = ceph_msgpool_init(&monc->msgpool_auth_reply, 4096, 1, false);
+ if (err < 0)
+ goto out_pool2;
+
+ monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL);
+ if (IS_ERR(monc->m_auth)) {
+ err = PTR_ERR(monc->m_auth);
+ monc->m_auth = NULL;
+ goto out_pool3;
+ }
+
+ monc->cur_mon = -1;
+ monc->hunting = true;
+ monc->sub_renew_after = jiffies;
+ monc->sub_sent = 0;
+
+ INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
+ INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_NOFS);
+ monc->num_statfs_requests = 0;
+ monc->last_tid = 0;
+
+ monc->have_mdsmap = 0;
+ monc->have_osdmap = 0;
+ monc->want_next_osdmap = 1;
+ return 0;
+
+out_pool3:
+ ceph_msgpool_destroy(&monc->msgpool_auth_reply);
+out_pool2:
+ ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
+out_pool1:
+ ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
+out_monmap:
+ kfree(monc->monmap);
+out:
+ return err;
+}
+
+void ceph_monc_stop(struct ceph_mon_client *monc)
+{
+ dout("stop\n");
+ cancel_delayed_work_sync(&monc->delayed_work);
+
+ mutex_lock(&monc->mutex);
+ __close_session(monc);
+ if (monc->con) {
+ monc->con->private = NULL;
+ monc->con->ops->put(monc->con);
+ monc->con = NULL;
+ }
+ mutex_unlock(&monc->mutex);
+
+ ceph_auth_destroy(monc->auth);
+
+ ceph_msg_put(monc->m_auth);
+ ceph_msgpool_destroy(&monc->msgpool_subscribe_ack);
+ ceph_msgpool_destroy(&monc->msgpool_statfs_reply);
+ ceph_msgpool_destroy(&monc->msgpool_auth_reply);
+
+ kfree(monc->monmap);
+}
+
+
+static void handle_auth_reply(struct ceph_mon_client *monc,
+ struct ceph_msg *msg)
+{
+ int ret;
+
+ mutex_lock(&monc->mutex);
+ ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
+ msg->front.iov_len,
+ monc->m_auth->front.iov_base,
+ monc->m_auth->front_max);
+ if (ret < 0) {
+ monc->client->mount_err = ret;
+ wake_up(&monc->client->mount_wq);
+ } else if (ret > 0) {
+ monc->m_auth->front.iov_len = ret;
+ monc->m_auth->hdr.front_len = cpu_to_le32(ret);
+ ceph_msg_get(monc->m_auth); /* keep our ref */
+ ceph_con_send(monc->con, monc->m_auth);
+ } else if (monc->auth->ops->is_authenticated(monc->auth)) {
+ dout("authenticated, starting session\n");
+
+ monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
+ monc->client->msgr->inst.name.num = monc->auth->global_id;
+
+ __send_subscribe(monc);
+ __resend_statfs(monc);
+ }
+ mutex_unlock(&monc->mutex);
+}
+
+/*
+ * handle incoming message
+ */
+static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_mon_client *monc = con->private;
+ int type = le16_to_cpu(msg->hdr.type);
+
+ if (!monc)
+ return;
+
+ switch (type) {
+ case CEPH_MSG_AUTH_REPLY:
+ handle_auth_reply(monc, msg);
+ break;
+
+ case CEPH_MSG_MON_SUBSCRIBE_ACK:
+ handle_subscribe_ack(monc, msg);
+ break;
+
+ case CEPH_MSG_STATFS_REPLY:
+ handle_statfs_reply(monc, msg);
+ break;
+
+ case CEPH_MSG_MON_MAP:
+ ceph_monc_handle_map(monc, msg);
+ break;
+
+ case CEPH_MSG_MDS_MAP:
+ ceph_mdsc_handle_map(&monc->client->mdsc, msg);
+ break;
+
+ case CEPH_MSG_OSD_MAP:
+ ceph_osdc_handle_map(&monc->client->osdc, msg);
+ break;
+
+ default:
+ pr_err("received unknown message type %d %s\n", type,
+ ceph_msg_type_name(type));
+ }
+ ceph_msg_put(msg);
+}
+
+/*
+ * Allocate memory for incoming message
+ */
+static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
+ struct ceph_msg_header *hdr)
+{
+ struct ceph_mon_client *monc = con->private;
+ int type = le16_to_cpu(hdr->type);
+ int front = le32_to_cpu(hdr->front_len);
+
+ switch (type) {
+ case CEPH_MSG_MON_SUBSCRIBE_ACK:
+ return ceph_msgpool_get(&monc->msgpool_subscribe_ack, front);
+ case CEPH_MSG_STATFS_REPLY:
+ return ceph_msgpool_get(&monc->msgpool_statfs_reply, front);
+ case CEPH_MSG_AUTH_REPLY:
+ return ceph_msgpool_get(&monc->msgpool_auth_reply, front);
+ }
+ return ceph_alloc_msg(con, hdr);
+}
+
+/*
+ * If the monitor connection resets, pick a new monitor and resubmit
+ * any pending requests.
+ */
+static void mon_fault(struct ceph_connection *con)
+{
+ struct ceph_mon_client *monc = con->private;
+
+ if (!monc)
+ return;
+
+ dout("mon_fault\n");
+ mutex_lock(&monc->mutex);
+ if (!con->private)
+ goto out;
+
+ if (monc->con && !monc->hunting)
+ pr_info("mon%d %s session lost, "
+ "hunting for new mon\n", monc->cur_mon,
+ pr_addr(&monc->con->peer_addr.in_addr));
+
+ __close_session(monc);
+ if (!monc->hunting) {
+ /* start hunting */
+ monc->hunting = true;
+ __open_session(monc);
+ } else {
+ /* already hunting, let's wait a bit */
+ __schedule_delayed(monc);
+ }
+out:
+ mutex_unlock(&monc->mutex);
+}
+
+const static struct ceph_connection_operations mon_con_ops = {
+ .get = ceph_con_get,
+ .put = ceph_con_put,
+ .dispatch = dispatch,
+ .fault = mon_fault,
+ .alloc_msg = mon_alloc_msg,
+ .alloc_middle = ceph_alloc_middle,
+};
diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h
new file mode 100644
index 000000000000..c75b53302ecc
--- /dev/null
+++ b/fs/ceph/mon_client.h
@@ -0,0 +1,115 @@
+#ifndef _FS_CEPH_MON_CLIENT_H
+#define _FS_CEPH_MON_CLIENT_H
+
+#include <linux/completion.h>
+#include <linux/radix-tree.h>
+
+#include "messenger.h"
+#include "msgpool.h"
+
+struct ceph_client;
+struct ceph_mount_args;
+struct ceph_auth_client;
+
+/*
+ * The monitor map enumerates the set of all monitors.
+ */
+struct ceph_monmap {
+ struct ceph_fsid fsid;
+ u32 epoch;
+ u32 num_mon;
+ struct ceph_entity_inst mon_inst[0];
+};
+
+struct ceph_mon_client;
+struct ceph_mon_statfs_request;
+
+
+/*
+ * Generic mechanism for resending monitor requests.
+ */
+typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc,
+ int newmon);
+
+/* a pending monitor request */
+struct ceph_mon_request {
+ struct ceph_mon_client *monc;
+ struct delayed_work delayed_work;
+ unsigned long delay;
+ ceph_monc_request_func_t do_request;
+};
+
+/*
+ * statfs() is done a bit differently because we need to get data back
+ * to the caller
+ */
+struct ceph_mon_statfs_request {
+ u64 tid;
+ int result;
+ struct ceph_statfs *buf;
+ struct completion completion;
+ unsigned long last_attempt, delay; /* jiffies */
+ struct ceph_msg *request; /* original request */
+};
+
+struct ceph_mon_client {
+ struct ceph_client *client;
+ struct ceph_monmap *monmap;
+
+ struct mutex mutex;
+ struct delayed_work delayed_work;
+
+ struct ceph_auth_client *auth;
+ struct ceph_msg *m_auth;
+
+ bool hunting;
+ int cur_mon; /* last monitor i contacted */
+ unsigned long sub_sent, sub_renew_after;
+ struct ceph_connection *con;
+ bool have_fsid;
+
+ /* msg pools */
+ struct ceph_msgpool msgpool_subscribe_ack;
+ struct ceph_msgpool msgpool_statfs_reply;
+ struct ceph_msgpool msgpool_auth_reply;
+
+ /* pending statfs requests */
+ struct radix_tree_root statfs_request_tree;
+ int num_statfs_requests;
+ u64 last_tid;
+
+ /* mds/osd map */
+ int want_next_osdmap; /* 1 = want, 2 = want+asked */
+ u32 have_osdmap, have_mdsmap;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_file;
+#endif
+};
+
+extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
+extern int ceph_monmap_contains(struct ceph_monmap *m,
+ struct ceph_entity_addr *addr);
+
+extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
+extern void ceph_monc_stop(struct ceph_mon_client *monc);
+
+/*
+ * The model here is to indicate that we need a new map of at least
+ * epoch @want, and also call in when we receive a map. We will
+ * periodically rerequest the map from the monitor cluster until we
+ * get what we want.
+ */
+extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have);
+extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have);
+
+extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
+
+extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
+ struct ceph_statfs *buf);
+
+extern int ceph_monc_open_session(struct ceph_mon_client *monc);
+
+
+
+#endif
diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c
new file mode 100644
index 000000000000..ad5482c0267b
--- /dev/null
+++ b/fs/ceph/msgpool.c
@@ -0,0 +1,181 @@
+#include "ceph_debug.h"
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+
+#include "msgpool.h"
+
+/*
+ * We use msg pools to preallocate memory for messages we expect to
+ * receive over the wire, to avoid getting ourselves into OOM
+ * conditions at unexpected times. We take use a few different
+ * strategies:
+ *
+ * - for request/response type interactions, we preallocate the
+ * memory needed for the response when we generate the request.
+ *
+ * - for messages we can receive at any time from the MDS, we preallocate
+ * a pool of messages we can re-use.
+ *
+ * - for writeback, we preallocate some number of messages to use for
+ * requests and their replies, so that we always make forward
+ * progress.
+ *
+ * The msgpool behaves like a mempool_t, but keeps preallocated
+ * ceph_msgs strung together on a list_head instead of using a pointer
+ * vector. This avoids vector reallocation when we adjust the number
+ * of preallocated items (which happens frequently).
+ */
+
+
+/*
+ * Allocate or release as necessary to meet our target pool size.
+ */
+static int __fill_msgpool(struct ceph_msgpool *pool)
+{
+ struct ceph_msg *msg;
+
+ while (pool->num < pool->min) {
+ dout("fill_msgpool %p %d/%d allocating\n", pool, pool->num,
+ pool->min);
+ spin_unlock(&pool->lock);
+ msg = ceph_msg_new(0, pool->front_len, 0, 0, NULL);
+ spin_lock(&pool->lock);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+ msg->pool = pool;
+ list_add(&msg->list_head, &pool->msgs);
+ pool->num++;
+ }
+ while (pool->num > pool->min) {
+ msg = list_first_entry(&pool->msgs, struct ceph_msg, list_head);
+ dout("fill_msgpool %p %d/%d releasing %p\n", pool, pool->num,
+ pool->min, msg);
+ list_del_init(&msg->list_head);
+ pool->num--;
+ ceph_msg_kfree(msg);
+ }
+ return 0;
+}
+
+int ceph_msgpool_init(struct ceph_msgpool *pool,
+ int front_len, int min, bool blocking)
+{
+ int ret;
+
+ dout("msgpool_init %p front_len %d min %d\n", pool, front_len, min);
+ spin_lock_init(&pool->lock);
+ pool->front_len = front_len;
+ INIT_LIST_HEAD(&pool->msgs);
+ pool->num = 0;
+ pool->min = min;
+ pool->blocking = blocking;
+ init_waitqueue_head(&pool->wait);
+
+ spin_lock(&pool->lock);
+ ret = __fill_msgpool(pool);
+ spin_unlock(&pool->lock);
+ return ret;
+}
+
+void ceph_msgpool_destroy(struct ceph_msgpool *pool)
+{
+ dout("msgpool_destroy %p\n", pool);
+ spin_lock(&pool->lock);
+ pool->min = 0;
+ __fill_msgpool(pool);
+ spin_unlock(&pool->lock);
+}
+
+int ceph_msgpool_resv(struct ceph_msgpool *pool, int delta)
+{
+ int ret;
+
+ spin_lock(&pool->lock);
+ dout("msgpool_resv %p delta %d\n", pool, delta);
+ pool->min += delta;
+ ret = __fill_msgpool(pool);
+ spin_unlock(&pool->lock);
+ return ret;
+}
+
+struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool, int front_len)
+{
+ wait_queue_t wait;
+ struct ceph_msg *msg;
+
+ if (front_len && front_len > pool->front_len) {
+ pr_err("msgpool_get pool %p need front %d, pool size is %d\n",
+ pool, front_len, pool->front_len);
+ WARN_ON(1);
+
+ /* try to alloc a fresh message */
+ msg = ceph_msg_new(0, front_len, 0, 0, NULL);
+ if (!IS_ERR(msg))
+ return msg;
+ }
+
+ if (!front_len)
+ front_len = pool->front_len;
+
+ if (pool->blocking) {
+ /* mempool_t behavior; first try to alloc */
+ msg = ceph_msg_new(0, front_len, 0, 0, NULL);
+ if (!IS_ERR(msg))
+ return msg;
+ }
+
+ while (1) {
+ spin_lock(&pool->lock);
+ if (likely(pool->num)) {
+ msg = list_entry(pool->msgs.next, struct ceph_msg,
+ list_head);
+ list_del_init(&msg->list_head);
+ pool->num--;
+ dout("msgpool_get %p got %p, now %d/%d\n", pool, msg,
+ pool->num, pool->min);
+ spin_unlock(&pool->lock);
+ return msg;
+ }
+ pr_err("msgpool_get %p now %d/%d, %s\n", pool, pool->num,
+ pool->min, pool->blocking ? "waiting" : "failing");
+ spin_unlock(&pool->lock);
+
+ if (!pool->blocking) {
+ WARN_ON(1);
+
+ /* maybe we can allocate it now? */
+ msg = ceph_msg_new(0, front_len, 0, 0, NULL);
+ if (!IS_ERR(msg))
+ return msg;
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ init_wait(&wait);
+ prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
+ schedule();
+ finish_wait(&pool->wait, &wait);
+ }
+}
+
+void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
+{
+ spin_lock(&pool->lock);
+ if (pool->num < pool->min) {
+ kref_set(&msg->kref, 1); /* retake a single ref */
+ list_add(&msg->list_head, &pool->msgs);
+ pool->num++;
+ dout("msgpool_put %p reclaim %p, now %d/%d\n", pool, msg,
+ pool->num, pool->min);
+ spin_unlock(&pool->lock);
+ wake_up(&pool->wait);
+ } else {
+ dout("msgpool_put %p drop %p, at %d/%d\n", pool, msg,
+ pool->num, pool->min);
+ spin_unlock(&pool->lock);
+ ceph_msg_kfree(msg);
+ }
+}
diff --git a/fs/ceph/msgpool.h b/fs/ceph/msgpool.h
new file mode 100644
index 000000000000..bc834bfcd720
--- /dev/null
+++ b/fs/ceph/msgpool.h
@@ -0,0 +1,27 @@
+#ifndef _FS_CEPH_MSGPOOL
+#define _FS_CEPH_MSGPOOL
+
+#include "messenger.h"
+
+/*
+ * we use memory pools for preallocating messages we may receive, to
+ * avoid unexpected OOM conditions.
+ */
+struct ceph_msgpool {
+ spinlock_t lock;
+ int front_len; /* preallocated payload size */
+ struct list_head msgs; /* msgs in the pool; each has 1 ref */
+ int num, min; /* cur, min # msgs in the pool */
+ bool blocking;
+ wait_queue_head_t wait;
+};
+
+extern int ceph_msgpool_init(struct ceph_msgpool *pool,
+ int front_len, int size, bool blocking);
+extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
+extern int ceph_msgpool_resv(struct ceph_msgpool *, int delta);
+extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
+ int front_len);
+extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
+
+#endif
diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h
new file mode 100644
index 000000000000..c758e8f8f71b
--- /dev/null
+++ b/fs/ceph/msgr.h
@@ -0,0 +1,167 @@
+#ifndef __MSGR_H
+#define __MSGR_H
+
+/*
+ * Data types for message passing layer used by Ceph.
+ */
+
+#define CEPH_MON_PORT 6789 /* default monitor port */
+
+/*
+ * client-side processes will try to bind to ports in this
+ * range, simply for the benefit of tools like nmap or wireshark
+ * that would like to identify the protocol.
+ */
+#define CEPH_PORT_FIRST 6789
+#define CEPH_PORT_START 6800 /* non-monitors start here */
+#define CEPH_PORT_LAST 6900
+
+/*
+ * tcp connection banner. include a protocol version. and adjust
+ * whenever the wire protocol changes. try to keep this string length
+ * constant.
+ */
+#define CEPH_BANNER "ceph v024"
+#define CEPH_BANNER_MAX_LEN 30
+
+
+/*
+ * Rollover-safe type and comparator for 32-bit sequence numbers.
+ * Comparator returns -1, 0, or 1.
+ */
+typedef __u32 ceph_seq_t;
+
+static inline __s32 ceph_seq_cmp(__u32 a, __u32 b)
+{
+ return (__s32)a - (__s32)b;
+}
+
+
+/*
+ * entity_name -- logical name for a process participating in the
+ * network, e.g. 'mds0' or 'osd3'.
+ */
+struct ceph_entity_name {
+ __u8 type; /* CEPH_ENTITY_TYPE_* */
+ __le64 num;
+} __attribute__ ((packed));
+
+#define CEPH_ENTITY_TYPE_MON 0x01
+#define CEPH_ENTITY_TYPE_MDS 0x02
+#define CEPH_ENTITY_TYPE_OSD 0x04
+#define CEPH_ENTITY_TYPE_CLIENT 0x08
+#define CEPH_ENTITY_TYPE_ADMIN 0x10
+#define CEPH_ENTITY_TYPE_AUTH 0x20
+
+#define CEPH_ENTITY_TYPE_ANY 0xFF
+
+extern const char *ceph_entity_type_name(int type);
+
+/*
+ * entity_addr -- network address
+ */
+struct ceph_entity_addr {
+ __le32 erank; /* entity's rank in process */
+ __le32 nonce; /* unique id for process (e.g. pid) */
+ struct sockaddr_storage in_addr;
+} __attribute__ ((packed));
+
+static inline bool ceph_entity_addr_is_local(const struct ceph_entity_addr *a,
+ const struct ceph_entity_addr *b)
+{
+ return a->nonce == b->nonce &&
+ memcmp(&a->in_addr, &b->in_addr, sizeof(a->in_addr)) == 0;
+}
+
+static inline bool ceph_entity_addr_equal(const struct ceph_entity_addr *a,
+ const struct ceph_entity_addr *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
+struct ceph_entity_inst {
+ struct ceph_entity_name name;
+ struct ceph_entity_addr addr;
+} __attribute__ ((packed));
+
+
+/* used by message exchange protocol */
+#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */
+#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */
+#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing
+ incoming connection */
+#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again
+ with higher cseq */
+#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again
+ with higher gseq */
+#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */
+#define CEPH_MSGR_TAG_MSG 7 /* message */
+#define CEPH_MSGR_TAG_ACK 8 /* message ack */
+#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
+#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
+#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
+
+
+/*
+ * connection negotiation
+ */
+struct ceph_msg_connect {
+ __le32 host_type; /* CEPH_ENTITY_TYPE_* */
+ __le32 global_seq; /* count connections initiated by this host */
+ __le32 connect_seq; /* count connections initiated in this session */
+ __le32 protocol_version;
+ __le32 authorizer_protocol;
+ __le32 authorizer_len;
+ __u8 flags; /* CEPH_MSG_CONNECT_* */
+} __attribute__ ((packed));
+
+struct ceph_msg_connect_reply {
+ __u8 tag;
+ __le32 global_seq;
+ __le32 connect_seq;
+ __le32 protocol_version;
+ __le32 authorizer_len;
+ __u8 flags;
+} __attribute__ ((packed));
+
+#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */
+
+
+/*
+ * message header
+ */
+struct ceph_msg_header {
+ __le64 seq; /* message seq# for this session */
+ __le16 type; /* message type */
+ __le16 priority; /* priority. higher value == higher priority */
+ __le16 version; /* version of message encoding */
+
+ __le32 front_len; /* bytes in main payload */
+ __le32 middle_len;/* bytes in middle payload */
+ __le32 data_len; /* bytes of data payload */
+ __le16 data_off; /* sender: include full offset;
+ receiver: mask against ~PAGE_MASK */
+
+ struct ceph_entity_inst src, orig_src;
+ __le32 dst_erank;
+ __le32 crc; /* header crc32c */
+} __attribute__ ((packed));
+
+#define CEPH_MSG_PRIO_LOW 64
+#define CEPH_MSG_PRIO_DEFAULT 127
+#define CEPH_MSG_PRIO_HIGH 196
+#define CEPH_MSG_PRIO_HIGHEST 255
+
+/*
+ * follows data payload
+ */
+struct ceph_msg_footer {
+ __le32 front_crc, middle_crc, data_crc;
+ __u8 flags;
+} __attribute__ ((packed));
+
+#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
+#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
+
+
+#endif
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
new file mode 100644
index 000000000000..67ef8ab06af4
--- /dev/null
+++ b/fs/ceph/osd_client.c
@@ -0,0 +1,1363 @@
+#include "ceph_debug.h"
+
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "super.h"
+#include "osd_client.h"
+#include "messenger.h"
+#include "decode.h"
+#include "auth.h"
+
+const static struct ceph_connection_operations osd_con_ops;
+
+static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
+
+/*
+ * Implement client access to distributed object storage cluster.
+ *
+ * All data objects are stored within a cluster/cloud of OSDs, or
+ * "object storage devices." (Note that Ceph OSDs have _nothing_ to
+ * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
+ * remote daemons serving up and coordinating consistent and safe
+ * access to storage.
+ *
+ * Cluster membership and the mapping of data objects onto storage devices
+ * are described by the osd map.
+ *
+ * We keep track of pending OSD requests (read, write), resubmit
+ * requests to different OSDs when the cluster topology/data layout
+ * change, or retry the affected requests when the communications
+ * channel with an OSD is reset.
+ */
+
+/*
+ * calculate the mapping of a file extent onto an object, and fill out the
+ * request accordingly. shorten extent as necessary if it crosses an
+ * object boundary.
+ *
+ * fill osd op in request message.
+ */
+static void calc_layout(struct ceph_osd_client *osdc,
+ struct ceph_vino vino, struct ceph_file_layout *layout,
+ u64 off, u64 *plen,
+ struct ceph_osd_request *req)
+{
+ struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
+ struct ceph_osd_op *op = (void *)(reqhead + 1);
+ u64 orig_len = *plen;
+ u64 objoff, objlen; /* extent in object */
+ u64 bno;
+
+ reqhead->snapid = cpu_to_le64(vino.snap);
+
+ /* object extent? */
+ ceph_calc_file_object_mapping(layout, off, plen, &bno,
+ &objoff, &objlen);
+ if (*plen < orig_len)
+ dout(" skipping last %llu, final file extent %llu~%llu\n",
+ orig_len - *plen, off, *plen);
+
+ sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
+ req->r_oid_len = strlen(req->r_oid);
+
+ op->extent.offset = cpu_to_le64(objoff);
+ op->extent.length = cpu_to_le64(objlen);
+ req->r_num_pages = calc_pages_for(off, *plen);
+
+ dout("calc_layout %s (%d) %llu~%llu (%d pages)\n",
+ req->r_oid, req->r_oid_len, objoff, objlen, req->r_num_pages);
+}
+
+
+/*
+ * requests
+ */
+void ceph_osdc_release_request(struct kref *kref)
+{
+ struct ceph_osd_request *req = container_of(kref,
+ struct ceph_osd_request,
+ r_kref);
+
+ if (req->r_request)
+ ceph_msg_put(req->r_request);
+ if (req->r_reply)
+ ceph_msg_put(req->r_reply);
+ if (req->r_own_pages)
+ ceph_release_page_vector(req->r_pages,
+ req->r_num_pages);
+ ceph_put_snap_context(req->r_snapc);
+ if (req->r_mempool)
+ mempool_free(req, req->r_osdc->req_mempool);
+ else
+ kfree(req);
+}
+
+/*
+ * build new request AND message, calculate layout, and adjust file
+ * extent as needed.
+ *
+ * if the file was recently truncated, we include information about its
+ * old and new size so that the object can be updated appropriately. (we
+ * avoid synchronously deleting truncated objects because it's slow.)
+ *
+ * if @do_sync, include a 'startsync' command so that the osd will flush
+ * data quickly.
+ */
+struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
+ struct ceph_file_layout *layout,
+ struct ceph_vino vino,
+ u64 off, u64 *plen,
+ int opcode, int flags,
+ struct ceph_snap_context *snapc,
+ int do_sync,
+ u32 truncate_seq,
+ u64 truncate_size,
+ struct timespec *mtime,
+ bool use_mempool, int num_reply)
+{
+ struct ceph_osd_request *req;
+ struct ceph_msg *msg;
+ struct ceph_osd_request_head *head;
+ struct ceph_osd_op *op;
+ void *p;
+ int do_trunc = truncate_seq && (off + *plen > truncate_size);
+ int num_op = 1 + do_sync + do_trunc;
+ size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
+ int err, i;
+ u64 prevofs;
+
+ if (use_mempool) {
+ req = mempool_alloc(osdc->req_mempool, GFP_NOFS);
+ memset(req, 0, sizeof(*req));
+ } else {
+ req = kzalloc(sizeof(*req), GFP_NOFS);
+ }
+ if (req == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ err = ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply);
+ if (err) {
+ ceph_osdc_put_request(req);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ req->r_osdc = osdc;
+ req->r_mempool = use_mempool;
+ kref_init(&req->r_kref);
+ init_completion(&req->r_completion);
+ init_completion(&req->r_safe_completion);
+ INIT_LIST_HEAD(&req->r_unsafe_item);
+ req->r_flags = flags;
+
+ WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
+
+ /* create message; allow space for oid */
+ msg_size += 40;
+ if (snapc)
+ msg_size += sizeof(u64) * snapc->num_snaps;
+ if (use_mempool)
+ msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
+ else
+ msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, 0, 0, NULL);
+ if (IS_ERR(msg)) {
+ ceph_msgpool_resv(&osdc->msgpool_op_reply, num_reply);
+ ceph_osdc_put_request(req);
+ return ERR_PTR(PTR_ERR(msg));
+ }
+ msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
+ memset(msg->front.iov_base, 0, msg->front.iov_len);
+ head = msg->front.iov_base;
+ op = (void *)(head + 1);
+ p = (void *)(op + num_op);
+
+ req->r_request = msg;
+ req->r_snapc = ceph_get_snap_context(snapc);
+
+ head->client_inc = cpu_to_le32(1); /* always, for now. */
+ head->flags = cpu_to_le32(flags);
+ if (flags & CEPH_OSD_FLAG_WRITE)
+ ceph_encode_timespec(&head->mtime, mtime);
+ head->num_ops = cpu_to_le16(num_op);
+ op->op = cpu_to_le16(opcode);
+
+ /* calculate max write size */
+ calc_layout(osdc, vino, layout, off, plen, req);
+ req->r_file_layout = *layout; /* keep a copy */
+
+ if (flags & CEPH_OSD_FLAG_WRITE) {
+ req->r_request->hdr.data_off = cpu_to_le16(off);
+ req->r_request->hdr.data_len = cpu_to_le32(*plen);
+ op->payload_len = cpu_to_le32(*plen);
+ }
+
+ /* fill in oid */
+ head->object_len = cpu_to_le32(req->r_oid_len);
+ memcpy(p, req->r_oid, req->r_oid_len);
+ p += req->r_oid_len;
+
+ /* additional ops */
+ if (do_trunc) {
+ op++;
+ op->op = cpu_to_le16(opcode == CEPH_OSD_OP_READ ?
+ CEPH_OSD_OP_MASKTRUNC : CEPH_OSD_OP_SETTRUNC);
+ op->trunc.truncate_seq = cpu_to_le32(truncate_seq);
+ prevofs = le64_to_cpu((op-1)->extent.offset);
+ op->trunc.truncate_size = cpu_to_le64(truncate_size -
+ (off-prevofs));
+ }
+ if (do_sync) {
+ op++;
+ op->op = cpu_to_le16(CEPH_OSD_OP_STARTSYNC);
+ }
+ if (snapc) {
+ head->snap_seq = cpu_to_le64(snapc->seq);
+ head->num_snaps = cpu_to_le32(snapc->num_snaps);
+ for (i = 0; i < snapc->num_snaps; i++) {
+ put_unaligned_le64(snapc->snaps[i], p);
+ p += sizeof(u64);
+ }
+ }
+
+ BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
+ return req;
+}
+
+/*
+ * We keep osd requests in an rbtree, sorted by ->r_tid.
+ */
+static void __insert_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *new)
+{
+ struct rb_node **p = &osdc->requests.rb_node;
+ struct rb_node *parent = NULL;
+ struct ceph_osd_request *req = NULL;
+
+ while (*p) {
+ parent = *p;
+ req = rb_entry(parent, struct ceph_osd_request, r_node);
+ if (new->r_tid < req->r_tid)
+ p = &(*p)->rb_left;
+ else if (new->r_tid > req->r_tid)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&new->r_node, parent, p);
+ rb_insert_color(&new->r_node, &osdc->requests);
+}
+
+static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
+ u64 tid)
+{
+ struct ceph_osd_request *req;
+ struct rb_node *n = osdc->requests.rb_node;
+
+ while (n) {
+ req = rb_entry(n, struct ceph_osd_request, r_node);
+ if (tid < req->r_tid)
+ n = n->rb_left;
+ else if (tid > req->r_tid)
+ n = n->rb_right;
+ else
+ return req;
+ }
+ return NULL;
+}
+
+static struct ceph_osd_request *
+__lookup_request_ge(struct ceph_osd_client *osdc,
+ u64 tid)
+{
+ struct ceph_osd_request *req;
+ struct rb_node *n = osdc->requests.rb_node;
+
+ while (n) {
+ req = rb_entry(n, struct ceph_osd_request, r_node);
+ if (tid < req->r_tid) {
+ if (!n->rb_left)
+ return req;
+ n = n->rb_left;
+ } else if (tid > req->r_tid) {
+ n = n->rb_right;
+ } else {
+ return req;
+ }
+ }
+ return NULL;
+}
+
+
+/*
+ * If the osd connection drops, we need to resubmit all requests.
+ */
+static void osd_reset(struct ceph_connection *con)
+{
+ struct ceph_osd *osd = con->private;
+ struct ceph_osd_client *osdc;
+
+ if (!osd)
+ return;
+ dout("osd_reset osd%d\n", osd->o_osd);
+ osdc = osd->o_osdc;
+ osd->o_incarnation++;
+ down_read(&osdc->map_sem);
+ kick_requests(osdc, osd);
+ up_read(&osdc->map_sem);
+}
+
+/*
+ * Track open sessions with osds.
+ */
+static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
+{
+ struct ceph_osd *osd;
+
+ osd = kzalloc(sizeof(*osd), GFP_NOFS);
+ if (!osd)
+ return NULL;
+
+ atomic_set(&osd->o_ref, 1);
+ osd->o_osdc = osdc;
+ INIT_LIST_HEAD(&osd->o_requests);
+ osd->o_incarnation = 1;
+
+ ceph_con_init(osdc->client->msgr, &osd->o_con);
+ osd->o_con.private = osd;
+ osd->o_con.ops = &osd_con_ops;
+ osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
+
+ return osd;
+}
+
+static struct ceph_osd *get_osd(struct ceph_osd *osd)
+{
+ if (atomic_inc_not_zero(&osd->o_ref)) {
+ dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
+ atomic_read(&osd->o_ref));
+ return osd;
+ } else {
+ dout("get_osd %p FAIL\n", osd);
+ return NULL;
+ }
+}
+
+static void put_osd(struct ceph_osd *osd)
+{
+ dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
+ atomic_read(&osd->o_ref) - 1);
+ if (atomic_dec_and_test(&osd->o_ref))
+ kfree(osd);
+}
+
+/*
+ * remove an osd from our map
+ */
+static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+{
+ dout("remove_osd %p\n", osd);
+ BUG_ON(!list_empty(&osd->o_requests));
+ rb_erase(&osd->o_node, &osdc->osds);
+ ceph_con_close(&osd->o_con);
+ put_osd(osd);
+}
+
+/*
+ * reset osd connect
+ */
+static int reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+{
+ int ret = 0;
+
+ dout("reset_osd %p osd%d\n", osd, osd->o_osd);
+ if (list_empty(&osd->o_requests)) {
+ remove_osd(osdc, osd);
+ } else {
+ ceph_con_close(&osd->o_con);
+ ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
+ osd->o_incarnation++;
+ }
+ return ret;
+}
+
+static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
+{
+ struct rb_node **p = &osdc->osds.rb_node;
+ struct rb_node *parent = NULL;
+ struct ceph_osd *osd = NULL;
+
+ while (*p) {
+ parent = *p;
+ osd = rb_entry(parent, struct ceph_osd, o_node);
+ if (new->o_osd < osd->o_osd)
+ p = &(*p)->rb_left;
+ else if (new->o_osd > osd->o_osd)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&new->o_node, parent, p);
+ rb_insert_color(&new->o_node, &osdc->osds);
+}
+
+static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
+{
+ struct ceph_osd *osd;
+ struct rb_node *n = osdc->osds.rb_node;
+
+ while (n) {
+ osd = rb_entry(n, struct ceph_osd, o_node);
+ if (o < osd->o_osd)
+ n = n->rb_left;
+ else if (o > osd->o_osd)
+ n = n->rb_right;
+ else
+ return osd;
+ }
+ return NULL;
+}
+
+
+/*
+ * Register request, assign tid. If this is the first request, set up
+ * the timeout event.
+ */
+static void register_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+{
+ struct ceph_osd_request_head *head = req->r_request->front.iov_base;
+
+ mutex_lock(&osdc->request_mutex);
+ req->r_tid = ++osdc->last_tid;
+ head->tid = cpu_to_le64(req->r_tid);
+
+ dout("register_request %p tid %lld\n", req, req->r_tid);
+ __insert_request(osdc, req);
+ ceph_osdc_get_request(req);
+ osdc->num_requests++;
+
+ req->r_timeout_stamp =
+ jiffies + osdc->client->mount_args->osd_timeout*HZ;
+
+ if (osdc->num_requests == 1) {
+ osdc->timeout_tid = req->r_tid;
+ dout(" timeout on tid %llu at %lu\n", req->r_tid,
+ req->r_timeout_stamp);
+ schedule_delayed_work(&osdc->timeout_work,
+ round_jiffies_relative(req->r_timeout_stamp - jiffies));
+ }
+ mutex_unlock(&osdc->request_mutex);
+}
+
+/*
+ * called under osdc->request_mutex
+ */
+static void __unregister_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+{
+ dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+ rb_erase(&req->r_node, &osdc->requests);
+ osdc->num_requests--;
+
+ if (req->r_osd) {
+ /* make sure the original request isn't in flight. */
+ ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+
+ list_del_init(&req->r_osd_item);
+ if (list_empty(&req->r_osd->o_requests))
+ remove_osd(osdc, req->r_osd);
+ req->r_osd = NULL;
+ }
+
+ ceph_osdc_put_request(req);
+
+ if (req->r_tid == osdc->timeout_tid) {
+ if (osdc->num_requests == 0) {
+ dout("no requests, canceling timeout\n");
+ osdc->timeout_tid = 0;
+ cancel_delayed_work(&osdc->timeout_work);
+ } else {
+ req = rb_entry(rb_first(&osdc->requests),
+ struct ceph_osd_request, r_node);
+ osdc->timeout_tid = req->r_tid;
+ dout("rescheduled timeout on tid %llu at %lu\n",
+ req->r_tid, req->r_timeout_stamp);
+ schedule_delayed_work(&osdc->timeout_work,
+ round_jiffies_relative(req->r_timeout_stamp -
+ jiffies));
+ }
+ }
+}
+
+/*
+ * Cancel a previously queued request message
+ */
+static void __cancel_request(struct ceph_osd_request *req)
+{
+ if (req->r_sent) {
+ ceph_con_revoke(&req->r_osd->o_con, req->r_request);
+ req->r_sent = 0;
+ }
+}
+
+/*
+ * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
+ * (as needed), and set the request r_osd appropriately. If there is
+ * no up osd, set r_osd to NULL.
+ *
+ * Return 0 if unchanged, 1 if changed, or negative on error.
+ *
+ * Caller should hold map_sem for read and request_mutex.
+ */
+static int __map_osds(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+{
+ struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
+ struct ceph_pg pgid;
+ int o = -1;
+ int err;
+ struct ceph_osd *newosd = NULL;
+
+ dout("map_osds %p tid %lld\n", req, req->r_tid);
+ err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
+ &req->r_file_layout, osdc->osdmap);
+ if (err)
+ return err;
+ pgid = reqhead->layout.ol_pgid;
+ o = ceph_calc_pg_primary(osdc->osdmap, pgid);
+
+ if ((req->r_osd && req->r_osd->o_osd == o &&
+ req->r_sent >= req->r_osd->o_incarnation) ||
+ (req->r_osd == NULL && o == -1))
+ return 0; /* no change */
+
+ dout("map_osds tid %llu pgid %d.%x osd%d (was osd%d)\n",
+ req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
+ req->r_osd ? req->r_osd->o_osd : -1);
+
+ if (req->r_osd) {
+ __cancel_request(req);
+ list_del_init(&req->r_osd_item);
+ if (list_empty(&req->r_osd->o_requests)) {
+ /* try to re-use r_osd if possible */
+ newosd = get_osd(req->r_osd);
+ remove_osd(osdc, newosd);
+ }
+ req->r_osd = NULL;
+ }
+
+ req->r_osd = __lookup_osd(osdc, o);
+ if (!req->r_osd && o >= 0) {
+ if (newosd) {
+ req->r_osd = newosd;
+ newosd = NULL;
+ } else {
+ err = -ENOMEM;
+ req->r_osd = create_osd(osdc);
+ if (!req->r_osd)
+ goto out;
+ }
+
+ dout("map_osds osd %p is osd%d\n", req->r_osd, o);
+ req->r_osd->o_osd = o;
+ req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
+ __insert_osd(osdc, req->r_osd);
+
+ ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
+ }
+
+ if (req->r_osd)
+ list_add(&req->r_osd_item, &req->r_osd->o_requests);
+ err = 1; /* osd changed */
+
+out:
+ if (newosd)
+ put_osd(newosd);
+ return err;
+}
+
+/*
+ * caller should hold map_sem (for read) and request_mutex
+ */
+static int __send_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+{
+ struct ceph_osd_request_head *reqhead;
+ int err;
+
+ err = __map_osds(osdc, req);
+ if (err < 0)
+ return err;
+ if (req->r_osd == NULL) {
+ dout("send_request %p no up osds in pg\n", req);
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+ return 0;
+ }
+
+ dout("send_request %p tid %llu to osd%d flags %d\n",
+ req, req->r_tid, req->r_osd->o_osd, req->r_flags);
+
+ reqhead = req->r_request->front.iov_base;
+ reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
+ reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */
+ reqhead->reassert_version = req->r_reassert_version;
+
+ req->r_timeout_stamp = jiffies+osdc->client->mount_args->osd_timeout*HZ;
+
+ ceph_msg_get(req->r_request); /* send consumes a ref */
+ ceph_con_send(&req->r_osd->o_con, req->r_request);
+ req->r_sent = req->r_osd->o_incarnation;
+ return 0;
+}
+
+/*
+ * Timeout callback, called every N seconds when 1 or more osd
+ * requests has been active for more than N seconds. When this
+ * happens, we ping all OSDs with requests who have timed out to
+ * ensure any communications channel reset is detected. Reset the
+ * request timeouts another N seconds in the future as we go.
+ * Reschedule the timeout event another N seconds in future (unless
+ * there are no open requests).
+ */
+static void handle_timeout(struct work_struct *work)
+{
+ struct ceph_osd_client *osdc =
+ container_of(work, struct ceph_osd_client, timeout_work.work);
+ struct ceph_osd_request *req;
+ struct ceph_osd *osd;
+ unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
+ unsigned long next_timeout = timeout + jiffies;
+ struct rb_node *p;
+
+ dout("timeout\n");
+ down_read(&osdc->map_sem);
+
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+
+ mutex_lock(&osdc->request_mutex);
+ for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+ req = rb_entry(p, struct ceph_osd_request, r_node);
+
+ if (req->r_resend) {
+ int err;
+
+ dout("osdc resending prev failed %lld\n", req->r_tid);
+ err = __send_request(osdc, req);
+ if (err)
+ dout("osdc failed again on %lld\n", req->r_tid);
+ else
+ req->r_resend = false;
+ continue;
+ }
+ }
+ for (p = rb_first(&osdc->osds); p; p = rb_next(p)) {
+ osd = rb_entry(p, struct ceph_osd, o_node);
+ if (list_empty(&osd->o_requests))
+ continue;
+ req = list_first_entry(&osd->o_requests,
+ struct ceph_osd_request, r_osd_item);
+ if (time_before(jiffies, req->r_timeout_stamp))
+ continue;
+
+ dout(" tid %llu (at least) timed out on osd%d\n",
+ req->r_tid, osd->o_osd);
+ req->r_timeout_stamp = next_timeout;
+ ceph_con_keepalive(&osd->o_con);
+ }
+
+ if (osdc->timeout_tid)
+ schedule_delayed_work(&osdc->timeout_work,
+ round_jiffies_relative(timeout));
+
+ mutex_unlock(&osdc->request_mutex);
+
+ up_read(&osdc->map_sem);
+}
+
+/*
+ * handle osd op reply. either call the callback if it is specified,
+ * or do the completion to wake up the waiting thread.
+ */
+static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+{
+ struct ceph_osd_reply_head *rhead = msg->front.iov_base;
+ struct ceph_osd_request *req;
+ u64 tid;
+ int numops, object_len, flags;
+
+ if (msg->front.iov_len < sizeof(*rhead))
+ goto bad;
+ tid = le64_to_cpu(rhead->tid);
+ numops = le32_to_cpu(rhead->num_ops);
+ object_len = le32_to_cpu(rhead->object_len);
+ if (msg->front.iov_len != sizeof(*rhead) + object_len +
+ numops * sizeof(struct ceph_osd_op))
+ goto bad;
+ dout("handle_reply %p tid %llu\n", msg, tid);
+
+ /* lookup */
+ mutex_lock(&osdc->request_mutex);
+ req = __lookup_request(osdc, tid);
+ if (req == NULL) {
+ dout("handle_reply tid %llu dne\n", tid);
+ mutex_unlock(&osdc->request_mutex);
+ return;
+ }
+ ceph_osdc_get_request(req);
+ flags = le32_to_cpu(rhead->flags);
+
+ if (req->r_reply) {
+ /*
+ * once we see the message has been received, we don't
+ * need a ref (which is only needed for revoking
+ * pages)
+ */
+ ceph_msg_put(req->r_reply);
+ req->r_reply = NULL;
+ }
+
+ if (!req->r_got_reply) {
+ unsigned bytes;
+
+ req->r_result = le32_to_cpu(rhead->result);
+ bytes = le32_to_cpu(msg->hdr.data_len);
+ dout("handle_reply result %d bytes %d\n", req->r_result,
+ bytes);
+ if (req->r_result == 0)
+ req->r_result = bytes;
+
+ /* in case this is a write and we need to replay, */
+ req->r_reassert_version = rhead->reassert_version;
+
+ req->r_got_reply = 1;
+ } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
+ dout("handle_reply tid %llu dup ack\n", tid);
+ mutex_unlock(&osdc->request_mutex);
+ goto done;
+ }
+
+ dout("handle_reply tid %llu flags %d\n", tid, flags);
+
+ /* either this is a read, or we got the safe response */
+ if ((flags & CEPH_OSD_FLAG_ONDISK) ||
+ ((flags & CEPH_OSD_FLAG_WRITE) == 0))
+ __unregister_request(osdc, req);
+
+ mutex_unlock(&osdc->request_mutex);
+
+ if (req->r_callback)
+ req->r_callback(req, msg);
+ else
+ complete(&req->r_completion);
+
+ if (flags & CEPH_OSD_FLAG_ONDISK) {
+ if (req->r_safe_callback)
+ req->r_safe_callback(req, msg);
+ complete(&req->r_safe_completion); /* fsync waiter */
+ }
+
+done:
+ ceph_osdc_put_request(req);
+ return;
+
+bad:
+ pr_err("corrupt osd_op_reply got %d %d expected %d\n",
+ (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
+ (int)sizeof(*rhead));
+}
+
+
+/*
+ * Resubmit osd requests whose osd or osd address has changed. Request
+ * a new osd map if osds are down, or we are otherwise unable to determine
+ * how to direct a request.
+ *
+ * Close connections to down osds.
+ *
+ * If @who is specified, resubmit requests for that specific osd.
+ *
+ * Caller should hold map_sem for read and request_mutex.
+ */
+static void kick_requests(struct ceph_osd_client *osdc,
+ struct ceph_osd *kickosd)
+{
+ struct ceph_osd_request *req;
+ struct rb_node *p, *n;
+ int needmap = 0;
+ int err;
+
+ dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
+ mutex_lock(&osdc->request_mutex);
+ if (!kickosd) {
+ for (p = rb_first(&osdc->osds); p; p = n) {
+ struct ceph_osd *osd =
+ rb_entry(p, struct ceph_osd, o_node);
+
+ n = rb_next(p);
+ if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
+ !ceph_entity_addr_equal(&osd->o_con.peer_addr,
+ ceph_osd_addr(osdc->osdmap,
+ osd->o_osd)))
+ reset_osd(osdc, osd);
+ }
+ }
+
+ for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
+ req = rb_entry(p, struct ceph_osd_request, r_node);
+
+ if (req->r_resend) {
+ dout(" r_resend set on tid %llu\n", req->r_tid);
+ __cancel_request(req);
+ goto kick;
+ }
+ if (req->r_osd && kickosd == req->r_osd) {
+ __cancel_request(req);
+ goto kick;
+ }
+
+ err = __map_osds(osdc, req);
+ if (err == 0)
+ continue; /* no change */
+ if (err < 0) {
+ /*
+ * FIXME: really, we should set the request
+ * error and fail if this isn't a 'nofail'
+ * request, but that's a fair bit more
+ * complicated to do. So retry!
+ */
+ dout(" setting r_resend on %llu\n", req->r_tid);
+ req->r_resend = true;
+ continue;
+ }
+ if (req->r_osd == NULL) {
+ dout("tid %llu maps to no valid osd\n", req->r_tid);
+ needmap++; /* request a newer map */
+ continue;
+ }
+
+kick:
+ dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
+ req->r_osd->o_osd);
+ req->r_flags |= CEPH_OSD_FLAG_RETRY;
+ err = __send_request(osdc, req);
+ if (err) {
+ dout(" setting r_resend on %llu\n", req->r_tid);
+ req->r_resend = true;
+ }
+ }
+ mutex_unlock(&osdc->request_mutex);
+
+ if (needmap) {
+ dout("%d requests for down osds, need new map\n", needmap);
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+ }
+}
+
+/*
+ * Process updated osd map.
+ *
+ * The message contains any number of incremental and full maps, normally
+ * indicating some sort of topology change in the cluster. Kick requests
+ * off to different OSDs as needed.
+ */
+void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
+{
+ void *p, *end, *next;
+ u32 nr_maps, maplen;
+ u32 epoch;
+ struct ceph_osdmap *newmap = NULL, *oldmap;
+ int err;
+ struct ceph_fsid fsid;
+
+ dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
+ p = msg->front.iov_base;
+ end = p + msg->front.iov_len;
+
+ /* verify fsid */
+ ceph_decode_need(&p, end, sizeof(fsid), bad);
+ ceph_decode_copy(&p, &fsid, sizeof(fsid));
+ if (ceph_check_fsid(osdc->client, &fsid) < 0)
+ return;
+
+ down_write(&osdc->map_sem);
+
+ /* incremental maps */
+ ceph_decode_32_safe(&p, end, nr_maps, bad);
+ dout(" %d inc maps\n", nr_maps);
+ while (nr_maps > 0) {
+ ceph_decode_need(&p, end, 2*sizeof(u32), bad);
+ epoch = ceph_decode_32(&p);
+ maplen = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, maplen, bad);
+ next = p + maplen;
+ if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
+ dout("applying incremental map %u len %d\n",
+ epoch, maplen);
+ newmap = osdmap_apply_incremental(&p, next,
+ osdc->osdmap,
+ osdc->client->msgr);
+ if (IS_ERR(newmap)) {
+ err = PTR_ERR(newmap);
+ goto bad;
+ }
+ if (newmap != osdc->osdmap) {
+ ceph_osdmap_destroy(osdc->osdmap);
+ osdc->osdmap = newmap;
+ }
+ } else {
+ dout("ignoring incremental map %u len %d\n",
+ epoch, maplen);
+ }
+ p = next;
+ nr_maps--;
+ }
+ if (newmap)
+ goto done;
+
+ /* full maps */
+ ceph_decode_32_safe(&p, end, nr_maps, bad);
+ dout(" %d full maps\n", nr_maps);
+ while (nr_maps) {
+ ceph_decode_need(&p, end, 2*sizeof(u32), bad);
+ epoch = ceph_decode_32(&p);
+ maplen = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, maplen, bad);
+ if (nr_maps > 1) {
+ dout("skipping non-latest full map %u len %d\n",
+ epoch, maplen);
+ } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
+ dout("skipping full map %u len %d, "
+ "older than our %u\n", epoch, maplen,
+ osdc->osdmap->epoch);
+ } else {
+ dout("taking full map %u len %d\n", epoch, maplen);
+ newmap = osdmap_decode(&p, p+maplen);
+ if (IS_ERR(newmap)) {
+ err = PTR_ERR(newmap);
+ goto bad;
+ }
+ oldmap = osdc->osdmap;
+ osdc->osdmap = newmap;
+ if (oldmap)
+ ceph_osdmap_destroy(oldmap);
+ }
+ p += maplen;
+ nr_maps--;
+ }
+
+done:
+ downgrade_write(&osdc->map_sem);
+ ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
+ if (newmap)
+ kick_requests(osdc, NULL);
+ up_read(&osdc->map_sem);
+ return;
+
+bad:
+ pr_err("osdc handle_map corrupt msg\n");
+ up_write(&osdc->map_sem);
+ return;
+}
+
+
+/*
+ * A read request prepares specific pages that data is to be read into.
+ * When a message is being read off the wire, we call prepare_pages to
+ * find those pages.
+ * 0 = success, -1 failure.
+ */
+static int prepare_pages(struct ceph_connection *con, struct ceph_msg *m,
+ int want)
+{
+ struct ceph_osd *osd = con->private;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_reply_head *rhead = m->front.iov_base;
+ struct ceph_osd_request *req;
+ u64 tid;
+ int ret = -1;
+ int type = le16_to_cpu(m->hdr.type);
+
+ if (!osd)
+ return -1;
+ osdc = osd->o_osdc;
+
+ dout("prepare_pages on msg %p want %d\n", m, want);
+ if (unlikely(type != CEPH_MSG_OSD_OPREPLY))
+ return -1; /* hmm! */
+
+ tid = le64_to_cpu(rhead->tid);
+ mutex_lock(&osdc->request_mutex);
+ req = __lookup_request(osdc, tid);
+ if (!req) {
+ dout("prepare_pages unknown tid %llu\n", tid);
+ goto out;
+ }
+ dout("prepare_pages tid %llu has %d pages, want %d\n",
+ tid, req->r_num_pages, want);
+ if (likely(req->r_num_pages >= want && !req->r_prepared_pages)) {
+ m->pages = req->r_pages;
+ m->nr_pages = req->r_num_pages;
+ req->r_reply = m; /* only for duration of read over socket */
+ ceph_msg_get(m);
+ req->r_prepared_pages = 1;
+ ret = 0; /* success */
+ }
+out:
+ mutex_unlock(&osdc->request_mutex);
+ return ret;
+}
+
+/*
+ * Register request, send initial attempt.
+ */
+int ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req,
+ bool nofail)
+{
+ int rc = 0;
+
+ req->r_request->pages = req->r_pages;
+ req->r_request->nr_pages = req->r_num_pages;
+
+ register_request(osdc, req);
+
+ down_read(&osdc->map_sem);
+ mutex_lock(&osdc->request_mutex);
+ /*
+ * a racing kick_requests() may have sent the message for us
+ * while we dropped request_mutex above, so only send now if
+ * the request still han't been touched yet.
+ */
+ if (req->r_sent == 0) {
+ rc = __send_request(osdc, req);
+ if (rc) {
+ if (nofail) {
+ dout("osdc_start_request failed send, "
+ " marking %lld\n", req->r_tid);
+ req->r_resend = true;
+ rc = 0;
+ } else {
+ __unregister_request(osdc, req);
+ }
+ }
+ }
+ mutex_unlock(&osdc->request_mutex);
+ up_read(&osdc->map_sem);
+ return rc;
+}
+
+/*
+ * wait for a request to complete
+ */
+int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req)
+{
+ int rc;
+
+ rc = wait_for_completion_interruptible(&req->r_completion);
+ if (rc < 0) {
+ mutex_lock(&osdc->request_mutex);
+ __cancel_request(req);
+ mutex_unlock(&osdc->request_mutex);
+ dout("wait_request tid %llu timed out\n", req->r_tid);
+ return rc;
+ }
+
+ dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
+ return req->r_result;
+}
+
+/*
+ * sync - wait for all in-flight requests to flush. avoid starvation.
+ */
+void ceph_osdc_sync(struct ceph_osd_client *osdc)
+{
+ struct ceph_osd_request *req;
+ u64 last_tid, next_tid = 0;
+
+ mutex_lock(&osdc->request_mutex);
+ last_tid = osdc->last_tid;
+ while (1) {
+ req = __lookup_request_ge(osdc, next_tid);
+ if (!req)
+ break;
+ if (req->r_tid > last_tid)
+ break;
+
+ next_tid = req->r_tid + 1;
+ if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
+ continue;
+
+ ceph_osdc_get_request(req);
+ mutex_unlock(&osdc->request_mutex);
+ dout("sync waiting on tid %llu (last is %llu)\n",
+ req->r_tid, last_tid);
+ wait_for_completion(&req->r_safe_completion);
+ mutex_lock(&osdc->request_mutex);
+ ceph_osdc_put_request(req);
+ }
+ mutex_unlock(&osdc->request_mutex);
+ dout("sync done (thru tid %llu)\n", last_tid);
+}
+
+/*
+ * init, shutdown
+ */
+int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
+{
+ int err;
+
+ dout("init\n");
+ osdc->client = client;
+ osdc->osdmap = NULL;
+ init_rwsem(&osdc->map_sem);
+ init_completion(&osdc->map_waiters);
+ osdc->last_requested_map = 0;
+ mutex_init(&osdc->request_mutex);
+ osdc->timeout_tid = 0;
+ osdc->last_tid = 0;
+ osdc->osds = RB_ROOT;
+ osdc->requests = RB_ROOT;
+ osdc->num_requests = 0;
+ INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
+
+ err = -ENOMEM;
+ osdc->req_mempool = mempool_create_kmalloc_pool(10,
+ sizeof(struct ceph_osd_request));
+ if (!osdc->req_mempool)
+ goto out;
+
+ err = ceph_msgpool_init(&osdc->msgpool_op, 4096, 10, true);
+ if (err < 0)
+ goto out_mempool;
+ err = ceph_msgpool_init(&osdc->msgpool_op_reply, 512, 0, false);
+ if (err < 0)
+ goto out_msgpool;
+ return 0;
+
+out_msgpool:
+ ceph_msgpool_destroy(&osdc->msgpool_op);
+out_mempool:
+ mempool_destroy(osdc->req_mempool);
+out:
+ return err;
+}
+
+void ceph_osdc_stop(struct ceph_osd_client *osdc)
+{
+ cancel_delayed_work_sync(&osdc->timeout_work);
+ if (osdc->osdmap) {
+ ceph_osdmap_destroy(osdc->osdmap);
+ osdc->osdmap = NULL;
+ }
+ mempool_destroy(osdc->req_mempool);
+ ceph_msgpool_destroy(&osdc->msgpool_op);
+ ceph_msgpool_destroy(&osdc->msgpool_op_reply);
+}
+
+/*
+ * Read some contiguous pages. If we cross a stripe boundary, shorten
+ * *plen. Return number of bytes read, or error.
+ */
+int ceph_osdc_readpages(struct ceph_osd_client *osdc,
+ struct ceph_vino vino, struct ceph_file_layout *layout,
+ u64 off, u64 *plen,
+ u32 truncate_seq, u64 truncate_size,
+ struct page **pages, int num_pages)
+{
+ struct ceph_osd_request *req;
+ int rc = 0;
+
+ dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
+ vino.snap, off, *plen);
+ req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
+ CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
+ NULL, 0, truncate_seq, truncate_size, NULL,
+ false, 1);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ /* it may be a short read due to an object boundary */
+ req->r_pages = pages;
+ num_pages = calc_pages_for(off, *plen);
+ req->r_num_pages = num_pages;
+
+ dout("readpages final extent is %llu~%llu (%d pages)\n",
+ off, *plen, req->r_num_pages);
+
+ rc = ceph_osdc_start_request(osdc, req, false);
+ if (!rc)
+ rc = ceph_osdc_wait_request(osdc, req);
+
+ ceph_osdc_put_request(req);
+ dout("readpages result %d\n", rc);
+ return rc;
+}
+
+/*
+ * do a synchronous write on N pages
+ */
+int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
+ struct ceph_file_layout *layout,
+ struct ceph_snap_context *snapc,
+ u64 off, u64 len,
+ u32 truncate_seq, u64 truncate_size,
+ struct timespec *mtime,
+ struct page **pages, int num_pages,
+ int flags, int do_sync, bool nofail)
+{
+ struct ceph_osd_request *req;
+ int rc = 0;
+
+ BUG_ON(vino.snap != CEPH_NOSNAP);
+ req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
+ CEPH_OSD_OP_WRITE,
+ flags | CEPH_OSD_FLAG_ONDISK |
+ CEPH_OSD_FLAG_WRITE,
+ snapc, do_sync,
+ truncate_seq, truncate_size, mtime,
+ nofail, 1);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ /* it may be a short write due to an object boundary */
+ req->r_pages = pages;
+ req->r_num_pages = calc_pages_for(off, len);
+ dout("writepages %llu~%llu (%d pages)\n", off, len,
+ req->r_num_pages);
+
+ rc = ceph_osdc_start_request(osdc, req, nofail);
+ if (!rc)
+ rc = ceph_osdc_wait_request(osdc, req);
+
+ ceph_osdc_put_request(req);
+ if (rc == 0)
+ rc = len;
+ dout("writepages result %d\n", rc);
+ return rc;
+}
+
+/*
+ * handle incoming message
+ */
+static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
+{
+ struct ceph_osd *osd = con->private;
+ struct ceph_osd_client *osdc;
+ int type = le16_to_cpu(msg->hdr.type);
+
+ if (!osd)
+ return;
+ osdc = osd->o_osdc;
+
+ switch (type) {
+ case CEPH_MSG_OSD_MAP:
+ ceph_osdc_handle_map(osdc, msg);
+ break;
+ case CEPH_MSG_OSD_OPREPLY:
+ handle_reply(osdc, msg);
+ break;
+
+ default:
+ pr_err("received unknown message type %d %s\n", type,
+ ceph_msg_type_name(type));
+ }
+ ceph_msg_put(msg);
+}
+
+static struct ceph_msg *alloc_msg(struct ceph_connection *con,
+ struct ceph_msg_header *hdr)
+{
+ struct ceph_osd *osd = con->private;
+ struct ceph_osd_client *osdc = osd->o_osdc;
+ int type = le16_to_cpu(hdr->type);
+ int front = le32_to_cpu(hdr->front_len);
+
+ switch (type) {
+ case CEPH_MSG_OSD_OPREPLY:
+ return ceph_msgpool_get(&osdc->msgpool_op_reply, front);
+ }
+ return ceph_alloc_msg(con, hdr);
+}
+
+/*
+ * Wrappers to refcount containing ceph_osd struct
+ */
+static struct ceph_connection *get_osd_con(struct ceph_connection *con)
+{
+ struct ceph_osd *osd = con->private;
+ if (get_osd(osd))
+ return con;
+ return NULL;
+}
+
+static void put_osd_con(struct ceph_connection *con)
+{
+ struct ceph_osd *osd = con->private;
+ put_osd(osd);
+}
+
+/*
+ * authentication
+ */
+static int get_authorizer(struct ceph_connection *con,
+ void **buf, int *len, int *proto,
+ void **reply_buf, int *reply_len, int force_new)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_osd_client *osdc = o->o_osdc;
+ struct ceph_auth_client *ac = osdc->client->monc.auth;
+ int ret = 0;
+
+ if (force_new && o->o_authorizer) {
+ ac->ops->destroy_authorizer(ac, o->o_authorizer);
+ o->o_authorizer = NULL;
+ }
+ if (o->o_authorizer == NULL) {
+ ret = ac->ops->create_authorizer(
+ ac, CEPH_ENTITY_TYPE_OSD,
+ &o->o_authorizer,
+ &o->o_authorizer_buf,
+ &o->o_authorizer_buf_len,
+ &o->o_authorizer_reply_buf,
+ &o->o_authorizer_reply_buf_len);
+ if (ret)
+ return ret;
+ }
+
+ *proto = ac->protocol;
+ *buf = o->o_authorizer_buf;
+ *len = o->o_authorizer_buf_len;
+ *reply_buf = o->o_authorizer_reply_buf;
+ *reply_len = o->o_authorizer_reply_buf_len;
+ return 0;
+}
+
+
+static int verify_authorizer_reply(struct ceph_connection *con, int len)
+{
+ struct ceph_osd *o = con->private;
+ struct ceph_osd_client *osdc = o->o_osdc;
+ struct ceph_auth_client *ac = osdc->client->monc.auth;
+
+ return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
+}
+
+
+const static struct ceph_connection_operations osd_con_ops = {
+ .get = get_osd_con,
+ .put = put_osd_con,
+ .dispatch = dispatch,
+ .get_authorizer = get_authorizer,
+ .verify_authorizer_reply = verify_authorizer_reply,
+ .alloc_msg = alloc_msg,
+ .fault = osd_reset,
+ .alloc_middle = ceph_alloc_middle,
+ .prepare_pages = prepare_pages,
+};
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h
new file mode 100644
index 000000000000..20ee61847416
--- /dev/null
+++ b/fs/ceph/osd_client.h
@@ -0,0 +1,155 @@
+#ifndef _FS_CEPH_OSD_CLIENT_H
+#define _FS_CEPH_OSD_CLIENT_H
+
+#include <linux/completion.h>
+#include <linux/kref.h>
+#include <linux/mempool.h>
+#include <linux/rbtree.h>
+
+#include "types.h"
+#include "osdmap.h"
+#include "messenger.h"
+
+struct ceph_msg;
+struct ceph_snap_context;
+struct ceph_osd_request;
+struct ceph_osd_client;
+struct ceph_authorizer;
+
+/*
+ * completion callback for async writepages
+ */
+typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
+ struct ceph_msg *);
+
+/* a given osd we're communicating with */
+struct ceph_osd {
+ atomic_t o_ref;
+ struct ceph_osd_client *o_osdc;
+ int o_osd;
+ int o_incarnation;
+ struct rb_node o_node;
+ struct ceph_connection o_con;
+ struct list_head o_requests;
+ struct ceph_authorizer *o_authorizer;
+ void *o_authorizer_buf, *o_authorizer_reply_buf;
+ size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
+};
+
+/* an in-flight request */
+struct ceph_osd_request {
+ u64 r_tid; /* unique for this client */
+ struct rb_node r_node;
+ struct list_head r_osd_item;
+ struct ceph_osd *r_osd;
+
+ struct ceph_msg *r_request, *r_reply;
+ int r_result;
+ int r_flags; /* any additional flags for the osd */
+ u32 r_sent; /* >0 if r_request is sending/sent */
+ int r_prepared_pages, r_got_reply;
+
+ struct ceph_osd_client *r_osdc;
+ struct kref r_kref;
+ bool r_mempool;
+ struct completion r_completion, r_safe_completion;
+ ceph_osdc_callback_t r_callback, r_safe_callback;
+ struct ceph_eversion r_reassert_version;
+ struct list_head r_unsafe_item;
+
+ struct inode *r_inode; /* for use by callbacks */
+ struct writeback_control *r_wbc; /* ditto */
+
+ char r_oid[40]; /* object name */
+ int r_oid_len;
+ unsigned long r_timeout_stamp;
+ bool r_resend; /* msg send failed, needs retry */
+
+ struct ceph_file_layout r_file_layout;
+ struct ceph_snap_context *r_snapc; /* snap context for writes */
+ unsigned r_num_pages; /* size of page array (follows) */
+ struct page **r_pages; /* pages for data payload */
+ int r_pages_from_pool;
+ int r_own_pages; /* if true, i own page list */
+};
+
+struct ceph_osd_client {
+ struct ceph_client *client;
+
+ struct ceph_osdmap *osdmap; /* current map */
+ struct rw_semaphore map_sem;
+ struct completion map_waiters;
+ u64 last_requested_map;
+
+ struct mutex request_mutex;
+ struct rb_root osds; /* osds */
+ u64 timeout_tid; /* tid of timeout triggering rq */
+ u64 last_tid; /* tid of last request */
+ struct rb_root requests; /* pending requests */
+ int num_requests;
+ struct delayed_work timeout_work;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_file;
+#endif
+
+ mempool_t *req_mempool;
+
+ struct ceph_msgpool msgpool_op;
+ struct ceph_msgpool msgpool_op_reply;
+};
+
+extern int ceph_osdc_init(struct ceph_osd_client *osdc,
+ struct ceph_client *client);
+extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
+
+extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
+ struct ceph_msg *msg);
+extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
+ struct ceph_msg *msg);
+
+extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
+ struct ceph_file_layout *layout,
+ struct ceph_vino vino,
+ u64 offset, u64 *len, int op, int flags,
+ struct ceph_snap_context *snapc,
+ int do_sync, u32 truncate_seq,
+ u64 truncate_size,
+ struct timespec *mtime,
+ bool use_mempool, int num_reply);
+
+static inline void ceph_osdc_get_request(struct ceph_osd_request *req)
+{
+ kref_get(&req->r_kref);
+}
+extern void ceph_osdc_release_request(struct kref *kref);
+static inline void ceph_osdc_put_request(struct ceph_osd_request *req)
+{
+ kref_put(&req->r_kref, ceph_osdc_release_request);
+}
+
+extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req,
+ bool nofail);
+extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req);
+extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
+
+extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
+ struct ceph_vino vino,
+ struct ceph_file_layout *layout,
+ u64 off, u64 *plen,
+ u32 truncate_seq, u64 truncate_size,
+ struct page **pages, int nr_pages);
+
+extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
+ struct ceph_vino vino,
+ struct ceph_file_layout *layout,
+ struct ceph_snap_context *sc,
+ u64 off, u64 len,
+ u32 truncate_seq, u64 truncate_size,
+ struct timespec *mtime,
+ struct page **pages, int nr_pages,
+ int flags, int do_sync, bool nofail);
+
+#endif
+
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
new file mode 100644
index 000000000000..8c994c714781
--- /dev/null
+++ b/fs/ceph/osdmap.c
@@ -0,0 +1,916 @@
+
+#include <asm/div64.h>
+
+#include "super.h"
+#include "osdmap.h"
+#include "crush/hash.h"
+#include "crush/mapper.h"
+#include "decode.h"
+#include "ceph_debug.h"
+
+char *ceph_osdmap_state_str(char *str, int len, int state)
+{
+ int flag = 0;
+
+ if (!len)
+ goto done;
+
+ *str = '\0';
+ if (state) {
+ if (state & CEPH_OSD_EXISTS) {
+ snprintf(str, len, "exists");
+ flag = 1;
+ }
+ if (state & CEPH_OSD_UP) {
+ snprintf(str, len, "%s%s%s", str, (flag ? ", " : ""),
+ "up");
+ flag = 1;
+ }
+ } else {
+ snprintf(str, len, "doesn't exist");
+ }
+done:
+ return str;
+}
+
+/* maps */
+
+static int calc_bits_of(unsigned t)
+{
+ int b = 0;
+ while (t) {
+ t = t >> 1;
+ b++;
+ }
+ return b;
+}
+
+/*
+ * the foo_mask is the smallest value 2^n-1 that is >= foo.
+ */
+static void calc_pg_masks(struct ceph_pg_pool_info *pi)
+{
+ pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
+ pi->pgp_num_mask =
+ (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
+ pi->lpg_num_mask =
+ (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
+ pi->lpgp_num_mask =
+ (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
+}
+
+/*
+ * decode crush map
+ */
+static int crush_decode_uniform_bucket(void **p, void *end,
+ struct crush_bucket_uniform *b)
+{
+ dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
+ ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
+ b->item_weight = ceph_decode_32(p);
+ return 0;
+bad:
+ return -EINVAL;
+}
+
+static int crush_decode_list_bucket(void **p, void *end,
+ struct crush_bucket_list *b)
+{
+ int j;
+ dout("crush_decode_list_bucket %p to %p\n", *p, end);
+ b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
+ if (b->item_weights == NULL)
+ return -ENOMEM;
+ b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
+ if (b->sum_weights == NULL)
+ return -ENOMEM;
+ ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
+ for (j = 0; j < b->h.size; j++) {
+ b->item_weights[j] = ceph_decode_32(p);
+ b->sum_weights[j] = ceph_decode_32(p);
+ }
+ return 0;
+bad:
+ return -EINVAL;
+}
+
+static int crush_decode_tree_bucket(void **p, void *end,
+ struct crush_bucket_tree *b)
+{
+ int j;
+ dout("crush_decode_tree_bucket %p to %p\n", *p, end);
+ ceph_decode_32_safe(p, end, b->num_nodes, bad);
+ b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
+ if (b->node_weights == NULL)
+ return -ENOMEM;
+ ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
+ for (j = 0; j < b->num_nodes; j++)
+ b->node_weights[j] = ceph_decode_32(p);
+ return 0;
+bad:
+ return -EINVAL;
+}
+
+static int crush_decode_straw_bucket(void **p, void *end,
+ struct crush_bucket_straw *b)
+{
+ int j;
+ dout("crush_decode_straw_bucket %p to %p\n", *p, end);
+ b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
+ if (b->item_weights == NULL)
+ return -ENOMEM;
+ b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
+ if (b->straws == NULL)
+ return -ENOMEM;
+ ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
+ for (j = 0; j < b->h.size; j++) {
+ b->item_weights[j] = ceph_decode_32(p);
+ b->straws[j] = ceph_decode_32(p);
+ }
+ return 0;
+bad:
+ return -EINVAL;
+}
+
+static struct crush_map *crush_decode(void *pbyval, void *end)
+{
+ struct crush_map *c;
+ int err = -EINVAL;
+ int i, j;
+ void **p = &pbyval;
+ void *start = pbyval;
+ u32 magic;
+
+ dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
+
+ c = kzalloc(sizeof(*c), GFP_NOFS);
+ if (c == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ceph_decode_need(p, end, 4*sizeof(u32), bad);
+ magic = ceph_decode_32(p);
+ if (magic != CRUSH_MAGIC) {
+ pr_err("crush_decode magic %x != current %x\n",
+ (unsigned)magic, (unsigned)CRUSH_MAGIC);
+ goto bad;
+ }
+ c->max_buckets = ceph_decode_32(p);
+ c->max_rules = ceph_decode_32(p);
+ c->max_devices = ceph_decode_32(p);
+
+ c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
+ if (c->device_parents == NULL)
+ goto badmem;
+ c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
+ if (c->bucket_parents == NULL)
+ goto badmem;
+
+ c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
+ if (c->buckets == NULL)
+ goto badmem;
+ c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
+ if (c->rules == NULL)
+ goto badmem;
+
+ /* buckets */
+ for (i = 0; i < c->max_buckets; i++) {
+ int size = 0;
+ u32 alg;
+ struct crush_bucket *b;
+
+ ceph_decode_32_safe(p, end, alg, bad);
+ if (alg == 0) {
+ c->buckets[i] = NULL;
+ continue;
+ }
+ dout("crush_decode bucket %d off %x %p to %p\n",
+ i, (int)(*p-start), *p, end);
+
+ switch (alg) {
+ case CRUSH_BUCKET_UNIFORM:
+ size = sizeof(struct crush_bucket_uniform);
+ break;
+ case CRUSH_BUCKET_LIST:
+ size = sizeof(struct crush_bucket_list);
+ break;
+ case CRUSH_BUCKET_TREE:
+ size = sizeof(struct crush_bucket_tree);
+ break;
+ case CRUSH_BUCKET_STRAW:
+ size = sizeof(struct crush_bucket_straw);
+ break;
+ default:
+ goto bad;
+ }
+ BUG_ON(size == 0);
+ b = c->buckets[i] = kzalloc(size, GFP_NOFS);
+ if (b == NULL)
+ goto badmem;
+
+ ceph_decode_need(p, end, 4*sizeof(u32), bad);
+ b->id = ceph_decode_32(p);
+ b->type = ceph_decode_16(p);
+ b->alg = ceph_decode_8(p);
+ b->hash = ceph_decode_8(p);
+ b->weight = ceph_decode_32(p);
+ b->size = ceph_decode_32(p);
+
+ dout("crush_decode bucket size %d off %x %p to %p\n",
+ b->size, (int)(*p-start), *p, end);
+
+ b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
+ if (b->items == NULL)
+ goto badmem;
+ b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
+ if (b->perm == NULL)
+ goto badmem;
+ b->perm_n = 0;
+
+ ceph_decode_need(p, end, b->size*sizeof(u32), bad);
+ for (j = 0; j < b->size; j++)
+ b->items[j] = ceph_decode_32(p);
+
+ switch (b->alg) {
+ case CRUSH_BUCKET_UNIFORM:
+ err = crush_decode_uniform_bucket(p, end,
+ (struct crush_bucket_uniform *)b);
+ if (err < 0)
+ goto bad;
+ break;
+ case CRUSH_BUCKET_LIST:
+ err = crush_decode_list_bucket(p, end,
+ (struct crush_bucket_list *)b);
+ if (err < 0)
+ goto bad;
+ break;
+ case CRUSH_BUCKET_TREE:
+ err = crush_decode_tree_bucket(p, end,
+ (struct crush_bucket_tree *)b);
+ if (err < 0)
+ goto bad;
+ break;
+ case CRUSH_BUCKET_STRAW:
+ err = crush_decode_straw_bucket(p, end,
+ (struct crush_bucket_straw *)b);
+ if (err < 0)
+ goto bad;
+ break;
+ }
+ }
+
+ /* rules */
+ dout("rule vec is %p\n", c->rules);
+ for (i = 0; i < c->max_rules; i++) {
+ u32 yes;
+ struct crush_rule *r;
+
+ ceph_decode_32_safe(p, end, yes, bad);
+ if (!yes) {
+ dout("crush_decode NO rule %d off %x %p to %p\n",
+ i, (int)(*p-start), *p, end);
+ c->rules[i] = NULL;
+ continue;
+ }
+
+ dout("crush_decode rule %d off %x %p to %p\n",
+ i, (int)(*p-start), *p, end);
+
+ /* len */
+ ceph_decode_32_safe(p, end, yes, bad);
+#if BITS_PER_LONG == 32
+ if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
+ goto bad;
+#endif
+ r = c->rules[i] = kmalloc(sizeof(*r) +
+ yes*sizeof(struct crush_rule_step),
+ GFP_NOFS);
+ if (r == NULL)
+ goto badmem;
+ dout(" rule %d is at %p\n", i, r);
+ r->len = yes;
+ ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
+ ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
+ for (j = 0; j < r->len; j++) {
+ r->steps[j].op = ceph_decode_32(p);
+ r->steps[j].arg1 = ceph_decode_32(p);
+ r->steps[j].arg2 = ceph_decode_32(p);
+ }
+ }
+
+ /* ignore trailing name maps. */
+
+ dout("crush_decode success\n");
+ return c;
+
+badmem:
+ err = -ENOMEM;
+bad:
+ dout("crush_decode fail %d\n", err);
+ crush_destroy(c);
+ return ERR_PTR(err);
+}
+
+
+/*
+ * osd map
+ */
+void ceph_osdmap_destroy(struct ceph_osdmap *map)
+{
+ dout("osdmap_destroy %p\n", map);
+ if (map->crush)
+ crush_destroy(map->crush);
+ while (!RB_EMPTY_ROOT(&map->pg_temp))
+ rb_erase(rb_first(&map->pg_temp), &map->pg_temp);
+ kfree(map->osd_state);
+ kfree(map->osd_weight);
+ kfree(map->pg_pool);
+ kfree(map->osd_addr);
+ kfree(map);
+}
+
+/*
+ * adjust max osd value. reallocate arrays.
+ */
+static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
+{
+ u8 *state;
+ struct ceph_entity_addr *addr;
+ u32 *weight;
+
+ state = kcalloc(max, sizeof(*state), GFP_NOFS);
+ addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
+ weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
+ if (state == NULL || addr == NULL || weight == NULL) {
+ kfree(state);
+ kfree(addr);
+ kfree(weight);
+ return -ENOMEM;
+ }
+
+ /* copy old? */
+ if (map->osd_state) {
+ memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
+ memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
+ memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
+ kfree(map->osd_state);
+ kfree(map->osd_addr);
+ kfree(map->osd_weight);
+ }
+
+ map->osd_state = state;
+ map->osd_weight = weight;
+ map->osd_addr = addr;
+ map->max_osd = max;
+ return 0;
+}
+
+/*
+ * Insert a new pg_temp mapping
+ */
+static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
+{
+ u64 a = *(u64 *)&l;
+ u64 b = *(u64 *)&r;
+
+ if (a < b)
+ return -1;
+ if (a > b)
+ return 1;
+ return 0;
+}
+
+static int __insert_pg_mapping(struct ceph_pg_mapping *new,
+ struct rb_root *root)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct ceph_pg_mapping *pg = NULL;
+ int c;
+
+ while (*p) {
+ parent = *p;
+ pg = rb_entry(parent, struct ceph_pg_mapping, node);
+ c = pgid_cmp(new->pgid, pg->pgid);
+ if (c < 0)
+ p = &(*p)->rb_left;
+ else if (c > 0)
+ p = &(*p)->rb_right;
+ else
+ return -EEXIST;
+ }
+
+ rb_link_node(&new->node, parent, p);
+ rb_insert_color(&new->node, root);
+ return 0;
+}
+
+/*
+ * decode a full map.
+ */
+struct ceph_osdmap *osdmap_decode(void **p, void *end)
+{
+ struct ceph_osdmap *map;
+ u16 version;
+ u32 len, max, i;
+ int err = -EINVAL;
+ void *start = *p;
+
+ dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));
+
+ map = kzalloc(sizeof(*map), GFP_NOFS);
+ if (map == NULL)
+ return ERR_PTR(-ENOMEM);
+ map->pg_temp = RB_ROOT;
+
+ ceph_decode_16_safe(p, end, version, bad);
+
+ ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
+ ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
+ map->epoch = ceph_decode_32(p);
+ ceph_decode_copy(p, &map->created, sizeof(map->created));
+ ceph_decode_copy(p, &map->modified, sizeof(map->modified));
+
+ map->num_pools = ceph_decode_32(p);
+ map->pg_pool = kcalloc(map->num_pools, sizeof(*map->pg_pool),
+ GFP_NOFS);
+ if (!map->pg_pool) {
+ err = -ENOMEM;
+ goto bad;
+ }
+ ceph_decode_32_safe(p, end, max, bad);
+ while (max--) {
+ ceph_decode_need(p, end, 4+sizeof(map->pg_pool->v), bad);
+ i = ceph_decode_32(p);
+ if (i >= map->num_pools)
+ goto bad;
+ ceph_decode_copy(p, &map->pg_pool[i].v,
+ sizeof(map->pg_pool->v));
+ calc_pg_masks(&map->pg_pool[i]);
+ p += le32_to_cpu(map->pg_pool[i].v.num_snaps) * sizeof(u64);
+ p += le32_to_cpu(map->pg_pool[i].v.num_removed_snap_intervals)
+ * sizeof(u64) * 2;
+ }
+
+ ceph_decode_32_safe(p, end, map->flags, bad);
+
+ max = ceph_decode_32(p);
+
+ /* (re)alloc osd arrays */
+ err = osdmap_set_max_osd(map, max);
+ if (err < 0)
+ goto bad;
+ dout("osdmap_decode max_osd = %d\n", map->max_osd);
+
+ /* osds */
+ err = -EINVAL;
+ ceph_decode_need(p, end, 3*sizeof(u32) +
+ map->max_osd*(1 + sizeof(*map->osd_weight) +
+ sizeof(*map->osd_addr)), bad);
+ *p += 4; /* skip length field (should match max) */
+ ceph_decode_copy(p, map->osd_state, map->max_osd);
+
+ *p += 4; /* skip length field (should match max) */
+ for (i = 0; i < map->max_osd; i++)
+ map->osd_weight[i] = ceph_decode_32(p);
+
+ *p += 4; /* skip length field (should match max) */
+ ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
+ for (i = 0; i < map->max_osd; i++)
+ ceph_decode_addr(&map->osd_addr[i]);
+
+ /* pg_temp */
+ ceph_decode_32_safe(p, end, len, bad);
+ for (i = 0; i < len; i++) {
+ int n, j;
+ struct ceph_pg pgid;
+ struct ceph_pg_mapping *pg;
+
+ ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
+ ceph_decode_copy(p, &pgid, sizeof(pgid));
+ n = ceph_decode_32(p);
+ ceph_decode_need(p, end, n * sizeof(u32), bad);
+ pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
+ if (!pg) {
+ err = -ENOMEM;
+ goto bad;
+ }
+ pg->pgid = pgid;
+ pg->len = n;
+ for (j = 0; j < n; j++)
+ pg->osds[j] = ceph_decode_32(p);
+
+ err = __insert_pg_mapping(pg, &map->pg_temp);
+ if (err)
+ goto bad;
+ dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
+ }
+
+ /* crush */
+ ceph_decode_32_safe(p, end, len, bad);
+ dout("osdmap_decode crush len %d from off 0x%x\n", len,
+ (int)(*p - start));
+ ceph_decode_need(p, end, len, bad);
+ map->crush = crush_decode(*p, end);
+ *p += len;
+ if (IS_ERR(map->crush)) {
+ err = PTR_ERR(map->crush);
+ map->crush = NULL;
+ goto bad;
+ }
+
+ /* ignore the rest of the map */
+ *p = end;
+
+ dout("osdmap_decode done %p %p\n", *p, end);
+ return map;
+
+bad:
+ dout("osdmap_decode fail\n");
+ ceph_osdmap_destroy(map);
+ return ERR_PTR(err);
+}
+
+/*
+ * decode and apply an incremental map update.
+ */
+struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ struct ceph_osdmap *map,
+ struct ceph_messenger *msgr)
+{
+ struct ceph_osdmap *newmap = map;
+ struct crush_map *newcrush = NULL;
+ struct ceph_fsid fsid;
+ u32 epoch = 0;
+ struct ceph_timespec modified;
+ u32 len, pool;
+ __s32 new_flags, max;
+ void *start = *p;
+ int err = -EINVAL;
+ u16 version;
+ struct rb_node *rbp;
+
+ ceph_decode_16_safe(p, end, version, bad);
+
+ ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
+ bad);
+ ceph_decode_copy(p, &fsid, sizeof(fsid));
+ epoch = ceph_decode_32(p);
+ BUG_ON(epoch != map->epoch+1);
+ ceph_decode_copy(p, &modified, sizeof(modified));
+ new_flags = ceph_decode_32(p);
+
+ /* full map? */
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len > 0) {
+ dout("apply_incremental full map len %d, %p to %p\n",
+ len, *p, end);
+ newmap = osdmap_decode(p, min(*p+len, end));
+ return newmap; /* error or not */
+ }
+
+ /* new crush? */
+ ceph_decode_32_safe(p, end, len, bad);
+ if (len > 0) {
+ dout("apply_incremental new crush map len %d, %p to %p\n",
+ len, *p, end);
+ newcrush = crush_decode(*p, min(*p+len, end));
+ if (IS_ERR(newcrush))
+ return ERR_PTR(PTR_ERR(newcrush));
+ }
+
+ /* new flags? */
+ if (new_flags >= 0)
+ map->flags = new_flags;
+
+ ceph_decode_need(p, end, 5*sizeof(u32), bad);
+
+ /* new max? */
+ max = ceph_decode_32(p);
+ if (max >= 0) {
+ err = osdmap_set_max_osd(map, max);
+ if (err < 0)
+ goto bad;
+ }
+
+ map->epoch++;
+ map->modified = map->modified;
+ if (newcrush) {
+ if (map->crush)
+ crush_destroy(map->crush);
+ map->crush = newcrush;
+ newcrush = NULL;
+ }
+
+ /* new_pool */
+ ceph_decode_32_safe(p, end, len, bad);
+ while (len--) {
+ ceph_decode_32_safe(p, end, pool, bad);
+ if (pool >= map->num_pools) {
+ void *pg_pool = kcalloc(pool + 1,
+ sizeof(*map->pg_pool),
+ GFP_NOFS);
+ if (!pg_pool) {
+ err = -ENOMEM;
+ goto bad;
+ }
+ memcpy(pg_pool, map->pg_pool,
+ map->num_pools * sizeof(*map->pg_pool));
+ kfree(map->pg_pool);
+ map->pg_pool = pg_pool;
+ map->num_pools = pool+1;
+ }
+ ceph_decode_copy(p, &map->pg_pool[pool].v,
+ sizeof(map->pg_pool->v));
+ calc_pg_masks(&map->pg_pool[pool]);
+ }
+
+ /* old_pool (ignore) */
+ ceph_decode_32_safe(p, end, len, bad);
+ *p += len * sizeof(u32);
+
+ /* new_up */
+ err = -EINVAL;
+ ceph_decode_32_safe(p, end, len, bad);
+ while (len--) {
+ u32 osd;
+ struct ceph_entity_addr addr;
+ ceph_decode_32_safe(p, end, osd, bad);
+ ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
+ ceph_decode_addr(&addr);
+ pr_info("osd%d up\n", osd);
+ BUG_ON(osd >= map->max_osd);
+ map->osd_state[osd] |= CEPH_OSD_UP;
+ map->osd_addr[osd] = addr;
+ }
+
+ /* new_down */
+ ceph_decode_32_safe(p, end, len, bad);
+ while (len--) {
+ u32 osd;
+ ceph_decode_32_safe(p, end, osd, bad);
+ (*p)++; /* clean flag */
+ pr_info("osd%d down\n", osd);
+ if (osd < map->max_osd)
+ map->osd_state[osd] &= ~CEPH_OSD_UP;
+ }
+
+ /* new_weight */
+ ceph_decode_32_safe(p, end, len, bad);
+ while (len--) {
+ u32 osd, off;
+ ceph_decode_need(p, end, sizeof(u32)*2, bad);
+ osd = ceph_decode_32(p);
+ off = ceph_decode_32(p);
+ pr_info("osd%d weight 0x%x %s\n", osd, off,
+ off == CEPH_OSD_IN ? "(in)" :
+ (off == CEPH_OSD_OUT ? "(out)" : ""));
+ if (osd < map->max_osd)
+ map->osd_weight[osd] = off;
+ }
+
+ /* new_pg_temp */
+ rbp = rb_first(&map->pg_temp);
+ ceph_decode_32_safe(p, end, len, bad);
+ while (len--) {
+ struct ceph_pg_mapping *pg;
+ int j;
+ struct ceph_pg pgid;
+ u32 pglen;
+ ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
+ ceph_decode_copy(p, &pgid, sizeof(pgid));
+ pglen = ceph_decode_32(p);
+
+ /* remove any? */
+ while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
+ node)->pgid, pgid) <= 0) {
+ struct rb_node *cur = rbp;
+ rbp = rb_next(rbp);
+ dout(" removed pg_temp %llx\n",
+ *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
+ node)->pgid);
+ rb_erase(cur, &map->pg_temp);
+ }
+
+ if (pglen) {
+ /* insert */
+ ceph_decode_need(p, end, pglen*sizeof(u32), bad);
+ pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
+ if (!pg) {
+ err = -ENOMEM;
+ goto bad;
+ }
+ pg->pgid = pgid;
+ pg->len = pglen;
+ for (j = 0; j < len; j++)
+ pg->osds[j] = ceph_decode_32(p);
+ err = __insert_pg_mapping(pg, &map->pg_temp);
+ if (err)
+ goto bad;
+ dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
+ pglen);
+ }
+ }
+ while (rbp) {
+ struct rb_node *cur = rbp;
+ rbp = rb_next(rbp);
+ dout(" removed pg_temp %llx\n",
+ *(u64 *)&rb_entry(cur, struct ceph_pg_mapping,
+ node)->pgid);
+ rb_erase(cur, &map->pg_temp);
+ }
+
+ /* ignore the rest */
+ *p = end;
+ return map;
+
+bad:
+ pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
+ epoch, (int)(*p - start), *p, start, end);
+ if (newcrush)
+ crush_destroy(newcrush);
+ return ERR_PTR(err);
+}
+
+
+
+
+/*
+ * calculate file layout from given offset, length.
+ * fill in correct oid, logical length, and object extent
+ * offset, length.
+ *
+ * for now, we write only a single su, until we can
+ * pass a stride back to the caller.
+ */
+void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
+ u64 off, u64 *plen,
+ u64 *ono,
+ u64 *oxoff, u64 *oxlen)
+{
+ u32 osize = le32_to_cpu(layout->fl_object_size);
+ u32 su = le32_to_cpu(layout->fl_stripe_unit);
+ u32 sc = le32_to_cpu(layout->fl_stripe_count);
+ u32 bl, stripeno, stripepos, objsetno;
+ u32 su_per_object;
+ u64 t, su_offset;
+
+ dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
+ osize, su);
+ su_per_object = osize / su;
+ dout("osize %u / su %u = su_per_object %u\n", osize, su,
+ su_per_object);
+
+ BUG_ON((su & ~PAGE_MASK) != 0);
+ /* bl = *off / su; */
+ t = off;
+ do_div(t, su);
+ bl = t;
+ dout("off %llu / su %u = bl %u\n", off, su, bl);
+
+ stripeno = bl / sc;
+ stripepos = bl % sc;
+ objsetno = stripeno / su_per_object;
+
+ *ono = objsetno * sc + stripepos;
+ dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);
+
+ /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
+ t = off;
+ su_offset = do_div(t, su);
+ *oxoff = su_offset + (stripeno % su_per_object) * su;
+
+ /*
+ * Calculate the length of the extent being written to the selected
+ * object. This is the minimum of the full length requested (plen) or
+ * the remainder of the current stripe being written to.
+ */
+ *oxlen = min_t(u64, *plen, su - su_offset);
+ *plen = *oxlen;
+
+ dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
+}
+
+/*
+ * calculate an object layout (i.e. pgid) from an oid,
+ * file_layout, and osdmap
+ */
+int ceph_calc_object_layout(struct ceph_object_layout *ol,
+ const char *oid,
+ struct ceph_file_layout *fl,
+ struct ceph_osdmap *osdmap)
+{
+ unsigned num, num_mask;
+ struct ceph_pg pgid;
+ s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
+ int poolid = le32_to_cpu(fl->fl_pg_pool);
+ struct ceph_pg_pool_info *pool;
+ unsigned ps;
+
+ if (poolid >= osdmap->num_pools)
+ return -EIO;
+
+ pool = &osdmap->pg_pool[poolid];
+ ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
+ if (preferred >= 0) {
+ ps += preferred;
+ num = le32_to_cpu(pool->v.lpg_num);
+ num_mask = pool->lpg_num_mask;
+ } else {
+ num = le32_to_cpu(pool->v.pg_num);
+ num_mask = pool->pg_num_mask;
+ }
+
+ pgid.ps = cpu_to_le16(ps);
+ pgid.preferred = cpu_to_le16(preferred);
+ pgid.pool = fl->fl_pg_pool;
+ if (preferred >= 0)
+ dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
+ (int)preferred);
+ else
+ dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
+
+ ol->ol_pgid = pgid;
+ ol->ol_stripe_unit = fl->fl_object_stripe_unit;
+ return 0;
+}
+
+/*
+ * Calculate raw osd vector for the given pgid. Return pointer to osd
+ * array, or NULL on failure.
+ */
+static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
+ int *osds, int *num)
+{
+ struct rb_node *n = osdmap->pg_temp.rb_node;
+ struct ceph_pg_mapping *pg;
+ struct ceph_pg_pool_info *pool;
+ int ruleno;
+ unsigned poolid, ps, pps;
+ int preferred;
+ int c;
+
+ /* pg_temp? */
+ while (n) {
+ pg = rb_entry(n, struct ceph_pg_mapping, node);
+ c = pgid_cmp(pgid, pg->pgid);
+ if (c < 0)
+ n = n->rb_left;
+ else if (c > 0)
+ n = n->rb_right;
+ else {
+ *num = pg->len;
+ return pg->osds;
+ }
+ }
+
+ /* crush */
+ poolid = le32_to_cpu(pgid.pool);
+ ps = le16_to_cpu(pgid.ps);
+ preferred = (s16)le16_to_cpu(pgid.preferred);
+
+ if (poolid >= osdmap->num_pools)
+ return NULL;
+ pool = &osdmap->pg_pool[poolid];
+ ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
+ pool->v.type, pool->v.size);
+ if (ruleno < 0) {
+ pr_err("no crush rule pool %d type %d size %d\n",
+ poolid, pool->v.type, pool->v.size);
+ return NULL;
+ }
+
+ if (preferred >= 0)
+ pps = ceph_stable_mod(ps,
+ le32_to_cpu(pool->v.lpgp_num),
+ pool->lpgp_num_mask);
+ else
+ pps = ceph_stable_mod(ps,
+ le32_to_cpu(pool->v.pgp_num),
+ pool->pgp_num_mask);
+ pps += poolid;
+ *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
+ min_t(int, pool->v.size, *num),
+ preferred, osdmap->osd_weight);
+ return osds;
+}
+
+/*
+ * Return primary osd for given pgid, or -1 if none.
+ */
+int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
+{
+ int rawosds[10], *osds;
+ int i, num = ARRAY_SIZE(rawosds);
+
+ osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
+ if (!osds)
+ return -1;
+
+ /* primary is first up osd */
+ for (i = 0; i < num; i++)
+ if (ceph_osd_is_up(osdmap, osds[i])) {
+ return osds[i];
+ break;
+ }
+ return -1;
+}
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
new file mode 100644
index 000000000000..c4af8418aa00
--- /dev/null
+++ b/fs/ceph/osdmap.h
@@ -0,0 +1,124 @@
+#ifndef _FS_CEPH_OSDMAP_H
+#define _FS_CEPH_OSDMAP_H
+
+#include <linux/rbtree.h>
+#include "types.h"
+#include "ceph_fs.h"
+#include "crush/crush.h"
+
+/*
+ * The osd map describes the current membership of the osd cluster and
+ * specifies the mapping of objects to placement groups and placement
+ * groups to (sets of) osds. That is, it completely specifies the
+ * (desired) distribution of all data objects in the system at some
+ * point in time.
+ *
+ * Each map version is identified by an epoch, which increases monotonically.
+ *
+ * The map can be updated either via an incremental map (diff) describing
+ * the change between two successive epochs, or as a fully encoded map.
+ */
+struct ceph_pg_pool_info {
+ struct ceph_pg_pool v;
+ int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
+};
+
+struct ceph_pg_mapping {
+ struct rb_node node;
+ struct ceph_pg pgid;
+ int len;
+ int osds[];
+};
+
+struct ceph_osdmap {
+ struct ceph_fsid fsid;
+ u32 epoch;
+ u32 mkfs_epoch;
+ struct ceph_timespec created, modified;
+
+ u32 flags; /* CEPH_OSDMAP_* */
+
+ u32 max_osd; /* size of osd_state, _offload, _addr arrays */
+ u8 *osd_state; /* CEPH_OSD_* */
+ u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */
+ struct ceph_entity_addr *osd_addr;
+
+ struct rb_root pg_temp;
+
+ u32 num_pools;
+ struct ceph_pg_pool_info *pg_pool;
+
+ /* the CRUSH map specifies the mapping of placement groups to
+ * the list of osds that store+replicate them. */
+ struct crush_map *crush;
+};
+
+/*
+ * file layout helpers
+ */
+#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
+#define ceph_file_layout_stripe_count(l) \
+ ((__s32)le32_to_cpu((l).fl_stripe_count))
+#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
+#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
+#define ceph_file_layout_object_su(l) \
+ ((__s32)le32_to_cpu((l).fl_object_stripe_unit))
+#define ceph_file_layout_pg_preferred(l) \
+ ((__s32)le32_to_cpu((l).fl_pg_preferred))
+#define ceph_file_layout_pg_pool(l) \
+ ((__s32)le32_to_cpu((l).fl_pg_pool))
+
+static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
+{
+ return le32_to_cpu(l->fl_stripe_unit) *
+ le32_to_cpu(l->fl_stripe_count);
+}
+
+/* "period" == bytes before i start on a new set of objects */
+static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
+{
+ return le32_to_cpu(l->fl_object_size) *
+ le32_to_cpu(l->fl_stripe_count);
+}
+
+
+static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
+{
+ return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP);
+}
+
+static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
+{
+ return map && (map->flags & flag);
+}
+
+extern char *ceph_osdmap_state_str(char *str, int len, int state);
+
+static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
+ int osd)
+{
+ if (osd >= map->max_osd)
+ return NULL;
+ return &map->osd_addr[osd];
+}
+
+extern struct ceph_osdmap *osdmap_decode(void **p, void *end);
+extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
+ struct ceph_osdmap *map,
+ struct ceph_messenger *msgr);
+extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
+
+/* calculate mapping of a file extent to an object */
+extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
+ u64 off, u64 *plen,
+ u64 *bno, u64 *oxoff, u64 *oxlen);
+
+/* calculate mapping of object to a placement group */
+extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
+ const char *oid,
+ struct ceph_file_layout *fl,
+ struct ceph_osdmap *osdmap);
+extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
+ struct ceph_pg pgid);
+
+#endif
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
new file mode 100644
index 000000000000..12bfb2f7c275
--- /dev/null
+++ b/fs/ceph/rados.h
@@ -0,0 +1,370 @@
+#ifndef __RADOS_H
+#define __RADOS_H
+
+/*
+ * Data types for the Ceph distributed object storage layer RADOS
+ * (Reliable Autonomic Distributed Object Store).
+ */
+
+#include "msgr.h"
+
+/*
+ * fs id
+ */
+struct ceph_fsid {
+ unsigned char fsid[16];
+};
+
+static inline int ceph_fsid_compare(const struct ceph_fsid *a,
+ const struct ceph_fsid *b)
+{
+ return memcmp(a, b, sizeof(*a));
+}
+
+/*
+ * ino, object, etc.
+ */
+typedef __le64 ceph_snapid_t;
+#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */
+#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */
+#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */
+
+struct ceph_timespec {
+ __le32 tv_sec;
+ __le32 tv_nsec;
+} __attribute__ ((packed));
+
+
+/*
+ * object layout - how objects are mapped into PGs
+ */
+#define CEPH_OBJECT_LAYOUT_HASH 1
+#define CEPH_OBJECT_LAYOUT_LINEAR 2
+#define CEPH_OBJECT_LAYOUT_HASHINO 3
+
+/*
+ * pg layout -- how PGs are mapped onto (sets of) OSDs
+ */
+#define CEPH_PG_LAYOUT_CRUSH 0
+#define CEPH_PG_LAYOUT_HASH 1
+#define CEPH_PG_LAYOUT_LINEAR 2
+#define CEPH_PG_LAYOUT_HYBRID 3
+
+
+/*
+ * placement group.
+ * we encode this into one __le64.
+ */
+struct ceph_pg {
+ __le16 preferred; /* preferred primary osd */
+ __le16 ps; /* placement seed */
+ __le32 pool; /* object pool */
+} __attribute__ ((packed));
+
+/*
+ * pg_pool is a set of pgs storing a pool of objects
+ *
+ * pg_num -- base number of pseudorandomly placed pgs
+ *
+ * pgp_num -- effective number when calculating pg placement. this
+ * is used for pg_num increases. new pgs result in data being "split"
+ * into new pgs. for this to proceed smoothly, new pgs are intiially
+ * colocated with their parents; that is, pgp_num doesn't increase
+ * until the new pgs have successfully split. only _then_ are the new
+ * pgs placed independently.
+ *
+ * lpg_num -- localized pg count (per device). replicas are randomly
+ * selected.
+ *
+ * lpgp_num -- as above.
+ */
+#define CEPH_PG_TYPE_REP 1
+#define CEPH_PG_TYPE_RAID4 2
+struct ceph_pg_pool {
+ __u8 type; /* CEPH_PG_TYPE_* */
+ __u8 size; /* number of osds in each pg */
+ __u8 crush_ruleset; /* crush placement rule */
+ __u8 object_hash; /* hash mapping object name to ps */
+ __le32 pg_num, pgp_num; /* number of pg's */
+ __le32 lpg_num, lpgp_num; /* number of localized pg's */
+ __le32 last_change; /* most recent epoch changed */
+ __le64 snap_seq; /* seq for per-pool snapshot */
+ __le32 snap_epoch; /* epoch of last snap */
+ __le32 num_snaps;
+ __le32 num_removed_snap_intervals;
+} __attribute__ ((packed));
+
+/*
+ * stable_mod func is used to control number of placement groups.
+ * similar to straight-up modulo, but produces a stable mapping as b
+ * increases over time. b is the number of bins, and bmask is the
+ * containing power of 2 minus 1.
+ *
+ * b <= bmask and bmask=(2**n)-1
+ * e.g., b=12 -> bmask=15, b=123 -> bmask=127
+ */
+static inline int ceph_stable_mod(int x, int b, int bmask)
+{
+ if ((x & bmask) < b)
+ return x & bmask;
+ else
+ return x & (bmask >> 1);
+}
+
+/*
+ * object layout - how a given object should be stored.
+ */
+struct ceph_object_layout {
+ struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */
+ __le32 ol_stripe_unit; /* for per-object parity, if any */
+} __attribute__ ((packed));
+
+/*
+ * compound epoch+version, used by storage layer to serialize mutations
+ */
+struct ceph_eversion {
+ __le32 epoch;
+ __le64 version;
+} __attribute__ ((packed));
+
+/*
+ * osd map bits
+ */
+
+/* status bits */
+#define CEPH_OSD_EXISTS 1
+#define CEPH_OSD_UP 2
+
+/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
+#define CEPH_OSD_IN 0x10000
+#define CEPH_OSD_OUT 0
+
+
+/*
+ * osd map flag bits
+ */
+#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
+#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
+#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
+#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
+
+/*
+ * osd ops
+ */
+#define CEPH_OSD_OP_MODE 0xf000
+#define CEPH_OSD_OP_MODE_RD 0x1000
+#define CEPH_OSD_OP_MODE_WR 0x2000
+#define CEPH_OSD_OP_MODE_RMW 0x3000
+#define CEPH_OSD_OP_MODE_SUB 0x4000
+
+#define CEPH_OSD_OP_TYPE 0x0f00
+#define CEPH_OSD_OP_TYPE_LOCK 0x0100
+#define CEPH_OSD_OP_TYPE_DATA 0x0200
+#define CEPH_OSD_OP_TYPE_ATTR 0x0300
+#define CEPH_OSD_OP_TYPE_EXEC 0x0400
+#define CEPH_OSD_OP_TYPE_PG 0x0500
+
+enum {
+ /** data **/
+ /* read */
+ CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
+ CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
+
+ /* fancy read */
+ CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
+
+ /* write */
+ CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
+ CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2,
+ CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3,
+ CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4,
+ CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5,
+
+ /* fancy write */
+ CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6,
+ CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7,
+ CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8,
+ CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9,
+
+ CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10,
+ CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11,
+ CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,
+
+ CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
+
+ /** attrs **/
+ /* read */
+ CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
+ CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2,
+
+ /* write */
+ CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1,
+ CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2,
+ CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3,
+ CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
+
+ /** subop **/
+ CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
+ CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
+ CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
+ CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
+ CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
+
+ /** lock **/
+ CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
+ CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2,
+ CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3,
+ CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4,
+ CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5,
+ CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
+
+ /** exec **/
+ CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
+
+ /** pg **/
+ CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
+};
+
+static inline int ceph_osd_op_type_lock(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK;
+}
+static inline int ceph_osd_op_type_data(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA;
+}
+static inline int ceph_osd_op_type_attr(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR;
+}
+static inline int ceph_osd_op_type_exec(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC;
+}
+static inline int ceph_osd_op_type_pg(int op)
+{
+ return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
+}
+
+static inline int ceph_osd_op_mode_subop(int op)
+{
+ return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB;
+}
+static inline int ceph_osd_op_mode_read(int op)
+{
+ return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD;
+}
+static inline int ceph_osd_op_mode_modify(int op)
+{
+ return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
+}
+
+#define CEPH_OSD_TMAP_HDR 'h'
+#define CEPH_OSD_TMAP_SET 's'
+#define CEPH_OSD_TMAP_RM 'r'
+
+extern const char *ceph_osd_op_name(int op);
+
+
+/*
+ * osd op flags
+ *
+ * An op may be READ, WRITE, or READ|WRITE.
+ */
+enum {
+ CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */
+ CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */
+ CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */
+ CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */
+ CEPH_OSD_FLAG_READ = 16, /* op may read */
+ CEPH_OSD_FLAG_WRITE = 32, /* op may write */
+ CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */
+ CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */
+ CEPH_OSD_FLAG_BALANCE_READS = 256,
+ CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
+ CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */
+ CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */
+};
+
+enum {
+ CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
+};
+
+#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/
+#define EBLACKLISTED ESHUTDOWN /* blacklisted */
+
+/*
+ * an individual object operation. each may be accompanied by some data
+ * payload
+ */
+struct ceph_osd_op {
+ __le16 op; /* CEPH_OSD_OP_* */
+ __le32 flags; /* CEPH_OSD_FLAG_* */
+ union {
+ struct {
+ __le64 offset, length;
+ } __attribute__ ((packed)) extent;
+ struct {
+ __le32 name_len;
+ __le32 value_len;
+ } __attribute__ ((packed)) xattr;
+ struct {
+ __le64 truncate_size;
+ __le32 truncate_seq;
+ } __attribute__ ((packed)) trunc;
+ struct {
+ __u8 class_len;
+ __u8 method_len;
+ __u8 argc;
+ __le32 indata_len;
+ } __attribute__ ((packed)) cls;
+ struct {
+ __le64 cookie, count;
+ } __attribute__ ((packed)) pgls;
+ };
+ __le32 payload_len;
+} __attribute__ ((packed));
+
+/*
+ * osd request message header. each request may include multiple
+ * ceph_osd_op object operations.
+ */
+struct ceph_osd_request_head {
+ __le64 tid; /* transaction id */
+ __le32 client_inc; /* client incarnation */
+ struct ceph_object_layout layout; /* pgid */
+ __le32 osdmap_epoch; /* client's osdmap epoch */
+
+ __le32 flags;
+
+ struct ceph_timespec mtime; /* for mutations only */
+ struct ceph_eversion reassert_version; /* if we are replaying op */
+
+ __le32 object_len; /* length of object name */
+
+ __le64 snapid; /* snapid to read */
+ __le64 snap_seq; /* writer's snap context */
+ __le32 num_snaps;
+
+ __le16 num_ops;
+ struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */
+} __attribute__ ((packed));
+
+struct ceph_osd_reply_head {
+ __le64 tid; /* transaction id */
+ __le32 client_inc; /* client incarnation */
+ __le32 flags;
+ struct ceph_object_layout layout;
+ __le32 osdmap_epoch;
+ struct ceph_eversion reassert_version; /* for replaying uncommitted */
+
+ __le32 result; /* result code */
+
+ __le32 object_len; /* length of object name */
+ __le32 num_ops;
+ struct ceph_osd_op ops[0]; /* ops[], object */
+} __attribute__ ((packed));
+
+
+#endif
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
new file mode 100644
index 000000000000..52f46a1208f5
--- /dev/null
+++ b/fs/ceph/snap.c
@@ -0,0 +1,887 @@
+#include "ceph_debug.h"
+
+#include <linux/radix-tree.h>
+#include <linux/sort.h>
+
+#include "super.h"
+#include "decode.h"
+
+/*
+ * Snapshots in ceph are driven in large part by cooperation from the
+ * client. In contrast to local file systems or file servers that
+ * implement snapshots at a single point in the system, ceph's
+ * distributed access to storage requires clients to help decide
+ * whether a write logically occurs before or after a recently created
+ * snapshot.
+ *
+ * This provides a perfect instantanous client-wide snapshot. Between
+ * clients, however, snapshots may appear to be applied at slightly
+ * different points in time, depending on delays in delivering the
+ * snapshot notification.
+ *
+ * Snapshots are _not_ file system-wide. Instead, each snapshot
+ * applies to the subdirectory nested beneath some directory. This
+ * effectively divides the hierarchy into multiple "realms," where all
+ * of the files contained by each realm share the same set of
+ * snapshots. An individual realm's snap set contains snapshots
+ * explicitly created on that realm, as well as any snaps in its
+ * parent's snap set _after_ the point at which the parent became it's
+ * parent (due to, say, a rename). Similarly, snaps from prior parents
+ * during the time intervals during which they were the parent are included.
+ *
+ * The client is spared most of this detail, fortunately... it must only
+ * maintains a hierarchy of realms reflecting the current parent/child
+ * realm relationship, and for each realm has an explicit list of snaps
+ * inherited from prior parents.
+ *
+ * A snap_realm struct is maintained for realms containing every inode
+ * with an open cap in the system. (The needed snap realm information is
+ * provided by the MDS whenever a cap is issued, i.e., on open.) A 'seq'
+ * version number is used to ensure that as realm parameters change (new
+ * snapshot, new parent, etc.) the client's realm hierarchy is updated.
+ *
+ * The realm hierarchy drives the generation of a 'snap context' for each
+ * realm, which simply lists the resulting set of snaps for the realm. This
+ * is attached to any writes sent to OSDs.
+ */
+/*
+ * Unfortunately error handling is a bit mixed here. If we get a snap
+ * update, but don't have enough memory to update our realm hierarchy,
+ * it's not clear what we can do about it (besides complaining to the
+ * console).
+ */
+
+
+/*
+ * increase ref count for the realm
+ *
+ * caller must hold snap_rwsem for write.
+ */
+void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm)
+{
+ dout("get_realm %p %d -> %d\n", realm,
+ atomic_read(&realm->nref), atomic_read(&realm->nref)+1);
+ /*
+ * since we _only_ increment realm refs or empty the empty
+ * list with snap_rwsem held, adjusting the empty list here is
+ * safe. we do need to protect against concurrent empty list
+ * additions, however.
+ */
+ if (atomic_read(&realm->nref) == 0) {
+ spin_lock(&mdsc->snap_empty_lock);
+ list_del_init(&realm->empty_item);
+ spin_unlock(&mdsc->snap_empty_lock);
+ }
+
+ atomic_inc(&realm->nref);
+}
+
+/*
+ * create and get the realm rooted at @ino and bump its ref count.
+ *
+ * caller must hold snap_rwsem for write.
+ */
+static struct ceph_snap_realm *ceph_create_snap_realm(
+ struct ceph_mds_client *mdsc,
+ u64 ino)
+{
+ struct ceph_snap_realm *realm;
+
+ realm = kzalloc(sizeof(*realm), GFP_NOFS);
+ if (!realm)
+ return ERR_PTR(-ENOMEM);
+
+ radix_tree_insert(&mdsc->snap_realms, ino, realm);
+
+ atomic_set(&realm->nref, 0); /* tree does not take a ref */
+ realm->ino = ino;
+ INIT_LIST_HEAD(&realm->children);
+ INIT_LIST_HEAD(&realm->child_item);
+ INIT_LIST_HEAD(&realm->empty_item);
+ INIT_LIST_HEAD(&realm->inodes_with_caps);
+ spin_lock_init(&realm->inodes_with_caps_lock);
+ dout("create_snap_realm %llx %p\n", realm->ino, realm);
+ return realm;
+}
+
+/*
+ * find and get (if found) the realm rooted at @ino and bump its ref count.
+ *
+ * caller must hold snap_rwsem for write.
+ */
+struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
+ u64 ino)
+{
+ struct ceph_snap_realm *realm;
+
+ realm = radix_tree_lookup(&mdsc->snap_realms, ino);
+ if (realm)
+ dout("lookup_snap_realm %llx %p\n", realm->ino, realm);
+ return realm;
+}
+
+static void __put_snap_realm(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm);
+
+/*
+ * called with snap_rwsem (write)
+ */
+static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm)
+{
+ dout("__destroy_snap_realm %p %llx\n", realm, realm->ino);
+
+ radix_tree_delete(&mdsc->snap_realms, realm->ino);
+
+ if (realm->parent) {
+ list_del_init(&realm->child_item);
+ __put_snap_realm(mdsc, realm->parent);
+ }
+
+ kfree(realm->prior_parent_snaps);
+ kfree(realm->snaps);
+ ceph_put_snap_context(realm->cached_context);
+ kfree(realm);
+}
+
+/*
+ * caller holds snap_rwsem (write)
+ */
+static void __put_snap_realm(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm)
+{
+ dout("__put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
+ atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
+ if (atomic_dec_and_test(&realm->nref))
+ __destroy_snap_realm(mdsc, realm);
+}
+
+/*
+ * caller needn't hold any locks
+ */
+void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm)
+{
+ dout("put_snap_realm %llx %p %d -> %d\n", realm->ino, realm,
+ atomic_read(&realm->nref), atomic_read(&realm->nref)-1);
+ if (!atomic_dec_and_test(&realm->nref))
+ return;
+
+ if (down_write_trylock(&mdsc->snap_rwsem)) {
+ __destroy_snap_realm(mdsc, realm);
+ up_write(&mdsc->snap_rwsem);
+ } else {
+ spin_lock(&mdsc->snap_empty_lock);
+ list_add(&mdsc->snap_empty, &realm->empty_item);
+ spin_unlock(&mdsc->snap_empty_lock);
+ }
+}
+
+/*
+ * Clean up any realms whose ref counts have dropped to zero. Note
+ * that this does not include realms who were created but not yet
+ * used.
+ *
+ * Called under snap_rwsem (write)
+ */
+static void __cleanup_empty_realms(struct ceph_mds_client *mdsc)
+{
+ struct ceph_snap_realm *realm;
+
+ spin_lock(&mdsc->snap_empty_lock);
+ while (!list_empty(&mdsc->snap_empty)) {
+ realm = list_first_entry(&mdsc->snap_empty,
+ struct ceph_snap_realm, empty_item);
+ list_del(&realm->empty_item);
+ spin_unlock(&mdsc->snap_empty_lock);
+ __destroy_snap_realm(mdsc, realm);
+ spin_lock(&mdsc->snap_empty_lock);
+ }
+ spin_unlock(&mdsc->snap_empty_lock);
+}
+
+void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc)
+{
+ down_write(&mdsc->snap_rwsem);
+ __cleanup_empty_realms(mdsc);
+ up_write(&mdsc->snap_rwsem);
+}
+
+/*
+ * adjust the parent realm of a given @realm. adjust child list, and parent
+ * pointers, and ref counts appropriately.
+ *
+ * return true if parent was changed, 0 if unchanged, <0 on error.
+ *
+ * caller must hold snap_rwsem for write.
+ */
+static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm,
+ u64 parentino)
+{
+ struct ceph_snap_realm *parent;
+
+ if (realm->parent_ino == parentino)
+ return 0;
+
+ parent = ceph_lookup_snap_realm(mdsc, parentino);
+ if (!parent) {
+ parent = ceph_create_snap_realm(mdsc, parentino);
+ if (IS_ERR(parent))
+ return PTR_ERR(parent);
+ }
+ dout("adjust_snap_realm_parent %llx %p: %llx %p -> %llx %p\n",
+ realm->ino, realm, realm->parent_ino, realm->parent,
+ parentino, parent);
+ if (realm->parent) {
+ list_del_init(&realm->child_item);
+ ceph_put_snap_realm(mdsc, realm->parent);
+ }
+ realm->parent_ino = parentino;
+ realm->parent = parent;
+ ceph_get_snap_realm(mdsc, parent);
+ list_add(&realm->child_item, &parent->children);
+ return 1;
+}
+
+
+static int cmpu64_rev(const void *a, const void *b)
+{
+ if (*(u64 *)a < *(u64 *)b)
+ return 1;
+ if (*(u64 *)a > *(u64 *)b)
+ return -1;
+ return 0;
+}
+
+/*
+ * build the snap context for a given realm.
+ */
+static int build_snap_context(struct ceph_snap_realm *realm)
+{
+ struct ceph_snap_realm *parent = realm->parent;
+ struct ceph_snap_context *snapc;
+ int err = 0;
+ int i;
+ int num = realm->num_prior_parent_snaps + realm->num_snaps;
+
+ /*
+ * build parent context, if it hasn't been built.
+ * conservatively estimate that all parent snaps might be
+ * included by us.
+ */
+ if (parent) {
+ if (!parent->cached_context) {
+ err = build_snap_context(parent);
+ if (err)
+ goto fail;
+ }
+ num += parent->cached_context->num_snaps;
+ }
+
+ /* do i actually need to update? not if my context seq
+ matches realm seq, and my parents' does to. (this works
+ because we rebuild_snap_realms() works _downward_ in
+ hierarchy after each update.) */
+ if (realm->cached_context &&
+ realm->cached_context->seq <= realm->seq &&
+ (!parent ||
+ realm->cached_context->seq <= parent->cached_context->seq)) {
+ dout("build_snap_context %llx %p: %p seq %lld (%d snaps)"
+ " (unchanged)\n",
+ realm->ino, realm, realm->cached_context,
+ realm->cached_context->seq,
+ realm->cached_context->num_snaps);
+ return 0;
+ }
+
+ /* alloc new snap context */
+ err = -ENOMEM;
+ if (num > ULONG_MAX / sizeof(u64) - sizeof(*snapc))
+ goto fail;
+ snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
+ if (!snapc)
+ goto fail;
+ atomic_set(&snapc->nref, 1);
+
+ /* build (reverse sorted) snap vector */
+ num = 0;
+ snapc->seq = realm->seq;
+ if (parent) {
+ /* include any of parent's snaps occuring _after_ my
+ parent became my parent */
+ for (i = 0; i < parent->cached_context->num_snaps; i++)
+ if (parent->cached_context->snaps[i] >=
+ realm->parent_since)
+ snapc->snaps[num++] =
+ parent->cached_context->snaps[i];
+ if (parent->cached_context->seq > snapc->seq)
+ snapc->seq = parent->cached_context->seq;
+ }
+ memcpy(snapc->snaps + num, realm->snaps,
+ sizeof(u64)*realm->num_snaps);
+ num += realm->num_snaps;
+ memcpy(snapc->snaps + num, realm->prior_parent_snaps,
+ sizeof(u64)*realm->num_prior_parent_snaps);
+ num += realm->num_prior_parent_snaps;
+
+ sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
+ snapc->num_snaps = num;
+ dout("build_snap_context %llx %p: %p seq %lld (%d snaps)\n",
+ realm->ino, realm, snapc, snapc->seq, snapc->num_snaps);
+
+ if (realm->cached_context)
+ ceph_put_snap_context(realm->cached_context);
+ realm->cached_context = snapc;
+ return 0;
+
+fail:
+ /*
+ * if we fail, clear old (incorrect) cached_context... hopefully
+ * we'll have better luck building it later
+ */
+ if (realm->cached_context) {
+ ceph_put_snap_context(realm->cached_context);
+ realm->cached_context = NULL;
+ }
+ pr_err("build_snap_context %llx %p fail %d\n", realm->ino,
+ realm, err);
+ return err;
+}
+
+/*
+ * rebuild snap context for the given realm and all of its children.
+ */
+static void rebuild_snap_realms(struct ceph_snap_realm *realm)
+{
+ struct ceph_snap_realm *child;
+
+ dout("rebuild_snap_realms %llx %p\n", realm->ino, realm);
+ build_snap_context(realm);
+
+ list_for_each_entry(child, &realm->children, child_item)
+ rebuild_snap_realms(child);
+}
+
+
+/*
+ * helper to allocate and decode an array of snapids. free prior
+ * instance, if any.
+ */
+static int dup_array(u64 **dst, __le64 *src, int num)
+{
+ int i;
+
+ kfree(*dst);
+ if (num) {
+ *dst = kcalloc(num, sizeof(u64), GFP_NOFS);
+ if (!*dst)
+ return -ENOMEM;
+ for (i = 0; i < num; i++)
+ (*dst)[i] = get_unaligned_le64(src + i);
+ } else {
+ *dst = NULL;
+ }
+ return 0;
+}
+
+
+/*
+ * When a snapshot is applied, the size/mtime inode metadata is queued
+ * in a ceph_cap_snap (one for each snapshot) until writeback
+ * completes and the metadata can be flushed back to the MDS.
+ *
+ * However, if a (sync) write is currently in-progress when we apply
+ * the snapshot, we have to wait until the write succeeds or fails
+ * (and a final size/mtime is known). In this case the
+ * cap_snap->writing = 1, and is said to be "pending." When the write
+ * finishes, we __ceph_finish_cap_snap().
+ *
+ * Caller must hold snap_rwsem for read (i.e., the realm topology won't
+ * change).
+ */
+void ceph_queue_cap_snap(struct ceph_inode_info *ci,
+ struct ceph_snap_context *snapc)
+{
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_cap_snap *capsnap;
+ int used;
+
+ capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
+ if (!capsnap) {
+ pr_err("ENOMEM allocating ceph_cap_snap on %p\n", inode);
+ return;
+ }
+
+ spin_lock(&inode->i_lock);
+ used = __ceph_caps_used(ci);
+ if (__ceph_have_pending_cap_snap(ci)) {
+ /* there is no point in queuing multiple "pending" cap_snaps,
+ as no new writes are allowed to start when pending, so any
+ writes in progress now were started before the previous
+ cap_snap. lucky us. */
+ dout("queue_cap_snap %p snapc %p seq %llu used %d"
+ " already pending\n", inode, snapc, snapc->seq, used);
+ kfree(capsnap);
+ } else if (ci->i_wrbuffer_ref_head || (used & CEPH_CAP_FILE_WR)) {
+ igrab(inode);
+
+ atomic_set(&capsnap->nref, 1);
+ capsnap->ci = ci;
+ INIT_LIST_HEAD(&capsnap->ci_item);
+ INIT_LIST_HEAD(&capsnap->flushing_item);
+
+ capsnap->follows = snapc->seq - 1;
+ capsnap->context = ceph_get_snap_context(snapc);
+ capsnap->issued = __ceph_caps_issued(ci, NULL);
+ capsnap->dirty = __ceph_caps_dirty(ci);
+
+ capsnap->mode = inode->i_mode;
+ capsnap->uid = inode->i_uid;
+ capsnap->gid = inode->i_gid;
+
+ /* fixme? */
+ capsnap->xattr_blob = NULL;
+ capsnap->xattr_len = 0;
+
+ /* dirty page count moved from _head to this cap_snap;
+ all subsequent writes page dirties occur _after_ this
+ snapshot. */
+ capsnap->dirty_pages = ci->i_wrbuffer_ref_head;
+ ci->i_wrbuffer_ref_head = 0;
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
+
+ if (used & CEPH_CAP_FILE_WR) {
+ dout("queue_cap_snap %p cap_snap %p snapc %p"
+ " seq %llu used WR, now pending\n", inode,
+ capsnap, snapc, snapc->seq);
+ capsnap->writing = 1;
+ } else {
+ /* note mtime, size NOW. */
+ __ceph_finish_cap_snap(ci, capsnap);
+ }
+ } else {
+ dout("queue_cap_snap %p nothing dirty|writing\n", inode);
+ kfree(capsnap);
+ }
+
+ spin_unlock(&inode->i_lock);
+}
+
+/*
+ * Finalize the size, mtime for a cap_snap.. that is, settle on final values
+ * to be used for the snapshot, to be flushed back to the mds.
+ *
+ * If capsnap can now be flushed, add to snap_flush list, and return 1.
+ *
+ * Caller must hold i_lock.
+ */
+int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
+ struct ceph_cap_snap *capsnap)
+{
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_mds_client *mdsc = &ceph_client(inode->i_sb)->mdsc;
+
+ BUG_ON(capsnap->writing);
+ capsnap->size = inode->i_size;
+ capsnap->mtime = inode->i_mtime;
+ capsnap->atime = inode->i_atime;
+ capsnap->ctime = inode->i_ctime;
+ capsnap->time_warp_seq = ci->i_time_warp_seq;
+ if (capsnap->dirty_pages) {
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu "
+ "still has %d dirty pages\n", inode, capsnap,
+ capsnap->context, capsnap->context->seq,
+ capsnap->size, capsnap->dirty_pages);
+ return 0;
+ }
+ dout("finish_cap_snap %p cap_snap %p snapc %p %llu s=%llu clean\n",
+ inode, capsnap, capsnap->context,
+ capsnap->context->seq, capsnap->size);
+
+ spin_lock(&mdsc->snap_flush_lock);
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+ spin_unlock(&mdsc->snap_flush_lock);
+ return 1; /* caller may want to ceph_flush_snaps */
+}
+
+
+/*
+ * Parse and apply a snapblob "snap trace" from the MDS. This specifies
+ * the snap realm parameters from a given realm and all of its ancestors,
+ * up to the root.
+ *
+ * Caller must hold snap_rwsem for write.
+ */
+int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
+ void *p, void *e, bool deletion)
+{
+ struct ceph_mds_snap_realm *ri; /* encoded */
+ __le64 *snaps; /* encoded */
+ __le64 *prior_parent_snaps; /* encoded */
+ struct ceph_snap_realm *realm;
+ int invalidate = 0;
+ int err = -ENOMEM;
+
+ dout("update_snap_trace deletion=%d\n", deletion);
+more:
+ ceph_decode_need(&p, e, sizeof(*ri), bad);
+ ri = p;
+ p += sizeof(*ri);
+ ceph_decode_need(&p, e, sizeof(u64)*(le32_to_cpu(ri->num_snaps) +
+ le32_to_cpu(ri->num_prior_parent_snaps)), bad);
+ snaps = p;
+ p += sizeof(u64) * le32_to_cpu(ri->num_snaps);
+ prior_parent_snaps = p;
+ p += sizeof(u64) * le32_to_cpu(ri->num_prior_parent_snaps);
+
+ realm = ceph_lookup_snap_realm(mdsc, le64_to_cpu(ri->ino));
+ if (!realm) {
+ realm = ceph_create_snap_realm(mdsc, le64_to_cpu(ri->ino));
+ if (IS_ERR(realm)) {
+ err = PTR_ERR(realm);
+ goto fail;
+ }
+ }
+
+ if (le64_to_cpu(ri->seq) > realm->seq) {
+ dout("update_snap_trace updating %llx %p %lld -> %lld\n",
+ realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
+ /*
+ * if the realm seq has changed, queue a cap_snap for every
+ * inode with open caps. we do this _before_ we update
+ * the realm info so that we prepare for writeback under the
+ * _previous_ snap context.
+ *
+ * ...unless it's a snap deletion!
+ */
+ if (!deletion) {
+ struct ceph_inode_info *ci;
+ struct inode *lastinode = NULL;
+
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_for_each_entry(ci, &realm->inodes_with_caps,
+ i_snap_realm_item) {
+ struct inode *inode = igrab(&ci->vfs_inode);
+ if (!inode)
+ continue;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ if (lastinode)
+ iput(lastinode);
+ lastinode = inode;
+ ceph_queue_cap_snap(ci, realm->cached_context);
+ spin_lock(&realm->inodes_with_caps_lock);
+ }
+ spin_unlock(&realm->inodes_with_caps_lock);
+ if (lastinode)
+ iput(lastinode);
+ dout("update_snap_trace cap_snaps queued\n");
+ }
+
+ } else {
+ dout("update_snap_trace %llx %p seq %lld unchanged\n",
+ realm->ino, realm, realm->seq);
+ }
+
+ /* ensure the parent is correct */
+ err = adjust_snap_realm_parent(mdsc, realm, le64_to_cpu(ri->parent));
+ if (err < 0)
+ goto fail;
+ invalidate += err;
+
+ if (le64_to_cpu(ri->seq) > realm->seq) {
+ /* update realm parameters, snap lists */
+ realm->seq = le64_to_cpu(ri->seq);
+ realm->created = le64_to_cpu(ri->created);
+ realm->parent_since = le64_to_cpu(ri->parent_since);
+
+ realm->num_snaps = le32_to_cpu(ri->num_snaps);
+ err = dup_array(&realm->snaps, snaps, realm->num_snaps);
+ if (err < 0)
+ goto fail;
+
+ realm->num_prior_parent_snaps =
+ le32_to_cpu(ri->num_prior_parent_snaps);
+ err = dup_array(&realm->prior_parent_snaps, prior_parent_snaps,
+ realm->num_prior_parent_snaps);
+ if (err < 0)
+ goto fail;
+
+ invalidate = 1;
+ } else if (!realm->cached_context) {
+ invalidate = 1;
+ }
+
+ dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
+ realm, invalidate, p, e);
+
+ if (p < e)
+ goto more;
+
+ /* invalidate when we reach the _end_ (root) of the trace */
+ if (invalidate)
+ rebuild_snap_realms(realm);
+
+ __cleanup_empty_realms(mdsc);
+ return 0;
+
+bad:
+ err = -EINVAL;
+fail:
+ pr_err("update_snap_trace error %d\n", err);
+ return err;
+}
+
+
+/*
+ * Send any cap_snaps that are queued for flush. Try to carry
+ * s_mutex across multiple snap flushes to avoid locking overhead.
+ *
+ * Caller holds no locks.
+ */
+static void flush_snaps(struct ceph_mds_client *mdsc)
+{
+ struct ceph_inode_info *ci;
+ struct inode *inode;
+ struct ceph_mds_session *session = NULL;
+
+ dout("flush_snaps\n");
+ spin_lock(&mdsc->snap_flush_lock);
+ while (!list_empty(&mdsc->snap_flush_list)) {
+ ci = list_first_entry(&mdsc->snap_flush_list,
+ struct ceph_inode_info, i_snap_flush_item);
+ inode = &ci->vfs_inode;
+ igrab(inode);
+ spin_unlock(&mdsc->snap_flush_lock);
+ spin_lock(&inode->i_lock);
+ __ceph_flush_snaps(ci, &session);
+ spin_unlock(&inode->i_lock);
+ iput(inode);
+ spin_lock(&mdsc->snap_flush_lock);
+ }
+ spin_unlock(&mdsc->snap_flush_lock);
+
+ if (session) {
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ }
+ dout("flush_snaps done\n");
+}
+
+
+/*
+ * Handle a snap notification from the MDS.
+ *
+ * This can take two basic forms: the simplest is just a snap creation
+ * or deletion notification on an existing realm. This should update the
+ * realm and its children.
+ *
+ * The more difficult case is realm creation, due to snap creation at a
+ * new point in the file hierarchy, or due to a rename that moves a file or
+ * directory into another realm.
+ */
+void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg)
+{
+ struct super_block *sb = mdsc->client->sb;
+ struct ceph_mds_session *session;
+ int mds;
+ u64 split;
+ int op;
+ int trace_len;
+ struct ceph_snap_realm *realm = NULL;
+ void *p = msg->front.iov_base;
+ void *e = p + msg->front.iov_len;
+ struct ceph_mds_snap_head *h;
+ int num_split_inos, num_split_realms;
+ __le64 *split_inos = NULL, *split_realms = NULL;
+ int i;
+ int locked_rwsem = 0;
+
+ if (msg->hdr.src.name.type != CEPH_ENTITY_TYPE_MDS)
+ return;
+ mds = le64_to_cpu(msg->hdr.src.name.num);
+
+ /* decode */
+ if (msg->front.iov_len < sizeof(*h))
+ goto bad;
+ h = p;
+ op = le32_to_cpu(h->op);
+ split = le64_to_cpu(h->split); /* non-zero if we are splitting an
+ * existing realm */
+ num_split_inos = le32_to_cpu(h->num_split_inos);
+ num_split_realms = le32_to_cpu(h->num_split_realms);
+ trace_len = le32_to_cpu(h->trace_len);
+ p += sizeof(*h);
+
+ dout("handle_snap from mds%d op %s split %llx tracelen %d\n", mds,
+ ceph_snap_op_name(op), split, trace_len);
+
+ /* find session */
+ mutex_lock(&mdsc->mutex);
+ session = __ceph_lookup_mds_session(mdsc, mds);
+ mutex_unlock(&mdsc->mutex);
+ if (!session) {
+ dout("WTF, got snap but no session for mds%d\n", mds);
+ return;
+ }
+
+ mutex_lock(&session->s_mutex);
+ session->s_seq++;
+ mutex_unlock(&session->s_mutex);
+
+ down_write(&mdsc->snap_rwsem);
+ locked_rwsem = 1;
+
+ if (op == CEPH_SNAP_OP_SPLIT) {
+ struct ceph_mds_snap_realm *ri;
+
+ /*
+ * A "split" breaks part of an existing realm off into
+ * a new realm. The MDS provides a list of inodes
+ * (with caps) and child realms that belong to the new
+ * child.
+ */
+ split_inos = p;
+ p += sizeof(u64) * num_split_inos;
+ split_realms = p;
+ p += sizeof(u64) * num_split_realms;
+ ceph_decode_need(&p, e, sizeof(*ri), bad);
+ /* we will peek at realm info here, but will _not_
+ * advance p, as the realm update will occur below in
+ * ceph_update_snap_trace. */
+ ri = p;
+
+ realm = ceph_lookup_snap_realm(mdsc, split);
+ if (!realm) {
+ realm = ceph_create_snap_realm(mdsc, split);
+ if (IS_ERR(realm))
+ goto out;
+ }
+ ceph_get_snap_realm(mdsc, realm);
+
+ dout("splitting snap_realm %llx %p\n", realm->ino, realm);
+ for (i = 0; i < num_split_inos; i++) {
+ struct ceph_vino vino = {
+ .ino = le64_to_cpu(split_inos[i]),
+ .snap = CEPH_NOSNAP,
+ };
+ struct inode *inode = ceph_find_inode(sb, vino);
+ struct ceph_inode_info *ci;
+
+ if (!inode)
+ continue;
+ ci = ceph_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ if (!ci->i_snap_realm)
+ goto skip_inode;
+ /*
+ * If this inode belongs to a realm that was
+ * created after our new realm, we experienced
+ * a race (due to another split notifications
+ * arriving from a different MDS). So skip
+ * this inode.
+ */
+ if (ci->i_snap_realm->created >
+ le64_to_cpu(ri->created)) {
+ dout(" leaving %p in newer realm %llx %p\n",
+ inode, ci->i_snap_realm->ino,
+ ci->i_snap_realm);
+ goto skip_inode;
+ }
+ dout(" will move %p to split realm %llx %p\n",
+ inode, realm->ino, realm);
+ /*
+ * Remove the inode from the realm's inode
+ * list, but don't add it to the new realm
+ * yet. We don't want the cap_snap to be
+ * queued (again) by ceph_update_snap_trace()
+ * below. Queue it _now_, under the old context.
+ */
+ list_del_init(&ci->i_snap_realm_item);
+ spin_unlock(&inode->i_lock);
+
+ ceph_queue_cap_snap(ci,
+ ci->i_snap_realm->cached_context);
+
+ iput(inode);
+ continue;
+
+skip_inode:
+ spin_unlock(&inode->i_lock);
+ iput(inode);
+ }
+
+ /* we may have taken some of the old realm's children. */
+ for (i = 0; i < num_split_realms; i++) {
+ struct ceph_snap_realm *child =
+ ceph_lookup_snap_realm(mdsc,
+ le64_to_cpu(split_realms[i]));
+ if (!child)
+ continue;
+ adjust_snap_realm_parent(mdsc, child, realm->ino);
+ }
+ }
+
+ /*
+ * update using the provided snap trace. if we are deleting a
+ * snap, we can avoid queueing cap_snaps.
+ */
+ ceph_update_snap_trace(mdsc, p, e,
+ op == CEPH_SNAP_OP_DESTROY);
+
+ if (op == CEPH_SNAP_OP_SPLIT) {
+ /*
+ * ok, _now_ add the inodes into the new realm.
+ */
+ for (i = 0; i < num_split_inos; i++) {
+ struct ceph_vino vino = {
+ .ino = le64_to_cpu(split_inos[i]),
+ .snap = CEPH_NOSNAP,
+ };
+ struct inode *inode = ceph_find_inode(sb, vino);
+ struct ceph_inode_info *ci;
+
+ if (!inode)
+ continue;
+ ci = ceph_inode(inode);
+ spin_lock(&inode->i_lock);
+ if (!ci->i_snap_realm)
+ goto split_skip_inode;
+ ceph_put_snap_realm(mdsc, ci->i_snap_realm);
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_add(&ci->i_snap_realm_item,
+ &realm->inodes_with_caps);
+ ci->i_snap_realm = realm;
+ spin_unlock(&realm->inodes_with_caps_lock);
+ ceph_get_snap_realm(mdsc, realm);
+split_skip_inode:
+ spin_unlock(&inode->i_lock);
+ iput(inode);
+ }
+
+ /* we took a reference when we created the realm, above */
+ ceph_put_snap_realm(mdsc, realm);
+ }
+
+ __cleanup_empty_realms(mdsc);
+
+ up_write(&mdsc->snap_rwsem);
+
+ flush_snaps(mdsc);
+ return;
+
+bad:
+ pr_err("corrupt snap message from mds%d\n", mds);
+out:
+ if (locked_rwsem)
+ up_write(&mdsc->snap_rwsem);
+ return;
+}
+
+
+
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
new file mode 100644
index 000000000000..a828943296c5
--- /dev/null
+++ b/fs/ceph/super.c
@@ -0,0 +1,984 @@
+
+#include "ceph_debug.h"
+
+#include <linux/backing-dev.h>
+#include <linux/fs.h>
+#include <linux/inet.h>
+#include <linux/in6.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/parser.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/string.h>
+#include <linux/version.h>
+#include <linux/vmalloc.h>
+
+#include "decode.h"
+#include "super.h"
+#include "mon_client.h"
+#include "auth.h"
+
+/*
+ * Ceph superblock operations
+ *
+ * Handle the basics of mounting, unmounting.
+ */
+
+
+/*
+ * find filename portion of a path (/foo/bar/baz -> baz)
+ */
+const char *ceph_file_part(const char *s, int len)
+{
+ const char *e = s + len;
+
+ while (e != s && *(e-1) != '/')
+ e--;
+ return e;
+}
+
+
+/*
+ * super ops
+ */
+static void ceph_put_super(struct super_block *s)
+{
+ struct ceph_client *cl = ceph_client(s);
+
+ dout("put_super\n");
+ ceph_mdsc_close_sessions(&cl->mdsc);
+ return;
+}
+
+static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct ceph_client *client = ceph_inode_to_client(dentry->d_inode);
+ struct ceph_monmap *monmap = client->monc.monmap;
+ struct ceph_statfs st;
+ u64 fsid;
+ int err;
+
+ dout("statfs\n");
+ err = ceph_monc_do_statfs(&client->monc, &st);
+ if (err < 0)
+ return err;
+
+ /* fill in kstatfs */
+ buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
+
+ /*
+ * express utilization in terms of large blocks to avoid
+ * overflow on 32-bit machines.
+ */
+ buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
+ buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
+ buf->f_bfree = (le64_to_cpu(st.kb) - le64_to_cpu(st.kb_used)) >>
+ (CEPH_BLOCK_SHIFT-10);
+ buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
+
+ buf->f_files = le64_to_cpu(st.num_objects);
+ buf->f_ffree = -1;
+ buf->f_namelen = PATH_MAX;
+ buf->f_frsize = PAGE_CACHE_SIZE;
+
+ /* leave fsid little-endian, regardless of host endianness */
+ fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
+ buf->f_fsid.val[0] = fsid & 0xffffffff;
+ buf->f_fsid.val[1] = fsid >> 32;
+
+ return 0;
+}
+
+
+static int ceph_syncfs(struct super_block *sb, int wait)
+{
+ dout("sync_fs %d\n", wait);
+ ceph_osdc_sync(&ceph_client(sb)->osdc);
+ ceph_mdsc_sync(&ceph_client(sb)->mdsc);
+ dout("sync_fs %d done\n", wait);
+ return 0;
+}
+
+
+/**
+ * ceph_show_options - Show mount options in /proc/mounts
+ * @m: seq_file to write to
+ * @mnt: mount descriptor
+ */
+static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
+{
+ struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
+ struct ceph_mount_args *args = client->mount_args;
+
+ if (args->flags & CEPH_OPT_FSID)
+ seq_printf(m, ",fsidmajor=%llu,fsidminor%llu",
+ le64_to_cpu(*(__le64 *)&args->fsid.fsid[0]),
+ le64_to_cpu(*(__le64 *)&args->fsid.fsid[8]));
+ if (args->flags & CEPH_OPT_NOSHARE)
+ seq_puts(m, ",noshare");
+ if (args->flags & CEPH_OPT_DIRSTAT)
+ seq_puts(m, ",dirstat");
+ if ((args->flags & CEPH_OPT_RBYTES) == 0)
+ seq_puts(m, ",norbytes");
+ if (args->flags & CEPH_OPT_NOCRC)
+ seq_puts(m, ",nocrc");
+ if (args->flags & CEPH_OPT_NOASYNCREADDIR)
+ seq_puts(m, ",noasyncreaddir");
+ if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
+ seq_printf(m, ",snapdirname=%s", args->snapdir_name);
+ if (args->name)
+ seq_printf(m, ",name=%s", args->name);
+ if (args->secret)
+ seq_puts(m, ",secret=<hidden>");
+ return 0;
+}
+
+/*
+ * caches
+ */
+struct kmem_cache *ceph_inode_cachep;
+struct kmem_cache *ceph_cap_cachep;
+struct kmem_cache *ceph_dentry_cachep;
+struct kmem_cache *ceph_file_cachep;
+
+static void ceph_inode_init_once(void *foo)
+{
+ struct ceph_inode_info *ci = foo;
+ inode_init_once(&ci->vfs_inode);
+}
+
+static int __init init_caches(void)
+{
+ ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
+ sizeof(struct ceph_inode_info),
+ __alignof__(struct ceph_inode_info),
+ (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
+ ceph_inode_init_once);
+ if (ceph_inode_cachep == NULL)
+ return -ENOMEM;
+
+ ceph_cap_cachep = KMEM_CACHE(ceph_cap,
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+ if (ceph_cap_cachep == NULL)
+ goto bad_cap;
+
+ ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+ if (ceph_dentry_cachep == NULL)
+ goto bad_dentry;
+
+ ceph_file_cachep = KMEM_CACHE(ceph_file_info,
+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+ if (ceph_file_cachep == NULL)
+ goto bad_file;
+
+ return 0;
+
+bad_file:
+ kmem_cache_destroy(ceph_dentry_cachep);
+bad_dentry:
+ kmem_cache_destroy(ceph_cap_cachep);
+bad_cap:
+ kmem_cache_destroy(ceph_inode_cachep);
+ return -ENOMEM;
+}
+
+static void destroy_caches(void)
+{
+ kmem_cache_destroy(ceph_inode_cachep);
+ kmem_cache_destroy(ceph_cap_cachep);
+ kmem_cache_destroy(ceph_dentry_cachep);
+ kmem_cache_destroy(ceph_file_cachep);
+}
+
+
+/*
+ * ceph_umount_begin - initiate forced umount. Tear down down the
+ * mount, skipping steps that may hang while waiting for server(s).
+ */
+static void ceph_umount_begin(struct super_block *sb)
+{
+ struct ceph_client *client = ceph_sb_to_client(sb);
+
+ dout("ceph_umount_begin - starting forced umount\n");
+ if (!client)
+ return;
+ client->mount_state = CEPH_MOUNT_SHUTDOWN;
+ return;
+}
+
+static const struct super_operations ceph_super_ops = {
+ .alloc_inode = ceph_alloc_inode,
+ .destroy_inode = ceph_destroy_inode,
+ .write_inode = ceph_write_inode,
+ .sync_fs = ceph_syncfs,
+ .put_super = ceph_put_super,
+ .show_options = ceph_show_options,
+ .statfs = ceph_statfs,
+ .umount_begin = ceph_umount_begin,
+};
+
+
+const char *ceph_msg_type_name(int type)
+{
+ switch (type) {
+ case CEPH_MSG_SHUTDOWN: return "shutdown";
+ case CEPH_MSG_PING: return "ping";
+ case CEPH_MSG_AUTH: return "auth";
+ case CEPH_MSG_AUTH_REPLY: return "auth_reply";
+ case CEPH_MSG_MON_MAP: return "mon_map";
+ case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
+ case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
+ case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
+ case CEPH_MSG_STATFS: return "statfs";
+ case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
+ case CEPH_MSG_MDS_MAP: return "mds_map";
+ case CEPH_MSG_CLIENT_SESSION: return "client_session";
+ case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
+ case CEPH_MSG_CLIENT_REQUEST: return "client_request";
+ case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
+ case CEPH_MSG_CLIENT_REPLY: return "client_reply";
+ case CEPH_MSG_CLIENT_CAPS: return "client_caps";
+ case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
+ case CEPH_MSG_CLIENT_SNAP: return "client_snap";
+ case CEPH_MSG_CLIENT_LEASE: return "client_lease";
+ case CEPH_MSG_OSD_MAP: return "osd_map";
+ case CEPH_MSG_OSD_OP: return "osd_op";
+ case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
+ default: return "unknown";
+ }
+}
+
+
+/*
+ * mount options
+ */
+enum {
+ Opt_fsidmajor,
+ Opt_fsidminor,
+ Opt_monport,
+ Opt_wsize,
+ Opt_rsize,
+ Opt_osdtimeout,
+ Opt_mount_timeout,
+ Opt_caps_wanted_delay_min,
+ Opt_caps_wanted_delay_max,
+ Opt_readdir_max_entries,
+ Opt_last_int,
+ /* int args above */
+ Opt_snapdirname,
+ Opt_name,
+ Opt_secret,
+ Opt_last_string,
+ /* string args above */
+ Opt_ip,
+ Opt_noshare,
+ Opt_dirstat,
+ Opt_nodirstat,
+ Opt_rbytes,
+ Opt_norbytes,
+ Opt_nocrc,
+ Opt_noasyncreaddir,
+};
+
+static match_table_t arg_tokens = {
+ {Opt_fsidmajor, "fsidmajor=%ld"},
+ {Opt_fsidminor, "fsidminor=%ld"},
+ {Opt_monport, "monport=%d"},
+ {Opt_wsize, "wsize=%d"},
+ {Opt_rsize, "rsize=%d"},
+ {Opt_osdtimeout, "osdtimeout=%d"},
+ {Opt_mount_timeout, "mount_timeout=%d"},
+ {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
+ {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
+ {Opt_readdir_max_entries, "readdir_max_entries=%d"},
+ /* int args above */
+ {Opt_snapdirname, "snapdirname=%s"},
+ {Opt_name, "name=%s"},
+ {Opt_secret, "secret=%s"},
+ /* string args above */
+ {Opt_ip, "ip=%s"},
+ {Opt_noshare, "noshare"},
+ {Opt_dirstat, "dirstat"},
+ {Opt_nodirstat, "nodirstat"},
+ {Opt_rbytes, "rbytes"},
+ {Opt_norbytes, "norbytes"},
+ {Opt_nocrc, "nocrc"},
+ {Opt_noasyncreaddir, "noasyncreaddir"},
+ {-1, NULL}
+};
+
+
+static struct ceph_mount_args *parse_mount_args(int flags, char *options,
+ const char *dev_name,
+ const char **path)
+{
+ struct ceph_mount_args *args;
+ const char *c;
+ int err = -ENOMEM;
+ substring_t argstr[MAX_OPT_ARGS];
+
+ args = kzalloc(sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return ERR_PTR(-ENOMEM);
+ args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr),
+ GFP_KERNEL);
+ if (!args->mon_addr)
+ goto out;
+
+ dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name);
+
+ /* start with defaults */
+ args->sb_flags = flags;
+ args->flags = CEPH_OPT_DEFAULT;
+ args->osd_timeout = 5; /* seconds */
+ args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */
+ args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
+ args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
+ args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
+ args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
+ args->cap_release_safety = CEPH_CAPS_PER_RELEASE * 4;
+ args->max_readdir = 1024;
+
+ /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
+ err = -EINVAL;
+ if (!dev_name)
+ goto out;
+ *path = strstr(dev_name, ":/");
+ if (*path == NULL) {
+ pr_err("device name is missing path (no :/ in %s)\n",
+ dev_name);
+ goto out;
+ }
+
+ /* get mon ip(s) */
+ err = ceph_parse_ips(dev_name, *path, args->mon_addr,
+ CEPH_MAX_MON, &args->num_mon);
+ if (err < 0)
+ goto out;
+
+ /* path on server */
+ *path += 2;
+ dout("server path '%s'\n", *path);
+
+ /* parse mount options */
+ while ((c = strsep(&options, ",")) != NULL) {
+ int token, intval, ret;
+ if (!*c)
+ continue;
+ err = -EINVAL;
+ token = match_token((char *)c, arg_tokens, argstr);
+ if (token < 0) {
+ pr_err("bad mount option at '%s'\n", c);
+ goto out;
+ }
+ if (token < Opt_last_int) {
+ ret = match_int(&argstr[0], &intval);
+ if (ret < 0) {
+ pr_err("bad mount option arg (not int) "
+ "at '%s'\n", c);
+ continue;
+ }
+ dout("got int token %d val %d\n", token, intval);
+ } else if (token > Opt_last_int && token < Opt_last_string) {
+ dout("got string token %d val %s\n", token,
+ argstr[0].from);
+ } else {
+ dout("got token %d\n", token);
+ }
+ switch (token) {
+ case Opt_fsidmajor:
+ *(__le64 *)&args->fsid.fsid[0] = cpu_to_le64(intval);
+ break;
+ case Opt_fsidminor:
+ *(__le64 *)&args->fsid.fsid[8] = cpu_to_le64(intval);
+ break;
+ case Opt_ip:
+ err = ceph_parse_ips(argstr[0].from,
+ argstr[0].to,
+ &args->my_addr,
+ 1, NULL);
+ if (err < 0)
+ goto out;
+ args->flags |= CEPH_OPT_MYIP;
+ break;
+
+ case Opt_snapdirname:
+ kfree(args->snapdir_name);
+ args->snapdir_name = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ break;
+ case Opt_name:
+ args->name = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ break;
+ case Opt_secret:
+ args->secret = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ break;
+
+ /* misc */
+ case Opt_wsize:
+ args->wsize = intval;
+ break;
+ case Opt_rsize:
+ args->rsize = intval;
+ break;
+ case Opt_osdtimeout:
+ args->osd_timeout = intval;
+ break;
+ case Opt_mount_timeout:
+ args->mount_timeout = intval;
+ break;
+ case Opt_caps_wanted_delay_min:
+ args->caps_wanted_delay_min = intval;
+ break;
+ case Opt_caps_wanted_delay_max:
+ args->caps_wanted_delay_max = intval;
+ break;
+ case Opt_readdir_max_entries:
+ args->max_readdir = intval;
+ break;
+
+ case Opt_noshare:
+ args->flags |= CEPH_OPT_NOSHARE;
+ break;
+
+ case Opt_dirstat:
+ args->flags |= CEPH_OPT_DIRSTAT;
+ break;
+ case Opt_nodirstat:
+ args->flags &= ~CEPH_OPT_DIRSTAT;
+ break;
+ case Opt_rbytes:
+ args->flags |= CEPH_OPT_RBYTES;
+ break;
+ case Opt_norbytes:
+ args->flags &= ~CEPH_OPT_RBYTES;
+ break;
+ case Opt_nocrc:
+ args->flags |= CEPH_OPT_NOCRC;
+ break;
+ case Opt_noasyncreaddir:
+ args->flags |= CEPH_OPT_NOASYNCREADDIR;
+ break;
+
+ default:
+ BUG_ON(token);
+ }
+ }
+ return args;
+
+out:
+ kfree(args->mon_addr);
+ kfree(args);
+ return ERR_PTR(err);
+}
+
+static void destroy_mount_args(struct ceph_mount_args *args)
+{
+ dout("destroy_mount_args %p\n", args);
+ kfree(args->snapdir_name);
+ args->snapdir_name = NULL;
+ kfree(args->name);
+ args->name = NULL;
+ kfree(args->secret);
+ args->secret = NULL;
+ kfree(args);
+}
+
+/*
+ * create a fresh client instance
+ */
+static struct ceph_client *ceph_create_client(struct ceph_mount_args *args)
+{
+ struct ceph_client *client;
+ int err = -ENOMEM;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (client == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&client->mount_mutex);
+
+ init_waitqueue_head(&client->mount_wq);
+
+ client->sb = NULL;
+ client->mount_state = CEPH_MOUNT_MOUNTING;
+ client->mount_args = args;
+
+ client->msgr = NULL;
+
+ client->mount_err = 0;
+
+ err = bdi_init(&client->backing_dev_info);
+ if (err < 0)
+ goto fail;
+
+ err = -ENOMEM;
+ client->wb_wq = create_workqueue("ceph-writeback");
+ if (client->wb_wq == NULL)
+ goto fail_bdi;
+ client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
+ if (client->pg_inv_wq == NULL)
+ goto fail_wb_wq;
+ client->trunc_wq = create_singlethread_workqueue("ceph-trunc");
+ if (client->trunc_wq == NULL)
+ goto fail_pg_inv_wq;
+
+ /* set up mempools */
+ err = -ENOMEM;
+ client->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
+ client->mount_args->wsize >> PAGE_CACHE_SHIFT);
+ if (!client->wb_pagevec_pool)
+ goto fail_trunc_wq;
+
+
+ /* subsystems */
+ err = ceph_monc_init(&client->monc, client);
+ if (err < 0)
+ goto fail_mempool;
+ err = ceph_osdc_init(&client->osdc, client);
+ if (err < 0)
+ goto fail_monc;
+ err = ceph_mdsc_init(&client->mdsc, client);
+ if (err < 0)
+ goto fail_osdc;
+ return client;
+
+fail_osdc:
+ ceph_osdc_stop(&client->osdc);
+fail_monc:
+ ceph_monc_stop(&client->monc);
+fail_mempool:
+ mempool_destroy(client->wb_pagevec_pool);
+fail_trunc_wq:
+ destroy_workqueue(client->trunc_wq);
+fail_pg_inv_wq:
+ destroy_workqueue(client->pg_inv_wq);
+fail_wb_wq:
+ destroy_workqueue(client->wb_wq);
+fail_bdi:
+ bdi_destroy(&client->backing_dev_info);
+fail:
+ kfree(client);
+ return ERR_PTR(err);
+}
+
+static void ceph_destroy_client(struct ceph_client *client)
+{
+ dout("destroy_client %p\n", client);
+
+ /* unmount */
+ ceph_mdsc_stop(&client->mdsc);
+ ceph_monc_stop(&client->monc);
+ ceph_osdc_stop(&client->osdc);
+
+ ceph_debugfs_client_cleanup(client);
+ destroy_workqueue(client->wb_wq);
+ destroy_workqueue(client->pg_inv_wq);
+ destroy_workqueue(client->trunc_wq);
+
+ if (client->msgr)
+ ceph_messenger_destroy(client->msgr);
+ mempool_destroy(client->wb_pagevec_pool);
+
+ destroy_mount_args(client->mount_args);
+
+ kfree(client);
+ dout("destroy_client %p done\n", client);
+}
+
+/*
+ * Initially learn our fsid, or verify an fsid matches.
+ */
+int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
+{
+ if (client->have_fsid) {
+ if (ceph_fsid_compare(&client->fsid, fsid)) {
+ print_hex_dump(KERN_ERR, "this fsid: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ (void *)fsid, 16, 0);
+ print_hex_dump(KERN_ERR, " old fsid: ",
+ DUMP_PREFIX_NONE, 16, 1,
+ (void *)&client->fsid, 16, 0);
+ pr_err("fsid mismatch\n");
+ return -1;
+ }
+ } else {
+ pr_info("client%lld fsid " FSID_FORMAT "\n",
+ client->monc.auth->global_id, PR_FSID(fsid));
+ memcpy(&client->fsid, fsid, sizeof(*fsid));
+ ceph_debugfs_client_init(client);
+ client->have_fsid = true;
+ }
+ return 0;
+}
+
+/*
+ * true if we have the mon map (and have thus joined the cluster)
+ */
+static int have_mon_map(struct ceph_client *client)
+{
+ return client->monc.monmap && client->monc.monmap->epoch;
+}
+
+/*
+ * Bootstrap mount by opening the root directory. Note the mount
+ * @started time from caller, and time out if this takes too long.
+ */
+static struct dentry *open_root_dentry(struct ceph_client *client,
+ const char *path,
+ unsigned long started)
+{
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct ceph_mds_request *req = NULL;
+ int err;
+ struct dentry *root;
+
+ /* open dir */
+ dout("open_root_inode opening '%s'\n", path);
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
+ if (IS_ERR(req))
+ return ERR_PTR(PTR_ERR(req));
+ req->r_path1 = kstrdup(path, GFP_NOFS);
+ req->r_ino1.ino = CEPH_INO_ROOT;
+ req->r_ino1.snap = CEPH_NOSNAP;
+ req->r_started = started;
+ req->r_timeout = client->mount_args->mount_timeout * HZ;
+ req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
+ req->r_num_caps = 2;
+ err = ceph_mdsc_do_request(mdsc, NULL, req);
+ if (err == 0) {
+ dout("open_root_inode success\n");
+ if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
+ client->sb->s_root == NULL)
+ root = d_alloc_root(req->r_target_inode);
+ else
+ root = d_obtain_alias(req->r_target_inode);
+ req->r_target_inode = NULL;
+ dout("open_root_inode success, root dentry is %p\n", root);
+ } else {
+ root = ERR_PTR(err);
+ }
+ ceph_mdsc_put_request(req);
+ return root;
+}
+
+/*
+ * mount: join the ceph cluster, and open root directory.
+ */
+static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt,
+ const char *path)
+{
+ struct ceph_entity_addr *myaddr = NULL;
+ int err;
+ unsigned long timeout = client->mount_args->mount_timeout * HZ;
+ unsigned long started = jiffies; /* note the start time */
+ struct dentry *root;
+
+ dout("mount start\n");
+ mutex_lock(&client->mount_mutex);
+
+ /* initialize the messenger */
+ if (client->msgr == NULL) {
+ if (ceph_test_opt(client, MYIP))
+ myaddr = &client->mount_args->my_addr;
+ client->msgr = ceph_messenger_create(myaddr);
+ if (IS_ERR(client->msgr)) {
+ err = PTR_ERR(client->msgr);
+ client->msgr = NULL;
+ goto out;
+ }
+ client->msgr->nocrc = ceph_test_opt(client, NOCRC);
+ }
+
+ /* open session, and wait for mon, mds, and osd maps */
+ err = ceph_monc_open_session(&client->monc);
+ if (err < 0)
+ goto out;
+
+ while (!have_mon_map(client)) {
+ err = -EIO;
+ if (timeout && time_after_eq(jiffies, started + timeout))
+ goto out;
+
+ /* wait */
+ dout("mount waiting for mon_map\n");
+ err = wait_event_interruptible_timeout(client->mount_wq, /* FIXME */
+ have_mon_map(client) || (client->mount_err < 0),
+ timeout);
+ if (err == -EINTR || err == -ERESTARTSYS)
+ goto out;
+ if (client->mount_err < 0) {
+ err = client->mount_err;
+ goto out;
+ }
+ }
+
+ dout("mount opening root\n");
+ root = open_root_dentry(client, "", started);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ goto out;
+ }
+ if (client->sb->s_root)
+ dput(root);
+ else
+ client->sb->s_root = root;
+
+ if (path[0] == 0) {
+ dget(root);
+ } else {
+ dout("mount opening base mountpoint\n");
+ root = open_root_dentry(client, path, started);
+ if (IS_ERR(root)) {
+ err = PTR_ERR(root);
+ dput(client->sb->s_root);
+ client->sb->s_root = NULL;
+ goto out;
+ }
+ }
+
+ mnt->mnt_root = root;
+ mnt->mnt_sb = client->sb;
+
+ client->mount_state = CEPH_MOUNT_MOUNTED;
+ dout("mount success\n");
+ err = 0;
+
+out:
+ mutex_unlock(&client->mount_mutex);
+ return err;
+}
+
+static int ceph_set_super(struct super_block *s, void *data)
+{
+ struct ceph_client *client = data;
+ int ret;
+
+ dout("set_super %p data %p\n", s, data);
+
+ s->s_flags = client->mount_args->sb_flags;
+ s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
+
+ s->s_fs_info = client;
+ client->sb = s;
+
+ s->s_op = &ceph_super_ops;
+ s->s_export_op = &ceph_export_ops;
+
+ s->s_time_gran = 1000; /* 1000 ns == 1 us */
+
+ ret = set_anon_super(s, NULL); /* what is that second arg for? */
+ if (ret != 0)
+ goto fail;
+
+ return ret;
+
+fail:
+ s->s_fs_info = NULL;
+ client->sb = NULL;
+ return ret;
+}
+
+/*
+ * share superblock if same fs AND options
+ */
+static int ceph_compare_super(struct super_block *sb, void *data)
+{
+ struct ceph_client *new = data;
+ struct ceph_mount_args *args = new->mount_args;
+ struct ceph_client *other = ceph_sb_to_client(sb);
+ int i;
+
+ dout("ceph_compare_super %p\n", sb);
+ if (args->flags & CEPH_OPT_FSID) {
+ if (ceph_fsid_compare(&args->fsid, &other->fsid)) {
+ dout("fsid doesn't match\n");
+ return 0;
+ }
+ } else {
+ /* do we share (a) monitor? */
+ for (i = 0; i < new->monc.monmap->num_mon; i++)
+ if (ceph_monmap_contains(other->monc.monmap,
+ &new->monc.monmap->mon_inst[i].addr))
+ break;
+ if (i == new->monc.monmap->num_mon) {
+ dout("mon ip not part of monmap\n");
+ return 0;
+ }
+ dout("mon ip matches existing sb %p\n", sb);
+ }
+ if (args->sb_flags != other->mount_args->sb_flags) {
+ dout("flags differ\n");
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * construct our own bdi so we can control readahead, etc.
+ */
+static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client)
+{
+ int err;
+
+ sb->s_bdi = &client->backing_dev_info;
+
+ /* set ra_pages based on rsize mount option? */
+ if (client->mount_args->rsize >= PAGE_CACHE_SIZE)
+ client->backing_dev_info.ra_pages =
+ (client->mount_args->rsize + PAGE_CACHE_SIZE - 1)
+ >> PAGE_SHIFT;
+ err = bdi_register_dev(&client->backing_dev_info, sb->s_dev);
+ return err;
+}
+
+static int ceph_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ struct super_block *sb;
+ struct ceph_client *client;
+ int err;
+ int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
+ const char *path = NULL;
+ struct ceph_mount_args *args;
+
+ dout("ceph_get_sb\n");
+ args = parse_mount_args(flags, data, dev_name, &path);
+ if (IS_ERR(args)) {
+ err = PTR_ERR(args);
+ goto out_final;
+ }
+
+ /* create client (which we may/may not use) */
+ client = ceph_create_client(args);
+ if (IS_ERR(client)) {
+ err = PTR_ERR(client);
+ goto out_final;
+ }
+
+ if (client->mount_args->flags & CEPH_OPT_NOSHARE)
+ compare_super = NULL;
+ sb = sget(fs_type, compare_super, ceph_set_super, client);
+ if (IS_ERR(sb)) {
+ err = PTR_ERR(sb);
+ goto out;
+ }
+
+ if (ceph_client(sb) != client) {
+ ceph_destroy_client(client);
+ client = ceph_client(sb);
+ dout("get_sb got existing client %p\n", client);
+ } else {
+ dout("get_sb using new client %p\n", client);
+ err = ceph_register_bdi(sb, client);
+ if (err < 0)
+ goto out_splat;
+ }
+
+ err = ceph_mount(client, mnt, path);
+ if (err < 0)
+ goto out_splat;
+ dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
+ mnt->mnt_root->d_inode, ceph_vinop(mnt->mnt_root->d_inode));
+ return 0;
+
+out_splat:
+ ceph_mdsc_close_sessions(&client->mdsc);
+ up_write(&sb->s_umount);
+ deactivate_super(sb);
+ goto out_final;
+
+out:
+ ceph_destroy_client(client);
+out_final:
+ dout("ceph_get_sb fail %d\n", err);
+ return err;
+}
+
+static void ceph_kill_sb(struct super_block *s)
+{
+ struct ceph_client *client = ceph_sb_to_client(s);
+ dout("kill_sb %p\n", s);
+ ceph_mdsc_pre_umount(&client->mdsc);
+ kill_anon_super(s); /* will call put_super after sb is r/o */
+ bdi_unregister(&client->backing_dev_info);
+ bdi_destroy(&client->backing_dev_info);
+ ceph_destroy_client(client);
+}
+
+static struct file_system_type ceph_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ceph",
+ .get_sb = ceph_get_sb,
+ .kill_sb = ceph_kill_sb,
+ .fs_flags = FS_RENAME_DOES_D_MOVE,
+};
+
+#define _STRINGIFY(x) #x
+#define STRINGIFY(x) _STRINGIFY(x)
+
+static int __init init_ceph(void)
+{
+ int ret = 0;
+
+ ret = ceph_debugfs_init();
+ if (ret < 0)
+ goto out;
+
+ ret = ceph_msgr_init();
+ if (ret < 0)
+ goto out_debugfs;
+
+ ret = init_caches();
+ if (ret)
+ goto out_msgr;
+
+ ceph_caps_init();
+
+ ret = register_filesystem(&ceph_fs_type);
+ if (ret)
+ goto out_icache;
+
+ pr_info("loaded %d.%d.%d (mon/mds/osd proto %d/%d/%d)\n",
+ CEPH_VERSION_MAJOR, CEPH_VERSION_MINOR, CEPH_VERSION_PATCH,
+ CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL);
+ return 0;
+
+out_icache:
+ destroy_caches();
+out_msgr:
+ ceph_msgr_exit();
+out_debugfs:
+ ceph_debugfs_cleanup();
+out:
+ return ret;
+}
+
+static void __exit exit_ceph(void)
+{
+ dout("exit_ceph\n");
+ unregister_filesystem(&ceph_fs_type);
+ ceph_caps_finalize();
+ destroy_caches();
+ ceph_msgr_exit();
+ ceph_debugfs_cleanup();
+}
+
+module_init(init_ceph);
+module_exit(exit_ceph);
+
+MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
+MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
+MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
+MODULE_DESCRIPTION("Ceph filesystem for Linux");
+MODULE_LICENSE("GPL");
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
new file mode 100644
index 000000000000..de5e32414978
--- /dev/null
+++ b/fs/ceph/super.h
@@ -0,0 +1,895 @@
+#ifndef _FS_CEPH_SUPER_H
+#define _FS_CEPH_SUPER_H
+
+#include "ceph_debug.h"
+
+#include <asm/unaligned.h>
+#include <linux/backing-dev.h>
+#include <linux/completion.h>
+#include <linux/exportfs.h>
+#include <linux/fs.h>
+#include <linux/mempool.h>
+#include <linux/pagemap.h>
+#include <linux/wait.h>
+
+#include "types.h"
+#include "messenger.h"
+#include "msgpool.h"
+#include "mon_client.h"
+#include "mds_client.h"
+#include "osd_client.h"
+#include "ceph_fs.h"
+
+/* f_type in struct statfs */
+#define CEPH_SUPER_MAGIC 0x00c36400
+
+/* large granularity for statfs utilization stats to facilitate
+ * large volume sizes on 32-bit machines. */
+#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
+#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+
+/*
+ * mount options
+ */
+#define CEPH_OPT_FSID (1<<0)
+#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
+#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
+#define CEPH_OPT_DIRSTAT (1<<4) /* funky `cat dirname` for stats */
+#define CEPH_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
+#define CEPH_OPT_NOCRC (1<<6) /* no data crc on writes */
+#define CEPH_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
+
+#define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES)
+
+#define ceph_set_opt(client, opt) \
+ (client)->mount_args->flags |= CEPH_OPT_##opt;
+#define ceph_test_opt(client, opt) \
+ (!!((client)->mount_args->flags & CEPH_OPT_##opt))
+
+
+struct ceph_mount_args {
+ int sb_flags;
+ int num_mon;
+ struct ceph_entity_addr *mon_addr;
+ int flags;
+ int mount_timeout;
+ int caps_wanted_delay_min, caps_wanted_delay_max;
+ struct ceph_fsid fsid;
+ struct ceph_entity_addr my_addr;
+ int wsize;
+ int rsize; /* max readahead */
+ int max_readdir; /* max readdir size */
+ int osd_timeout;
+ char *snapdir_name; /* default ".snap" */
+ char *name;
+ char *secret;
+ int cap_release_safety;
+};
+
+/*
+ * defaults
+ */
+#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
+#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
+
+#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
+#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
+
+#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
+#define CEPH_AUTH_NAME_DEFAULT "guest"
+
+/*
+ * Delay telling the MDS we no longer want caps, in case we reopen
+ * the file. Delay a minimum amount of time, even if we send a cap
+ * message for some other reason. Otherwise, take the oppotunity to
+ * update the mds to avoid sending another message later.
+ */
+#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
+#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
+
+
+/* mount state */
+enum {
+ CEPH_MOUNT_MOUNTING,
+ CEPH_MOUNT_MOUNTED,
+ CEPH_MOUNT_UNMOUNTING,
+ CEPH_MOUNT_UNMOUNTED,
+ CEPH_MOUNT_SHUTDOWN,
+};
+
+/*
+ * subtract jiffies
+ */
+static inline unsigned long time_sub(unsigned long a, unsigned long b)
+{
+ BUG_ON(time_after(b, a));
+ return (long)a - (long)b;
+}
+
+/*
+ * per-filesystem client state
+ *
+ * possibly shared by multiple mount points, if they are
+ * mounting the same ceph filesystem/cluster.
+ */
+struct ceph_client {
+ struct ceph_fsid fsid;
+ bool have_fsid;
+
+ struct mutex mount_mutex; /* serialize mount attempts */
+ struct ceph_mount_args *mount_args;
+
+ struct super_block *sb;
+
+ unsigned long mount_state;
+ wait_queue_head_t mount_wq;
+
+ int mount_err;
+
+ struct ceph_messenger *msgr; /* messenger instance */
+ struct ceph_mon_client monc;
+ struct ceph_mds_client mdsc;
+ struct ceph_osd_client osdc;
+
+ /* writeback */
+ mempool_t *wb_pagevec_pool;
+ struct workqueue_struct *wb_wq;
+ struct workqueue_struct *pg_inv_wq;
+ struct workqueue_struct *trunc_wq;
+
+ struct backing_dev_info backing_dev_info;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_monmap;
+ struct dentry *debugfs_mdsmap, *debugfs_osdmap;
+ struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps;
+#endif
+};
+
+static inline struct ceph_client *ceph_client(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+
+/*
+ * File i/o capability. This tracks shared state with the metadata
+ * server that allows us to cache or writeback attributes or to read
+ * and write data. For any given inode, we should have one or more
+ * capabilities, one issued by each metadata server, and our
+ * cumulative access is the OR of all issued capabilities.
+ *
+ * Each cap is referenced by the inode's i_caps rbtree and by per-mds
+ * session capability lists.
+ */
+struct ceph_cap {
+ struct ceph_inode_info *ci;
+ struct rb_node ci_node; /* per-ci cap tree */
+ struct ceph_mds_session *session;
+ struct list_head session_caps; /* per-session caplist */
+ int mds;
+ u64 cap_id; /* unique cap id (mds provided) */
+ int issued; /* latest, from the mds */
+ int implemented; /* implemented superset of issued (for revocation) */
+ int mds_wanted;
+ u32 seq, issue_seq, mseq;
+ u32 cap_gen; /* active/stale cycle */
+ unsigned long last_used;
+ struct list_head caps_item;
+};
+
+#define CHECK_CAPS_NODELAY 1 /* do not delay any further */
+#define CHECK_CAPS_AUTHONLY 2 /* only check auth cap */
+#define CHECK_CAPS_FLUSH 4 /* flush any dirty caps */
+
+/*
+ * Snapped cap state that is pending flush to mds. When a snapshot occurs,
+ * we first complete any in-process sync writes and writeback any dirty
+ * data before flushing the snapped state (tracked here) back to the MDS.
+ */
+struct ceph_cap_snap {
+ atomic_t nref;
+ struct ceph_inode_info *ci;
+ struct list_head ci_item, flushing_item;
+
+ u64 follows, flush_tid;
+ int issued, dirty;
+ struct ceph_snap_context *context;
+
+ mode_t mode;
+ uid_t uid;
+ gid_t gid;
+
+ void *xattr_blob;
+ int xattr_len;
+ u64 xattr_version;
+
+ u64 size;
+ struct timespec mtime, atime, ctime;
+ u64 time_warp_seq;
+ int writing; /* a sync write is still in progress */
+ int dirty_pages; /* dirty pages awaiting writeback */
+};
+
+static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
+{
+ if (atomic_dec_and_test(&capsnap->nref))
+ kfree(capsnap);
+}
+
+/*
+ * The frag tree describes how a directory is fragmented, potentially across
+ * multiple metadata servers. It is also used to indicate points where
+ * metadata authority is delegated, and whether/where metadata is replicated.
+ *
+ * A _leaf_ frag will be present in the i_fragtree IFF there is
+ * delegation info. That is, if mds >= 0 || ndist > 0.
+ */
+#define CEPH_MAX_DIRFRAG_REP 4
+
+struct ceph_inode_frag {
+ struct rb_node node;
+
+ /* fragtree state */
+ u32 frag;
+ int split_by; /* i.e. 2^(split_by) children */
+
+ /* delegation and replication info */
+ int mds; /* -1 if same authority as parent */
+ int ndist; /* >0 if replicated */
+ int dist[CEPH_MAX_DIRFRAG_REP];
+};
+
+/*
+ * We cache inode xattrs as an encoded blob until they are first used,
+ * at which point we parse them into an rbtree.
+ */
+struct ceph_inode_xattr {
+ struct rb_node node;
+
+ const char *name;
+ int name_len;
+ const char *val;
+ int val_len;
+ int dirty;
+
+ int should_free_name;
+ int should_free_val;
+};
+
+struct ceph_inode_xattrs_info {
+ /*
+ * (still encoded) xattr blob. we avoid the overhead of parsing
+ * this until someone actually calls getxattr, etc.
+ *
+ * blob->vec.iov_len == 4 implies there are no xattrs; blob ==
+ * NULL means we don't know.
+ */
+ struct ceph_buffer *blob, *prealloc_blob;
+
+ struct rb_root index;
+ bool dirty;
+ int count;
+ int names_size;
+ int vals_size;
+ u64 version, index_version;
+};
+
+/*
+ * Ceph inode.
+ */
+#define CEPH_I_COMPLETE 1 /* we have complete directory cached */
+#define CEPH_I_NODELAY 4 /* do not delay cap release */
+#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
+
+struct ceph_inode_info {
+ struct ceph_vino i_vino; /* ceph ino + snap */
+
+ u64 i_version;
+ u32 i_time_warp_seq;
+
+ unsigned i_ceph_flags;
+ unsigned long i_release_count;
+
+ struct ceph_file_layout i_layout;
+ char *i_symlink;
+
+ /* for dirs */
+ struct timespec i_rctime;
+ u64 i_rbytes, i_rfiles, i_rsubdirs;
+ u64 i_files, i_subdirs;
+ u64 i_max_offset; /* largest readdir offset, set with I_COMPLETE */
+
+ struct rb_root i_fragtree;
+ struct mutex i_fragtree_mutex;
+
+ struct ceph_inode_xattrs_info i_xattrs;
+
+ /* capabilities. protected _both_ by i_lock and cap->session's
+ * s_mutex. */
+ struct rb_root i_caps; /* cap list */
+ struct ceph_cap *i_auth_cap; /* authoritative cap, if any */
+ unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */
+ struct list_head i_dirty_item, i_flushing_item;
+ u64 i_cap_flush_seq;
+ /* we need to track cap writeback on a per-cap-bit basis, to allow
+ * overlapping, pipelined cap flushes to the mds. we can probably
+ * reduce the tid to 8 bits if we're concerned about inode size. */
+ u16 i_cap_flush_last_tid, i_cap_flush_tid[CEPH_CAP_BITS];
+ wait_queue_head_t i_cap_wq; /* threads waiting on a capability */
+ unsigned long i_hold_caps_min; /* jiffies */
+ unsigned long i_hold_caps_max; /* jiffies */
+ struct list_head i_cap_delay_list; /* for delayed cap release to mds */
+ int i_cap_exporting_mds; /* to handle cap migration between */
+ unsigned i_cap_exporting_mseq; /* mds's. */
+ unsigned i_cap_exporting_issued;
+ struct ceph_cap_reservation i_cap_migration_resv;
+ struct list_head i_cap_snaps; /* snapped state pending flush to mds */
+ struct ceph_snap_context *i_head_snapc; /* set if wr_buffer_head > 0 */
+ unsigned i_snap_caps; /* cap bits for snapped files */
+
+ int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */
+
+ u32 i_truncate_seq; /* last truncate to smaller size */
+ u64 i_truncate_size; /* and the size we last truncated down to */
+ int i_truncate_pending; /* still need to call vmtruncate */
+
+ u64 i_max_size; /* max file size authorized by mds */
+ u64 i_reported_size; /* (max_)size reported to or requested of mds */
+ u64 i_wanted_max_size; /* offset we'd like to write too */
+ u64 i_requested_max_size; /* max_size we've requested */
+
+ /* held references to caps */
+ int i_pin_ref;
+ int i_rd_ref, i_rdcache_ref, i_wr_ref;
+ int i_wrbuffer_ref, i_wrbuffer_ref_head;
+ u32 i_shared_gen; /* increment each time we get FILE_SHARED */
+ u32 i_rdcache_gen; /* we increment this each time we get
+ FILE_CACHE. If it's non-zero, we
+ _may_ have cached pages. */
+ u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */
+
+ struct list_head i_unsafe_writes; /* uncommitted sync writes */
+ struct list_head i_unsafe_dirops; /* uncommitted mds dir ops */
+ spinlock_t i_unsafe_lock;
+
+ struct ceph_snap_realm *i_snap_realm; /* snap realm (if caps) */
+ int i_snap_realm_counter; /* snap realm (if caps) */
+ struct list_head i_snap_realm_item;
+ struct list_head i_snap_flush_item;
+
+ struct work_struct i_wb_work; /* writeback work */
+ struct work_struct i_pg_inv_work; /* page invalidation work */
+
+ struct work_struct i_vmtruncate_work;
+
+ struct inode vfs_inode; /* at end */
+};
+
+static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
+{
+ return container_of(inode, struct ceph_inode_info, vfs_inode);
+}
+
+static inline void ceph_i_clear(struct inode *inode, unsigned mask)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ ci->i_ceph_flags &= ~mask;
+ spin_unlock(&inode->i_lock);
+}
+
+static inline void ceph_i_set(struct inode *inode, unsigned mask)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ spin_lock(&inode->i_lock);
+ ci->i_ceph_flags |= mask;
+ spin_unlock(&inode->i_lock);
+}
+
+static inline bool ceph_i_test(struct inode *inode, unsigned mask)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool r;
+
+ smp_mb();
+ r = (ci->i_ceph_flags & mask) == mask;
+ return r;
+}
+
+
+/* find a specific frag @f */
+extern struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci,
+ u32 f);
+
+/*
+ * choose fragment for value @v. copy frag content to pfrag, if leaf
+ * exists
+ */
+extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
+ struct ceph_inode_frag *pfrag,
+ int *found);
+
+/*
+ * Ceph dentry state
+ */
+struct ceph_dentry_info {
+ struct ceph_mds_session *lease_session;
+ u32 lease_gen, lease_shared_gen;
+ u32 lease_seq;
+ unsigned long lease_renew_after, lease_renew_from;
+ struct list_head lru;
+ struct dentry *dentry;
+ u64 time;
+ u64 offset;
+};
+
+static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
+{
+ return (struct ceph_dentry_info *)dentry->d_fsdata;
+}
+
+static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
+{
+ return ((loff_t)frag << 32) | (loff_t)off;
+}
+
+/*
+ * ino_t is <64 bits on many architectures, blech.
+ *
+ * don't include snap in ino hash, at least for now.
+ */
+static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
+{
+ ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
+#if BITS_PER_LONG == 32
+ ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
+ if (!ino)
+ ino = 1;
+#endif
+ return ino;
+}
+
+static inline int ceph_set_ino_cb(struct inode *inode, void *data)
+{
+ ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
+ inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
+ return 0;
+}
+
+static inline struct ceph_vino ceph_vino(struct inode *inode)
+{
+ return ceph_inode(inode)->i_vino;
+}
+
+/* for printf-style formatting */
+#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
+
+static inline u64 ceph_ino(struct inode *inode)
+{
+ return ceph_inode(inode)->i_vino.ino;
+}
+static inline u64 ceph_snap(struct inode *inode)
+{
+ return ceph_inode(inode)->i_vino.snap;
+}
+
+static inline int ceph_ino_compare(struct inode *inode, void *data)
+{
+ struct ceph_vino *pvino = (struct ceph_vino *)data;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ return ci->i_vino.ino == pvino->ino &&
+ ci->i_vino.snap == pvino->snap;
+}
+
+static inline struct inode *ceph_find_inode(struct super_block *sb,
+ struct ceph_vino vino)
+{
+ ino_t t = ceph_vino_to_ino(vino);
+ return ilookup5(sb, t, ceph_ino_compare, &vino);
+}
+
+
+/*
+ * caps helpers
+ */
+static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci)
+{
+ return !RB_EMPTY_ROOT(&ci->i_caps);
+}
+
+extern int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented);
+extern int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int t);
+extern int __ceph_caps_issued_other(struct ceph_inode_info *ci,
+ struct ceph_cap *cap);
+
+static inline int ceph_caps_issued(struct ceph_inode_info *ci)
+{
+ int issued;
+ spin_lock(&ci->vfs_inode.i_lock);
+ issued = __ceph_caps_issued(ci, NULL);
+ spin_unlock(&ci->vfs_inode.i_lock);
+ return issued;
+}
+
+static inline int ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask,
+ int touch)
+{
+ int r;
+ spin_lock(&ci->vfs_inode.i_lock);
+ r = __ceph_caps_issued_mask(ci, mask, touch);
+ spin_unlock(&ci->vfs_inode.i_lock);
+ return r;
+}
+
+static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
+{
+ return ci->i_dirty_caps | ci->i_flushing_caps;
+}
+extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
+
+extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
+extern int __ceph_caps_used(struct ceph_inode_info *ci);
+
+extern int __ceph_caps_file_wanted(struct ceph_inode_info *ci);
+
+/*
+ * wanted, by virtue of open file modes AND cap refs (buffered/cached data)
+ */
+static inline int __ceph_caps_wanted(struct ceph_inode_info *ci)
+{
+ int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);
+ if (w & CEPH_CAP_FILE_BUFFER)
+ w |= CEPH_CAP_FILE_EXCL; /* we want EXCL if dirty data */
+ return w;
+}
+
+/* what the mds thinks we want */
+extern int __ceph_caps_mds_wanted(struct ceph_inode_info *ci);
+
+extern void ceph_caps_init(void);
+extern void ceph_caps_finalize(void);
+extern int ceph_reserve_caps(struct ceph_cap_reservation *ctx, int need);
+extern int ceph_unreserve_caps(struct ceph_cap_reservation *ctx);
+extern void ceph_reservation_status(struct ceph_client *client,
+ int *total, int *avail, int *used,
+ int *reserved);
+
+static inline struct ceph_client *ceph_inode_to_client(struct inode *inode)
+{
+ return (struct ceph_client *)inode->i_sb->s_fs_info;
+}
+
+static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb)
+{
+ return (struct ceph_client *)sb->s_fs_info;
+}
+
+static inline int ceph_queue_writeback(struct inode *inode)
+{
+ return queue_work(ceph_inode_to_client(inode)->wb_wq,
+ &ceph_inode(inode)->i_wb_work);
+}
+
+static inline int ceph_queue_page_invalidation(struct inode *inode)
+{
+ return queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
+ &ceph_inode(inode)->i_pg_inv_work);
+}
+
+
+/*
+ * we keep buffered readdir results attached to file->private_data
+ */
+struct ceph_file_info {
+ int fmode; /* initialized on open */
+
+ /* readdir: position within the dir */
+ u32 frag;
+ struct ceph_mds_request *last_readdir;
+ int at_end;
+
+ /* readdir: position within a frag */
+ unsigned offset; /* offset of last chunk, adjusted for . and .. */
+ u64 next_offset; /* offset of next chunk (last_name's + 1) */
+ char *last_name; /* last entry in previous chunk */
+ struct dentry *dentry; /* next dentry (for dcache readdir) */
+ unsigned long dir_release_count;
+
+ /* used for -o dirstat read() on directory thing */
+ char *dir_info;
+ int dir_info_len;
+};
+
+
+
+/*
+ * snapshots
+ */
+
+/*
+ * A "snap context" is the set of existing snapshots when we
+ * write data. It is used by the OSD to guide its COW behavior.
+ *
+ * The ceph_snap_context is refcounted, and attached to each dirty
+ * page, indicating which context the dirty data belonged when it was
+ * dirtied.
+ */
+struct ceph_snap_context {
+ atomic_t nref;
+ u64 seq;
+ int num_snaps;
+ u64 snaps[];
+};
+
+static inline struct ceph_snap_context *
+ceph_get_snap_context(struct ceph_snap_context *sc)
+{
+ /*
+ printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+ atomic_read(&sc->nref)+1);
+ */
+ if (sc)
+ atomic_inc(&sc->nref);
+ return sc;
+}
+
+static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
+{
+ if (!sc)
+ return;
+ /*
+ printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
+ atomic_read(&sc->nref)-1);
+ */
+ if (atomic_dec_and_test(&sc->nref)) {
+ /*printk(" deleting snap_context %p\n", sc);*/
+ kfree(sc);
+ }
+}
+
+/*
+ * A "snap realm" describes a subset of the file hierarchy sharing
+ * the same set of snapshots that apply to it. The realms themselves
+ * are organized into a hierarchy, such that children inherit (some of)
+ * the snapshots of their parents.
+ *
+ * All inodes within the realm that have capabilities are linked into a
+ * per-realm list.
+ */
+struct ceph_snap_realm {
+ u64 ino;
+ atomic_t nref;
+ u64 created, seq;
+ u64 parent_ino;
+ u64 parent_since; /* snapid when our current parent became so */
+
+ u64 *prior_parent_snaps; /* snaps inherited from any parents we */
+ int num_prior_parent_snaps; /* had prior to parent_since */
+ u64 *snaps; /* snaps specific to this realm */
+ int num_snaps;
+
+ struct ceph_snap_realm *parent;
+ struct list_head children; /* list of child realms */
+ struct list_head child_item;
+
+ struct list_head empty_item; /* if i have ref==0 */
+
+ /* the current set of snaps for this realm */
+ struct ceph_snap_context *cached_context;
+
+ struct list_head inodes_with_caps;
+ spinlock_t inodes_with_caps_lock;
+};
+
+
+
+/*
+ * calculate the number of pages a given length and offset map onto,
+ * if we align the data.
+ */
+static inline int calc_pages_for(u64 off, u64 len)
+{
+ return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) -
+ (off >> PAGE_CACHE_SHIFT);
+}
+
+
+
+/* snap.c */
+struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
+ u64 ino);
+extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm);
+extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
+ struct ceph_snap_realm *realm);
+extern int ceph_update_snap_trace(struct ceph_mds_client *m,
+ void *p, void *e, bool deletion);
+extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg);
+extern void ceph_queue_cap_snap(struct ceph_inode_info *ci,
+ struct ceph_snap_context *snapc);
+extern int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
+ struct ceph_cap_snap *capsnap);
+extern void ceph_cleanup_empty_realms(struct ceph_mds_client *mdsc);
+
+/*
+ * a cap_snap is "pending" if it is still awaiting an in-progress
+ * sync write (that may/may not still update size, mtime, etc.).
+ */
+static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
+{
+ return !list_empty(&ci->i_cap_snaps) &&
+ list_entry(ci->i_cap_snaps.prev, struct ceph_cap_snap,
+ ci_item)->writing;
+}
+
+
+/* super.c */
+extern struct kmem_cache *ceph_inode_cachep;
+extern struct kmem_cache *ceph_cap_cachep;
+extern struct kmem_cache *ceph_dentry_cachep;
+extern struct kmem_cache *ceph_file_cachep;
+
+extern const char *ceph_msg_type_name(int type);
+extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
+
+#define FSID_FORMAT "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-" \
+ "%02x%02x%02x%02x%02x%02x"
+#define PR_FSID(f) (f)->fsid[0], (f)->fsid[1], (f)->fsid[2], (f)->fsid[3], \
+ (f)->fsid[4], (f)->fsid[5], (f)->fsid[6], (f)->fsid[7], \
+ (f)->fsid[8], (f)->fsid[9], (f)->fsid[10], (f)->fsid[11], \
+ (f)->fsid[12], (f)->fsid[13], (f)->fsid[14], (f)->fsid[15]
+
+/* inode.c */
+extern const struct inode_operations ceph_file_iops;
+
+extern struct inode *ceph_alloc_inode(struct super_block *sb);
+extern void ceph_destroy_inode(struct inode *inode);
+
+extern struct inode *ceph_get_inode(struct super_block *sb,
+ struct ceph_vino vino);
+extern struct inode *ceph_get_snapdir(struct inode *parent);
+extern int ceph_fill_file_size(struct inode *inode, int issued,
+ u32 truncate_seq, u64 truncate_size, u64 size);
+extern void ceph_fill_file_time(struct inode *inode, int issued,
+ u64 time_warp_seq, struct timespec *ctime,
+ struct timespec *mtime, struct timespec *atime);
+extern int ceph_fill_trace(struct super_block *sb,
+ struct ceph_mds_request *req,
+ struct ceph_mds_session *session);
+extern int ceph_readdir_prepopulate(struct ceph_mds_request *req,
+ struct ceph_mds_session *session);
+
+extern int ceph_inode_holds_cap(struct inode *inode, int mask);
+
+extern int ceph_inode_set_size(struct inode *inode, loff_t size);
+extern void ceph_inode_writeback(struct work_struct *work);
+extern void ceph_vmtruncate_work(struct work_struct *work);
+extern void __ceph_do_pending_vmtruncate(struct inode *inode);
+extern void __ceph_queue_vmtruncate(struct inode *inode);
+
+extern int ceph_do_getattr(struct inode *inode, int mask);
+extern int ceph_permission(struct inode *inode, int mask);
+extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
+extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat);
+
+/* xattr.c */
+extern int ceph_setxattr(struct dentry *, const char *, const void *,
+ size_t, int);
+extern ssize_t ceph_getxattr(struct dentry *, const char *, void *, size_t);
+extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
+extern int ceph_removexattr(struct dentry *, const char *);
+extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
+
+/* caps.c */
+extern const char *ceph_cap_string(int c);
+extern void ceph_handle_caps(struct ceph_mds_session *session,
+ struct ceph_msg *msg);
+extern int ceph_add_cap(struct inode *inode,
+ struct ceph_mds_session *session, u64 cap_id,
+ int fmode, unsigned issued, unsigned wanted,
+ unsigned cap, unsigned seq, u64 realmino, int flags,
+ struct ceph_cap_reservation *caps_reservation);
+extern void __ceph_remove_cap(struct ceph_cap *cap,
+ struct ceph_cap_reservation *ctx);
+static inline void ceph_remove_cap(struct ceph_cap *cap)
+{
+ struct inode *inode = &cap->ci->vfs_inode;
+ spin_lock(&inode->i_lock);
+ __ceph_remove_cap(cap, NULL);
+ spin_unlock(&inode->i_lock);
+}
+
+extern void ceph_queue_caps_release(struct inode *inode);
+extern int ceph_write_inode(struct inode *inode, int unused);
+extern int ceph_fsync(struct file *file, struct dentry *dentry, int datasync);
+extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session);
+extern int ceph_get_cap_mds(struct inode *inode);
+extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
+extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
+extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
+ struct ceph_snap_context *snapc);
+extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession);
+extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
+ struct ceph_mds_session *session);
+extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
+extern void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc);
+
+extern int ceph_encode_inode_release(void **p, struct inode *inode,
+ int mds, int drop, int unless, int force);
+extern int ceph_encode_dentry_release(void **p, struct dentry *dn,
+ int mds, int drop, int unless);
+
+extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
+ int *got, loff_t endoff);
+
+/* for counting open files by mode */
+static inline void __ceph_get_fmode(struct ceph_inode_info *ci, int mode)
+{
+ ci->i_nr_by_mode[mode]++;
+}
+extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
+
+/* addr.c */
+extern const struct address_space_operations ceph_aops;
+extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
+
+/* file.c */
+extern const struct file_operations ceph_file_fops;
+extern const struct address_space_operations ceph_aops;
+extern int ceph_open(struct inode *inode, struct file *file);
+extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd, int mode,
+ int locked_dir);
+extern int ceph_release(struct inode *inode, struct file *filp);
+extern void ceph_release_page_vector(struct page **pages, int num_pages);
+
+/* dir.c */
+extern const struct file_operations ceph_dir_fops;
+extern const struct inode_operations ceph_dir_iops;
+extern struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
+ ceph_snapdir_dentry_ops;
+
+extern int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry);
+extern struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
+ struct dentry *dentry, int err);
+
+extern void ceph_dentry_lru_add(struct dentry *dn);
+extern void ceph_dentry_lru_touch(struct dentry *dn);
+extern void ceph_dentry_lru_del(struct dentry *dn);
+
+/*
+ * our d_ops vary depending on whether the inode is live,
+ * snapshotted (read-only), or a virtual ".snap" directory.
+ */
+int ceph_init_dentry(struct dentry *dentry);
+
+
+/* ioctl.c */
+extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+/* export.c */
+extern const struct export_operations ceph_export_ops;
+
+/* debugfs.c */
+extern int ceph_debugfs_init(void);
+extern void ceph_debugfs_cleanup(void);
+extern int ceph_debugfs_client_init(struct ceph_client *client);
+extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
+
+static inline struct inode *get_dentry_parent_inode(struct dentry *dentry)
+{
+ if (dentry && dentry->d_parent)
+ return dentry->d_parent->d_inode;
+
+ return NULL;
+}
+
+#endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/ceph/types.h b/fs/ceph/types.h
new file mode 100644
index 000000000000..28b35a005ec2
--- /dev/null
+++ b/fs/ceph/types.h
@@ -0,0 +1,29 @@
+#ifndef _FS_CEPH_TYPES_H
+#define _FS_CEPH_TYPES_H
+
+/* needed before including ceph_fs.h */
+#include <linux/in.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+
+#include "ceph_fs.h"
+#include "ceph_frag.h"
+#include "ceph_hash.h"
+
+/*
+ * Identify inodes by both their ino AND snapshot id (a u64).
+ */
+struct ceph_vino {
+ u64 ino;
+ u64 snap;
+};
+
+
+/* context for the caps reservation mechanism */
+struct ceph_cap_reservation {
+ int count;
+};
+
+
+#endif
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
new file mode 100644
index 000000000000..37d6ce645691
--- /dev/null
+++ b/fs/ceph/xattr.c
@@ -0,0 +1,844 @@
+#include "ceph_debug.h"
+#include "super.h"
+#include "decode.h"
+
+#include <linux/xattr.h>
+
+static bool ceph_is_valid_xattr(const char *name)
+{
+ return !strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN) ||
+ !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
+ !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+}
+
+/*
+ * These define virtual xattrs exposing the recursive directory
+ * statistics and layout metadata.
+ */
+struct ceph_vxattr_cb {
+ bool readonly;
+ char *name;
+ size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
+ size_t size);
+};
+
+/* directories */
+
+static size_t ceph_vxattrcb_entries(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs);
+}
+
+static size_t ceph_vxattrcb_files(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%lld", ci->i_files);
+}
+
+static size_t ceph_vxattrcb_subdirs(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%lld", ci->i_subdirs);
+}
+
+static size_t ceph_vxattrcb_rentries(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs);
+}
+
+static size_t ceph_vxattrcb_rfiles(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%lld", ci->i_rfiles);
+}
+
+static size_t ceph_vxattrcb_rsubdirs(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%lld", ci->i_rsubdirs);
+}
+
+static size_t ceph_vxattrcb_rbytes(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%lld", ci->i_rbytes);
+}
+
+static size_t ceph_vxattrcb_rctime(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ return snprintf(val, size, "%ld.%ld", (long)ci->i_rctime.tv_sec,
+ (long)ci->i_rctime.tv_nsec);
+}
+
+static struct ceph_vxattr_cb ceph_dir_vxattrs[] = {
+ { true, "user.ceph.dir.entries", ceph_vxattrcb_entries},
+ { true, "user.ceph.dir.files", ceph_vxattrcb_files},
+ { true, "user.ceph.dir.subdirs", ceph_vxattrcb_subdirs},
+ { true, "user.ceph.dir.rentries", ceph_vxattrcb_rentries},
+ { true, "user.ceph.dir.rfiles", ceph_vxattrcb_rfiles},
+ { true, "user.ceph.dir.rsubdirs", ceph_vxattrcb_rsubdirs},
+ { true, "user.ceph.dir.rbytes", ceph_vxattrcb_rbytes},
+ { true, "user.ceph.dir.rctime", ceph_vxattrcb_rctime},
+ { true, NULL, NULL }
+};
+
+/* files */
+
+static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
+ size_t size)
+{
+ int ret;
+
+ ret = snprintf(val, size,
+ "chunk_bytes=%lld\nstripe_count=%lld\nobject_size=%lld\n",
+ (unsigned long long)ceph_file_layout_su(ci->i_layout),
+ (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
+ (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
+ if (ceph_file_layout_pg_preferred(ci->i_layout))
+ ret += snprintf(val + ret, size, "preferred_osd=%lld\n",
+ (unsigned long long)ceph_file_layout_pg_preferred(
+ ci->i_layout));
+ return ret;
+}
+
+static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
+ { true, "user.ceph.layout", ceph_vxattrcb_layout},
+ { NULL, NULL }
+};
+
+static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
+{
+ if (S_ISDIR(inode->i_mode))
+ return ceph_dir_vxattrs;
+ else if (S_ISREG(inode->i_mode))
+ return ceph_file_vxattrs;
+ return NULL;
+}
+
+static struct ceph_vxattr_cb *ceph_match_vxattr(struct ceph_vxattr_cb *vxattr,
+ const char *name)
+{
+ do {
+ if (strcmp(vxattr->name, name) == 0)
+ return vxattr;
+ vxattr++;
+ } while (vxattr->name);
+ return NULL;
+}
+
+static int __set_xattr(struct ceph_inode_info *ci,
+ const char *name, int name_len,
+ const char *val, int val_len,
+ int dirty,
+ int should_free_name, int should_free_val,
+ struct ceph_inode_xattr **newxattr)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ceph_inode_xattr *xattr = NULL;
+ int c;
+ int new = 0;
+
+ p = &ci->i_xattrs.index.rb_node;
+ while (*p) {
+ parent = *p;
+ xattr = rb_entry(parent, struct ceph_inode_xattr, node);
+ c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
+ if (c < 0)
+ p = &(*p)->rb_left;
+ else if (c > 0)
+ p = &(*p)->rb_right;
+ else {
+ if (name_len == xattr->name_len)
+ break;
+ else if (name_len < xattr->name_len)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+ xattr = NULL;
+ }
+
+ if (!xattr) {
+ new = 1;
+ xattr = *newxattr;
+ xattr->name = name;
+ xattr->name_len = name_len;
+ xattr->should_free_name = should_free_name;
+
+ ci->i_xattrs.count++;
+ dout("__set_xattr count=%d\n", ci->i_xattrs.count);
+ } else {
+ kfree(*newxattr);
+ *newxattr = NULL;
+ if (xattr->should_free_val)
+ kfree((void *)xattr->val);
+
+ if (should_free_name) {
+ kfree((void *)name);
+ name = xattr->name;
+ }
+ ci->i_xattrs.names_size -= xattr->name_len;
+ ci->i_xattrs.vals_size -= xattr->val_len;
+ }
+ if (!xattr) {
+ pr_err("__set_xattr ENOMEM on %p %llx.%llx xattr %s=%s\n",
+ &ci->vfs_inode, ceph_vinop(&ci->vfs_inode), name,
+ xattr->val);
+ return -ENOMEM;
+ }
+ ci->i_xattrs.names_size += name_len;
+ ci->i_xattrs.vals_size += val_len;
+ if (val)
+ xattr->val = val;
+ else
+ xattr->val = "";
+
+ xattr->val_len = val_len;
+ xattr->dirty = dirty;
+ xattr->should_free_val = (val && should_free_val);
+
+ if (new) {
+ rb_link_node(&xattr->node, parent, p);
+ rb_insert_color(&xattr->node, &ci->i_xattrs.index);
+ dout("__set_xattr_val p=%p\n", p);
+ }
+
+ dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n",
+ ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val);
+
+ return 0;
+}
+
+static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
+ const char *name)
+{
+ struct rb_node **p;
+ struct rb_node *parent = NULL;
+ struct ceph_inode_xattr *xattr = NULL;
+ int c;
+
+ p = &ci->i_xattrs.index.rb_node;
+ while (*p) {
+ parent = *p;
+ xattr = rb_entry(parent, struct ceph_inode_xattr, node);
+ c = strncmp(name, xattr->name, xattr->name_len);
+ if (c < 0)
+ p = &(*p)->rb_left;
+ else if (c > 0)
+ p = &(*p)->rb_right;
+ else {
+ dout("__get_xattr %s: found %.*s\n", name,
+ xattr->val_len, xattr->val);
+ return xattr;
+ }
+ }
+
+ dout("__get_xattr %s: not found\n", name);
+
+ return NULL;
+}
+
+static void __free_xattr(struct ceph_inode_xattr *xattr)
+{
+ BUG_ON(!xattr);
+
+ if (xattr->should_free_name)
+ kfree((void *)xattr->name);
+ if (xattr->should_free_val)
+ kfree((void *)xattr->val);
+
+ kfree(xattr);
+}
+
+static int __remove_xattr(struct ceph_inode_info *ci,
+ struct ceph_inode_xattr *xattr)
+{
+ if (!xattr)
+ return -EOPNOTSUPP;
+
+ rb_erase(&xattr->node, &ci->i_xattrs.index);
+
+ if (xattr->should_free_name)
+ kfree((void *)xattr->name);
+ if (xattr->should_free_val)
+ kfree((void *)xattr->val);
+
+ ci->i_xattrs.names_size -= xattr->name_len;
+ ci->i_xattrs.vals_size -= xattr->val_len;
+ ci->i_xattrs.count--;
+ kfree(xattr);
+
+ return 0;
+}
+
+static int __remove_xattr_by_name(struct ceph_inode_info *ci,
+ const char *name)
+{
+ struct rb_node **p;
+ struct ceph_inode_xattr *xattr;
+ int err;
+
+ p = &ci->i_xattrs.index.rb_node;
+ xattr = __get_xattr(ci, name);
+ err = __remove_xattr(ci, xattr);
+ return err;
+}
+
+static char *__copy_xattr_names(struct ceph_inode_info *ci,
+ char *dest)
+{
+ struct rb_node *p;
+ struct ceph_inode_xattr *xattr = NULL;
+
+ p = rb_first(&ci->i_xattrs.index);
+ dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
+
+ while (p) {
+ xattr = rb_entry(p, struct ceph_inode_xattr, node);
+ memcpy(dest, xattr->name, xattr->name_len);
+ dest[xattr->name_len] = '\0';
+
+ dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
+ xattr->name_len, ci->i_xattrs.names_size);
+
+ dest += xattr->name_len + 1;
+ p = rb_next(p);
+ }
+
+ return dest;
+}
+
+void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
+{
+ struct rb_node *p, *tmp;
+ struct ceph_inode_xattr *xattr = NULL;
+
+ p = rb_first(&ci->i_xattrs.index);
+
+ dout("__ceph_destroy_xattrs p=%p\n", p);
+
+ while (p) {
+ xattr = rb_entry(p, struct ceph_inode_xattr, node);
+ tmp = p;
+ p = rb_next(tmp);
+ dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
+ xattr->name_len, xattr->name);
+ rb_erase(tmp, &ci->i_xattrs.index);
+
+ __free_xattr(xattr);
+ }
+
+ ci->i_xattrs.names_size = 0;
+ ci->i_xattrs.vals_size = 0;
+ ci->i_xattrs.index_version = 0;
+ ci->i_xattrs.count = 0;
+ ci->i_xattrs.index = RB_ROOT;
+}
+
+static int __build_xattrs(struct inode *inode)
+{
+ u32 namelen;
+ u32 numattr = 0;
+ void *p, *end;
+ u32 len;
+ const char *name, *val;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ int xattr_version;
+ struct ceph_inode_xattr **xattrs = NULL;
+ int err = 0;
+ int i;
+
+ dout("__build_xattrs() len=%d\n",
+ ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
+
+ if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
+ return 0; /* already built */
+
+ __ceph_destroy_xattrs(ci);
+
+start:
+ /* updated internal xattr rb tree */
+ if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
+ p = ci->i_xattrs.blob->vec.iov_base;
+ end = p + ci->i_xattrs.blob->vec.iov_len;
+ ceph_decode_32_safe(&p, end, numattr, bad);
+ xattr_version = ci->i_xattrs.version;
+ spin_unlock(&inode->i_lock);
+
+ xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
+ GFP_NOFS);
+ err = -ENOMEM;
+ if (!xattrs)
+ goto bad_lock;
+ memset(xattrs, 0, numattr*sizeof(struct ceph_xattr *));
+ for (i = 0; i < numattr; i++) {
+ xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
+ GFP_NOFS);
+ if (!xattrs[i])
+ goto bad_lock;
+ }
+
+ spin_lock(&inode->i_lock);
+ if (ci->i_xattrs.version != xattr_version) {
+ /* lost a race, retry */
+ for (i = 0; i < numattr; i++)
+ kfree(xattrs[i]);
+ kfree(xattrs);
+ goto start;
+ }
+ err = -EIO;
+ while (numattr--) {
+ ceph_decode_32_safe(&p, end, len, bad);
+ namelen = len;
+ name = p;
+ p += len;
+ ceph_decode_32_safe(&p, end, len, bad);
+ val = p;
+ p += len;
+
+ err = __set_xattr(ci, name, namelen, val, len,
+ 0, 0, 0, &xattrs[numattr]);
+
+ if (err < 0)
+ goto bad;
+ }
+ kfree(xattrs);
+ }
+ ci->i_xattrs.index_version = ci->i_xattrs.version;
+ ci->i_xattrs.dirty = false;
+
+ return err;
+bad_lock:
+ spin_lock(&inode->i_lock);
+bad:
+ if (xattrs) {
+ for (i = 0; i < numattr; i++)
+ kfree(xattrs[i]);
+ kfree(xattrs);
+ }
+ ci->i_xattrs.names_size = 0;
+ return err;
+}
+
+static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
+ int val_size)
+{
+ /*
+ * 4 bytes for the length, and additional 4 bytes per each xattr name,
+ * 4 bytes per each value
+ */
+ int size = 4 + ci->i_xattrs.count*(4 + 4) +
+ ci->i_xattrs.names_size +
+ ci->i_xattrs.vals_size;
+ dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
+ ci->i_xattrs.count, ci->i_xattrs.names_size,
+ ci->i_xattrs.vals_size);
+
+ if (name_size)
+ size += 4 + 4 + name_size + val_size;
+
+ return size;
+}
+
+/*
+ * If there are dirty xattrs, reencode xattrs into the prealloc_blob
+ * and swap into place.
+ */
+void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+{
+ struct rb_node *p;
+ struct ceph_inode_xattr *xattr = NULL;
+ void *dest;
+
+ dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
+ if (ci->i_xattrs.dirty) {
+ int need = __get_required_blob_size(ci, 0, 0);
+
+ BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);
+
+ p = rb_first(&ci->i_xattrs.index);
+ dest = ci->i_xattrs.prealloc_blob->vec.iov_base;
+
+ ceph_encode_32(&dest, ci->i_xattrs.count);
+ while (p) {
+ xattr = rb_entry(p, struct ceph_inode_xattr, node);
+
+ ceph_encode_32(&dest, xattr->name_len);
+ memcpy(dest, xattr->name, xattr->name_len);
+ dest += xattr->name_len;
+ ceph_encode_32(&dest, xattr->val_len);
+ memcpy(dest, xattr->val, xattr->val_len);
+ dest += xattr->val_len;
+
+ p = rb_next(p);
+ }
+
+ /* adjust buffer len; it may be larger than we need */
+ ci->i_xattrs.prealloc_blob->vec.iov_len =
+ dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
+
+ if (ci->i_xattrs.blob)
+ ceph_buffer_put(ci->i_xattrs.blob);
+ ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
+ ci->i_xattrs.prealloc_blob = NULL;
+ ci->i_xattrs.dirty = false;
+ }
+}
+
+ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value,
+ size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
+ int err;
+ struct ceph_inode_xattr *xattr;
+ struct ceph_vxattr_cb *vxattr = NULL;
+
+ if (!ceph_is_valid_xattr(name))
+ return -ENODATA;
+
+ /* let's see if a virtual xattr was requested */
+ if (vxattrs)
+ vxattr = ceph_match_vxattr(vxattrs, name);
+
+ spin_lock(&inode->i_lock);
+ dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
+ ci->i_xattrs.version, ci->i_xattrs.index_version);
+
+ if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
+ (ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
+ goto get_xattr;
+ } else {
+ spin_unlock(&inode->i_lock);
+ /* get xattrs from mds (if we don't already have them) */
+ err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
+ if (err)
+ return err;
+ }
+
+ spin_lock(&inode->i_lock);
+
+ if (vxattr && vxattr->readonly) {
+ err = vxattr->getxattr_cb(ci, value, size);
+ goto out;
+ }
+
+ err = __build_xattrs(inode);
+ if (err < 0)
+ goto out;
+
+get_xattr:
+ err = -ENODATA; /* == ENOATTR */
+ xattr = __get_xattr(ci, name);
+ if (!xattr) {
+ if (vxattr)
+ err = vxattr->getxattr_cb(ci, value, size);
+ goto out;
+ }
+
+ err = -ERANGE;
+ if (size && size < xattr->val_len)
+ goto out;
+
+ err = xattr->val_len;
+ if (size == 0)
+ goto out;
+
+ memcpy(value, xattr->val, xattr->val_len);
+
+out:
+ spin_unlock(&inode->i_lock);
+ return err;
+}
+
+ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
+ u32 vir_namelen = 0;
+ u32 namelen;
+ int err;
+ u32 len;
+ int i;
+
+ spin_lock(&inode->i_lock);
+ dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
+ ci->i_xattrs.version, ci->i_xattrs.index_version);
+
+ if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1) &&
+ (ci->i_xattrs.index_version > ci->i_xattrs.version)) {
+ goto list_xattr;
+ } else {
+ spin_unlock(&inode->i_lock);
+ err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
+ if (err)
+ return err;
+ }
+
+ spin_lock(&inode->i_lock);
+
+ err = __build_xattrs(inode);
+ if (err < 0)
+ goto out;
+
+list_xattr:
+ vir_namelen = 0;
+ /* include virtual dir xattrs */
+ if (vxattrs)
+ for (i = 0; vxattrs[i].name; i++)
+ vir_namelen += strlen(vxattrs[i].name) + 1;
+ /* adding 1 byte per each variable due to the null termination */
+ namelen = vir_namelen + ci->i_xattrs.names_size + ci->i_xattrs.count;
+ err = -ERANGE;
+ if (size && namelen > size)
+ goto out;
+
+ err = namelen;
+ if (size == 0)
+ goto out;
+
+ names = __copy_xattr_names(ci, names);
+
+ /* virtual xattr names, too */
+ if (vxattrs)
+ for (i = 0; vxattrs[i].name; i++) {
+ len = sprintf(names, "%s", vxattrs[i].name);
+ names += len + 1;
+ }
+
+out:
+ spin_unlock(&inode->i_lock);
+ return err;
+}
+
+static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
+ const char *value, size_t size, int flags)
+{
+ struct ceph_client *client = ceph_client(dentry->d_sb);
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct inode *parent_inode = dentry->d_parent->d_inode;
+ struct ceph_mds_request *req;
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ int err;
+ int i, nr_pages;
+ struct page **pages = NULL;
+ void *kaddr;
+
+ /* copy value into some pages */
+ nr_pages = calc_pages_for(0, size);
+ if (nr_pages) {
+ pages = kmalloc(sizeof(pages[0])*nr_pages, GFP_NOFS);
+ if (!pages)
+ return -ENOMEM;
+ err = -ENOMEM;
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = alloc_page(GFP_NOFS);
+ if (!pages[i]) {
+ nr_pages = i;
+ goto out;
+ }
+ kaddr = kmap(pages[i]);
+ memcpy(kaddr, value + i*PAGE_CACHE_SIZE,
+ min(PAGE_CACHE_SIZE, size-i*PAGE_CACHE_SIZE));
+ }
+ }
+
+ dout("setxattr value=%.*s\n", (int)size, value);
+
+ /* do request */
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
+ USE_AUTH_MDS);
+ if (IS_ERR(req)) {
+ err = PTR_ERR(req);
+ goto out;
+ }
+ req->r_inode = igrab(inode);
+ req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
+ req->r_num_caps = 1;
+ req->r_args.setxattr.flags = cpu_to_le32(flags);
+ req->r_path2 = kstrdup(name, GFP_NOFS);
+
+ req->r_pages = pages;
+ req->r_num_pages = nr_pages;
+ req->r_data_len = size;
+
+ dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
+ err = ceph_mdsc_do_request(mdsc, parent_inode, req);
+ ceph_mdsc_put_request(req);
+ dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
+
+out:
+ if (pages) {
+ for (i = 0; i < nr_pages; i++)
+ __free_page(pages[i]);
+ kfree(pages);
+ }
+ return err;
+}
+
+int ceph_setxattr(struct dentry *dentry, const char *name,
+ const void *value, size_t size, int flags)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
+ int err;
+ int name_len = strlen(name);
+ int val_len = size;
+ char *newname = NULL;
+ char *newval = NULL;
+ struct ceph_inode_xattr *xattr = NULL;
+ int issued;
+ int required_blob_size;
+
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return -EROFS;
+
+ if (!ceph_is_valid_xattr(name))
+ return -EOPNOTSUPP;
+
+ if (vxattrs) {
+ struct ceph_vxattr_cb *vxattr =
+ ceph_match_vxattr(vxattrs, name);
+ if (vxattr && vxattr->readonly)
+ return -EOPNOTSUPP;
+ }
+
+ /* preallocate memory for xattr name, value, index node */
+ err = -ENOMEM;
+ newname = kmalloc(name_len + 1, GFP_NOFS);
+ if (!newname)
+ goto out;
+ memcpy(newname, name, name_len + 1);
+
+ if (val_len) {
+ newval = kmalloc(val_len + 1, GFP_NOFS);
+ if (!newval)
+ goto out;
+ memcpy(newval, value, val_len);
+ newval[val_len] = '\0';
+ }
+
+ xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
+ if (!xattr)
+ goto out;
+
+ spin_lock(&inode->i_lock);
+retry:
+ issued = __ceph_caps_issued(ci, NULL);
+ if (!(issued & CEPH_CAP_XATTR_EXCL))
+ goto do_sync;
+ __build_xattrs(inode);
+
+ required_blob_size = __get_required_blob_size(ci, name_len, val_len);
+
+ if (!ci->i_xattrs.prealloc_blob ||
+ required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
+ struct ceph_buffer *blob = NULL;
+
+ spin_unlock(&inode->i_lock);
+ dout(" preaallocating new blob size=%d\n", required_blob_size);
+ blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
+ if (!blob)
+ goto out;
+ spin_lock(&inode->i_lock);
+ if (ci->i_xattrs.prealloc_blob)
+ ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+ ci->i_xattrs.prealloc_blob = blob;
+ goto retry;
+ }
+
+ dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
+ err = __set_xattr(ci, newname, name_len, newval,
+ val_len, 1, 1, 1, &xattr);
+ __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+ ci->i_xattrs.dirty = true;
+ inode->i_ctime = CURRENT_TIME;
+ spin_unlock(&inode->i_lock);
+
+ return err;
+
+do_sync:
+ spin_unlock(&inode->i_lock);
+ err = ceph_sync_setxattr(dentry, name, value, size, flags);
+out:
+ kfree(newname);
+ kfree(newval);
+ kfree(xattr);
+ return err;
+}
+
+static int ceph_send_removexattr(struct dentry *dentry, const char *name)
+{
+ struct ceph_client *client = ceph_client(dentry->d_sb);
+ struct ceph_mds_client *mdsc = &client->mdsc;
+ struct inode *inode = dentry->d_inode;
+ struct inode *parent_inode = dentry->d_parent->d_inode;
+ struct ceph_mds_request *req;
+ int err;
+
+ req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR,
+ USE_AUTH_MDS);
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req->r_inode = igrab(inode);
+ req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
+ req->r_num_caps = 1;
+ req->r_path2 = kstrdup(name, GFP_NOFS);
+
+ err = ceph_mdsc_do_request(mdsc, parent_inode, req);
+ ceph_mdsc_put_request(req);
+ return err;
+}
+
+int ceph_removexattr(struct dentry *dentry, const char *name)
+{
+ struct inode *inode = dentry->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
+ int issued;
+ int err;
+
+ if (ceph_snap(inode) != CEPH_NOSNAP)
+ return -EROFS;
+
+ if (!ceph_is_valid_xattr(name))
+ return -EOPNOTSUPP;
+
+ if (vxattrs) {
+ struct ceph_vxattr_cb *vxattr =
+ ceph_match_vxattr(vxattrs, name);
+ if (vxattr && vxattr->readonly)
+ return -EOPNOTSUPP;
+ }
+
+ spin_lock(&inode->i_lock);
+ __build_xattrs(inode);
+ issued = __ceph_caps_issued(ci, NULL);
+ dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
+
+ if (!(issued & CEPH_CAP_XATTR_EXCL))
+ goto do_sync;
+
+ err = __remove_xattr_by_name(ceph_inode(inode), name);
+ __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+ ci->i_xattrs.dirty = true;
+ inode->i_ctime = CURRENT_TIME;
+
+ spin_unlock(&inode->i_lock);
+
+ return err;
+do_sync:
+ spin_unlock(&inode->i_lock);
+ err = ceph_send_removexattr(dentry, name);
+ return err;
+}
+
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7b2600b380d7..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
+Version 1.62
+------------
+Add sockopt=TCP_NODELAY mount option.
+
Version 1.61
------------
Fix append problem to Samba servers (files opened with O_APPEND could
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.61"
+#define CIFS_VERSION "1.62"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
bool svlocal:1; /* local server or remote */
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
+ bool tcp_nodelay;
atomic_t inFlight; /* number of requests on the wire to server */
#ifdef CONFIG_CIFS_STATS2
atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 3bbcaa716b3c..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
unsigned int rsize;
unsigned int wsize;
- unsigned int sockopt;
+ bool sockopt_tcp_nodelay:1;
unsigned short int port;
char *prepath;
};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
simple_strtoul(value, &value, 0);
}
} else if (strnicmp(data, "sockopt", 5) == 0) {
- if (value && *value) {
- vol->sockopt =
- simple_strtoul(value, &value, 0);
+ if (!value || !*value) {
+ cERROR(1, ("no socket option specified"));
+ continue;
+ } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
+ vol->sockopt_tcp_nodelay = 1;
}
} else if (strnicmp(data, "netbiosname", 4) == 0) {
if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
+ tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
atomic_set(&tcp_ses->inFlight, 0);
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
ipv4_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
socket->sk->sk_rcvbuf = 140 * 1024;
}
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
ipv6_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
socket->sk->sk_sndtimeo = 5 * HZ;
+
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
server->ssocket = socket;
return rc;
diff --git a/fs/compat.c b/fs/compat.c
index 00d90c2e66f0..ecc343812595 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1168,11 +1168,10 @@ out:
if (iov != iovstack)
kfree(iov);
if ((ret + (type == READ)) > 0) {
- struct dentry *dentry = file->f_path.dentry;
if (type == READ)
- fsnotify_access(dentry);
+ fsnotify_access(file);
else
- fsnotify_modify(dentry);
+ fsnotify_modify(file);
}
return ret;
}
diff --git a/fs/dlm/member.c b/fs/dlm/member.c
index 84f70bfb0baf..b12532e553f8 100644
--- a/fs/dlm/member.c
+++ b/fs/dlm/member.c
@@ -312,7 +312,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
/*
* This in_recovery lock does two things:
* 1) Keeps this function from returning until all threads are out
- * of locking routines and locking is truely stopped.
+ * of locking routines and locking is truly stopped.
* 2) Keeps any new requests from being processed until it's unlocked
* when recovery is complete.
*/
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 9e944057001b..1744f17ce96e 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -191,13 +191,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
- && !(file->f_flags & O_RDONLY)) {
- rc = -EPERM;
- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
- "file must hence be opened RO\n", __func__);
- goto out;
- }
if (!ecryptfs_inode_to_private(inode)->lower_file) {
rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
if (rc) {
@@ -208,6 +201,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
goto out;
}
}
+ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
+ && !(file->f_flags & O_RDONLY)) {
+ rc = -EPERM;
+ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+ "file must hence be opened RO\n", __func__);
+ goto out;
+ }
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 429ca0b3ba08..b0747c4bad31 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -282,7 +282,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
goto out;
}
rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
- ecryptfs_dir_inode->i_sb, 1);
+ ecryptfs_dir_inode->i_sb,
+ ECRYPTFS_INTERPOSE_FLAG_D_ADD);
if (rc) {
printk(KERN_ERR "%s: Error interposing; rc = [%d]\n",
__func__, rc);
@@ -463,9 +464,6 @@ out_lock:
unlock_dir(lower_dir_dentry);
dput(lower_new_dentry);
dput(lower_old_dentry);
- d_drop(lower_old_dentry);
- d_drop(new_dentry);
- d_drop(old_dentry);
return rc;
}
@@ -614,6 +612,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
+ struct dentry *trap = NULL;
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
@@ -621,7 +620,17 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dget(lower_new_dentry);
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
- lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ /* source should not be ancestor of target */
+ if (trap == lower_old_dentry) {
+ rc = -EINVAL;
+ goto out_lock;
+ }
+ /* target should not be ancestor of source */
+ if (trap == lower_new_dentry) {
+ rc = -ENOTEMPTY;
+ goto out_lock;
+ }
rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (rc)
@@ -772,18 +781,23 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
}
/**
- * ecryptfs_truncate
+ * truncate_upper
* @dentry: The ecryptfs layer dentry
- * @new_length: The length to expand the file to
+ * @ia: Address of the ecryptfs inode's attributes
+ * @lower_ia: Address of the lower inode's attributes
*
* Function to handle truncations modifying the size of the file. Note
* that the file sizes are interpolated. When expanding, we are simply
- * writing strings of 0's out. When truncating, we need to modify the
- * underlying file size according to the page index interpolations.
+ * writing strings of 0's out. When truncating, we truncate the upper
+ * inode and update the lower_ia according to the page index
+ * interpolations. If ATTR_SIZE is set in lower_ia->ia_valid upon return,
+ * the caller must use lower_ia in a call to notify_change() to perform
+ * the truncation of the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+static int truncate_upper(struct dentry *dentry, struct iattr *ia,
+ struct iattr *lower_ia)
{
int rc = 0;
struct inode *inode = dentry->d_inode;
@@ -794,8 +808,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
loff_t lower_size_before_truncate;
loff_t lower_size_after_truncate;
- if (unlikely((new_length == i_size)))
+ if (unlikely((ia->ia_size == i_size))) {
+ lower_ia->ia_valid &= ~ATTR_SIZE;
goto out;
+ }
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
/* Set up a fake ecryptfs file, this is used to interface with
* the file in the underlying filesystem so that the
@@ -815,28 +831,30 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
&fake_ecryptfs_file,
ecryptfs_inode_to_private(dentry->d_inode)->lower_file);
/* Switch on growing or shrinking file */
- if (new_length > i_size) {
+ if (ia->ia_size > i_size) {
char zero[] = { 0x00 };
+ lower_ia->ia_valid &= ~ATTR_SIZE;
/* Write a single 0 at the last position of the file;
* this triggers code that will fill in 0's throughout
* the intermediate portion of the previous end of the
* file and the new and of the file */
rc = ecryptfs_write(&fake_ecryptfs_file, zero,
- (new_length - 1), 1);
- } else { /* new_length < i_size_read(inode) */
- /* We're chopping off all the pages down do the page
- * in which new_length is located. Fill in the end of
- * that page from (new_length & ~PAGE_CACHE_MASK) to
+ (ia->ia_size - 1), 1);
+ } else { /* ia->ia_size < i_size_read(inode) */
+ /* We're chopping off all the pages down to the page
+ * in which ia->ia_size is located. Fill in the end of
+ * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
* PAGE_CACHE_SIZE with zeros. */
size_t num_zeros = (PAGE_CACHE_SIZE
- - (new_length & ~PAGE_CACHE_MASK));
+ - (ia->ia_size & ~PAGE_CACHE_MASK));
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
- rc = vmtruncate(inode, new_length);
+ rc = vmtruncate(inode, ia->ia_size);
if (rc)
goto out_free;
- rc = vmtruncate(lower_dentry->d_inode, new_length);
+ lower_ia->ia_size = ia->ia_size;
+ lower_ia->ia_valid |= ATTR_SIZE;
goto out_free;
}
if (num_zeros) {
@@ -848,7 +866,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
goto out_free;
}
rc = ecryptfs_write(&fake_ecryptfs_file, zeros_virt,
- new_length, num_zeros);
+ ia->ia_size, num_zeros);
kfree(zeros_virt);
if (rc) {
printk(KERN_ERR "Error attempting to zero out "
@@ -857,7 +875,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
goto out_free;
}
}
- vmtruncate(inode, new_length);
+ vmtruncate(inode, ia->ia_size);
rc = ecryptfs_write_inode_size_to_metadata(inode);
if (rc) {
printk(KERN_ERR "Problem with "
@@ -870,10 +888,12 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
lower_size_before_truncate =
upper_size_to_lower_size(crypt_stat, i_size);
lower_size_after_truncate =
- upper_size_to_lower_size(crypt_stat, new_length);
- if (lower_size_after_truncate < lower_size_before_truncate)
- vmtruncate(lower_dentry->d_inode,
- lower_size_after_truncate);
+ upper_size_to_lower_size(crypt_stat, ia->ia_size);
+ if (lower_size_after_truncate < lower_size_before_truncate) {
+ lower_ia->ia_size = lower_size_after_truncate;
+ lower_ia->ia_valid |= ATTR_SIZE;
+ } else
+ lower_ia->ia_valid &= ~ATTR_SIZE;
}
out_free:
if (ecryptfs_file_to_private(&fake_ecryptfs_file))
@@ -883,6 +903,33 @@ out:
return rc;
}
+/**
+ * ecryptfs_truncate
+ * @dentry: The ecryptfs layer dentry
+ * @new_length: The length to expand the file to
+ *
+ * Simple function that handles the truncation of an eCryptfs inode and
+ * its corresponding lower inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+{
+ struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length };
+ struct iattr lower_ia = { .ia_valid = 0 };
+ int rc;
+
+ rc = truncate_upper(dentry, &ia, &lower_ia);
+ if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+ rc = notify_change(lower_dentry, &lower_ia);
+ mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ }
+ return rc;
+}
+
static int
ecryptfs_permission(struct inode *inode, int mask)
{
@@ -905,6 +952,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
{
int rc = 0;
struct dentry *lower_dentry;
+ struct iattr lower_ia;
struct inode *inode;
struct inode *lower_inode;
struct ecryptfs_crypt_stat *crypt_stat;
@@ -943,15 +991,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
}
}
mutex_unlock(&crypt_stat->cs_mutex);
+ memcpy(&lower_ia, ia, sizeof(lower_ia));
+ if (ia->ia_valid & ATTR_FILE)
+ lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
if (ia->ia_valid & ATTR_SIZE) {
- ecryptfs_printk(KERN_DEBUG,
- "ia->ia_valid = [0x%x] ATTR_SIZE" " = [0x%x]\n",
- ia->ia_valid, ATTR_SIZE);
- rc = ecryptfs_truncate(dentry, ia->ia_size);
- /* ecryptfs_truncate handles resizing of the lower file */
- ia->ia_valid &= ~ATTR_SIZE;
- ecryptfs_printk(KERN_DEBUG, "ia->ia_valid = [%x]\n",
- ia->ia_valid);
+ rc = truncate_upper(dentry, ia, &lower_ia);
if (rc < 0)
goto out;
}
@@ -960,11 +1004,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
* mode change is for clearing setuid/setgid bits. Allow lower fs
* to interpret this in its own way.
*/
- if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
- ia->ia_valid &= ~ATTR_MODE;
+ if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+ lower_ia.ia_valid &= ~ATTR_MODE;
mutex_lock(&lower_dentry->d_inode->i_mutex);
- rc = notify_change(lower_dentry, ia);
+ rc = notify_change(lower_dentry, &lower_ia);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
fsstack_copy_attr_all(inode, lower_inode);
diff --git a/fs/exec.c b/fs/exec.c
index 632b02e34ec7..ac52447e1e7b 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -129,7 +129,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- fsnotify_open(file->f_path.dentry);
+ fsnotify_open(file);
error = -ENOEXEC;
if(file->f_op) {
@@ -662,7 +662,7 @@ struct file *open_exec(const char *name)
if (file->f_path.mnt->mnt_flags & MNT_NOEXEC)
goto exit;
- fsnotify_open(file->f_path.dentry);
+ fsnotify_open(file);
err = deny_write_access(file);
if (err)
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 698a8636d39c..2afbcebeda71 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -738,13 +738,28 @@ static int exofs_write_begin_export(struct file *file,
fsdata);
}
+static int exofs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ /* According to comment in simple_write_end i_mutex is held */
+ loff_t i_size = inode->i_size;
+ int ret;
+
+ ret = simple_write_end(file, mapping,pos, len, copied, page, fsdata);
+ if (i_size != inode->i_size)
+ mark_inode_dirty(inode);
+ return ret;
+}
+
const struct address_space_operations exofs_aops = {
.readpage = exofs_readpage,
.readpages = exofs_readpages,
.writepage = exofs_writepage,
.writepages = exofs_writepages,
.write_begin = exofs_write_begin_export,
- .write_end = simple_write_end,
+ .write_end = exofs_write_end,
};
/******************************************************************************
diff --git a/fs/exofs/pnfs.h b/fs/exofs/pnfs.h
index 423033addd1f..c52e9888b8ab 100644
--- a/fs/exofs/pnfs.h
+++ b/fs/exofs/pnfs.h
@@ -15,13 +15,7 @@
#ifndef __EXOFS_PNFS_H__
#define __EXOFS_PNFS_H__
-#if defined(CONFIG_PNFS)
-
-
-/* FIXME: move this file to: linux/exportfs/pnfs_osd_xdr.h */
-#include "../nfs/objlayout/pnfs_osd_xdr.h"
-
-#else /* defined(CONFIG_PNFS) */
+#if ! defined(__PNFS_OSD_XDR_H__)
enum pnfs_iomode {
IOMODE_READ = 1,
@@ -46,6 +40,6 @@ struct pnfs_osd_data_map {
u32 odm_raid_algorithm;
};
-#endif /* else defined(CONFIG_PNFS) */
+#endif /* ! defined(__PNFS_OSD_XDR_H__) */
#endif /* __EXOFS_PNFS_H__ */
diff --git a/fs/ext3/file.c b/fs/ext3/file.c
index 388bbdfa0b4e..a86d3302cdc2 100644
--- a/fs/ext3/file.c
+++ b/fs/ext3/file.c
@@ -33,9 +33,9 @@
*/
static int ext3_release_file (struct inode * inode, struct file * filp)
{
- if (EXT3_I(inode)->i_state & EXT3_STATE_FLUSH_ON_CLOSE) {
+ if (ext3_test_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE)) {
filemap_flush(inode->i_mapping);
- EXT3_I(inode)->i_state &= ~EXT3_STATE_FLUSH_ON_CLOSE;
+ ext3_clear_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
}
/* if we are the last writer on the inode, drop the block reservation */
if ((filp->f_mode & FMODE_WRITE) &&
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 455e6e6e5cb9..44b53386ab8b 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1378,7 +1378,7 @@ static int ext3_journalled_write_end(struct file *file,
*/
if (pos + len > inode->i_size && ext3_can_truncate(inode))
ext3_orphan_add(handle, inode);
- EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+ ext3_set_inode_state(inode, EXT3_STATE_JDATA);
if (inode->i_size > EXT3_I(inode)->i_disksize) {
EXT3_I(inode)->i_disksize = inode->i_size;
ret2 = ext3_mark_inode_dirty(handle, inode);
@@ -1417,7 +1417,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
journal_t *journal;
int err;
- if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
+ if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
/*
* This is a REALLY heavyweight approach, but the use of
* bmap on dirty files is expected to be extremely rare:
@@ -1436,7 +1436,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
* everything they get.
*/
- EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
+ ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
journal = EXT3_JOURNAL(inode);
journal_lock_updates(journal);
err = journal_flush(journal);
@@ -1670,7 +1670,7 @@ static int ext3_journalled_writepage(struct page *page,
PAGE_CACHE_SIZE, NULL, write_end_fn);
if (ret == 0)
ret = err;
- EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+ ext3_set_inode_state(inode, EXT3_STATE_JDATA);
unlock_page(page);
} else {
/*
@@ -2402,7 +2402,7 @@ void ext3_truncate(struct inode *inode)
goto out_notrans;
if (inode->i_size == 0 && ext3_should_writeback_data(inode))
- ei->i_state |= EXT3_STATE_FLUSH_ON_CLOSE;
+ ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
/*
* We have to lock the EOF page here, because lock_page() nests
@@ -2721,7 +2721,7 @@ int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
{
/* We have all inode data except xattrs in memory here. */
return __ext3_get_inode_loc(inode, iloc,
- !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
+ !ext3_test_inode_state(inode, EXT3_STATE_XATTR));
}
void ext3_set_inode_flags(struct inode *inode)
@@ -2893,7 +2893,7 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
EXT3_GOOD_OLD_INODE_SIZE +
ei->i_extra_isize;
if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
- ei->i_state |= EXT3_STATE_XATTR;
+ ext3_set_inode_state(inode, EXT3_STATE_XATTR);
}
} else
ei->i_extra_isize = 0;
@@ -2955,7 +2955,7 @@ again:
/* For fields not not tracking in the in-memory inode,
* initialise them to zero for new inodes. */
- if (ei->i_state & EXT3_STATE_NEW)
+ if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
ext3_get_inode_flags(ei);
@@ -3052,7 +3052,7 @@ again:
rc = ext3_journal_dirty_metadata(handle, bh);
if (!err)
err = rc;
- ei->i_state &= ~EXT3_STATE_NEW;
+ ext3_clear_inode_state(inode, EXT3_STATE_NEW);
atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
out_brelse:
diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
index 66895ccf76c7..2d2fb2a85961 100644
--- a/fs/ext3/xattr.c
+++ b/fs/ext3/xattr.c
@@ -274,7 +274,7 @@ ext3_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
void *end;
int error;
- if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
+ if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
return -ENODATA;
error = ext3_get_inode_loc(inode, &iloc);
if (error)
@@ -403,7 +403,7 @@ ext3_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
void *end;
int error;
- if (!(EXT3_I(inode)->i_state & EXT3_STATE_XATTR))
+ if (!ext3_test_inode_state(inode, EXT3_STATE_XATTR))
return 0;
error = ext3_get_inode_loc(inode, &iloc);
if (error)
@@ -882,7 +882,7 @@ ext3_xattr_ibody_find(struct inode *inode, struct ext3_xattr_info *i,
is->s.base = is->s.first = IFIRST(header);
is->s.here = is->s.first;
is->s.end = (void *)raw_inode + EXT3_SB(inode->i_sb)->s_inode_size;
- if (EXT3_I(inode)->i_state & EXT3_STATE_XATTR) {
+ if (ext3_test_inode_state(inode, EXT3_STATE_XATTR)) {
error = ext3_xattr_check_names(IFIRST(header), is->s.end);
if (error)
return error;
@@ -914,10 +914,10 @@ ext3_xattr_ibody_set(handle_t *handle, struct inode *inode,
header = IHDR(inode, ext3_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT3_XATTR_MAGIC);
- EXT3_I(inode)->i_state |= EXT3_STATE_XATTR;
+ ext3_set_inode_state(inode, EXT3_STATE_XATTR);
} else {
header->h_magic = cpu_to_le32(0);
- EXT3_I(inode)->i_state &= ~EXT3_STATE_XATTR;
+ ext3_clear_inode_state(inode, EXT3_STATE_XATTR);
}
return 0;
}
@@ -967,10 +967,10 @@ ext3_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
if (error)
goto cleanup;
- if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
+ if (ext3_test_inode_state(inode, EXT3_STATE_NEW)) {
struct ext3_inode *raw_inode = ext3_raw_inode(&is.iloc);
memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
- EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;
+ ext3_clear_inode_state(inode, EXT3_STATE_NEW);
}
error = ext3_xattr_ibody_find(inode, &i, &is);
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index f565f24019b5..411c192a05fa 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -701,6 +701,15 @@ static int vfat_find(struct inode *dir, struct qstr *qname,
return fat_search_long(dir, qname->name, len, sinfo);
}
+/*
+ * (nfsd's) anonymous disconnected dentry?
+ * NOTE: !IS_ROOT() is not anonymous (I.e. d_splice_alias() did the job).
+ */
+static int vfat_d_anon_disconn(struct dentry *dentry)
+{
+ return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
+}
+
static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
@@ -729,11 +738,11 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
}
alias = d_find_alias(inode);
- if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
+ if (alias && !vfat_d_anon_disconn(alias)) {
/*
- * This inode has non DCACHE_DISCONNECTED dentry. This
- * means, the user did ->lookup() by an another name
- * (longname vs 8.3 alias of it) in past.
+ * This inode has non anonymous-DCACHE_DISCONNECTED
+ * dentry. This means, the user did ->lookup() by an
+ * another name (longname vs 8.3 alias of it) in past.
*
* Switch to new one for reason of locality if possible.
*/
@@ -743,7 +752,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
iput(inode);
unlock_super(sb);
return alias;
- }
+ } else
+ dput(alias);
+
out:
unlock_super(sb);
dentry->d_op = sb->s_root->d_op;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1a7c42c64ff4..36407692ee14 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -389,6 +389,17 @@ static int write_inode(struct inode *inode, int sync)
}
/*
+ * Commit the NFS unstable pages.
+ */
+static int commit_unstable_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ if (mapping->a_ops && mapping->a_ops->commit_unstable_pages)
+ return mapping->a_ops->commit_unstable_pages(mapping, wbc);
+ return 0;
+}
+
+/*
* Wait for writeback on an inode to complete.
*/
static void inode_wait_for_writeback(struct inode *inode)
@@ -475,6 +486,18 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
}
spin_lock(&inode_lock);
+ /*
+ * Special state for cleaning NFS unstable pages
+ */
+ if (inode->i_state & I_UNSTABLE_PAGES) {
+ int err;
+ inode->i_state &= ~I_UNSTABLE_PAGES;
+ spin_unlock(&inode_lock);
+ err = commit_unstable_pages(mapping, wbc);
+ if (ret == 0)
+ ret = err;
+ spin_lock(&inode_lock);
+ }
inode->i_state &= ~I_SYNC;
if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
@@ -533,6 +556,12 @@ select_queue:
inode->i_state |= I_DIRTY_PAGES;
redirty_tail(inode);
}
+ } else if (inode->i_state & I_UNSTABLE_PAGES) {
+ /*
+ * The inode has got yet more unstable pages to
+ * commit. Requeue...
+ */
+ redirty_tail(inode);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
@@ -1051,7 +1080,7 @@ void __mark_inode_dirty(struct inode *inode, int flags)
spin_lock(&inode_lock);
if ((inode->i_state & flags) != flags) {
- const int was_dirty = inode->i_state & I_DIRTY;
+ const int was_dirty = inode->i_state & (I_DIRTY|I_UNSTABLE_PAGES);
inode->i_state |= flags;
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7b8da9415267..0c1d0b82dcf1 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1061,8 +1061,8 @@ out:
int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
- struct inode *aspace = page->mapping->host;
- struct gfs2_sbd *sdp = aspace->i_sb->s_fs_info;
+ struct address_space *mapping = page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct buffer_head *bh, *head;
struct gfs2_bufdata *bd;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4eb308aa3234..a6abbae8a278 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -569,6 +569,40 @@ static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
return ret;
}
+/**
+ * gfs2_file_aio_write - Perform a write to a file
+ * @iocb: The io context
+ * @iov: The data to write
+ * @nr_segs: Number of @iov segments
+ * @pos: The file position
+ *
+ * We have to do a lock/unlock here to refresh the inode size for
+ * O_APPEND writes, otherwise we can land up writing at the wrong
+ * offset. There is still a race, but provided the app is using its
+ * own file locking, this will make O_APPEND work as expected.
+ *
+ */
+
+static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+
+ if (file->f_flags & O_APPEND) {
+ struct dentry *dentry = file->f_dentry;
+ struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ struct gfs2_holder gh;
+ int ret;
+
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ if (ret)
+ return ret;
+ gfs2_glock_dq_uninit(&gh);
+ }
+
+ return generic_file_aio_write(iocb, iov, nr_segs, pos);
+}
+
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
/**
@@ -711,7 +745,7 @@ const struct file_operations gfs2_file_fops = {
.read = do_sync_read,
.aio_read = generic_file_aio_read,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .aio_write = gfs2_file_aio_write,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
@@ -741,7 +775,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.read = do_sync_read,
.aio_read = generic_file_aio_read,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .aio_write = gfs2_file_aio_write,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f455a03a09e2..6e1e52626f2d 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -19,7 +19,6 @@
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
-#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
@@ -60,7 +59,6 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
-static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
@@ -154,12 +152,14 @@ static unsigned int gl_hash(const struct gfs2_sbd *sdp,
static void glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
- struct inode *aspace = gl->gl_aspace;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
+ struct kmem_cache *cachep = gfs2_glock_cachep;
- if (aspace)
- gfs2_aspace_put(aspace);
+ GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
trace_gfs2_glock_put(gl);
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
+ if (mapping)
+ cachep = gfs2_glock_aspace_cachep;
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(cachep, gl);
}
/**
@@ -712,7 +712,6 @@ static void glock_work_func(struct work_struct *work)
finish_xmote(gl, gl->gl_reply);
drop_ref = 1;
}
- down_read(&gfs2_umount_flush_sem);
spin_lock(&gl->gl_spin);
if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
@@ -725,7 +724,6 @@ static void glock_work_func(struct work_struct *work)
}
run_queue(gl, 0);
spin_unlock(&gl->gl_spin);
- up_read(&gfs2_umount_flush_sem);
if (!delay ||
queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
@@ -750,10 +748,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
const struct gfs2_glock_operations *glops, int create,
struct gfs2_glock **glp)
{
+ struct super_block *s = sdp->sd_vfs;
struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
struct gfs2_glock *gl, *tmp;
unsigned int hash = gl_hash(sdp, &name);
- int error;
+ struct address_space *mapping;
read_lock(gl_lock_addr(hash));
gl = search_bucket(hash, sdp, &name);
@@ -765,7 +764,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!create)
return -ENOENT;
- gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
+ if (glops->go_flags & GLOF_ASPACE)
+ gl = kmem_cache_alloc(gfs2_glock_aspace_cachep, GFP_KERNEL);
+ else
+ gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
if (!gl)
return -ENOMEM;
@@ -783,18 +785,18 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
- gl->gl_aspace = NULL;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
INIT_WORK(&gl->gl_delete, delete_work_func);
- /* If this glock protects actual on-disk data or metadata blocks,
- create a VFS inode to manage the pages/buffers holding them. */
- if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
- gl->gl_aspace = gfs2_aspace_get(sdp);
- if (!gl->gl_aspace) {
- error = -ENOMEM;
- goto fail;
- }
+ mapping = gfs2_glock2aspace(gl);
+ if (mapping) {
+ mapping->a_ops = &gfs2_meta_aops;
+ mapping->host = s->s_bdev->bd_inode;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ mapping->assoc_mapping = NULL;
+ mapping->backing_dev_info = s->s_bdi;
+ mapping->writeback_index = 0;
}
write_lock(gl_lock_addr(hash));
@@ -811,10 +813,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
*glp = gl;
return 0;
-
-fail:
- kmem_cache_free(gfs2_glock_cachep, gl);
- return error;
}
/**
@@ -1509,35 +1507,12 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
{
- unsigned long t;
unsigned int x;
- int cont;
- t = jiffies;
-
- for (;;) {
- cont = 0;
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
- if (examine_bucket(clear_glock, sdp, x))
- cont = 1;
- }
-
- if (!cont)
- break;
-
- if (time_after_eq(jiffies,
- t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
- fs_warn(sdp, "Unmount seems to be stalled. "
- "Dumping lock state...\n");
- gfs2_dump_lockstate(sdp);
- t = jiffies;
- }
-
- down_write(&gfs2_umount_flush_sem);
- invalidate_inodes(sdp->sd_vfs);
- up_write(&gfs2_umount_flush_sem);
- msleep(10);
- }
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(clear_glock, sdp, x);
+ flush_workqueue(glock_workqueue);
+ gfs2_dump_lockstate(sdp);
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 13f0bd228132..dac72614aed8 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -180,6 +180,13 @@ static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
return gl->gl_state == LM_ST_SHARED;
}
+static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
+{
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ return (struct address_space *)(gl + 1);
+ return NULL;
+}
+
int gfs2_glock_get(struct gfs2_sbd *sdp,
u64 number, const struct gfs2_glock_operations *glops,
int create, struct gfs2_glock **glp);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 78554acc0605..38e3749d476c 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -87,7 +87,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
static void rgrp_go_sync(struct gfs2_glock *gl)
{
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
@@ -113,7 +113,7 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
BUG_ON(!(flags & DIO_METADATA));
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
@@ -134,7 +134,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
static void inode_go_sync(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = gl->gl_object;
- struct address_space *metamapping = gl->gl_aspace->i_mapping;
+ struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
if (ip && !S_ISREG(ip->i_inode.i_mode))
@@ -183,7 +183,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
gfs2_assert_withdraw(gl->gl_sbd, !atomic_read(&gl->gl_ail_count));
if (flags & DIO_METADATA) {
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0);
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
@@ -282,7 +282,8 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
{
- return !gl->gl_aspace->i_mapping->nrpages;
+ const struct address_space *mapping = (const struct address_space *)(gl + 1);
+ return !mapping->nrpages;
}
/**
@@ -387,8 +388,7 @@ static void iopen_go_callback(struct gfs2_glock *gl)
struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
if (gl->gl_demote_state == LM_ST_UNLOCKED &&
- gl->gl_state == LM_ST_SHARED &&
- ip && test_bit(GIF_USER, &ip->i_flags)) {
+ gl->gl_state == LM_ST_SHARED && ip) {
gfs2_glock_hold(gl);
if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
gfs2_glock_put_nolock(gl);
@@ -407,6 +407,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -418,6 +419,7 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_dump = gfs2_rgrp_dump,
.go_type = LM_TYPE_RGRP,
.go_min_hold_time = HZ / 5,
+ .go_flags = GLOF_ASPACE,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4792200978c8..f93f9b911d43 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -162,6 +162,8 @@ struct gfs2_glock_operations {
void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
const unsigned long go_min_hold_time;
+ const unsigned long go_flags;
+#define GLOF_ASPACE 1
};
enum {
@@ -225,7 +227,6 @@ struct gfs2_glock {
struct gfs2_sbd *gl_sbd;
- struct inode *gl_aspace;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
struct delayed_work gl_work;
@@ -258,7 +259,6 @@ enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_SW_PAGED = 3,
- GIF_USER = 4, /* user inode, not metadata addr space */
};
@@ -451,7 +451,6 @@ struct gfs2_tune {
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_new_files_jdata;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
- unsigned int gt_stall_secs; /* Detects trouble! */
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 6e220f4eee7d..b1bf2694fb2b 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -45,7 +45,7 @@ static int iget_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
u64 *no_addr = opaque;
- if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
+ if (ip->i_no_addr == *no_addr)
return 1;
return 0;
@@ -58,7 +58,6 @@ static int iget_set(struct inode *inode, void *opaque)
inode->i_ino = (unsigned long)*no_addr;
ip->i_no_addr = *no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
@@ -84,7 +83,7 @@ static int iget_skip_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){
+ if (ip->i_no_addr == data->no_addr) {
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
data->skipped = 1;
return 0;
@@ -103,7 +102,6 @@ static int iget_skip_set(struct inode *inode, void *opaque)
return 1;
inode->i_ino = (unsigned long)(data->no_addr);
ip->i_no_addr = data->no_addr;
- set_bit(GIF_USER, &ip->i_flags);
return 0;
}
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 46df988323bc..094839e41dc3 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -29,7 +29,10 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
- kmem_cache_free(gfs2_glock_cachep, gl);
+ if (gl->gl_ops->go_flags & GLOF_ASPACE)
+ kmem_cache_free(gfs2_glock_aspace_cachep, gl);
+ else
+ kmem_cache_free(gfs2_glock_cachep, gl);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
ret |= LM_OUT_CANCELED;
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 5b31f7741a8f..a88fadc704bb 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -52,6 +52,22 @@ static void gfs2_init_glock_once(void *foo)
atomic_set(&gl->gl_ail_count, 0);
}
+static void gfs2_init_gl_aspace_once(void *foo)
+{
+ struct gfs2_glock *gl = foo;
+ struct address_space *mapping = (struct address_space *)(gl + 1);
+
+ gfs2_init_glock_once(gl);
+ memset(mapping, 0, sizeof(*mapping));
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ spin_lock_init(&mapping->tree_lock);
+ spin_lock_init(&mapping->i_mmap_lock);
+ INIT_LIST_HEAD(&mapping->private_list);
+ spin_lock_init(&mapping->private_lock);
+ INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
+ INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
+}
+
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
@@ -78,6 +94,14 @@ static int __init init_gfs2_fs(void)
if (!gfs2_glock_cachep)
goto fail;
+ gfs2_glock_aspace_cachep = kmem_cache_create("gfs2_glock (aspace)",
+ sizeof(struct gfs2_glock) +
+ sizeof(struct address_space),
+ 0, 0, gfs2_init_gl_aspace_once);
+
+ if (!gfs2_glock_aspace_cachep)
+ goto fail;
+
gfs2_inode_cachep = kmem_cache_create("gfs2_inode",
sizeof(struct gfs2_inode),
0, SLAB_RECLAIM_ACCOUNT|
@@ -144,6 +168,9 @@ fail:
if (gfs2_inode_cachep)
kmem_cache_destroy(gfs2_inode_cachep);
+ if (gfs2_glock_aspace_cachep)
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
+
if (gfs2_glock_cachep)
kmem_cache_destroy(gfs2_glock_cachep);
@@ -169,6 +196,7 @@ static void __exit exit_gfs2_fs(void)
kmem_cache_destroy(gfs2_rgrpd_cachep);
kmem_cache_destroy(gfs2_bufdata_cachep);
kmem_cache_destroy(gfs2_inode_cachep);
+ kmem_cache_destroy(gfs2_glock_aspace_cachep);
kmem_cache_destroy(gfs2_glock_cachep);
gfs2_sys_uninit();
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index cb8d7a93d5ec..0bb12c80937a 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -93,49 +93,13 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
return err;
}
-static const struct address_space_operations aspace_aops = {
+const struct address_space_operations gfs2_meta_aops = {
.writepage = gfs2_aspace_writepage,
.releasepage = gfs2_releasepage,
.sync_page = block_sync_page,
};
/**
- * gfs2_aspace_get - Create and initialize a struct inode structure
- * @sdp: the filesystem the aspace is in
- *
- * Right now a struct inode is just a struct inode. Maybe Linux
- * will supply a more lightweight address space construct (that works)
- * in the future.
- *
- * Make sure pages/buffers in this aspace aren't in high memory.
- *
- * Returns: the aspace
- */
-
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
-{
- struct inode *aspace;
- struct gfs2_inode *ip;
-
- aspace = new_inode(sdp->sd_vfs);
- if (aspace) {
- mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
- aspace->i_mapping->a_ops = &aspace_aops;
- aspace->i_size = ~0ULL;
- ip = GFS2_I(aspace);
- clear_bit(GIF_USER, &ip->i_flags);
- insert_inode_hash(aspace);
- }
- return aspace;
-}
-
-void gfs2_aspace_put(struct inode *aspace)
-{
- remove_inode_hash(aspace);
- iput(aspace);
-}
-
-/**
* gfs2_meta_sync - Sync all buffers associated with a glock
* @gl: The glock
*
@@ -143,7 +107,7 @@ void gfs2_aspace_put(struct inode *aspace)
void gfs2_meta_sync(struct gfs2_glock *gl)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
int error;
filemap_fdatawrite(mapping);
@@ -164,7 +128,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
- struct address_space *mapping = gl->gl_aspace->i_mapping;
+ struct address_space *mapping = gfs2_glock2aspace(gl);
struct gfs2_sbd *sdp = gl->gl_sbd;
struct page *page;
struct buffer_head *bh;
@@ -344,8 +308,10 @@ void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta)
{
- struct gfs2_sbd *sdp = GFS2_SB(bh->b_page->mapping->host);
+ struct address_space *mapping = bh->b_page->mapping;
+ struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
+
if (test_clear_buffer_pinned(bh)) {
list_del_init(&bd->bd_le.le_list);
if (meta) {
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index de270c2f9b63..6a1d9ba16411 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -37,8 +37,16 @@ static inline void gfs2_buffer_copy_tail(struct buffer_head *to_bh,
0, from_head - to_head);
}
-struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp);
-void gfs2_aspace_put(struct inode *aspace);
+extern const struct address_space_operations gfs2_meta_aops;
+
+static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
+{
+ struct inode *inode = mapping->host;
+ if (mapping->a_ops == &gfs2_meta_aops)
+ return (((struct gfs2_glock *)mapping) - 1)->gl_sbd;
+ else
+ return inode->i_sb->s_fs_info;
+}
void gfs2_meta_sync(struct gfs2_glock *gl);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index edfee24f3636..968a99fcff9a 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -65,7 +65,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_scale_den = 1;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18;
- gt->gt_stall_secs = 600;
gt->gt_complain_secs = 10;
}
@@ -1231,10 +1230,9 @@ fail_sb:
fail_locking:
init_locking(sdp, &mount_gh, UNDO);
fail_lm:
+ invalidate_inodes(sb);
gfs2_gl_hash_clear(sdp);
gfs2_lm_unmount(sdp);
- while (invalidate_inodes(sb))
- yield();
fail_sys:
gfs2_sys_fs_del(sdp);
fail:
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 247436c10deb..84350e1be66d 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -748,7 +748,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
struct gfs2_rgrpd *nrgd;
unsigned int num_gh;
int dir_rename = 0;
- int alloc_required;
+ int alloc_required = 0;
unsigned int x;
int error;
@@ -867,7 +867,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
}
- alloc_required = error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name);
+ if (nip == NULL)
+ alloc_required = gfs2_diradd_alloc_required(ndir, &ndentry->d_name);
+ error = alloc_required;
if (error < 0)
goto out_gunlock;
error = 0;
@@ -1086,7 +1088,8 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
error = vfs_follow_link(nd, buf);
if (buf != array)
kfree(buf);
- }
+ } else
+ path_put(&nd->path);
return ERR_PTR(error);
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c282ad41f3d1..c008b08c3c3c 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -721,8 +721,7 @@ static int gfs2_write_inode(struct inode *inode, int sync)
int ret = 0;
/* Check this is a "normal" inode, etc */
- if (!test_bit(GIF_USER, &ip->i_flags) ||
- (current->flags & PF_MEMALLOC))
+ if (current->flags & PF_MEMALLOC)
return 0;
ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (ret)
@@ -859,6 +858,7 @@ restart:
gfs2_clear_rgrpd(sdp);
gfs2_jindex_free(sdp);
/* Take apart glock structures and buffer lists */
+ invalidate_inodes(sdp->sd_vfs);
gfs2_gl_hash_clear(sdp);
/* Unmount the locking protocol */
gfs2_lm_unmount(sdp);
@@ -1193,7 +1193,7 @@ static void gfs2_drop_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
- if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
+ if (inode->i_nlink) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
@@ -1211,18 +1211,12 @@ static void gfs2_clear_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
- /* This tells us its a "real" inode and not one which only
- * serves to contain an address space (see rgrp.c, meta_io.c)
- * which therefore doesn't have its own glocks.
- */
- if (test_bit(GIF_USER, &ip->i_flags)) {
- ip->i_gl->gl_object = NULL;
- gfs2_glock_put(ip->i_gl);
- ip->i_gl = NULL;
- if (ip->i_iopen_gh.gh_gl) {
- ip->i_iopen_gh.gh_gl->gl_object = NULL;
- gfs2_glock_dq_uninit(&ip->i_iopen_gh);
- }
+ ip->i_gl->gl_object = NULL;
+ gfs2_glock_put(ip->i_gl);
+ ip->i_gl = NULL;
+ if (ip->i_iopen_gh.gh_gl) {
+ ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
}
@@ -1357,9 +1351,6 @@ static void gfs2_delete_inode(struct inode *inode)
struct gfs2_holder gh;
int error;
- if (!test_bit(GIF_USER, &ip->i_flags))
- goto out;
-
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
if (unlikely(error)) {
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 0dc34621f6a6..a0db1c94317d 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -478,7 +478,6 @@ TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(quota_simul_sync, 1);
-TUNE_ATTR(stall_secs, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
@@ -491,7 +490,6 @@ static struct attribute *tune_attrs[] = {
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
&tune_attr_quota_simul_sync.attr,
- &tune_attr_stall_secs.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_quota_scale.attr,
&tune_attr_new_files_jdata.attr,
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index f6a7efa34eb9..226f2bfbf16a 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -21,6 +21,7 @@
#include "util.h"
struct kmem_cache *gfs2_glock_cachep __read_mostly;
+struct kmem_cache *gfs2_glock_aspace_cachep __read_mostly;
struct kmem_cache *gfs2_inode_cachep __read_mostly;
struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_rgrpd_cachep __read_mostly;
diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
index 33e96b0ce9ab..b432e04600de 100644
--- a/fs/gfs2/util.h
+++ b/fs/gfs2/util.h
@@ -145,6 +145,7 @@ gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
extern struct kmem_cache *gfs2_glock_cachep;
+extern struct kmem_cache *gfs2_glock_aspace_cachep;
extern struct kmem_cache *gfs2_inode_cachep;
extern struct kmem_cache *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_rgrpd_cachep;
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8a04108e0c22..c2ebdf2c01d4 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1296,6 +1296,7 @@ fail:
int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_location el;
struct buffer_head *dibh;
int error;
@@ -1305,16 +1306,17 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
return error;
if (GFS2_EA_IS_STUFFED(el.el_ea)) {
- error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
- if (error)
- return error;
-
- gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
- memcpy(GFS2_EA2DATA(el.el_ea), data,
- GFS2_EA_DATA_LEN(el.el_ea));
- } else
+ error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0);
+ if (error == 0) {
+ gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
+ memcpy(GFS2_EA2DATA(el.el_ea), data,
+ GFS2_EA_DATA_LEN(el.el_ea));
+ }
+ } else {
error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
+ }
+ brelse(el.el_bh);
if (error)
return error;
@@ -1327,8 +1329,7 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
brelse(dibh);
}
- gfs2_trans_end(GFS2_SB(&ip->i_inode));
-
+ gfs2_trans_end(sdp);
return error;
}
diff --git a/fs/inode.c b/fs/inode.c
index 03dfeb2e3928..e1924991d8c4 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -21,7 +21,6 @@
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
-#include <linux/inotify.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
@@ -265,12 +264,8 @@ void inode_init_once(struct inode *inode)
INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
i_size_ordered_init(inode);
-#ifdef CONFIG_INOTIFY
- INIT_LIST_HEAD(&inode->inotify_watches);
- mutex_init(&inode->inotify_mutex);
-#endif
#ifdef CONFIG_FSNOTIFY
- INIT_HLIST_HEAD(&inode->i_fsnotify_mark_entries);
+ INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);
@@ -417,7 +412,6 @@ int invalidate_inodes(struct super_block *sb)
down_write(&iprune_sem);
spin_lock(&inode_lock);
- inotify_unmount_inodes(&sb->s_inodes);
fsnotify_unmount_inodes(&sb->s_inodes);
busy = invalidate_list(&sb->s_inodes, &throw_away);
spin_unlock(&inode_lock);
diff --git a/fs/locks.c b/fs/locks.c
index a8794f233bc9..cde572db112f 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1454,7 +1454,7 @@ EXPORT_SYMBOL(generic_setlease);
* leases held by processes on this node.
*
* There is also no break_lease method; filesystems that
- * handle their own leases shoud break leases themselves from the
+ * handle their own leases should break leases themselves from the
* filesystem's open, create, and (on truncate) setattr methods.
*
* Warning: the only current setlease methods exist only to disable
diff --git a/fs/logfs/Kconfig b/fs/logfs/Kconfig
new file mode 100644
index 000000000000..daf9a9b32dd3
--- /dev/null
+++ b/fs/logfs/Kconfig
@@ -0,0 +1,17 @@
+config LOGFS
+ tristate "LogFS file system (EXPERIMENTAL)"
+ depends on (MTD || BLOCK) && EXPERIMENTAL
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ select CRC32
+ select BTREE
+ help
+ Flash filesystem aimed to scale efficiently to large devices.
+ In comparison to JFFS2 it offers significantly faster mount
+ times and potentially less RAM usage, although the latter has
+ not been measured yet.
+
+ In its current state it is still very experimental and should
+ not be used for other than testing purposes.
+
+ If unsure, say N.
diff --git a/fs/logfs/Makefile b/fs/logfs/Makefile
new file mode 100644
index 000000000000..4820027787ee
--- /dev/null
+++ b/fs/logfs/Makefile
@@ -0,0 +1,13 @@
+obj-$(CONFIG_LOGFS) += logfs.o
+
+logfs-y += compr.o
+logfs-y += dir.o
+logfs-y += file.o
+logfs-y += gc.o
+logfs-y += inode.o
+logfs-y += journal.o
+logfs-y += readwrite.o
+logfs-y += segment.o
+logfs-y += super.o
+logfs-$(CONFIG_BLOCK) += dev_bdev.o
+logfs-$(CONFIG_MTD) += dev_mtd.o
diff --git a/fs/logfs/compr.c b/fs/logfs/compr.c
new file mode 100644
index 000000000000..44bbfd249abc
--- /dev/null
+++ b/fs/logfs/compr.c
@@ -0,0 +1,95 @@
+/*
+ * fs/logfs/compr.c - compression routines
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+#include <linux/vmalloc.h>
+#include <linux/zlib.h>
+
+#define COMPR_LEVEL 3
+
+static DEFINE_MUTEX(compr_mutex);
+static struct z_stream_s stream;
+
+int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ mutex_lock(&compr_mutex);
+ err = zlib_deflateInit(&stream, COMPR_LEVEL);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ mutex_unlock(&compr_mutex);
+ return ret;
+}
+
+int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ mutex_lock(&compr_mutex);
+ err = zlib_inflateInit(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_inflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_inflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ ret = 0;
+error:
+ mutex_unlock(&compr_mutex);
+ return ret;
+}
+
+int __init logfs_compr_init(void)
+{
+ size_t size = max(zlib_deflate_workspacesize(),
+ zlib_inflate_workspacesize());
+ stream.workspace = vmalloc(size);
+ if (!stream.workspace)
+ return -ENOMEM;
+ return 0;
+}
+
+void logfs_compr_exit(void)
+{
+ vfree(stream.workspace);
+}
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
new file mode 100644
index 000000000000..58a057b6e1af
--- /dev/null
+++ b/fs/logfs/dev_bdev.c
@@ -0,0 +1,263 @@
+/*
+ * fs/logfs/dev_bdev.c - Device access methods for block devices
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/buffer_head.h>
+
+#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
+
+static void request_complete(struct bio *bio, int err)
+{
+ complete((struct completion *)bio->bi_private);
+}
+
+static int sync_request(struct page *page, struct block_device *bdev, int rw)
+{
+ struct bio bio;
+ struct bio_vec bio_vec;
+ struct completion complete;
+
+ bio_init(&bio);
+ bio.bi_io_vec = &bio_vec;
+ bio_vec.bv_page = page;
+ bio_vec.bv_len = PAGE_SIZE;
+ bio_vec.bv_offset = 0;
+ bio.bi_vcnt = 1;
+ bio.bi_idx = 0;
+ bio.bi_size = PAGE_SIZE;
+ bio.bi_bdev = bdev;
+ bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+ init_completion(&complete);
+ bio.bi_private = &complete;
+ bio.bi_end_io = request_complete;
+
+ submit_bio(rw, &bio);
+ generic_unplug_device(bdev_get_queue(bdev));
+ wait_for_completion(&complete);
+ return test_bit(BIO_UPTODATE, &bio.bi_flags) ? 0 : -EIO;
+}
+
+static int bdev_readpage(void *_sb, struct page *page)
+{
+ struct super_block *sb = _sb;
+ struct block_device *bdev = logfs_super(sb)->s_bdev;
+ int err;
+
+ err = sync_request(page, bdev, READ);
+ if (err) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ ClearPageError(page);
+ }
+ unlock_page(page);
+ return err;
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(wq);
+
+static void writeseg_end_io(struct bio *bio, int err)
+{
+ const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
+ struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
+ struct super_block *sb = bio->bi_private;
+ struct logfs_super *super = logfs_super(sb);
+ struct page *page;
+
+ BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
+ BUG_ON(err);
+ BUG_ON(bio->bi_vcnt == 0);
+ do {
+ page = bvec->bv_page;
+ if (--bvec >= bio->bi_io_vec)
+ prefetchw(&bvec->bv_page->flags);
+
+ end_page_writeback(page);
+ } while (bvec >= bio->bi_io_vec);
+ bio_put(bio);
+ if (atomic_dec_and_test(&super->s_pending_writes))
+ wake_up(&wq);
+}
+
+static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
+ size_t nr_pages)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ struct bio *bio;
+ struct page *page;
+ struct request_queue *q = bdev_get_queue(sb->s_bdev);
+ unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
+ int i;
+
+ bio = bio_alloc(GFP_NOFS, max_pages);
+ BUG_ON(!bio); /* FIXME: handle this */
+
+ for (i = 0; i < nr_pages; i++) {
+ if (i >= max_pages) {
+ /* Block layer cannot split bios :( */
+ bio->bi_vcnt = i;
+ bio->bi_idx = 0;
+ bio->bi_size = i * PAGE_SIZE;
+ bio->bi_bdev = super->s_bdev;
+ bio->bi_sector = ofs >> 9;
+ bio->bi_private = sb;
+ bio->bi_end_io = writeseg_end_io;
+ atomic_inc(&super->s_pending_writes);
+ submit_bio(WRITE, bio);
+
+ ofs += i * PAGE_SIZE;
+ index += i;
+ nr_pages -= i;
+ i = 0;
+
+ bio = bio_alloc(GFP_NOFS, max_pages);
+ BUG_ON(!bio);
+ }
+ page = find_lock_page(mapping, index + i);
+ BUG_ON(!page);
+ bio->bi_io_vec[i].bv_page = page;
+ bio->bi_io_vec[i].bv_len = PAGE_SIZE;
+ bio->bi_io_vec[i].bv_offset = 0;
+
+ BUG_ON(PageWriteback(page));
+ set_page_writeback(page);
+ unlock_page(page);
+ }
+ bio->bi_vcnt = nr_pages;
+ bio->bi_idx = 0;
+ bio->bi_size = nr_pages * PAGE_SIZE;
+ bio->bi_bdev = super->s_bdev;
+ bio->bi_sector = ofs >> 9;
+ bio->bi_private = sb;
+ bio->bi_end_io = writeseg_end_io;
+ atomic_inc(&super->s_pending_writes);
+ submit_bio(WRITE, bio);
+ return 0;
+}
+
+static void bdev_writeseg(struct super_block *sb, u64 ofs, size_t len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int head;
+
+ BUG_ON(super->s_flags & LOGFS_SB_FLAG_RO);
+
+ if (len == 0) {
+ /* This can happen when the object fit perfectly into a
+ * segment, the segment gets written per sync and subsequently
+ * closed.
+ */
+ return;
+ }
+ head = ofs & (PAGE_SIZE - 1);
+ if (head) {
+ ofs -= head;
+ len += head;
+ }
+ len = PAGE_ALIGN(len);
+ __bdev_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
+ generic_unplug_device(bdev_get_queue(logfs_super(sb)->s_bdev));
+}
+
+static int bdev_erase(struct super_block *sb, loff_t to, size_t len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ struct page *page;
+ pgoff_t index = to >> PAGE_SHIFT;
+ int i, nr_pages = len >> PAGE_SHIFT;
+
+ BUG_ON(to & (PAGE_SIZE - 1));
+ BUG_ON(len & (PAGE_SIZE - 1));
+
+ if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
+ return -EROFS;
+
+ for (i = 0; i < nr_pages; i++) {
+ page = find_get_page(mapping, index + i);
+ if (page) {
+ memset(page_address(page), 0xFF, PAGE_SIZE);
+ page_cache_release(page);
+ }
+ }
+ return 0;
+}
+
+static void bdev_sync(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ wait_event(wq, atomic_read(&super->s_pending_writes) == 0);
+}
+
+static struct page *bdev_find_first_sb(struct super_block *sb, u64 *ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ filler_t *filler = bdev_readpage;
+
+ *ofs = 0;
+ return read_cache_page(mapping, 0, filler, sb);
+}
+
+static struct page *bdev_find_last_sb(struct super_block *sb, u64 *ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ filler_t *filler = bdev_readpage;
+ u64 pos = (super->s_bdev->bd_inode->i_size & ~0xfffULL) - 0x1000;
+ pgoff_t index = pos >> PAGE_SHIFT;
+
+ *ofs = pos;
+ return read_cache_page(mapping, index, filler, sb);
+}
+
+static int bdev_write_sb(struct super_block *sb, struct page *page)
+{
+ struct block_device *bdev = logfs_super(sb)->s_bdev;
+
+ /* Nothing special to do for block devices. */
+ return sync_request(page, bdev, WRITE);
+}
+
+static void bdev_put_device(struct super_block *sb)
+{
+ close_bdev_exclusive(logfs_super(sb)->s_bdev, FMODE_READ|FMODE_WRITE);
+}
+
+static const struct logfs_device_ops bd_devops = {
+ .find_first_sb = bdev_find_first_sb,
+ .find_last_sb = bdev_find_last_sb,
+ .write_sb = bdev_write_sb,
+ .readpage = bdev_readpage,
+ .writeseg = bdev_writeseg,
+ .erase = bdev_erase,
+ .sync = bdev_sync,
+ .put_device = bdev_put_device,
+};
+
+int logfs_get_sb_bdev(struct file_system_type *type, int flags,
+ const char *devname, struct vfsmount *mnt)
+{
+ struct block_device *bdev;
+
+ bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+
+ if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
+ int mtdnr = MINOR(bdev->bd_dev);
+ close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
+ return logfs_get_sb_mtd(type, flags, mtdnr, mnt);
+ }
+
+ return logfs_get_sb_device(type, flags, NULL, bdev, &bd_devops, mnt);
+}
diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c
new file mode 100644
index 000000000000..68e99d046c23
--- /dev/null
+++ b/fs/logfs/dev_mtd.c
@@ -0,0 +1,253 @@
+/*
+ * fs/logfs/dev_mtd.c - Device access methods for MTD
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+#include <linux/completion.h>
+#include <linux/mount.h>
+#include <linux/sched.h>
+
+#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
+
+static int mtd_read(struct super_block *sb, loff_t ofs, size_t len, void *buf)
+{
+ struct mtd_info *mtd = logfs_super(sb)->s_mtd;
+ size_t retlen;
+ int ret;
+
+ ret = mtd->read(mtd, ofs, len, &retlen, buf);
+ BUG_ON(ret == -EINVAL);
+ if (ret)
+ return ret;
+
+ /* Not sure if we should loop instead. */
+ if (retlen != len)
+ return -EIO;
+
+ return 0;
+}
+
+static int mtd_write(struct super_block *sb, loff_t ofs, size_t len, void *buf)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct mtd_info *mtd = super->s_mtd;
+ size_t retlen;
+ loff_t page_start, page_end;
+ int ret;
+
+ if (super->s_flags & LOGFS_SB_FLAG_RO)
+ return -EROFS;
+
+ BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs));
+ BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift);
+ BUG_ON(len > PAGE_CACHE_SIZE);
+ page_start = ofs & PAGE_CACHE_MASK;
+ page_end = PAGE_CACHE_ALIGN(ofs + len) - 1;
+ ret = mtd->write(mtd, ofs, len, &retlen, buf);
+ if (ret || (retlen != len))
+ return -EIO;
+
+ return 0;
+}
+
+/*
+ * For as long as I can remember (since about 2001) mtd->erase has been an
+ * asynchronous interface lacking the first driver to actually use the
+ * asynchronous properties. So just to prevent the first implementor of such
+ * a thing from breaking logfs in 2350, we do the usual pointless dance to
+ * declare a completion variable and wait for completion before returning
+ * from mtd_erase(). What an excercise in futility!
+ */
+static void logfs_erase_callback(struct erase_info *ei)
+{
+ complete((struct completion *)ei->priv);
+}
+
+static int mtd_erase_mapping(struct super_block *sb, loff_t ofs, size_t len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ struct page *page;
+ pgoff_t index = ofs >> PAGE_SHIFT;
+
+ for (index = ofs >> PAGE_SHIFT; index < (ofs + len) >> PAGE_SHIFT; index++) {
+ page = find_get_page(mapping, index);
+ if (!page)
+ continue;
+ memset(page_address(page), 0xFF, PAGE_SIZE);
+ page_cache_release(page);
+ }
+ return 0;
+}
+
+static int mtd_erase(struct super_block *sb, loff_t ofs, size_t len)
+{
+ struct mtd_info *mtd = logfs_super(sb)->s_mtd;
+ struct erase_info ei;
+ DECLARE_COMPLETION_ONSTACK(complete);
+ int ret;
+
+ BUG_ON(len % mtd->erasesize);
+ if (logfs_super(sb)->s_flags & LOGFS_SB_FLAG_RO)
+ return -EROFS;
+
+ memset(&ei, 0, sizeof(ei));
+ ei.mtd = mtd;
+ ei.addr = ofs;
+ ei.len = len;
+ ei.callback = logfs_erase_callback;
+ ei.priv = (long)&complete;
+ ret = mtd->erase(mtd, &ei);
+ if (ret)
+ return -EIO;
+
+ wait_for_completion(&complete);
+ if (ei.state != MTD_ERASE_DONE)
+ return -EIO;
+ return mtd_erase_mapping(sb, ofs, len);
+}
+
+static void mtd_sync(struct super_block *sb)
+{
+ struct mtd_info *mtd = logfs_super(sb)->s_mtd;
+
+ if (mtd->sync)
+ mtd->sync(mtd);
+}
+
+static int mtd_readpage(void *_sb, struct page *page)
+{
+ struct super_block *sb = _sb;
+ int err;
+
+ err = mtd_read(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
+ page_address(page));
+ if (err == -EUCLEAN) {
+ err = 0;
+ /* FIXME: force GC this segment */
+ }
+ if (err) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ ClearPageError(page);
+ }
+ unlock_page(page);
+ return err;
+}
+
+static struct page *mtd_find_first_sb(struct super_block *sb, u64 *ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ filler_t *filler = mtd_readpage;
+ struct mtd_info *mtd = super->s_mtd;
+
+ if (!mtd->block_isbad)
+ return NULL;
+
+ *ofs = 0;
+ while (mtd->block_isbad(mtd, *ofs)) {
+ *ofs += mtd->erasesize;
+ if (*ofs >= mtd->size)
+ return NULL;
+ }
+ BUG_ON(*ofs & ~PAGE_MASK);
+ return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
+}
+
+static struct page *mtd_find_last_sb(struct super_block *sb, u64 *ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ filler_t *filler = mtd_readpage;
+ struct mtd_info *mtd = super->s_mtd;
+
+ if (!mtd->block_isbad)
+ return NULL;
+
+ *ofs = mtd->size - mtd->erasesize;
+ while (mtd->block_isbad(mtd, *ofs)) {
+ *ofs -= mtd->erasesize;
+ if (*ofs <= 0)
+ return NULL;
+ }
+ *ofs = *ofs + mtd->erasesize - 0x1000;
+ BUG_ON(*ofs & ~PAGE_MASK);
+ return read_cache_page(mapping, *ofs >> PAGE_SHIFT, filler, sb);
+}
+
+static int __mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
+ size_t nr_pages)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ struct page *page;
+ int i, err;
+
+ for (i = 0; i < nr_pages; i++) {
+ page = find_lock_page(mapping, index + i);
+ BUG_ON(!page);
+
+ err = mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE,
+ page_address(page));
+ unlock_page(page);
+ page_cache_release(page);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void mtd_writeseg(struct super_block *sb, u64 ofs, size_t len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int head;
+
+ if (super->s_flags & LOGFS_SB_FLAG_RO)
+ return;
+
+ if (len == 0) {
+ /* This can happen when the object fit perfectly into a
+ * segment, the segment gets written per sync and subsequently
+ * closed.
+ */
+ return;
+ }
+ head = ofs & (PAGE_SIZE - 1);
+ if (head) {
+ ofs -= head;
+ len += head;
+ }
+ len = PAGE_ALIGN(len);
+ __mtd_writeseg(sb, ofs, ofs >> PAGE_SHIFT, len >> PAGE_SHIFT);
+}
+
+static void mtd_put_device(struct super_block *sb)
+{
+ put_mtd_device(logfs_super(sb)->s_mtd);
+}
+
+static const struct logfs_device_ops mtd_devops = {
+ .find_first_sb = mtd_find_first_sb,
+ .find_last_sb = mtd_find_last_sb,
+ .readpage = mtd_readpage,
+ .writeseg = mtd_writeseg,
+ .erase = mtd_erase,
+ .sync = mtd_sync,
+ .put_device = mtd_put_device,
+};
+
+int logfs_get_sb_mtd(struct file_system_type *type, int flags,
+ int mtdnr, struct vfsmount *mnt)
+{
+ struct mtd_info *mtd;
+ const struct logfs_device_ops *devops = &mtd_devops;
+
+ mtd = get_mtd_device(NULL, mtdnr);
+ return logfs_get_sb_device(type, flags, mtd, NULL, devops, mnt);
+}
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
new file mode 100644
index 000000000000..56a8bfbb0120
--- /dev/null
+++ b/fs/logfs/dir.c
@@ -0,0 +1,827 @@
+/*
+ * fs/logfs/dir.c - directory-related code
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+
+
+/*
+ * Atomic dir operations
+ *
+ * Directory operations are by default not atomic. Dentries and Inodes are
+ * created/removed/altered in seperate operations. Therefore we need to do
+ * a small amount of journaling.
+ *
+ * Create, link, mkdir, mknod and symlink all share the same function to do
+ * the work: __logfs_create. This function works in two atomic steps:
+ * 1. allocate inode (remember in journal)
+ * 2. allocate dentry (clear journal)
+ *
+ * As we can only get interrupted between the two, when the inode we just
+ * created is simply stored in the anchor. On next mount, if we were
+ * interrupted, we delete the inode. From a users point of view the
+ * operation never happened.
+ *
+ * Unlink and rmdir also share the same function: unlink. Again, this
+ * function works in two atomic steps
+ * 1. remove dentry (remember inode in journal)
+ * 2. unlink inode (clear journal)
+ *
+ * And again, on the next mount, if we were interrupted, we delete the inode.
+ * From a users point of view the operation succeeded.
+ *
+ * Rename is the real pain to deal with, harder than all the other methods
+ * combined. Depending on the circumstances we can run into three cases.
+ * A "target rename" where the target dentry already existed, a "local
+ * rename" where both parent directories are identical or a "cross-directory
+ * rename" in the remaining case.
+ *
+ * Local rename is atomic, as the old dentry is simply rewritten with a new
+ * name.
+ *
+ * Cross-directory rename works in two steps, similar to __logfs_create and
+ * logfs_unlink:
+ * 1. Write new dentry (remember old dentry in journal)
+ * 2. Remove old dentry (clear journal)
+ *
+ * Here we remember a dentry instead of an inode. On next mount, if we were
+ * interrupted, we delete the dentry. From a users point of view, the
+ * operation succeeded.
+ *
+ * Target rename works in three atomic steps:
+ * 1. Attach old inode to new dentry (remember old dentry and new inode)
+ * 2. Remove old dentry (still remember the new inode)
+ * 3. Remove victim inode
+ *
+ * Here we remember both an inode an a dentry. If we get interrupted
+ * between steps 1 and 2, we delete both the dentry and the inode. If
+ * we get interrupted between steps 2 and 3, we delete just the inode.
+ * In either case, the remaining objects are deleted on next mount. From
+ * a users point of view, the operation succeeded.
+ */
+
+static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd,
+ loff_t pos)
+{
+ return logfs_inode_write(dir, dd, sizeof(*dd), pos, WF_LOCK, NULL);
+}
+
+static int write_inode(struct inode *inode)
+{
+ return __logfs_write_inode(inode, WF_LOCK);
+}
+
+static s64 dir_seek_data(struct inode *inode, s64 pos)
+{
+ s64 new_pos = logfs_seek_data(inode, pos);
+
+ return max(pos, new_pos - 1);
+}
+
+static int beyond_eof(struct inode *inode, loff_t bix)
+{
+ loff_t pos = bix << inode->i_sb->s_blocksize_bits;
+ return pos >= i_size_read(inode);
+}
+
+/*
+ * Prime value was chosen to be roughly 256 + 26. r5 hash uses 11,
+ * so short names (len <= 9) don't even occupy the complete 32bit name
+ * space. A prime >256 ensures short names quickly spread the 32bit
+ * name space. Add about 26 for the estimated amount of information
+ * of each character and pick a prime nearby, preferrably a bit-sparse
+ * one.
+ */
+static u32 hash_32(const char *s, int len, u32 seed)
+{
+ u32 hash = seed;
+ int i;
+
+ for (i = 0; i < len; i++)
+ hash = hash * 293 + s[i];
+ return hash;
+}
+
+/*
+ * We have to satisfy several conflicting requirements here. Small
+ * directories should stay fairly compact and not require too many
+ * indirect blocks. The number of possible locations for a given hash
+ * should be small to make lookup() fast. And we should try hard not
+ * to overflow the 32bit name space or nfs and 32bit host systems will
+ * be unhappy.
+ *
+ * So we use the following scheme. First we reduce the hash to 0..15
+ * and try a direct block. If that is occupied we reduce the hash to
+ * 16..255 and try an indirect block. Same for 2x and 3x indirect
+ * blocks. Lastly we reduce the hash to 0x800_0000 .. 0xffff_ffff,
+ * but use buckets containing eight entries instead of a single one.
+ *
+ * Using 16 entries should allow for a reasonable amount of hash
+ * collisions, so the 32bit name space can be packed fairly tight
+ * before overflowing. Oh and currently we don't overflow but return
+ * and error.
+ *
+ * How likely are collisions? Doing the appropriate math is beyond me
+ * and the Bronstein textbook. But running a test program to brute
+ * force collisions for a couple of days showed that on average the
+ * first collision occurs after 598M entries, with 290M being the
+ * smallest result. Obviously 21 entries could already cause a
+ * collision if all entries are carefully chosen.
+ */
+static pgoff_t hash_index(u32 hash, int round)
+{
+ u32 i0_blocks = I0_BLOCKS;
+ u32 i1_blocks = I1_BLOCKS;
+ u32 i2_blocks = I2_BLOCKS;
+ u32 i3_blocks = I3_BLOCKS;
+
+ switch (round) {
+ case 0:
+ return hash % i0_blocks;
+ case 1:
+ return i0_blocks + hash % (i1_blocks - i0_blocks);
+ case 2:
+ return i1_blocks + hash % (i2_blocks - i1_blocks);
+ case 3:
+ return i2_blocks + hash % (i3_blocks - i2_blocks);
+ case 4 ... 19:
+ return i3_blocks + 16 * (hash % (((1<<31) - i3_blocks) / 16))
+ + round - 4;
+ }
+ BUG();
+}
+
+static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
+{
+ struct qstr *name = &dentry->d_name;
+ struct page *page;
+ struct logfs_disk_dentry *dd;
+ u32 hash = hash_32(name->name, name->len, 0);
+ pgoff_t index;
+ int round;
+
+ if (name->len > LOGFS_MAX_NAMELEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ for (round = 0; round < 20; round++) {
+ index = hash_index(hash, round);
+
+ if (beyond_eof(dir, index))
+ return NULL;
+ if (!logfs_exist_block(dir, index))
+ continue;
+ page = read_cache_page(dir->i_mapping, index,
+ (filler_t *)logfs_readpage, NULL);
+ if (IS_ERR(page))
+ return page;
+ dd = kmap_atomic(page, KM_USER0);
+ BUG_ON(dd->namelen == 0);
+
+ if (name->len != be16_to_cpu(dd->namelen) ||
+ memcmp(name->name, dd->name, name->len)) {
+ kunmap_atomic(dd, KM_USER0);
+ page_cache_release(page);
+ continue;
+ }
+
+ kunmap_atomic(dd, KM_USER0);
+ return page;
+ }
+ return NULL;
+}
+
+static int logfs_remove_inode(struct inode *inode)
+{
+ int ret;
+
+ inode->i_nlink--;
+ ret = write_inode(inode);
+ LOGFS_BUG_ON(ret, inode->i_sb);
+ return ret;
+}
+
+static void abort_transaction(struct inode *inode, struct logfs_transaction *ta)
+{
+ if (logfs_inode(inode)->li_block)
+ logfs_inode(inode)->li_block->ta = NULL;
+ kfree(ta);
+}
+
+static int logfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct logfs_super *super = logfs_super(dir->i_sb);
+ struct inode *inode = dentry->d_inode;
+ struct logfs_transaction *ta;
+ struct page *page;
+ pgoff_t index;
+ int ret;
+
+ ta = kzalloc(sizeof(*ta), GFP_KERNEL);
+ if (!ta)
+ return -ENOMEM;
+
+ ta->state = UNLINK_1;
+ ta->ino = inode->i_ino;
+
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+
+ page = logfs_get_dd_page(dir, dentry);
+ if (!page) {
+ kfree(ta);
+ return -ENOENT;
+ }
+ if (IS_ERR(page)) {
+ kfree(ta);
+ return PTR_ERR(page);
+ }
+ index = page->index;
+ page_cache_release(page);
+
+ mutex_lock(&super->s_dirop_mutex);
+ logfs_add_transaction(dir, ta);
+
+ ret = logfs_delete(dir, index, NULL);
+ if (!ret)
+ ret = write_inode(dir);
+
+ if (ret) {
+ abort_transaction(dir, ta);
+ printk(KERN_ERR"LOGFS: unable to delete inode\n");
+ goto out;
+ }
+
+ ta->state = UNLINK_2;
+ logfs_add_transaction(inode, ta);
+ ret = logfs_remove_inode(inode);
+out:
+ mutex_unlock(&super->s_dirop_mutex);
+ return ret;
+}
+
+static inline int logfs_empty_dir(struct inode *dir)
+{
+ u64 data;
+
+ data = logfs_seek_data(dir, 0) << dir->i_sb->s_blocksize_bits;
+ return data >= i_size_read(dir);
+}
+
+static int logfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!logfs_empty_dir(inode))
+ return -ENOTEMPTY;
+
+ return logfs_unlink(dir, dentry);
+}
+
+/* FIXME: readdir currently has it's own dir_walk code. I don't see a good
+ * way to combine the two copies */
+#define IMPLICIT_NODES 2
+static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
+{
+ struct inode *dir = file->f_dentry->d_inode;
+ loff_t pos = file->f_pos - IMPLICIT_NODES;
+ struct page *page;
+ struct logfs_disk_dentry *dd;
+ int full;
+
+ BUG_ON(pos < 0);
+ for (;; pos++) {
+ if (beyond_eof(dir, pos))
+ break;
+ if (!logfs_exist_block(dir, pos)) {
+ /* deleted dentry */
+ pos = dir_seek_data(dir, pos);
+ continue;
+ }
+ page = read_cache_page(dir->i_mapping, pos,
+ (filler_t *)logfs_readpage, NULL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+ dd = kmap_atomic(page, KM_USER0);
+ BUG_ON(dd->namelen == 0);
+
+ full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
+ pos, be64_to_cpu(dd->ino), dd->type);
+ kunmap_atomic(dd, KM_USER0);
+ page_cache_release(page);
+ if (full)
+ break;
+ }
+
+ file->f_pos = pos + IMPLICIT_NODES;
+ return 0;
+}
+
+static int logfs_readdir(struct file *file, void *buf, filldir_t filldir)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ ino_t pino = parent_ino(file->f_dentry);
+ int err;
+
+ if (file->f_pos < 0)
+ return -EINVAL;
+
+ if (file->f_pos == 0) {
+ if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0)
+ return 0;
+ file->f_pos++;
+ }
+ if (file->f_pos == 1) {
+ if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0)
+ return 0;
+ file->f_pos++;
+ }
+
+ err = __logfs_readdir(file, buf, filldir);
+ return err;
+}
+
+static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name)
+{
+ dd->namelen = cpu_to_be16(name->len);
+ memcpy(dd->name, name->name, name->len);
+}
+
+static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct page *page;
+ struct logfs_disk_dentry *dd;
+ pgoff_t index;
+ u64 ino = 0;
+ struct inode *inode;
+
+ page = logfs_get_dd_page(dir, dentry);
+ if (IS_ERR(page))
+ return ERR_CAST(page);
+ if (!page) {
+ d_add(dentry, NULL);
+ return NULL;
+ }
+ index = page->index;
+ dd = kmap_atomic(page, KM_USER0);
+ ino = be64_to_cpu(dd->ino);
+ kunmap_atomic(dd, KM_USER0);
+ page_cache_release(page);
+
+ inode = logfs_iget(dir->i_sb, ino);
+ if (IS_ERR(inode)) {
+ printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)n",
+ ino, dir->i_ino, index);
+ return ERR_CAST(inode);
+ }
+ return d_splice_alias(inode, dentry);
+}
+
+static void grow_dir(struct inode *dir, loff_t index)
+{
+ index = (index + 1) << dir->i_sb->s_blocksize_bits;
+ if (i_size_read(dir) < index)
+ i_size_write(dir, index);
+}
+
+static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
+{
+ struct page *page;
+ struct logfs_disk_dentry *dd;
+ u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0);
+ pgoff_t index;
+ int round, err;
+
+ for (round = 0; round < 20; round++) {
+ index = hash_index(hash, round);
+
+ if (logfs_exist_block(dir, index))
+ continue;
+ page = find_or_create_page(dir->i_mapping, index, GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ dd = kmap_atomic(page, KM_USER0);
+ memset(dd, 0, sizeof(*dd));
+ dd->ino = cpu_to_be64(inode->i_ino);
+ dd->type = logfs_type(inode);
+ logfs_set_name(dd, &dentry->d_name);
+ kunmap_atomic(dd, KM_USER0);
+
+ err = logfs_write_buf(dir, page, WF_LOCK);
+ unlock_page(page);
+ page_cache_release(page);
+ if (!err)
+ grow_dir(dir, index);
+ return err;
+ }
+ /* FIXME: Is there a better return value? In most cases neither
+ * the filesystem nor the directory are full. But we have had
+ * too many collisions for this particular hash and no fallback.
+ */
+ return -ENOSPC;
+}
+
+static int __logfs_create(struct inode *dir, struct dentry *dentry,
+ struct inode *inode, const char *dest, long destlen)
+{
+ struct logfs_super *super = logfs_super(dir->i_sb);
+ struct logfs_inode *li = logfs_inode(inode);
+ struct logfs_transaction *ta;
+ int ret;
+
+ ta = kzalloc(sizeof(*ta), GFP_KERNEL);
+ if (!ta)
+ return -ENOMEM;
+
+ ta->state = CREATE_1;
+ ta->ino = inode->i_ino;
+ mutex_lock(&super->s_dirop_mutex);
+ logfs_add_transaction(inode, ta);
+
+ if (dest) {
+ /* symlink */
+ ret = logfs_inode_write(inode, dest, destlen, 0, WF_LOCK, NULL);
+ if (!ret)
+ ret = write_inode(inode);
+ } else {
+ /* creat/mkdir/mknod */
+ ret = write_inode(inode);
+ }
+ if (ret) {
+ abort_transaction(inode, ta);
+ li->li_flags |= LOGFS_IF_STILLBORN;
+ /* FIXME: truncate symlink */
+ inode->i_nlink--;
+ iput(inode);
+ goto out;
+ }
+
+ ta->state = CREATE_2;
+ logfs_add_transaction(dir, ta);
+ ret = logfs_write_dir(dir, dentry, inode);
+ /* sync directory */
+ if (!ret)
+ ret = write_inode(dir);
+
+ if (ret) {
+ logfs_del_transaction(dir, ta);
+ ta->state = CREATE_2;
+ logfs_add_transaction(inode, ta);
+ logfs_remove_inode(inode);
+ iput(inode);
+ goto out;
+ }
+ d_instantiate(dentry, inode);
+out:
+ mutex_unlock(&super->s_dirop_mutex);
+ return ret;
+}
+
+static int logfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+{
+ struct inode *inode;
+
+ /*
+ * FIXME: why do we have to fill in S_IFDIR, while the mode is
+ * correct for mknod, creat, etc.? Smells like the vfs *should*
+ * do it for us but for some reason fails to do so.
+ */
+ inode = logfs_new_inode(dir, S_IFDIR | mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ inode->i_op = &logfs_dir_iops;
+ inode->i_fop = &logfs_dir_fops;
+
+ return __logfs_create(dir, dentry, inode, NULL, 0);
+}
+
+static int logfs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
+{
+ struct inode *inode;
+
+ inode = logfs_new_inode(dir, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ inode->i_op = &logfs_reg_iops;
+ inode->i_fop = &logfs_reg_fops;
+ inode->i_mapping->a_ops = &logfs_reg_aops;
+
+ return __logfs_create(dir, dentry, inode, NULL, 0);
+}
+
+static int logfs_mknod(struct inode *dir, struct dentry *dentry, int mode,
+ dev_t rdev)
+{
+ struct inode *inode;
+
+ if (dentry->d_name.len > LOGFS_MAX_NAMELEN)
+ return -ENAMETOOLONG;
+
+ inode = logfs_new_inode(dir, mode);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ init_special_inode(inode, mode, rdev);
+
+ return __logfs_create(dir, dentry, inode, NULL, 0);
+}
+
+static int logfs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *target)
+{
+ struct inode *inode;
+ size_t destlen = strlen(target) + 1;
+
+ if (destlen > dir->i_sb->s_blocksize)
+ return -ENAMETOOLONG;
+
+ inode = logfs_new_inode(dir, S_IFLNK | 0777);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ inode->i_op = &logfs_symlink_iops;
+ inode->i_mapping->a_ops = &logfs_reg_aops;
+
+ return __logfs_create(dir, dentry, inode, target, destlen);
+}
+
+static int logfs_permission(struct inode *inode, int mask)
+{
+ return generic_permission(inode, mask, NULL);
+}
+
+static int logfs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *dentry)
+{
+ struct inode *inode = old_dentry->d_inode;
+
+ if (inode->i_nlink >= LOGFS_LINK_MAX)
+ return -EMLINK;
+
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ atomic_inc(&inode->i_count);
+ inode->i_nlink++;
+ mark_inode_dirty_sync(inode);
+
+ return __logfs_create(dir, dentry, inode, NULL, 0);
+}
+
+static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
+ struct logfs_disk_dentry *dd, loff_t *pos)
+{
+ struct page *page;
+ void *map;
+
+ page = logfs_get_dd_page(dir, dentry);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+ *pos = page->index;
+ map = kmap_atomic(page, KM_USER0);
+ memcpy(dd, map, sizeof(*dd));
+ kunmap_atomic(map, KM_USER0);
+ page_cache_release(page);
+ return 0;
+}
+
+static int logfs_delete_dd(struct inode *dir, loff_t pos)
+{
+ /*
+ * Getting called with pos somewhere beyond eof is either a goofup
+ * within this file or means someone maliciously edited the
+ * (crc-protected) journal.
+ */
+ BUG_ON(beyond_eof(dir, pos));
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ log_dir(" Delete dentry (%lx, %llx)\n", dir->i_ino, pos);
+ return logfs_delete(dir, pos, NULL);
+}
+
+/*
+ * Cross-directory rename, target does not exist. Just a little nasty.
+ * Create a new dentry in the target dir, then remove the old dentry,
+ * all the while taking care to remember our operation in the journal.
+ */
+static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct logfs_super *super = logfs_super(old_dir->i_sb);
+ struct logfs_disk_dentry dd;
+ struct logfs_transaction *ta;
+ loff_t pos;
+ int err;
+
+ /* 1. locate source dd */
+ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
+ if (err)
+ return err;
+
+ ta = kzalloc(sizeof(*ta), GFP_KERNEL);
+ if (!ta)
+ return -ENOMEM;
+
+ ta->state = CROSS_RENAME_1;
+ ta->dir = old_dir->i_ino;
+ ta->pos = pos;
+
+ /* 2. write target dd */
+ mutex_lock(&super->s_dirop_mutex);
+ logfs_add_transaction(new_dir, ta);
+ err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode);
+ if (!err)
+ err = write_inode(new_dir);
+
+ if (err) {
+ super->s_rename_dir = 0;
+ super->s_rename_pos = 0;
+ abort_transaction(new_dir, ta);
+ goto out;
+ }
+
+ /* 3. remove source dd */
+ ta->state = CROSS_RENAME_2;
+ logfs_add_transaction(old_dir, ta);
+ err = logfs_delete_dd(old_dir, pos);
+ if (!err)
+ err = write_inode(old_dir);
+ LOGFS_BUG_ON(err, old_dir->i_sb);
+out:
+ mutex_unlock(&super->s_dirop_mutex);
+ return err;
+}
+
+static int logfs_replace_inode(struct inode *dir, struct dentry *dentry,
+ struct logfs_disk_dentry *dd, struct inode *inode)
+{
+ loff_t pos;
+ int err;
+
+ err = logfs_get_dd(dir, dentry, dd, &pos);
+ if (err)
+ return err;
+ dd->ino = cpu_to_be64(inode->i_ino);
+ dd->type = logfs_type(inode);
+
+ err = write_dir(dir, dd, pos);
+ if (err)
+ return err;
+ log_dir("Replace dentry (%lx, %llx) %s -> %llx\n", dir->i_ino, pos,
+ dd->name, be64_to_cpu(dd->ino));
+ return write_inode(dir);
+}
+
+/* Target dentry exists - the worst case. We need to attach the source
+ * inode to the target dentry, then remove the orphaned target inode and
+ * source dentry.
+ */
+static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ struct logfs_super *super = logfs_super(old_dir->i_sb);
+ struct inode *old_inode = old_dentry->d_inode;
+ struct inode *new_inode = new_dentry->d_inode;
+ int isdir = S_ISDIR(old_inode->i_mode);
+ struct logfs_disk_dentry dd;
+ struct logfs_transaction *ta;
+ loff_t pos;
+ int err;
+
+ BUG_ON(isdir != S_ISDIR(new_inode->i_mode));
+ if (isdir) {
+ if (!logfs_empty_dir(new_inode))
+ return -ENOTEMPTY;
+ }
+
+ /* 1. locate source dd */
+ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos);
+ if (err)
+ return err;
+
+ ta = kzalloc(sizeof(*ta), GFP_KERNEL);
+ if (!ta)
+ return -ENOMEM;
+
+ ta->state = TARGET_RENAME_1;
+ ta->dir = old_dir->i_ino;
+ ta->pos = pos;
+ ta->ino = new_inode->i_ino;
+
+ /* 2. attach source inode to target dd */
+ mutex_lock(&super->s_dirop_mutex);
+ logfs_add_transaction(new_dir, ta);
+ err = logfs_replace_inode(new_dir, new_dentry, &dd, old_inode);
+ if (err) {
+ super->s_rename_dir = 0;
+ super->s_rename_pos = 0;
+ super->s_victim_ino = 0;
+ abort_transaction(new_dir, ta);
+ goto out;
+ }
+
+ /* 3. remove source dd */
+ ta->state = TARGET_RENAME_2;
+ logfs_add_transaction(old_dir, ta);
+ err = logfs_delete_dd(old_dir, pos);
+ if (!err)
+ err = write_inode(old_dir);
+ LOGFS_BUG_ON(err, old_dir->i_sb);
+
+ /* 4. remove target inode */
+ ta->state = TARGET_RENAME_3;
+ logfs_add_transaction(new_inode, ta);
+ err = logfs_remove_inode(new_inode);
+
+out:
+ mutex_unlock(&super->s_dirop_mutex);
+ return err;
+}
+
+static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+{
+ if (new_dentry->d_inode)
+ return logfs_rename_target(old_dir, old_dentry,
+ new_dir, new_dentry);
+ return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry);
+}
+
+/* No locking done here, as this is called before .get_sb() returns. */
+int logfs_replay_journal(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct inode *inode;
+ u64 ino, pos;
+ int err;
+
+ if (super->s_victim_ino) {
+ /* delete victim inode */
+ ino = super->s_victim_ino;
+ printk(KERN_INFO"LogFS: delete unmapped inode #%llx\n", ino);
+ inode = logfs_iget(sb, ino);
+ if (IS_ERR(inode))
+ goto fail;
+
+ LOGFS_BUG_ON(i_size_read(inode) > 0, sb);
+ super->s_victim_ino = 0;
+ err = logfs_remove_inode(inode);
+ iput(inode);
+ if (err) {
+ super->s_victim_ino = ino;
+ goto fail;
+ }
+ }
+ if (super->s_rename_dir) {
+ /* delete old dd from rename */
+ ino = super->s_rename_dir;
+ pos = super->s_rename_pos;
+ printk(KERN_INFO"LogFS: delete unbacked dentry (%llx, %llx)\n",
+ ino, pos);
+ inode = logfs_iget(sb, ino);
+ if (IS_ERR(inode))
+ goto fail;
+
+ super->s_rename_dir = 0;
+ super->s_rename_pos = 0;
+ err = logfs_delete_dd(inode, pos);
+ iput(inode);
+ if (err) {
+ super->s_rename_dir = ino;
+ super->s_rename_pos = pos;
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ LOGFS_BUG(sb);
+ return -EIO;
+}
+
+const struct inode_operations logfs_symlink_iops = {
+ .readlink = generic_readlink,
+ .follow_link = page_follow_link_light,
+};
+
+const struct inode_operations logfs_dir_iops = {
+ .create = logfs_create,
+ .link = logfs_link,
+ .lookup = logfs_lookup,
+ .mkdir = logfs_mkdir,
+ .mknod = logfs_mknod,
+ .rename = logfs_rename,
+ .rmdir = logfs_rmdir,
+ .permission = logfs_permission,
+ .symlink = logfs_symlink,
+ .unlink = logfs_unlink,
+};
+const struct file_operations logfs_dir_fops = {
+ .fsync = logfs_fsync,
+ .ioctl = logfs_ioctl,
+ .readdir = logfs_readdir,
+ .read = generic_read_dir,
+};
diff --git a/fs/logfs/file.c b/fs/logfs/file.c
new file mode 100644
index 000000000000..370f367a933e
--- /dev/null
+++ b/fs/logfs/file.c
@@ -0,0 +1,263 @@
+/*
+ * fs/logfs/file.c - prepare_write, commit_write and friends
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+#include <linux/sched.h>
+#include <linux/writeback.h>
+
+static int logfs_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+{
+ struct inode *inode = mapping->host;
+ struct page *page;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+
+ page = grab_cache_page_write_begin(mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+ *pagep = page;
+
+ if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
+ return 0;
+ if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
+ unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned end = start + len;
+
+ /* Reading beyond i_size is simple: memset to zero */
+ zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
+ return 0;
+ }
+ return logfs_readpage_nolock(page);
+}
+
+static int logfs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied, struct page *page,
+ void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ pgoff_t index = page->index;
+ unsigned start = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned end = start + copied;
+ int ret = 0;
+
+ BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
+ BUG_ON(page->index > I3_BLOCKS);
+
+ if (copied < len) {
+ /*
+ * Short write of a non-initialized paged. Just tell userspace
+ * to retry the entire page.
+ */
+ if (!PageUptodate(page)) {
+ copied = 0;
+ goto out;
+ }
+ }
+ if (copied == 0)
+ goto out; /* FIXME: do we need to update inode? */
+
+ if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) {
+ i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end);
+ mark_inode_dirty_sync(inode);
+ }
+
+ SetPageUptodate(page);
+ if (!PageDirty(page)) {
+ if (!get_page_reserve(inode, page))
+ __set_page_dirty_nobuffers(page);
+ else
+ ret = logfs_write_buf(inode, page, WF_LOCK);
+ }
+out:
+ unlock_page(page);
+ page_cache_release(page);
+ return ret ? ret : copied;
+}
+
+int logfs_readpage(struct file *file, struct page *page)
+{
+ int ret;
+
+ ret = logfs_readpage_nolock(page);
+ unlock_page(page);
+ return ret;
+}
+
+/* Clear the page's dirty flag in the radix tree. */
+/* TODO: mucking with PageWriteback is silly. Add a generic function to clear
+ * the dirty bit from the radix tree for filesystems that don't have to wait
+ * for page writeback to finish (i.e. any compressing filesystem).
+ */
+static void clear_radix_tree_dirty(struct page *page)
+{
+ BUG_ON(PagePrivate(page) || page->private);
+ set_page_writeback(page);
+ end_page_writeback(page);
+}
+
+static int __logfs_writepage(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ int err;
+
+ err = logfs_write_buf(inode, page, WF_LOCK);
+ if (err)
+ set_page_dirty(page);
+ else
+ clear_radix_tree_dirty(page);
+ unlock_page(page);
+ return err;
+}
+
+static int logfs_writepage(struct page *page, struct writeback_control *wbc)
+{
+ struct inode *inode = page->mapping->host;
+ loff_t i_size = i_size_read(inode);
+ pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
+ unsigned offset;
+ u64 bix;
+ level_t level;
+
+ log_file("logfs_writepage(%lx, %lx, %p)\n", inode->i_ino, page->index,
+ page);
+
+ logfs_unpack_index(page->index, &bix, &level);
+
+ /* Indirect blocks are never truncated */
+ if (level != 0)
+ return __logfs_writepage(page);
+
+ /*
+ * TODO: everything below is a near-verbatim copy of nobh_writepage().
+ * The relevant bits should be factored out after logfs is merged.
+ */
+
+ /* Is the page fully inside i_size? */
+ if (bix < end_index)
+ return __logfs_writepage(page);
+
+ /* Is the page fully outside i_size? (truncate in progress) */
+ offset = i_size & (PAGE_CACHE_SIZE-1);
+ if (bix > end_index || offset == 0) {
+ unlock_page(page);
+ return 0; /* don't care */
+ }
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each and every
+ * writepage invokation because it may be mmapped. "A file is mapped
+ * in multiples of the page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when mapped, and
+ * writes to that region are not written out to the file."
+ */
+ zero_user_segment(page, offset, PAGE_CACHE_SIZE);
+ return __logfs_writepage(page);
+}
+
+static void logfs_invalidatepage(struct page *page, unsigned long offset)
+{
+ move_page_to_btree(page);
+ BUG_ON(PagePrivate(page) || page->private);
+}
+
+static int logfs_releasepage(struct page *page, gfp_t only_xfs_uses_this)
+{
+ return 0; /* None of these are easy to release */
+}
+
+
+int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ unsigned int oldflags, flags;
+ int err;
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ flags = li->li_flags & LOGFS_FL_USER_VISIBLE;
+ return put_user(flags, (int __user *)arg);
+ case FS_IOC_SETFLAGS:
+ if (IS_RDONLY(inode))
+ return -EROFS;
+
+ if (!is_owner_or_cap(inode))
+ return -EACCES;
+
+ err = get_user(flags, (int __user *)arg);
+ if (err)
+ return err;
+
+ mutex_lock(&inode->i_mutex);
+ oldflags = li->li_flags;
+ flags &= LOGFS_FL_USER_MODIFIABLE;
+ flags |= oldflags & ~LOGFS_FL_USER_MODIFIABLE;
+ li->li_flags = flags;
+ mutex_unlock(&inode->i_mutex);
+
+ inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty_sync(inode);
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+int logfs_fsync(struct file *file, struct dentry *dentry, int datasync)
+{
+ struct super_block *sb = dentry->d_inode->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+
+ /* FIXME: write anchor */
+ super->s_devops->sync(sb);
+ return 0;
+}
+
+static int logfs_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ struct inode *inode = dentry->d_inode;
+ int err = 0;
+
+ if (attr->ia_valid & ATTR_SIZE)
+ err = logfs_truncate(inode, attr->ia_size);
+ attr->ia_valid &= ~ATTR_SIZE;
+
+ if (!err)
+ err = inode_change_ok(inode, attr);
+ if (!err)
+ err = inode_setattr(inode, attr);
+ return err;
+}
+
+const struct inode_operations logfs_reg_iops = {
+ .setattr = logfs_setattr,
+};
+
+const struct file_operations logfs_reg_fops = {
+ .aio_read = generic_file_aio_read,
+ .aio_write = generic_file_aio_write,
+ .fsync = logfs_fsync,
+ .ioctl = logfs_ioctl,
+ .llseek = generic_file_llseek,
+ .mmap = generic_file_readonly_mmap,
+ .open = generic_file_open,
+ .read = do_sync_read,
+ .write = do_sync_write,
+};
+
+const struct address_space_operations logfs_reg_aops = {
+ .invalidatepage = logfs_invalidatepage,
+ .readpage = logfs_readpage,
+ .releasepage = logfs_releasepage,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ .writepage = logfs_writepage,
+ .writepages = generic_writepages,
+ .write_begin = logfs_write_begin,
+ .write_end = logfs_write_end,
+};
diff --git a/fs/logfs/gc.c b/fs/logfs/gc.c
new file mode 100644
index 000000000000..b3656c44190e
--- /dev/null
+++ b/fs/logfs/gc.c
@@ -0,0 +1,730 @@
+/*
+ * fs/logfs/gc.c - garbage collection code
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+#include <linux/sched.h>
+
+/*
+ * Wear leveling needs to kick in when the difference between low erase
+ * counts and high erase counts gets too big. A good value for "too big"
+ * may be somewhat below 10% of maximum erase count for the device.
+ * Why not 397, to pick a nice round number with no specific meaning? :)
+ *
+ * WL_RATELIMIT is the minimum time between two wear level events. A huge
+ * number of segments may fulfil the requirements for wear leveling at the
+ * same time. If that happens we don't want to cause a latency from hell,
+ * but just gently pick one segment every so often and minimize overhead.
+ */
+#define WL_DELTA 397
+#define WL_RATELIMIT 100
+#define MAX_OBJ_ALIASES 2600
+#define SCAN_RATIO 512 /* number of scanned segments per gc'd segment */
+#define LIST_SIZE 64 /* base size of candidate lists */
+#define SCAN_ROUNDS 128 /* maximum number of complete medium scans */
+#define SCAN_ROUNDS_HIGH 4 /* maximum number of higher-level scans */
+
+static int no_free_segments(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ return super->s_free_list.count;
+}
+
+/* journal has distance -1, top-most ifile layer distance 0 */
+static u8 root_distance(struct super_block *sb, gc_level_t __gc_level)
+{
+ struct logfs_super *super = logfs_super(sb);
+ u8 gc_level = (__force u8)__gc_level;
+
+ switch (gc_level) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3:
+ /* file data or indirect blocks */
+ return super->s_ifile_levels + super->s_iblock_levels - gc_level;
+ case 6: /* fall through */
+ case 7: /* fall through */
+ case 8: /* fall through */
+ case 9:
+ /* inode file data or indirect blocks */
+ return super->s_ifile_levels - (gc_level - 6);
+ default:
+ printk(KERN_ERR"LOGFS: segment of unknown level %x found\n",
+ gc_level);
+ WARN_ON(1);
+ return super->s_ifile_levels + super->s_iblock_levels;
+ }
+}
+
+static int segment_is_reserved(struct super_block *sb, u32 segno)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_area *area;
+ void *reserved;
+ int i;
+
+ /* Some segments are reserved. Just pretend they were all valid */
+ reserved = btree_lookup32(&super->s_reserved_segments, segno);
+ if (reserved)
+ return 1;
+
+ /* Currently open segments */
+ for_each_area(i) {
+ area = super->s_area[i];
+ if (area->a_is_open && area->a_segno == segno)
+ return 1;
+ }
+
+ return 0;
+}
+
+static void logfs_mark_segment_bad(struct super_block *sb, u32 segno)
+{
+ BUG();
+}
+
+/*
+ * Returns the bytes consumed by valid objects in this segment. Object headers
+ * are counted, the segment header is not.
+ */
+static u32 logfs_valid_bytes(struct super_block *sb, u32 segno, u32 *ec,
+ gc_level_t *gc_level)
+{
+ struct logfs_segment_entry se;
+ u32 ec_level;
+
+ logfs_get_segment_entry(sb, segno, &se);
+ if (se.ec_level == cpu_to_be32(BADSEG) ||
+ se.valid == cpu_to_be32(RESERVED))
+ return RESERVED;
+
+ ec_level = be32_to_cpu(se.ec_level);
+ *ec = ec_level >> 4;
+ *gc_level = GC_LEVEL(ec_level & 0xf);
+ return be32_to_cpu(se.valid);
+}
+
+static void logfs_cleanse_block(struct super_block *sb, u64 ofs, u64 ino,
+ u64 bix, gc_level_t gc_level)
+{
+ struct inode *inode;
+ int err, cookie;
+
+ inode = logfs_safe_iget(sb, ino, &cookie);
+ err = logfs_rewrite_block(inode, bix, ofs, gc_level, 0);
+ BUG_ON(err);
+ logfs_safe_iput(inode, cookie);
+}
+
+static u32 logfs_gc_segment(struct super_block *sb, u32 segno, u8 dist)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_segment_header sh;
+ struct logfs_object_header oh;
+ u64 ofs, ino, bix;
+ u32 seg_ofs, logical_segno, cleaned = 0;
+ int err, len, valid;
+ gc_level_t gc_level;
+
+ LOGFS_BUG_ON(segment_is_reserved(sb, segno), sb);
+
+ btree_insert32(&super->s_reserved_segments, segno, (void *)1, GFP_NOFS);
+ err = wbuf_read(sb, dev_ofs(sb, segno, 0), sizeof(sh), &sh);
+ BUG_ON(err);
+ gc_level = GC_LEVEL(sh.level);
+ logical_segno = be32_to_cpu(sh.segno);
+ if (sh.crc != logfs_crc32(&sh, sizeof(sh), 4)) {
+ logfs_mark_segment_bad(sb, segno);
+ cleaned = -1;
+ goto out;
+ }
+
+ for (seg_ofs = LOGFS_SEGMENT_HEADERSIZE;
+ seg_ofs + sizeof(oh) < super->s_segsize; ) {
+ ofs = dev_ofs(sb, logical_segno, seg_ofs);
+ err = wbuf_read(sb, dev_ofs(sb, segno, seg_ofs), sizeof(oh),
+ &oh);
+ BUG_ON(err);
+
+ if (!memchr_inv(&oh, 0xff, sizeof(oh)))
+ break;
+
+ if (oh.crc != logfs_crc32(&oh, sizeof(oh) - 4, 4)) {
+ logfs_mark_segment_bad(sb, segno);
+ cleaned = super->s_segsize - 1;
+ goto out;
+ }
+
+ ino = be64_to_cpu(oh.ino);
+ bix = be64_to_cpu(oh.bix);
+ len = sizeof(oh) + be16_to_cpu(oh.len);
+ valid = logfs_is_valid_block(sb, ofs, ino, bix, gc_level);
+ if (valid == 1) {
+ logfs_cleanse_block(sb, ofs, ino, bix, gc_level);
+ cleaned += len;
+ } else if (valid == 2) {
+ /* Will be invalid upon journal commit */
+ cleaned += len;
+ }
+ seg_ofs += len;
+ }
+out:
+ btree_remove32(&super->s_reserved_segments, segno);
+ return cleaned;
+}
+
+static struct gc_candidate *add_list(struct gc_candidate *cand,
+ struct candidate_list *list)
+{
+ struct rb_node **p = &list->rb_tree.rb_node;
+ struct rb_node *parent = NULL;
+ struct gc_candidate *cur;
+ int comp;
+
+ cand->list = list;
+ while (*p) {
+ parent = *p;
+ cur = rb_entry(parent, struct gc_candidate, rb_node);
+
+ if (list->sort_by_ec)
+ comp = cand->erase_count < cur->erase_count;
+ else
+ comp = cand->valid < cur->valid;
+
+ if (comp)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&cand->rb_node, parent, p);
+ rb_insert_color(&cand->rb_node, &list->rb_tree);
+
+ if (list->count <= list->maxcount) {
+ list->count++;
+ return NULL;
+ }
+ cand = rb_entry(rb_last(&list->rb_tree), struct gc_candidate, rb_node);
+ rb_erase(&cand->rb_node, &list->rb_tree);
+ cand->list = NULL;
+ return cand;
+}
+
+static void remove_from_list(struct gc_candidate *cand)
+{
+ struct candidate_list *list = cand->list;
+
+ rb_erase(&cand->rb_node, &list->rb_tree);
+ list->count--;
+}
+
+static void free_candidate(struct super_block *sb, struct gc_candidate *cand)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ btree_remove32(&super->s_cand_tree, cand->segno);
+ kfree(cand);
+}
+
+u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec)
+{
+ struct gc_candidate *cand;
+ u32 segno;
+
+ BUG_ON(list->count == 0);
+
+ cand = rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node);
+ remove_from_list(cand);
+ segno = cand->segno;
+ if (ec)
+ *ec = cand->erase_count;
+ free_candidate(sb, cand);
+ return segno;
+}
+
+/*
+ * We have several lists to manage segments with. The reserve_list is used to
+ * deal with bad blocks. We try to keep the best (lowest ec) segments on this
+ * list.
+ * The free_list contains free segments for normal usage. It usually gets the
+ * second pick after the reserve_list. But when the free_list is running short
+ * it is more important to keep the free_list full than to keep a reserve.
+ *
+ * Segments that are not free are put onto a per-level low_list. If we have
+ * to run garbage collection, we pick a candidate from there. All segments on
+ * those lists should have at least some free space so GC will make progress.
+ *
+ * And last we have the ec_list, which is used to pick segments for wear
+ * leveling.
+ *
+ * If all appropriate lists are full, we simply free the candidate and forget
+ * about that segment for a while. We have better candidates for each purpose.
+ */
+static void __add_candidate(struct super_block *sb, struct gc_candidate *cand)
+{
+ struct logfs_super *super = logfs_super(sb);
+ u32 full = super->s_segsize - LOGFS_SEGMENT_RESERVE;
+
+ if (cand->valid == 0) {
+ /* 100% free segments */
+ log_gc_noisy("add reserve segment %x (ec %x) at %llx\n",
+ cand->segno, cand->erase_count,
+ dev_ofs(sb, cand->segno, 0));
+ cand = add_list(cand, &super->s_reserve_list);
+ if (cand) {
+ log_gc_noisy("add free segment %x (ec %x) at %llx\n",
+ cand->segno, cand->erase_count,
+ dev_ofs(sb, cand->segno, 0));
+ cand = add_list(cand, &super->s_free_list);
+ }
+ } else {
+ /* good candidates for Garbage Collection */
+ if (cand->valid < full)
+ cand = add_list(cand, &super->s_low_list[cand->dist]);
+ /* good candidates for wear leveling,
+ * segments that were recently written get ignored */
+ if (cand)
+ cand = add_list(cand, &super->s_ec_list);
+ }
+ if (cand)
+ free_candidate(sb, cand);
+}
+
+static int add_candidate(struct super_block *sb, u32 segno, u32 valid, u32 ec,
+ u8 dist)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct gc_candidate *cand;
+
+ cand = kmalloc(sizeof(*cand), GFP_NOFS);
+ if (!cand)
+ return -ENOMEM;
+
+ cand->segno = segno;
+ cand->valid = valid;
+ cand->erase_count = ec;
+ cand->dist = dist;
+
+ btree_insert32(&super->s_cand_tree, segno, cand, GFP_NOFS);
+ __add_candidate(sb, cand);
+ return 0;
+}
+
+static void remove_segment_from_lists(struct super_block *sb, u32 segno)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct gc_candidate *cand;
+
+ cand = btree_lookup32(&super->s_cand_tree, segno);
+ if (cand) {
+ remove_from_list(cand);
+ free_candidate(sb, cand);
+ }
+}
+
+static void scan_segment(struct super_block *sb, u32 segno)
+{
+ u32 valid, ec = 0;
+ gc_level_t gc_level = 0;
+ u8 dist;
+
+ if (segment_is_reserved(sb, segno))
+ return;
+
+ remove_segment_from_lists(sb, segno);
+ valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
+ if (valid == RESERVED)
+ return;
+
+ dist = root_distance(sb, gc_level);
+ add_candidate(sb, segno, valid, ec, dist);
+}
+
+static struct gc_candidate *first_in_list(struct candidate_list *list)
+{
+ if (list->count == 0)
+ return NULL;
+ return rb_entry(rb_first(&list->rb_tree), struct gc_candidate, rb_node);
+}
+
+/*
+ * Find the best segment for garbage collection. Main criterion is
+ * the segment requiring the least effort to clean. Secondary
+ * criterion is to GC on the lowest level available.
+ *
+ * So we search the least effort segment on the lowest level first,
+ * then move up and pick another segment iff is requires significantly
+ * less effort. Hence the LOGFS_MAX_OBJECTSIZE in the comparison.
+ */
+static struct gc_candidate *get_candidate(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int i, max_dist;
+ struct gc_candidate *cand = NULL, *this;
+
+ max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS);
+
+ for (i = max_dist; i >= 0; i--) {
+ this = first_in_list(&super->s_low_list[i]);
+ if (!this)
+ continue;
+ if (!cand)
+ cand = this;
+ if (this->valid + LOGFS_MAX_OBJECTSIZE <= cand->valid)
+ cand = this;
+ }
+ return cand;
+}
+
+static int __logfs_gc_once(struct super_block *sb, struct gc_candidate *cand)
+{
+ struct logfs_super *super = logfs_super(sb);
+ gc_level_t gc_level;
+ u32 cleaned, valid, segno, ec;
+ u8 dist;
+
+ if (!cand) {
+ log_gc("GC attempted, but no candidate found\n");
+ return 0;
+ }
+
+ segno = cand->segno;
+ dist = cand->dist;
+ valid = logfs_valid_bytes(sb, segno, &ec, &gc_level);
+ free_candidate(sb, cand);
+ log_gc("GC segment #%02x at %llx, %x required, %x free, %x valid, %llx free\n",
+ segno, (u64)segno << super->s_segshift,
+ dist, no_free_segments(sb), valid,
+ super->s_free_bytes);
+ cleaned = logfs_gc_segment(sb, segno, dist);
+ log_gc("GC segment #%02x complete - now %x valid\n", segno,
+ valid - cleaned);
+ BUG_ON(cleaned != valid);
+ return 1;
+}
+
+static int logfs_gc_once(struct super_block *sb)
+{
+ struct gc_candidate *cand;
+
+ cand = get_candidate(sb);
+ if (cand)
+ remove_from_list(cand);
+ return __logfs_gc_once(sb, cand);
+}
+
+/* returns 1 if a wrap occurs, 0 otherwise */
+static int logfs_scan_some(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ u32 segno;
+ int i, ret = 0;
+
+ segno = super->s_sweeper;
+ for (i = SCAN_RATIO; i > 0; i--) {
+ segno++;
+ if (segno >= super->s_no_segs) {
+ segno = 0;
+ ret = 1;
+ /* Break out of the loop. We want to read a single
+ * block from the segment size on next invocation if
+ * SCAN_RATIO is set to match block size
+ */
+ break;
+ }
+
+ scan_segment(sb, segno);
+ }
+ super->s_sweeper = segno;
+ return ret;
+}
+
+/*
+ * In principle, this function should loop forever, looking for GC candidates
+ * and moving data. LogFS is designed in such a way that this loop is
+ * guaranteed to terminate.
+ *
+ * Limiting the loop to some iterations serves purely to catch cases when
+ * these guarantees have failed. An actual endless loop is an obvious bug
+ * and should be reported as such.
+ */
+static void __logfs_gc_pass(struct super_block *sb, int target)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_block *block;
+ int round, progress, last_progress = 0;
+
+ if (no_free_segments(sb) >= target &&
+ super->s_no_object_aliases < MAX_OBJ_ALIASES)
+ return;
+
+ log_gc("__logfs_gc_pass(%x)\n", target);
+ for (round = 0; round < SCAN_ROUNDS; ) {
+ if (no_free_segments(sb) >= target)
+ goto write_alias;
+
+ /* Sync in-memory state with on-medium state in case they
+ * diverged */
+ logfs_write_anchor(super->s_master_inode);
+ round += logfs_scan_some(sb);
+ if (no_free_segments(sb) >= target)
+ goto write_alias;
+ progress = logfs_gc_once(sb);
+ if (progress)
+ last_progress = round;
+ else if (round - last_progress > 2)
+ break;
+ continue;
+
+ /*
+ * The goto logic is nasty, I just don't know a better way to
+ * code it. GC is supposed to ensure two things:
+ * 1. Enough free segments are available.
+ * 2. The number of aliases is bounded.
+ * When 1. is achieved, we take a look at 2. and write back
+ * some alias-containing blocks, if necessary. However, after
+ * each such write we need to go back to 1., as writes can
+ * consume free segments.
+ */
+write_alias:
+ if (super->s_no_object_aliases < MAX_OBJ_ALIASES)
+ return;
+ if (list_empty(&super->s_object_alias)) {
+ /* All aliases are still in btree */
+ return;
+ }
+ log_gc("Write back one alias\n");
+ block = list_entry(super->s_object_alias.next,
+ struct logfs_block, alias_list);
+ block->ops->write_block(block);
+ /*
+ * To round off the nasty goto logic, we reset round here. It
+ * is a safety-net for GC not making any progress and limited
+ * to something reasonably small. If incremented it for every
+ * single alias, the loop could terminate rather quickly.
+ */
+ round = 0;
+ }
+ LOGFS_BUG(sb);
+}
+
+static int wl_ratelimit(struct super_block *sb, u64 *next_event)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ if (*next_event < super->s_gec) {
+ *next_event = super->s_gec + WL_RATELIMIT;
+ return 0;
+ }
+ return 1;
+}
+
+static void logfs_wl_pass(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct gc_candidate *wl_cand, *free_cand;
+
+ if (wl_ratelimit(sb, &super->s_wl_gec_ostore))
+ return;
+
+ wl_cand = first_in_list(&super->s_ec_list);
+ if (!wl_cand)
+ return;
+ free_cand = first_in_list(&super->s_free_list);
+ if (!free_cand)
+ return;
+
+ if (wl_cand->erase_count < free_cand->erase_count + WL_DELTA) {
+ remove_from_list(wl_cand);
+ __logfs_gc_once(sb, wl_cand);
+ }
+}
+
+/*
+ * The journal needs wear leveling as well. But moving the journal is an
+ * expensive operation so we try to avoid it as much as possible. And if we
+ * have to do it, we move the whole journal, not individual segments.
+ *
+ * Ratelimiting is not strictly necessary here, it mainly serves to avoid the
+ * calculations. First we check whether moving the journal would be a
+ * significant improvement. That means that a) the current journal segments
+ * have more wear than the future journal segments and b) the current journal
+ * segments have more wear than normal ostore segments.
+ * Rationale for b) is that we don't have to move the journal if it is aging
+ * less than the ostore, even if the reserve segments age even less (they are
+ * excluded from wear leveling, after all).
+ * Next we check that the superblocks have less wear than the journal. Since
+ * moving the journal requires writing the superblocks, we have to protect the
+ * superblocks even more than the journal.
+ *
+ * Also we double the acceptable wear difference, compared to ostore wear
+ * leveling. Journal data is read and rewritten rapidly, comparatively. So
+ * soft errors have much less time to accumulate and we allow the journal to
+ * be a bit worse than the ostore.
+ */
+static void logfs_journal_wl_pass(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct gc_candidate *cand;
+ u32 min_journal_ec = -1, max_reserve_ec = 0;
+ int i;
+
+ if (wl_ratelimit(sb, &super->s_wl_gec_journal))
+ return;
+
+ if (super->s_reserve_list.count < super->s_no_journal_segs) {
+ /* Reserve is not full enough to move complete journal */
+ return;
+ }
+
+ journal_for_each(i)
+ if (super->s_journal_seg[i])
+ min_journal_ec = min(min_journal_ec,
+ super->s_journal_ec[i]);
+ cand = rb_entry(rb_first(&super->s_free_list.rb_tree),
+ struct gc_candidate, rb_node);
+ max_reserve_ec = cand->erase_count;
+ for (i = 0; i < 2; i++) {
+ struct logfs_segment_entry se;
+ u32 segno = seg_no(sb, super->s_sb_ofs[i]);
+ u32 ec;
+
+ logfs_get_segment_entry(sb, segno, &se);
+ ec = be32_to_cpu(se.ec_level) >> 4;
+ max_reserve_ec = max(max_reserve_ec, ec);
+ }
+
+ if (min_journal_ec > max_reserve_ec + 2 * WL_DELTA) {
+ do_logfs_journal_wl_pass(sb);
+ }
+}
+
+void logfs_gc_pass(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ //BUG_ON(mutex_trylock(&logfs_super(sb)->s_w_mutex));
+ /* Write journal before free space is getting saturated with dirty
+ * objects.
+ */
+ if (super->s_dirty_used_bytes + super->s_dirty_free_bytes
+ + LOGFS_MAX_OBJECTSIZE >= super->s_free_bytes)
+ logfs_write_anchor(super->s_master_inode);
+ __logfs_gc_pass(sb, logfs_super(sb)->s_total_levels);
+ logfs_wl_pass(sb);
+ logfs_journal_wl_pass(sb);
+}
+
+static int check_area(struct super_block *sb, int i)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_area *area = super->s_area[i];
+ struct logfs_object_header oh;
+ u32 segno = area->a_segno;
+ u32 ofs = area->a_used_bytes;
+ __be32 crc;
+ int err;
+
+ if (!area->a_is_open)
+ return 0;
+
+ for (ofs = area->a_used_bytes;
+ ofs <= super->s_segsize - sizeof(oh);
+ ofs += (u32)be16_to_cpu(oh.len) + sizeof(oh)) {
+ err = wbuf_read(sb, dev_ofs(sb, segno, ofs), sizeof(oh), &oh);
+ if (err)
+ return err;
+
+ if (!memchr_inv(&oh, 0xff, sizeof(oh)))
+ break;
+
+ crc = logfs_crc32(&oh, sizeof(oh) - 4, 4);
+ if (crc != oh.crc) {
+ printk(KERN_INFO "interrupted header at %llx\n",
+ dev_ofs(sb, segno, ofs));
+ return 0;
+ }
+ }
+ if (ofs != area->a_used_bytes) {
+ printk(KERN_INFO "%x bytes unaccounted data found at %llx\n",
+ ofs - area->a_used_bytes,
+ dev_ofs(sb, segno, area->a_used_bytes));
+ area->a_used_bytes = ofs;
+ }
+ return 0;
+}
+
+int logfs_check_areas(struct super_block *sb)
+{
+ int i, err;
+
+ for_each_area(i) {
+ err = check_area(sb, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void logfs_init_candlist(struct candidate_list *list, int maxcount,
+ int sort_by_ec)
+{
+ list->count = 0;
+ list->maxcount = maxcount;
+ list->sort_by_ec = sort_by_ec;
+ list->rb_tree = RB_ROOT;
+}
+
+int logfs_init_gc(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int i;
+
+ btree_init_mempool32(&super->s_cand_tree, super->s_btree_pool);
+ logfs_init_candlist(&super->s_free_list, LIST_SIZE + SCAN_RATIO, 1);
+ logfs_init_candlist(&super->s_reserve_list,
+ super->s_bad_seg_reserve, 1);
+ for_each_area(i)
+ logfs_init_candlist(&super->s_low_list[i], LIST_SIZE, 0);
+ logfs_init_candlist(&super->s_ec_list, LIST_SIZE, 1);
+ return 0;
+}
+
+static void logfs_cleanup_list(struct super_block *sb,
+ struct candidate_list *list)
+{
+ struct gc_candidate *cand;
+
+ while (list->count) {
+ cand = rb_entry(list->rb_tree.rb_node, struct gc_candidate,
+ rb_node);
+ remove_from_list(cand);
+ free_candidate(sb, cand);
+ }
+ BUG_ON(list->rb_tree.rb_node);
+}
+
+void logfs_cleanup_gc(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int i;
+
+ if (!super->s_free_list.count)
+ return;
+
+ /*
+ * FIXME: The btree may still contain a single empty node. So we
+ * call the grim visitor to clean up that mess. Btree code should
+ * do it for us, really.
+ */
+ btree_grim_visitor32(&super->s_cand_tree, 0, NULL);
+ logfs_cleanup_list(sb, &super->s_free_list);
+ logfs_cleanup_list(sb, &super->s_reserve_list);
+ for_each_area(i)
+ logfs_cleanup_list(sb, &super->s_low_list[i]);
+ logfs_cleanup_list(sb, &super->s_ec_list);
+}
diff --git a/fs/logfs/inode.c b/fs/logfs/inode.c
new file mode 100644
index 000000000000..6d08b3762641
--- /dev/null
+++ b/fs/logfs/inode.c
@@ -0,0 +1,417 @@
+/*
+ * fs/logfs/inode.c - inode handling code
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+#include <linux/writeback.h>
+#include <linux/backing-dev.h>
+
+/*
+ * How soon to reuse old inode numbers? LogFS doesn't store deleted inodes
+ * on the medium. It therefore also lacks a method to store the previous
+ * generation number for deleted inodes. Instead a single generation number
+ * is stored which will be used for new inodes. Being just a 32bit counter,
+ * this can obvious wrap relatively quickly. So we only reuse inodes if we
+ * know that a fair number of inodes can be created before we have to increment
+ * the generation again - effectively adding some bits to the counter.
+ * But being too aggressive here means we keep a very large and very sparse
+ * inode file, wasting space on indirect blocks.
+ * So what is a good value? Beats me. 64k seems moderately bad on both
+ * fronts, so let's use that for now...
+ *
+ * NFS sucks, as everyone already knows.
+ */
+#define INOS_PER_WRAP (0x10000)
+
+/*
+ * Logfs' requirement to read inodes for garbage collection makes life a bit
+ * harder. GC may have to read inodes that are in I_FREEING state, when they
+ * are being written out - and waiting for GC to make progress, naturally.
+ *
+ * So we cannot just call iget() or some variant of it, but first have to check
+ * wether the inode in question might be in I_FREEING state. Therefore we
+ * maintain our own per-sb list of "almost deleted" inodes and check against
+ * that list first. Normally this should be at most 1-2 entries long.
+ *
+ * Also, inodes have logfs-specific reference counting on top of what the vfs
+ * does. When .destroy_inode is called, normally the reference count will drop
+ * to zero and the inode gets deleted. But if GC accessed the inode, its
+ * refcount will remain nonzero and final deletion will have to wait.
+ *
+ * As a result we have two sets of functions to get/put inodes:
+ * logfs_safe_iget/logfs_safe_iput - safe to call from GC context
+ * logfs_iget/iput - normal version
+ */
+static struct kmem_cache *logfs_inode_cache;
+
+static DEFINE_SPINLOCK(logfs_inode_lock);
+
+static void logfs_inode_setops(struct inode *inode)
+{
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFDIR:
+ inode->i_op = &logfs_dir_iops;
+ inode->i_fop = &logfs_dir_fops;
+ inode->i_mapping->a_ops = &logfs_reg_aops;
+ break;
+ case S_IFREG:
+ inode->i_op = &logfs_reg_iops;
+ inode->i_fop = &logfs_reg_fops;
+ inode->i_mapping->a_ops = &logfs_reg_aops;
+ break;
+ case S_IFLNK:
+ inode->i_op = &logfs_symlink_iops;
+ inode->i_mapping->a_ops = &logfs_reg_aops;
+ break;
+ case S_IFSOCK: /* fall through */
+ case S_IFBLK: /* fall through */
+ case S_IFCHR: /* fall through */
+ case S_IFIFO:
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static struct inode *__logfs_iget(struct super_block *sb, ino_t ino)
+{
+ struct inode *inode = iget_locked(sb, ino);
+ int err;
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ err = logfs_read_inode(inode);
+ if (err || inode->i_nlink == 0) {
+ /* inode->i_nlink == 0 can be true when called from
+ * block validator */
+ /* set i_nlink to 0 to prevent caching */
+ inode->i_nlink = 0;
+ logfs_inode(inode)->li_flags |= LOGFS_IF_ZOMBIE;
+ iget_failed(inode);
+ if (!err)
+ err = -ENOENT;
+ return ERR_PTR(err);
+ }
+
+ logfs_inode_setops(inode);
+ unlock_new_inode(inode);
+ return inode;
+}
+
+struct inode *logfs_iget(struct super_block *sb, ino_t ino)
+{
+ BUG_ON(ino == LOGFS_INO_MASTER);
+ BUG_ON(ino == LOGFS_INO_SEGFILE);
+ return __logfs_iget(sb, ino);
+}
+
+/*
+ * is_cached is set to 1 if we hand out a cached inode, 0 otherwise.
+ * this allows logfs_iput to do the right thing later
+ */
+struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *is_cached)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_inode *li;
+
+ if (ino == LOGFS_INO_MASTER)
+ return super->s_master_inode;
+ if (ino == LOGFS_INO_SEGFILE)
+ return super->s_segfile_inode;
+
+ spin_lock(&logfs_inode_lock);
+ list_for_each_entry(li, &super->s_freeing_list, li_freeing_list)
+ if (li->vfs_inode.i_ino == ino) {
+ li->li_refcount++;
+ spin_unlock(&logfs_inode_lock);
+ *is_cached = 1;
+ return &li->vfs_inode;
+ }
+ spin_unlock(&logfs_inode_lock);
+
+ *is_cached = 0;
+ return __logfs_iget(sb, ino);
+}
+
+static void __logfs_destroy_inode(struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ BUG_ON(li->li_block);
+ list_del(&li->li_freeing_list);
+ kmem_cache_free(logfs_inode_cache, li);
+}
+
+static void logfs_destroy_inode(struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ BUG_ON(list_empty(&li->li_freeing_list));
+ spin_lock(&logfs_inode_lock);
+ li->li_refcount--;
+ if (li->li_refcount == 0)
+ __logfs_destroy_inode(inode);
+ spin_unlock(&logfs_inode_lock);
+}
+
+void logfs_safe_iput(struct inode *inode, int is_cached)
+{
+ if (inode->i_ino == LOGFS_INO_MASTER)
+ return;
+ if (inode->i_ino == LOGFS_INO_SEGFILE)
+ return;
+
+ if (is_cached) {
+ logfs_destroy_inode(inode);
+ return;
+ }
+
+ iput(inode);
+}
+
+static void logfs_init_inode(struct super_block *sb, struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ int i;
+
+ li->li_flags = 0;
+ li->li_height = 0;
+ li->li_used_bytes = 0;
+ li->li_block = NULL;
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_size = 0;
+ inode->i_blocks = 0;
+ inode->i_ctime = CURRENT_TIME;
+ inode->i_mtime = CURRENT_TIME;
+ inode->i_nlink = 1;
+ INIT_LIST_HEAD(&li->li_freeing_list);
+
+ for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
+ li->li_data[i] = 0;
+
+ return;
+}
+
+static struct inode *logfs_alloc_inode(struct super_block *sb)
+{
+ struct logfs_inode *li;
+
+ li = kmem_cache_alloc(logfs_inode_cache, GFP_NOFS);
+ if (!li)
+ return NULL;
+ logfs_init_inode(sb, &li->vfs_inode);
+ return &li->vfs_inode;
+}
+
+/*
+ * In logfs inodes are written to an inode file. The inode file, like any
+ * other file, is managed with a inode. The inode file's inode, aka master
+ * inode, requires special handling in several respects. First, it cannot be
+ * written to the inode file, so it is stored in the journal instead.
+ *
+ * Secondly, this inode cannot be written back and destroyed before all other
+ * inodes have been written. The ordering is important. Linux' VFS is happily
+ * unaware of the ordering constraint and would ordinarily destroy the master
+ * inode at umount time while other inodes are still in use and dirty. Not
+ * good.
+ *
+ * So logfs makes sure the master inode is not written until all other inodes
+ * have been destroyed. Sadly, this method has another side-effect. The VFS
+ * will notice one remaining inode and print a frightening warning message.
+ * Worse, it is impossible to judge whether such a warning was caused by the
+ * master inode or any other inodes have leaked as well.
+ *
+ * Our attempt of solving this is with logfs_new_meta_inode() below. Its
+ * purpose is to create a new inode that will not trigger the warning if such
+ * an inode is still in use. An ugly hack, no doubt. Suggections for
+ * improvement are welcome.
+ */
+struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino)
+{
+ struct inode *inode;
+
+ inode = logfs_alloc_inode(sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ inode->i_mode = S_IFREG;
+ inode->i_ino = ino;
+ inode->i_sb = sb;
+
+ /* This is a blatant copy of alloc_inode code. We'd need alloc_inode
+ * to be nonstatic, alas. */
+ {
+ struct address_space * const mapping = &inode->i_data;
+
+ mapping->a_ops = &logfs_reg_aops;
+ mapping->host = inode;
+ mapping->flags = 0;
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ mapping->assoc_mapping = NULL;
+ mapping->backing_dev_info = &default_backing_dev_info;
+ inode->i_mapping = mapping;
+ inode->i_nlink = 1;
+ }
+
+ return inode;
+}
+
+struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino)
+{
+ struct inode *inode;
+ int err;
+
+ inode = logfs_new_meta_inode(sb, ino);
+ if (IS_ERR(inode))
+ return inode;
+
+ err = logfs_read_inode(inode);
+ if (err) {
+ destroy_meta_inode(inode);
+ return ERR_PTR(err);
+ }
+ logfs_inode_setops(inode);
+ return inode;
+}
+
+static int logfs_write_inode(struct inode *inode, int do_sync)
+{
+ int ret;
+ long flags = WF_LOCK;
+
+ /* Can only happen if creat() failed. Safe to skip. */
+ if (logfs_inode(inode)->li_flags & LOGFS_IF_STILLBORN)
+ return 0;
+
+ ret = __logfs_write_inode(inode, flags);
+ LOGFS_BUG_ON(ret, inode->i_sb);
+ return ret;
+}
+
+void destroy_meta_inode(struct inode *inode)
+{
+ if (inode) {
+ if (inode->i_data.nrpages)
+ truncate_inode_pages(&inode->i_data, 0);
+ logfs_clear_inode(inode);
+ kmem_cache_free(logfs_inode_cache, logfs_inode(inode));
+ }
+}
+
+/* called with inode_lock held */
+static void logfs_drop_inode(struct inode *inode)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+ struct logfs_inode *li = logfs_inode(inode);
+
+ spin_lock(&logfs_inode_lock);
+ list_move(&li->li_freeing_list, &super->s_freeing_list);
+ spin_unlock(&logfs_inode_lock);
+ generic_drop_inode(inode);
+}
+
+static void logfs_set_ino_generation(struct super_block *sb,
+ struct inode *inode)
+{
+ struct logfs_super *super = logfs_super(sb);
+ u64 ino;
+
+ mutex_lock(&super->s_journal_mutex);
+ ino = logfs_seek_hole(super->s_master_inode, super->s_last_ino);
+ super->s_last_ino = ino;
+ super->s_inos_till_wrap--;
+ if (super->s_inos_till_wrap < 0) {
+ super->s_last_ino = LOGFS_RESERVED_INOS;
+ super->s_generation++;
+ super->s_inos_till_wrap = INOS_PER_WRAP;
+ }
+ inode->i_ino = ino;
+ inode->i_generation = super->s_generation;
+ mutex_unlock(&super->s_journal_mutex);
+}
+
+struct inode *logfs_new_inode(struct inode *dir, int mode)
+{
+ struct super_block *sb = dir->i_sb;
+ struct inode *inode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ logfs_init_inode(sb, inode);
+
+ /* inherit parent flags */
+ logfs_inode(inode)->li_flags |=
+ logfs_inode(dir)->li_flags & LOGFS_FL_INHERITED;
+
+ inode->i_mode = mode;
+ logfs_set_ino_generation(sb, inode);
+
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
+ if (dir->i_mode & S_ISGID) {
+ inode->i_gid = dir->i_gid;
+ if (S_ISDIR(mode))
+ inode->i_mode |= S_ISGID;
+ }
+
+ logfs_inode_setops(inode);
+ insert_inode_hash(inode);
+
+ return inode;
+}
+
+static void logfs_init_once(void *_li)
+{
+ struct logfs_inode *li = _li;
+ int i;
+
+ li->li_flags = 0;
+ li->li_used_bytes = 0;
+ li->li_refcount = 1;
+ for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
+ li->li_data[i] = 0;
+ inode_init_once(&li->vfs_inode);
+}
+
+static int logfs_sync_fs(struct super_block *sb, int wait)
+{
+ /* FIXME: write anchor */
+ logfs_super(sb)->s_devops->sync(sb);
+ return 0;
+}
+
+const struct super_operations logfs_super_operations = {
+ .alloc_inode = logfs_alloc_inode,
+ .clear_inode = logfs_clear_inode,
+ .delete_inode = logfs_delete_inode,
+ .destroy_inode = logfs_destroy_inode,
+ .drop_inode = logfs_drop_inode,
+ .write_inode = logfs_write_inode,
+ .statfs = logfs_statfs,
+ .sync_fs = logfs_sync_fs,
+};
+
+int logfs_init_inode_cache(void)
+{
+ logfs_inode_cache = kmem_cache_create("logfs_inode_cache",
+ sizeof(struct logfs_inode), 0, SLAB_RECLAIM_ACCOUNT,
+ logfs_init_once);
+ if (!logfs_inode_cache)
+ return -ENOMEM;
+ return 0;
+}
+
+void logfs_destroy_inode_cache(void)
+{
+ kmem_cache_destroy(logfs_inode_cache);
+}
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
new file mode 100644
index 000000000000..2f2e8e4fd02d
--- /dev/null
+++ b/fs/logfs/journal.c
@@ -0,0 +1,879 @@
+/*
+ * fs/logfs/journal.c - journal handling code
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ */
+#include "logfs.h"
+
+static void logfs_calc_free(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ u64 reserve, no_segs = super->s_no_segs;
+ s64 free;
+ int i;
+
+ /* superblock segments */
+ no_segs -= 2;
+ super->s_no_journal_segs = 0;
+ /* journal */
+ journal_for_each(i)
+ if (super->s_journal_seg[i]) {
+ no_segs--;
+ super->s_no_journal_segs++;
+ }
+
+ /* open segments plus one extra per level for GC */
+ no_segs -= 2 * super->s_total_levels;
+
+ free = no_segs * (super->s_segsize - LOGFS_SEGMENT_RESERVE);
+ free -= super->s_used_bytes;
+ /* just a bit extra */
+ free -= super->s_total_levels * 4096;
+
+ /* Bad blocks are 'paid' for with speed reserve - the filesystem
+ * simply gets slower as bad blocks accumulate. Until the bad blocks
+ * exceed the speed reserve - then the filesystem gets smaller.
+ */
+ reserve = super->s_bad_segments + super->s_bad_seg_reserve;
+ reserve *= super->s_segsize - LOGFS_SEGMENT_RESERVE;
+ reserve = max(reserve, super->s_speed_reserve);
+ free -= reserve;
+ if (free < 0)
+ free = 0;
+
+ super->s_free_bytes = free;
+}
+
+static void reserve_sb_and_journal(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct btree_head32 *head = &super->s_reserved_segments;
+ int i, err;
+
+ err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[0]), (void *)1,
+ GFP_KERNEL);
+ BUG_ON(err);
+
+ err = btree_insert32(head, seg_no(sb, super->s_sb_ofs[1]), (void *)1,
+ GFP_KERNEL);
+ BUG_ON(err);
+
+ journal_for_each(i) {
+ if (!super->s_journal_seg[i])
+ continue;
+ err = btree_insert32(head, super->s_journal_seg[i], (void *)1,
+ GFP_KERNEL);
+ BUG_ON(err);
+ }
+}
+
+static void read_dynsb(struct super_block *sb,
+ struct logfs_je_dynsb *dynsb)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ super->s_gec = be64_to_cpu(dynsb->ds_gec);
+ super->s_sweeper = be64_to_cpu(dynsb->ds_sweeper);
+ super->s_victim_ino = be64_to_cpu(dynsb->ds_victim_ino);
+ super->s_rename_dir = be64_to_cpu(dynsb->ds_rename_dir);
+ super->s_rename_pos = be64_to_cpu(dynsb->ds_rename_pos);
+ super->s_used_bytes = be64_to_cpu(dynsb->ds_used_bytes);
+ super->s_generation = be32_to_cpu(dynsb->ds_generation);
+}
+
+static void read_anchor(struct super_block *sb,
+ struct logfs_je_anchor *da)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct inode *inode = super->s_master_inode;
+ struct logfs_inode *li = logfs_inode(inode);
+ int i;
+
+ super->s_last_ino = be64_to_cpu(da->da_last_ino);
+ li->li_flags = 0;
+ li->li_height = da->da_height;
+ i_size_write(inode, be64_to_cpu(da->da_size));
+ li->li_used_bytes = be64_to_cpu(da->da_used_bytes);
+
+ for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
+ li->li_data[i] = be64_to_cpu(da->da_data[i]);
+}
+
+static void read_erasecount(struct super_block *sb,
+ struct logfs_je_journal_ec *ec)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int i;
+
+ journal_for_each(i)
+ super->s_journal_ec[i] = be32_to_cpu(ec->ec[i]);
+}
+
+static int read_area(struct super_block *sb, struct logfs_je_area *a)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_area *area = super->s_area[a->gc_level];
+ u64 ofs;
+ u32 writemask = ~(super->s_writesize - 1);
+
+ if (a->gc_level >= LOGFS_NO_AREAS)
+ return -EIO;
+ if (a->vim != VIM_DEFAULT)
+ return -EIO; /* TODO: close area and continue */
+
+ area->a_used_bytes = be32_to_cpu(a->used_bytes);
+ area->a_written_bytes = area->a_used_bytes & writemask;
+ area->a_segno = be32_to_cpu(a->segno);
+ if (area->a_segno)
+ area->a_is_open = 1;
+
+ ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
+ if (super->s_writesize > 1)
+ logfs_buf_recover(area, ofs, a + 1, super->s_writesize);
+ else
+ logfs_buf_recover(area, ofs, NULL, 0);
+ return 0;
+}
+
+static void *unpack(void *from, void *to)
+{
+ struct logfs_journal_header *jh = from;
+ void *data = from + sizeof(struct logfs_journal_header);
+ int err;
+ size_t inlen, outlen;
+
+ inlen = be16_to_cpu(jh->h_len);
+ outlen = be16_to_cpu(jh->h_datalen);
+
+ if (jh->h_compr == COMPR_NONE)
+ memcpy(to, data, inlen);
+ else {
+ err = logfs_uncompress(data, to, inlen, outlen);
+ BUG_ON(err);
+ }
+ return to;
+}
+
+static int __read_je_header(struct super_block *sb, u64 ofs,
+ struct logfs_journal_header *jh)
+{
+ struct logfs_super *super = logfs_super(sb);
+ size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize)
+ + MAX_JOURNAL_HEADER;
+ u16 type, len, datalen;
+ int err;
+
+ /* read header only */
+ err = wbuf_read(sb, ofs, sizeof(*jh), jh);
+ if (err)
+ return err;
+ type = be16_to_cpu(jh->h_type);
+ len = be16_to_cpu(jh->h_len);
+ datalen = be16_to_cpu(jh->h_datalen);
+ if (len > sb->s_blocksize)
+ return -EIO;
+ if ((type < JE_FIRST) || (type > JE_LAST))
+ return -EIO;
+ if (datalen > bufsize)
+ return -EIO;
+ return 0;
+}
+
+static int __read_je_payload(struct super_block *sb, u64 ofs,
+ struct logfs_journal_header *jh)
+{
+ u16 len;
+ int err;
+
+ len = be16_to_cpu(jh->h_len);
+ err = wbuf_read(sb, ofs + sizeof(*jh), len, jh + 1);
+ if (err)
+ return err;
+ if (jh->h_crc != logfs_crc32(jh, len + sizeof(*jh), 4)) {
+ /* Old code was confused. It forgot about the header length
+ * and stopped calculating the crc 16 bytes before the end
+ * of data - ick!
+ * FIXME: Remove this hack once the old code is fixed.
+ */
+ if (jh->h_crc == logfs_crc32(jh, len, 4))
+ WARN_ON_ONCE(1);
+ else
+ return -EIO;
+ }
+ return 0;
+}
+
+/*
+ * jh needs to be large enough to hold the complete entry, not just the header
+ */
+static int __read_je(struct super_block *sb, u64 ofs,
+ struct logfs_journal_header *jh)
+{
+ int err;
+
+ err = __read_je_header(sb, ofs, jh);
+ if (err)
+ return err;
+ return __read_je_payload(sb, ofs, jh);
+}
+
+static int read_je(struct super_block *sb, u64 ofs)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_journal_header *jh = super->s_compressed_je;
+ void *scratch = super->s_je;
+ u16 type, datalen;
+ int err;
+
+ err = __read_je(sb, ofs, jh);
+ if (err)
+ return err;
+ type = be16_to_cpu(jh->h_type);
+ datalen = be16_to_cpu(jh->h_datalen);
+
+ switch (type) {
+ case JE_DYNSB:
+ read_dynsb(sb, unpack(jh, scratch));
+ break;
+ case JE_ANCHOR:
+ read_anchor(sb, unpack(jh, scratch));
+ break;
+ case JE_ERASECOUNT:
+ read_erasecount(sb, unpack(jh, scratch));
+ break;
+ case JE_AREA:
+ read_area(sb, unpack(jh, scratch));
+ break;
+ case JE_OBJ_ALIAS:
+ err = logfs_load_object_aliases(sb, unpack(jh, scratch),
+ datalen);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EIO;
+ }
+ return err;
+}
+
+static int logfs_read_segment(struct super_block *sb, u32 segno)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_journal_header *jh = super->s_compressed_je;
+ u64 ofs, seg_ofs = dev_ofs(sb, segno, 0);
+ u32 h_ofs, last_ofs = 0;
+ u16 len, datalen, last_len = 0;
+ int i, err;
+
+ /* search for most recent commit */
+ for (h_ofs = 0; h_ofs < super->s_segsize; h_ofs += sizeof(*jh)) {
+ ofs = seg_ofs + h_ofs;
+ err = __read_je_header(sb, ofs, jh);
+ if (err)
+ continue;
+ if (jh->h_type != cpu_to_be16(JE_COMMIT))
+ continue;
+ err = __read_je_payload(sb, ofs, jh);
+ if (err)
+ continue;
+ len = be16_to_cpu(jh->h_len);
+ datalen = be16_to_cpu(jh->h_datalen);
+ if ((datalen > sizeof(super->s_je_array)) ||
+ (datalen % sizeof(__be64)))
+ continue;
+ last_ofs = h_ofs;
+ last_len = datalen;
+ h_ofs += ALIGN(len, sizeof(*jh)) - sizeof(*jh);
+ }
+ /* read commit */
+ if (last_ofs == 0)
+ return -ENOENT;
+ ofs = seg_ofs + last_ofs;
+ log_journal("Read commit from %llx\n", ofs);
+ err = __read_je(sb, ofs, jh);
+ BUG_ON(err); /* We should have caught it in the scan loop already */
+ if (err)
+ return err;
+ /* uncompress */
+ unpack(jh, super->s_je_array);
+ super->s_no_je = last_len / sizeof(__be64);
+ /* iterate over array */
+ for (i = 0; i < super->s_no_je; i++) {
+ err = read_je(sb, be64_to_cpu(super->s_je_array[i]));
+ if (err)
+ return err;
+ }
+ super->s_journal_area->a_segno = segno;
+ return 0;
+}
+
+static u64 read_gec(struct super_block *sb, u32 segno)
+{
+ struct logfs_segment_header sh;
+ __be32 crc;
+ int err;
+
+ if (!segno)
+ return 0;
+ err = wbuf_read(sb, dev_ofs(sb, segno, 0), sizeof(sh), &sh);
+ if (err)
+ return 0;
+ crc = logfs_crc32(&sh, sizeof(sh), 4);
+ if (crc != sh.crc) {
+ WARN_ON(sh.gec != cpu_to_be64(0xffffffffffffffffull));
+ /* Most likely it was just erased */
+ return 0;
+ }
+ return be64_to_cpu(sh.gec);
+}
+
+static int logfs_read_journal(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ u64 gec[LOGFS_JOURNAL_SEGS], max;
+ u32 segno;
+ int i, max_i;
+
+ max = 0;
+ max_i = -1;
+ journal_for_each(i) {
+ segno = super->s_journal_seg[i];
+ gec[i] = read_gec(sb, super->s_journal_seg[i]);
+ if (gec[i] > max) {
+ max = gec[i];
+ max_i = i;
+ }
+ }
+ if (max_i == -1)
+ return -EIO;
+ /* FIXME: Try older segments in case of error */
+ return logfs_read_segment(sb, super->s_journal_seg[max_i]);
+}
+
+/*
+ * First search the current segment (outer loop), then pick the next segment
+ * in the array, skipping any zero entries (inner loop).
+ */
+static void journal_get_free_segment(struct logfs_area *area)
+{
+ struct logfs_super *super = logfs_super(area->a_sb);
+ int i;
+
+ journal_for_each(i) {
+ if (area->a_segno != super->s_journal_seg[i])
+ continue;
+
+ do {
+ i++;
+ if (i == LOGFS_JOURNAL_SEGS)
+ i = 0;
+ } while (!super->s_journal_seg[i]);
+
+ area->a_segno = super->s_journal_seg[i];
+ area->a_erase_count = ++(super->s_journal_ec[i]);
+ log_journal("Journal now at %x (ec %x)\n", area->a_segno,
+ area->a_erase_count);
+ return;
+ }
+ BUG();
+}
+
+static void journal_get_erase_count(struct logfs_area *area)
+{
+ /* erase count is stored globally and incremented in
+ * journal_get_free_segment() - nothing to do here */
+}
+
+static int journal_erase_segment(struct logfs_area *area)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_segment_header sh;
+ u64 ofs;
+ int err;
+
+ err = logfs_erase_segment(sb, area->a_segno);
+ if (err)
+ return err;
+
+ sh.pad = 0;
+ sh.type = SEG_JOURNAL;
+ sh.level = 0;
+ sh.segno = cpu_to_be32(area->a_segno);
+ sh.ec = cpu_to_be32(area->a_erase_count);
+ sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
+ sh.crc = logfs_crc32(&sh, sizeof(sh), 4);
+
+ /* This causes a bug in segment.c. Not yet. */
+ //logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count, 0);
+
+ ofs = dev_ofs(sb, area->a_segno, 0);
+ area->a_used_bytes = ALIGN(sizeof(sh), 16);
+ logfs_buf_write(area, ofs, &sh, sizeof(sh));
+ return 0;
+}
+
+static size_t __logfs_write_header(struct logfs_super *super,
+ struct logfs_journal_header *jh, size_t len, size_t datalen,
+ u16 type, u8 compr)
+{
+ jh->h_len = cpu_to_be16(len);
+ jh->h_type = cpu_to_be16(type);
+ jh->h_version = cpu_to_be16(++super->s_last_version);
+ jh->h_datalen = cpu_to_be16(datalen);
+ jh->h_compr = compr;
+ jh->h_pad[0] = 'H';
+ jh->h_pad[1] = 'A';
+ jh->h_pad[2] = 'T';
+ jh->h_crc = logfs_crc32(jh, len + sizeof(*jh), 4);
+ return ALIGN(len, 16) + sizeof(*jh);
+}
+
+static size_t logfs_write_header(struct logfs_super *super,
+ struct logfs_journal_header *jh, size_t datalen, u16 type)
+{
+ size_t len = datalen;
+
+ return __logfs_write_header(super, jh, len, datalen, type, COMPR_NONE);
+}
+
+static inline size_t logfs_journal_erasecount_size(struct logfs_super *super)
+{
+ return LOGFS_JOURNAL_SEGS * sizeof(__be32);
+}
+
+static void *logfs_write_erasecount(struct super_block *sb, void *_ec,
+ u16 *type, size_t *len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_je_journal_ec *ec = _ec;
+ int i;
+
+ journal_for_each(i)
+ ec->ec[i] = cpu_to_be32(super->s_journal_ec[i]);
+ *type = JE_ERASECOUNT;
+ *len = logfs_journal_erasecount_size(super);
+ return ec;
+}
+
+static void account_shadow(void *_shadow, unsigned long _sb, u64 ignore,
+ size_t ignore2)
+{
+ struct logfs_shadow *shadow = _shadow;
+ struct super_block *sb = (void *)_sb;
+ struct logfs_super *super = logfs_super(sb);
+
+ /* consume new space */
+ super->s_free_bytes -= shadow->new_len;
+ super->s_used_bytes += shadow->new_len;
+ super->s_dirty_used_bytes -= shadow->new_len;
+
+ /* free up old space */
+ super->s_free_bytes += shadow->old_len;
+ super->s_used_bytes -= shadow->old_len;
+ super->s_dirty_free_bytes -= shadow->old_len;
+
+ logfs_set_segment_used(sb, shadow->old_ofs, -shadow->old_len);
+ logfs_set_segment_used(sb, shadow->new_ofs, shadow->new_len);
+
+ log_journal("account_shadow(%llx, %llx, %x) %llx->%llx %x->%x\n",
+ shadow->ino, shadow->bix, shadow->gc_level,
+ shadow->old_ofs, shadow->new_ofs,
+ shadow->old_len, shadow->new_len);
+ mempool_free(shadow, super->s_shadow_pool);
+}
+
+static void account_shadows(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct inode *inode = super->s_master_inode;
+ struct logfs_inode *li = logfs_inode(inode);
+ struct shadow_tree *tree = &super->s_shadow_tree;
+
+ btree_grim_visitor64(&tree->new, (unsigned long)sb, account_shadow);
+ btree_grim_visitor64(&tree->old, (unsigned long)sb, account_shadow);
+
+ if (li->li_block) {
+ /*
+ * We never actually use the structure, when attached to the
+ * master inode. But it is easier to always free it here than
+ * to have checks in several places elsewhere when allocating
+ * it.
+ */
+ li->li_block->ops->free_block(sb, li->li_block);
+ }
+ BUG_ON((s64)li->li_used_bytes < 0);
+}
+
+static void *__logfs_write_anchor(struct super_block *sb, void *_da,
+ u16 *type, size_t *len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_je_anchor *da = _da;
+ struct inode *inode = super->s_master_inode;
+ struct logfs_inode *li = logfs_inode(inode);
+ int i;
+
+ da->da_height = li->li_height;
+ da->da_last_ino = cpu_to_be64(super->s_last_ino);
+ da->da_size = cpu_to_be64(i_size_read(inode));
+ da->da_used_bytes = cpu_to_be64(li->li_used_bytes);
+ for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
+ da->da_data[i] = cpu_to_be64(li->li_data[i]);
+ *type = JE_ANCHOR;
+ *len = sizeof(*da);
+ return da;
+}
+
+static void *logfs_write_dynsb(struct super_block *sb, void *_dynsb,
+ u16 *type, size_t *len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_je_dynsb *dynsb = _dynsb;
+
+ dynsb->ds_gec = cpu_to_be64(super->s_gec);
+ dynsb->ds_sweeper = cpu_to_be64(super->s_sweeper);
+ dynsb->ds_victim_ino = cpu_to_be64(super->s_victim_ino);
+ dynsb->ds_rename_dir = cpu_to_be64(super->s_rename_dir);
+ dynsb->ds_rename_pos = cpu_to_be64(super->s_rename_pos);
+ dynsb->ds_used_bytes = cpu_to_be64(super->s_used_bytes);
+ dynsb->ds_generation = cpu_to_be32(super->s_generation);
+ *type = JE_DYNSB;
+ *len = sizeof(*dynsb);
+ return dynsb;
+}
+
+static void write_wbuf(struct super_block *sb, struct logfs_area *area,
+ void *wbuf)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ u64 ofs;
+ pgoff_t index;
+ int page_ofs;
+ struct page *page;
+
+ ofs = dev_ofs(sb, area->a_segno,
+ area->a_used_bytes & ~(super->s_writesize - 1));
+ index = ofs >> PAGE_SHIFT;
+ page_ofs = ofs & (PAGE_SIZE - 1);
+
+ page = find_lock_page(mapping, index);
+ BUG_ON(!page);
+ memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize);
+ unlock_page(page);
+}
+
+static void *logfs_write_area(struct super_block *sb, void *_a,
+ u16 *type, size_t *len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_area *area = super->s_area[super->s_sum_index];
+ struct logfs_je_area *a = _a;
+
+ a->vim = VIM_DEFAULT;
+ a->gc_level = super->s_sum_index;
+ a->used_bytes = cpu_to_be32(area->a_used_bytes);
+ a->segno = cpu_to_be32(area->a_segno);
+ if (super->s_writesize > 1)
+ write_wbuf(sb, area, a + 1);
+
+ *type = JE_AREA;
+ *len = sizeof(*a) + super->s_writesize;
+ return a;
+}
+
+static void *logfs_write_commit(struct super_block *sb, void *h,
+ u16 *type, size_t *len)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ *type = JE_COMMIT;
+ *len = super->s_no_je * sizeof(__be64);
+ return super->s_je_array;
+}
+
+static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type,
+ size_t len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ void *header = super->s_compressed_je;
+ void *data = header + sizeof(struct logfs_journal_header);
+ ssize_t compr_len, pad_len;
+ u8 compr = COMPR_ZLIB;
+
+ if (len == 0)
+ return logfs_write_header(super, header, 0, type);
+
+ compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
+ if (compr_len < 0 || type == JE_ANCHOR) {
+ BUG_ON(len > sb->s_blocksize);
+ memcpy(data, buf, len);
+ compr_len = len;
+ compr = COMPR_NONE;
+ }
+
+ pad_len = ALIGN(compr_len, 16);
+ memset(data + compr_len, 0, pad_len - compr_len);
+
+ return __logfs_write_header(super, header, compr_len, len, type, compr);
+}
+
+static s64 logfs_get_free_bytes(struct logfs_area *area, size_t *bytes,
+ int must_pad)
+{
+ u32 writesize = logfs_super(area->a_sb)->s_writesize;
+ s32 ofs;
+ int ret;
+
+ ret = logfs_open_area(area, *bytes);
+ if (ret)
+ return -EAGAIN;
+
+ ofs = area->a_used_bytes;
+ area->a_used_bytes += *bytes;
+
+ if (must_pad) {
+ area->a_used_bytes = ALIGN(area->a_used_bytes, writesize);
+ *bytes = area->a_used_bytes - ofs;
+ }
+
+ return dev_ofs(area->a_sb, area->a_segno, ofs);
+}
+
+static int logfs_write_je_buf(struct super_block *sb, void *buf, u16 type,
+ size_t buf_len)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_area *area = super->s_journal_area;
+ struct logfs_journal_header *jh = super->s_compressed_je;
+ size_t len;
+ int must_pad = 0;
+ s64 ofs;
+
+ len = __logfs_write_je(sb, buf, type, buf_len);
+ if (jh->h_type == cpu_to_be16(JE_COMMIT))
+ must_pad = 1;
+
+ ofs = logfs_get_free_bytes(area, &len, must_pad);
+ if (ofs < 0)
+ return ofs;
+ logfs_buf_write(area, ofs, super->s_compressed_je, len);
+ super->s_je_array[super->s_no_je++] = cpu_to_be64(ofs);
+ return 0;
+}
+
+static int logfs_write_je(struct super_block *sb,
+ void* (*write)(struct super_block *sb, void *scratch,
+ u16 *type, size_t *len))
+{
+ void *buf;
+ size_t len;
+ u16 type;
+
+ buf = write(sb, logfs_super(sb)->s_je, &type, &len);
+ return logfs_write_je_buf(sb, buf, type, len);
+}
+
+int write_alias_journal(struct super_block *sb, u64 ino, u64 bix,
+ level_t level, int child_no, __be64 val)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_obj_alias *oa = super->s_je;
+ int err = 0, fill = super->s_je_fill;
+
+ log_aliases("logfs_write_obj_aliases #%x(%llx, %llx, %x, %x) %llx\n",
+ fill, ino, bix, level, child_no, be64_to_cpu(val));
+ oa[fill].ino = cpu_to_be64(ino);
+ oa[fill].bix = cpu_to_be64(bix);
+ oa[fill].val = val;
+ oa[fill].level = (__force u8)level;
+ oa[fill].child_no = cpu_to_be16(child_no);
+ fill++;
+ if (fill >= sb->s_blocksize / sizeof(*oa)) {
+ err = logfs_write_je_buf(sb, oa, JE_OBJ_ALIAS, sb->s_blocksize);
+ fill = 0;
+ }
+
+ super->s_je_fill = fill;
+ return err;
+}
+
+static int logfs_write_obj_aliases(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int err;
+
+ log_journal("logfs_write_obj_aliases: %d aliases to write\n",
+ super->s_no_object_aliases);
+ super->s_je_fill = 0;
+ err = logfs_write_obj_aliases_pagecache(sb);
+ if (err)
+ return err;
+
+ if (super->s_je_fill)
+ err = logfs_write_je_buf(sb, super->s_je, JE_OBJ_ALIAS,
+ super->s_je_fill
+ * sizeof(struct logfs_obj_alias));
+ return err;
+}
+
+/*
+ * Write all journal entries. The goto logic ensures that all journal entries
+ * are written whenever a new segment is used. It is ugly and potentially a
+ * bit wasteful, but robustness is more important. With this we can *always*
+ * erase all journal segments except the one containing the most recent commit.
+ */
+void logfs_write_anchor(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_area *area = super->s_journal_area;
+ int i, err;
+
+ BUG_ON(logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
+ mutex_lock(&super->s_journal_mutex);
+
+ /* Do this first or suffer corruption */
+ logfs_sync_segments(sb);
+ account_shadows(sb);
+
+again:
+ super->s_no_je = 0;
+ for_each_area(i) {
+ if (!super->s_area[i]->a_is_open)
+ continue;
+ super->s_sum_index = i;
+ err = logfs_write_je(sb, logfs_write_area);
+ if (err)
+ goto again;
+ }
+ err = logfs_write_obj_aliases(sb);
+ if (err)
+ goto again;
+ err = logfs_write_je(sb, logfs_write_erasecount);
+ if (err)
+ goto again;
+ err = logfs_write_je(sb, __logfs_write_anchor);
+ if (err)
+ goto again;
+ err = logfs_write_je(sb, logfs_write_dynsb);
+ if (err)
+ goto again;
+ /*
+ * Order is imperative. First we sync all writes, including the
+ * non-committed journal writes. Then we write the final commit and
+ * sync the current journal segment.
+ * There is a theoretical bug here. Syncing the journal segment will
+ * write a number of journal entries and the final commit. All these
+ * are written in a single operation. If the device layer writes the
+ * data back-to-front, the commit will precede the other journal
+ * entries, leaving a race window.
+ * Two fixes are possible. Preferred is to fix the device layer to
+ * ensure writes happen front-to-back. Alternatively we can insert
+ * another logfs_sync_area() super->s_devops->sync() combo before
+ * writing the commit.
+ */
+ /*
+ * On another subject, super->s_devops->sync is usually not necessary.
+ * Unless called from sys_sync or friends, a barrier would suffice.
+ */
+ super->s_devops->sync(sb);
+ err = logfs_write_je(sb, logfs_write_commit);
+ if (err)
+ goto again;
+ log_journal("Write commit to %llx\n",
+ be64_to_cpu(super->s_je_array[super->s_no_je - 1]));
+ logfs_sync_area(area);
+ BUG_ON(area->a_used_bytes != area->a_written_bytes);
+ super->s_devops->sync(sb);
+
+ mutex_unlock(&super->s_journal_mutex);
+ return;
+}
+
+void do_logfs_journal_wl_pass(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_area *area = super->s_journal_area;
+ u32 segno, ec;
+ int i, err;
+
+ log_journal("Journal requires wear-leveling.\n");
+ /* Drop old segments */
+ journal_for_each(i)
+ if (super->s_journal_seg[i]) {
+ logfs_set_segment_unreserved(sb,
+ super->s_journal_seg[i],
+ super->s_journal_ec[i]);
+ super->s_journal_seg[i] = 0;
+ super->s_journal_ec[i] = 0;
+ }
+ /* Get new segments */
+ for (i = 0; i < super->s_no_journal_segs; i++) {
+ segno = get_best_cand(sb, &super->s_reserve_list, &ec);
+ super->s_journal_seg[i] = segno;
+ super->s_journal_ec[i] = ec;
+ logfs_set_segment_reserved(sb, segno);
+ }
+ /* Manually move journal_area */
+ area->a_segno = super->s_journal_seg[0];
+ area->a_is_open = 0;
+ area->a_used_bytes = 0;
+ /* Write journal */
+ logfs_write_anchor(super->s_master_inode);
+ /* Write superblocks */
+ err = logfs_write_sb(sb);
+ BUG_ON(err);
+}
+
+static const struct logfs_area_ops journal_area_ops = {
+ .get_free_segment = journal_get_free_segment,
+ .get_erase_count = journal_get_erase_count,
+ .erase_segment = journal_erase_segment,
+};
+
+int logfs_init_journal(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ size_t bufsize = max_t(size_t, sb->s_blocksize, super->s_writesize)
+ + MAX_JOURNAL_HEADER;
+ int ret = -ENOMEM;
+
+ mutex_init(&super->s_journal_mutex);
+ btree_init_mempool32(&super->s_reserved_segments, super->s_btree_pool);
+
+ super->s_je = kzalloc(bufsize, GFP_KERNEL);
+ if (!super->s_je)
+ return ret;
+
+ super->s_compressed_je = kzalloc(bufsize, GFP_KERNEL);
+ if (!super->s_compressed_je)
+ return ret;
+
+ super->s_master_inode = logfs_new_meta_inode(sb, LOGFS_INO_MASTER);
+ if (IS_ERR(super->s_master_inode))
+ return PTR_ERR(super->s_master_inode);
+
+ ret = logfs_read_journal(sb);
+ if (ret)
+ return -EIO;
+
+ reserve_sb_and_journal(sb);
+ logfs_calc_free(sb);
+
+ super->s_journal_area->a_ops = &journal_area_ops;
+ return 0;
+}
+
+void logfs_cleanup_journal(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ btree_grim_visitor32(&super->s_reserved_segments, 0, NULL);
+ destroy_meta_inode(super->s_master_inode);
+ super->s_master_inode = NULL;
+
+ kfree(super->s_compressed_je);
+ kfree(super->s_je);
+}
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
new file mode 100644
index 000000000000..e3082abe9e3b
--- /dev/null
+++ b/fs/logfs/logfs.h
@@ -0,0 +1,722 @@
+/*
+ * fs/logfs/logfs.h
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ *
+ * Private header for logfs.
+ */
+#ifndef FS_LOGFS_LOGFS_H
+#define FS_LOGFS_LOGFS_H
+
+#undef __CHECK_ENDIAN__
+#define __CHECK_ENDIAN__
+
+#include <linux/btree.h>
+#include <linux/crc32.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mempool.h>
+#include <linux/pagemap.h>
+#include <linux/mtd/mtd.h>
+#include "logfs_abi.h"
+
+#define LOGFS_DEBUG_SUPER (0x0001)
+#define LOGFS_DEBUG_SEGMENT (0x0002)
+#define LOGFS_DEBUG_JOURNAL (0x0004)
+#define LOGFS_DEBUG_DIR (0x0008)
+#define LOGFS_DEBUG_FILE (0x0010)
+#define LOGFS_DEBUG_INODE (0x0020)
+#define LOGFS_DEBUG_READWRITE (0x0040)
+#define LOGFS_DEBUG_GC (0x0080)
+#define LOGFS_DEBUG_GC_NOISY (0x0100)
+#define LOGFS_DEBUG_ALIASES (0x0200)
+#define LOGFS_DEBUG_BLOCKMOVE (0x0400)
+#define LOGFS_DEBUG_ALL (0xffffffff)
+
+#define LOGFS_DEBUG (0x01)
+/*
+ * To enable specific log messages, simply define LOGFS_DEBUG to match any
+ * or all of the above.
+ */
+#ifndef LOGFS_DEBUG
+#define LOGFS_DEBUG (0)
+#endif
+
+#define log_cond(cond, fmt, arg...) do { \
+ if (cond) \
+ printk(KERN_DEBUG fmt, ##arg); \
+} while (0)
+
+#define log_super(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_SUPER, fmt, ##arg)
+#define log_segment(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_SEGMENT, fmt, ##arg)
+#define log_journal(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_JOURNAL, fmt, ##arg)
+#define log_dir(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_DIR, fmt, ##arg)
+#define log_file(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_FILE, fmt, ##arg)
+#define log_inode(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_INODE, fmt, ##arg)
+#define log_readwrite(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_READWRITE, fmt, ##arg)
+#define log_gc(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_GC, fmt, ##arg)
+#define log_gc_noisy(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_GC_NOISY, fmt, ##arg)
+#define log_aliases(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_ALIASES, fmt, ##arg)
+#define log_blockmove(fmt, arg...) \
+ log_cond(LOGFS_DEBUG & LOGFS_DEBUG_BLOCKMOVE, fmt, ##arg)
+
+#define PG_pre_locked PG_owner_priv_1
+#define PagePreLocked(page) test_bit(PG_pre_locked, &(page)->flags)
+#define SetPagePreLocked(page) set_bit(PG_pre_locked, &(page)->flags)
+#define ClearPagePreLocked(page) clear_bit(PG_pre_locked, &(page)->flags)
+
+/* FIXME: This should really be somewhere in the 64bit area. */
+#define LOGFS_LINK_MAX (1<<30)
+
+/* Read-only filesystem */
+#define LOGFS_SB_FLAG_RO 0x0001
+#define LOGFS_SB_FLAG_SEG_ALIAS 0x0002
+#define LOGFS_SB_FLAG_OBJ_ALIAS 0x0004
+#define LOGFS_SB_FLAG_SHUTDOWN 0x0008
+
+/* Write Control Flags */
+#define WF_LOCK 0x01 /* take write lock */
+#define WF_WRITE 0x02 /* write block */
+#define WF_DELETE 0x04 /* delete old block */
+
+typedef u8 __bitwise level_t;
+typedef u8 __bitwise gc_level_t;
+
+#define LEVEL(level) ((__force level_t)(level))
+#define GC_LEVEL(gc_level) ((__force gc_level_t)(gc_level))
+
+#define SUBLEVEL(level) ( (void)((level) == LEVEL(1)), \
+ (__force level_t)((__force u8)(level) - 1) )
+
+/**
+ * struct logfs_area - area management information
+ *
+ * @a_sb: the superblock this area belongs to
+ * @a_is_open: 1 if the area is currently open, else 0
+ * @a_segno: segment number of area
+ * @a_written_bytes: number of bytes already written back
+ * @a_used_bytes: number of used bytes
+ * @a_ops: area operations (either journal or ostore)
+ * @a_erase_count: erase count
+ * @a_level: GC level
+ */
+struct logfs_area { /* a segment open for writing */
+ struct super_block *a_sb;
+ int a_is_open;
+ u32 a_segno;
+ u32 a_written_bytes;
+ u32 a_used_bytes;
+ const struct logfs_area_ops *a_ops;
+ u32 a_erase_count;
+ gc_level_t a_level;
+};
+
+/**
+ * struct logfs_area_ops - area operations
+ *
+ * @get_free_segment: fill area->ofs with the offset of a free segment
+ * @get_erase_count: fill area->erase_count (needs area->ofs)
+ * @erase_segment: erase and setup segment
+ */
+struct logfs_area_ops {
+ void (*get_free_segment)(struct logfs_area *area);
+ void (*get_erase_count)(struct logfs_area *area);
+ int (*erase_segment)(struct logfs_area *area);
+};
+
+/**
+ * struct logfs_device_ops - device access operations
+ *
+ * @readpage: read one page (mm page)
+ * @writeseg: write one segment. may be a partial segment
+ * @erase: erase one segment
+ * @read: read from the device
+ * @erase: erase part of the device
+ */
+struct logfs_device_ops {
+ struct page *(*find_first_sb)(struct super_block *sb, u64 *ofs);
+ struct page *(*find_last_sb)(struct super_block *sb, u64 *ofs);
+ int (*write_sb)(struct super_block *sb, struct page *page);
+ int (*readpage)(void *_sb, struct page *page);
+ void (*writeseg)(struct super_block *sb, u64 ofs, size_t len);
+ int (*erase)(struct super_block *sb, loff_t ofs, size_t len);
+ void (*sync)(struct super_block *sb);
+ void (*put_device)(struct super_block *sb);
+};
+
+/**
+ * struct candidate_list - list of similar candidates
+ */
+struct candidate_list {
+ struct rb_root rb_tree;
+ int count;
+ int maxcount;
+ int sort_by_ec;
+};
+
+/**
+ * struct gc_candidate - "candidate" segment to be garbage collected next
+ *
+ * @list: list (either free of low)
+ * @segno: segment number
+ * @valid: number of valid bytes
+ * @erase_count: erase count of segment
+ * @dist: distance from tree root
+ *
+ * Candidates can be on two lists. The free list contains electees rather
+ * than candidates - segments that no longer contain any valid data. The
+ * low list contains candidates to be picked for GC. It should be kept
+ * short. It is not required to always pick a perfect candidate. In the
+ * worst case GC will have to move more data than absolutely necessary.
+ */
+struct gc_candidate {
+ struct rb_node rb_node;
+ struct candidate_list *list;
+ u32 segno;
+ u32 valid;
+ u32 erase_count;
+ u8 dist;
+};
+
+/**
+ * struct logfs_journal_entry - temporary structure used during journal scan
+ *
+ * @used:
+ * @version: normalized version
+ * @len: length
+ * @offset: offset
+ */
+struct logfs_journal_entry {
+ int used;
+ s16 version;
+ u16 len;
+ u16 datalen;
+ u64 offset;
+};
+
+enum transaction_state {
+ CREATE_1 = 1,
+ CREATE_2,
+ UNLINK_1,
+ UNLINK_2,
+ CROSS_RENAME_1,
+ CROSS_RENAME_2,
+ TARGET_RENAME_1,
+ TARGET_RENAME_2,
+ TARGET_RENAME_3
+};
+
+/**
+ * struct logfs_transaction - essential fields to support atomic dirops
+ *
+ * @ino: target inode
+ * @dir: inode of directory containing dentry
+ * @pos: pos of dentry in directory
+ */
+struct logfs_transaction {
+ enum transaction_state state;
+ u64 ino;
+ u64 dir;
+ u64 pos;
+};
+
+/**
+ * struct logfs_shadow - old block in the shadow of a not-yet-committed new one
+ * @old_ofs: offset of old block on medium
+ * @new_ofs: offset of new block on medium
+ * @ino: inode number
+ * @bix: block index
+ * @old_len: size of old block, including header
+ * @new_len: size of new block, including header
+ * @level: block level
+ */
+struct logfs_shadow {
+ u64 old_ofs;
+ u64 new_ofs;
+ u64 ino;
+ u64 bix;
+ int old_len;
+ int new_len;
+ gc_level_t gc_level;
+};
+
+/**
+ * struct shadow_tree
+ * @new: shadows where old_ofs==0, indexed by new_ofs
+ * @old: shadows where old_ofs!=0, indexed by old_ofs
+ */
+struct shadow_tree {
+ struct btree_head64 new;
+ struct btree_head64 old;
+};
+
+struct object_alias_item {
+ struct list_head list;
+ __be64 val;
+ int child_no;
+};
+
+/**
+ * struct logfs_block - contains any block state
+ * @type: indirect block or inode
+ * @full: number of fully populated children
+ * @partial: number of partially populated children
+ *
+ * Most blocks are directly represented by page cache pages. But when a block
+ * becomes dirty, is part of a transaction, contains aliases or is otherwise
+ * special, a struct logfs_block is allocated to track the additional state.
+ * Inodes are very similar to indirect blocks, so they can also get one of
+ * these structures added when appropriate.
+ */
+#define BLOCK_INDIRECT 1 /* Indirect block */
+#define BLOCK_INODE 2 /* Inode */
+struct logfs_block_ops;
+struct logfs_block {
+ struct list_head alias_list;
+ struct list_head item_list;
+ struct super_block *sb;
+ u64 ino;
+ u64 bix;
+ level_t level;
+ struct page *page;
+ struct inode *inode;
+ struct logfs_transaction *ta;
+ unsigned long alias_map[LOGFS_BLOCK_FACTOR / BITS_PER_LONG];
+ struct logfs_block_ops *ops;
+ int full;
+ int partial;
+ int reserved_bytes;
+};
+
+typedef int write_alias_t(struct super_block *sb, u64 ino, u64 bix,
+ level_t level, int child_no, __be64 val);
+struct logfs_block_ops {
+ void (*write_block)(struct logfs_block *block);
+ gc_level_t (*block_level)(struct logfs_block *block);
+ void (*free_block)(struct super_block *sb, struct logfs_block*block);
+ int (*write_alias)(struct super_block *sb,
+ struct logfs_block *block,
+ write_alias_t *write_one_alias);
+};
+
+struct logfs_super {
+ struct mtd_info *s_mtd; /* underlying device */
+ struct block_device *s_bdev; /* underlying device */
+ const struct logfs_device_ops *s_devops;/* device access */
+ struct inode *s_master_inode; /* inode file */
+ struct inode *s_segfile_inode; /* segment file */
+ struct inode *s_mapping_inode; /* device mapping */
+ atomic_t s_pending_writes; /* outstanting bios */
+ long s_flags;
+ mempool_t *s_btree_pool; /* for btree nodes */
+ mempool_t *s_alias_pool; /* aliases in segment.c */
+ u64 s_feature_incompat;
+ u64 s_feature_ro_compat;
+ u64 s_feature_compat;
+ u64 s_feature_flags;
+ u64 s_sb_ofs[2];
+ /* alias.c fields */
+ struct btree_head32 s_segment_alias; /* remapped segments */
+ int s_no_object_aliases;
+ struct list_head s_object_alias; /* remapped objects */
+ struct btree_head128 s_object_alias_tree; /* remapped objects */
+ struct mutex s_object_alias_mutex;
+ /* dir.c fields */
+ struct mutex s_dirop_mutex; /* for creat/unlink/rename */
+ u64 s_victim_ino; /* used for atomic dir-ops */
+ u64 s_rename_dir; /* source directory ino */
+ u64 s_rename_pos; /* position of source dd */
+ /* gc.c fields */
+ long s_segsize; /* size of a segment */
+ int s_segshift; /* log2 of segment size */
+ long s_segmask; /* 1 << s_segshift - 1 */
+ long s_no_segs; /* segments on device */
+ long s_no_journal_segs; /* segments used for journal */
+ long s_no_blocks; /* blocks per segment */
+ long s_writesize; /* minimum write size */
+ int s_writeshift; /* log2 of write size */
+ u64 s_size; /* filesystem size */
+ struct logfs_area *s_area[LOGFS_NO_AREAS]; /* open segment array */
+ u64 s_gec; /* global erase count */
+ u64 s_wl_gec_ostore; /* time of last wl event */
+ u64 s_wl_gec_journal; /* time of last wl event */
+ u64 s_sweeper; /* current sweeper pos */
+ u8 s_ifile_levels; /* max level of ifile */
+ u8 s_iblock_levels; /* max level of regular files */
+ u8 s_data_levels; /* # of segments to leaf block*/
+ u8 s_total_levels; /* sum of above three */
+ struct btree_head32 s_cand_tree; /* all candidates */
+ struct candidate_list s_free_list; /* 100% free segments */
+ struct candidate_list s_reserve_list; /* Bad segment reserve */
+ struct candidate_list s_low_list[LOGFS_NO_AREAS];/* good candidates */
+ struct candidate_list s_ec_list; /* wear level candidates */
+ struct btree_head32 s_reserved_segments;/* sb, journal, bad, etc. */
+ /* inode.c fields */
+ u64 s_last_ino; /* highest ino used */
+ long s_inos_till_wrap;
+ u32 s_generation; /* i_generation for new files */
+ struct list_head s_freeing_list; /* inodes being freed */
+ /* journal.c fields */
+ struct mutex s_journal_mutex;
+ void *s_je; /* journal entry to compress */
+ void *s_compressed_je; /* block to write to journal */
+ u32 s_journal_seg[LOGFS_JOURNAL_SEGS]; /* journal segments */
+ u32 s_journal_ec[LOGFS_JOURNAL_SEGS]; /* journal erasecounts */
+ u64 s_last_version;
+ struct logfs_area *s_journal_area; /* open journal segment */
+ __be64 s_je_array[64];
+ int s_no_je;
+
+ int s_sum_index; /* for the 12 summaries */
+ struct shadow_tree s_shadow_tree;
+ int s_je_fill; /* index of current je */
+ /* readwrite.c fields */
+ struct mutex s_write_mutex;
+ int s_lock_count;
+ mempool_t *s_block_pool; /* struct logfs_block pool */
+ mempool_t *s_shadow_pool; /* struct logfs_shadow pool */
+ /*
+ * Space accounting:
+ * - s_used_bytes specifies space used to store valid data objects.
+ * - s_dirty_used_bytes is space used to store non-committed data
+ * objects. Those objects have already been written themselves,
+ * but they don't become valid until all indirect blocks up to the
+ * journal have been written as well.
+ * - s_dirty_free_bytes is space used to store the old copy of a
+ * replaced object, as long as the replacement is non-committed.
+ * In other words, it is the amount of space freed when all dirty
+ * blocks are written back.
+ * - s_free_bytes is the amount of free space available for any
+ * purpose.
+ * - s_root_reserve is the amount of free space available only to
+ * the root user. Non-privileged users can no longer write once
+ * this watermark has been reached.
+ * - s_speed_reserve is space which remains unused to speed up
+ * garbage collection performance.
+ * - s_dirty_pages is the space reserved for currently dirty pages.
+ * It is a pessimistic estimate, so some/most will get freed on
+ * page writeback.
+ *
+ * s_used_bytes + s_free_bytes + s_speed_reserve = total usable size
+ */
+ u64 s_free_bytes;
+ u64 s_used_bytes;
+ u64 s_dirty_free_bytes;
+ u64 s_dirty_used_bytes;
+ u64 s_root_reserve;
+ u64 s_speed_reserve;
+ u64 s_dirty_pages;
+ /* Bad block handling:
+ * - s_bad_seg_reserve is a number of segments usually kept
+ * free. When encountering bad blocks, the affected segment's data
+ * is _temporarily_ moved to a reserved segment.
+ * - s_bad_segments is the number of known bad segments.
+ */
+ u32 s_bad_seg_reserve;
+ u32 s_bad_segments;
+};
+
+/**
+ * struct logfs_inode - in-memory inode
+ *
+ * @vfs_inode: struct inode
+ * @li_data: data pointers
+ * @li_used_bytes: number of used bytes
+ * @li_freeing_list: used to track inodes currently being freed
+ * @li_flags: inode flags
+ * @li_refcount: number of internal (GC-induced) references
+ */
+struct logfs_inode {
+ struct inode vfs_inode;
+ u64 li_data[LOGFS_EMBEDDED_FIELDS];
+ u64 li_used_bytes;
+ struct list_head li_freeing_list;
+ struct logfs_block *li_block;
+ u32 li_flags;
+ u8 li_height;
+ int li_refcount;
+};
+
+#define journal_for_each(__i) for (__i = 0; __i < LOGFS_JOURNAL_SEGS; __i++)
+#define for_each_area(__i) for (__i = 0; __i < LOGFS_NO_AREAS; __i++)
+#define for_each_area_down(__i) for (__i = LOGFS_NO_AREAS - 1; __i >= 0; __i--)
+
+/* compr.c */
+int logfs_compress(void *in, void *out, size_t inlen, size_t outlen);
+int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen);
+int __init logfs_compr_init(void);
+void logfs_compr_exit(void);
+
+/* dev_bdev.c */
+#ifdef CONFIG_BLOCK
+int logfs_get_sb_bdev(struct file_system_type *type, int flags,
+ const char *devname, struct vfsmount *mnt);
+#else
+static inline int logfs_get_sb_bdev(struct file_system_type *type, int flags,
+ const char *devname, struct vfsmount *mnt)
+{
+ return -ENODEV;
+}
+#endif
+
+/* dev_mtd.c */
+#ifdef CONFIG_MTD
+int logfs_get_sb_mtd(struct file_system_type *type, int flags,
+ int mtdnr, struct vfsmount *mnt);
+#else
+static inline int logfs_get_sb_mtd(struct file_system_type *type, int flags,
+ int mtdnr, struct vfsmount *mnt)
+{
+ return -ENODEV;
+}
+#endif
+
+/* dir.c */
+extern const struct inode_operations logfs_symlink_iops;
+extern const struct inode_operations logfs_dir_iops;
+extern const struct file_operations logfs_dir_fops;
+int logfs_replay_journal(struct super_block *sb);
+
+/* file.c */
+extern const struct inode_operations logfs_reg_iops;
+extern const struct file_operations logfs_reg_fops;
+extern const struct address_space_operations logfs_reg_aops;
+int logfs_readpage(struct file *file, struct page *page);
+int logfs_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
+ unsigned long arg);
+int logfs_fsync(struct file *file, struct dentry *dentry, int datasync);
+
+/* gc.c */
+u32 get_best_cand(struct super_block *sb, struct candidate_list *list, u32 *ec);
+void logfs_gc_pass(struct super_block *sb);
+int logfs_check_areas(struct super_block *sb);
+int logfs_init_gc(struct super_block *sb);
+void logfs_cleanup_gc(struct super_block *sb);
+
+/* inode.c */
+extern const struct super_operations logfs_super_operations;
+struct inode *logfs_iget(struct super_block *sb, ino_t ino);
+struct inode *logfs_safe_iget(struct super_block *sb, ino_t ino, int *cookie);
+void logfs_safe_iput(struct inode *inode, int cookie);
+struct inode *logfs_new_inode(struct inode *dir, int mode);
+struct inode *logfs_new_meta_inode(struct super_block *sb, u64 ino);
+struct inode *logfs_read_meta_inode(struct super_block *sb, u64 ino);
+int logfs_init_inode_cache(void);
+void logfs_destroy_inode_cache(void);
+void destroy_meta_inode(struct inode *inode);
+void logfs_set_blocks(struct inode *inode, u64 no);
+/* these logically belong into inode.c but actually reside in readwrite.c */
+int logfs_read_inode(struct inode *inode);
+int __logfs_write_inode(struct inode *inode, long flags);
+void logfs_delete_inode(struct inode *inode);
+void logfs_clear_inode(struct inode *inode);
+
+/* journal.c */
+void logfs_write_anchor(struct inode *inode);
+int logfs_init_journal(struct super_block *sb);
+void logfs_cleanup_journal(struct super_block *sb);
+int write_alias_journal(struct super_block *sb, u64 ino, u64 bix,
+ level_t level, int child_no, __be64 val);
+void do_logfs_journal_wl_pass(struct super_block *sb);
+
+/* readwrite.c */
+pgoff_t logfs_pack_index(u64 bix, level_t level);
+void logfs_unpack_index(pgoff_t index, u64 *bix, level_t *level);
+int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
+ loff_t bix, long flags, struct shadow_tree *shadow_tree);
+int logfs_readpage_nolock(struct page *page);
+int logfs_write_buf(struct inode *inode, struct page *page, long flags);
+int logfs_delete(struct inode *inode, pgoff_t index,
+ struct shadow_tree *shadow_tree);
+int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
+ gc_level_t gc_level, long flags);
+int logfs_is_valid_block(struct super_block *sb, u64 ofs, u64 ino, u64 bix,
+ gc_level_t gc_level);
+int logfs_truncate(struct inode *inode, u64 size);
+u64 logfs_seek_hole(struct inode *inode, u64 bix);
+u64 logfs_seek_data(struct inode *inode, u64 bix);
+int logfs_open_segfile(struct super_block *sb);
+int logfs_init_rw(struct super_block *sb);
+void logfs_cleanup_rw(struct super_block *sb);
+void logfs_add_transaction(struct inode *inode, struct logfs_transaction *ta);
+void logfs_del_transaction(struct inode *inode, struct logfs_transaction *ta);
+void logfs_write_block(struct logfs_block *block, long flags);
+int logfs_write_obj_aliases_pagecache(struct super_block *sb);
+void logfs_get_segment_entry(struct super_block *sb, u32 segno,
+ struct logfs_segment_entry *se);
+void logfs_set_segment_used(struct super_block *sb, u64 ofs, int increment);
+void logfs_set_segment_erased(struct super_block *sb, u32 segno, u32 ec,
+ gc_level_t gc_level);
+void logfs_set_segment_reserved(struct super_block *sb, u32 segno);
+void logfs_set_segment_unreserved(struct super_block *sb, u32 segno, u32 ec);
+struct logfs_block *__alloc_block(struct super_block *sb,
+ u64 ino, u64 bix, level_t level);
+void __free_block(struct super_block *sb, struct logfs_block *block);
+void btree_write_block(struct logfs_block *block);
+void initialize_block_counters(struct page *page, struct logfs_block *block,
+ __be64 *array, int page_is_empty);
+int logfs_exist_block(struct inode *inode, u64 bix);
+int get_page_reserve(struct inode *inode, struct page *page);
+extern struct logfs_block_ops indirect_block_ops;
+
+/* segment.c */
+int logfs_erase_segment(struct super_block *sb, u32 ofs);
+int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf);
+int logfs_segment_read(struct inode *inode, struct page *page, u64 ofs, u64 bix,
+ level_t level);
+int logfs_segment_write(struct inode *inode, struct page *page,
+ struct logfs_shadow *shadow);
+int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow);
+int logfs_load_object_aliases(struct super_block *sb,
+ struct logfs_obj_alias *oa, int count);
+void move_page_to_btree(struct page *page);
+int logfs_init_mapping(struct super_block *sb);
+void logfs_sync_area(struct logfs_area *area);
+void logfs_sync_segments(struct super_block *sb);
+
+/* area handling */
+int logfs_init_areas(struct super_block *sb);
+void logfs_cleanup_areas(struct super_block *sb);
+int logfs_open_area(struct logfs_area *area, size_t bytes);
+void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+ int use_filler);
+
+static inline void logfs_buf_write(struct logfs_area *area, u64 ofs,
+ void *buf, size_t len)
+{
+ __logfs_buf_write(area, ofs, buf, len, 0);
+}
+
+static inline void logfs_buf_recover(struct logfs_area *area, u64 ofs,
+ void *buf, size_t len)
+{
+ __logfs_buf_write(area, ofs, buf, len, 1);
+}
+
+/* super.c */
+struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index);
+void emergency_read_end(struct page *page);
+void logfs_crash_dump(struct super_block *sb);
+void *memchr_inv(const void *s, int c, size_t n);
+int logfs_statfs(struct dentry *dentry, struct kstatfs *stats);
+int logfs_get_sb_device(struct file_system_type *type, int flags,
+ struct mtd_info *mtd, struct block_device *bdev,
+ const struct logfs_device_ops *devops, struct vfsmount *mnt);
+int logfs_check_ds(struct logfs_disk_super *ds);
+int logfs_write_sb(struct super_block *sb);
+
+static inline struct logfs_super *logfs_super(struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+static inline struct logfs_inode *logfs_inode(struct inode *inode)
+{
+ return container_of(inode, struct logfs_inode, vfs_inode);
+}
+
+static inline void logfs_set_ro(struct super_block *sb)
+{
+ logfs_super(sb)->s_flags |= LOGFS_SB_FLAG_RO;
+}
+
+#define LOGFS_BUG(sb) do { \
+ struct super_block *__sb = sb; \
+ logfs_crash_dump(__sb); \
+ logfs_super(__sb)->s_flags |= LOGFS_SB_FLAG_RO; \
+ BUG(); \
+} while (0)
+
+#define LOGFS_BUG_ON(condition, sb) \
+ do { if (unlikely(condition)) LOGFS_BUG((sb)); } while (0)
+
+static inline __be32 logfs_crc32(void *data, size_t len, size_t skip)
+{
+ return cpu_to_be32(crc32(~0, data+skip, len-skip));
+}
+
+static inline u8 logfs_type(struct inode *inode)
+{
+ return (inode->i_mode >> 12) & 15;
+}
+
+static inline pgoff_t logfs_index(struct super_block *sb, u64 pos)
+{
+ return pos >> sb->s_blocksize_bits;
+}
+
+static inline u64 dev_ofs(struct super_block *sb, u32 segno, u32 ofs)
+{
+ return ((u64)segno << logfs_super(sb)->s_segshift) + ofs;
+}
+
+static inline u32 seg_no(struct super_block *sb, u64 ofs)
+{
+ return ofs >> logfs_super(sb)->s_segshift;
+}
+
+static inline u32 seg_ofs(struct super_block *sb, u64 ofs)
+{
+ return ofs & logfs_super(sb)->s_segmask;
+}
+
+static inline u64 seg_align(struct super_block *sb, u64 ofs)
+{
+ return ofs & ~logfs_super(sb)->s_segmask;
+}
+
+static inline struct logfs_block *logfs_block(struct page *page)
+{
+ return (void *)page->private;
+}
+
+static inline level_t shrink_level(gc_level_t __level)
+{
+ u8 level = (__force u8)__level;
+
+ if (level >= LOGFS_MAX_LEVELS)
+ level -= LOGFS_MAX_LEVELS;
+ return (__force level_t)level;
+}
+
+static inline gc_level_t expand_level(u64 ino, level_t __level)
+{
+ u8 level = (__force u8)__level;
+
+ if (ino == LOGFS_INO_MASTER) {
+ /* ifile has seperate areas */
+ level += LOGFS_MAX_LEVELS;
+ }
+ return (__force gc_level_t)level;
+}
+
+static inline int logfs_block_shift(struct super_block *sb, level_t level)
+{
+ level = shrink_level((__force gc_level_t)level);
+ return (__force int)level * (sb->s_blocksize_bits - 3);
+}
+
+static inline u64 logfs_block_mask(struct super_block *sb, level_t level)
+{
+ return ~0ull << logfs_block_shift(sb, level);
+}
+
+static inline struct logfs_area *get_area(struct super_block *sb,
+ gc_level_t gc_level)
+{
+ return logfs_super(sb)->s_area[(__force u8)gc_level];
+}
+
+#endif
diff --git a/fs/logfs/logfs_abi.h b/fs/logfs/logfs_abi.h
new file mode 100644
index 000000000000..5d3782ddecc8
--- /dev/null
+++ b/fs/logfs/logfs_abi.h
@@ -0,0 +1,627 @@
+/*
+ * fs/logfs/logfs_abi.h
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ *
+ * Public header for logfs.
+ */
+#ifndef FS_LOGFS_LOGFS_ABI_H
+#define FS_LOGFS_LOGFS_ABI_H
+
+/* For out-of-kernel compiles */
+#ifndef BUILD_BUG_ON
+#define BUILD_BUG_ON(condition) /**/
+#endif
+
+#define SIZE_CHECK(type, size) \
+static inline void check_##type(void) \
+{ \
+ BUILD_BUG_ON(sizeof(struct type) != (size)); \
+}
+
+/*
+ * Throughout the logfs code, we're constantly dealing with blocks at
+ * various positions or offsets. To remove confusion, we stricly
+ * distinguish between a "position" - the logical position within a
+ * file and an "offset" - the physical location within the device.
+ *
+ * Any usage of the term offset for a logical location or position for
+ * a physical one is a bug and should get fixed.
+ */
+
+/*
+ * Block are allocated in one of several segments depending on their
+ * level. The following levels are used:
+ * 0 - regular data block
+ * 1 - i1 indirect blocks
+ * 2 - i2 indirect blocks
+ * 3 - i3 indirect blocks
+ * 4 - i4 indirect blocks
+ * 5 - i5 indirect blocks
+ * 6 - ifile data blocks
+ * 7 - ifile i1 indirect blocks
+ * 8 - ifile i2 indirect blocks
+ * 9 - ifile i3 indirect blocks
+ * 10 - ifile i4 indirect blocks
+ * 11 - ifile i5 indirect blocks
+ * Potential levels to be used in the future:
+ * 12 - gc recycled blocks, long-lived data
+ * 13 - replacement blocks, short-lived data
+ *
+ * Levels 1-11 are necessary for robust gc operations and help seperate
+ * short-lived metadata from longer-lived file data. In the future,
+ * file data should get seperated into several segments based on simple
+ * heuristics. Old data recycled during gc operation is expected to be
+ * long-lived. New data is of uncertain life expectancy. New data
+ * used to replace older blocks in existing files is expected to be
+ * short-lived.
+ */
+
+
+/* Magic numbers. 64bit for superblock, 32bit for statfs f_type */
+#define LOGFS_MAGIC 0xb21f205ac97e8168ull
+#define LOGFS_MAGIC_U32 0xc97e8168u
+
+/*
+ * Various blocksize related macros. Blocksize is currently fixed at 4KiB.
+ * Sooner or later that should become configurable and the macros replaced
+ * by something superblock-dependent. Pointers in indirect blocks are and
+ * will remain 64bit.
+ *
+ * LOGFS_BLOCKSIZE - self-explaining
+ * LOGFS_BLOCK_FACTOR - number of pointers per indirect block
+ * LOGFS_BLOCK_BITS - log2 of LOGFS_BLOCK_FACTOR, used for shifts
+ */
+#define LOGFS_BLOCKSIZE (4096ull)
+#define LOGFS_BLOCK_FACTOR (LOGFS_BLOCKSIZE / sizeof(u64))
+#define LOGFS_BLOCK_BITS (9)
+
+/*
+ * Number of blocks at various levels of indirection. There are 16 direct
+ * block pointers plus a single indirect pointer.
+ */
+#define I0_BLOCKS (16)
+#define I1_BLOCKS LOGFS_BLOCK_FACTOR
+#define I2_BLOCKS (LOGFS_BLOCK_FACTOR * I1_BLOCKS)
+#define I3_BLOCKS (LOGFS_BLOCK_FACTOR * I2_BLOCKS)
+#define I4_BLOCKS (LOGFS_BLOCK_FACTOR * I3_BLOCKS)
+#define I5_BLOCKS (LOGFS_BLOCK_FACTOR * I4_BLOCKS)
+
+#define INDIRECT_INDEX I0_BLOCKS
+#define LOGFS_EMBEDDED_FIELDS (I0_BLOCKS + 1)
+
+/*
+ * Sizes at which files require another level of indirection. Files smaller
+ * than LOGFS_EMBEDDED_SIZE can be completely stored in the inode itself,
+ * similar like ext2 fast symlinks.
+ *
+ * Data at a position smaller than LOGFS_I0_SIZE is accessed through the
+ * direct pointers, else through the 1x indirect pointer and so forth.
+ */
+#define LOGFS_EMBEDDED_SIZE (LOGFS_EMBEDDED_FIELDS * sizeof(u64))
+#define LOGFS_I0_SIZE (I0_BLOCKS * LOGFS_BLOCKSIZE)
+#define LOGFS_I1_SIZE (I1_BLOCKS * LOGFS_BLOCKSIZE)
+#define LOGFS_I2_SIZE (I2_BLOCKS * LOGFS_BLOCKSIZE)
+#define LOGFS_I3_SIZE (I3_BLOCKS * LOGFS_BLOCKSIZE)
+#define LOGFS_I4_SIZE (I4_BLOCKS * LOGFS_BLOCKSIZE)
+#define LOGFS_I5_SIZE (I5_BLOCKS * LOGFS_BLOCKSIZE)
+
+/*
+ * Each indirect block pointer must have this flag set, if all block pointers
+ * behind it are set, i.e. there is no hole hidden in the shadow of this
+ * indirect block pointer.
+ */
+#define LOGFS_FULLY_POPULATED (1ULL << 63)
+#define pure_ofs(ofs) (ofs & ~LOGFS_FULLY_POPULATED)
+
+/*
+ * LogFS needs to seperate data into levels. Each level is defined as the
+ * maximal possible distance from the master inode (inode of the inode file).
+ * Data blocks reside on level 0, 1x indirect block on level 1, etc.
+ * Inodes reside on level 6, indirect blocks for the inode file on levels 7-11.
+ * This effort is necessary to guarantee garbage collection to always make
+ * progress.
+ *
+ * LOGFS_MAX_INDIRECT is the maximal indirection through indirect blocks,
+ * LOGFS_MAX_LEVELS is one more for the actual data level of a file. It is
+ * the maximal number of levels for one file.
+ * LOGFS_NO_AREAS is twice that, as the inode file and regular files are
+ * effectively stacked on top of each other.
+ */
+#define LOGFS_MAX_INDIRECT (5)
+#define LOGFS_MAX_LEVELS (LOGFS_MAX_INDIRECT + 1)
+#define LOGFS_NO_AREAS (2 * LOGFS_MAX_LEVELS)
+
+/* Maximum size of filenames */
+#define LOGFS_MAX_NAMELEN (255)
+
+/* Number of segments in the primary journal. */
+#define LOGFS_JOURNAL_SEGS (16)
+
+/* Maximum number of free/erased/etc. segments in journal entries */
+#define MAX_CACHED_SEGS (64)
+
+
+/*
+ * LOGFS_OBJECT_HEADERSIZE is the size of a single header in the object store,
+ * LOGFS_MAX_OBJECTSIZE the size of the largest possible object, including
+ * its header,
+ * LOGFS_SEGMENT_RESERVE is the amount of space reserved for each segment for
+ * its segment header and the padded space at the end when no further objects
+ * fit.
+ */
+#define LOGFS_OBJECT_HEADERSIZE (0x1c)
+#define LOGFS_SEGMENT_HEADERSIZE (0x18)
+#define LOGFS_MAX_OBJECTSIZE (LOGFS_OBJECT_HEADERSIZE + LOGFS_BLOCKSIZE)
+#define LOGFS_SEGMENT_RESERVE \
+ (LOGFS_SEGMENT_HEADERSIZE + LOGFS_MAX_OBJECTSIZE - 1)
+
+/*
+ * Segment types:
+ * SEG_SUPER - Data or indirect block
+ * SEG_JOURNAL - Inode
+ * SEG_OSTORE - Dentry
+ */
+enum {
+ SEG_SUPER = 0x01,
+ SEG_JOURNAL = 0x02,
+ SEG_OSTORE = 0x03,
+};
+
+/**
+ * struct logfs_segment_header - per-segment header in the ostore
+ *
+ * @crc: crc32 of header (there is no data)
+ * @pad: unused, must be 0
+ * @type: segment type, see above
+ * @level: GC level for all objects in this segment
+ * @segno: segment number
+ * @ec: erase count for this segment
+ * @gec: global erase count at time of writing
+ */
+struct logfs_segment_header {
+ __be32 crc;
+ __be16 pad;
+ __u8 type;
+ __u8 level;
+ __be32 segno;
+ __be32 ec;
+ __be64 gec;
+};
+
+SIZE_CHECK(logfs_segment_header, LOGFS_SEGMENT_HEADERSIZE);
+
+/**
+ * struct logfs_disk_super - on-medium superblock
+ *
+ * @ds_magic: magic number, must equal LOGFS_MAGIC
+ * @ds_crc: crc32 of structure starting with the next field
+ * @ds_ifile_levels: maximum number of levels for ifile
+ * @ds_iblock_levels: maximum number of levels for regular files
+ * @ds_data_levels: number of seperate levels for data
+ * @pad0: reserved, must be 0
+ * @ds_feature_incompat: incompatible filesystem features
+ * @ds_feature_ro_compat: read-only compatible filesystem features
+ * @ds_feature_compat: compatible filesystem features
+ * @ds_flags: flags
+ * @ds_segment_shift: log2 of segment size
+ * @ds_block_shift: log2 of block size
+ * @ds_write_shift: log2 of write size
+ * @pad1: reserved, must be 0
+ * @ds_journal_seg: segments used by primary journal
+ * @ds_root_reserve: bytes reserved for the superuser
+ * @ds_speed_reserve: bytes reserved to speed up GC
+ * @ds_bad_seg_reserve: number of segments reserved to handle bad blocks
+ * @pad2: reserved, must be 0
+ * @pad3: reserved, must be 0
+ *
+ * Contains only read-only fields. Read-write fields like the amount of used
+ * space is tracked in the dynamic superblock, which is stored in the journal.
+ */
+struct logfs_disk_super {
+ struct logfs_segment_header ds_sh;
+ __be64 ds_magic;
+
+ __be32 ds_crc;
+ __u8 ds_ifile_levels;
+ __u8 ds_iblock_levels;
+ __u8 ds_data_levels;
+ __u8 ds_segment_shift;
+ __u8 ds_block_shift;
+ __u8 ds_write_shift;
+ __u8 pad0[6];
+
+ __be64 ds_filesystem_size;
+ __be32 ds_segment_size;
+ __be32 ds_bad_seg_reserve;
+
+ __be64 ds_feature_incompat;
+ __be64 ds_feature_ro_compat;
+
+ __be64 ds_feature_compat;
+ __be64 ds_feature_flags;
+
+ __be64 ds_root_reserve;
+ __be64 ds_speed_reserve;
+
+ __be32 ds_journal_seg[LOGFS_JOURNAL_SEGS];
+
+ __be64 ds_super_ofs[2];
+ __be64 pad3[8];
+};
+
+SIZE_CHECK(logfs_disk_super, 256);
+
+/*
+ * Object types:
+ * OBJ_BLOCK - Data or indirect block
+ * OBJ_INODE - Inode
+ * OBJ_DENTRY - Dentry
+ */
+enum {
+ OBJ_BLOCK = 0x04,
+ OBJ_INODE = 0x05,
+ OBJ_DENTRY = 0x06,
+};
+
+/**
+ * struct logfs_object_header - per-object header in the ostore
+ *
+ * @crc: crc32 of header, excluding data_crc
+ * @len: length of data
+ * @type: object type, see above
+ * @compr: compression type
+ * @ino: inode number
+ * @bix: block index
+ * @data_crc: crc32 of payload
+ */
+struct logfs_object_header {
+ __be32 crc;
+ __be16 len;
+ __u8 type;
+ __u8 compr;
+ __be64 ino;
+ __be64 bix;
+ __be32 data_crc;
+} __attribute__((packed));
+
+SIZE_CHECK(logfs_object_header, LOGFS_OBJECT_HEADERSIZE);
+
+/*
+ * Reserved inode numbers:
+ * LOGFS_INO_MASTER - master inode (for inode file)
+ * LOGFS_INO_ROOT - root directory
+ * LOGFS_INO_SEGFILE - per-segment used bytes and erase count
+ */
+enum {
+ LOGFS_INO_MAPPING = 0x00,
+ LOGFS_INO_MASTER = 0x01,
+ LOGFS_INO_ROOT = 0x02,
+ LOGFS_INO_SEGFILE = 0x03,
+ LOGFS_RESERVED_INOS = 0x10,
+};
+
+/*
+ * Inode flags. High bits should never be written to the medium. They are
+ * reserved for in-memory usage.
+ * Low bits should either remain in sync with the corresponding FS_*_FL or
+ * reuse slots that obviously don't make sense for logfs.
+ *
+ * LOGFS_IF_DIRTY Inode must be written back
+ * LOGFS_IF_ZOMBIE Inode has been deleted
+ * LOGFS_IF_STILLBORN -ENOSPC happened when creating inode
+ */
+#define LOGFS_IF_COMPRESSED 0x00000004 /* == FS_COMPR_FL */
+#define LOGFS_IF_DIRTY 0x20000000
+#define LOGFS_IF_ZOMBIE 0x40000000
+#define LOGFS_IF_STILLBORN 0x80000000
+
+/* Flags available to chattr */
+#define LOGFS_FL_USER_VISIBLE (LOGFS_IF_COMPRESSED)
+#define LOGFS_FL_USER_MODIFIABLE (LOGFS_IF_COMPRESSED)
+/* Flags inherited from parent directory on file/directory creation */
+#define LOGFS_FL_INHERITED (LOGFS_IF_COMPRESSED)
+
+/**
+ * struct logfs_disk_inode - on-medium inode
+ *
+ * @di_mode: file mode
+ * @di_pad: reserved, must be 0
+ * @di_flags: inode flags, see above
+ * @di_uid: user id
+ * @di_gid: group id
+ * @di_ctime: change time
+ * @di_mtime: modify time
+ * @di_refcount: reference count (aka nlink or link count)
+ * @di_generation: inode generation, for nfs
+ * @di_used_bytes: number of bytes used
+ * @di_size: file size
+ * @di_data: data pointers
+ */
+struct logfs_disk_inode {
+ __be16 di_mode;
+ __u8 di_height;
+ __u8 di_pad;
+ __be32 di_flags;
+ __be32 di_uid;
+ __be32 di_gid;
+
+ __be64 di_ctime;
+ __be64 di_mtime;
+
+ __be64 di_atime;
+ __be32 di_refcount;
+ __be32 di_generation;
+
+ __be64 di_used_bytes;
+ __be64 di_size;
+
+ __be64 di_data[LOGFS_EMBEDDED_FIELDS];
+};
+
+SIZE_CHECK(logfs_disk_inode, 200);
+
+#define INODE_POINTER_OFS \
+ (offsetof(struct logfs_disk_inode, di_data) / sizeof(__be64))
+#define INODE_USED_OFS \
+ (offsetof(struct logfs_disk_inode, di_used_bytes) / sizeof(__be64))
+#define INODE_SIZE_OFS \
+ (offsetof(struct logfs_disk_inode, di_size) / sizeof(__be64))
+#define INODE_HEIGHT_OFS (0)
+
+/**
+ * struct logfs_disk_dentry - on-medium dentry structure
+ *
+ * @ino: inode number
+ * @namelen: length of file name
+ * @type: file type, identical to bits 12..15 of mode
+ * @name: file name
+ */
+/* FIXME: add 6 bytes of padding to remove the __packed */
+struct logfs_disk_dentry {
+ __be64 ino;
+ __be16 namelen;
+ __u8 type;
+ __u8 name[LOGFS_MAX_NAMELEN];
+} __attribute__((packed));
+
+SIZE_CHECK(logfs_disk_dentry, 266);
+
+#define RESERVED 0xffffffff
+#define BADSEG 0xffffffff
+/**
+ * struct logfs_segment_entry - segment file entry
+ *
+ * @ec_level: erase count and level
+ * @valid: number of valid bytes
+ *
+ * Segment file contains one entry for every segment. ec_level contains the
+ * erasecount in the upper 28 bits and the level in the lower 4 bits. An
+ * ec_level of BADSEG (-1) identifies bad segments. valid contains the number
+ * of valid bytes or RESERVED (-1 again) if the segment is used for either the
+ * superblock or the journal, or when the segment is bad.
+ */
+struct logfs_segment_entry {
+ __be32 ec_level;
+ __be32 valid;
+};
+
+SIZE_CHECK(logfs_segment_entry, 8);
+
+/**
+ * struct logfs_journal_header - header for journal entries (JEs)
+ *
+ * @h_crc: crc32 of journal entry
+ * @h_len: length of compressed journal entry,
+ * not including header
+ * @h_datalen: length of uncompressed data
+ * @h_type: JE type
+ * @h_version: unnormalized version of journal entry
+ * @h_compr: compression type
+ * @h_pad: reserved
+ */
+struct logfs_journal_header {
+ __be32 h_crc;
+ __be16 h_len;
+ __be16 h_datalen;
+ __be16 h_type;
+ __be16 h_version;
+ __u8 h_compr;
+ __u8 h_pad[3];
+};
+
+SIZE_CHECK(logfs_journal_header, 16);
+
+/*
+ * Life expectency of data.
+ * VIM_DEFAULT - default vim
+ * VIM_SEGFILE - for segment file only - very short-living
+ * VIM_GC - GC'd data - likely long-living
+ */
+enum logfs_vim {
+ VIM_DEFAULT = 0,
+ VIM_SEGFILE = 1,
+};
+
+/**
+ * struct logfs_je_area - wbuf header
+ *
+ * @segno: segment number of area
+ * @used_bytes: number of bytes already used
+ * @gc_level: GC level
+ * @vim: life expectancy of data
+ *
+ * "Areas" are segments currently being used for writing. There is at least
+ * one area per GC level. Several may be used to seperate long-living from
+ * short-living data. If an area with unknown vim is encountered, it can
+ * simply be closed.
+ * The write buffer immediately follow this header.
+ */
+struct logfs_je_area {
+ __be32 segno;
+ __be32 used_bytes;
+ __u8 gc_level;
+ __u8 vim;
+} __attribute__((packed));
+
+SIZE_CHECK(logfs_je_area, 10);
+
+#define MAX_JOURNAL_HEADER \
+ (sizeof(struct logfs_journal_header) + sizeof(struct logfs_je_area))
+
+/**
+ * struct logfs_je_dynsb - dynamic superblock
+ *
+ * @ds_gec: global erase count
+ * @ds_sweeper: current position of GC "sweeper"
+ * @ds_rename_dir: source directory ino (see dir.c documentation)
+ * @ds_rename_pos: position of source dd (see dir.c documentation)
+ * @ds_victim_ino: victims of incomplete dir operation (see dir.c)
+ * @ds_victim_ino: parent inode of victim (see dir.c)
+ * @ds_used_bytes: number of used bytes
+ */
+struct logfs_je_dynsb {
+ __be64 ds_gec;
+ __be64 ds_sweeper;
+
+ __be64 ds_rename_dir;
+ __be64 ds_rename_pos;
+
+ __be64 ds_victim_ino;
+ __be64 ds_victim_parent; /* XXX */
+
+ __be64 ds_used_bytes;
+ __be32 ds_generation;
+ __be32 pad;
+};
+
+SIZE_CHECK(logfs_je_dynsb, 64);
+
+/**
+ * struct logfs_je_anchor - anchor of filesystem tree, aka master inode
+ *
+ * @da_size: size of inode file
+ * @da_last_ino: last created inode
+ * @da_used_bytes: number of bytes used
+ * @da_data: data pointers
+ */
+struct logfs_je_anchor {
+ __be64 da_size;
+ __be64 da_last_ino;
+
+ __be64 da_used_bytes;
+ u8 da_height;
+ u8 pad[7];
+
+ __be64 da_data[LOGFS_EMBEDDED_FIELDS];
+};
+
+SIZE_CHECK(logfs_je_anchor, 168);
+
+/**
+ * struct logfs_je_spillout - spillout entry (from 1st to 2nd journal)
+ *
+ * @so_segment: segments used for 2nd journal
+ *
+ * Length of the array is given by h_len field in the header.
+ */
+struct logfs_je_spillout {
+ __be64 so_segment[0];
+};
+
+SIZE_CHECK(logfs_je_spillout, 0);
+
+/**
+ * struct logfs_je_journal_ec - erase counts for all journal segments
+ *
+ * @ec: erase count
+ *
+ * Length of the array is given by h_len field in the header.
+ */
+struct logfs_je_journal_ec {
+ __be32 ec[0];
+};
+
+SIZE_CHECK(logfs_je_journal_ec, 0);
+
+/**
+ * struct logfs_je_free_segments - list of free segmetns with erase count
+ */
+struct logfs_je_free_segments {
+ __be32 segno;
+ __be32 ec;
+};
+
+SIZE_CHECK(logfs_je_free_segments, 8);
+
+/**
+ * struct logfs_seg_alias - list of segment aliases
+ */
+struct logfs_seg_alias {
+ __be32 old_segno;
+ __be32 new_segno;
+};
+
+SIZE_CHECK(logfs_seg_alias, 8);
+
+/**
+ * struct logfs_obj_alias - list of object aliases
+ */
+struct logfs_obj_alias {
+ __be64 ino;
+ __be64 bix;
+ __be64 val;
+ u8 level;
+ u8 pad[5];
+ __be16 child_no;
+};
+
+SIZE_CHECK(logfs_obj_alias, 32);
+
+/**
+ * Compression types.
+ *
+ * COMPR_NONE - uncompressed
+ * COMPR_ZLIB - compressed with zlib
+ */
+enum {
+ COMPR_NONE = 0,
+ COMPR_ZLIB = 1,
+};
+
+/*
+ * Journal entries come in groups of 16. First group contains unique
+ * entries, next groups contain one entry per level
+ *
+ * JE_FIRST - smallest possible journal entry number
+ *
+ * JEG_BASE - base group, containing unique entries
+ * JE_COMMIT - commit entry, validates all previous entries
+ * JE_DYNSB - dynamic superblock, anything that ought to be in the
+ * superblock but cannot because it is read-write data
+ * JE_ANCHOR - anchor aka master inode aka inode file's inode
+ * JE_ERASECOUNT erasecounts for all journal segments
+ * JE_SPILLOUT - unused
+ * JE_SEG_ALIAS - aliases segments
+ * JE_AREA - area description
+ *
+ * JE_LAST - largest possible journal entry number
+ */
+enum {
+ JE_FIRST = 0x01,
+
+ JEG_BASE = 0x00,
+ JE_COMMIT = 0x02,
+ JE_DYNSB = 0x03,
+ JE_ANCHOR = 0x04,
+ JE_ERASECOUNT = 0x05,
+ JE_SPILLOUT = 0x06,
+ JE_OBJ_ALIAS = 0x0d,
+ JE_AREA = 0x0e,
+
+ JE_LAST = 0x0e,
+};
+
+#endif
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
new file mode 100644
index 000000000000..1dbe6e8cccec
--- /dev/null
+++ b/fs/logfs/readwrite.c
@@ -0,0 +1,2246 @@
+/*
+ * fs/logfs/readwrite.c
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ *
+ *
+ * Actually contains five sets of very similar functions:
+ * read read blocks from a file
+ * seek_hole find next hole
+ * seek_data find next data block
+ * valid check whether a block still belongs to a file
+ * write write blocks to a file
+ * delete delete a block (for directories and ifile)
+ * rewrite move existing blocks of a file to a new location (gc helper)
+ * truncate truncate a file
+ */
+#include "logfs.h"
+#include <linux/sched.h>
+
+static u64 adjust_bix(u64 bix, level_t level)
+{
+ switch (level) {
+ case 0:
+ return bix;
+ case LEVEL(1):
+ return max_t(u64, bix, I0_BLOCKS);
+ case LEVEL(2):
+ return max_t(u64, bix, I1_BLOCKS);
+ case LEVEL(3):
+ return max_t(u64, bix, I2_BLOCKS);
+ case LEVEL(4):
+ return max_t(u64, bix, I3_BLOCKS);
+ case LEVEL(5):
+ return max_t(u64, bix, I4_BLOCKS);
+ default:
+ WARN_ON(1);
+ return bix;
+ }
+}
+
+static inline u64 maxbix(u8 height)
+{
+ return 1ULL << (LOGFS_BLOCK_BITS * height);
+}
+
+/**
+ * The inode address space is cut in two halves. Lower half belongs to data
+ * pages, upper half to indirect blocks. If the high bit (INDIRECT_BIT) is
+ * set, the actual block index (bix) and level can be derived from the page
+ * index.
+ *
+ * The lowest three bits of the block index are set to 0 after packing and
+ * unpacking. Since the lowest n bits (9 for 4KiB blocksize) are ignored
+ * anyway this is harmless.
+ */
+#define ARCH_SHIFT (BITS_PER_LONG - 32)
+#define INDIRECT_BIT (0x80000000UL << ARCH_SHIFT)
+#define LEVEL_SHIFT (28 + ARCH_SHIFT)
+static inline pgoff_t first_indirect_block(void)
+{
+ return INDIRECT_BIT | (1ULL << LEVEL_SHIFT);
+}
+
+pgoff_t logfs_pack_index(u64 bix, level_t level)
+{
+ pgoff_t index;
+
+ BUG_ON(bix >= INDIRECT_BIT);
+ if (level == 0)
+ return bix;
+
+ index = INDIRECT_BIT;
+ index |= (__force long)level << LEVEL_SHIFT;
+ index |= bix >> ((__force u8)level * LOGFS_BLOCK_BITS);
+ return index;
+}
+
+void logfs_unpack_index(pgoff_t index, u64 *bix, level_t *level)
+{
+ u8 __level;
+
+ if (!(index & INDIRECT_BIT)) {
+ *bix = index;
+ *level = 0;
+ return;
+ }
+
+ __level = (index & ~INDIRECT_BIT) >> LEVEL_SHIFT;
+ *level = LEVEL(__level);
+ *bix = (index << (__level * LOGFS_BLOCK_BITS)) & ~INDIRECT_BIT;
+ *bix = adjust_bix(*bix, *level);
+ return;
+}
+#undef ARCH_SHIFT
+#undef INDIRECT_BIT
+#undef LEVEL_SHIFT
+
+/*
+ * Time is stored as nanoseconds since the epoch.
+ */
+static struct timespec be64_to_timespec(__be64 betime)
+{
+ return ns_to_timespec(be64_to_cpu(betime));
+}
+
+static __be64 timespec_to_be64(struct timespec tsp)
+{
+ return cpu_to_be64((u64)tsp.tv_sec * NSEC_PER_SEC + tsp.tv_nsec);
+}
+
+static void logfs_disk_to_inode(struct logfs_disk_inode *di, struct inode*inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ int i;
+
+ inode->i_mode = be16_to_cpu(di->di_mode);
+ li->li_height = di->di_height;
+ li->li_flags = be32_to_cpu(di->di_flags);
+ inode->i_uid = be32_to_cpu(di->di_uid);
+ inode->i_gid = be32_to_cpu(di->di_gid);
+ inode->i_size = be64_to_cpu(di->di_size);
+ logfs_set_blocks(inode, be64_to_cpu(di->di_used_bytes));
+ inode->i_atime = be64_to_timespec(di->di_atime);
+ inode->i_ctime = be64_to_timespec(di->di_ctime);
+ inode->i_mtime = be64_to_timespec(di->di_mtime);
+ inode->i_nlink = be32_to_cpu(di->di_refcount);
+ inode->i_generation = be32_to_cpu(di->di_generation);
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFSOCK: /* fall through */
+ case S_IFBLK: /* fall through */
+ case S_IFCHR: /* fall through */
+ case S_IFIFO:
+ inode->i_rdev = be64_to_cpu(di->di_data[0]);
+ break;
+ case S_IFDIR: /* fall through */
+ case S_IFREG: /* fall through */
+ case S_IFLNK:
+ for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
+ li->li_data[i] = be64_to_cpu(di->di_data[i]);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void logfs_inode_to_disk(struct inode *inode, struct logfs_disk_inode*di)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ int i;
+
+ di->di_mode = cpu_to_be16(inode->i_mode);
+ di->di_height = li->li_height;
+ di->di_pad = 0;
+ di->di_flags = cpu_to_be32(li->li_flags);
+ di->di_uid = cpu_to_be32(inode->i_uid);
+ di->di_gid = cpu_to_be32(inode->i_gid);
+ di->di_size = cpu_to_be64(i_size_read(inode));
+ di->di_used_bytes = cpu_to_be64(li->li_used_bytes);
+ di->di_atime = timespec_to_be64(inode->i_atime);
+ di->di_ctime = timespec_to_be64(inode->i_ctime);
+ di->di_mtime = timespec_to_be64(inode->i_mtime);
+ di->di_refcount = cpu_to_be32(inode->i_nlink);
+ di->di_generation = cpu_to_be32(inode->i_generation);
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFSOCK: /* fall through */
+ case S_IFBLK: /* fall through */
+ case S_IFCHR: /* fall through */
+ case S_IFIFO:
+ di->di_data[0] = cpu_to_be64(inode->i_rdev);
+ break;
+ case S_IFDIR: /* fall through */
+ case S_IFREG: /* fall through */
+ case S_IFLNK:
+ for (i = 0; i < LOGFS_EMBEDDED_FIELDS; i++)
+ di->di_data[i] = cpu_to_be64(li->li_data[i]);
+ break;
+ default:
+ BUG();
+ }
+}
+
+static void __logfs_set_blocks(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct logfs_inode *li = logfs_inode(inode);
+
+ inode->i_blocks = ULONG_MAX;
+ if (li->li_used_bytes >> sb->s_blocksize_bits < ULONG_MAX)
+ inode->i_blocks = ALIGN(li->li_used_bytes, 512) >> 9;
+}
+
+void logfs_set_blocks(struct inode *inode, u64 bytes)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ li->li_used_bytes = bytes;
+ __logfs_set_blocks(inode);
+}
+
+static void prelock_page(struct super_block *sb, struct page *page, int lock)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ BUG_ON(!PageLocked(page));
+ if (lock) {
+ BUG_ON(PagePreLocked(page));
+ SetPagePreLocked(page);
+ } else {
+ /* We are in GC path. */
+ if (PagePreLocked(page))
+ super->s_lock_count++;
+ else
+ SetPagePreLocked(page);
+ }
+}
+
+static void preunlock_page(struct super_block *sb, struct page *page, int lock)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ BUG_ON(!PageLocked(page));
+ if (lock)
+ ClearPagePreLocked(page);
+ else {
+ /* We are in GC path. */
+ BUG_ON(!PagePreLocked(page));
+ if (super->s_lock_count)
+ super->s_lock_count--;
+ else
+ ClearPagePreLocked(page);
+ }
+}
+
+/*
+ * Logfs is prone to an AB-BA deadlock where one task tries to acquire
+ * s_write_mutex with a locked page and GC tries to get that page while holding
+ * s_write_mutex.
+ * To solve this issue logfs will ignore the page lock iff the page in question
+ * is waiting for s_write_mutex. We annotate this fact by setting PG_pre_locked
+ * in addition to PG_locked.
+ */
+static void logfs_get_wblocks(struct super_block *sb, struct page *page,
+ int lock)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ if (page)
+ prelock_page(sb, page, lock);
+
+ if (lock) {
+ mutex_lock(&super->s_write_mutex);
+ logfs_gc_pass(sb);
+ /* FIXME: We also have to check for shadowed space
+ * and mempool fill grade */
+ }
+}
+
+static void logfs_put_wblocks(struct super_block *sb, struct page *page,
+ int lock)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ if (page)
+ preunlock_page(sb, page, lock);
+ /* Order matters - we must clear PG_pre_locked before releasing
+ * s_write_mutex or we could race against another task. */
+ if (lock)
+ mutex_unlock(&super->s_write_mutex);
+}
+
+static struct page *logfs_get_read_page(struct inode *inode, u64 bix,
+ level_t level)
+{
+ return find_or_create_page(inode->i_mapping,
+ logfs_pack_index(bix, level), GFP_NOFS);
+}
+
+static void logfs_put_read_page(struct page *page)
+{
+ unlock_page(page);
+ page_cache_release(page);
+}
+
+static void logfs_lock_write_page(struct page *page)
+{
+ int loop = 0;
+
+ while (unlikely(!trylock_page(page))) {
+ if (loop++ > 0x1000) {
+ /* Has been observed once so far... */
+ printk(KERN_ERR "stack at %p\n", &loop);
+ BUG();
+ }
+ if (PagePreLocked(page)) {
+ /* Holder of page lock is waiting for us, it
+ * is safe to use this page. */
+ break;
+ }
+ /* Some other process has this page locked and has
+ * nothing to do with us. Wait for it to finish.
+ */
+ schedule();
+ }
+ BUG_ON(!PageLocked(page));
+}
+
+static struct page *logfs_get_write_page(struct inode *inode, u64 bix,
+ level_t level)
+{
+ struct address_space *mapping = inode->i_mapping;
+ pgoff_t index = logfs_pack_index(bix, level);
+ struct page *page;
+ int err;
+
+repeat:
+ page = find_get_page(mapping, index);
+ if (!page) {
+ page = __page_cache_alloc(GFP_NOFS);
+ if (!page)
+ return NULL;
+ err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS);
+ if (unlikely(err)) {
+ page_cache_release(page);
+ if (err == -EEXIST)
+ goto repeat;
+ return NULL;
+ }
+ } else logfs_lock_write_page(page);
+ BUG_ON(!PageLocked(page));
+ return page;
+}
+
+static void logfs_unlock_write_page(struct page *page)
+{
+ if (!PagePreLocked(page))
+ unlock_page(page);
+}
+
+static void logfs_put_write_page(struct page *page)
+{
+ logfs_unlock_write_page(page);
+ page_cache_release(page);
+}
+
+static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level,
+ int rw)
+{
+ if (rw == READ)
+ return logfs_get_read_page(inode, bix, level);
+ else
+ return logfs_get_write_page(inode, bix, level);
+}
+
+static void logfs_put_page(struct page *page, int rw)
+{
+ if (rw == READ)
+ logfs_put_read_page(page);
+ else
+ logfs_put_write_page(page);
+}
+
+static unsigned long __get_bits(u64 val, int skip, int no)
+{
+ u64 ret = val;
+
+ ret >>= skip * no;
+ ret <<= 64 - no;
+ ret >>= 64 - no;
+ return ret;
+}
+
+static unsigned long get_bits(u64 val, level_t skip)
+{
+ return __get_bits(val, (__force int)skip, LOGFS_BLOCK_BITS);
+}
+
+static inline void init_shadow_tree(struct super_block *sb,
+ struct shadow_tree *tree)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ btree_init_mempool64(&tree->new, super->s_btree_pool);
+ btree_init_mempool64(&tree->old, super->s_btree_pool);
+}
+
+static void indirect_write_block(struct logfs_block *block)
+{
+ struct page *page;
+ struct inode *inode;
+ int ret;
+
+ page = block->page;
+ inode = page->mapping->host;
+ logfs_lock_write_page(page);
+ ret = logfs_write_buf(inode, page, 0);
+ logfs_unlock_write_page(page);
+ /*
+ * This needs some rework. Unless you want your filesystem to run
+ * completely synchronously (you don't), the filesystem will always
+ * report writes as 'successful' before the actual work has been
+ * done. The actual work gets done here and this is where any errors
+ * will show up. And there isn't much we can do about it, really.
+ *
+ * Some attempts to fix the errors (move from bad blocks, retry io,...)
+ * have already been done, so anything left should be either a broken
+ * device or a bug somewhere in logfs itself. Being relatively new,
+ * the odds currently favor a bug, so for now the line below isn't
+ * entirely tasteles.
+ */
+ BUG_ON(ret);
+}
+
+static void inode_write_block(struct logfs_block *block)
+{
+ struct inode *inode;
+ int ret;
+
+ inode = block->inode;
+ if (inode->i_ino == LOGFS_INO_MASTER)
+ logfs_write_anchor(inode);
+ else {
+ ret = __logfs_write_inode(inode, 0);
+ /* see indirect_write_block comment */
+ BUG_ON(ret);
+ }
+}
+
+static gc_level_t inode_block_level(struct logfs_block *block)
+{
+ BUG_ON(block->inode->i_ino == LOGFS_INO_MASTER);
+ return GC_LEVEL(LOGFS_MAX_LEVELS);
+}
+
+static gc_level_t indirect_block_level(struct logfs_block *block)
+{
+ struct page *page;
+ struct inode *inode;
+ u64 bix;
+ level_t level;
+
+ page = block->page;
+ inode = page->mapping->host;
+ logfs_unpack_index(page->index, &bix, &level);
+ return expand_level(inode->i_ino, level);
+}
+
+/*
+ * This silences a false, yet annoying gcc warning. I hate it when my editor
+ * jumps into bitops.h each time I recompile this file.
+ * TODO: Complain to gcc folks about this and upgrade compiler.
+ */
+static unsigned long fnb(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static __be64 inode_val0(struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ u64 val;
+
+ /*
+ * Explicit shifting generates good code, but must match the format
+ * of the structure. Add some paranoia just in case.
+ */
+ BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_mode) != 0);
+ BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_height) != 2);
+ BUILD_BUG_ON(offsetof(struct logfs_disk_inode, di_flags) != 4);
+
+ val = (u64)inode->i_mode << 48 |
+ (u64)li->li_height << 40 |
+ (u64)li->li_flags;
+ return cpu_to_be64(val);
+}
+
+static int inode_write_alias(struct super_block *sb,
+ struct logfs_block *block, write_alias_t *write_one_alias)
+{
+ struct inode *inode = block->inode;
+ struct logfs_inode *li = logfs_inode(inode);
+ unsigned long pos;
+ u64 ino , bix;
+ __be64 val;
+ level_t level;
+ int err;
+
+ for (pos = 0; ; pos++) {
+ pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
+ if (pos >= LOGFS_EMBEDDED_FIELDS + INODE_POINTER_OFS)
+ return 0;
+
+ switch (pos) {
+ case INODE_HEIGHT_OFS:
+ val = inode_val0(inode);
+ break;
+ case INODE_USED_OFS:
+ val = cpu_to_be64(li->li_used_bytes);;
+ break;
+ case INODE_SIZE_OFS:
+ val = cpu_to_be64(i_size_read(inode));
+ break;
+ case INODE_POINTER_OFS ... INODE_POINTER_OFS + LOGFS_EMBEDDED_FIELDS - 1:
+ val = cpu_to_be64(li->li_data[pos - INODE_POINTER_OFS]);
+ break;
+ default:
+ BUG();
+ }
+
+ ino = LOGFS_INO_MASTER;
+ bix = inode->i_ino;
+ level = LEVEL(0);
+ err = write_one_alias(sb, ino, bix, level, pos, val);
+ if (err)
+ return err;
+ }
+}
+
+static int indirect_write_alias(struct super_block *sb,
+ struct logfs_block *block, write_alias_t *write_one_alias)
+{
+ unsigned long pos;
+ struct page *page = block->page;
+ u64 ino , bix;
+ __be64 *child, val;
+ level_t level;
+ int err;
+
+ for (pos = 0; ; pos++) {
+ pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
+ if (pos >= LOGFS_BLOCK_FACTOR)
+ return 0;
+
+ ino = page->mapping->host->i_ino;
+ logfs_unpack_index(page->index, &bix, &level);
+ child = kmap_atomic(page, KM_USER0);
+ val = child[pos];
+ kunmap_atomic(child, KM_USER0);
+ err = write_one_alias(sb, ino, bix, level, pos, val);
+ if (err)
+ return err;
+ }
+}
+
+int logfs_write_obj_aliases_pagecache(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_block *block;
+ int err;
+
+ list_for_each_entry(block, &super->s_object_alias, alias_list) {
+ err = block->ops->write_alias(sb, block, write_alias_journal);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+void __free_block(struct super_block *sb, struct logfs_block *block)
+{
+ BUG_ON(!list_empty(&block->item_list));
+ list_del(&block->alias_list);
+ mempool_free(block, logfs_super(sb)->s_block_pool);
+}
+
+static void inode_free_block(struct super_block *sb, struct logfs_block *block)
+{
+ struct inode *inode = block->inode;
+
+ logfs_inode(inode)->li_block = NULL;
+ __free_block(sb, block);
+}
+
+static void indirect_free_block(struct super_block *sb,
+ struct logfs_block *block)
+{
+ ClearPagePrivate(block->page);
+ block->page->private = 0;
+ __free_block(sb, block);
+}
+
+
+static struct logfs_block_ops inode_block_ops = {
+ .write_block = inode_write_block,
+ .block_level = inode_block_level,
+ .free_block = inode_free_block,
+ .write_alias = inode_write_alias,
+};
+
+struct logfs_block_ops indirect_block_ops = {
+ .write_block = indirect_write_block,
+ .block_level = indirect_block_level,
+ .free_block = indirect_free_block,
+ .write_alias = indirect_write_alias,
+};
+
+struct logfs_block *__alloc_block(struct super_block *sb,
+ u64 ino, u64 bix, level_t level)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_block *block;
+
+ block = mempool_alloc(super->s_block_pool, GFP_NOFS);
+ memset(block, 0, sizeof(*block));
+ INIT_LIST_HEAD(&block->alias_list);
+ INIT_LIST_HEAD(&block->item_list);
+ block->sb = sb;
+ block->ino = ino;
+ block->bix = bix;
+ block->level = level;
+ return block;
+}
+
+static void alloc_inode_block(struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ struct logfs_block *block;
+
+ if (li->li_block)
+ return;
+
+ block = __alloc_block(inode->i_sb, LOGFS_INO_MASTER, inode->i_ino, 0);
+ block->inode = inode;
+ li->li_block = block;
+ block->ops = &inode_block_ops;
+}
+
+void initialize_block_counters(struct page *page, struct logfs_block *block,
+ __be64 *array, int page_is_empty)
+{
+ u64 ptr;
+ int i, start;
+
+ block->partial = 0;
+ block->full = 0;
+ start = 0;
+ if (page->index < first_indirect_block()) {
+ /* Counters are pointless on level 0 */
+ return;
+ }
+ if (page->index == first_indirect_block()) {
+ /* Skip unused pointers */
+ start = I0_BLOCKS;
+ block->full = I0_BLOCKS;
+ }
+ if (!page_is_empty) {
+ for (i = start; i < LOGFS_BLOCK_FACTOR; i++) {
+ ptr = be64_to_cpu(array[i]);
+ if (ptr)
+ block->partial++;
+ if (ptr & LOGFS_FULLY_POPULATED)
+ block->full++;
+ }
+ }
+}
+
+static void alloc_data_block(struct inode *inode, struct page *page)
+{
+ struct logfs_block *block;
+ u64 bix;
+ level_t level;
+
+ if (PagePrivate(page))
+ return;
+
+ logfs_unpack_index(page->index, &bix, &level);
+ block = __alloc_block(inode->i_sb, inode->i_ino, bix, level);
+ block->page = page;
+ SetPagePrivate(page);
+ page->private = (unsigned long)block;
+ block->ops = &indirect_block_ops;
+}
+
+static void alloc_indirect_block(struct inode *inode, struct page *page,
+ int page_is_empty)
+{
+ struct logfs_block *block;
+ __be64 *array;
+
+ if (PagePrivate(page))
+ return;
+
+ alloc_data_block(inode, page);
+
+ block = logfs_block(page);
+ array = kmap_atomic(page, KM_USER0);
+ initialize_block_counters(page, block, array, page_is_empty);
+ kunmap_atomic(array, KM_USER0);
+}
+
+static void block_set_pointer(struct page *page, int index, u64 ptr)
+{
+ struct logfs_block *block = logfs_block(page);
+ __be64 *array;
+ u64 oldptr;
+
+ BUG_ON(!block);
+ array = kmap_atomic(page, KM_USER0);
+ oldptr = be64_to_cpu(array[index]);
+ array[index] = cpu_to_be64(ptr);
+ kunmap_atomic(array, KM_USER0);
+ SetPageUptodate(page);
+
+ block->full += !!(ptr & LOGFS_FULLY_POPULATED)
+ - !!(oldptr & LOGFS_FULLY_POPULATED);
+ block->partial += !!ptr - !!oldptr;
+}
+
+static u64 block_get_pointer(struct page *page, int index)
+{
+ __be64 *block;
+ u64 ptr;
+
+ block = kmap_atomic(page, KM_USER0);
+ ptr = be64_to_cpu(block[index]);
+ kunmap_atomic(block, KM_USER0);
+ return ptr;
+}
+
+static int logfs_read_empty(struct page *page)
+{
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ return 0;
+}
+
+static int logfs_read_direct(struct inode *inode, struct page *page)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ pgoff_t index = page->index;
+ u64 block;
+
+ block = li->li_data[index];
+ if (!block)
+ return logfs_read_empty(page);
+
+ return logfs_segment_read(inode, page, block, index, 0);
+}
+
+static int logfs_read_loop(struct inode *inode, struct page *page,
+ int rw_context)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ u64 bix, bofs = li->li_data[INDIRECT_INDEX];
+ level_t level, target_level;
+ int ret;
+ struct page *ipage;
+
+ logfs_unpack_index(page->index, &bix, &target_level);
+ if (!bofs)
+ return logfs_read_empty(page);
+
+ if (bix >= maxbix(li->li_height))
+ return logfs_read_empty(page);
+
+ for (level = LEVEL(li->li_height);
+ (__force u8)level > (__force u8)target_level;
+ level = SUBLEVEL(level)){
+ ipage = logfs_get_page(inode, bix, level, rw_context);
+ if (!ipage)
+ return -ENOMEM;
+
+ ret = logfs_segment_read(inode, ipage, bofs, bix, level);
+ if (ret) {
+ logfs_put_read_page(ipage);
+ return ret;
+ }
+
+ bofs = block_get_pointer(ipage, get_bits(bix, SUBLEVEL(level)));
+ logfs_put_page(ipage, rw_context);
+ if (!bofs)
+ return logfs_read_empty(page);
+ }
+
+ return logfs_segment_read(inode, page, bofs, bix, 0);
+}
+
+static int logfs_read_block(struct inode *inode, struct page *page,
+ int rw_context)
+{
+ pgoff_t index = page->index;
+
+ if (index < I0_BLOCKS)
+ return logfs_read_direct(inode, page);
+ return logfs_read_loop(inode, page, rw_context);
+}
+
+static int logfs_exist_loop(struct inode *inode, u64 bix)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ u64 bofs = li->li_data[INDIRECT_INDEX];
+ level_t level;
+ int ret;
+ struct page *ipage;
+
+ if (!bofs)
+ return 0;
+ if (bix >= maxbix(li->li_height))
+ return 0;
+
+ for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)) {
+ ipage = logfs_get_read_page(inode, bix, level);
+ if (!ipage)
+ return -ENOMEM;
+
+ ret = logfs_segment_read(inode, ipage, bofs, bix, level);
+ if (ret) {
+ logfs_put_read_page(ipage);
+ return ret;
+ }
+
+ bofs = block_get_pointer(ipage, get_bits(bix, SUBLEVEL(level)));
+ logfs_put_read_page(ipage);
+ if (!bofs)
+ return 0;
+ }
+
+ return 1;
+}
+
+int logfs_exist_block(struct inode *inode, u64 bix)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ if (bix < I0_BLOCKS)
+ return !!li->li_data[bix];
+ return logfs_exist_loop(inode, bix);
+}
+
+static u64 seek_holedata_direct(struct inode *inode, u64 bix, int data)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ for (; bix < I0_BLOCKS; bix++)
+ if (data ^ (li->li_data[bix] == 0))
+ return bix;
+ return I0_BLOCKS;
+}
+
+static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ __be64 *rblock;
+ u64 increment, bofs = li->li_data[INDIRECT_INDEX];
+ level_t level;
+ int ret, slot;
+ struct page *page;
+
+ BUG_ON(!bofs);
+
+ for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)) {
+ increment = 1 << (LOGFS_BLOCK_BITS * ((__force u8)level-1));
+ page = logfs_get_read_page(inode, bix, level);
+ if (!page)
+ return bix;
+
+ ret = logfs_segment_read(inode, page, bofs, bix, level);
+ if (ret) {
+ logfs_put_read_page(page);
+ return bix;
+ }
+
+ slot = get_bits(bix, SUBLEVEL(level));
+ rblock = kmap_atomic(page, KM_USER0);
+ while (slot < LOGFS_BLOCK_FACTOR) {
+ if (data && (rblock[slot] != 0))
+ break;
+ if (!data && !(be64_to_cpu(rblock[slot]) & LOGFS_FULLY_POPULATED))
+ break;
+ slot++;
+ bix += increment;
+ bix &= ~(increment - 1);
+ }
+ if (slot >= LOGFS_BLOCK_FACTOR) {
+ kunmap_atomic(rblock, KM_USER0);
+ logfs_put_read_page(page);
+ return bix;
+ }
+ bofs = be64_to_cpu(rblock[slot]);
+ kunmap_atomic(rblock, KM_USER0);
+ logfs_put_read_page(page);
+ if (!bofs) {
+ BUG_ON(data);
+ return bix;
+ }
+ }
+ return bix;
+}
+
+/**
+ * logfs_seek_hole - find next hole starting at a given block index
+ * @inode: inode to search in
+ * @bix: block index to start searching
+ *
+ * Returns next hole. If the file doesn't contain any further holes, the
+ * block address next to eof is returned instead.
+ */
+u64 logfs_seek_hole(struct inode *inode, u64 bix)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ if (bix < I0_BLOCKS) {
+ bix = seek_holedata_direct(inode, bix, 0);
+ if (bix < I0_BLOCKS)
+ return bix;
+ }
+
+ if (!li->li_data[INDIRECT_INDEX])
+ return bix;
+ else if (li->li_data[INDIRECT_INDEX] & LOGFS_FULLY_POPULATED)
+ bix = maxbix(li->li_height);
+ else {
+ bix = seek_holedata_loop(inode, bix, 0);
+ if (bix < maxbix(li->li_height))
+ return bix;
+ /* Should not happen anymore. But if some port writes semi-
+ * corrupt images (as this one used to) we might run into it.
+ */
+ WARN_ON_ONCE(bix == maxbix(li->li_height));
+ }
+
+ return bix;
+}
+
+static u64 __logfs_seek_data(struct inode *inode, u64 bix)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ if (bix < I0_BLOCKS) {
+ bix = seek_holedata_direct(inode, bix, 1);
+ if (bix < I0_BLOCKS)
+ return bix;
+ }
+
+ if (bix < maxbix(li->li_height)) {
+ if (!li->li_data[INDIRECT_INDEX])
+ bix = maxbix(li->li_height);
+ else
+ return seek_holedata_loop(inode, bix, 1);
+ }
+
+ return bix;
+}
+
+/**
+ * logfs_seek_data - find next data block after a given block index
+ * @inode: inode to search in
+ * @bix: block index to start searching
+ *
+ * Returns next data block. If the file doesn't contain any further data
+ * blocks, the last block in the file is returned instead.
+ */
+u64 logfs_seek_data(struct inode *inode, u64 bix)
+{
+ struct super_block *sb = inode->i_sb;
+ u64 ret, end;
+
+ ret = __logfs_seek_data(inode, bix);
+ end = i_size_read(inode) >> sb->s_blocksize_bits;
+ if (ret >= end)
+ ret = max(bix, end);
+ return ret;
+}
+
+static int logfs_is_valid_direct(struct logfs_inode *li, u64 bix, u64 ofs)
+{
+ return pure_ofs(li->li_data[bix]) == ofs;
+}
+
+static int __logfs_is_valid_loop(struct inode *inode, u64 bix,
+ u64 ofs, u64 bofs)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ level_t level;
+ int ret;
+ struct page *page;
+
+ for (level = LEVEL(li->li_height); level != 0; level = SUBLEVEL(level)){
+ page = logfs_get_write_page(inode, bix, level);
+ BUG_ON(!page);
+
+ ret = logfs_segment_read(inode, page, bofs, bix, level);
+ if (ret) {
+ logfs_put_write_page(page);
+ return 0;
+ }
+
+ bofs = block_get_pointer(page, get_bits(bix, SUBLEVEL(level)));
+ logfs_put_write_page(page);
+ if (!bofs)
+ return 0;
+
+ if (pure_ofs(bofs) == ofs)
+ return 1;
+ }
+ return 0;
+}
+
+static int logfs_is_valid_loop(struct inode *inode, u64 bix, u64 ofs)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ u64 bofs = li->li_data[INDIRECT_INDEX];
+
+ if (!bofs)
+ return 0;
+
+ if (bix >= maxbix(li->li_height))
+ return 0;
+
+ if (pure_ofs(bofs) == ofs)
+ return 1;
+
+ return __logfs_is_valid_loop(inode, bix, ofs, bofs);
+}
+
+static int __logfs_is_valid_block(struct inode *inode, u64 bix, u64 ofs)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ if ((inode->i_nlink == 0) && atomic_read(&inode->i_count) == 1)
+ return 0;
+
+ if (bix < I0_BLOCKS)
+ return logfs_is_valid_direct(li, bix, ofs);
+ return logfs_is_valid_loop(inode, bix, ofs);
+}
+
+/**
+ * logfs_is_valid_block - check whether this block is still valid
+ *
+ * @sb - superblock
+ * @ofs - block physical offset
+ * @ino - block inode number
+ * @bix - block index
+ * @level - block level
+ *
+ * Returns 0 if the block is invalid, 1 if it is valid and 2 if it will
+ * become invalid once the journal is written.
+ */
+int logfs_is_valid_block(struct super_block *sb, u64 ofs, u64 ino, u64 bix,
+ gc_level_t gc_level)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct inode *inode;
+ int ret, cookie;
+
+ /* Umount closes a segment with free blocks remaining. Those
+ * blocks are by definition invalid. */
+ if (ino == -1)
+ return 0;
+
+ LOGFS_BUG_ON((u64)(u_long)ino != ino, sb);
+
+ inode = logfs_safe_iget(sb, ino, &cookie);
+ if (IS_ERR(inode))
+ goto invalid;
+
+ ret = __logfs_is_valid_block(inode, bix, ofs);
+ logfs_safe_iput(inode, cookie);
+ if (ret)
+ return ret;
+
+invalid:
+ /* Block is nominally invalid, but may still sit in the shadow tree,
+ * waiting for a journal commit.
+ */
+ if (btree_lookup64(&super->s_shadow_tree.old, ofs))
+ return 2;
+ return 0;
+}
+
+int logfs_readpage_nolock(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ int ret = -EIO;
+
+ ret = logfs_read_block(inode, page, READ);
+
+ if (ret) {
+ ClearPageUptodate(page);
+ SetPageError(page);
+ } else {
+ SetPageUptodate(page);
+ ClearPageError(page);
+ }
+ flush_dcache_page(page);
+
+ return ret;
+}
+
+static int logfs_reserve_bytes(struct inode *inode, int bytes)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+ u64 available = super->s_free_bytes + super->s_dirty_free_bytes
+ - super->s_dirty_used_bytes - super->s_dirty_pages;
+
+ if (!bytes)
+ return 0;
+
+ if (available < bytes)
+ return -ENOSPC;
+
+ if (available < bytes + super->s_root_reserve &&
+ !capable(CAP_SYS_RESOURCE))
+ return -ENOSPC;
+
+ return 0;
+}
+
+int get_page_reserve(struct inode *inode, struct page *page)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+ int ret;
+
+ if (logfs_block(page) && logfs_block(page)->reserved_bytes)
+ return 0;
+
+ logfs_get_wblocks(inode->i_sb, page, WF_LOCK);
+ ret = logfs_reserve_bytes(inode, 6 * LOGFS_MAX_OBJECTSIZE);
+ if (!ret) {
+ alloc_data_block(inode, page);
+ logfs_block(page)->reserved_bytes += 6 * LOGFS_MAX_OBJECTSIZE;
+ super->s_dirty_pages += 6 * LOGFS_MAX_OBJECTSIZE;
+ }
+ logfs_put_wblocks(inode->i_sb, page, WF_LOCK);
+ return ret;
+}
+
+/*
+ * We are protected by write lock. Push victims up to superblock level
+ * and release transaction when appropriate.
+ */
+/* FIXME: This is currently called from the wrong spots. */
+static void logfs_handle_transaction(struct inode *inode,
+ struct logfs_transaction *ta)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+
+ if (!ta)
+ return;
+ logfs_inode(inode)->li_block->ta = NULL;
+
+ if (inode->i_ino != LOGFS_INO_MASTER) {
+ BUG(); /* FIXME: Yes, this needs more thought */
+ /* just remember the transaction until inode is written */
+ //BUG_ON(logfs_inode(inode)->li_transaction);
+ //logfs_inode(inode)->li_transaction = ta;
+ return;
+ }
+
+ switch (ta->state) {
+ case CREATE_1: /* fall through */
+ case UNLINK_1:
+ BUG_ON(super->s_victim_ino);
+ super->s_victim_ino = ta->ino;
+ break;
+ case CREATE_2: /* fall through */
+ case UNLINK_2:
+ BUG_ON(super->s_victim_ino != ta->ino);
+ super->s_victim_ino = 0;
+ /* transaction ends here - free it */
+ kfree(ta);
+ break;
+ case CROSS_RENAME_1:
+ BUG_ON(super->s_rename_dir);
+ BUG_ON(super->s_rename_pos);
+ super->s_rename_dir = ta->dir;
+ super->s_rename_pos = ta->pos;
+ break;
+ case CROSS_RENAME_2:
+ BUG_ON(super->s_rename_dir != ta->dir);
+ BUG_ON(super->s_rename_pos != ta->pos);
+ super->s_rename_dir = 0;
+ super->s_rename_pos = 0;
+ kfree(ta);
+ break;
+ case TARGET_RENAME_1:
+ BUG_ON(super->s_rename_dir);
+ BUG_ON(super->s_rename_pos);
+ BUG_ON(super->s_victim_ino);
+ super->s_rename_dir = ta->dir;
+ super->s_rename_pos = ta->pos;
+ super->s_victim_ino = ta->ino;
+ break;
+ case TARGET_RENAME_2:
+ BUG_ON(super->s_rename_dir != ta->dir);
+ BUG_ON(super->s_rename_pos != ta->pos);
+ BUG_ON(super->s_victim_ino != ta->ino);
+ super->s_rename_dir = 0;
+ super->s_rename_pos = 0;
+ break;
+ case TARGET_RENAME_3:
+ BUG_ON(super->s_rename_dir);
+ BUG_ON(super->s_rename_pos);
+ BUG_ON(super->s_victim_ino != ta->ino);
+ super->s_victim_ino = 0;
+ kfree(ta);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Not strictly a reservation, but rather a check that we still have enough
+ * space to satisfy the write.
+ */
+static int logfs_reserve_blocks(struct inode *inode, int blocks)
+{
+ return logfs_reserve_bytes(inode, blocks * LOGFS_MAX_OBJECTSIZE);
+}
+
+struct write_control {
+ u64 ofs;
+ long flags;
+};
+
+static struct logfs_shadow *alloc_shadow(struct inode *inode, u64 bix,
+ level_t level, u64 old_ofs)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+ struct logfs_shadow *shadow;
+
+ shadow = mempool_alloc(super->s_shadow_pool, GFP_NOFS);
+ memset(shadow, 0, sizeof(*shadow));
+ shadow->ino = inode->i_ino;
+ shadow->bix = bix;
+ shadow->gc_level = expand_level(inode->i_ino, level);
+ shadow->old_ofs = old_ofs & ~LOGFS_FULLY_POPULATED;
+ return shadow;
+}
+
+static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+
+ mempool_free(shadow, super->s_shadow_pool);
+}
+
+/**
+ * fill_shadow_tree - Propagate shadow tree changes due to a write
+ * @inode: Inode owning the page
+ * @page: Struct page that was written
+ * @shadow: Shadow for the current write
+ *
+ * Writes in logfs can result in two semi-valid objects. The old object
+ * is still valid as long as it can be reached by following pointers on
+ * the medium. Only when writes propagate all the way up to the journal
+ * has the new object safely replaced the old one.
+ *
+ * To handle this problem, a struct logfs_shadow is used to represent
+ * every single write. It is attached to the indirect block, which is
+ * marked dirty. When the indirect block is written, its shadows are
+ * handed up to the next indirect block (or inode). Untimately they
+ * will reach the master inode and be freed upon journal commit.
+ *
+ * This function handles a single step in the propagation. It adds the
+ * shadow for the current write to the tree, along with any shadows in
+ * the page's tree, in case it was an indirect block. If a page is
+ * written, the inode parameter is left NULL, if an inode is written,
+ * the page parameter is left NULL.
+ */
+static void fill_shadow_tree(struct inode *inode, struct page *page,
+ struct logfs_shadow *shadow)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+ struct logfs_block *block = logfs_block(page);
+ struct shadow_tree *tree = &super->s_shadow_tree;
+
+ if (PagePrivate(page)) {
+ if (block->alias_map)
+ super->s_no_object_aliases -= bitmap_weight(
+ block->alias_map, LOGFS_BLOCK_FACTOR);
+ logfs_handle_transaction(inode, block->ta);
+ block->ops->free_block(inode->i_sb, block);
+ }
+ if (shadow) {
+ if (shadow->old_ofs)
+ btree_insert64(&tree->old, shadow->old_ofs, shadow,
+ GFP_NOFS);
+ else
+ btree_insert64(&tree->new, shadow->new_ofs, shadow,
+ GFP_NOFS);
+
+ super->s_dirty_used_bytes += shadow->new_len;
+ super->s_dirty_free_bytes += shadow->old_len;
+ }
+}
+
+static void logfs_set_alias(struct super_block *sb, struct logfs_block *block,
+ long child_no)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ if (block->inode && block->inode->i_ino == LOGFS_INO_MASTER) {
+ /* Aliases in the master inode are pointless. */
+ return;
+ }
+
+ if (!test_bit(child_no, block->alias_map)) {
+ set_bit(child_no, block->alias_map);
+ super->s_no_object_aliases++;
+ }
+ list_move_tail(&block->alias_list, &super->s_object_alias);
+}
+
+/*
+ * Object aliases can and often do change the size and occupied space of a
+ * file. So not only do we have to change the pointers, we also have to
+ * change inode->i_size and li->li_used_bytes. Which is done by setting
+ * another two object aliases for the inode itself.
+ */
+static void set_iused(struct inode *inode, struct logfs_shadow *shadow)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ if (shadow->new_len == shadow->old_len)
+ return;
+
+ alloc_inode_block(inode);
+ li->li_used_bytes += shadow->new_len - shadow->old_len;
+ __logfs_set_blocks(inode);
+ logfs_set_alias(inode->i_sb, li->li_block, INODE_USED_OFS);
+ logfs_set_alias(inode->i_sb, li->li_block, INODE_SIZE_OFS);
+}
+
+static int logfs_write_i0(struct inode *inode, struct page *page,
+ struct write_control *wc)
+{
+ struct logfs_shadow *shadow;
+ u64 bix;
+ level_t level;
+ int full, err = 0;
+
+ logfs_unpack_index(page->index, &bix, &level);
+ if (wc->ofs == 0)
+ if (logfs_reserve_blocks(inode, 1))
+ return -ENOSPC;
+
+ shadow = alloc_shadow(inode, bix, level, wc->ofs);
+ if (wc->flags & WF_WRITE)
+ err = logfs_segment_write(inode, page, shadow);
+ if (wc->flags & WF_DELETE)
+ logfs_segment_delete(inode, shadow);
+ if (err) {
+ free_shadow(inode, shadow);
+ return err;
+ }
+
+ set_iused(inode, shadow);
+ full = 1;
+ if (level != 0) {
+ alloc_indirect_block(inode, page, 0);
+ full = logfs_block(page)->full == LOGFS_BLOCK_FACTOR;
+ }
+ fill_shadow_tree(inode, page, shadow);
+ wc->ofs = shadow->new_ofs;
+ if (wc->ofs && full)
+ wc->ofs |= LOGFS_FULLY_POPULATED;
+ return 0;
+}
+
+static int logfs_write_direct(struct inode *inode, struct page *page,
+ long flags)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ struct write_control wc = {
+ .ofs = li->li_data[page->index],
+ .flags = flags,
+ };
+ int err;
+
+ alloc_inode_block(inode);
+
+ err = logfs_write_i0(inode, page, &wc);
+ if (err)
+ return err;
+
+ li->li_data[page->index] = wc.ofs;
+ logfs_set_alias(inode->i_sb, li->li_block,
+ page->index + INODE_POINTER_OFS);
+ return 0;
+}
+
+static int ptr_change(u64 ofs, struct page *page)
+{
+ struct logfs_block *block = logfs_block(page);
+ int empty0, empty1, full0, full1;
+
+ empty0 = ofs == 0;
+ empty1 = block->partial == 0;
+ if (empty0 != empty1)
+ return 1;
+
+ /* The !! is necessary to shrink result to int */
+ full0 = !!(ofs & LOGFS_FULLY_POPULATED);
+ full1 = block->full == LOGFS_BLOCK_FACTOR;
+ if (full0 != full1)
+ return 1;
+ return 0;
+}
+
+static int __logfs_write_rec(struct inode *inode, struct page *page,
+ struct write_control *this_wc,
+ pgoff_t bix, level_t target_level, level_t level)
+{
+ int ret, page_empty = 0;
+ int child_no = get_bits(bix, SUBLEVEL(level));
+ struct page *ipage;
+ struct write_control child_wc = {
+ .flags = this_wc->flags,
+ };
+
+ ipage = logfs_get_write_page(inode, bix, level);
+ if (!ipage)
+ return -ENOMEM;
+
+ if (this_wc->ofs) {
+ ret = logfs_segment_read(inode, ipage, this_wc->ofs, bix, level);
+ if (ret)
+ goto out;
+ } else if (!PageUptodate(ipage)) {
+ page_empty = 1;
+ logfs_read_empty(ipage);
+ }
+
+ child_wc.ofs = block_get_pointer(ipage, child_no);
+
+ if ((__force u8)level-1 > (__force u8)target_level)
+ ret = __logfs_write_rec(inode, page, &child_wc, bix,
+ target_level, SUBLEVEL(level));
+ else
+ ret = logfs_write_i0(inode, page, &child_wc);
+
+ if (ret)
+ goto out;
+
+ alloc_indirect_block(inode, ipage, page_empty);
+ block_set_pointer(ipage, child_no, child_wc.ofs);
+ /* FIXME: first condition seems superfluous */
+ if (child_wc.ofs || logfs_block(ipage)->partial)
+ this_wc->flags |= WF_WRITE;
+ /* the condition on this_wc->ofs ensures that we won't consume extra
+ * space for indirect blocks in the future, which we cannot reserve */
+ if (!this_wc->ofs || ptr_change(this_wc->ofs, ipage))
+ ret = logfs_write_i0(inode, ipage, this_wc);
+ else
+ logfs_set_alias(inode->i_sb, logfs_block(ipage), child_no);
+out:
+ logfs_put_write_page(ipage);
+ return ret;
+}
+
+static int logfs_write_rec(struct inode *inode, struct page *page,
+ pgoff_t bix, level_t target_level, long flags)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ struct write_control wc = {
+ .ofs = li->li_data[INDIRECT_INDEX],
+ .flags = flags,
+ };
+ int ret;
+
+ alloc_inode_block(inode);
+
+ if (li->li_height > (__force u8)target_level)
+ ret = __logfs_write_rec(inode, page, &wc, bix, target_level,
+ LEVEL(li->li_height));
+ else
+ ret = logfs_write_i0(inode, page, &wc);
+ if (ret)
+ return ret;
+
+ if (li->li_data[INDIRECT_INDEX] != wc.ofs) {
+ li->li_data[INDIRECT_INDEX] = wc.ofs;
+ logfs_set_alias(inode->i_sb, li->li_block,
+ INDIRECT_INDEX + INODE_POINTER_OFS);
+ }
+ return ret;
+}
+
+void logfs_add_transaction(struct inode *inode, struct logfs_transaction *ta)
+{
+ alloc_inode_block(inode);
+ logfs_inode(inode)->li_block->ta = ta;
+}
+
+void logfs_del_transaction(struct inode *inode, struct logfs_transaction *ta)
+{
+ struct logfs_block *block = logfs_inode(inode)->li_block;
+
+ if (block && block->ta)
+ block->ta = NULL;
+}
+
+static int grow_inode(struct inode *inode, u64 bix, level_t level)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ u8 height = (__force u8)level;
+ struct page *page;
+ struct write_control wc = {
+ .flags = WF_WRITE,
+ };
+ int err;
+
+ BUG_ON(height > 5 || li->li_height > 5);
+ while (height > li->li_height || bix >= maxbix(li->li_height)) {
+ page = logfs_get_write_page(inode, I0_BLOCKS + 1,
+ LEVEL(li->li_height + 1));
+ if (!page)
+ return -ENOMEM;
+ logfs_read_empty(page);
+ alloc_indirect_block(inode, page, 1);
+ block_set_pointer(page, 0, li->li_data[INDIRECT_INDEX]);
+ err = logfs_write_i0(inode, page, &wc);
+ logfs_put_write_page(page);
+ if (err)
+ return err;
+ li->li_data[INDIRECT_INDEX] = wc.ofs;
+ wc.ofs = 0;
+ li->li_height++;
+ logfs_set_alias(inode->i_sb, li->li_block, INODE_HEIGHT_OFS);
+ }
+ return 0;
+}
+
+static int __logfs_write_buf(struct inode *inode, struct page *page, long flags)
+{
+ struct logfs_super *super = logfs_super(inode->i_sb);
+ pgoff_t index = page->index;
+ u64 bix;
+ level_t level;
+ int err;
+
+ flags |= WF_WRITE | WF_DELETE;
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+ logfs_unpack_index(index, &bix, &level);
+ if (logfs_block(page) && logfs_block(page)->reserved_bytes)
+ super->s_dirty_pages -= logfs_block(page)->reserved_bytes;
+
+ if (index < I0_BLOCKS)
+ return logfs_write_direct(inode, page, flags);
+
+ bix = adjust_bix(bix, level);
+ err = grow_inode(inode, bix, level);
+ if (err)
+ return err;
+ return logfs_write_rec(inode, page, bix, level, flags);
+}
+
+int logfs_write_buf(struct inode *inode, struct page *page, long flags)
+{
+ struct super_block *sb = inode->i_sb;
+ int ret;
+
+ logfs_get_wblocks(sb, page, flags & WF_LOCK);
+ ret = __logfs_write_buf(inode, page, flags);
+ logfs_put_wblocks(sb, page, flags & WF_LOCK);
+ return ret;
+}
+
+static int __logfs_delete(struct inode *inode, struct page *page)
+{
+ long flags = WF_DELETE;
+
+ inode->i_ctime = inode->i_mtime = CURRENT_TIME;
+
+ if (page->index < I0_BLOCKS)
+ return logfs_write_direct(inode, page, flags);
+ return logfs_write_rec(inode, page, page->index, 0, flags);
+}
+
+int logfs_delete(struct inode *inode, pgoff_t index,
+ struct shadow_tree *shadow_tree)
+{
+ struct super_block *sb = inode->i_sb;
+ struct page *page;
+ int ret;
+
+ page = logfs_get_read_page(inode, index, 0);
+ if (!page)
+ return -ENOMEM;
+
+ logfs_get_wblocks(sb, page, 1);
+ ret = __logfs_delete(inode, page);
+ logfs_put_wblocks(sb, page, 1);
+
+ logfs_put_read_page(page);
+
+ return ret;
+}
+
+/* Rewrite cannot mark the inode dirty but has to write it immediatly. */
+int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
+ gc_level_t gc_level, long flags)
+{
+ level_t level = shrink_level(gc_level);
+ struct page *page;
+ int err;
+
+ page = logfs_get_write_page(inode, bix, level);
+ if (!page)
+ return -ENOMEM;
+
+ err = logfs_segment_read(inode, page, ofs, bix, level);
+ if (!err) {
+ if (level != 0)
+ alloc_indirect_block(inode, page, 0);
+ err = logfs_write_buf(inode, page, flags);
+ }
+ logfs_put_write_page(page);
+ return err;
+}
+
+static int truncate_data_block(struct inode *inode, struct page *page,
+ u64 ofs, struct logfs_shadow *shadow, u64 size)
+{
+ loff_t pageofs = page->index << inode->i_sb->s_blocksize_bits;
+ u64 bix;
+ level_t level;
+ int err;
+
+ /* Does truncation happen within this page? */
+ if (size <= pageofs || size - pageofs >= PAGE_SIZE)
+ return 0;
+
+ logfs_unpack_index(page->index, &bix, &level);
+ BUG_ON(level != 0);
+
+ err = logfs_segment_read(inode, page, ofs, bix, level);
+ if (err)
+ return err;
+
+ zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE);
+ return logfs_segment_write(inode, page, shadow);
+}
+
+static int logfs_truncate_i0(struct inode *inode, struct page *page,
+ struct write_control *wc, u64 size)
+{
+ struct logfs_shadow *shadow;
+ u64 bix;
+ level_t level;
+ int err = 0;
+
+ logfs_unpack_index(page->index, &bix, &level);
+ BUG_ON(level != 0);
+ shadow = alloc_shadow(inode, bix, level, wc->ofs);
+
+ err = truncate_data_block(inode, page, wc->ofs, shadow, size);
+ if (err) {
+ free_shadow(inode, shadow);
+ return err;
+ }
+
+ logfs_segment_delete(inode, shadow);
+ set_iused(inode, shadow);
+ fill_shadow_tree(inode, page, shadow);
+ wc->ofs = shadow->new_ofs;
+ return 0;
+}
+
+static int logfs_truncate_direct(struct inode *inode, u64 size)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ struct write_control wc;
+ struct page *page;
+ int e;
+ int err;
+
+ alloc_inode_block(inode);
+
+ for (e = I0_BLOCKS - 1; e >= 0; e--) {
+ if (size > (e+1) * LOGFS_BLOCKSIZE)
+ break;
+
+ wc.ofs = li->li_data[e];
+ if (!wc.ofs)
+ continue;
+
+ page = logfs_get_write_page(inode, e, 0);
+ if (!page)
+ return -ENOMEM;
+ err = logfs_segment_read(inode, page, wc.ofs, e, 0);
+ if (err) {
+ logfs_put_write_page(page);
+ return err;
+ }
+ err = logfs_truncate_i0(inode, page, &wc, size);
+ logfs_put_write_page(page);
+ if (err)
+ return err;
+
+ li->li_data[e] = wc.ofs;
+ }
+ return 0;
+}
+
+/* FIXME: these need to become per-sb once we support different blocksizes */
+static u64 __logfs_step[] = {
+ 1,
+ I1_BLOCKS,
+ I2_BLOCKS,
+ I3_BLOCKS,
+};
+
+static u64 __logfs_start_index[] = {
+ I0_BLOCKS,
+ I1_BLOCKS,
+ I2_BLOCKS,
+ I3_BLOCKS
+};
+
+static inline u64 logfs_step(level_t level)
+{
+ return __logfs_step[(__force u8)level];
+}
+
+static inline u64 logfs_factor(u8 level)
+{
+ return __logfs_step[level] * LOGFS_BLOCKSIZE;
+}
+
+static inline u64 logfs_start_index(level_t level)
+{
+ return __logfs_start_index[(__force u8)level];
+}
+
+static void logfs_unpack_raw_index(pgoff_t index, u64 *bix, level_t *level)
+{
+ logfs_unpack_index(index, bix, level);
+ if (*bix <= logfs_start_index(SUBLEVEL(*level)))
+ *bix = 0;
+}
+
+static int __logfs_truncate_rec(struct inode *inode, struct page *ipage,
+ struct write_control *this_wc, u64 size)
+{
+ int truncate_happened = 0;
+ int e, err = 0;
+ u64 bix, child_bix, next_bix;
+ level_t level;
+ struct page *page;
+ struct write_control child_wc = { /* FIXME: flags */ };
+
+ logfs_unpack_raw_index(ipage->index, &bix, &level);
+ err = logfs_segment_read(inode, ipage, this_wc->ofs, bix, level);
+ if (err)
+ return err;
+
+ for (e = LOGFS_BLOCK_FACTOR - 1; e >= 0; e--) {
+ child_bix = bix + e * logfs_step(SUBLEVEL(level));
+ next_bix = child_bix + logfs_step(SUBLEVEL(level));
+ if (size > next_bix * LOGFS_BLOCKSIZE)
+ break;
+
+ child_wc.ofs = pure_ofs(block_get_pointer(ipage, e));
+ if (!child_wc.ofs)
+ continue;
+
+ page = logfs_get_write_page(inode, child_bix, SUBLEVEL(level));
+ if (!page)
+ return -ENOMEM;
+
+ if ((__force u8)level > 1)
+ err = __logfs_truncate_rec(inode, page, &child_wc, size);
+ else
+ err = logfs_truncate_i0(inode, page, &child_wc, size);
+ logfs_put_write_page(page);
+ if (err)
+ return err;
+
+ truncate_happened = 1;
+ alloc_indirect_block(inode, ipage, 0);
+ block_set_pointer(ipage, e, child_wc.ofs);
+ }
+
+ if (!truncate_happened) {
+ printk("ineffectual truncate (%lx, %lx, %llx)\n", inode->i_ino, ipage->index, size);
+ return 0;
+ }
+
+ this_wc->flags = WF_DELETE;
+ if (logfs_block(ipage)->partial)
+ this_wc->flags |= WF_WRITE;
+
+ return logfs_write_i0(inode, ipage, this_wc);
+}
+
+static int logfs_truncate_rec(struct inode *inode, u64 size)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ struct write_control wc = {
+ .ofs = li->li_data[INDIRECT_INDEX],
+ };
+ struct page *page;
+ int err;
+
+ alloc_inode_block(inode);
+
+ if (!wc.ofs)
+ return 0;
+
+ page = logfs_get_write_page(inode, 0, LEVEL(li->li_height));
+ if (!page)
+ return -ENOMEM;
+
+ err = __logfs_truncate_rec(inode, page, &wc, size);
+ logfs_put_write_page(page);
+ if (err)
+ return err;
+
+ if (li->li_data[INDIRECT_INDEX] != wc.ofs)
+ li->li_data[INDIRECT_INDEX] = wc.ofs;
+ return 0;
+}
+
+static int __logfs_truncate(struct inode *inode, u64 size)
+{
+ int ret;
+
+ if (size >= logfs_factor(logfs_inode(inode)->li_height))
+ return 0;
+
+ ret = logfs_truncate_rec(inode, size);
+ if (ret)
+ return ret;
+
+ return logfs_truncate_direct(inode, size);
+}
+
+int logfs_truncate(struct inode *inode, u64 size)
+{
+ struct super_block *sb = inode->i_sb;
+ int err;
+
+ logfs_get_wblocks(sb, NULL, 1);
+ err = __logfs_truncate(inode, size);
+ if (!err)
+ err = __logfs_write_inode(inode, 0);
+ logfs_put_wblocks(sb, NULL, 1);
+
+ if (!err)
+ err = vmtruncate(inode, size);
+
+ /* I don't trust error recovery yet. */
+ WARN_ON(err);
+ return err;
+}
+
+static void move_page_to_inode(struct inode *inode, struct page *page)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ struct logfs_block *block = logfs_block(page);
+
+ if (!block)
+ return;
+
+ log_blockmove("move_page_to_inode(%llx, %llx, %x)\n",
+ block->ino, block->bix, block->level);
+ BUG_ON(li->li_block);
+ block->ops = &inode_block_ops;
+ block->inode = inode;
+ li->li_block = block;
+
+ block->page = NULL;
+ page->private = 0;
+ ClearPagePrivate(page);
+}
+
+static void move_inode_to_page(struct page *page, struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+ struct logfs_block *block = li->li_block;
+
+ if (!block)
+ return;
+
+ log_blockmove("move_inode_to_page(%llx, %llx, %x)\n",
+ block->ino, block->bix, block->level);
+ BUG_ON(PagePrivate(page));
+ block->ops = &indirect_block_ops;
+ block->page = page;
+ page->private = (unsigned long)block;
+ SetPagePrivate(page);
+
+ block->inode = NULL;
+ li->li_block = NULL;
+}
+
+int logfs_read_inode(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+ struct inode *master_inode = super->s_master_inode;
+ struct page *page;
+ struct logfs_disk_inode *di;
+ u64 ino = inode->i_ino;
+
+ if (ino << sb->s_blocksize_bits > i_size_read(master_inode))
+ return -ENODATA;
+ if (!logfs_exist_block(master_inode, ino))
+ return -ENODATA;
+
+ page = read_cache_page(master_inode->i_mapping, ino,
+ (filler_t *)logfs_readpage, NULL);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ di = kmap_atomic(page, KM_USER0);
+ logfs_disk_to_inode(di, inode);
+ kunmap_atomic(di, KM_USER0);
+ move_page_to_inode(inode, page);
+ page_cache_release(page);
+ return 0;
+}
+
+/* Caller must logfs_put_write_page(page); */
+static struct page *inode_to_page(struct inode *inode)
+{
+ struct inode *master_inode = logfs_super(inode->i_sb)->s_master_inode;
+ struct logfs_disk_inode *di;
+ struct page *page;
+
+ BUG_ON(inode->i_ino == LOGFS_INO_MASTER);
+
+ page = logfs_get_write_page(master_inode, inode->i_ino, 0);
+ if (!page)
+ return NULL;
+
+ di = kmap_atomic(page, KM_USER0);
+ logfs_inode_to_disk(inode, di);
+ kunmap_atomic(di, KM_USER0);
+ move_inode_to_page(page, inode);
+ return page;
+}
+
+/* Cheaper version of write_inode. All changes are concealed in
+ * aliases, which are moved back. No write to the medium happens.
+ */
+void logfs_clear_inode(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct logfs_inode *li = logfs_inode(inode);
+ struct logfs_block *block = li->li_block;
+ struct page *page;
+
+ /* Only deleted files may be dirty at this point */
+ BUG_ON(inode->i_state & I_DIRTY && inode->i_nlink);
+ if (!block)
+ return;
+ if ((logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN)) {
+ block->ops->free_block(inode->i_sb, block);
+ return;
+ }
+
+ BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS);
+ page = inode_to_page(inode);
+ BUG_ON(!page); /* FIXME: Use emergency page */
+ logfs_put_write_page(page);
+}
+
+static int do_write_inode(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct inode *master_inode = logfs_super(sb)->s_master_inode;
+ loff_t size = (inode->i_ino + 1) << inode->i_sb->s_blocksize_bits;
+ struct page *page;
+ int err;
+
+ BUG_ON(inode->i_ino == LOGFS_INO_MASTER);
+ /* FIXME: lock inode */
+
+ if (i_size_read(master_inode) < size)
+ i_size_write(master_inode, size);
+
+ /* TODO: Tell vfs this inode is clean now */
+
+ page = inode_to_page(inode);
+ if (!page)
+ return -ENOMEM;
+
+ /* FIXME: transaction is part of logfs_block now. Is that enough? */
+ err = logfs_write_buf(master_inode, page, 0);
+ logfs_put_write_page(page);
+ return err;
+}
+
+static void logfs_mod_segment_entry(struct super_block *sb, u32 segno,
+ int write,
+ void (*change_se)(struct logfs_segment_entry *, long),
+ long arg)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct inode *inode;
+ struct page *page;
+ struct logfs_segment_entry *se;
+ pgoff_t page_no;
+ int child_no;
+
+ page_no = segno >> (sb->s_blocksize_bits - 3);
+ child_no = segno & ((sb->s_blocksize >> 3) - 1);
+
+ inode = super->s_segfile_inode;
+ page = logfs_get_write_page(inode, page_no, 0);
+ BUG_ON(!page); /* FIXME: We need some reserve page for this case */
+ if (!PageUptodate(page))
+ logfs_read_block(inode, page, WRITE);
+
+ if (write)
+ alloc_indirect_block(inode, page, 0);
+ se = kmap_atomic(page, KM_USER0);
+ change_se(se + child_no, arg);
+ if (write) {
+ logfs_set_alias(sb, logfs_block(page), child_no);
+ BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize);
+ }
+ kunmap_atomic(se, KM_USER0);
+
+ logfs_put_write_page(page);
+}
+
+static void __get_segment_entry(struct logfs_segment_entry *se, long _target)
+{
+ struct logfs_segment_entry *target = (void *)_target;
+
+ *target = *se;
+}
+
+void logfs_get_segment_entry(struct super_block *sb, u32 segno,
+ struct logfs_segment_entry *se)
+{
+ logfs_mod_segment_entry(sb, segno, 0, __get_segment_entry, (long)se);
+}
+
+static void __set_segment_used(struct logfs_segment_entry *se, long increment)
+{
+ u32 valid;
+
+ valid = be32_to_cpu(se->valid);
+ valid += increment;
+ se->valid = cpu_to_be32(valid);
+}
+
+void logfs_set_segment_used(struct super_block *sb, u64 ofs, int increment)
+{
+ struct logfs_super *super = logfs_super(sb);
+ u32 segno = ofs >> super->s_segshift;
+
+ if (!increment)
+ return;
+
+ logfs_mod_segment_entry(sb, segno, 1, __set_segment_used, increment);
+}
+
+static void __set_segment_erased(struct logfs_segment_entry *se, long ec_level)
+{
+ se->ec_level = cpu_to_be32(ec_level);
+}
+
+void logfs_set_segment_erased(struct super_block *sb, u32 segno, u32 ec,
+ gc_level_t gc_level)
+{
+ u32 ec_level = ec << 4 | (__force u8)gc_level;
+
+ logfs_mod_segment_entry(sb, segno, 1, __set_segment_erased, ec_level);
+}
+
+static void __set_segment_reserved(struct logfs_segment_entry *se, long ignore)
+{
+ se->valid = cpu_to_be32(RESERVED);
+}
+
+void logfs_set_segment_reserved(struct super_block *sb, u32 segno)
+{
+ logfs_mod_segment_entry(sb, segno, 1, __set_segment_reserved, 0);
+}
+
+static void __set_segment_unreserved(struct logfs_segment_entry *se,
+ long ec_level)
+{
+ se->valid = 0;
+ se->ec_level = cpu_to_be32(ec_level);
+}
+
+void logfs_set_segment_unreserved(struct super_block *sb, u32 segno, u32 ec)
+{
+ u32 ec_level = ec << 4;
+
+ logfs_mod_segment_entry(sb, segno, 1, __set_segment_unreserved,
+ ec_level);
+}
+
+int __logfs_write_inode(struct inode *inode, long flags)
+{
+ struct super_block *sb = inode->i_sb;
+ int ret;
+
+ logfs_get_wblocks(sb, NULL, flags & WF_LOCK);
+ ret = do_write_inode(inode);
+ logfs_put_wblocks(sb, NULL, flags & WF_LOCK);
+ return ret;
+}
+
+static int do_delete_inode(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct inode *master_inode = logfs_super(sb)->s_master_inode;
+ struct page *page;
+ int ret;
+
+ page = logfs_get_write_page(master_inode, inode->i_ino, 0);
+ if (!page)
+ return -ENOMEM;
+
+ move_inode_to_page(page, inode);
+
+ logfs_get_wblocks(sb, page, 1);
+ ret = __logfs_delete(master_inode, page);
+ logfs_put_wblocks(sb, page, 1);
+
+ logfs_put_write_page(page);
+ return ret;
+}
+
+/*
+ * ZOMBIE inodes have already been deleted before and should remain dead,
+ * if it weren't for valid checking. No need to kill them again here.
+ */
+void logfs_delete_inode(struct inode *inode)
+{
+ struct logfs_inode *li = logfs_inode(inode);
+
+ if (!(li->li_flags & LOGFS_IF_ZOMBIE)) {
+ li->li_flags |= LOGFS_IF_ZOMBIE;
+ if (i_size_read(inode) > 0)
+ logfs_truncate(inode, 0);
+ do_delete_inode(inode);
+ }
+ truncate_inode_pages(&inode->i_data, 0);
+ clear_inode(inode);
+}
+
+void btree_write_block(struct logfs_block *block)
+{
+ struct inode *inode;
+ struct page *page;
+ int err, cookie;
+
+ inode = logfs_safe_iget(block->sb, block->ino, &cookie);
+ page = logfs_get_write_page(inode, block->bix, block->level);
+
+ err = logfs_readpage_nolock(page);
+ BUG_ON(err);
+ BUG_ON(!PagePrivate(page));
+ BUG_ON(logfs_block(page) != block);
+ err = __logfs_write_buf(inode, page, 0);
+ BUG_ON(err);
+ BUG_ON(PagePrivate(page) || page->private);
+
+ logfs_put_write_page(page);
+ logfs_safe_iput(inode, cookie);
+}
+
+/**
+ * logfs_inode_write - write inode or dentry objects
+ *
+ * @inode: parent inode (ifile or directory)
+ * @buf: object to write (inode or dentry)
+ * @n: object size
+ * @_pos: object number (file position in blocks/objects)
+ * @flags: write flags
+ * @lock: 0 if write lock is already taken, 1 otherwise
+ * @shadow_tree: shadow below this inode
+ *
+ * FIXME: All caller of this put a 200-300 byte variable on the stack,
+ * only to call here and do a memcpy from that stack variable. A good
+ * example of wasted performance and stack space.
+ */
+int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
+ loff_t bix, long flags, struct shadow_tree *shadow_tree)
+{
+ loff_t pos = bix << inode->i_sb->s_blocksize_bits;
+ int err;
+ struct page *page;
+ void *pagebuf;
+
+ BUG_ON(pos & (LOGFS_BLOCKSIZE-1));
+ BUG_ON(count > LOGFS_BLOCKSIZE);
+ page = logfs_get_write_page(inode, bix, 0);
+ if (!page)
+ return -ENOMEM;
+
+ pagebuf = kmap_atomic(page, KM_USER0);
+ memcpy(pagebuf, buf, count);
+ flush_dcache_page(page);
+ kunmap_atomic(pagebuf, KM_USER0);
+
+ if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE)
+ i_size_write(inode, pos + LOGFS_BLOCKSIZE);
+
+ err = logfs_write_buf(inode, page, flags);
+ logfs_put_write_page(page);
+ return err;
+}
+
+int logfs_open_segfile(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct inode *inode;
+
+ inode = logfs_read_meta_inode(sb, LOGFS_INO_SEGFILE);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ super->s_segfile_inode = inode;
+ return 0;
+}
+
+int logfs_init_rw(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int min_fill = 3 * super->s_no_blocks;
+
+ INIT_LIST_HEAD(&super->s_object_alias);
+ mutex_init(&super->s_write_mutex);
+ super->s_block_pool = mempool_create_kmalloc_pool(min_fill,
+ sizeof(struct logfs_block));
+ super->s_shadow_pool = mempool_create_kmalloc_pool(min_fill,
+ sizeof(struct logfs_shadow));
+ return 0;
+}
+
+void logfs_cleanup_rw(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ destroy_meta_inode(super->s_segfile_inode);
+ if (super->s_block_pool)
+ mempool_destroy(super->s_block_pool);
+ if (super->s_shadow_pool)
+ mempool_destroy(super->s_shadow_pool);
+}
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
new file mode 100644
index 000000000000..5f58b74516ca
--- /dev/null
+++ b/fs/logfs/segment.c
@@ -0,0 +1,924 @@
+/*
+ * fs/logfs/segment.c - Handling the Object Store
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ *
+ * Object store or ostore makes up the complete device with exception of
+ * the superblock and journal areas. Apart from its own metadata it stores
+ * three kinds of objects: inodes, dentries and blocks, both data and indirect.
+ */
+#include "logfs.h"
+
+static int logfs_mark_segment_bad(struct super_block *sb, u32 segno)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct btree_head32 *head = &super->s_reserved_segments;
+ int err;
+
+ err = btree_insert32(head, segno, (void *)1, GFP_NOFS);
+ if (err)
+ return err;
+ logfs_super(sb)->s_bad_segments++;
+ /* FIXME: write to journal */
+ return 0;
+}
+
+int logfs_erase_segment(struct super_block *sb, u32 segno)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ super->s_gec++;
+
+ return super->s_devops->erase(sb, (u64)segno << super->s_segshift,
+ super->s_segsize);
+}
+
+static s64 logfs_get_free_bytes(struct logfs_area *area, size_t bytes)
+{
+ s32 ofs;
+
+ logfs_open_area(area, bytes);
+
+ ofs = area->a_used_bytes;
+ area->a_used_bytes += bytes;
+ BUG_ON(area->a_used_bytes >= logfs_super(area->a_sb)->s_segsize);
+
+ return dev_ofs(area->a_sb, area->a_segno, ofs);
+}
+
+static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
+ int use_filler)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ filler_t *filler = super->s_devops->readpage;
+ struct page *page;
+
+ BUG_ON(mapping_gfp_mask(mapping) & __GFP_FS);
+ if (use_filler)
+ page = read_cache_page(mapping, index, filler, sb);
+ else {
+ page = find_or_create_page(mapping, index, GFP_NOFS);
+ unlock_page(page);
+ }
+ return page;
+}
+
+void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
+ int use_filler)
+{
+ pgoff_t index = ofs >> PAGE_SHIFT;
+ struct page *page;
+ long offset = ofs & (PAGE_SIZE-1);
+ long copylen;
+
+ /* Only logfs_wbuf_recover may use len==0 */
+ BUG_ON(!len && !use_filler);
+ do {
+ copylen = min((ulong)len, PAGE_SIZE - offset);
+
+ page = get_mapping_page(area->a_sb, index, use_filler);
+ SetPageUptodate(page);
+ BUG_ON(!page); /* FIXME: reserve a pool */
+ memcpy(page_address(page) + offset, buf, copylen);
+ SetPagePrivate(page);
+ page_cache_release(page);
+
+ buf += copylen;
+ len -= copylen;
+ offset = 0;
+ index++;
+ } while (len);
+}
+
+/*
+ * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
+ */
+static void pad_wbuf(struct logfs_area *area, int final)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_super *super = logfs_super(sb);
+ struct page *page;
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+ pgoff_t index = ofs >> PAGE_SHIFT;
+ long offset = ofs & (PAGE_SIZE-1);
+ u32 len = PAGE_SIZE - offset;
+
+ if (len == PAGE_SIZE) {
+ /* The math in this function can surely use some love */
+ len = 0;
+ }
+ if (len) {
+ BUG_ON(area->a_used_bytes >= super->s_segsize);
+
+ page = get_mapping_page(area->a_sb, index, 0);
+ BUG_ON(!page); /* FIXME: reserve a pool */
+ memset(page_address(page) + offset, 0xff, len);
+ SetPagePrivate(page);
+ page_cache_release(page);
+ }
+
+ if (!final)
+ return;
+
+ area->a_used_bytes += len;
+ for ( ; area->a_used_bytes < super->s_segsize;
+ area->a_used_bytes += PAGE_SIZE) {
+ /* Memset another page */
+ index++;
+ page = get_mapping_page(area->a_sb, index, 0);
+ BUG_ON(!page); /* FIXME: reserve a pool */
+ memset(page_address(page), 0xff, PAGE_SIZE);
+ SetPagePrivate(page);
+ page_cache_release(page);
+ }
+}
+
+/*
+ * We have to be careful with the alias tree. Since lookup is done by bix,
+ * it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
+ * indirect blocks. So always use it through accessor functions.
+ */
+static void *alias_tree_lookup(struct super_block *sb, u64 ino, u64 bix,
+ level_t level)
+{
+ struct btree_head128 *head = &logfs_super(sb)->s_object_alias_tree;
+ pgoff_t index = logfs_pack_index(bix, level);
+
+ return btree_lookup128(head, ino, index);
+}
+
+static int alias_tree_insert(struct super_block *sb, u64 ino, u64 bix,
+ level_t level, void *val)
+{
+ struct btree_head128 *head = &logfs_super(sb)->s_object_alias_tree;
+ pgoff_t index = logfs_pack_index(bix, level);
+
+ return btree_insert128(head, ino, index, val, GFP_NOFS);
+}
+
+static int btree_write_alias(struct super_block *sb, struct logfs_block *block,
+ write_alias_t *write_one_alias)
+{
+ struct object_alias_item *item;
+ int err;
+
+ list_for_each_entry(item, &block->item_list, list) {
+ err = write_alias_journal(sb, block->ino, block->bix,
+ block->level, item->child_no, item->val);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static gc_level_t btree_block_level(struct logfs_block *block)
+{
+ return expand_level(block->ino, block->level);
+}
+
+static struct logfs_block_ops btree_block_ops = {
+ .write_block = btree_write_block,
+ .block_level = btree_block_level,
+ .free_block = __free_block,
+ .write_alias = btree_write_alias,
+};
+
+int logfs_load_object_aliases(struct super_block *sb,
+ struct logfs_obj_alias *oa, int count)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_block *block;
+ struct object_alias_item *item;
+ u64 ino, bix;
+ level_t level;
+ int i, err;
+
+ super->s_flags |= LOGFS_SB_FLAG_OBJ_ALIAS;
+ count /= sizeof(*oa);
+ for (i = 0; i < count; i++) {
+ item = mempool_alloc(super->s_alias_pool, GFP_NOFS);
+ if (!item)
+ return -ENOMEM;
+ memset(item, 0, sizeof(*item));
+
+ super->s_no_object_aliases++;
+ item->val = oa[i].val;
+ item->child_no = be16_to_cpu(oa[i].child_no);
+
+ ino = be64_to_cpu(oa[i].ino);
+ bix = be64_to_cpu(oa[i].bix);
+ level = LEVEL(oa[i].level);
+
+ log_aliases("logfs_load_object_aliases(%llx, %llx, %x, %x) %llx\n",
+ ino, bix, level, item->child_no,
+ be64_to_cpu(item->val));
+ block = alias_tree_lookup(sb, ino, bix, level);
+ if (!block) {
+ block = __alloc_block(sb, ino, bix, level);
+ block->ops = &btree_block_ops;
+ err = alias_tree_insert(sb, ino, bix, level, block);
+ BUG_ON(err); /* mempool empty */
+ }
+ if (test_and_set_bit(item->child_no, block->alias_map)) {
+ printk(KERN_ERR"LogFS: Alias collision detected\n");
+ return -EIO;
+ }
+ list_move_tail(&block->alias_list, &super->s_object_alias);
+ list_add(&item->list, &block->item_list);
+ }
+ return 0;
+}
+
+static void kill_alias(void *_block, unsigned long ignore0,
+ u64 ignore1, u64 ignore2, size_t ignore3)
+{
+ struct logfs_block *block = _block;
+ struct super_block *sb = block->sb;
+ struct logfs_super *super = logfs_super(sb);
+ struct object_alias_item *item;
+
+ while (!list_empty(&block->item_list)) {
+ item = list_entry(block->item_list.next, typeof(*item), list);
+ list_del(&item->list);
+ mempool_free(item, super->s_alias_pool);
+ }
+ block->ops->free_block(sb, block);
+}
+
+static int obj_type(struct inode *inode, level_t level)
+{
+ if (level == 0) {
+ if (S_ISDIR(inode->i_mode))
+ return OBJ_DENTRY;
+ if (inode->i_ino == LOGFS_INO_MASTER)
+ return OBJ_INODE;
+ }
+ return OBJ_BLOCK;
+}
+
+static int obj_len(struct super_block *sb, int obj_type)
+{
+ switch (obj_type) {
+ case OBJ_DENTRY:
+ return sizeof(struct logfs_disk_dentry);
+ case OBJ_INODE:
+ return sizeof(struct logfs_disk_inode);
+ case OBJ_BLOCK:
+ return sb->s_blocksize;
+ default:
+ BUG();
+ }
+}
+
+static int __logfs_segment_write(struct inode *inode, void *buf,
+ struct logfs_shadow *shadow, int type, int len, int compr)
+{
+ struct logfs_area *area;
+ struct super_block *sb = inode->i_sb;
+ s64 ofs;
+ struct logfs_object_header h;
+ int acc_len;
+
+ if (shadow->gc_level == 0)
+ acc_len = len;
+ else
+ acc_len = obj_len(sb, type);
+
+ area = get_area(sb, shadow->gc_level);
+ ofs = logfs_get_free_bytes(area, len + LOGFS_OBJECT_HEADERSIZE);
+ LOGFS_BUG_ON(ofs <= 0, sb);
+ /*
+ * Order is important. logfs_get_free_bytes(), by modifying the
+ * segment file, may modify the content of the very page we're about
+ * to write now. Which is fine, as long as the calculated crc and
+ * written data still match. So do the modifications _before_
+ * calculating the crc.
+ */
+
+ h.len = cpu_to_be16(len);
+ h.type = type;
+ h.compr = compr;
+ h.ino = cpu_to_be64(inode->i_ino);
+ h.bix = cpu_to_be64(shadow->bix);
+ h.crc = logfs_crc32(&h, sizeof(h) - 4, 4);
+ h.data_crc = logfs_crc32(buf, len, 0);
+
+ logfs_buf_write(area, ofs, &h, sizeof(h));
+ logfs_buf_write(area, ofs + LOGFS_OBJECT_HEADERSIZE, buf, len);
+
+ shadow->new_ofs = ofs;
+ shadow->new_len = acc_len + LOGFS_OBJECT_HEADERSIZE;
+
+ return 0;
+}
+
+static s64 logfs_segment_write_compress(struct inode *inode, void *buf,
+ struct logfs_shadow *shadow, int type, int len)
+{
+ struct super_block *sb = inode->i_sb;
+ void *compressor_buf = logfs_super(sb)->s_compressed_je;
+ ssize_t compr_len;
+ int ret;
+
+ mutex_lock(&logfs_super(sb)->s_journal_mutex);
+ compr_len = logfs_compress(buf, compressor_buf, len, len);
+
+ if (compr_len >= 0) {
+ ret = __logfs_segment_write(inode, compressor_buf, shadow,
+ type, compr_len, COMPR_ZLIB);
+ } else {
+ ret = __logfs_segment_write(inode, buf, shadow, type, len,
+ COMPR_NONE);
+ }
+ mutex_unlock(&logfs_super(sb)->s_journal_mutex);
+ return ret;
+}
+
+/**
+ * logfs_segment_write - write data block to object store
+ * @inode: inode containing data
+ *
+ * Returns an errno or zero.
+ */
+int logfs_segment_write(struct inode *inode, struct page *page,
+ struct logfs_shadow *shadow)
+{
+ struct super_block *sb = inode->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+ int do_compress, type, len;
+ int ret;
+ void *buf;
+
+ BUG_ON(logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
+ do_compress = logfs_inode(inode)->li_flags & LOGFS_IF_COMPRESSED;
+ if (shadow->gc_level != 0) {
+ /* temporarily disable compression for indirect blocks */
+ do_compress = 0;
+ }
+
+ type = obj_type(inode, shrink_level(shadow->gc_level));
+ len = obj_len(sb, type);
+ buf = kmap(page);
+ if (do_compress)
+ ret = logfs_segment_write_compress(inode, buf, shadow, type,
+ len);
+ else
+ ret = __logfs_segment_write(inode, buf, shadow, type, len,
+ COMPR_NONE);
+ kunmap(page);
+
+ log_segment("logfs_segment_write(%llx, %llx, %x) %llx->%llx %x->%x\n",
+ shadow->ino, shadow->bix, shadow->gc_level,
+ shadow->old_ofs, shadow->new_ofs,
+ shadow->old_len, shadow->new_len);
+ /* this BUG_ON did catch a locking bug. useful */
+ BUG_ON(!(shadow->new_ofs & (super->s_segsize - 1)));
+ return ret;
+}
+
+int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf)
+{
+ pgoff_t index = ofs >> PAGE_SHIFT;
+ struct page *page;
+ long offset = ofs & (PAGE_SIZE-1);
+ long copylen;
+
+ while (len) {
+ copylen = min((ulong)len, PAGE_SIZE - offset);
+
+ page = get_mapping_page(sb, index, 1);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+ memcpy(buf, page_address(page) + offset, copylen);
+ page_cache_release(page);
+
+ buf += copylen;
+ len -= copylen;
+ offset = 0;
+ index++;
+ }
+ return 0;
+}
+
+/*
+ * The "position" of indirect blocks is ambiguous. It can be the position
+ * of any data block somewhere behind this indirect block. So we need to
+ * normalize the positions through logfs_block_mask() before comparing.
+ */
+static int check_pos(struct super_block *sb, u64 pos1, u64 pos2, level_t level)
+{
+ return (pos1 & logfs_block_mask(sb, level)) !=
+ (pos2 & logfs_block_mask(sb, level));
+}
+
+#if 0
+static int read_seg_header(struct super_block *sb, u64 ofs,
+ struct logfs_segment_header *sh)
+{
+ __be32 crc;
+ int err;
+
+ err = wbuf_read(sb, ofs, sizeof(*sh), sh);
+ if (err)
+ return err;
+ crc = logfs_crc32(sh, sizeof(*sh), 4);
+ if (crc != sh->crc) {
+ printk(KERN_ERR"LOGFS: header crc error at %llx: expected %x, "
+ "got %x\n", ofs, be32_to_cpu(sh->crc),
+ be32_to_cpu(crc));
+ return -EIO;
+ }
+ return 0;
+}
+#endif
+
+static int read_obj_header(struct super_block *sb, u64 ofs,
+ struct logfs_object_header *oh)
+{
+ __be32 crc;
+ int err;
+
+ err = wbuf_read(sb, ofs, sizeof(*oh), oh);
+ if (err)
+ return err;
+ crc = logfs_crc32(oh, sizeof(*oh) - 4, 4);
+ if (crc != oh->crc) {
+ printk(KERN_ERR"LOGFS: header crc error at %llx: expected %x, "
+ "got %x\n", ofs, be32_to_cpu(oh->crc),
+ be32_to_cpu(crc));
+ return -EIO;
+ }
+ return 0;
+}
+
+static void move_btree_to_page(struct inode *inode, struct page *page,
+ __be64 *data)
+{
+ struct super_block *sb = inode->i_sb;
+ struct logfs_super *super = logfs_super(sb);
+ struct btree_head128 *head = &super->s_object_alias_tree;
+ struct logfs_block *block;
+ struct object_alias_item *item, *next;
+
+ if (!(super->s_flags & LOGFS_SB_FLAG_OBJ_ALIAS))
+ return;
+
+ block = btree_remove128(head, inode->i_ino, page->index);
+ if (!block)
+ return;
+
+ log_blockmove("move_btree_to_page(%llx, %llx, %x)\n",
+ block->ino, block->bix, block->level);
+ list_for_each_entry_safe(item, next, &block->item_list, list) {
+ data[item->child_no] = item->val;
+ list_del(&item->list);
+ mempool_free(item, super->s_alias_pool);
+ }
+ block->page = page;
+ SetPagePrivate(page);
+ page->private = (unsigned long)block;
+ block->ops = &indirect_block_ops;
+ initialize_block_counters(page, block, data, 0);
+}
+
+/*
+ * This silences a false, yet annoying gcc warning. I hate it when my editor
+ * jumps into bitops.h each time I recompile this file.
+ * TODO: Complain to gcc folks about this and upgrade compiler.
+ */
+static unsigned long fnb(const unsigned long *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+void move_page_to_btree(struct page *page)
+{
+ struct logfs_block *block = logfs_block(page);
+ struct super_block *sb = block->sb;
+ struct logfs_super *super = logfs_super(sb);
+ struct object_alias_item *item;
+ unsigned long pos;
+ __be64 *child;
+ int err;
+
+ if (super->s_flags & LOGFS_SB_FLAG_SHUTDOWN) {
+ block->ops->free_block(sb, block);
+ return;
+ }
+ log_blockmove("move_page_to_btree(%llx, %llx, %x)\n",
+ block->ino, block->bix, block->level);
+ super->s_flags |= LOGFS_SB_FLAG_OBJ_ALIAS;
+
+ for (pos = 0; ; pos++) {
+ pos = fnb(block->alias_map, LOGFS_BLOCK_FACTOR, pos);
+ if (pos >= LOGFS_BLOCK_FACTOR)
+ break;
+
+ item = mempool_alloc(super->s_alias_pool, GFP_NOFS);
+ BUG_ON(!item); /* mempool empty */
+ memset(item, 0, sizeof(*item));
+
+ child = kmap_atomic(page, KM_USER0);
+ item->val = child[pos];
+ kunmap_atomic(child, KM_USER0);
+ item->child_no = pos;
+ list_add(&item->list, &block->item_list);
+ }
+ block->page = NULL;
+ ClearPagePrivate(page);
+ page->private = 0;
+ block->ops = &btree_block_ops;
+ err = alias_tree_insert(block->sb, block->ino, block->bix, block->level,
+ block);
+ BUG_ON(err); /* mempool empty */
+ ClearPageUptodate(page);
+}
+
+static int __logfs_segment_read(struct inode *inode, void *buf,
+ u64 ofs, u64 bix, level_t level)
+{
+ struct super_block *sb = inode->i_sb;
+ void *compressor_buf = logfs_super(sb)->s_compressed_je;
+ struct logfs_object_header oh;
+ __be32 crc;
+ u16 len;
+ int err, block_len;
+
+ block_len = obj_len(sb, obj_type(inode, level));
+ err = read_obj_header(sb, ofs, &oh);
+ if (err)
+ goto out_err;
+
+ err = -EIO;
+ if (be64_to_cpu(oh.ino) != inode->i_ino
+ || check_pos(sb, be64_to_cpu(oh.bix), bix, level)) {
+ printk(KERN_ERR"LOGFS: (ino, bix) don't match at %llx: "
+ "expected (%lx, %llx), got (%llx, %llx)\n",
+ ofs, inode->i_ino, bix,
+ be64_to_cpu(oh.ino), be64_to_cpu(oh.bix));
+ goto out_err;
+ }
+
+ len = be16_to_cpu(oh.len);
+
+ switch (oh.compr) {
+ case COMPR_NONE:
+ err = wbuf_read(sb, ofs + LOGFS_OBJECT_HEADERSIZE, len, buf);
+ if (err)
+ goto out_err;
+ crc = logfs_crc32(buf, len, 0);
+ if (crc != oh.data_crc) {
+ printk(KERN_ERR"LOGFS: uncompressed data crc error at "
+ "%llx: expected %x, got %x\n", ofs,
+ be32_to_cpu(oh.data_crc),
+ be32_to_cpu(crc));
+ goto out_err;
+ }
+ break;
+ case COMPR_ZLIB:
+ mutex_lock(&logfs_super(sb)->s_journal_mutex);
+ err = wbuf_read(sb, ofs + LOGFS_OBJECT_HEADERSIZE, len,
+ compressor_buf);
+ if (err) {
+ mutex_unlock(&logfs_super(sb)->s_journal_mutex);
+ goto out_err;
+ }
+ crc = logfs_crc32(compressor_buf, len, 0);
+ if (crc != oh.data_crc) {
+ printk(KERN_ERR"LOGFS: compressed data crc error at "
+ "%llx: expected %x, got %x\n", ofs,
+ be32_to_cpu(oh.data_crc),
+ be32_to_cpu(crc));
+ mutex_unlock(&logfs_super(sb)->s_journal_mutex);
+ goto out_err;
+ }
+ err = logfs_uncompress(compressor_buf, buf, len, block_len);
+ mutex_unlock(&logfs_super(sb)->s_journal_mutex);
+ if (err) {
+ printk(KERN_ERR"LOGFS: uncompress error at %llx\n", ofs);
+ goto out_err;
+ }
+ break;
+ default:
+ LOGFS_BUG(sb);
+ err = -EIO;
+ goto out_err;
+ }
+ return 0;
+
+out_err:
+ logfs_set_ro(sb);
+ printk(KERN_ERR"LOGFS: device is read-only now\n");
+ LOGFS_BUG(sb);
+ return err;
+}
+
+/**
+ * logfs_segment_read - read data block from object store
+ * @inode: inode containing data
+ * @buf: data buffer
+ * @ofs: physical data offset
+ * @bix: block index
+ * @level: block level
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int logfs_segment_read(struct inode *inode, struct page *page,
+ u64 ofs, u64 bix, level_t level)
+{
+ int err;
+ void *buf;
+
+ if (PageUptodate(page))
+ return 0;
+
+ ofs &= ~LOGFS_FULLY_POPULATED;
+
+ buf = kmap(page);
+ err = __logfs_segment_read(inode, buf, ofs, bix, level);
+ if (!err) {
+ move_btree_to_page(inode, page, buf);
+ SetPageUptodate(page);
+ }
+ kunmap(page);
+ log_segment("logfs_segment_read(%lx, %llx, %x) %llx (%d)\n",
+ inode->i_ino, bix, level, ofs, err);
+ return err;
+}
+
+int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
+{
+ struct super_block *sb = inode->i_sb;
+ struct logfs_object_header h;
+ u16 len;
+ int err;
+
+ BUG_ON(logfs_super(sb)->s_flags & LOGFS_SB_FLAG_SHUTDOWN);
+ BUG_ON(shadow->old_ofs & LOGFS_FULLY_POPULATED);
+ if (!shadow->old_ofs)
+ return 0;
+
+ log_segment("logfs_segment_delete(%llx, %llx, %x) %llx->%llx %x->%x\n",
+ shadow->ino, shadow->bix, shadow->gc_level,
+ shadow->old_ofs, shadow->new_ofs,
+ shadow->old_len, shadow->new_len);
+ err = read_obj_header(sb, shadow->old_ofs, &h);
+ LOGFS_BUG_ON(err, sb);
+ LOGFS_BUG_ON(be64_to_cpu(h.ino) != inode->i_ino, sb);
+ LOGFS_BUG_ON(check_pos(sb, shadow->bix, be64_to_cpu(h.bix),
+ shrink_level(shadow->gc_level)), sb);
+
+ if (shadow->gc_level == 0)
+ len = be16_to_cpu(h.len);
+ else
+ len = obj_len(sb, h.type);
+ shadow->old_len = len + sizeof(h);
+ return 0;
+}
+
+static void freeseg(struct super_block *sb, u32 segno)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping = super->s_mapping_inode->i_mapping;
+ struct page *page;
+ u64 ofs, start, end;
+
+ start = dev_ofs(sb, segno, 0);
+ end = dev_ofs(sb, segno + 1, 0);
+ for (ofs = start; ofs < end; ofs += PAGE_SIZE) {
+ page = find_get_page(mapping, ofs >> PAGE_SHIFT);
+ if (!page)
+ continue;
+ ClearPagePrivate(page);
+ page_cache_release(page);
+ }
+}
+
+int logfs_open_area(struct logfs_area *area, size_t bytes)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_super *super = logfs_super(sb);
+ int err, closed = 0;
+
+ if (area->a_is_open && area->a_used_bytes + bytes <= super->s_segsize)
+ return 0;
+
+ if (area->a_is_open) {
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
+ u32 len = super->s_segsize - area->a_written_bytes;
+
+ log_gc("logfs_close_area(%x)\n", area->a_segno);
+ pad_wbuf(area, 1);
+ super->s_devops->writeseg(area->a_sb, ofs, len);
+ freeseg(sb, area->a_segno);
+ closed = 1;
+ }
+
+ area->a_used_bytes = 0;
+ area->a_written_bytes = 0;
+again:
+ area->a_ops->get_free_segment(area);
+ area->a_ops->get_erase_count(area);
+
+ log_gc("logfs_open_area(%x, %x)\n", area->a_segno, area->a_level);
+ err = area->a_ops->erase_segment(area);
+ if (err) {
+ printk(KERN_WARNING "LogFS: Error erasing segment %x\n",
+ area->a_segno);
+ logfs_mark_segment_bad(sb, area->a_segno);
+ goto again;
+ }
+ area->a_is_open = 1;
+ return closed;
+}
+
+void logfs_sync_area(struct logfs_area *area)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_super *super = logfs_super(sb);
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_written_bytes);
+ u32 len = (area->a_used_bytes - area->a_written_bytes);
+
+ if (super->s_writesize)
+ len &= ~(super->s_writesize - 1);
+ if (len == 0)
+ return;
+ pad_wbuf(area, 0);
+ super->s_devops->writeseg(sb, ofs, len);
+ area->a_written_bytes += len;
+}
+
+void logfs_sync_segments(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int i;
+
+ for_each_area(i)
+ logfs_sync_area(super->s_area[i]);
+}
+
+/*
+ * Pick a free segment to be used for this area. Effectively takes a
+ * candidate from the free list (not really a candidate anymore).
+ */
+static void ostore_get_free_segment(struct logfs_area *area)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_super *super = logfs_super(sb);
+
+ if (super->s_free_list.count == 0) {
+ printk(KERN_ERR"LOGFS: ran out of free segments\n");
+ LOGFS_BUG(sb);
+ }
+
+ area->a_segno = get_best_cand(sb, &super->s_free_list, NULL);
+}
+
+static void ostore_get_erase_count(struct logfs_area *area)
+{
+ struct logfs_segment_entry se;
+ u32 ec_level;
+
+ logfs_get_segment_entry(area->a_sb, area->a_segno, &se);
+ BUG_ON(se.ec_level == cpu_to_be32(BADSEG) ||
+ se.valid == cpu_to_be32(RESERVED));
+
+ ec_level = be32_to_cpu(se.ec_level);
+ area->a_erase_count = (ec_level >> 4) + 1;
+}
+
+static int ostore_erase_segment(struct logfs_area *area)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_segment_header sh;
+ u64 ofs;
+ int err;
+
+ err = logfs_erase_segment(sb, area->a_segno);
+ if (err)
+ return err;
+
+ sh.pad = 0;
+ sh.type = SEG_OSTORE;
+ sh.level = (__force u8)area->a_level;
+ sh.segno = cpu_to_be32(area->a_segno);
+ sh.ec = cpu_to_be32(area->a_erase_count);
+ sh.gec = cpu_to_be64(logfs_super(sb)->s_gec);
+ sh.crc = logfs_crc32(&sh, sizeof(sh), 4);
+
+ logfs_set_segment_erased(sb, area->a_segno, area->a_erase_count,
+ area->a_level);
+
+ ofs = dev_ofs(sb, area->a_segno, 0);
+ area->a_used_bytes = sizeof(sh);
+ logfs_buf_write(area, ofs, &sh, sizeof(sh));
+ return 0;
+}
+
+static const struct logfs_area_ops ostore_area_ops = {
+ .get_free_segment = ostore_get_free_segment,
+ .get_erase_count = ostore_get_erase_count,
+ .erase_segment = ostore_erase_segment,
+};
+
+static void free_area(struct logfs_area *area)
+{
+ if (area)
+ freeseg(area->a_sb, area->a_segno);
+ kfree(area);
+}
+
+static struct logfs_area *alloc_area(struct super_block *sb)
+{
+ struct logfs_area *area;
+
+ area = kzalloc(sizeof(*area), GFP_KERNEL);
+ if (!area)
+ return NULL;
+
+ area->a_sb = sb;
+ return area;
+}
+
+static void map_invalidatepage(struct page *page, unsigned long l)
+{
+ BUG();
+}
+
+static int map_releasepage(struct page *page, gfp_t g)
+{
+ /* Don't release these pages */
+ return 0;
+}
+
+static const struct address_space_operations mapping_aops = {
+ .invalidatepage = map_invalidatepage,
+ .releasepage = map_releasepage,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+};
+
+int logfs_init_mapping(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct address_space *mapping;
+ struct inode *inode;
+
+ inode = logfs_new_meta_inode(sb, LOGFS_INO_MAPPING);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+ super->s_mapping_inode = inode;
+ mapping = inode->i_mapping;
+ mapping->a_ops = &mapping_aops;
+ /* Would it be possible to use __GFP_HIGHMEM as well? */
+ mapping_set_gfp_mask(mapping, GFP_NOFS);
+ return 0;
+}
+
+int logfs_init_areas(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int i = -1;
+
+ super->s_alias_pool = mempool_create_kmalloc_pool(600,
+ sizeof(struct object_alias_item));
+ if (!super->s_alias_pool)
+ return -ENOMEM;
+
+ super->s_journal_area = alloc_area(sb);
+ if (!super->s_journal_area)
+ goto err;
+
+ for_each_area(i) {
+ super->s_area[i] = alloc_area(sb);
+ if (!super->s_area[i])
+ goto err;
+ super->s_area[i]->a_level = GC_LEVEL(i);
+ super->s_area[i]->a_ops = &ostore_area_ops;
+ }
+ btree_init_mempool128(&super->s_object_alias_tree,
+ super->s_btree_pool);
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ free_area(super->s_area[i]);
+ free_area(super->s_journal_area);
+ mempool_destroy(super->s_alias_pool);
+ return -ENOMEM;
+}
+
+void logfs_cleanup_areas(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int i;
+
+ btree_grim_visitor128(&super->s_object_alias_tree, 0, kill_alias);
+ for_each_area(i)
+ free_area(super->s_area[i]);
+ free_area(super->s_journal_area);
+ destroy_meta_inode(super->s_mapping_inode);
+}
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
new file mode 100644
index 000000000000..d128a2c1c8d1
--- /dev/null
+++ b/fs/logfs/super.c
@@ -0,0 +1,634 @@
+/*
+ * fs/logfs/super.c
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
+ *
+ * Generally contains mount/umount code and also serves as a dump area for
+ * any functions that don't fit elsewhere and neither justify a file of their
+ * own.
+ */
+#include "logfs.h"
+#include <linux/bio.h>
+#include <linux/mtd/mtd.h>
+#include <linux/statfs.h>
+#include <linux/buffer_head.h>
+
+static DEFINE_MUTEX(emergency_mutex);
+static struct page *emergency_page;
+
+struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ struct page *page;
+ int err;
+
+ page = read_cache_page(mapping, index, filler, NULL);
+ if (page)
+ return page;
+
+ /* No more pages available, switch to emergency page */
+ printk(KERN_INFO"Logfs: Using emergency page\n");
+ mutex_lock(&emergency_mutex);
+ err = filler(NULL, emergency_page);
+ if (err) {
+ mutex_unlock(&emergency_mutex);
+ printk(KERN_EMERG"Logfs: Error reading emergency page\n");
+ return ERR_PTR(err);
+ }
+ return emergency_page;
+}
+
+void emergency_read_end(struct page *page)
+{
+ if (page == emergency_page)
+ mutex_unlock(&emergency_mutex);
+ else
+ page_cache_release(page);
+}
+
+static void dump_segfile(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_segment_entry se;
+ u32 segno;
+
+ for (segno = 0; segno < super->s_no_segs; segno++) {
+ logfs_get_segment_entry(sb, segno, &se);
+ printk("%3x: %6x %8x", segno, be32_to_cpu(se.ec_level),
+ be32_to_cpu(se.valid));
+ if (++segno < super->s_no_segs) {
+ logfs_get_segment_entry(sb, segno, &se);
+ printk(" %6x %8x", be32_to_cpu(se.ec_level),
+ be32_to_cpu(se.valid));
+ }
+ if (++segno < super->s_no_segs) {
+ logfs_get_segment_entry(sb, segno, &se);
+ printk(" %6x %8x", be32_to_cpu(se.ec_level),
+ be32_to_cpu(se.valid));
+ }
+ if (++segno < super->s_no_segs) {
+ logfs_get_segment_entry(sb, segno, &se);
+ printk(" %6x %8x", be32_to_cpu(se.ec_level),
+ be32_to_cpu(se.valid));
+ }
+ printk("\n");
+ }
+}
+
+/*
+ * logfs_crash_dump - dump debug information to device
+ *
+ * The LogFS superblock only occupies part of a segment. This function will
+ * write as much debug information as it can gather into the spare space.
+ */
+void logfs_crash_dump(struct super_block *sb)
+{
+ dump_segfile(sb);
+}
+
+/*
+ * TODO: move to lib/string.c
+ */
+/**
+ * memchr_inv - Find a character in an area of memory.
+ * @s: The memory area
+ * @c: The byte to search for
+ * @n: The size of the area.
+ *
+ * returns the address of the first character other than @c, or %NULL
+ * if the whole buffer contains just @c.
+ */
+void *memchr_inv(const void *s, int c, size_t n)
+{
+ const unsigned char *p = s;
+ while (n-- != 0)
+ if ((unsigned char)c != *p++)
+ return (void *)(p - 1);
+
+ return NULL;
+}
+
+/*
+ * FIXME: There should be a reserve for root, similar to ext2.
+ */
+int logfs_statfs(struct dentry *dentry, struct kstatfs *stats)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct logfs_super *super = logfs_super(sb);
+
+ stats->f_type = LOGFS_MAGIC_U32;
+ stats->f_bsize = sb->s_blocksize;
+ stats->f_blocks = super->s_size >> LOGFS_BLOCK_BITS >> 3;
+ stats->f_bfree = super->s_free_bytes >> sb->s_blocksize_bits;
+ stats->f_bavail = super->s_free_bytes >> sb->s_blocksize_bits;
+ stats->f_files = 0;
+ stats->f_ffree = 0;
+ stats->f_namelen = LOGFS_MAX_NAMELEN;
+ return 0;
+}
+
+static int logfs_sb_set(struct super_block *sb, void *_super)
+{
+ struct logfs_super *super = _super;
+
+ sb->s_fs_info = super;
+ sb->s_mtd = super->s_mtd;
+ sb->s_bdev = super->s_bdev;
+ return 0;
+}
+
+static int logfs_sb_test(struct super_block *sb, void *_super)
+{
+ struct logfs_super *super = _super;
+ struct mtd_info *mtd = super->s_mtd;
+
+ if (mtd && sb->s_mtd == mtd)
+ return 1;
+ if (super->s_bdev && sb->s_bdev == super->s_bdev)
+ return 1;
+ return 0;
+}
+
+static void set_segment_header(struct logfs_segment_header *sh, u8 type,
+ u8 level, u32 segno, u32 ec)
+{
+ sh->pad = 0;
+ sh->type = type;
+ sh->level = level;
+ sh->segno = cpu_to_be32(segno);
+ sh->ec = cpu_to_be32(ec);
+ sh->gec = cpu_to_be64(segno);
+ sh->crc = logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4);
+}
+
+static void logfs_write_ds(struct super_block *sb, struct logfs_disk_super *ds,
+ u32 segno, u32 ec)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_segment_header *sh = &ds->ds_sh;
+ int i;
+
+ memset(ds, 0, sizeof(*ds));
+ set_segment_header(sh, SEG_SUPER, 0, segno, ec);
+
+ ds->ds_ifile_levels = super->s_ifile_levels;
+ ds->ds_iblock_levels = super->s_iblock_levels;
+ ds->ds_data_levels = super->s_data_levels; /* XXX: Remove */
+ ds->ds_segment_shift = super->s_segshift;
+ ds->ds_block_shift = sb->s_blocksize_bits;
+ ds->ds_write_shift = super->s_writeshift;
+ ds->ds_filesystem_size = cpu_to_be64(super->s_size);
+ ds->ds_segment_size = cpu_to_be32(super->s_segsize);
+ ds->ds_bad_seg_reserve = cpu_to_be32(super->s_bad_seg_reserve);
+ ds->ds_feature_incompat = cpu_to_be64(super->s_feature_incompat);
+ ds->ds_feature_ro_compat= cpu_to_be64(super->s_feature_ro_compat);
+ ds->ds_feature_compat = cpu_to_be64(super->s_feature_compat);
+ ds->ds_feature_flags = cpu_to_be64(super->s_feature_flags);
+ ds->ds_root_reserve = cpu_to_be64(super->s_root_reserve);
+ ds->ds_speed_reserve = cpu_to_be64(super->s_speed_reserve);
+ journal_for_each(i)
+ ds->ds_journal_seg[i] = cpu_to_be32(super->s_journal_seg[i]);
+ ds->ds_magic = cpu_to_be64(LOGFS_MAGIC);
+ ds->ds_crc = logfs_crc32(ds, sizeof(*ds),
+ LOGFS_SEGMENT_HEADERSIZE + 12);
+}
+
+static int write_one_sb(struct super_block *sb,
+ struct page *(*find_sb)(struct super_block *sb, u64 *ofs))
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_disk_super *ds;
+ struct logfs_segment_entry se;
+ struct page *page;
+ u64 ofs;
+ u32 ec, segno;
+ int err;
+
+ page = find_sb(sb, &ofs);
+ if (!page)
+ return -EIO;
+ ds = page_address(page);
+ segno = seg_no(sb, ofs);
+ logfs_get_segment_entry(sb, segno, &se);
+ ec = be32_to_cpu(se.ec_level) >> 4;
+ ec++;
+ logfs_set_segment_erased(sb, segno, ec, 0);
+ logfs_write_ds(sb, ds, segno, ec);
+ err = super->s_devops->write_sb(sb, page);
+ page_cache_release(page);
+ return err;
+}
+
+int logfs_write_sb(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int err;
+
+ /* First superblock */
+ err = write_one_sb(sb, super->s_devops->find_first_sb);
+ if (err)
+ return err;
+
+ /* Last superblock */
+ err = write_one_sb(sb, super->s_devops->find_last_sb);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int ds_cmp(const void *ds0, const void *ds1)
+{
+ size_t len = sizeof(struct logfs_disk_super);
+
+ /* We know the segment headers differ, so ignore them */
+ len -= LOGFS_SEGMENT_HEADERSIZE;
+ ds0 += LOGFS_SEGMENT_HEADERSIZE;
+ ds1 += LOGFS_SEGMENT_HEADERSIZE;
+ return memcmp(ds0, ds1, len);
+}
+
+static int logfs_recover_sb(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct logfs_disk_super _ds0, *ds0 = &_ds0;
+ struct logfs_disk_super _ds1, *ds1 = &_ds1;
+ int err, valid0, valid1;
+
+ /* read first superblock */
+ err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
+ if (err)
+ return err;
+ /* read last superblock */
+ err = wbuf_read(sb, super->s_sb_ofs[1], sizeof(*ds1), ds1);
+ if (err)
+ return err;
+ valid0 = logfs_check_ds(ds0) == 0;
+ valid1 = logfs_check_ds(ds1) == 0;
+
+ if (!valid0 && valid1) {
+ printk(KERN_INFO"First superblock is invalid - fixing.\n");
+ return write_one_sb(sb, super->s_devops->find_first_sb);
+ }
+ if (valid0 && !valid1) {
+ printk(KERN_INFO"Last superblock is invalid - fixing.\n");
+ return write_one_sb(sb, super->s_devops->find_last_sb);
+ }
+ if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
+ printk(KERN_INFO"Superblocks don't match - fixing.\n");
+ return write_one_sb(sb, super->s_devops->find_last_sb);
+ }
+ /* If neither is valid now, something's wrong. Didn't we properly
+ * check them before?!? */
+ BUG_ON(!valid0 && !valid1);
+ return 0;
+}
+
+static int logfs_make_writeable(struct super_block *sb)
+{
+ int err;
+
+ /* Repair any broken superblock copies */
+ err = logfs_recover_sb(sb);
+ if (err)
+ return err;
+
+ /* Check areas for trailing unaccounted data */
+ err = logfs_check_areas(sb);
+ if (err)
+ return err;
+
+ err = logfs_open_segfile(sb);
+ if (err)
+ return err;
+
+ /* Do one GC pass before any data gets dirtied */
+ logfs_gc_pass(sb);
+
+ /* after all initializations are done, replay the journal
+ * for rw-mounts, if necessary */
+ err = logfs_replay_journal(sb);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
+{
+ struct inode *rootdir;
+ int err;
+
+ /* root dir */
+ rootdir = logfs_iget(sb, LOGFS_INO_ROOT);
+ if (IS_ERR(rootdir))
+ goto fail;
+
+ sb->s_root = d_alloc_root(rootdir);
+ if (!sb->s_root)
+ goto fail;
+
+ /* FIXME: check for read-only mounts */
+ err = logfs_make_writeable(sb);
+ if (err)
+ goto fail2;
+
+ log_super("LogFS: Finished mounting\n");
+ simple_set_mnt(mnt, sb);
+ return 0;
+
+fail2:
+ iput(rootdir);
+fail:
+ iput(logfs_super(sb)->s_master_inode);
+ return -EIO;
+}
+
+int logfs_check_ds(struct logfs_disk_super *ds)
+{
+ struct logfs_segment_header *sh = &ds->ds_sh;
+
+ if (ds->ds_magic != cpu_to_be64(LOGFS_MAGIC))
+ return -EINVAL;
+ if (sh->crc != logfs_crc32(sh, LOGFS_SEGMENT_HEADERSIZE, 4))
+ return -EINVAL;
+ if (ds->ds_crc != logfs_crc32(ds, sizeof(*ds),
+ LOGFS_SEGMENT_HEADERSIZE + 12))
+ return -EINVAL;
+ return 0;
+}
+
+static struct page *find_super_block(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct page *first, *last;
+
+ first = super->s_devops->find_first_sb(sb, &super->s_sb_ofs[0]);
+ if (!first || IS_ERR(first))
+ return NULL;
+ last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]);
+ if (!last || IS_ERR(first)) {
+ page_cache_release(first);
+ return NULL;
+ }
+
+ if (!logfs_check_ds(page_address(first))) {
+ page_cache_release(last);
+ return first;
+ }
+
+ /* First one didn't work, try the second superblock */
+ if (!logfs_check_ds(page_address(last))) {
+ page_cache_release(first);
+ return last;
+ }
+
+ /* Neither worked, sorry folks */
+ page_cache_release(first);
+ page_cache_release(last);
+ return NULL;
+}
+
+static int __logfs_read_sb(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ struct page *page;
+ struct logfs_disk_super *ds;
+ int i;
+
+ page = find_super_block(sb);
+ if (!page)
+ return -EIO;
+
+ ds = page_address(page);
+ super->s_size = be64_to_cpu(ds->ds_filesystem_size);
+ super->s_root_reserve = be64_to_cpu(ds->ds_root_reserve);
+ super->s_speed_reserve = be64_to_cpu(ds->ds_speed_reserve);
+ super->s_bad_seg_reserve = be32_to_cpu(ds->ds_bad_seg_reserve);
+ super->s_segsize = 1 << ds->ds_segment_shift;
+ super->s_segmask = (1 << ds->ds_segment_shift) - 1;
+ super->s_segshift = ds->ds_segment_shift;
+ sb->s_blocksize = 1 << ds->ds_block_shift;
+ sb->s_blocksize_bits = ds->ds_block_shift;
+ super->s_writesize = 1 << ds->ds_write_shift;
+ super->s_writeshift = ds->ds_write_shift;
+ super->s_no_segs = super->s_size >> super->s_segshift;
+ super->s_no_blocks = super->s_segsize >> sb->s_blocksize_bits;
+ super->s_feature_incompat = be64_to_cpu(ds->ds_feature_incompat);
+ super->s_feature_ro_compat = be64_to_cpu(ds->ds_feature_ro_compat);
+ super->s_feature_compat = be64_to_cpu(ds->ds_feature_compat);
+ super->s_feature_flags = be64_to_cpu(ds->ds_feature_flags);
+
+ journal_for_each(i)
+ super->s_journal_seg[i] = be32_to_cpu(ds->ds_journal_seg[i]);
+
+ super->s_ifile_levels = ds->ds_ifile_levels;
+ super->s_iblock_levels = ds->ds_iblock_levels;
+ super->s_data_levels = ds->ds_data_levels;
+ super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels
+ + super->s_data_levels;
+ page_cache_release(page);
+ return 0;
+}
+
+static int logfs_read_sb(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+ int ret;
+
+ super->s_btree_pool = mempool_create(32, btree_alloc, btree_free, NULL);
+ if (!super->s_btree_pool)
+ return -ENOMEM;
+
+ btree_init_mempool64(&super->s_shadow_tree.new, super->s_btree_pool);
+ btree_init_mempool64(&super->s_shadow_tree.old, super->s_btree_pool);
+
+ ret = logfs_init_mapping(sb);
+ if (ret)
+ return ret;
+
+ ret = __logfs_read_sb(sb);
+ if (ret)
+ return ret;
+
+ mutex_init(&super->s_dirop_mutex);
+ mutex_init(&super->s_object_alias_mutex);
+ INIT_LIST_HEAD(&super->s_freeing_list);
+
+ ret = logfs_init_rw(sb);
+ if (ret)
+ return ret;
+
+ ret = logfs_init_areas(sb);
+ if (ret)
+ return ret;
+
+ ret = logfs_init_gc(sb);
+ if (ret)
+ return ret;
+
+ ret = logfs_init_journal(sb);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void logfs_kill_sb(struct super_block *sb)
+{
+ struct logfs_super *super = logfs_super(sb);
+
+ log_super("LogFS: Start unmounting\n");
+ /* Alias entries slow down mount, so evict as many as possible */
+ sync_filesystem(sb);
+ logfs_write_anchor(super->s_master_inode);
+
+ /*
+ * From this point on alias entries are simply dropped - and any
+ * writes to the object store are considered bugs.
+ */
+ super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN;
+ log_super("LogFS: Now in shutdown\n");
+ generic_shutdown_super(sb);
+
+ BUG_ON(super->s_dirty_used_bytes || super->s_dirty_free_bytes);
+
+ logfs_cleanup_gc(sb);
+ logfs_cleanup_journal(sb);
+ logfs_cleanup_areas(sb);
+ logfs_cleanup_rw(sb);
+ super->s_devops->put_device(sb);
+ mempool_destroy(super->s_btree_pool);
+ mempool_destroy(super->s_alias_pool);
+ kfree(super);
+ log_super("LogFS: Finished unmounting\n");
+}
+
+int logfs_get_sb_device(struct file_system_type *type, int flags,
+ struct mtd_info *mtd, struct block_device *bdev,
+ const struct logfs_device_ops *devops, struct vfsmount *mnt)
+{
+ struct logfs_super *super;
+ struct super_block *sb;
+ int err = -ENOMEM;
+ static int mount_count;
+
+ log_super("LogFS: Start mount %x\n", mount_count++);
+ super = kzalloc(sizeof(*super), GFP_KERNEL);
+ if (!super)
+ goto err0;
+
+ super->s_mtd = mtd;
+ super->s_bdev = bdev;
+ err = -EINVAL;
+ sb = sget(type, logfs_sb_test, logfs_sb_set, super);
+ if (IS_ERR(sb))
+ goto err0;
+
+ if (sb->s_root) {
+ /* Device is already in use */
+ err = 0;
+ simple_set_mnt(mnt, sb);
+ goto err0;
+ }
+
+ super->s_devops = devops;
+
+ /*
+ * sb->s_maxbytes is limited to 8TB. On 32bit systems, the page cache
+ * only covers 16TB and the upper 8TB are used for indirect blocks.
+ * On 64bit system we could bump up the limit, but that would make
+ * the filesystem incompatible with 32bit systems.
+ */
+ sb->s_maxbytes = (1ull << 43) - 1;
+ sb->s_op = &logfs_super_operations;
+ sb->s_flags = flags | MS_NOATIME;
+
+ err = logfs_read_sb(sb);
+ if (err)
+ goto err1;
+
+ sb->s_flags |= MS_ACTIVE;
+ err = logfs_get_sb_final(sb, mnt);
+ if (err)
+ goto err1;
+ return 0;
+
+err1:
+ up_write(&sb->s_umount);
+ deactivate_super(sb);
+ return err;
+err0:
+ kfree(super);
+ //devops->put_device(sb);
+ return err;
+}
+
+static int logfs_get_sb(struct file_system_type *type, int flags,
+ const char *devname, void *data, struct vfsmount *mnt)
+{
+ ulong mtdnr;
+
+ if (!devname)
+ return logfs_get_sb_bdev(type, flags, devname, mnt);
+ if (strncmp(devname, "mtd", 3))
+ return logfs_get_sb_bdev(type, flags, devname, mnt);
+
+ {
+ char *garbage;
+ mtdnr = simple_strtoul(devname+3, &garbage, 0);
+ if (*garbage)
+ return -EINVAL;
+ }
+
+ return logfs_get_sb_mtd(type, flags, mtdnr, mnt);
+}
+
+static struct file_system_type logfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "logfs",
+ .get_sb = logfs_get_sb,
+ .kill_sb = logfs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+
+};
+
+static int __init logfs_init(void)
+{
+ int ret;
+
+ emergency_page = alloc_pages(GFP_KERNEL, 0);
+ if (!emergency_page)
+ return -ENOMEM;
+
+ ret = logfs_compr_init();
+ if (ret)
+ goto out1;
+
+ ret = logfs_init_inode_cache();
+ if (ret)
+ goto out2;
+
+ return register_filesystem(&logfs_fs_type);
+out2:
+ logfs_compr_exit();
+out1:
+ __free_pages(emergency_page, 0);
+ return ret;
+}
+
+static void __exit logfs_exit(void)
+{
+ unregister_filesystem(&logfs_fs_type);
+ logfs_destroy_inode_cache();
+ logfs_compr_exit();
+ __free_pages(emergency_page, 0);
+}
+
+module_init(logfs_init);
+module_exit(logfs_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
+MODULE_DESCRIPTION("scalable flash filesystem");
diff --git a/fs/mpage.c b/fs/mpage.c
index 42381bd6543b..598d54e200eb 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -561,7 +561,7 @@ page_is_mapped:
if (page->index >= end_index) {
/*
* The page straddles i_size. It must be zeroed out on each
- * and every writepage invokation because it may be mmapped.
+ * and every writepage invocation because it may be mmapped.
* "A file is mapped in multiples of the page size. For a file
* that is not a multiple of the page size, the remaining memory
* is zeroed when mapped, and writes to that region are not
diff --git a/fs/namei.c b/fs/namei.c
index b55440baf7ab..38d4653a88fb 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2557,7 +2557,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname
* e) conversion from fhandle to dentry may come in the wrong moment - when
* we are removing the target. Solution: we will have to grab ->i_mutex
* in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
- * ->i_mutex on parents, which works but leads to some truely excessive
+ * ->i_mutex on parents, which works but leads to some truly excessive
* locking].
*/
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
diff --git a/fs/namespace.c b/fs/namespace.c
index 7d70d63ceb29..356d6cfff3a7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -29,6 +29,7 @@
#include <linux/log2.h>
#include <linux/idr.h>
#include <linux/fs_struct.h>
+#include <linux/fsnotify.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
@@ -150,6 +151,9 @@ struct vfsmount *alloc_vfsmnt(const char *name)
INIT_LIST_HEAD(&mnt->mnt_share);
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
+#ifdef CONFIG_FSNOTIFY
+ INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
+#endif
#ifdef CONFIG_SMP
mnt->mnt_writers = alloc_percpu(int);
if (!mnt->mnt_writers)
@@ -610,6 +614,7 @@ static inline void __mntput(struct vfsmount *mnt)
* provides barriers, so count_mnt_writers() below is safe. AV
*/
WARN_ON(count_mnt_writers(mnt));
+ fsnotify_vfsmount_delete(mnt);
dput(mnt->mnt_root);
free_vfsmnt(mnt);
deactivate_super(sb);
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ee77713ce68b..d0b060aec80f 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -890,6 +890,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *
server->backing_dev_info.name = "nfs";
server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;
+ server->backing_dev_info.capabilities |= BDI_CAP_ACCT_UNSTABLE;
if (server->wsize > max_rpc_payload)
server->wsize = max_rpc_payload;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2c5ace4f00a7..3c7f03b669fb 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1615,6 +1615,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
new_dentry = dentry;
+ rehash = NULL;
new_inode = NULL;
}
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6b891328f332..67e50ac9b84a 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -526,6 +526,7 @@ const struct address_space_operations nfs_file_aops = {
.migratepage = nfs_migrate_page,
.launder_page = nfs_launder_page,
.error_remove_page = generic_error_remove_page,
+ .commit_unstable_pages = nfs_commit_unstable_pages,
};
/*
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index faa091865ad0..83417090a090 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -97,22 +97,6 @@ u64 nfs_compat_user_ino64(u64 fileid)
return ino;
}
-int nfs_write_inode(struct inode *inode, int sync)
-{
- int ret;
-
- if (sync) {
- ret = filemap_fdatawait(inode->i_mapping);
- if (ret == 0)
- ret = nfs_commit_inode(inode, FLUSH_SYNC);
- } else
- ret = nfs_commit_inode(inode, 0);
- if (ret >= 0)
- return 0;
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- return ret;
-}
-
void nfs_clear_inode(struct inode *inode)
{
/*
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 29e464d23b32..7bb326fa0add 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -211,7 +211,6 @@ extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
-extern int nfs_write_inode(struct inode *,int);
extern void nfs_clear_inode(struct inode *);
#ifdef CONFIG_NFS_V4
extern void nfs4_clear_inode(struct inode *);
@@ -253,6 +252,8 @@ extern int nfs4_path_walk(struct nfs_server *server,
extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
/* write.c */
+extern int nfs_commit_unstable_pages(struct address_space *mapping,
+ struct writeback_control *wbc);
extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ce907efc5508..805c1a071f1d 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -265,7 +265,6 @@ struct file_system_type nfs_xdev_fs_type = {
static const struct super_operations nfs_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
- .write_inode = nfs_write_inode,
.statfs = nfs_statfs,
.clear_inode = nfs_clear_inode,
.umount_begin = nfs_umount_begin,
@@ -334,7 +333,6 @@ struct file_system_type nfs4_referral_fs_type = {
static const struct super_operations nfs4_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
- .write_inode = nfs_write_inode,
.statfs = nfs_statfs,
.clear_inode = nfs4_clear_inode,
.umount_begin = nfs_umount_begin,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d171696017f4..7f1f2aa39f18 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -440,8 +440,8 @@ nfs_mark_request_commit(struct nfs_page *req)
NFS_PAGE_TAG_COMMIT);
spin_unlock(&inode->i_lock);
inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
- inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
- __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_UNSTABLE);
+ mark_inode_unstable_pages(inode);
}
static int
@@ -451,7 +451,7 @@ nfs_clear_request_commit(struct nfs_page *req)
if (test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) {
dec_zone_page_state(page, NR_UNSTABLE_NFS);
- dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
+ dec_bdi_stat(page->mapping->backing_dev_info, BDI_UNSTABLE);
return 1;
}
return 0;
@@ -1322,7 +1322,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
nfs_mark_request_commit(req);
dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
- BDI_RECLAIMABLE);
+ BDI_UNSTABLE);
nfs_clear_page_tag_locked(req);
}
return -ENOMEM;
@@ -1406,11 +1406,42 @@ int nfs_commit_inode(struct inode *inode, int how)
}
return res;
}
+
+int nfs_commit_unstable_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct inode *inode = mapping->host;
+ int flags = FLUSH_SYNC;
+ int ret;
+
+ /* Don't commit yet if this is a non-blocking flush and there are
+ * outstanding writes for this mapping.
+ */
+ if (!wbc->force_commit_unstable && wbc->sync_mode != WB_SYNC_ALL &&
+ radix_tree_tagged(&NFS_I(inode)->nfs_page_tree,
+ NFS_PAGE_TAG_LOCKED)) {
+ mark_inode_unstable_pages(inode);
+ return 0;
+ }
+ if (wbc->nonblocking || wbc->for_background)
+ flags = 0;
+ ret = nfs_commit_inode(inode, flags);
+ if (ret > 0)
+ ret = 0;
+ return ret;
+}
+
#else
static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
return 0;
}
+
+int nfs_commit_unstable_pages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ return 0;
+}
#endif
long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index a8587e90fd5a..c458fb11c957 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1434,7 +1434,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
}
op->opnum = ntohl(*argp->p++);
- if (op->opnum >= OP_ACCESS && op->opnum < ops->nops)
+ if (op->opnum >= FIRST_NFS4_OP && op->opnum <= LAST_NFS4_OP)
op->status = ops->decoders[op->opnum](argp, &op->u);
else {
op->opnum = OP_ILLEGAL;
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 7c2e337d05af..9c9437aa58b1 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -769,43 +769,25 @@ nfsd_close(struct file *filp)
}
/*
- * Sync a file
- * As this calls fsync (not fdatasync) there is no need for a write_inode
- * after it.
+ * Sync a directory to disk.
+ *
+ * We can't just call vfs_fsync because our requirements are slightly odd:
+ *
+ * a) we do not have a file struct available
+ * b) we expect to have i_mutex already held by the caller
*/
-static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
- const struct file_operations *fop)
-{
- struct inode *inode = dp->d_inode;
- int (*fsync) (struct file *, struct dentry *, int);
- int err;
-
- err = filemap_fdatawrite(inode->i_mapping);
- if (err == 0 && fop && (fsync = fop->fsync))
- err = fsync(filp, dp, 0);
- if (err == 0)
- err = filemap_fdatawait(inode->i_mapping);
-
- return err;
-}
-
-static int
-nfsd_sync(struct file *filp)
+int
+nfsd_sync_dir(struct dentry *dentry)
{
- int err;
- struct inode *inode = filp->f_path.dentry->d_inode;
- dprintk("nfsd: sync file %s\n", filp->f_path.dentry->d_name.name);
- mutex_lock(&inode->i_mutex);
- err=nfsd_dosync(filp, filp->f_path.dentry, filp->f_op);
- mutex_unlock(&inode->i_mutex);
+ struct inode *inode = dentry->d_inode;
+ int error;
- return err;
-}
+ WARN_ON(!mutex_is_locked(&inode->i_mutex));
-int
-nfsd_sync_dir(struct dentry *dp)
-{
- return nfsd_dosync(NULL, dp, dp->d_inode->i_fop);
+ error = filemap_write_and_wait(inode->i_mapping);
+ if (!error && inode->i_fop->fsync)
+ error = inode->i_fop->fsync(NULL, dentry, 0);
+ return error;
}
/*
@@ -964,7 +946,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
nfsdstats.io_read += host_err;
*count = host_err;
err = 0;
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
} else
err = nfserrno(host_err);
out:
@@ -1011,7 +993,7 @@ static int wait_for_concurrent_writes(struct file *file)
if (inode->i_state & I_DIRTY) {
dprintk("nfsd: write sync %d\n", task_pid_nr(current));
- err = nfsd_sync(file);
+ err = vfs_fsync(file, file->f_path.dentry, 0);
}
last_ino = inode->i_ino;
last_dev = inode->i_sb->s_dev;
@@ -1075,7 +1057,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
goto out_nfserr;
*cnt = host_err;
nfsdstats.io_write += host_err;
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
/* clear setuid/setgid flag after write */
if (inode->i_mode & (S_ISUID | S_ISGID))
@@ -1180,7 +1162,7 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
return err;
if (EX_ISSYNC(fhp->fh_export)) {
if (file->f_op && file->f_op->fsync) {
- err = nfserrno(nfsd_sync(file));
+ err = nfserrno(vfs_fsync(file, file->f_path.dentry, 0));
} else {
err = nfserr_notsupp;
}
diff --git a/fs/notify/Kconfig b/fs/notify/Kconfig
index dffbb0911d02..22c629eedd82 100644
--- a/fs/notify/Kconfig
+++ b/fs/notify/Kconfig
@@ -3,3 +3,4 @@ config FSNOTIFY
source "fs/notify/dnotify/Kconfig"
source "fs/notify/inotify/Kconfig"
+source "fs/notify/fanotify/Kconfig"
diff --git a/fs/notify/Makefile b/fs/notify/Makefile
index 0922cc826c46..ae5f33a6d868 100644
--- a/fs/notify/Makefile
+++ b/fs/notify/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o
+obj-$(CONFIG_FSNOTIFY) += fsnotify.o notification.o group.o inode_mark.o \
+ mark.o vfsmount_mark.o
obj-y += dnotify/
obj-y += inotify/
+obj-y += fanotify/
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 7e54e52964dd..6624c2ee8786 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -29,17 +29,17 @@
int dir_notify_enable __read_mostly = 1;
static struct kmem_cache *dnotify_struct_cache __read_mostly;
-static struct kmem_cache *dnotify_mark_entry_cache __read_mostly;
+static struct kmem_cache *dnotify_mark_cache __read_mostly;
static struct fsnotify_group *dnotify_group __read_mostly;
static DEFINE_MUTEX(dnotify_mark_mutex);
/*
- * dnotify will attach one of these to each inode (i_fsnotify_mark_entries) which
+ * dnotify will attach one of these to each inode (i_fsnotify_marks) which
* is being watched by dnotify. If multiple userspace applications are watching
* the same directory with dnotify their information is chained in dn
*/
-struct dnotify_mark_entry {
- struct fsnotify_mark_entry fsn_entry;
+struct dnotify_mark {
+ struct fsnotify_mark fsn_mark;
struct dnotify_struct *dn;
};
@@ -51,27 +51,27 @@ struct dnotify_mark_entry {
* it calls the fsnotify function so it can update the set of all events relevant
* to this inode.
*/
-static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
+static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)
{
__u32 new_mask, old_mask;
struct dnotify_struct *dn;
- struct dnotify_mark_entry *dnentry = container_of(entry,
- struct dnotify_mark_entry,
- fsn_entry);
+ struct dnotify_mark *dn_mark = container_of(fsn_mark,
+ struct dnotify_mark,
+ fsn_mark);
- assert_spin_locked(&entry->lock);
+ assert_spin_locked(&fsn_mark->lock);
- old_mask = entry->mask;
+ old_mask = fsn_mark->mask;
new_mask = 0;
- for (dn = dnentry->dn; dn != NULL; dn = dn->dn_next)
+ for (dn = dn_mark->dn; dn != NULL; dn = dn->dn_next)
new_mask |= (dn->dn_mask & ~FS_DN_MULTISHOT);
- entry->mask = new_mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, new_mask);
if (old_mask == new_mask)
return;
- if (entry->inode)
- fsnotify_recalc_inode_mask(entry->inode);
+ if (fsn_mark->i.inode)
+ fsnotify_recalc_inode_mask(fsn_mark->i.inode);
}
/*
@@ -85,8 +85,8 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark_entry *entry)
static int dnotify_handle_event(struct fsnotify_group *group,
struct fsnotify_event *event)
{
- struct fsnotify_mark_entry *entry = NULL;
- struct dnotify_mark_entry *dnentry;
+ struct fsnotify_mark *fsn_mark = NULL;
+ struct dnotify_mark *dn_mark;
struct inode *to_tell;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
@@ -95,17 +95,13 @@ static int dnotify_handle_event(struct fsnotify_group *group,
to_tell = event->to_tell;
- spin_lock(&to_tell->i_lock);
- entry = fsnotify_find_mark_entry(group, to_tell);
- spin_unlock(&to_tell->i_lock);
-
- /* unlikely since we alreay passed dnotify_should_send_event() */
- if (unlikely(!entry))
+ fsn_mark = fsnotify_find_inode_mark(group, to_tell);
+ if (unlikely(!fsn_mark))
return 0;
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
- spin_lock(&entry->lock);
- prev = &dnentry->dn;
+ spin_lock(&fsn_mark->lock);
+ prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_mask & test_mask) == 0) {
prev = &dn->dn_next;
@@ -118,12 +114,12 @@ static int dnotify_handle_event(struct fsnotify_group *group,
else {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
}
}
- spin_unlock(&entry->lock);
- fsnotify_put_mark(entry);
+ spin_unlock(&fsn_mark->lock);
+ fsnotify_put_mark(fsn_mark);
return 0;
}
@@ -133,9 +129,10 @@ static int dnotify_handle_event(struct fsnotify_group *group,
* userspace notification for that pair.
*/
static bool dnotify_should_send_event(struct fsnotify_group *group,
- struct inode *inode, __u32 mask)
+ struct inode *inode, struct vfsmount *mnt,
+ __u32 mask, void *data, int data_type)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *fsn_mark;
bool send;
/* !dir_notify_enable should never get here, don't waste time checking
@@ -146,31 +143,27 @@ static bool dnotify_should_send_event(struct fsnotify_group *group,
if (!S_ISDIR(inode->i_mode))
return false;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
-
- /* no mark means no dnotify watch */
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD);
- send = (mask & entry->mask);
+ send = (mask & fsn_mark->mask);
- fsnotify_put_mark(entry); /* matches fsnotify_find_mark_entry */
+ fsnotify_put_mark(fsn_mark); /* matches fsnotify_find_inode_mark */
return send;
}
-static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
+static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)
{
- struct dnotify_mark_entry *dnentry = container_of(entry,
- struct dnotify_mark_entry,
- fsn_entry);
+ struct dnotify_mark *dn_mark = container_of(fsn_mark,
+ struct dnotify_mark,
+ fsn_mark);
- BUG_ON(dnentry->dn);
+ BUG_ON(dn_mark->dn);
- kmem_cache_free(dnotify_mark_entry_cache, dnentry);
+ kmem_cache_free(dnotify_mark_cache, dn_mark);
}
static struct fsnotify_ops dnotify_fsnotify_ops = {
@@ -183,15 +176,15 @@ static struct fsnotify_ops dnotify_fsnotify_ops = {
/*
* Called every time a file is closed. Looks first for a dnotify mark on the
- * inode. If one is found run all of the ->dn entries attached to that
+ * inode. If one is found run all of the ->dn structures attached to that
* mark for one relevant to this process closing the file and remove that
* dnotify_struct. If that was the last dnotify_struct also remove the
- * fsnotify_mark_entry.
+ * fsnotify_mark.
*/
void dnotify_flush(struct file *filp, fl_owner_t id)
{
- struct fsnotify_mark_entry *entry;
- struct dnotify_mark_entry *dnentry;
+ struct fsnotify_mark *fsn_mark;
+ struct dnotify_mark *dn_mark;
struct dnotify_struct *dn;
struct dnotify_struct **prev;
struct inode *inode;
@@ -200,38 +193,36 @@ void dnotify_flush(struct file *filp, fl_owner_t id)
if (!S_ISDIR(inode->i_mode))
return;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(dnotify_group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+ if (!fsn_mark)
return;
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
mutex_lock(&dnotify_mark_mutex);
- spin_lock(&entry->lock);
- prev = &dnentry->dn;
+ spin_lock(&fsn_mark->lock);
+ prev = &dn_mark->dn;
while ((dn = *prev) != NULL) {
if ((dn->dn_owner == id) && (dn->dn_filp == filp)) {
*prev = dn->dn_next;
kmem_cache_free(dnotify_struct_cache, dn);
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
break;
}
prev = &dn->dn_next;
}
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
/* nothing else could have found us thanks to the dnotify_mark_mutex */
- if (dnentry->dn == NULL)
- fsnotify_destroy_mark_by_entry(entry);
+ if (dn_mark->dn == NULL)
+ fsnotify_destroy_mark(fsn_mark);
fsnotify_recalc_group_mask(dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
}
/* this conversion is done only at watch creation */
@@ -259,16 +250,16 @@ static __u32 convert_arg(unsigned long arg)
/*
* If multiple processes watch the same inode with dnotify there is only one
- * dnotify mark in inode->i_fsnotify_mark_entries but we chain a dnotify_struct
+ * dnotify mark in inode->i_fsnotify_marks but we chain a dnotify_struct
* onto that mark. This function either attaches the new dnotify_struct onto
* that list, or it |= the mask onto an existing dnofiy_struct.
*/
-static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnentry,
+static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark *dn_mark,
fl_owner_t id, int fd, struct file *filp, __u32 mask)
{
struct dnotify_struct *odn;
- odn = dnentry->dn;
+ odn = dn_mark->dn;
while (odn != NULL) {
/* adding more events to existing dnofiy_struct? */
if ((odn->dn_owner == id) && (odn->dn_filp == filp)) {
@@ -283,8 +274,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
dn->dn_fd = fd;
dn->dn_filp = filp;
dn->dn_owner = id;
- dn->dn_next = dnentry->dn;
- dnentry->dn = dn;
+ dn->dn_next = dn_mark->dn;
+ dn_mark->dn = dn;
return 0;
}
@@ -296,8 +287,8 @@ static int attach_dn(struct dnotify_struct *dn, struct dnotify_mark_entry *dnent
*/
int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
{
- struct dnotify_mark_entry *new_dnentry, *dnentry;
- struct fsnotify_mark_entry *new_entry, *entry;
+ struct dnotify_mark *new_dn_mark, *dn_mark;
+ struct fsnotify_mark *new_fsn_mark, *fsn_mark;
struct dnotify_struct *dn;
struct inode *inode;
fl_owner_t id = current->files;
@@ -306,7 +297,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
__u32 mask;
/* we use these to tell if we need to kfree */
- new_entry = NULL;
+ new_fsn_mark = NULL;
dn = NULL;
if (!dir_notify_enable) {
@@ -336,8 +327,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
}
/* new fsnotify mark, we expect most fcntl calls to add a new mark */
- new_dnentry = kmem_cache_alloc(dnotify_mark_entry_cache, GFP_KERNEL);
- if (!new_dnentry) {
+ new_dn_mark = kmem_cache_alloc(dnotify_mark_cache, GFP_KERNEL);
+ if (!new_dn_mark) {
error = -ENOMEM;
goto out_err;
}
@@ -345,29 +336,27 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/* convert the userspace DN_* "arg" to the internal FS_* defines in fsnotify */
mask = convert_arg(arg);
- /* set up the new_entry and new_dnentry */
- new_entry = &new_dnentry->fsn_entry;
- fsnotify_init_mark(new_entry, dnotify_free_mark);
- new_entry->mask = mask;
- new_dnentry->dn = NULL;
+ /* set up the new_fsn_mark and new_dn_mark */
+ new_fsn_mark = &new_dn_mark->fsn_mark;
+ fsnotify_init_mark(new_fsn_mark, dnotify_free_mark);
+ new_fsn_mark->mask = mask;
+ new_dn_mark->dn = NULL;
/* this is needed to prevent the fcntl/close race described below */
mutex_lock(&dnotify_mark_mutex);
- /* add the new_entry or find an old one. */
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(dnotify_group, inode);
- spin_unlock(&inode->i_lock);
- if (entry) {
- dnentry = container_of(entry, struct dnotify_mark_entry, fsn_entry);
- spin_lock(&entry->lock);
+ /* add the new_fsn_mark or find an old one. */
+ fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode);
+ if (fsn_mark) {
+ dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);
+ spin_lock(&fsn_mark->lock);
} else {
- fsnotify_add_mark(new_entry, dnotify_group, inode);
- spin_lock(&new_entry->lock);
- entry = new_entry;
- dnentry = new_dnentry;
- /* we used new_entry, so don't free it */
- new_entry = NULL;
+ fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0);
+ spin_lock(&new_fsn_mark->lock);
+ fsn_mark = new_fsn_mark;
+ dn_mark = new_dn_mark;
+ /* we used new_fsn_mark, so don't free it */
+ new_fsn_mark = NULL;
}
rcu_read_lock();
@@ -376,17 +365,17 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
/* if (f != filp) means that we lost a race and another task/thread
* actually closed the fd we are still playing with before we grabbed
- * the dnotify_mark_mutex and entry->lock. Since closing the fd is the
- * only time we clean up the mark entries we need to get our mark off
+ * the dnotify_mark_mutex and fsn_mark->lock. Since closing the fd is the
+ * only time we clean up the marks we need to get our mark off
* the list. */
if (f != filp) {
/* if we added ourselves, shoot ourselves, it's possible that
- * the flush actually did shoot this entry. That's fine too
+ * the flush actually did shoot this fsn_mark. That's fine too
* since multiple calls to destroy_mark is perfectly safe, if
- * we found a dnentry already attached to the inode, just sod
+ * we found a dn_mark already attached to the inode, just sod
* off silently as the flush at close time dealt with it.
*/
- if (dnentry == new_dnentry)
+ if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
@@ -394,13 +383,13 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
if (error) {
/* if we added, we must shoot */
- if (dnentry == new_dnentry)
+ if (dn_mark == new_dn_mark)
destroy = 1;
goto out;
}
- error = attach_dn(dn, dnentry, id, fd, filp, mask);
- /* !error means that we attached the dn to the dnentry, so don't free it */
+ error = attach_dn(dn, dn_mark, id, fd, filp, mask);
+ /* !error means that we attached the dn to the dn_mark, so don't free it */
if (!error)
dn = NULL;
/* -EEXIST means that we didn't add this new dn and used an old one.
@@ -408,20 +397,20 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
else if (error == -EEXIST)
error = 0;
- dnotify_recalc_inode_mask(entry);
+ dnotify_recalc_inode_mask(fsn_mark);
out:
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
if (destroy)
- fsnotify_destroy_mark_by_entry(entry);
+ fsnotify_destroy_mark(fsn_mark);
fsnotify_recalc_group_mask(dnotify_group);
mutex_unlock(&dnotify_mark_mutex);
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
out_err:
- if (new_entry)
- fsnotify_put_mark(new_entry);
+ if (new_fsn_mark)
+ fsnotify_put_mark(new_fsn_mark);
if (dn)
kmem_cache_free(dnotify_struct_cache, dn);
return error;
@@ -430,10 +419,9 @@ out_err:
static int __init dnotify_init(void)
{
dnotify_struct_cache = KMEM_CACHE(dnotify_struct, SLAB_PANIC);
- dnotify_mark_entry_cache = KMEM_CACHE(dnotify_mark_entry, SLAB_PANIC);
+ dnotify_mark_cache = KMEM_CACHE(dnotify_mark, SLAB_PANIC);
- dnotify_group = fsnotify_obtain_group(DNOTIFY_GROUP_NUM,
- 0, &dnotify_fsnotify_ops);
+ dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
panic("unable to allocate fsnotify group for dnotify\n");
return 0;
diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig
new file mode 100644
index 000000000000..566de30395c2
--- /dev/null
+++ b/fs/notify/fanotify/Kconfig
@@ -0,0 +1,26 @@
+config FANOTIFY
+ bool "Filesystem wide access notification"
+ select FSNOTIFY
+ select ANON_INODES
+ default y
+ ---help---
+ Say Y here to enable fanotify suport. fanotify is a file access
+ notification system which differs from inotify in that it sends
+ and open file descriptor to the userspace listener along with
+ the event.
+
+ If unsure, say Y.
+
+config FANOTIFY_ACCESS_PERMISSIONS
+ bool "fanotify permissions checking"
+ depends on FANOTIFY
+ depends on SECURITY
+ default n
+ ---help---
+ Say Y here is you want fanotify listeners to be able to make permissions
+ decisions concerning filesystem events. This is used by some fanotify
+ listeners which need to scan files before allowing the system access to
+ use those files. This is used by some anti-malware vendors and by some
+ hierarchical storage managent systems.
+
+ If unsure, say N.
diff --git a/fs/notify/fanotify/Makefile b/fs/notify/fanotify/Makefile
new file mode 100644
index 000000000000..0999213e7e6e
--- /dev/null
+++ b/fs/notify/fanotify/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_FANOTIFY) += fanotify.o fanotify_user.o
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
new file mode 100644
index 000000000000..bbcfccd4a8ea
--- /dev/null
+++ b/fs/notify/fanotify/fanotify.c
@@ -0,0 +1,255 @@
+#include <linux/fanotify.h>
+#include <linux/fdtable.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h> /* UINT_MAX */
+#include <linux/mount.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new)
+{
+ pr_debug("%s: old=%p new=%p\n", __func__, old, new);
+
+ if (old->to_tell == new->to_tell &&
+ old->data_type == new->data_type &&
+ old->tgid == new->tgid) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+ case (FSNOTIFY_EVENT_NONE):
+ return true;
+ default:
+ BUG();
+ };
+ }
+ return false;
+}
+
+/* Note, if we return an event in *arg that a reference is being held... */
+static int fanotify_merge(struct list_head *list,
+ struct fsnotify_event *event,
+ void **arg)
+{
+ struct fsnotify_event_holder *test_holder;
+ struct fsnotify_event *test_event;
+ struct fsnotify_event *new_event;
+ struct fsnotify_event **return_event = (struct fsnotify_event **)arg;
+ int ret = 0;
+
+ pr_debug("%s: list=%p event=%p\n", __func__, list, event);
+
+ *return_event = NULL;
+
+ /* and the list better be locked by something too! */
+
+ list_for_each_entry_reverse(test_holder, list, event_list) {
+ test_event = test_holder->event;
+ if (should_merge(test_event, event)) {
+ fsnotify_get_event(test_event);
+ *return_event = test_event;
+
+ ret = -EEXIST;
+ /* if they are exactly the same we are done */
+ if (test_event->mask == event->mask)
+ goto out;
+
+ /*
+ * if the refcnt == 1 this is the only queue
+ * for this event and so we can update the mask
+ * in place.
+ */
+ if (atomic_read(&test_event->refcnt) == 1) {
+ test_event->mask |= event->mask;
+ goto out;
+ }
+
+ /* can't allocate memory, merge was no possible */
+ new_event = fsnotify_clone_event(test_event);
+ if (unlikely(!new_event)) {
+ ret = 0;
+ goto out;
+ }
+
+ /* we didn't return the test_event, so drop that ref */
+ fsnotify_put_event(test_event);
+ /* the reference we return on new_event is from clone */
+ *return_event = new_event;
+
+ /* build new event and replace it on the list */
+ new_event->mask = (test_event->mask | event->mask);
+ fsnotify_replace_event(test_holder, new_event);
+
+ break;
+ }
+ }
+out:
+ return ret;
+}
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+static int fanotify_get_response_from_access(struct fsnotify_group *group,
+ struct fsnotify_event *event)
+{
+ int ret;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ wait_event(group->fanotify_data.access_waitq, event->response);
+
+ /* userspace responded, convert to something usable */
+ spin_lock(&event->lock);
+ switch (event->response) {
+ case FAN_ALLOW:
+ ret = 0;
+ break;
+ case FAN_DENY:
+ default:
+ ret = -EPERM;
+ }
+ event->response = 0;
+ spin_unlock(&event->lock);
+
+ pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,
+ group, event, ret);
+
+ return ret;
+}
+#endif
+
+static int fanotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+ int ret;
+ struct fsnotify_event *notify_event = NULL;
+
+ BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
+ BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
+ BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
+ BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE);
+ BUILD_BUG_ON(FAN_OPEN != FS_OPEN);
+ BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD);
+ BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW);
+ BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM);
+ BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM);
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ ret = fsnotify_add_notify_event(group, event, NULL, fanotify_merge,
+ (void **)&notify_event);
+ /* -EEXIST means this event was merged with another, not that it was an error */
+ if (ret == -EEXIST)
+ ret = 0;
+ if (ret)
+ goto out;
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ if (event->mask & FAN_ALL_PERM_EVENTS) {
+ /* if we merged we need to wait on the new event */
+ if (notify_event)
+ event = notify_event;
+ ret = fanotify_get_response_from_access(group, event);
+ }
+#endif
+
+out:
+ if (notify_event)
+ fsnotify_put_event(notify_event);
+ return ret;
+}
+
+static bool should_send_vfsmount_event(struct fsnotify_group *group, struct vfsmount *mnt,
+ struct inode *inode, __u32 mask)
+{
+ struct fsnotify_mark *mnt_mark;
+ struct fsnotify_mark *inode_mark;
+
+ pr_debug("%s: group=%p vfsmount=%p mask=%x\n",
+ __func__, group, mnt, mask);
+
+ mnt_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!mnt_mark)
+ return false;
+
+ mask &= mnt_mark->mask;
+ mask &= ~mnt_mark->ignored_mask;
+
+ if (mask) {
+ inode_mark = fsnotify_find_inode_mark(group, inode);
+ if (inode_mark) {
+ mask &= ~inode_mark->ignored_mask;
+ fsnotify_put_mark(inode_mark);
+ }
+ }
+
+ /* find took a reference */
+ fsnotify_put_mark(mnt_mark);
+
+ return mask;
+}
+
+static bool should_send_inode_event(struct fsnotify_group *group, struct inode *inode,
+ __u32 mask)
+{
+ struct fsnotify_mark *fsn_mark;
+
+ pr_debug("%s: group=%p inode=%p mask=%x\n",
+ __func__, group, inode, mask);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+ return false;
+
+ /* if the event is for a child and this inode doesn't care about
+ * events on the child, don't send it! */
+ if ((mask & FS_EVENT_ON_CHILD) &&
+ !(fsn_mark->mask & FS_EVENT_ON_CHILD)) {
+ mask = 0;
+ } else {
+ /*
+ * We care about children, but do we care about this particular
+ * type of event?
+ */
+ mask &= ~FS_EVENT_ON_CHILD;
+ mask &= fsn_mark->mask;
+ mask &= ~fsn_mark->ignored_mask;
+ }
+
+ /* find took a reference */
+ fsnotify_put_mark(fsn_mark);
+
+ return mask;
+}
+
+static bool fanotify_should_send_event(struct fsnotify_group *group, struct inode *to_tell,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
+{
+ pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x data=%p data_type=%d\n",
+ __func__, group, to_tell, mnt, mask, data, data_type);
+
+ /* sorry, fanotify only gives a damn about files and dirs */
+ if (!S_ISREG(to_tell->i_mode) &&
+ !S_ISDIR(to_tell->i_mode))
+ return false;
+
+ /* if we don't have enough info to send an event to userspace say no */
+ if (data_type != FSNOTIFY_EVENT_PATH)
+ return false;
+
+ if (mnt)
+ return should_send_vfsmount_event(group, mnt, to_tell, mask);
+ else
+ return should_send_inode_event(group, to_tell, mask);
+}
+
+const struct fsnotify_ops fanotify_fsnotify_ops = {
+ .handle_event = fanotify_handle_event,
+ .should_send_event = fanotify_should_send_event,
+ .free_group_priv = NULL,
+ .free_event_priv = NULL,
+ .freeing_mark = NULL,
+};
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
new file mode 100644
index 000000000000..7c869fa23ec6
--- /dev/null
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -0,0 +1,776 @@
+#include <linux/fanotify.h>
+#include <linux/fcntl.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/fsnotify_backend.h>
+#include <linux/init.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <asm/ioctls.h>
+
+extern const struct fsnotify_ops fanotify_fsnotify_ops;
+
+static struct kmem_cache *fanotify_mark_cache __read_mostly;
+static struct kmem_cache *fanotify_response_event_cache __read_mostly;
+
+struct fanotify_response_event {
+ struct list_head list;
+ __s32 fd;
+ struct fsnotify_event *event;
+};
+
+/*
+ * Get an fsnotify notification event if one exists and is small
+ * enough to fit in "count". Return an error pointer if the count
+ * is not large enough.
+ *
+ * Called with the group->notification_mutex held.
+ */
+static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
+ size_t count)
+{
+ BUG_ON(!mutex_is_locked(&group->notification_mutex));
+
+ pr_debug("%s: group=%p count=%zd\n", __func__, group, count);
+
+ if (fsnotify_notify_queue_is_empty(group))
+ return NULL;
+
+ if (FAN_EVENT_METADATA_LEN > count)
+ return ERR_PTR(-EINVAL);
+
+ /* held the notification_mutex the whole time, so this is the
+ * same event we peeked above */
+ return fsnotify_remove_notify_event(group);
+}
+
+static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+ int client_fd;
+ struct dentry *dentry;
+ struct vfsmount *mnt;
+ struct file *new_file;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ client_fd = get_unused_fd();
+ if (client_fd < 0)
+ return client_fd;
+
+ if (event->data_type != FSNOTIFY_EVENT_PATH) {
+ WARN_ON(1);
+ put_unused_fd(client_fd);
+ return -EINVAL;
+ }
+
+ /*
+ * we need a new file handle for the userspace program so it can read even if it was
+ * originally opened O_WRONLY.
+ */
+ dentry = dget(event->path.dentry);
+ mnt = mntget(event->path.mnt);
+ /* it's possible this event was an overflow event. in that case dentry and mnt
+ * are NULL; That's fine, just don't call dentry open */
+ if (dentry && mnt)
+ new_file = dentry_open(dentry, mnt,
+ O_RDONLY | O_LARGEFILE | FMODE_NONOTIFY,
+ current_cred());
+ else
+ new_file = ERR_PTR(-EOVERFLOW);
+ if (IS_ERR(new_file)) {
+ /*
+ * we still send an event even if we can't open the file. this
+ * can happen when say tasks are gone and we try to open their
+ * /proc files or we try to open a WRONLY file like in sysfs
+ * we just send the errno to userspace since there isn't much
+ * else we can do.
+ */
+ put_unused_fd(client_fd);
+ client_fd = PTR_ERR(new_file);
+ } else {
+ fd_install(client_fd, new_file);
+ }
+
+ return client_fd;
+}
+
+static ssize_t fill_event_metadata(struct fsnotify_group *group,
+ struct fanotify_event_metadata *metadata,
+ struct fsnotify_event *event)
+{
+ pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
+ group, metadata, event);
+
+ metadata->event_len = FAN_EVENT_METADATA_LEN;
+ metadata->vers = FANOTIFY_METADATA_VERSION;
+ metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+ metadata->pid = pid_vnr(event->tgid);
+ metadata->fd = create_fd(group, event);
+
+ return metadata->fd;
+}
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
+ __s32 fd)
+{
+ struct fanotify_response_event *re, *return_re = NULL;
+
+ mutex_lock(&group->fanotify_data.access_mutex);
+ list_for_each_entry(re, &group->fanotify_data.access_list, list) {
+ if (re->fd != fd)
+ continue;
+
+ list_del_init(&re->list);
+ return_re = re;
+ break;
+ }
+ mutex_unlock(&group->fanotify_data.access_mutex);
+
+ pr_debug("%s: found return_re=%p\n", __func__, return_re);
+
+ return return_re;
+}
+
+static int process_access_response(struct fsnotify_group *group,
+ struct fanotify_response *response_struct)
+{
+ struct fanotify_response_event *re;
+ __s32 fd = response_struct->fd;
+ __u32 response = response_struct->response;
+
+ pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
+ fd, response);
+ /*
+ * make sure the response is valid, if invalid we do nothing and either
+ * userspace can send a valid responce or we will clean it up after the
+ * timeout
+ */
+ switch (response) {
+ case FAN_ALLOW:
+ case FAN_DENY:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (fd < 0)
+ return -EINVAL;
+
+ re = dequeue_re(group, fd);
+ if (!re)
+ return -ENOENT;
+
+ re->event->response = response;
+
+ wake_up(&group->fanotify_data.access_waitq);
+
+ kmem_cache_free(fanotify_response_event_cache, re);
+
+ return 0;
+}
+
+static int prepare_for_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ struct fanotify_response_event *re;
+
+ if (!(event->mask & FAN_ALL_PERM_EVENTS))
+ return 0;
+
+ re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
+ if (!re)
+ return -ENOMEM;
+
+ re->event = event;
+ re->fd = fd;
+
+ mutex_lock(&group->fanotify_data.access_mutex);
+ list_add_tail(&re->list, &group->fanotify_data.access_list);
+ mutex_unlock(&group->fanotify_data.access_mutex);
+
+ return 0;
+}
+
+static void remove_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ struct fanotify_response_event *re;
+
+ if (!(event->mask & FAN_ALL_PERM_EVENTS))
+ return;
+
+ re = dequeue_re(group, fd);
+ if (!re)
+ return;
+
+ BUG_ON(re->event != event);
+
+ kmem_cache_free(fanotify_response_event_cache, re);
+
+ return;
+}
+#else
+static int prepare_for_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ return 0;
+}
+
+static void remove_access_response(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ __s32 fd)
+{
+ return;
+}
+#endif
+
+static ssize_t copy_event_to_user(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ char __user *buf)
+{
+ struct fanotify_event_metadata fanotify_event_metadata;
+ int fd, ret;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
+ fd = fill_event_metadata(group, &fanotify_event_metadata, event);
+ if (fd < 0)
+ return fd;
+
+ ret = prepare_for_access_response(group, event, fd);
+ if (ret)
+ goto out_close_fd;
+
+ ret = -EFAULT;
+ if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
+ goto out_kill_access_response;
+
+ return FAN_EVENT_METADATA_LEN;
+
+out_kill_access_response:
+ remove_access_response(group, event, fd);
+out_close_fd:
+ sys_close(fd);
+ return ret;
+}
+
+/* intofiy userspace file descriptor functions */
+static unsigned int fanotify_poll(struct file *file, poll_table *wait)
+{
+ struct fsnotify_group *group = file->private_data;
+ int ret = 0;
+
+ poll_wait(file, &group->notification_waitq, wait);
+ mutex_lock(&group->notification_mutex);
+ if (!fsnotify_notify_queue_is_empty(group))
+ ret = POLLIN | POLLRDNORM;
+ mutex_unlock(&group->notification_mutex);
+
+ return ret;
+}
+
+static ssize_t fanotify_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct fsnotify_group *group;
+ struct fsnotify_event *kevent;
+ char __user *start;
+ int ret;
+ DEFINE_WAIT(wait);
+
+ start = buf;
+ group = file->private_data;
+
+ pr_debug("%s: group=%p\n", __func__, group);
+
+ while (1) {
+ prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&group->notification_mutex);
+ kevent = get_one_event(group, count);
+ mutex_unlock(&group->notification_mutex);
+
+ if (kevent) {
+ ret = PTR_ERR(kevent);
+ if (IS_ERR(kevent))
+ break;
+ ret = copy_event_to_user(group, kevent, buf);
+ fsnotify_put_event(kevent);
+ if (ret < 0)
+ break;
+ buf += ret;
+ count -= ret;
+ continue;
+ }
+
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
+ break;
+ ret = -EINTR;
+ if (signal_pending(current))
+ break;
+
+ if (start != buf)
+ break;
+
+ schedule();
+ }
+
+ finish_wait(&group->notification_waitq, &wait);
+ if (start != buf && ret != -EFAULT)
+ ret = buf - start;
+ return ret;
+}
+
+static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
+{
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ struct fanotify_response response = { .fd = -1, .response = -1 };
+ struct fsnotify_group *group;
+ int ret;
+
+ group = file->private_data;
+
+ if (count > sizeof(response))
+ count = sizeof(response);
+
+ pr_debug("%s: group=%p count=%zu\n", __func__, group, count);
+
+ if (copy_from_user(&response, buf, count))
+ return -EFAULT;
+
+ ret = process_access_response(group, &response);
+ if (ret < 0)
+ count = ret;
+
+ return count;
+#else
+ return -EINVAL;
+#endif
+}
+
+static int fanotify_release(struct inode *ignored, struct file *file)
+{
+ struct fsnotify_group *group = file->private_data;
+
+ pr_debug("%s: file=%p group=%p\n", __func__, file, group);
+
+ /* matches the fanotify_init->fsnotify_alloc_group */
+ fsnotify_put_group(group);
+
+ return 0;
+}
+
+static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct fsnotify_group *group;
+ struct fsnotify_event_holder *holder;
+ void __user *p;
+ int ret = -ENOTTY;
+ size_t send_len = 0;
+
+ group = file->private_data;
+
+ p = (void __user *) arg;
+
+ switch (cmd) {
+ case FIONREAD:
+ mutex_lock(&group->notification_mutex);
+ list_for_each_entry(holder, &group->notification_list, event_list)
+ send_len += FAN_EVENT_METADATA_LEN;
+ mutex_unlock(&group->notification_mutex);
+ ret = put_user(send_len, (int __user *) p);
+ break;
+ }
+
+ return ret;
+}
+
+static const struct file_operations fanotify_fops = {
+ .poll = fanotify_poll,
+ .read = fanotify_read,
+ .write = fanotify_write,
+ .fasync = NULL,
+ .release = fanotify_release,
+ .unlocked_ioctl = fanotify_ioctl,
+ .compat_ioctl = fanotify_ioctl,
+};
+
+static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
+{
+ kmem_cache_free(fanotify_mark_cache, fsn_mark);
+}
+
+static int fanotify_find_path(int dfd, const char __user *filename,
+ struct path *path, unsigned int flags)
+{
+ int ret;
+
+ pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
+ dfd, filename, flags);
+
+ if (filename == NULL) {
+ struct file *file;
+ int fput_needed;
+
+ ret = -EBADF;
+ file = fget_light(dfd, &fput_needed);
+ if (!file)
+ goto out;
+
+ ret = -ENOTDIR;
+ if ((flags & FAN_MARK_ONLYDIR) &&
+ !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
+ fput_light(file, fput_needed);
+ goto out;
+ }
+
+ *path = file->f_path;
+ path_get(path);
+ fput_light(file, fput_needed);
+ } else {
+ unsigned int lookup_flags = 0;
+
+ if (!(flags & FAN_MARK_DONT_FOLLOW))
+ lookup_flags |= LOOKUP_FOLLOW;
+ if (flags & FAN_MARK_ONLYDIR)
+ lookup_flags |= LOOKUP_DIRECTORY;
+
+ ret = user_path_at(dfd, filename, lookup_flags, path);
+ if (ret)
+ goto out;
+ }
+
+ /* you can only watch an inode if you have read permissions on it */
+ ret = inode_permission(path->dentry->d_inode, MAY_READ);
+ if (ret)
+ path_put(path);
+out:
+ return ret;
+}
+
+static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
+ __u32 mask,
+ unsigned int flags)
+{
+ __u32 oldmask;
+
+ spin_lock(&fsn_mark->lock);
+ if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ oldmask = fsn_mark->mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
+ } else {
+ oldmask = fsn_mark->ignored_mask;
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
+ }
+ spin_unlock(&fsn_mark->lock);
+
+ if (!(oldmask & ~mask))
+ fsnotify_destroy_mark(fsn_mark);
+
+ return mask & oldmask;
+}
+
+static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark = NULL;
+ __u32 removed;
+
+ fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!fsn_mark)
+ return -ENOENT;
+
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (removed & group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (removed & mnt->mnt_fsnotify_mask)
+ fsnotify_recalc_vfsmount_mask(mnt);
+
+ return 0;
+}
+
+static int fanotify_remove_inode_mark(struct fsnotify_group *group,
+ struct inode *inode, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark = NULL;
+ __u32 removed;
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
+ return -ENOENT;
+
+ removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
+ /* matches the fsnotify_find_inode_mark() */
+ fsnotify_put_mark(fsn_mark);
+
+ if (removed & group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (removed & inode->i_fsnotify_mask)
+ fsnotify_recalc_inode_mask(inode);
+
+ return 0;
+}
+
+static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
+ __u32 mask,
+ unsigned int flags)
+{
+ __u32 oldmask;
+
+ spin_lock(&fsn_mark->lock);
+ if (!(flags & FAN_MARK_IGNORED_MASK)) {
+ oldmask = fsn_mark->mask;
+ fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
+ } else {
+ oldmask = fsn_mark->ignored_mask;
+ fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
+ if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
+ fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
+ }
+ spin_unlock(&fsn_mark->lock);
+
+ return mask & ~oldmask;
+}
+
+static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark;
+ __u32 added;
+
+ fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
+ if (!fsn_mark) {
+ int ret;
+
+ fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
+ if (!fsn_mark)
+ return -ENOMEM;
+
+ fsnotify_init_mark(fsn_mark, fanotify_free_mark);
+ ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
+ if (ret) {
+ fanotify_free_mark(fsn_mark);
+ return ret;
+ }
+ }
+ added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (added) {
+ if (added & ~group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (added & ~mnt->mnt_fsnotify_mask)
+ fsnotify_recalc_vfsmount_mask(mnt);
+ }
+ return 0;
+}
+
+static int fanotify_add_inode_mark(struct fsnotify_group *group,
+ struct inode *inode, __u32 mask,
+ unsigned int flags)
+{
+ struct fsnotify_mark *fsn_mark;
+ __u32 added;
+
+ pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
+
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark) {
+ int ret;
+
+ fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
+ if (!fsn_mark)
+ return -ENOMEM;
+
+ fsnotify_init_mark(fsn_mark, fanotify_free_mark);
+ ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
+ if (ret) {
+ fanotify_free_mark(fsn_mark);
+ return ret;
+ }
+ }
+ added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+ fsnotify_put_mark(fsn_mark);
+ if (added) {
+ if (added & ~group->mask)
+ fsnotify_recalc_group_mask(group);
+ if (added & ~inode->i_fsnotify_mask)
+ fsnotify_recalc_inode_mask(inode);
+ }
+ return 0;
+}
+
+/* fanotify syscalls */
+SYSCALL_DEFINE3(fanotify_init, unsigned int, flags, unsigned int, event_f_flags,
+ unsigned int, priority)
+{
+ struct fsnotify_group *group;
+ int f_flags, fd;
+
+ pr_debug("%s: flags=%d event_f_flags=%d priority=%d\n",
+ __func__, flags, event_f_flags, priority);
+
+ if (event_f_flags)
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (flags & ~FAN_ALL_INIT_FLAGS)
+ return -EINVAL;
+
+ f_flags = O_RDWR | FMODE_NONOTIFY;
+ if (flags & FAN_CLOEXEC)
+ f_flags |= O_CLOEXEC;
+ if (flags & FAN_NONBLOCK)
+ f_flags |= O_NONBLOCK;
+
+ /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
+ group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
+ if (IS_ERR(group))
+ return PTR_ERR(group);
+
+ group->priority = priority;
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ mutex_init(&group->fanotify_data.access_mutex);
+ init_waitqueue_head(&group->fanotify_data.access_waitq);
+ INIT_LIST_HEAD(&group->fanotify_data.access_list);
+#endif
+
+ fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
+ if (fd < 0)
+ goto out_put_group;
+
+ return fd;
+
+out_put_group:
+ fsnotify_put_group(group);
+ return fd;
+}
+
+SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
+ __u64 mask, int dfd,
+ const char __user * pathname)
+{
+ struct inode *inode = NULL;
+ struct vfsmount *mnt = NULL;
+ struct fsnotify_group *group;
+ struct file *filp;
+ struct path path;
+ int ret, fput_needed;
+
+ pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
+ __func__, fanotify_fd, flags, dfd, pathname, mask);
+
+ /* we only use the lower 32 bits as of right now. */
+ if (mask & ((__u64)0xffffffff << 32))
+ return -EINVAL;
+
+ if (flags & ~FAN_ALL_MARK_FLAGS)
+ return -EINVAL;
+ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+ case FAN_MARK_ADD:
+ case FAN_MARK_REMOVE:
+ case FAN_MARK_FLUSH:
+ break;
+ default:
+ return -EINVAL;
+ }
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
+#else
+ if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
+#endif
+ return -EINVAL;
+
+ filp = fget_light(fanotify_fd, &fput_needed);
+ if (unlikely(!filp))
+ return -EBADF;
+
+ /* verify that this is indeed an fanotify instance */
+ ret = -EINVAL;
+ if (unlikely(filp->f_op != &fanotify_fops))
+ goto fput_and_out;
+
+ ret = fanotify_find_path(dfd, pathname, &path, flags);
+ if (ret)
+ goto fput_and_out;
+
+ /* inode held in place by reference to path; group by fget on fd */
+ if (!(flags & FAN_MARK_MOUNT))
+ inode = path.dentry->d_inode;
+ else
+ mnt = path.mnt;
+ group = filp->private_data;
+
+ /* create/update an inode mark */
+ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+ case FAN_MARK_ADD:
+ if (flags & FAN_MARK_MOUNT)
+ ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
+ else
+ ret = fanotify_add_inode_mark(group, inode, mask, flags);
+ break;
+ case FAN_MARK_REMOVE:
+ if (flags & FAN_MARK_MOUNT)
+ ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
+ else
+ ret = fanotify_remove_inode_mark(group, inode, mask, flags);
+ break;
+ case FAN_MARK_FLUSH:
+ if (flags & FAN_MARK_MOUNT)
+ fsnotify_clear_vfsmount_marks_by_group(group);
+ else
+ fsnotify_clear_inode_marks_by_group(group);
+ fsnotify_recalc_group_mask(group);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ path_put(&path);
+fput_and_out:
+ fput_light(filp, fput_needed);
+ return ret;
+}
+
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
+ long dfd, long pathname)
+{
+ return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
+ mask, (int) dfd,
+ (const char __user *) pathname);
+}
+SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
+#endif
+
+/*
+ * fanotify_user_setup - Our initialization function. Note that we cannnot return
+ * error because we have compiled-in VFS hooks. So an (unlikely) failure here
+ * must result in panic().
+ */
+static int __init fanotify_user_setup(void)
+{
+ fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
+ fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
+ SLAB_PANIC);
+
+ return 0;
+}
+device_initcall(fanotify_user_setup);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 037e878e03fc..fb7342ae6a80 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -20,6 +20,7 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mount.h>
#include <linux/srcu.h>
#include <linux/fsnotify_backend.h>
@@ -34,6 +35,11 @@ void __fsnotify_inode_delete(struct inode *inode)
}
EXPORT_SYMBOL_GPL(__fsnotify_inode_delete);
+void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{
+ fsnotify_clear_marks_by_mount(mnt);
+}
+
/*
* Given an inode, first check if we care what happens to our children. Inotify
* and dnotify both tell their parents about events. If we care about any event
@@ -77,13 +83,16 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)
}
/* Notify this dentry's parent about a child's events. */
-void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
struct dentry *parent;
struct inode *p_inode;
bool send = false;
bool should_update_children = false;
+ if (!dentry)
+ dentry = path->dentry;
+
if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED))
return;
@@ -114,8 +123,12 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
* specifies these are events which came from a child. */
mask |= FS_EVENT_ON_CHILD;
- fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
- dentry->d_name.name, 0);
+ if (path)
+ fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
+ dentry->d_name.name, 0);
+ else
+ fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
+ dentry->d_name.name, 0);
dput(parent);
}
@@ -126,51 +139,126 @@ void __fsnotify_parent(struct dentry *dentry, __u32 mask)
}
EXPORT_SYMBOL_GPL(__fsnotify_parent);
+void __fsnotify_flush_ignored_mask(struct inode *inode, void *data, int data_is)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *node;
+
+ if (!hlist_empty(&inode->i_fsnotify_marks)) {
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(mark, node, &inode->i_fsnotify_marks, i.i_list) {
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mark->ignored_mask = 0;
+ }
+ spin_unlock(&inode->i_lock);
+ }
+
+ if (data_is == FSNOTIFY_EVENT_PATH) {
+ struct vfsmount *mnt;
+
+ mnt = ((struct path *)data)->mnt;
+ if (mnt && !hlist_empty(&mnt->mnt_fsnotify_marks)) {
+ spin_lock(&mnt->mnt_root->d_lock);
+ hlist_for_each_entry(mark, node, &mnt->mnt_fsnotify_marks, m.m_list) {
+ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY))
+ mark->ignored_mask = 0;
+ }
+ spin_unlock(&mnt->mnt_root->d_lock);
+ }
+ }
+}
+
+static int send_to_group(struct fsnotify_group *group, struct inode *to_tell,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_is, u32 cookie, const char *file_name,
+ struct fsnotify_event **event)
+{
+ if (!group->ops->should_send_event(group, to_tell, mnt, mask,
+ data, data_is))
+ return 0;
+ if (!*event) {
+ *event = fsnotify_create_event(to_tell, mask, data,
+ data_is, file_name,
+ cookie, GFP_KERNEL);
+ if (!*event)
+ return -ENOMEM;
+ }
+ return group->ops->handle_event(group, *event);
+}
+
+static bool needed_by_vfsmount(__u32 test_mask, struct vfsmount *mnt)
+{
+ if (!mnt)
+ return false;
+
+ return (test_mask & mnt->mnt_fsnotify_mask);
+}
+
/*
* This is the main call to fsnotify. The VFS calls into hook specific functions
* in linux/fsnotify.h. Those functions then in turn call here. Here will call
* out to all of the registered fsnotify_group. Those groups can then use the
* notification event in whatever means they feel necessary.
*/
-void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const char *file_name, u32 cookie)
+int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const char *file_name, u32 cookie)
{
struct fsnotify_group *group;
struct fsnotify_event *event = NULL;
- int idx;
+ struct vfsmount *mnt = NULL;
+ int idx, ret = 0;
/* global tests shouldn't care about events on child only the specific event */
__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
- if (list_empty(&fsnotify_groups))
- return;
+ /* if no fsnotify listeners, nothing to do */
+ if (list_empty(&fsnotify_inode_groups) &&
+ list_empty(&fsnotify_vfsmount_groups))
+ return 0;
+
+ if (mask & FS_MODIFY)
+ __fsnotify_flush_ignored_mask(to_tell, data, data_is);
- if (!(test_mask & fsnotify_mask))
- return;
+ /* if none of the directed listeners or vfsmount listeners care */
+ if (!(test_mask & fsnotify_inode_mask) &&
+ !(test_mask & fsnotify_vfsmount_mask))
+ return 0;
+
+ if (data_is == FSNOTIFY_EVENT_PATH)
+ mnt = ((struct path *)data)->mnt;
+
+ /* if this inode's directed listeners don't care and nothing on the vfsmount
+ * listeners list cares, nothing to do */
+ if (!(test_mask & to_tell->i_fsnotify_mask) &&
+ !needed_by_vfsmount(test_mask, mnt))
+ return 0;
- if (!(test_mask & to_tell->i_fsnotify_mask))
- return;
/*
* SRCU!! the groups list is very very much read only and the path is
* very hot. The VAST majority of events are not going to need to do
* anything other than walk the list so it's crazy to pre-allocate.
*/
idx = srcu_read_lock(&fsnotify_grp_srcu);
- list_for_each_entry_rcu(group, &fsnotify_groups, group_list) {
- if (test_mask & group->mask) {
- if (!group->ops->should_send_event(group, to_tell, mask))
- continue;
- if (!event) {
- event = fsnotify_create_event(to_tell, mask, data,
- data_is, file_name, cookie,
- GFP_KERNEL);
- /* shit, we OOM'd and now we can't tell, maybe
- * someday someone else will want to do something
- * here */
- if (!event)
- break;
+
+ if (test_mask & to_tell->i_fsnotify_mask) {
+ list_for_each_entry_rcu(group, &fsnotify_inode_groups, inode_group_list) {
+ if (test_mask & group->mask) {
+ ret = send_to_group(group, to_tell, NULL, mask, data, data_is,
+ cookie, file_name, &event);
+ if (ret)
+ goto out;
}
- group->ops->handle_event(group, event);
}
}
+ if (needed_by_vfsmount(test_mask, mnt)) {
+ list_for_each_entry_rcu(group, &fsnotify_vfsmount_groups, vfsmount_group_list) {
+ if (test_mask & group->mask) {
+ ret = send_to_group(group, to_tell, mnt, mask, data, data_is,
+ cookie, file_name, &event);
+ if (ret)
+ goto out;
+ }
+ }
+ }
+out:
srcu_read_unlock(&fsnotify_grp_srcu, idx);
/*
* fsnotify_create_event() took a reference so the event can't be cleaned
@@ -178,6 +266,8 @@ void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, const
*/
if (event)
fsnotify_put_event(event);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(fsnotify);
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 4dc240824b2d..1be54f6f9e7d 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -8,19 +8,44 @@
/* protects reads of fsnotify_groups */
extern struct srcu_struct fsnotify_grp_srcu;
-/* all groups which receive fsnotify events */
-extern struct list_head fsnotify_groups;
-/* all bitwise OR of all event types (FS_*) for all fsnotify_groups */
-extern __u32 fsnotify_mask;
+/* all groups which receive inode fsnotify events */
+extern struct list_head fsnotify_inode_groups;
+/* all groups which receive vfsmount fsnotify events */
+extern struct list_head fsnotify_vfsmount_groups;
+/* all bitwise OR of all event types (FS_*) for all fsnotify_inode_groups */
+extern __u32 fsnotify_inode_mask;
+/* all bitwise OR of all event types (FS_*) for all fsnotify_vfsmount_groups */
+extern __u32 fsnotify_vfsmount_mask;
/* destroy all events sitting in this groups notification queue */
extern void fsnotify_flush_notify(struct fsnotify_group *group);
+extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
+ __u32 mask);
+/* add a mark to an inode */
+extern int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ int allow_dups);
+/* add a mark to a vfsmount */
+extern int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct vfsmount *mnt,
+ int allow_dups);
+
+/* add a group to the inode group list */
+extern void fsnotify_add_inode_group(struct fsnotify_group *group);
+/* add a group to the vfsmount group list */
+extern void fsnotify_add_vfsmount_group(struct fsnotify_group *group);
/* final kfree of a group */
extern void fsnotify_final_destroy_group(struct fsnotify_group *group);
+/* vfsmount specific destruction of a mark */
+extern void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark);
+/* inode specific destruction of a mark */
+extern void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark);
/* run the list of all marks associated with inode and flag them to be freed */
extern void fsnotify_clear_marks_by_inode(struct inode *inode);
+/* run the list of all marks associated with vfsmount and flag them to be freed */
+extern void fsnotify_clear_marks_by_mount(struct vfsmount *mnt);
/*
* update the dentry->d_flags of all of inode's children to indicate if inode cares
* about events that happen to its children.
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 0e1677144bc5..ada913fd4f7f 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -32,10 +32,14 @@
static DEFINE_MUTEX(fsnotify_grp_mutex);
/* protects reads while running the fsnotify_groups list */
struct srcu_struct fsnotify_grp_srcu;
-/* all groups registered to receive filesystem notifications */
-LIST_HEAD(fsnotify_groups);
+/* all groups registered to receive inode filesystem notifications */
+LIST_HEAD(fsnotify_inode_groups);
+/* all groups registered to receive mount point filesystem notifications */
+LIST_HEAD(fsnotify_vfsmount_groups);
/* bitwise OR of all events (FS_*) interesting to some group on this system */
-__u32 fsnotify_mask;
+__u32 fsnotify_inode_mask;
+/* bitwise OR of all events (FS_*) interesting to some group on this system */
+__u32 fsnotify_vfsmount_mask;
/*
* When a new group registers or changes it's set of interesting events
@@ -44,14 +48,20 @@ __u32 fsnotify_mask;
void fsnotify_recalc_global_mask(void)
{
struct fsnotify_group *group;
- __u32 mask = 0;
+ __u32 inode_mask = 0;
+ __u32 vfsmount_mask = 0;
int idx;
idx = srcu_read_lock(&fsnotify_grp_srcu);
- list_for_each_entry_rcu(group, &fsnotify_groups, group_list)
- mask |= group->mask;
+ list_for_each_entry_rcu(group, &fsnotify_inode_groups, inode_group_list)
+ inode_mask |= group->mask;
+ list_for_each_entry_rcu(group, &fsnotify_vfsmount_groups, vfsmount_group_list)
+ vfsmount_mask |= group->mask;
+
srcu_read_unlock(&fsnotify_grp_srcu, idx);
- fsnotify_mask = mask;
+
+ fsnotify_inode_mask = inode_mask;
+ fsnotify_vfsmount_mask = vfsmount_mask;
}
/*
@@ -64,11 +74,11 @@ void fsnotify_recalc_group_mask(struct fsnotify_group *group)
{
__u32 mask = 0;
__u32 old_mask = group->mask;
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
spin_lock(&group->mark_lock);
- list_for_each_entry(entry, &group->mark_entries, g_list)
- mask |= entry->mask;
+ list_for_each_entry(mark, &group->marks_list, g_list)
+ mask |= mark->mask;
spin_unlock(&group->mark_lock);
group->mask = mask;
@@ -77,13 +87,60 @@ void fsnotify_recalc_group_mask(struct fsnotify_group *group)
fsnotify_recalc_global_mask();
}
-/*
- * Take a reference to a group so things found under the fsnotify_grp_mutex
- * can't get freed under us
- */
-static void fsnotify_get_group(struct fsnotify_group *group)
+void fsnotify_add_vfsmount_group(struct fsnotify_group *group)
{
- atomic_inc(&group->refcnt);
+ struct fsnotify_group *group_iter;
+ unsigned int priority = group->priority;
+
+ mutex_lock(&fsnotify_grp_mutex);
+
+ if (!group->on_vfsmount_group_list) {
+ list_for_each_entry(group_iter, &fsnotify_vfsmount_groups,
+ vfsmount_group_list) {
+ /* insert in front of this one? */
+ if (priority < group_iter->priority) {
+ /* list_add_tail() insert in front of group_iter */
+ list_add_tail_rcu(&group->inode_group_list,
+ &group_iter->inode_group_list);
+ goto out;
+ }
+ }
+
+ /* apparently we need to be the last entry */
+ list_add_tail_rcu(&group->vfsmount_group_list, &fsnotify_vfsmount_groups);
+ }
+out:
+ group->on_vfsmount_group_list = 1;
+
+ mutex_unlock(&fsnotify_grp_mutex);
+}
+
+void fsnotify_add_inode_group(struct fsnotify_group *group)
+{
+ struct fsnotify_group *group_iter;
+ unsigned int priority = group->priority;
+
+ mutex_lock(&fsnotify_grp_mutex);
+
+ /* add to global group list, priority 0 first, UINT_MAX last */
+ if (!group->on_inode_group_list) {
+ list_for_each_entry(group_iter, &fsnotify_inode_groups,
+ inode_group_list) {
+ if (priority < group_iter->priority) {
+ /* list_add_tail() insert in front of group_iter */
+ list_add_tail_rcu(&group->inode_group_list,
+ &group_iter->inode_group_list);
+ goto out;
+ }
+ }
+
+ /* apparently we need to be the last entry */
+ list_add_tail_rcu(&group->inode_group_list, &fsnotify_inode_groups);
+ }
+out:
+ group->on_inode_group_list = 1;
+
+ mutex_unlock(&fsnotify_grp_mutex);
}
/*
@@ -110,7 +167,7 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)
*/
static void fsnotify_destroy_group(struct fsnotify_group *group)
{
- /* clear all inode mark entries for this group */
+ /* clear all inode marks for this group */
fsnotify_clear_marks_by_group(group);
/* past the point of no return, matches the initial value of 1 */
@@ -127,9 +184,12 @@ static void __fsnotify_evict_group(struct fsnotify_group *group)
{
BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
- if (group->on_group_list)
- list_del_rcu(&group->group_list);
- group->on_group_list = 0;
+ if (group->on_inode_group_list)
+ list_del_rcu(&group->inode_group_list);
+ group->on_inode_group_list = 0;
+ if (group->on_vfsmount_group_list)
+ list_del_rcu(&group->vfsmount_group_list);
+ group->on_vfsmount_group_list = 0;
}
/*
@@ -171,84 +231,38 @@ void fsnotify_put_group(struct fsnotify_group *group)
}
/*
- * Simply run the fsnotify_groups list and find a group which matches
- * the given parameters. If a group is found we take a reference to that
- * group.
+ * Create a new fsnotify_group and hold a reference for the group returned.
*/
-static struct fsnotify_group *fsnotify_find_group(unsigned int group_num, __u32 mask,
- const struct fsnotify_ops *ops)
+struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
{
- struct fsnotify_group *group_iter;
- struct fsnotify_group *group = NULL;
-
- BUG_ON(!mutex_is_locked(&fsnotify_grp_mutex));
-
- list_for_each_entry_rcu(group_iter, &fsnotify_groups, group_list) {
- if (group_iter->group_num == group_num) {
- if ((group_iter->mask == mask) &&
- (group_iter->ops == ops)) {
- fsnotify_get_group(group_iter);
- group = group_iter;
- } else
- group = ERR_PTR(-EEXIST);
- }
- }
- return group;
-}
-
-/*
- * Either finds an existing group which matches the group_num, mask, and ops or
- * creates a new group and adds it to the global group list. In either case we
- * take a reference for the group returned.
- */
-struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num, __u32 mask,
- const struct fsnotify_ops *ops)
-{
- struct fsnotify_group *group, *tgroup;
+ struct fsnotify_group *group;
- /* very low use, simpler locking if we just always alloc */
- group = kmalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
+ group = kzalloc(sizeof(struct fsnotify_group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
+ /* set to 0 when there a no external references to this group */
atomic_set(&group->refcnt, 1);
-
- group->on_group_list = 0;
- group->group_num = group_num;
- group->mask = mask;
+ /*
+ * hits 0 when there are no external references AND no marks for
+ * this group
+ */
+ atomic_set(&group->num_marks, 1);
mutex_init(&group->notification_mutex);
INIT_LIST_HEAD(&group->notification_list);
init_waitqueue_head(&group->notification_waitq);
- group->q_len = 0;
group->max_events = UINT_MAX;
- spin_lock_init(&group->mark_lock);
- atomic_set(&group->num_marks, 0);
- INIT_LIST_HEAD(&group->mark_entries);
+ INIT_LIST_HEAD(&group->inode_group_list);
+ INIT_LIST_HEAD(&group->vfsmount_group_list);
- group->ops = ops;
-
- mutex_lock(&fsnotify_grp_mutex);
- tgroup = fsnotify_find_group(group_num, mask, ops);
- if (tgroup) {
- /* group already exists */
- mutex_unlock(&fsnotify_grp_mutex);
- /* destroy the new one we made */
- fsnotify_put_group(group);
- return tgroup;
- }
-
- /* group not found, add a new one */
- list_add_rcu(&group->group_list, &fsnotify_groups);
- group->on_group_list = 1;
- /* being on the fsnotify_groups list holds one num_marks */
- atomic_inc(&group->num_marks);
+ spin_lock_init(&group->mark_lock);
+ INIT_LIST_HEAD(&group->marks_list);
- mutex_unlock(&fsnotify_grp_mutex);
+ group->priority = UINT_MAX;
- if (mask)
- fsnotify_recalc_global_mask();
+ group->ops = ops;
return group;
}
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 3165d85aada2..9b573f97c503 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -16,72 +16,6 @@
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-/*
- * fsnotify inode mark locking/lifetime/and refcnting
- *
- * REFCNT:
- * The mark->refcnt tells how many "things" in the kernel currently are
- * referencing this object. The object typically will live inside the kernel
- * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
- * which can find this object holding the appropriete locks, can take a reference
- * and the object itself is guarenteed to survive until the reference is dropped.
- *
- * LOCKING:
- * There are 3 spinlocks involved with fsnotify inode marks and they MUST
- * be taken in order as follows:
- *
- * entry->lock
- * group->mark_lock
- * inode->i_lock
- *
- * entry->lock protects 2 things, entry->group and entry->inode. You must hold
- * that lock to dereference either of these things (they could be NULL even with
- * the lock)
- *
- * group->mark_lock protects the mark_entries list anchored inside a given group
- * and each entry is hooked via the g_list. It also sorta protects the
- * free_g_list, which when used is anchored by a private list on the stack of the
- * task which held the group->mark_lock.
- *
- * inode->i_lock protects the i_fsnotify_mark_entries list anchored inside a
- * given inode and each entry is hooked via the i_list. (and sorta the
- * free_i_list)
- *
- *
- * LIFETIME:
- * Inode marks survive between when they are added to an inode and when their
- * refcnt==0.
- *
- * The inode mark can be cleared for a number of different reasons including:
- * - The inode is unlinked for the last time. (fsnotify_inode_remove)
- * - The inode is being evicted from cache. (fsnotify_inode_delete)
- * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
- * - Something explicitly requests that it be removed. (fsnotify_destroy_mark_by_entry)
- * - The fsnotify_group associated with the mark is going away and all such marks
- * need to be cleaned up. (fsnotify_clear_marks_by_group)
- *
- * Worst case we are given an inode and need to clean up all the marks on that
- * inode. We take i_lock and walk the i_fsnotify_mark_entries safely. For each
- * mark on the list we take a reference (so the mark can't disappear under us).
- * We remove that mark form the inode's list of marks and we add this mark to a
- * private list anchored on the stack using i_free_list; At this point we no
- * longer fear anything finding the mark using the inode's list of marks.
- *
- * We can safely and locklessly run the private list on the stack of everything
- * we just unattached from the original inode. For each mark on the private list
- * we grab the mark-> and can thus dereference mark->group and mark->inode. If
- * we see the group and inode are not NULL we take those locks. Now holding all
- * 3 locks we can completely remove the mark from other tasks finding it in the
- * future. Remember, 10 things might already be referencing this mark, but they
- * better be holding a ref. We drop our reference we took before we unhooked it
- * from the inode. When the ref hits 0 we can free the mark.
- *
- * Very similarly for freeing by group, except we use free_g_list.
- *
- * This has the very interesting property of being able to run concurrently with
- * any (or all) other directions.
- */
-
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -96,30 +30,19 @@
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
-void fsnotify_get_mark(struct fsnotify_mark_entry *entry)
-{
- atomic_inc(&entry->refcnt);
-}
-
-void fsnotify_put_mark(struct fsnotify_mark_entry *entry)
-{
- if (atomic_dec_and_test(&entry->refcnt))
- entry->free_mark(entry);
-}
-
/*
* Recalculate the mask of events relevant to a given inode locked.
*/
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
struct hlist_node *pos;
__u32 new_mask = 0;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list)
- new_mask |= entry->mask;
+ hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+ new_mask |= mark->mask;
inode->i_fsnotify_mask = new_mask;
}
@@ -136,107 +59,26 @@ void fsnotify_recalc_inode_mask(struct inode *inode)
__fsnotify_update_child_dentry_flags(inode);
}
-/*
- * Any time a mark is getting freed we end up here.
- * The caller had better be holding a reference to this mark so we don't actually
- * do the final put under the entry->lock
- */
-void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry)
+void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
- struct fsnotify_group *group;
- struct inode *inode;
-
- spin_lock(&entry->lock);
+ struct inode *inode = mark->i.inode;
- group = entry->group;
- inode = entry->inode;
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&mark->group->mark_lock);
- BUG_ON(group && !inode);
- BUG_ON(!group && inode);
-
- /* if !group something else already marked this to die */
- if (!group) {
- spin_unlock(&entry->lock);
- return;
- }
-
- /* 1 from caller and 1 for being on i_list/g_list */
- BUG_ON(atomic_read(&entry->refcnt) < 2);
-
- spin_lock(&group->mark_lock);
spin_lock(&inode->i_lock);
- hlist_del_init(&entry->i_list);
- entry->inode = NULL;
-
- list_del_init(&entry->g_list);
- entry->group = NULL;
-
- fsnotify_put_mark(entry); /* for i_list and g_list */
+ hlist_del_init(&mark->i.i_list);
+ mark->i.inode = NULL;
/*
- * this mark is now off the inode->i_fsnotify_mark_entries list and we
+ * this mark is now off the inode->i_fsnotify_marks list and we
* hold the inode->i_lock, so this is the perfect time to update the
* inode->i_fsnotify_mask
*/
fsnotify_recalc_inode_mask_locked(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&group->mark_lock);
- spin_unlock(&entry->lock);
-
- /*
- * Some groups like to know that marks are being freed. This is a
- * callback to the group function to let it know that this entry
- * is being freed.
- */
- if (group->ops->freeing_mark)
- group->ops->freeing_mark(entry, group);
-
- /*
- * __fsnotify_update_child_dentry_flags(inode);
- *
- * I really want to call that, but we can't, we have no idea if the inode
- * still exists the second we drop the entry->lock.
- *
- * The next time an event arrive to this inode from one of it's children
- * __fsnotify_parent will see that the inode doesn't care about it's
- * children and will update all of these flags then. So really this
- * is just a lazy update (and could be a perf win...)
- */
-
-
- iput(inode);
-
- /*
- * it's possible that this group tried to destroy itself, but this
- * this mark was simultaneously being freed by inode. If that's the
- * case, we finish freeing the group here.
- */
- if (unlikely(atomic_dec_and_test(&group->num_marks)))
- fsnotify_final_destroy_group(group);
-}
-
-/*
- * Given a group, destroy all of the marks associated with that group.
- */
-void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
-{
- struct fsnotify_mark_entry *lentry, *entry;
- LIST_HEAD(free_list);
-
- spin_lock(&group->mark_lock);
- list_for_each_entry_safe(entry, lentry, &group->mark_entries, g_list) {
- list_add(&entry->free_g_list, &free_list);
- list_del_init(&entry->g_list);
- fsnotify_get_mark(entry);
- }
- spin_unlock(&group->mark_lock);
-
- list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
- }
}
/*
@@ -244,112 +86,127 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
*/
void fsnotify_clear_marks_by_inode(struct inode *inode)
{
- struct fsnotify_mark_entry *entry, *lentry;
+ struct fsnotify_mark *mark, *lmark;
struct hlist_node *pos, *n;
LIST_HEAD(free_list);
spin_lock(&inode->i_lock);
- hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_mark_entries, i_list) {
- list_add(&entry->free_i_list, &free_list);
- hlist_del_init(&entry->i_list);
- fsnotify_get_mark(entry);
+ hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+ list_add(&mark->i.free_i_list, &free_list);
+ hlist_del_init(&mark->i.i_list);
+ fsnotify_get_mark(mark);
}
spin_unlock(&inode->i_lock);
- list_for_each_entry_safe(entry, lentry, &free_list, free_i_list) {
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
}
}
/*
+ * Given a group clear all of the inode marks associated with that group.
+ */
+void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
+}
+
+/*
* given a group and inode, find the mark associated with that combination.
* if found take a reference to that mark and return it, else return NULL
*/
-struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group,
- struct inode *inode)
+struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
+ struct inode *inode)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *mark;
struct hlist_node *pos;
assert_spin_locked(&inode->i_lock);
- hlist_for_each_entry(entry, pos, &inode->i_fsnotify_mark_entries, i_list) {
- if (entry->group == group) {
- fsnotify_get_mark(entry);
- return entry;
+ hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+ if (mark->group == group) {
+ fsnotify_get_mark(mark);
+ return mark;
}
}
return NULL;
}
/*
- * Nothing fancy, just initialize lists and locks and counters.
+ * given a group and inode, find the mark associated with that combination.
+ * if found take a reference to that mark and return it, else return NULL
*/
-void fsnotify_init_mark(struct fsnotify_mark_entry *entry,
- void (*free_mark)(struct fsnotify_mark_entry *entry))
+struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group,
+ struct inode *inode)
+{
+ struct fsnotify_mark *mark;
+ spin_lock(&inode->i_lock);
+ mark = fsnotify_find_inode_mark_locked(group, inode);
+ spin_unlock(&inode->i_lock);
+
+ return mark;
+}
+
+/*
+ * If we are setting a mark mask on an inode mark we should pin the inode
+ * in memory.
+ */
+void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark,
+ __u32 mask)
{
- spin_lock_init(&entry->lock);
- atomic_set(&entry->refcnt, 1);
- INIT_HLIST_NODE(&entry->i_list);
- entry->group = NULL;
- entry->mask = 0;
- entry->inode = NULL;
- entry->free_mark = free_mark;
+ struct inode *inode;
+
+ assert_spin_locked(&mark->lock);
+
+ if (mask &&
+ mark->i.inode &&
+ !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) {
+ mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED;
+ inode = igrab(mark->i.inode);
+ /*
+ * we shouldn't be able to get here if the inode wasn't
+ * already safely held in memory. But bug in case it
+ * ever is wrong.
+ */
+ BUG_ON(!inode);
+ }
}
/*
- * Attach an initialized mark entry to a given group and inode.
+ * Attach an initialized mark to a given group and inode.
* These marks may be used for the fsnotify backend to determine which
* event types should be delivered to which group and for which inodes.
*/
-int fsnotify_add_mark(struct fsnotify_mark_entry *entry,
- struct fsnotify_group *group, struct inode *inode)
+int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ int allow_dups)
{
- struct fsnotify_mark_entry *lentry;
+ struct fsnotify_mark *lmark = NULL;
int ret = 0;
- inode = igrab(inode);
- if (unlikely(!inode))
- return -EINVAL;
-
- /*
- * LOCKING ORDER!!!!
- * entry->lock
- * group->mark_lock
- * inode->i_lock
- */
- spin_lock(&entry->lock);
- spin_lock(&group->mark_lock);
- spin_lock(&inode->i_lock);
+ mark->flags = FSNOTIFY_MARK_FLAG_INODE;
- lentry = fsnotify_find_mark_entry(group, inode);
- if (!lentry) {
- entry->group = group;
- entry->inode = inode;
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&group->mark_lock);
- hlist_add_head(&entry->i_list, &inode->i_fsnotify_mark_entries);
- list_add(&entry->g_list, &group->mark_entries);
+ spin_lock(&inode->i_lock);
- fsnotify_get_mark(entry); /* for i_list and g_list */
+ if (!allow_dups)
+ lmark = fsnotify_find_inode_mark_locked(group, inode);
+ if (!lmark) {
+ mark->i.inode = inode;
- atomic_inc(&group->num_marks);
+ hlist_add_head(&mark->i.i_list, &inode->i_fsnotify_marks);
fsnotify_recalc_inode_mask_locked(inode);
}
spin_unlock(&inode->i_lock);
- spin_unlock(&group->mark_lock);
- spin_unlock(&entry->lock);
- if (lentry) {
+ if (lmark)
ret = -EEXIST;
- iput(inode);
- fsnotify_put_mark(lentry);
- } else {
- __fsnotify_update_child_dentry_flags(inode);
- }
return ret;
}
diff --git a/fs/notify/inotify/Kconfig b/fs/notify/inotify/Kconfig
index 3e56dbffe729..0161c74e76e2 100644
--- a/fs/notify/inotify/Kconfig
+++ b/fs/notify/inotify/Kconfig
@@ -1,18 +1,3 @@
-config INOTIFY
- bool "Inotify file change notification support"
- default n
- ---help---
- Say Y here to enable legacy in kernel inotify support. Inotify is a
- file change notification system. It is a replacement for dnotify.
- This option only provides the legacy inotify in kernel API. There
- are no in tree kernel users of this interface since it is deprecated.
- You only need this if you are loading an out of tree kernel module
- that uses inotify.
-
- For more information, see <file:Documentation/filesystems/inotify.txt>
-
- If unsure, say N.
-
config INOTIFY_USER
bool "Inotify support for userspace"
select FSNOTIFY
diff --git a/fs/notify/inotify/Makefile b/fs/notify/inotify/Makefile
index 943828171362..a380dabe09de 100644
--- a/fs/notify/inotify/Makefile
+++ b/fs/notify/inotify/Makefile
@@ -1,2 +1 @@
-obj-$(CONFIG_INOTIFY) += inotify.o
obj-$(CONFIG_INOTIFY_USER) += inotify_fsnotify.o inotify_user.o
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
deleted file mode 100644
index 40b1cf914ccb..000000000000
--- a/fs/notify/inotify/inotify.c
+++ /dev/null
@@ -1,933 +0,0 @@
-/*
- * fs/inotify.c - inode-based file event notifications
- *
- * Authors:
- * John McCutchan <ttb@tentacle.dhs.org>
- * Robert Love <rml@novell.com>
- *
- * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
- *
- * Copyright (C) 2005 John McCutchan
- * Copyright 2006 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/writeback.h>
-#include <linux/inotify.h>
-#include <linux/fsnotify_backend.h>
-
-static atomic_t inotify_cookie;
-
-/*
- * Lock ordering:
- *
- * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_mutex (synchronize shrink_icache_memory())
- * inode_lock (protects the super_block->s_inodes list)
- * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
- * inotify_handle->mutex (protects inotify_handle and watches->h_list)
- *
- * The inode->inotify_mutex and inotify_handle->mutex and held during execution
- * of a caller's event handler. Thus, the caller must not hold any locks
- * taken in their event handler while calling any of the published inotify
- * interfaces.
- */
-
-/*
- * Lifetimes of the three main data structures--inotify_handle, inode, and
- * inotify_watch--are managed by reference count.
- *
- * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
- * Additional references can bump the count via get_inotify_handle() and drop
- * the count via put_inotify_handle().
- *
- * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
- * to remove_watch_no_event(). Additional references can bump the count via
- * get_inotify_watch() and drop the count via put_inotify_watch(). The caller
- * is reponsible for the final put after receiving IN_IGNORED, or when using
- * IN_ONESHOT after receiving the first event. Inotify does the final put if
- * inotify_destroy() is called.
- *
- * inode: Pinned so long as the inode is associated with a watch, from
- * inotify_add_watch() to the final put_inotify_watch().
- */
-
-/*
- * struct inotify_handle - represents an inotify instance
- *
- * This structure is protected by the mutex 'mutex'.
- */
-struct inotify_handle {
- struct idr idr; /* idr mapping wd -> watch */
- struct mutex mutex; /* protects this bad boy */
- struct list_head watches; /* list of watches */
- atomic_t count; /* reference count */
- u32 last_wd; /* the last wd allocated */
- const struct inotify_operations *in_ops; /* inotify caller operations */
-};
-
-static inline void get_inotify_handle(struct inotify_handle *ih)
-{
- atomic_inc(&ih->count);
-}
-
-static inline void put_inotify_handle(struct inotify_handle *ih)
-{
- if (atomic_dec_and_test(&ih->count)) {
- idr_destroy(&ih->idr);
- kfree(ih);
- }
-}
-
-/**
- * get_inotify_watch - grab a reference to an inotify_watch
- * @watch: watch to grab
- */
-void get_inotify_watch(struct inotify_watch *watch)
-{
- atomic_inc(&watch->count);
-}
-EXPORT_SYMBOL_GPL(get_inotify_watch);
-
-int pin_inotify_watch(struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- spin_lock(&sb_lock);
- if (sb->s_count >= S_BIAS) {
- atomic_inc(&sb->s_active);
- spin_unlock(&sb_lock);
- atomic_inc(&watch->count);
- return 1;
- }
- spin_unlock(&sb_lock);
- return 0;
-}
-
-/**
- * put_inotify_watch - decrements the ref count on a given watch. cleans up
- * watch references if the count reaches zero. inotify_watch is freed by
- * inotify callers via the destroy_watch() op.
- * @watch: watch to release
- */
-void put_inotify_watch(struct inotify_watch *watch)
-{
- if (atomic_dec_and_test(&watch->count)) {
- struct inotify_handle *ih = watch->ih;
-
- iput(watch->inode);
- ih->in_ops->destroy_watch(watch);
- put_inotify_handle(ih);
- }
-}
-EXPORT_SYMBOL_GPL(put_inotify_watch);
-
-void unpin_inotify_watch(struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- put_inotify_watch(watch);
- deactivate_super(sb);
-}
-
-/*
- * inotify_handle_get_wd - returns the next WD for use by the given handle
- *
- * Callers must hold ih->mutex. This function can sleep.
- */
-static int inotify_handle_get_wd(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- int ret;
-
- do {
- if (unlikely(!idr_pre_get(&ih->idr, GFP_NOFS)))
- return -ENOSPC;
- ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
- } while (ret == -EAGAIN);
-
- if (likely(!ret))
- ih->last_wd = watch->wd;
-
- return ret;
-}
-
-/*
- * inotify_inode_watched - returns nonzero if there are watches on this inode
- * and zero otherwise. We call this lockless, we do not care if we race.
- */
-static inline int inotify_inode_watched(struct inode *inode)
-{
- return !list_empty(&inode->inotify_watches);
-}
-
-/*
- * Get child dentry flag into synch with parent inode.
- * Flag should always be clear for negative dentrys.
- */
-static void set_dentry_child_flags(struct inode *inode, int watched)
-{
- struct dentry *alias;
-
- spin_lock(&dcache_lock);
- list_for_each_entry(alias, &inode->i_dentry, d_alias) {
- struct dentry *child;
-
- list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
- if (!child->d_inode)
- continue;
-
- spin_lock(&child->d_lock);
- if (watched)
- child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- else
- child->d_flags &=~DCACHE_INOTIFY_PARENT_WATCHED;
- spin_unlock(&child->d_lock);
- }
- }
- spin_unlock(&dcache_lock);
-}
-
-/*
- * inotify_find_handle - find the watch associated with the given inode and
- * handle
- *
- * Callers must hold inode->inotify_mutex.
- */
-static struct inotify_watch *inode_find_handle(struct inode *inode,
- struct inotify_handle *ih)
-{
- struct inotify_watch *watch;
-
- list_for_each_entry(watch, &inode->inotify_watches, i_list) {
- if (watch->ih == ih)
- return watch;
- }
-
- return NULL;
-}
-
-/*
- * remove_watch_no_event - remove watch without the IN_IGNORED event.
- *
- * Callers must hold both inode->inotify_mutex and ih->mutex.
- */
-static void remove_watch_no_event(struct inotify_watch *watch,
- struct inotify_handle *ih)
-{
- list_del(&watch->i_list);
- list_del(&watch->h_list);
-
- if (!inotify_inode_watched(watch->inode))
- set_dentry_child_flags(watch->inode, 0);
-
- idr_remove(&ih->idr, watch->wd);
-}
-
-/**
- * inotify_remove_watch_locked - Remove a watch from both the handle and the
- * inode. Sends the IN_IGNORED event signifying that the inode is no longer
- * watched. May be invoked from a caller's event handler.
- * @ih: inotify handle associated with watch
- * @watch: watch to remove
- *
- * Callers must hold both inode->inotify_mutex and ih->mutex.
- */
-void inotify_remove_watch_locked(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- remove_watch_no_event(watch, ih);
- ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
-}
-EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
-
-/* Kernel API for producing events */
-
-/*
- * inotify_d_instantiate - instantiate dcache entry for inode
- */
-void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
-{
- struct dentry *parent;
-
- if (!inode)
- return;
-
- spin_lock(&entry->d_lock);
- parent = entry->d_parent;
- if (parent->d_inode && inotify_inode_watched(parent->d_inode))
- entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- spin_unlock(&entry->d_lock);
-}
-
-/*
- * inotify_d_move - dcache entry has been moved
- */
-void inotify_d_move(struct dentry *entry)
-{
- struct dentry *parent;
-
- parent = entry->d_parent;
- if (inotify_inode_watched(parent->d_inode))
- entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
- else
- entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
-}
-
-/**
- * inotify_inode_queue_event - queue an event to all watches on this inode
- * @inode: inode event is originating from
- * @mask: event mask describing this event
- * @cookie: cookie for synchronization, or zero
- * @name: filename, if any
- * @n_inode: inode associated with name
- */
-void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
- const char *name, struct inode *n_inode)
-{
- struct inotify_watch *watch, *next;
-
- if (!inotify_inode_watched(inode))
- return;
-
- mutex_lock(&inode->inotify_mutex);
- list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
- u32 watch_mask = watch->mask;
- if (watch_mask & mask) {
- struct inotify_handle *ih= watch->ih;
- mutex_lock(&ih->mutex);
- if (watch_mask & IN_ONESHOT)
- remove_watch_no_event(watch, ih);
- ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
- name, n_inode);
- mutex_unlock(&ih->mutex);
- }
- }
- mutex_unlock(&inode->inotify_mutex);
-}
-EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
-
-/**
- * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
- * @dentry: the dentry in question, we queue against this dentry's parent
- * @mask: event mask describing this event
- * @cookie: cookie for synchronization, or zero
- * @name: filename, if any
- */
-void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
- u32 cookie, const char *name)
-{
- struct dentry *parent;
- struct inode *inode;
-
- if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
- return;
-
- spin_lock(&dentry->d_lock);
- parent = dentry->d_parent;
- inode = parent->d_inode;
-
- if (inotify_inode_watched(inode)) {
- dget(parent);
- spin_unlock(&dentry->d_lock);
- inotify_inode_queue_event(inode, mask, cookie, name,
- dentry->d_inode);
- dput(parent);
- } else
- spin_unlock(&dentry->d_lock);
-}
-EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
-
-/**
- * inotify_get_cookie - return a unique cookie for use in synchronizing events.
- */
-u32 inotify_get_cookie(void)
-{
- return atomic_inc_return(&inotify_cookie);
-}
-EXPORT_SYMBOL_GPL(inotify_get_cookie);
-
-/**
- * inotify_unmount_inodes - an sb is unmounting. handle any watched inodes.
- * @list: list of inodes being unmounted (sb->s_inodes)
- *
- * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
- * We temporarily drop inode_lock, however, and CAN block.
- */
-void inotify_unmount_inodes(struct list_head *list)
-{
- struct inode *inode, *next_i, *need_iput = NULL;
-
- list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
- struct inotify_watch *watch, *next_w;
- struct inode *need_iput_tmp;
- struct list_head *watches;
-
- /*
- * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
- * I_WILL_FREE, or I_NEW which is fine because by that point
- * the inode cannot have any associated watches.
- */
- if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
- continue;
-
- /*
- * If i_count is zero, the inode cannot have any watches and
- * doing an __iget/iput with MS_ACTIVE clear would actually
- * evict all inodes with zero i_count from icache which is
- * unnecessarily violent and may in fact be illegal to do.
- */
- if (!atomic_read(&inode->i_count))
- continue;
-
- need_iput_tmp = need_iput;
- need_iput = NULL;
- /* In case inotify_remove_watch_locked() drops a reference. */
- if (inode != need_iput_tmp)
- __iget(inode);
- else
- need_iput_tmp = NULL;
- /* In case the dropping of a reference would nuke next_i. */
- if ((&next_i->i_sb_list != list) &&
- atomic_read(&next_i->i_count) &&
- !(next_i->i_state & (I_CLEAR | I_FREEING |
- I_WILL_FREE))) {
- __iget(next_i);
- need_iput = next_i;
- }
-
- /*
- * We can safely drop inode_lock here because we hold
- * references on both inode and next_i. Also no new inodes
- * will be added since the umount has begun. Finally,
- * iprune_mutex keeps shrink_icache_memory() away.
- */
- spin_unlock(&inode_lock);
-
- if (need_iput_tmp)
- iput(need_iput_tmp);
-
- /* for each watch, send IN_UNMOUNT and then remove it */
- mutex_lock(&inode->inotify_mutex);
- watches = &inode->inotify_watches;
- list_for_each_entry_safe(watch, next_w, watches, i_list) {
- struct inotify_handle *ih= watch->ih;
- get_inotify_watch(watch);
- mutex_lock(&ih->mutex);
- ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
- NULL, NULL);
- inotify_remove_watch_locked(ih, watch);
- mutex_unlock(&ih->mutex);
- put_inotify_watch(watch);
- }
- mutex_unlock(&inode->inotify_mutex);
- iput(inode);
-
- spin_lock(&inode_lock);
- }
-}
-EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
-
-/**
- * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
- * @inode: inode that is about to be removed
- */
-void inotify_inode_is_dead(struct inode *inode)
-{
- struct inotify_watch *watch, *next;
-
- mutex_lock(&inode->inotify_mutex);
- list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
- struct inotify_handle *ih = watch->ih;
- mutex_lock(&ih->mutex);
- inotify_remove_watch_locked(ih, watch);
- mutex_unlock(&ih->mutex);
- }
- mutex_unlock(&inode->inotify_mutex);
-}
-EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
-
-/* Kernel Consumer API */
-
-/**
- * inotify_init - allocate and initialize an inotify instance
- * @ops: caller's inotify operations
- */
-struct inotify_handle *inotify_init(const struct inotify_operations *ops)
-{
- struct inotify_handle *ih;
-
- ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
- if (unlikely(!ih))
- return ERR_PTR(-ENOMEM);
-
- idr_init(&ih->idr);
- INIT_LIST_HEAD(&ih->watches);
- mutex_init(&ih->mutex);
- ih->last_wd = 0;
- ih->in_ops = ops;
- atomic_set(&ih->count, 0);
- get_inotify_handle(ih);
-
- return ih;
-}
-EXPORT_SYMBOL_GPL(inotify_init);
-
-/**
- * inotify_init_watch - initialize an inotify watch
- * @watch: watch to initialize
- */
-void inotify_init_watch(struct inotify_watch *watch)
-{
- INIT_LIST_HEAD(&watch->h_list);
- INIT_LIST_HEAD(&watch->i_list);
- atomic_set(&watch->count, 0);
- get_inotify_watch(watch); /* initial get */
-}
-EXPORT_SYMBOL_GPL(inotify_init_watch);
-
-/*
- * Watch removals suck violently. To kick the watch out we need (in this
- * order) inode->inotify_mutex and ih->mutex. That's fine if we have
- * a hold on inode; however, for all other cases we need to make damn sure
- * we don't race with umount. We can *NOT* just grab a reference to a
- * watch - inotify_unmount_inodes() will happily sail past it and we'll end
- * with reference to inode potentially outliving its superblock. Ideally
- * we just want to grab an active reference to superblock if we can; that
- * will make sure we won't go into inotify_umount_inodes() until we are
- * done. Cleanup is just deactivate_super(). However, that leaves a messy
- * case - what if we *are* racing with umount() and active references to
- * superblock can't be acquired anymore? We can bump ->s_count, grab
- * ->s_umount, which will almost certainly wait until the superblock is shut
- * down and the watch in question is pining for fjords. That's fine, but
- * there is a problem - we might have hit the window between ->s_active
- * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
- * is past the point of no return and is heading for shutdown) and the
- * moment when deactivate_super() acquires ->s_umount. We could just do
- * drop_super() yield() and retry, but that's rather antisocial and this
- * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having
- * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
- * that we won't race with inotify_umount_inodes(). So we could grab a
- * reference to watch and do the rest as above, just with drop_super() instead
- * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we
- * could grab ->s_umount. So the watch could've been gone already.
- *
- * That still can be dealt with - we need to save watch->wd, do idr_find()
- * and compare its result with our pointer. If they match, we either have
- * the damn thing still alive or we'd lost not one but two races at once,
- * the watch had been killed and a new one got created with the same ->wd
- * at the same address. That couldn't have happened in inotify_destroy(),
- * but inotify_rm_wd() could run into that. Still, "new one got created"
- * is not a problem - we have every right to kill it or leave it alone,
- * whatever's more convenient.
- *
- * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
- * "grab it and kill it" check. If it's been our original watch, we are
- * fine, if it's a newcomer - nevermind, just pretend that we'd won the
- * race and kill the fscker anyway; we are safe since we know that its
- * superblock won't be going away.
- *
- * And yes, this is far beyond mere "not very pretty"; so's the entire
- * concept of inotify to start with.
- */
-
-/**
- * pin_to_kill - pin the watch down for removal
- * @ih: inotify handle
- * @watch: watch to kill
- *
- * Called with ih->mutex held, drops it. Possible return values:
- * 0 - nothing to do, it has died
- * 1 - remove it, drop the reference and deactivate_super()
- * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid
- * that variant, since it involved a lot of PITA, but that's the best that
- * could've been done.
- */
-static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
-{
- struct super_block *sb = watch->inode->i_sb;
- s32 wd = watch->wd;
-
- spin_lock(&sb_lock);
- if (sb->s_count >= S_BIAS) {
- atomic_inc(&sb->s_active);
- spin_unlock(&sb_lock);
- get_inotify_watch(watch);
- mutex_unlock(&ih->mutex);
- return 1; /* the best outcome */
- }
- sb->s_count++;
- spin_unlock(&sb_lock);
- mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
- down_read(&sb->s_umount);
- if (likely(!sb->s_root)) {
- /* fs is already shut down; the watch is dead */
- drop_super(sb);
- return 0;
- }
- /* raced with the final deactivate_super() */
- mutex_lock(&ih->mutex);
- if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
- /* the watch is dead */
- mutex_unlock(&ih->mutex);
- drop_super(sb);
- return 0;
- }
- /* still alive or freed and reused with the same sb and wd; kill */
- get_inotify_watch(watch);
- mutex_unlock(&ih->mutex);
- return 2;
-}
-
-static void unpin_and_kill(struct inotify_watch *watch, int how)
-{
- struct super_block *sb = watch->inode->i_sb;
- put_inotify_watch(watch);
- switch (how) {
- case 1:
- deactivate_super(sb);
- break;
- case 2:
- drop_super(sb);
- }
-}
-
-/**
- * inotify_destroy - clean up and destroy an inotify instance
- * @ih: inotify handle
- */
-void inotify_destroy(struct inotify_handle *ih)
-{
- /*
- * Destroy all of the watches for this handle. Unfortunately, not very
- * pretty. We cannot do a simple iteration over the list, because we
- * do not know the inode until we iterate to the watch. But we need to
- * hold inode->inotify_mutex before ih->mutex. The following works.
- *
- * AV: it had to become even uglier to start working ;-/
- */
- while (1) {
- struct inotify_watch *watch;
- struct list_head *watches;
- struct super_block *sb;
- struct inode *inode;
- int how;
-
- mutex_lock(&ih->mutex);
- watches = &ih->watches;
- if (list_empty(watches)) {
- mutex_unlock(&ih->mutex);
- break;
- }
- watch = list_first_entry(watches, struct inotify_watch, h_list);
- sb = watch->inode->i_sb;
- how = pin_to_kill(ih, watch);
- if (!how)
- continue;
-
- inode = watch->inode;
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* make sure we didn't race with another list removal */
- if (likely(idr_find(&ih->idr, watch->wd))) {
- remove_watch_no_event(watch, ih);
- put_inotify_watch(watch);
- }
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- unpin_and_kill(watch, how);
- }
-
- /* free this handle: the put matching the get in inotify_init() */
- put_inotify_handle(ih);
-}
-EXPORT_SYMBOL_GPL(inotify_destroy);
-
-/**
- * inotify_find_watch - find an existing watch for an (ih,inode) pair
- * @ih: inotify handle
- * @inode: inode to watch
- * @watchp: pointer to existing inotify_watch
- *
- * Caller must pin given inode (via nameidata).
- */
-s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
- struct inotify_watch **watchp)
-{
- struct inotify_watch *old;
- int ret = -ENOENT;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- old = inode_find_handle(inode, ih);
- if (unlikely(old)) {
- get_inotify_watch(old); /* caller must put watch */
- *watchp = old;
- ret = old->wd;
- }
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_find_watch);
-
-/**
- * inotify_find_update_watch - find and update the mask of an existing watch
- * @ih: inotify handle
- * @inode: inode's watch to update
- * @mask: mask of events to watch
- *
- * Caller must pin given inode (via nameidata).
- */
-s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
- u32 mask)
-{
- struct inotify_watch *old;
- int mask_add = 0;
- int ret;
-
- if (mask & IN_MASK_ADD)
- mask_add = 1;
-
- /* don't allow invalid bits: we don't want flags set */
- mask &= IN_ALL_EVENTS | IN_ONESHOT;
- if (unlikely(!mask))
- return -EINVAL;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /*
- * Handle the case of re-adding a watch on an (inode,ih) pair that we
- * are already watching. We just update the mask and return its wd.
- */
- old = inode_find_handle(inode, ih);
- if (unlikely(!old)) {
- ret = -ENOENT;
- goto out;
- }
-
- if (mask_add)
- old->mask |= mask;
- else
- old->mask = mask;
- ret = old->wd;
-out:
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_find_update_watch);
-
-/**
- * inotify_add_watch - add a watch to an inotify instance
- * @ih: inotify handle
- * @watch: caller allocated watch structure
- * @inode: inode to watch
- * @mask: mask of events to watch
- *
- * Caller must pin given inode (via nameidata).
- * Caller must ensure it only calls inotify_add_watch() once per watch.
- * Calls inotify_handle_get_wd() so may sleep.
- */
-s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
- struct inode *inode, u32 mask)
-{
- int ret = 0;
- int newly_watched;
-
- /* don't allow invalid bits: we don't want flags set */
- mask &= IN_ALL_EVENTS | IN_ONESHOT;
- if (unlikely(!mask))
- return -EINVAL;
- watch->mask = mask;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* Initialize a new watch */
- ret = inotify_handle_get_wd(ih, watch);
- if (unlikely(ret))
- goto out;
- ret = watch->wd;
-
- /* save a reference to handle and bump the count to make it official */
- get_inotify_handle(ih);
- watch->ih = ih;
-
- /*
- * Save a reference to the inode and bump the ref count to make it
- * official. We hold a reference to nameidata, which makes this safe.
- */
- watch->inode = igrab(inode);
-
- /* Add the watch to the handle's and the inode's list */
- newly_watched = !inotify_inode_watched(inode);
- list_add(&watch->h_list, &ih->watches);
- list_add(&watch->i_list, &inode->inotify_watches);
- /*
- * Set child flags _after_ adding the watch, so there is no race
- * windows where newly instantiated children could miss their parent's
- * watched flag.
- */
- if (newly_watched)
- set_dentry_child_flags(inode, 1);
-
-out:
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(inotify_add_watch);
-
-/**
- * inotify_clone_watch - put the watch next to existing one
- * @old: already installed watch
- * @new: new watch
- *
- * Caller must hold the inotify_mutex of inode we are dealing with;
- * it is expected to remove the old watch before unlocking the inode.
- */
-s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
-{
- struct inotify_handle *ih = old->ih;
- int ret = 0;
-
- new->mask = old->mask;
- new->ih = ih;
-
- mutex_lock(&ih->mutex);
-
- /* Initialize a new watch */
- ret = inotify_handle_get_wd(ih, new);
- if (unlikely(ret))
- goto out;
- ret = new->wd;
-
- get_inotify_handle(ih);
-
- new->inode = igrab(old->inode);
-
- list_add(&new->h_list, &ih->watches);
- list_add(&new->i_list, &old->inode->inotify_watches);
-out:
- mutex_unlock(&ih->mutex);
- return ret;
-}
-
-void inotify_evict_watch(struct inotify_watch *watch)
-{
- get_inotify_watch(watch);
- mutex_lock(&watch->ih->mutex);
- inotify_remove_watch_locked(watch->ih, watch);
- mutex_unlock(&watch->ih->mutex);
-}
-
-/**
- * inotify_rm_wd - remove a watch from an inotify instance
- * @ih: inotify handle
- * @wd: watch descriptor to remove
- *
- * Can sleep.
- */
-int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
-{
- struct inotify_watch *watch;
- struct super_block *sb;
- struct inode *inode;
- int how;
-
- mutex_lock(&ih->mutex);
- watch = idr_find(&ih->idr, wd);
- if (unlikely(!watch)) {
- mutex_unlock(&ih->mutex);
- return -EINVAL;
- }
- sb = watch->inode->i_sb;
- how = pin_to_kill(ih, watch);
- if (!how)
- return 0;
-
- inode = watch->inode;
-
- mutex_lock(&inode->inotify_mutex);
- mutex_lock(&ih->mutex);
-
- /* make sure that we did not race */
- if (likely(idr_find(&ih->idr, wd) == watch))
- inotify_remove_watch_locked(ih, watch);
-
- mutex_unlock(&ih->mutex);
- mutex_unlock(&inode->inotify_mutex);
- unpin_and_kill(watch, how);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(inotify_rm_wd);
-
-/**
- * inotify_rm_watch - remove a watch from an inotify instance
- * @ih: inotify handle
- * @watch: watch to remove
- *
- * Can sleep.
- */
-int inotify_rm_watch(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- return inotify_rm_wd(ih, watch->wd);
-}
-EXPORT_SYMBOL_GPL(inotify_rm_watch);
-
-/*
- * inotify_setup - core initialization function
- */
-static int __init inotify_setup(void)
-{
- BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
- BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
- BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
- BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
- BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
- BUILD_BUG_ON(IN_OPEN != FS_OPEN);
- BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
- BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
- BUILD_BUG_ON(IN_CREATE != FS_CREATE);
- BUILD_BUG_ON(IN_DELETE != FS_DELETE);
- BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
- BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
- BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
-
- BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
- BUILD_BUG_ON(IN_ISDIR != FS_IN_ISDIR);
- BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
- BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
-
- atomic_set(&inotify_cookie, 0);
-
- return 0;
-}
-
-module_init(inotify_setup);
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
index f234f3a4c8ca..b6642e4de4bf 100644
--- a/fs/notify/inotify/inotify.h
+++ b/fs/notify/inotify/inotify.h
@@ -9,13 +9,12 @@ struct inotify_event_private_data {
int wd;
};
-struct inotify_inode_mark_entry {
- /* fsnotify_mark_entry MUST be the first thing */
- struct fsnotify_mark_entry fsn_entry;
+struct inotify_inode_mark {
+ struct fsnotify_mark fsn_mark;
int wd;
};
-extern void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
+extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group);
extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv);
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index c9ee67b442e1..4706f408971c 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -31,10 +31,66 @@
#include "inotify.h"
+/*
+ * Check if 2 events contain the same information. We do not compare private data
+ * but at this moment that isn't a problem for any know fsnotify listeners.
+ */
+static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
+{
+ if ((old->mask == new->mask) &&
+ (old->to_tell == new->to_tell) &&
+ (old->data_type == new->data_type) &&
+ (old->name_len == new->name_len)) {
+ switch (old->data_type) {
+ case (FSNOTIFY_EVENT_INODE):
+ /* remember, after old was put on the wait_q we aren't
+ * allowed to look at the inode any more, only thing
+ * left to check was if the file_name is the same */
+ if (!old->name_len ||
+ !strcmp(old->file_name, new->file_name))
+ return true;
+ break;
+ case (FSNOTIFY_EVENT_PATH):
+ if ((old->path.mnt == new->path.mnt) &&
+ (old->path.dentry == new->path.dentry))
+ return true;
+ break;
+ case (FSNOTIFY_EVENT_NONE):
+ if (old->mask & FS_Q_OVERFLOW)
+ return true;
+ else if (old->mask & FS_IN_IGNORED)
+ return false;
+ return true;
+ };
+ }
+ return false;
+}
+
+static int inotify_merge(struct list_head *list,
+ struct fsnotify_event *event,
+ void **arg)
+{
+ struct fsnotify_event_holder *last_holder;
+ struct fsnotify_event *last_event;
+ int ret = 0;
+
+ /* and the list better be locked by something too */
+ spin_lock(&event->lock);
+
+ last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
+ last_event = last_holder->event;
+ if (event_compare(last_event, event))
+ ret = -EEXIST;
+
+ spin_unlock(&event->lock);
+
+ return ret;
+}
+
static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
struct inode *to_tell;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
@@ -42,15 +98,13 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
to_tell = event->to_tell;
- spin_lock(&to_tell->i_lock);
- entry = fsnotify_find_mark_entry(group, to_tell);
- spin_unlock(&to_tell->i_lock);
+ fsn_mark = fsnotify_find_inode_mark(group, to_tell);
/* race with watch removal? We already passes should_send */
- if (unlikely(!entry))
+ if (unlikely(!fsn_mark))
return 0;
- ientry = container_of(entry, struct inotify_inode_mark_entry,
- fsn_entry);
- wd = ientry->wd;
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark,
+ fsn_mark);
+ wd = i_mark->wd;
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_KERNEL);
if (unlikely(!event_priv))
@@ -61,7 +115,7 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
fsn_event_priv->group = group;
event_priv->wd = wd;
- ret = fsnotify_add_notify_event(group, event, fsn_event_priv);
+ ret = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge, NULL);
if (ret) {
inotify_free_event_priv(fsn_event_priv);
/* EEXIST says we tail matched, EOVERFLOW isn't something
@@ -72,35 +126,35 @@ static int inotify_handle_event(struct fsnotify_group *group, struct fsnotify_ev
}
/*
- * If we hold the entry until after the event is on the queue
+ * If we hold the fsn_mark until after the event is on the queue
* IN_IGNORED won't be able to pass this event in the queue
*/
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
return ret;
}
-static void inotify_freeing_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group)
+static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group)
{
- inotify_ignored_and_remove_idr(entry, group);
+ inotify_ignored_and_remove_idr(fsn_mark, group);
}
-static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, __u32 mask)
+static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
{
- struct fsnotify_mark_entry *entry;
+ struct fsnotify_mark *fsn_mark;
bool send;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
return false;
mask = (mask & ~FS_EVENT_ON_CHILD);
- send = (entry->mask & mask);
+ send = (fsn_mark->mask & mask);
/* find took a reference */
- fsnotify_put_mark(entry);
+ fsnotify_put_mark(fsn_mark);
return send;
}
@@ -114,18 +168,18 @@ static bool inotify_should_send_event(struct fsnotify_group *group, struct inode
*/
static int idr_callback(int id, void *p, void *data)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
static bool warned = false;
if (warned)
return 0;
- warned = false;
- entry = p;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ warned = true;
+ fsn_mark = p;
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- WARN(1, "inotify closing but id=%d for entry=%p in group=%p still in "
+ WARN(1, "inotify closing but id=%d for fsn_mark=%p in group=%p still in "
"idr. Probably leaking memory\n", id, p, data);
/*
@@ -134,9 +188,9 @@ static int idr_callback(int id, void *p, void *data)
* out why we got here and the panic is no worse than the original
* BUG() that was here.
*/
- if (entry)
- printk(KERN_WARNING "entry->group=%p inode=%p wd=%d\n",
- entry->group, entry->inode, ientry->wd);
+ if (fsn_mark)
+ printk(KERN_WARNING "fsn_mark->group=%p inode=%p wd=%d\n",
+ fsn_mark->group, fsn_mark->i.inode, i_mark->wd);
return 0;
}
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 8271cf05c957..bc1f395c6750 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -55,12 +55,6 @@ int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
-/*
- * When inotify registers a new group it increments this and uses that
- * value as an offset to set the fsnotify group "name" and priority.
- */
-static atomic_t inotify_grp_num;
-
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
@@ -361,58 +355,158 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns
return error;
}
+static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
+ int *last_wd,
+ struct inotify_inode_mark *i_mark)
+{
+ int ret;
+
+ do {
+ if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
+ return -ENOMEM;
+
+ spin_lock(idr_lock);
+ ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
+ &i_mark->wd);
+ /* we added the mark to the idr, take a reference */
+ if (!ret) {
+ *last_wd = i_mark->wd;
+ fsnotify_get_mark(&i_mark->fsn_mark);
+ }
+ spin_unlock(idr_lock);
+ } while (ret == -EAGAIN);
+
+ return ret;
+}
+
+static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
+ int wd)
+{
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ struct inotify_inode_mark *i_mark;
+
+ assert_spin_locked(idr_lock);
+
+ i_mark = idr_find(idr, wd);
+ if (i_mark) {
+ struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
+
+ fsnotify_get_mark(fsn_mark);
+ /* One ref for being in the idr, one ref we just took */
+ BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
+ }
+
+ return i_mark;
+}
+
+static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
+ int wd)
+{
+ struct inotify_inode_mark *i_mark;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+
+ spin_lock(idr_lock);
+ i_mark = inotify_idr_find_locked(group, wd);
+ spin_unlock(idr_lock);
+
+ return i_mark;
+}
+
+static void do_inotify_remove_from_idr(struct fsnotify_group *group,
+ struct inotify_inode_mark *i_mark)
+{
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ int wd = i_mark->wd;
+
+ assert_spin_locked(idr_lock);
+
+ idr_remove(idr, wd);
+
+ /* removed from the idr, drop that ref */
+ fsnotify_put_mark(&i_mark->fsn_mark);
+}
+
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
- struct inotify_inode_mark_entry *ientry)
+ struct inotify_inode_mark *i_mark)
{
- struct idr *idr;
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *found_ientry;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
+ struct inotify_inode_mark *found_i_mark = NULL;
int wd;
- spin_lock(&group->inotify_data.idr_lock);
- idr = &group->inotify_data.idr;
- wd = ientry->wd;
+ spin_lock(idr_lock);
+ wd = i_mark->wd;
- if (wd == -1)
+ /*
+ * does this i_mark think it is in the idr? we shouldn't get called
+ * if it wasn't....
+ */
+ if (wd == -1) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
+ }
- entry = idr_find(&group->inotify_data.idr, wd);
- if (unlikely(!entry))
+ /* Lets look in the idr to see if we find it */
+ found_i_mark = inotify_idr_find_locked(group, wd);
+ if (unlikely(!found_i_mark)) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
+ }
- found_ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
- if (unlikely(found_ientry != ientry)) {
- /* We found an entry in the idr with the right wd, but it's
- * not the entry we were told to remove. eparis seriously
- * fucked up somewhere. */
- WARN_ON(1);
- ientry->wd = -1;
+ /*
+ * We found an mark in the idr at the right wd, but it's
+ * not the mark we were told to remove. eparis seriously
+ * fucked up somewhere.
+ */
+ if (unlikely(found_i_mark != i_mark)) {
+ WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
+ "mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
+ "found_i_mark->group=%p found_i_mark->inode=%p\n",
+ __func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
+ i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
+ found_i_mark->fsn_mark.group,
+ found_i_mark->fsn_mark.i.inode);
goto out;
}
- /* One ref for being in the idr, one ref held by the caller */
- BUG_ON(atomic_read(&entry->refcnt) < 2);
-
- idr_remove(idr, wd);
- ientry->wd = -1;
+ /*
+ * One ref for being in the idr
+ * one ref held by the caller trying to kill us
+ * one ref grabbed by inotify_idr_find
+ */
+ if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
+ printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
+ " i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
+ i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
+ /* we can't really recover with bad ref cnting.. */
+ BUG();
+ }
- /* removed from the idr, drop that ref */
- fsnotify_put_mark(entry);
+ do_inotify_remove_from_idr(group, i_mark);
out:
- spin_unlock(&group->inotify_data.idr_lock);
+ /* match the ref taken by inotify_idr_find_locked() */
+ if (found_i_mark)
+ fsnotify_put_mark(&found_i_mark->fsn_mark);
+ i_mark->wd = -1;
+ spin_unlock(idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
-void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
+void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
- struct inotify_inode_mark_entry *ientry;
+ struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
@@ -424,7 +518,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
if (!ignored_event)
return;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
@@ -433,9 +527,9 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark_entry *entry,
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
- event_priv->wd = ientry->wd;
+ event_priv->wd = i_mark->wd;
- ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv);
+ ret = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL, NULL);
if (ret)
inotify_free_event_priv(fsn_event_priv);
@@ -444,26 +538,28 @@ skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
- /* remove this entry from the idr */
- inotify_remove_from_idr(group, ientry);
+ /* remove this mark from the idr */
+ inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
-static void inotify_free_mark(struct fsnotify_mark_entry *entry)
+static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
- struct inotify_inode_mark_entry *ientry = (struct inotify_inode_mark_entry *)entry;
+ struct inotify_inode_mark *i_mark;
+
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- kmem_cache_free(inotify_inode_mark_cachep, ientry);
+ kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
- struct fsnotify_mark_entry *entry;
- struct inotify_inode_mark_entry *ientry;
+ struct fsnotify_mark *fsn_mark;
+ struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
@@ -474,36 +570,32 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
if (unlikely(!mask))
return -EINVAL;
- spin_lock(&inode->i_lock);
- entry = fsnotify_find_mark_entry(group, inode);
- spin_unlock(&inode->i_lock);
- if (!entry)
+ fsn_mark = fsnotify_find_inode_mark(group, inode);
+ if (!fsn_mark)
return -ENOENT;
- ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
- spin_lock(&entry->lock);
+ spin_lock(&fsn_mark->lock);
- old_mask = entry->mask;
- if (add) {
- entry->mask |= mask;
- new_mask = entry->mask;
- } else {
- entry->mask = mask;
- new_mask = entry->mask;
- }
+ old_mask = fsn_mark->mask;
+ if (add)
+ fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
+ else
+ fsnotify_set_mark_mask_locked(fsn_mark, mask);
+ new_mask = fsn_mark->mask;
- spin_unlock(&entry->lock);
+ spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
- /* more bits in this entry than the inode's mask? */
+ /* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
- /* more bits in this entry than the group? */
+ /* more bits in this fsn_mark than the group? */
int do_group = (new_mask & ~group->mask);
- /* update the inode with this new entry */
+ /* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
@@ -513,10 +605,10 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,
}
/* return the wd */
- ret = ientry->wd;
+ ret = i_mark->wd;
- /* match the get from fsnotify_find_mark_entry() */
- fsnotify_put_mark(entry);
+ /* match the get from fsnotify_find_mark() */
+ fsnotify_put_mark(fsn_mark);
return ret;
}
@@ -525,73 +617,55 @@ static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
- struct inotify_inode_mark_entry *tmp_ientry;
+ struct inotify_inode_mark *tmp_i_mark;
__u32 mask;
int ret;
+ struct idr *idr = &group->inotify_data.idr;
+ spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!mask))
return -EINVAL;
- tmp_ientry = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
- if (unlikely(!tmp_ientry))
+ tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
+ if (unlikely(!tmp_i_mark))
return -ENOMEM;
- fsnotify_init_mark(&tmp_ientry->fsn_entry, inotify_free_mark);
- tmp_ientry->fsn_entry.mask = mask;
- tmp_ientry->wd = -1;
+ fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
+ tmp_i_mark->fsn_mark.mask = mask;
+ tmp_i_mark->wd = -1;
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
-retry:
- ret = -ENOMEM;
- if (unlikely(!idr_pre_get(&group->inotify_data.idr, GFP_KERNEL)))
- goto out_err;
- spin_lock(&group->inotify_data.idr_lock);
- ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
- group->inotify_data.last_wd,
- &tmp_ientry->wd);
- spin_unlock(&group->inotify_data.idr_lock);
- if (ret) {
- /* idr was out of memory allocate and try again */
- if (ret == -EAGAIN)
- goto retry;
+ ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
+ tmp_i_mark);
+ if (ret)
goto out_err;
- }
-
- /* we put the mark on the idr, take a reference */
- fsnotify_get_mark(&tmp_ientry->fsn_entry);
/* we are on the idr, now get on the inode */
- ret = fsnotify_add_mark(&tmp_ientry->fsn_entry, group, inode);
+ ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
- inotify_remove_from_idr(group, tmp_ientry);
+ inotify_remove_from_idr(group, tmp_i_mark);
goto out_err;
}
- /* update the idr hint, who cares about races, it's just a hint */
- group->inotify_data.last_wd = tmp_ientry->wd;
-
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
- /* return the watch descriptor for this new entry */
- ret = tmp_ientry->wd;
-
- /* match the ref from fsnotify_init_markentry() */
- fsnotify_put_mark(&tmp_ientry->fsn_entry);
+ /* return the watch descriptor for this new mark */
+ ret = tmp_i_mark->wd;
/* if this mark added a new event update the group mask */
if (mask & ~group->mask)
fsnotify_recalc_group_mask(group);
out_err:
- if (ret < 0)
- kmem_cache_free(inotify_inode_mark_cachep, tmp_ientry);
+ /* match the ref from fsnotify_init_mark() */
+ fsnotify_put_mark(&tmp_i_mark->fsn_mark);
return ret;
}
@@ -620,11 +694,8 @@ retry:
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
- unsigned int grp_num;
- /* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
- grp_num = (INOTIFY_GROUP_NUM - atomic_inc_return(&inotify_grp_num));
- group = fsnotify_obtain_group(grp_num, 0, &inotify_fsnotify_ops);
+ group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
@@ -632,7 +703,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
- group->inotify_data.last_wd = 1;
+ group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
@@ -749,7 +820,7 @@ fput_and_out:
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
- struct fsnotify_mark_entry *entry;
+ struct inotify_inode_mark *i_mark;
struct file *filp;
int ret = 0, fput_needed;
@@ -758,25 +829,23 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
return -EBADF;
/* verify that this is indeed an inotify instance */
- if (unlikely(filp->f_op != &inotify_fops)) {
- ret = -EINVAL;
+ ret = -EINVAL;
+ if (unlikely(filp->f_op != &inotify_fops))
goto out;
- }
group = filp->private_data;
- spin_lock(&group->inotify_data.idr_lock);
- entry = idr_find(&group->inotify_data.idr, wd);
- if (unlikely(!entry)) {
- spin_unlock(&group->inotify_data.idr_lock);
- ret = -EINVAL;
+ ret = -EINVAL;
+ i_mark = inotify_idr_find(group, wd);
+ if (unlikely(!i_mark))
goto out;
- }
- fsnotify_get_mark(entry);
- spin_unlock(&group->inotify_data.idr_lock);
- fsnotify_destroy_mark_by_entry(entry);
- fsnotify_put_mark(entry);
+ ret = 0;
+
+ fsnotify_destroy_mark(&i_mark->fsn_mark);
+
+ /* match ref taken by inotify_idr_find */
+ fsnotify_put_mark(&i_mark->fsn_mark);
out:
fput_light(filp, fput_needed);
@@ -814,7 +883,7 @@ static int __init inotify_user_setup(void)
if (IS_ERR(inotify_mnt))
panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));
- inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark_entry, SLAB_PANIC);
+ inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_max_queued_events = 16384;
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
new file mode 100644
index 000000000000..1e824e64441d
--- /dev/null
+++ b/fs/notify/mark.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * fsnotify inode mark locking/lifetime/and refcnting
+ *
+ * REFCNT:
+ * The mark->refcnt tells how many "things" in the kernel currently are
+ * referencing this object. The object typically will live inside the kernel
+ * with a refcnt of 2, one for each list it is on (i_list, g_list). Any task
+ * which can find this object holding the appropriete locks, can take a reference
+ * and the object itself is guarenteed to survive until the reference is dropped.
+ *
+ * LOCKING:
+ * There are 3 spinlocks involved with fsnotify inode marks and they MUST
+ * be taken in order as follows:
+ *
+ * mark->lock
+ * group->mark_lock
+ * inode->i_lock
+ *
+ * mark->lock protects 2 things, mark->group and mark->inode. You must hold
+ * that lock to dereference either of these things (they could be NULL even with
+ * the lock)
+ *
+ * group->mark_lock protects the marks_list anchored inside a given group
+ * and each mark is hooked via the g_list. It also sorta protects the
+ * free_g_list, which when used is anchored by a private list on the stack of the
+ * task which held the group->mark_lock.
+ *
+ * inode->i_lock protects the i_fsnotify_marks list anchored inside a
+ * given inode and each mark is hooked via the i_list. (and sorta the
+ * free_i_list)
+ *
+ *
+ * LIFETIME:
+ * Inode marks survive between when they are added to an inode and when their
+ * refcnt==0.
+ *
+ * The inode mark can be cleared for a number of different reasons including:
+ * - The inode is unlinked for the last time. (fsnotify_inode_remove)
+ * - The inode is being evicted from cache. (fsnotify_inode_delete)
+ * - The fs the inode is on is unmounted. (fsnotify_inode_delete/fsnotify_unmount_inodes)
+ * - Something explicitly requests that it be removed. (fsnotify_destroy_mark)
+ * - The fsnotify_group associated with the mark is going away and all such marks
+ * need to be cleaned up. (fsnotify_clear_marks_by_group)
+ *
+ * Worst case we are given an inode and need to clean up all the marks on that
+ * inode. We take i_lock and walk the i_fsnotify_marks safely. For each
+ * mark on the list we take a reference (so the mark can't disappear under us).
+ * We remove that mark form the inode's list of marks and we add this mark to a
+ * private list anchored on the stack using i_free_list; At this point we no
+ * longer fear anything finding the mark using the inode's list of marks.
+ *
+ * We can safely and locklessly run the private list on the stack of everything
+ * we just unattached from the original inode. For each mark on the private list
+ * we grab the mark-> and can thus dereference mark->group and mark->inode. If
+ * we see the group and inode are not NULL we take those locks. Now holding all
+ * 3 locks we can completely remove the mark from other tasks finding it in the
+ * future. Remember, 10 things might already be referencing this mark, but they
+ * better be holding a ref. We drop our reference we took before we unhooked it
+ * from the inode. When the ref hits 0 we can free the mark.
+ *
+ * Very similarly for freeing by group, except we use free_g_list.
+ *
+ * This has the very interesting property of being able to run concurrently with
+ * any (or all) other directions.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/writeback.h> /* for inode_lock */
+
+#include <asm/atomic.h>
+
+#include <linux/fsnotify_backend.h>
+#include "fsnotify.h"
+
+void fsnotify_get_mark(struct fsnotify_mark *mark)
+{
+ atomic_inc(&mark->refcnt);
+}
+
+void fsnotify_put_mark(struct fsnotify_mark *mark)
+{
+ if (atomic_dec_and_test(&mark->refcnt))
+ mark->free_mark(mark);
+}
+
+/*
+ * Any time a mark is getting freed we end up here.
+ * The caller had better be holding a reference to this mark so we don't actually
+ * do the final put under the mark->lock
+ */
+void fsnotify_destroy_mark(struct fsnotify_mark *mark)
+{
+ struct fsnotify_group *group;
+ struct inode *inode = NULL;
+
+ spin_lock(&mark->lock);
+
+ group = mark->group;
+
+ /* if !group something else already marked this to die */
+ if (!group) {
+ spin_unlock(&mark->lock);
+ return;
+ }
+
+ /* 1 from caller and 1 for being on i_list/g_list */
+ BUG_ON(atomic_read(&mark->refcnt) < 2);
+
+ spin_lock(&group->mark_lock);
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {
+ fsnotify_destroy_inode_mark(mark);
+ inode = mark->i.inode;
+ } else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT)
+ fsnotify_destroy_vfsmount_mark(mark);
+ else
+ BUG();
+
+ list_del_init(&mark->g_list);
+ mark->group = NULL;
+
+ fsnotify_put_mark(mark); /* for i_list and g_list */
+
+ spin_unlock(&group->mark_lock);
+ spin_unlock(&mark->lock);
+
+ /*
+ * Some groups like to know that marks are being freed. This is a
+ * callback to the group function to let it know that this mark
+ * is being freed.
+ */
+ if (group->ops->freeing_mark)
+ group->ops->freeing_mark(mark, group);
+
+ /*
+ * __fsnotify_update_child_dentry_flags(inode);
+ *
+ * I really want to call that, but we can't, we have no idea if the inode
+ * still exists the second we drop the mark->lock.
+ *
+ * The next time an event arrive to this inode from one of it's children
+ * __fsnotify_parent will see that the inode doesn't care about it's
+ * children and will update all of these flags then. So really this
+ * is just a lazy update (and could be a perf win...)
+ */
+
+ if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
+ iput(inode);
+
+ /*
+ * it's possible that this group tried to destroy itself, but this
+ * this mark was simultaneously being freed by inode. If that's the
+ * case, we finish freeing the group here.
+ */
+ if (unlikely(atomic_dec_and_test(&group->num_marks)))
+ fsnotify_final_destroy_group(group);
+}
+
+void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask)
+{
+ assert_spin_locked(&mark->lock);
+
+ mark->mask = mask;
+
+ if (mark->flags & FSNOTIFY_MARK_FLAG_INODE)
+ fsnotify_set_inode_mark_mask_locked(mark, mask);
+}
+
+void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask)
+{
+ assert_spin_locked(&mark->lock);
+
+ mark->ignored_mask = mask;
+}
+
+/*
+ * Attach an initialized mark to a given group and fs object.
+ * These marks may be used for the fsnotify backend to determine which
+ * event types should be delivered to which group.
+ */
+int fsnotify_add_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, int allow_dups)
+{
+ int ret = 0;
+
+ BUG_ON(inode && mnt);
+ BUG_ON(!inode && !mnt);
+
+ /*
+ * if this group isn't being testing for inode type events we need
+ * to start testing
+ */
+ if (inode && unlikely(list_empty(&group->inode_group_list)))
+ fsnotify_add_inode_group(group);
+ else if (mnt && unlikely(list_empty(&group->vfsmount_group_list)))
+ fsnotify_add_vfsmount_group(group);
+
+ /*
+ * LOCKING ORDER!!!!
+ * mark->lock
+ * group->mark_lock
+ * inode->i_lock
+ */
+ spin_lock(&mark->lock);
+ spin_lock(&group->mark_lock);
+
+ mark->group = group;
+ list_add(&mark->g_list, &group->marks_list);
+ atomic_inc(&group->num_marks);
+ fsnotify_get_mark(mark); /* for i_list and g_list */
+
+ if (inode) {
+ ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
+ if (ret)
+ goto err;
+ } else if (mnt) {
+ ret = fsnotify_add_vfsmount_mark(mark, group, mnt, allow_dups);
+ if (ret)
+ goto err;
+ } else {
+ BUG();
+ }
+
+ spin_unlock(&group->mark_lock);
+
+ /* this will pin the object if appropriate */
+ fsnotify_set_mark_mask_locked(mark, mark->mask);
+
+ spin_unlock(&mark->lock);
+
+ if (inode)
+ __fsnotify_update_child_dentry_flags(inode);
+
+ return ret;
+err:
+ mark->group = NULL;
+ list_del_init(&mark->g_list);
+ atomic_dec(&group->num_marks);
+ fsnotify_put_mark(mark);
+
+ spin_unlock(&group->mark_lock);
+ spin_unlock(&mark->lock);
+
+ return ret;
+}
+
+/*
+ * clear any marks in a group in which mark->flags & flags is true
+ */
+void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
+ unsigned int flags)
+{
+ struct fsnotify_mark *lmark, *mark;
+ LIST_HEAD(free_list);
+
+ spin_lock(&group->mark_lock);
+ list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+ if (mark->flags & flags) {
+ list_add(&mark->free_g_list, &free_list);
+ list_del_init(&mark->g_list);
+ fsnotify_get_mark(mark);
+ }
+ }
+ spin_unlock(&group->mark_lock);
+
+ list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
+ }
+}
+
+/*
+ * Given a group, destroy all of the marks associated with that group.
+ */
+void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1);
+}
+
+void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
+{
+ assert_spin_locked(&old->lock);
+ new->i.inode = old->i.inode;
+ new->m.mnt = old->m.mnt;
+ new->group = old->group;
+ new->mask = old->mask;
+ new->free_mark = old->free_mark;
+}
+
+/*
+ * Nothing fancy, just initialize lists and locks and counters.
+ */
+void fsnotify_init_mark(struct fsnotify_mark *mark,
+ void (*free_mark)(struct fsnotify_mark *mark))
+{
+ memset(mark, 0, sizeof(*mark));
+ spin_lock_init(&mark->lock);
+ atomic_set(&mark->refcnt, 1);
+ mark->free_mark = free_mark;
+}
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index b8bf53b4c108..2d50a40ab1e4 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -56,7 +56,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
* it is needed. It's refcnt is set 1 at kernel init time and will never
* get set to 0 so it will never get 'freed'
*/
-static struct fsnotify_event q_overflow_event;
+static struct fsnotify_event *q_overflow_event;
static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
/**
@@ -93,6 +93,7 @@ void fsnotify_put_event(struct fsnotify_event *event)
BUG_ON(!list_empty(&event->private_data_list));
kfree(event->file_name);
+ put_pid(event->tgid);
kmem_cache_free(fsnotify_event_cachep, event);
}
}
@@ -104,7 +105,8 @@ struct fsnotify_event_holder *fsnotify_alloc_event_holder(void)
void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder)
{
- kmem_cache_free(fsnotify_event_holder_cachep, holder);
+ if (holder)
+ kmem_cache_free(fsnotify_event_holder_cachep, holder);
}
/*
@@ -129,53 +131,20 @@ struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnot
}
/*
- * Check if 2 events contain the same information. We do not compare private data
- * but at this moment that isn't a problem for any know fsnotify listeners.
- */
-static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new)
-{
- if ((old->mask == new->mask) &&
- (old->to_tell == new->to_tell) &&
- (old->data_type == new->data_type) &&
- (old->name_len == new->name_len)) {
- switch (old->data_type) {
- case (FSNOTIFY_EVENT_INODE):
- /* remember, after old was put on the wait_q we aren't
- * allowed to look at the inode any more, only thing
- * left to check was if the file_name is the same */
- if (!old->name_len ||
- !strcmp(old->file_name, new->file_name))
- return true;
- break;
- case (FSNOTIFY_EVENT_PATH):
- if ((old->path.mnt == new->path.mnt) &&
- (old->path.dentry == new->path.dentry))
- return true;
- break;
- case (FSNOTIFY_EVENT_NONE):
- if (old->mask & FS_Q_OVERFLOW)
- return true;
- else if (old->mask & FS_IN_IGNORED)
- return false;
- return false;
- };
- }
- return false;
-}
-
-/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. If the event is successfully added to the
* group's notification queue, a reference is taken on event.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
- struct fsnotify_event_private_data *priv)
+ struct fsnotify_event_private_data *priv,
+ int (*merge)(struct list_head *,
+ struct fsnotify_event *,
+ void **arg),
+ void **arg)
{
struct fsnotify_event_holder *holder = NULL;
struct list_head *list = &group->notification_list;
- struct fsnotify_event_holder *last_holder;
- struct fsnotify_event *last_event;
- int ret = 0;
+ int rc = 0;
/*
* There is one fsnotify_event_holder embedded inside each fsnotify_event.
@@ -195,12 +164,24 @@ alloc_holder:
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
- event = &q_overflow_event;
- ret = -EOVERFLOW;
+ event = q_overflow_event;
+ rc = -EOVERFLOW;
/* sorry, no private data on the overflow event */
priv = NULL;
}
+ if (!list_empty(list) && merge) {
+ int ret;
+
+ ret = merge(list, event, arg);
+ if (ret) {
+ mutex_unlock(&group->notification_mutex);
+ if (holder != &event->holder)
+ fsnotify_destroy_event_holder(holder);
+ return ret;
+ }
+ }
+
spin_lock(&event->lock);
if (list_empty(&event->holder.event_list)) {
@@ -215,18 +196,6 @@ alloc_holder:
goto alloc_holder;
}
- if (!list_empty(list)) {
- last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list);
- last_event = last_holder->event;
- if (event_compare(last_event, event)) {
- spin_unlock(&event->lock);
- mutex_unlock(&group->notification_mutex);
- if (holder != &event->holder)
- fsnotify_destroy_event_holder(holder);
- return -EEXIST;
- }
- }
-
group->q_len++;
holder->event = event;
@@ -238,7 +207,7 @@ alloc_holder:
mutex_unlock(&group->notification_mutex);
wake_up(&group->notification_waitq);
- return ret;
+ return rc;
}
/*
@@ -314,25 +283,78 @@ void fsnotify_flush_notify(struct fsnotify_group *group)
static void initialize_event(struct fsnotify_event *event)
{
- event->holder.event = NULL;
INIT_LIST_HEAD(&event->holder.event_list);
atomic_set(&event->refcnt, 1);
spin_lock_init(&event->lock);
- event->path.dentry = NULL;
- event->path.mnt = NULL;
- event->inode = NULL;
- event->data_type = FSNOTIFY_EVENT_NONE;
-
INIT_LIST_HEAD(&event->private_data_list);
+}
+
+/*
+ * Caller damn well better be holding whatever mutex is protecting the
+ * old_holder->event_list and the new_event must be a clean event which
+ * cannot be found anywhere else in the kernel.
+ */
+int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
+ struct fsnotify_event *new_event)
+{
+ struct fsnotify_event *old_event = old_holder->event;
+ struct fsnotify_event_holder *new_holder = &new_event->holder;
+
+ enum event_spinlock_class {
+ SPINLOCK_OLD,
+ SPINLOCK_NEW,
+ };
+
+ /*
+ * if the new_event's embedded holder is in use someone
+ * screwed up and didn't give us a clean new event.
+ */
+ BUG_ON(!list_empty(&new_holder->event_list));
+
+ spin_lock_nested(&old_event->lock, SPINLOCK_OLD);
+ spin_lock_nested(&new_event->lock, SPINLOCK_NEW);
+
+ new_holder->event = new_event;
+ list_replace_init(&old_holder->event_list, &new_holder->event_list);
+
+ spin_unlock(&new_event->lock);
+ spin_unlock(&old_event->lock);
+
+ /* event == holder means we are referenced through the in event holder */
+ if (old_holder != &old_event->holder)
+ fsnotify_destroy_event_holder(old_holder);
+
+ fsnotify_get_event(new_event); /* on the list take reference */
+ fsnotify_put_event(old_event); /* off the list, drop reference */
+
+ return 0;
+}
+
+struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event)
+{
+ struct fsnotify_event *event;
+
+ event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL);
+ if (!event)
+ return NULL;
- event->to_tell = NULL;
+ memcpy(event, old_event, sizeof(*event));
+ initialize_event(event);
- event->file_name = NULL;
- event->name_len = 0;
+ if (event->name_len) {
+ event->file_name = kstrdup(old_event->file_name, GFP_KERNEL);
+ if (!event->file_name) {
+ kmem_cache_free(fsnotify_event_cachep, event);
+ return NULL;
+ }
+ }
+ event->tgid = get_pid(old_event->tgid);
+ if (event->data_type == FSNOTIFY_EVENT_PATH)
+ path_get(&event->path);
- event->sync_cookie = 0;
+ return event;
}
/*
@@ -353,7 +375,7 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
{
struct fsnotify_event *event;
- event = kmem_cache_alloc(fsnotify_event_cachep, gfp);
+ event = kmem_cache_zalloc(fsnotify_event_cachep, gfp);
if (!event)
return NULL;
@@ -368,30 +390,21 @@ struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
event->name_len = strlen(event->file_name);
}
+ event->tgid = get_pid(task_tgid(current));
event->sync_cookie = cookie;
event->to_tell = to_tell;
+ event->data_type = data_type;
switch (data_type) {
- case FSNOTIFY_EVENT_FILE: {
- struct file *file = data;
- struct path *path = &file->f_path;
- event->path.dentry = path->dentry;
- event->path.mnt = path->mnt;
- path_get(&event->path);
- event->data_type = FSNOTIFY_EVENT_PATH;
- break;
- }
case FSNOTIFY_EVENT_PATH: {
struct path *path = data;
event->path.dentry = path->dentry;
event->path.mnt = path->mnt;
path_get(&event->path);
- event->data_type = FSNOTIFY_EVENT_PATH;
break;
}
case FSNOTIFY_EVENT_INODE:
event->inode = data;
- event->data_type = FSNOTIFY_EVENT_INODE;
break;
case FSNOTIFY_EVENT_NONE:
event->inode = NULL;
@@ -412,8 +425,11 @@ __init int fsnotify_notification_init(void)
fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC);
fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC);
- initialize_event(&q_overflow_event);
- q_overflow_event.mask = FS_Q_OVERFLOW;
+ q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL,
+ FSNOTIFY_EVENT_NONE, NULL, 0,
+ GFP_KERNEL);
+ if (!q_overflow_event)
+ panic("unable to allocate fsnotify q_overflow_event\n");
return 0;
}
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
new file mode 100644
index 000000000000..8f1aa02f4f02
--- /dev/null
+++ b/fs/notify/vfsmount_mark.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/writeback.h> /* for inode_lock */
+
+#include <asm/atomic.h>
+
+#include <linux/fsnotify_backend.h>
+#include "fsnotify.h"
+
+void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark, *lmark;
+ struct hlist_node *pos, *n;
+ LIST_HEAD(free_list);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+ hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) {
+ list_add(&mark->m.free_m_list, &free_list);
+ hlist_del_init(&mark->m.m_list);
+ fsnotify_get_mark(mark);
+ }
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) {
+ fsnotify_destroy_mark(mark);
+ fsnotify_put_mark(mark);
+ }
+}
+
+void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_VFSMOUNT);
+}
+
+/*
+ * Recalculate the mask of events relevant to a given vfsmount locked.
+ */
+static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *pos;
+ __u32 new_mask = 0;
+
+ assert_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list)
+ new_mask |= mark->mask;
+ mnt->mnt_fsnotify_mask = new_mask;
+}
+
+/*
+ * Recalculate the mnt->mnt_fsnotify_mask, or the mask of all FS_* event types
+ * any notifier is interested in hearing for this mount point
+ */
+void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt)
+{
+ spin_lock(&mnt->mnt_root->d_lock);
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+ spin_unlock(&mnt->mnt_root->d_lock);
+}
+
+void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)
+{
+ struct vfsmount *mnt = mark->m.mnt;
+
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&mark->group->mark_lock);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+
+ hlist_del_init(&mark->m.m_list);
+ mark->m.mnt = NULL;
+
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+
+ spin_unlock(&mnt->mnt_root->d_lock);
+}
+
+static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,
+ struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+ struct hlist_node *pos;
+
+ assert_spin_locked(&mnt->mnt_root->d_lock);
+
+ hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) {
+ if (mark->group == group) {
+ fsnotify_get_mark(mark);
+ return mark;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * given a group and vfsmount, find the mark associated with that combination.
+ * if found take a reference to that mark and return it, else return NULL
+ */
+struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group,
+ struct vfsmount *mnt)
+{
+ struct fsnotify_mark *mark;
+
+ spin_lock(&mnt->mnt_root->d_lock);
+ mark = fsnotify_find_vfsmount_mark_locked(group, mnt);
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ return mark;
+}
+
+/*
+ * Attach an initialized mark to a given group and vfsmount.
+ * These marks may be used for the fsnotify backend to determine which
+ * event types should be delivered to which groups.
+ */
+int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group, struct vfsmount *mnt,
+ int allow_dups)
+{
+ struct fsnotify_mark *lmark = NULL;
+ int ret = 0;
+
+ mark->flags = FSNOTIFY_MARK_FLAG_VFSMOUNT;
+
+ /*
+ * LOCKING ORDER!!!!
+ * mark->lock
+ * group->mark_lock
+ * mnt->mnt_root->d_lock
+ */
+ assert_spin_locked(&mark->lock);
+ assert_spin_locked(&group->mark_lock);
+
+ spin_lock(&mnt->mnt_root->d_lock);
+
+ if (!allow_dups)
+ lmark = fsnotify_find_vfsmount_mark_locked(group, mnt);
+ if (!lmark) {
+ mark->m.mnt = mnt;
+
+ hlist_add_head(&mark->m.m_list, &mnt->mnt_fsnotify_marks);
+
+ fsnotify_recalc_vfsmount_mask_locked(mnt);
+ } else {
+ ret = -EEXIST;
+ }
+
+ spin_unlock(&mnt->mnt_root->d_lock);
+
+ return ret;
+}
diff --git a/fs/open.c b/fs/open.c
index 040cef72bc00..a87457d92fc6 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -31,6 +31,7 @@
#include <linux/falloc.h>
#include <linux/fs_struct.h>
#include <linux/ima.h>
+#include <linux/dnotify.h>
#include "internal.h"
@@ -1057,7 +1058,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
put_unused_fd(fd);
fd = PTR_ERR(f);
} else {
- fsnotify_open(f->f_path.dentry);
+ fsnotify_open(f);
fd_install(fd, f);
}
}
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 64bc8998ac9a..e8865c11777f 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -412,9 +412,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
pdev = part_to_dev(p);
p->start_sect = start;
- p->alignment_offset = queue_sector_alignment_offset(disk->queue, start);
- p->discard_alignment = queue_sector_discard_alignment(disk->queue,
- start);
+ p->alignment_offset =
+ queue_limit_alignment_offset(&disk->queue->limits, start);
+ p->discard_alignment =
+ queue_limit_discard_alignment(&disk->queue->limits, start);
p->nr_sects = len;
p->partno = partno;
p->policy = get_disk_ro(disk);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f560325c444f..13b5d0708175 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -327,94 +327,6 @@ static inline void task_context_switch_counts(struct seq_file *m,
p->nivcsw);
}
-#ifdef CONFIG_MMU
-
-struct stack_stats {
- struct vm_area_struct *vma;
- unsigned long startpage;
- unsigned long usage;
-};
-
-static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
-{
- struct stack_stats *ss = walk->private;
- struct vm_area_struct *vma = ss->vma;
- pte_t *pte, ptent;
- spinlock_t *ptl;
- int ret = 0;
-
- pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- for (; addr != end; pte++, addr += PAGE_SIZE) {
- ptent = *pte;
-
-#ifdef CONFIG_STACK_GROWSUP
- if (pte_present(ptent) || is_swap_pte(ptent))
- ss->usage = addr - ss->startpage + PAGE_SIZE;
-#else
- if (pte_present(ptent) || is_swap_pte(ptent)) {
- ss->usage = ss->startpage - addr + PAGE_SIZE;
- pte++;
- ret = 1;
- break;
- }
-#endif
- }
- pte_unmap_unlock(pte - 1, ptl);
- cond_resched();
- return ret;
-}
-
-static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
- struct task_struct *task)
-{
- struct stack_stats ss;
- struct mm_walk stack_walk = {
- .pmd_entry = stack_usage_pte_range,
- .mm = vma->vm_mm,
- .private = &ss,
- };
-
- if (!vma->vm_mm || is_vm_hugetlb_page(vma))
- return 0;
-
- ss.vma = vma;
- ss.startpage = task->stack_start & PAGE_MASK;
- ss.usage = 0;
-
-#ifdef CONFIG_STACK_GROWSUP
- walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
- &stack_walk);
-#else
- walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
- &stack_walk);
-#endif
- return ss.usage;
-}
-
-static inline void task_show_stack_usage(struct seq_file *m,
- struct task_struct *task)
-{
- struct vm_area_struct *vma;
- struct mm_struct *mm = get_task_mm(task);
-
- if (mm) {
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, task->stack_start);
- if (vma)
- seq_printf(m, "Stack usage:\t%lu kB\n",
- get_stack_usage_in_bytes(vma, task) >> 10);
-
- up_read(&mm->mmap_sem);
- mmput(mm);
- }
-}
-#else
-static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
-{
-}
-#endif /* CONFIG_MMU */
-
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Cpus_allowed:\t");
@@ -445,7 +357,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
task_show_regs(m, task);
#endif
task_context_switch_counts(m, task);
- task_show_stack_usage(m, task);
return 0;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18d5cc62d8ed..8fbcc84c268f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -477,19 +477,30 @@ static const struct limit_names lnames[RLIM_NLIMITS] = {
};
/* Display limits for a process */
-static int proc_pid_limits(struct task_struct *task, char *buffer)
+static ssize_t limits_read(struct file *file, char __user *buf, size_t rcount,
+ loff_t *ppos)
{
- unsigned int i;
- int count = 0;
- unsigned long flags;
- char *bufptr = buffer;
-
struct rlimit rlim[RLIM_NLIMITS];
+ struct task_struct *task;
+ unsigned long flags;
+ unsigned int i;
+ ssize_t count = 0;
+ char *bufptr;
- if (!lock_task_sighand(task, &flags))
+ task = get_proc_task(file->f_path.dentry->d_inode);
+ if (!task)
+ return -ESRCH;
+ if (!lock_task_sighand(task, &flags)) {
+ put_task_struct(task);
return 0;
+ }
memcpy(rlim, task->signal->rlim, sizeof(struct rlimit) * RLIM_NLIMITS);
unlock_task_sighand(task, &flags);
+ put_task_struct(task);
+
+ bufptr = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!bufptr)
+ return -ENOMEM;
/*
* print the file header
@@ -518,9 +529,81 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
count += sprintf(&bufptr[count], "\n");
}
+ count = simple_read_from_buffer(buf, rcount, ppos, bufptr, count);
+
+ free_page((unsigned long)bufptr);
+
+ return count;
+}
+
+static ssize_t limits_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
+ char str[32 + 1 + 16 + 1 + 16 + 1], *delim, *next;
+ struct rlimit new_rlimit;
+ unsigned int i;
+ int ret;
+
+ if (!task) {
+ count = -ESRCH;
+ goto out;
+ }
+ if (copy_from_user(str, buf, min(count, sizeof(str) - 1))) {
+ count = -EFAULT;
+ goto put_task;
+ }
+
+ str[min(count, sizeof(str) - 1)] = 0;
+
+ delim = strchr(str, '=');
+ if (!delim) {
+ count = -EINVAL;
+ goto put_task;
+ }
+ *delim++ = 0; /* for easy 'str' usage */
+ new_rlimit.rlim_cur = simple_strtoul(delim, &next, 0);
+ if (*next != ':') {
+ if (strncmp(delim, "unlimited:", 10)) {
+ count = -EINVAL;
+ goto put_task;
+ }
+ new_rlimit.rlim_cur = RLIM_INFINITY;
+ next = delim + 9; /* move to ':' */
+ }
+ delim = next + 1;
+ new_rlimit.rlim_max = simple_strtoul(delim, &next, 0);
+ if (*next != 0) {
+ if (strcmp(delim, "unlimited")) {
+ count = -EINVAL;
+ goto put_task;
+ }
+ new_rlimit.rlim_max = RLIM_INFINITY;
+ }
+
+ for (i = 0; i < RLIM_NLIMITS; i++)
+ if (!strcmp(str, lnames[i].name))
+ break;
+ if (i >= RLIM_NLIMITS) {
+ count = -EINVAL;
+ goto put_task;
+ }
+
+ ret = do_setrlimit(task, i, &new_rlimit);
+ if (ret)
+ count = ret;
+
+put_task:
+ put_task_struct(task);
+out:
return count;
}
+static const struct file_operations proc_pid_limits_operations = {
+ .read = limits_read,
+ .write = limits_write,
+};
+
#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
static int proc_pid_syscall(struct task_struct *task, char *buffer)
{
@@ -2566,7 +2649,7 @@ static const struct pid_entry tgid_base_stuff[] = {
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
ONE("personality", S_IRUSR, proc_pid_personality),
- INF("limits", S_IRUSR, proc_pid_limits),
+ REG("limits", S_IRUSR|S_IWUSR, proc_pid_limits_operations),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
@@ -2901,7 +2984,7 @@ static const struct pid_entry tid_base_stuff[] = {
INF("auxv", S_IRUSR, proc_pid_auxv),
ONE("status", S_IRUGO, proc_pid_status),
ONE("personality", S_IRUSR, proc_pid_personality),
- INF("limits", S_IRUSR, proc_pid_limits),
+ REG("limits", S_IRUSR|S_IWUSR, proc_pid_limits_operations),
#ifdef CONFIG_SCHED_DEBUG
REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
#endif
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 1f24a3eddd12..36d55e156ab5 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -32,11 +32,11 @@ extern struct mm_struct *mm_for_maps(struct task_struct *);
#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
-extern void get_vmalloc_info(struct vmalloc_info *vmi);
+extern void get_vmalloc_info(struct vmalloc_info *vmi, int lock);
#else
#define VMALLOC_TOTAL 0UL
-#define get_vmalloc_info(vmi) \
+#define get_vmalloc_info(vmi, lock) \
do { \
(vmi)->used = 0; \
(vmi)->largest_chunk = 0; \
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index a65239cfd97e..b967cf21c61f 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/swap.h>
#include <linux/vmstat.h>
+#include <linux/kdb.h>
#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -19,7 +20,7 @@ void __attribute__((weak)) arch_report_meminfo(struct seq_file *m)
{
}
-static int meminfo_proc_show(struct seq_file *m, void *v)
+int _meminfo_proc_show(struct seq_file *m, void *v, int lock)
{
struct sysinfo i;
unsigned long committed;
@@ -34,7 +35,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
*/
#define K(x) ((x) << (PAGE_SHIFT - 10))
si_meminfo(&i);
- si_swapinfo(&i);
+ if (lock)
+ si_swapinfo(&i);
+ else
+ __si_swapinfo(&i);
committed = percpu_counter_read_positive(&vm_committed_as);
allowed = ((totalram_pages - hugetlb_total_pages())
* sysctl_overcommit_ratio / 100) + total_swap_pages;
@@ -44,7 +48,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
if (cached < 0)
cached = 0;
- get_vmalloc_info(&vmi);
+ get_vmalloc_info(&vmi, lock);
for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
pages[lru] = global_page_state(NR_LRU_BASE + lru);
@@ -161,6 +165,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#undef K
}
+static int meminfo_proc_show(struct seq_file *m, void *v)
+{
+ return _meminfo_proc_show(m, v, 1);
+}
+
static int meminfo_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, meminfo_proc_show, NULL);
diff --git a/fs/proc/mmu.c b/fs/proc/mmu.c
index 8ae221dfd010..10a0f8bbbe4a 100644
--- a/fs/proc/mmu.c
+++ b/fs/proc/mmu.c
@@ -14,7 +14,7 @@
#include <asm/pgtable.h>
#include "internal.h"
-void get_vmalloc_info(struct vmalloc_info *vmi)
+void get_vmalloc_info(struct vmalloc_info *vmi, int lock)
{
struct vm_struct *vma;
unsigned long free_area_size;
@@ -30,7 +30,8 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
prev_end = VMALLOC_START;
- read_lock(&vmlist_lock);
+ if (lock)
+ read_lock(&vmlist_lock);
for (vma = vmlist; vma; vma = vma->next) {
unsigned long addr = (unsigned long) vma->addr;
@@ -55,6 +56,7 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
if (VMALLOC_END - prev_end > vmi->largest_chunk)
vmi->largest_chunk = VMALLOC_END - prev_end;
- read_unlock(&vmlist_lock);
+ if (lock)
+ read_unlock(&vmlist_lock);
}
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 47c03f4336b8..f277c4a111cb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -361,12 +361,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (!pte_present(ptent))
continue;
- mss->resident += PAGE_SIZE;
-
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
+ mss->resident += PAGE_SIZE;
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
mss->referenced += PAGE_SIZE;
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index ebf3440d28ca..277575ddc05c 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -201,7 +201,8 @@ static const char *qnx4_checkroot(struct super_block *sb)
rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE);
if (rootdir->di_fname != NULL) {
QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
- if (!strncmp(rootdir->di_fname, QNX4_BMNAME, sizeof QNX4_BMNAME)) {
+ if (!strcmp(rootdir->di_fname,
+ QNX4_BMNAME)) {
found = 1;
qnx4_sb(sb)->BitMap = kmalloc( sizeof( struct qnx4_inode_entry ), GFP_KERNEL );
if (!qnx4_sb(sb)->BitMap) {
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index dea86abdf2e7..f6eaf0d8fd6a 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -100,9 +100,13 @@
*
* Any operation working on dquots via inode pointers must hold dqptr_sem. If
* operation is just reading pointers from inode (or not using them at all) the
- * read lock is enough. If pointers are altered function must hold write lock
- * (these locking rules also apply for S_NOQUOTA flag in the inode - note that
- * for altering the flag i_mutex is also needed).
+ * read lock is enough. If pointers are altered function must hold write lock.
+ * Special care needs to be taken about S_NOQUOTA inode flag (marking that
+ * inode is a quota file). Functions adding pointers from inode to dquots have
+ * to check this flag under dqptr_sem and then (if S_NOQUOTA is not set) they
+ * have to do all pointer modifications before dropping dqptr_sem. This makes
+ * sure they cannot race with quotaon which first sets S_NOQUOTA flag and
+ * then drops all pointers to dquots from an inode.
*
* Each dquot has its dq_lock mutex. Locked dquots might not be referenced
* from inodes (dquot_alloc_space() and such don't check the dq_lock).
@@ -1275,7 +1279,6 @@ int dquot_initialize(struct inode *inode, int type)
}
down_write(&sb_dqopt(sb)->dqptr_sem);
- /* Having dqptr_sem we know NOQUOTA flags can't be altered... */
if (IS_NOQUOTA(inode))
goto out_err;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1377,6 +1380,9 @@ static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
static qsize_t inode_get_rsv_space(struct inode *inode)
{
qsize_t ret;
+
+ if (!inode->i_sb->dq_op->get_reserved_space)
+ return 0;
spin_lock(&inode->i_lock);
ret = *inode_reserved_space(inode);
spin_unlock(&inode->i_lock);
@@ -1428,11 +1434,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) {
- inode_incr_space(inode, number, reserve);
- goto out_unlock;
- }
-
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
@@ -1463,7 +1464,6 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
mark_all_dquot_dirty(inode->i_dquot);
out_flush_warn:
flush_warnings(inode->i_dquot, warntype);
-out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
out:
return ret;
@@ -1496,10 +1496,6 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number)
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) {
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- return QUOTA_OK;
- }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1536,12 +1532,6 @@ int dquot_claim_space(struct inode *inode, qsize_t number)
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) {
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- inode_claim_rsv_space(inode, number);
- goto out;
- }
-
spin_lock(&dq_data_lock);
/* Claim reserved quotas to allocated quotas */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
@@ -1570,17 +1560,11 @@ int __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode)) {
-out_sub:
inode_decr_space(inode, number, reserve);
return QUOTA_OK;
}
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- /* Now recheck reliably when holding dqptr_sem */
- if (IS_NOQUOTA(inode)) {
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- goto out_sub;
- }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1633,11 +1617,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
return QUOTA_OK;
down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- /* Now recheck reliably when holding dqptr_sem */
- if (IS_NOQUOTA(inode)) {
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- return QUOTA_OK;
- }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (!inode->i_dquot[cnt])
@@ -1689,7 +1668,6 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
GRPQUOTA);
down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- /* Now recheck reliably when holding dqptr_sem */
if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
goto put_all;
@@ -2007,13 +1985,15 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
/* We don't want quota and atime on quota files (deadlocks
* possible) Also nobody should write to the file - we use
* special IO operations which ignore the immutable bit. */
- down_write(&dqopt->dqptr_sem);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
S_NOQUOTA);
inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
mutex_unlock(&inode->i_mutex);
- up_write(&dqopt->dqptr_sem);
+ /*
+ * When S_NOQUOTA is set, remove dquot references as no more
+ * references can be added
+ */
sb->dq_op->drop(inode);
}
@@ -2050,14 +2030,12 @@ out_file_init:
iput(inode);
out_lock:
if (oldflags != -1) {
- down_write(&dqopt->dqptr_sem);
mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
/* Set the flags back (in the case of accidental quotaon()
* on a wrong file we don't want to mess up the flags) */
inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
inode->i_flags |= oldflags;
mutex_unlock(&inode->i_mutex);
- up_write(&dqopt->dqptr_sem);
}
mutex_unlock(&dqopt->dqonoff_mutex);
out_fmt:
diff --git a/fs/read_write.c b/fs/read_write.c
index b7f4a1f94d48..1963debee646 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -293,7 +293,7 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos)
else
ret = do_sync_read(file, buf, count, pos);
if (ret > 0) {
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
add_rchar(current, ret);
}
inc_syscr(current);
@@ -348,7 +348,7 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_
else
ret = do_sync_write(file, buf, count, pos);
if (ret > 0) {
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
add_wchar(current, ret);
}
inc_syscw(current);
@@ -656,9 +656,9 @@ out:
kfree(iov);
if ((ret + (type == READ)) > 0) {
if (type == READ)
- fsnotify_access(file->f_path.dentry);
+ fsnotify_access(file);
else
- fsnotify_modify(file->f_path.dentry);
+ fsnotify_modify(file);
}
return ret;
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 1150ebb2536f..9087b10209e6 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3062,13 +3062,14 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
- int error;
unsigned int ia_valid;
+ int depth;
+ int error;
/* must be turned off for recursive notify_change calls */
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
- reiserfs_write_lock(inode->i_sb);
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
@@ -3149,8 +3150,17 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
journal_end(&th, inode->i_sb, jbegin_count);
}
}
- if (!error)
+ if (!error) {
+ /*
+ * Relax the lock here, as it might truncate the
+ * inode pages and wait for inode pages locks.
+ * To release such page lock, the owner needs the
+ * reiserfs lock
+ */
+ reiserfs_write_unlock_once(inode->i_sb, depth);
error = inode_setattr(inode, attr);
+ depth = reiserfs_write_lock_once(inode->i_sb);
+ }
}
if (!error && reiserfs_posixacl(inode->i_sb)) {
@@ -3159,7 +3169,8 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
}
out:
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, depth);
+
return error;
}
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index ace77451ceb1..f53505de0712 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -104,9 +104,10 @@ setflags_out:
err = put_user(inode->i_generation, (int __user *)arg);
break;
case REISERFS_IOC_SETVERSION:
- if (!is_owner_or_cap(inode))
+ if (!is_owner_or_cap(inode)) {
err = -EPERM;
break;
+ }
err = mnt_want_write(filp->f_path.mnt);
if (err)
break;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index c3b004ee627b..81f09fab8ae4 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -452,7 +452,9 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
}
if (dentry->d_inode) {
+ reiserfs_write_lock(inode->i_sb);
err = xattr_unlink(xadir->d_inode, dentry);
+ reiserfs_write_unlock(inode->i_sb);
update_ctime(inode);
}
@@ -486,17 +488,21 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- if (!buffer)
- return lookup_and_delete_xattr(inode, name);
-
reiserfs_write_unlock(inode->i_sb);
+
+ if (!buffer) {
+ err = lookup_and_delete_xattr(inode, name);
+ reiserfs_write_lock(inode->i_sb);
+ return err;
+ }
+
dentry = xattr_lookup(inode, name, flags);
if (IS_ERR(dentry)) {
reiserfs_write_lock(inode->i_sb);
return PTR_ERR(dentry);
}
- down_read(&REISERFS_I(inode)->i_xattr_sem);
+ down_write(&REISERFS_I(inode)->i_xattr_sem);
reiserfs_write_lock(inode->i_sb);
@@ -554,8 +560,12 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
.ia_size = buffer_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
+
+ reiserfs_write_unlock(inode->i_sb);
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
down_write(&dentry->d_inode->i_alloc_sem);
+ reiserfs_write_lock(inode->i_sb);
+
err = reiserfs_setattr(dentry, &newattrs);
up_write(&dentry->d_inode->i_alloc_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index cc32e6ada67b..dd20a7883f0f 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -455,7 +455,9 @@ int reiserfs_acl_chmod(struct inode *inode)
return 0;
}
+ reiserfs_write_unlock(inode->i_sb);
acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+ reiserfs_write_lock(inode->i_sb);
if (!acl)
return 0;
if (IS_ERR(acl))
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index f90231eb2916..03f523f6a9a7 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1402,20 +1402,19 @@ static int udf_update_inode(struct inode *inode, int do_sync)
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
struct udf_inode_info *iinfo = UDF_I(inode);
- bh = udf_tread(inode->i_sb,
- udf_get_lb_pblock(inode->i_sb,
- &iinfo->i_location, 0));
+ bh = udf_tgetblk(inode->i_sb,
+ udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
if (!bh) {
- udf_debug("bread failure\n");
- return -EIO;
+ udf_debug("getblk failure\n");
+ return -ENOMEM;
}
- memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
-
+ lock_buffer(bh);
+ memset(bh->b_data, 0, inode->i_sb->s_blocksize);
fe = (struct fileEntry *)bh->b_data;
efe = (struct extendedFileEntry *)bh->b_data;
- if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
+ if (iinfo->i_use) {
struct unallocSpaceEntry *use =
(struct unallocSpaceEntry *)bh->b_data;
@@ -1423,20 +1422,18 @@ static int udf_update_inode(struct inode *inode, int do_sync)
memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
+ use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
+ use->descTag.tagLocation =
+ cpu_to_le32(iinfo->i_location.logicalBlockNum);
crclen = sizeof(struct unallocSpaceEntry) +
iinfo->i_lenAlloc - sizeof(struct tag);
- use->descTag.tagLocation = cpu_to_le32(
- iinfo->i_location.
- logicalBlockNum);
use->descTag.descCRCLength = cpu_to_le16(crclen);
use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
sizeof(struct tag),
crclen));
use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
- mark_buffer_dirty(bh);
- brelse(bh);
- return err;
+ goto out;
}
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1591,18 +1588,21 @@ static int udf_update_inode(struct inode *inode, int do_sync)
fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
fe->descTag.tagLocation = cpu_to_le32(
iinfo->i_location.logicalBlockNum);
- crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
- sizeof(struct tag);
+ crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
fe->descTag.descCRCLength = cpu_to_le16(crclen);
fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
crclen));
fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
+out:
+ set_buffer_uptodate(bh);
+ unlock_buffer(bh);
+
/* write the data blocks */
mark_buffer_dirty(bh);
if (do_sync) {
sync_dirty_buffer(bh);
- if (buffer_req(bh) && !buffer_uptodate(bh)) {
+ if (buffer_write_io_error(bh)) {
printk(KERN_WARNING "IO error syncing udf inode "
"[%s:%08lx]\n", inode->i_sb->s_id,
inode->i_ino);
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 2512125dfa7c..883ca5ab8af5 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -251,8 +251,9 @@ xfs_set_mode(struct inode *inode, mode_t mode)
if (mode != inode->i_mode) {
struct iattr iattr;
- iattr.ia_valid = ATTR_MODE;
+ iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
iattr.ia_mode = mode;
+ iattr.ia_ctime = current_fs_time(inode->i_sb);
error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index c40834bdee58..c22a608321a3 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -33,51 +33,55 @@ struct xfs_dquot;
struct xlog_ticket;
struct log;
+DECLARE_EVENT_CLASS(xfs_attr_list_class,
+ TP_PROTO(struct xfs_attr_list_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(u32, hashval)
+ __field(u32, blkno)
+ __field(u32, offset)
+ __field(void *, alist)
+ __field(int, bufsize)
+ __field(int, count)
+ __field(int, firstu)
+ __field(int, dupcnt)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
+ __entry->ino = ctx->dp->i_ino;
+ __entry->hashval = ctx->cursor->hashval;
+ __entry->blkno = ctx->cursor->blkno;
+ __entry->offset = ctx->cursor->offset;
+ __entry->alist = ctx->alist;
+ __entry->bufsize = ctx->bufsize;
+ __entry->count = ctx->count;
+ __entry->firstu = ctx->firstu;
+ __entry->flags = ctx->flags;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
+ "alist 0x%p size %u count %u firstu %u flags %d %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->hashval,
+ __entry->blkno,
+ __entry->offset,
+ __entry->dupcnt,
+ __entry->alist,
+ __entry->bufsize,
+ __entry->count,
+ __entry->firstu,
+ __entry->flags,
+ __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
+ )
+)
+
#define DEFINE_ATTR_LIST_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_attr_list_class, name, \
TP_PROTO(struct xfs_attr_list_context *ctx), \
- TP_ARGS(ctx), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(u32, hashval) \
- __field(u32, blkno) \
- __field(u32, offset) \
- __field(void *, alist) \
- __field(int, bufsize) \
- __field(int, count) \
- __field(int, firstu) \
- __field(int, dupcnt) \
- __field(int, flags) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \
- __entry->ino = ctx->dp->i_ino; \
- __entry->hashval = ctx->cursor->hashval; \
- __entry->blkno = ctx->cursor->blkno; \
- __entry->offset = ctx->cursor->offset; \
- __entry->alist = ctx->alist; \
- __entry->bufsize = ctx->bufsize; \
- __entry->count = ctx->count; \
- __entry->firstu = ctx->firstu; \
- __entry->flags = ctx->flags; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \
- "alist 0x%p size %u count %u firstu %u flags %d %s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->hashval, \
- __entry->blkno, \
- __entry->offset, \
- __entry->dupcnt, \
- __entry->alist, \
- __entry->bufsize, \
- __entry->count, \
- __entry->firstu, \
- __entry->flags, \
- __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \
- ) \
-)
+ TP_ARGS(ctx))
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
@@ -178,91 +182,99 @@ TRACE_EVENT(xfs_iext_insert,
(char *)__entry->caller_ip)
);
+DECLARE_EVENT_CLASS(xfs_bmap_class,
+ TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
+ unsigned long caller_ip),
+ TP_ARGS(ip, idx, state, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_extnum_t, idx)
+ __field(xfs_fileoff_t, startoff)
+ __field(xfs_fsblock_t, startblock)
+ __field(xfs_filblks_t, blockcount)
+ __field(xfs_exntst_t, state)
+ __field(int, bmap_state)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ?
+ ip->i_afp : &ip->i_df;
+ struct xfs_bmbt_irec r;
+
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->idx = idx;
+ __entry->startoff = r.br_startoff;
+ __entry->startblock = r.br_startblock;
+ __entry->blockcount = r.br_blockcount;
+ __entry->state = r.br_state;
+ __entry->bmap_state = state;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
+ "offset %lld block %s count %lld flag %d caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
+ (long)__entry->idx,
+ __entry->startoff,
+ xfs_fmtfsblock(__entry->startblock),
+ __entry->blockcount,
+ __entry->state,
+ (char *)__entry->caller_ip)
+)
+
#define DEFINE_BMAP_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_bmap_class, name, \
TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
unsigned long caller_ip), \
- TP_ARGS(ip, idx, state, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(xfs_extnum_t, idx) \
- __field(xfs_fileoff_t, startoff) \
- __field(xfs_fsblock_t, startblock) \
- __field(xfs_filblks_t, blockcount) \
- __field(xfs_exntst_t, state) \
- __field(int, bmap_state) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \
- ip->i_afp : &ip->i_df; \
- struct xfs_bmbt_irec r; \
- \
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->idx = idx; \
- __entry->startoff = r.br_startoff; \
- __entry->startblock = r.br_startblock; \
- __entry->blockcount = r.br_blockcount; \
- __entry->state = r.br_state; \
- __entry->bmap_state = state; \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \
- "offset %lld block %s count %lld flag %d caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \
- (long)__entry->idx, \
- __entry->startoff, \
- xfs_fmtfsblock(__entry->startblock), \
- __entry->blockcount, \
- __entry->state, \
- (char *)__entry->caller_ip) \
-)
-
+ TP_ARGS(ip, idx, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_extlist);
-#define DEFINE_BUF_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
- TP_ARGS(bp, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_daddr_t, bno) \
- __field(size_t, buffer_length) \
- __field(int, hold) \
- __field(int, pincount) \
- __field(unsigned, lockval) \
- __field(unsigned, flags) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = bp->b_target->bt_dev; \
- __entry->bno = bp->b_bn; \
- __entry->buffer_length = bp->b_buffer_length; \
- __entry->hold = atomic_read(&bp->b_hold); \
- __entry->pincount = atomic_read(&bp->b_pin_count); \
- __entry->lockval = xfs_buf_lock_value(bp); \
- __entry->flags = bp->b_flags; \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
- "lock %d flags %s caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- (unsigned long long)__entry->bno, \
- __entry->buffer_length, \
- __entry->hold, \
- __entry->pincount, \
- __entry->lockval, \
- __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
- (void *)__entry->caller_ip) \
+DECLARE_EVENT_CLASS(xfs_buf_class,
+ TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
+ TP_ARGS(bp, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, bno)
+ __field(size_t, buffer_length)
+ __field(int, hold)
+ __field(int, pincount)
+ __field(unsigned, lockval)
+ __field(unsigned, flags)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = bp->b_target->bt_dev;
+ __entry->bno = bp->b_bn;
+ __entry->buffer_length = bp->b_buffer_length;
+ __entry->hold = atomic_read(&bp->b_hold);
+ __entry->pincount = atomic_read(&bp->b_pin_count);
+ __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->flags = bp->b_flags;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->bno,
+ __entry->buffer_length,
+ __entry->hold,
+ __entry->pincount,
+ __entry->lockval,
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
+ (void *)__entry->caller_ip)
)
+
+#define DEFINE_BUF_EVENT(name) \
+DEFINE_EVENT(xfs_buf_class, name, \
+ TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
+ TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
@@ -299,41 +311,45 @@ DEFINE_BUF_EVENT(xfs_reset_dqcounts);
DEFINE_BUF_EVENT(xfs_inode_item_push);
/* pass flags explicitly */
-#define DEFINE_BUF_FLAGS_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
- TP_ARGS(bp, flags, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_daddr_t, bno) \
- __field(size_t, buffer_length) \
- __field(int, hold) \
- __field(int, pincount) \
- __field(unsigned, lockval) \
- __field(unsigned, flags) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = bp->b_target->bt_dev; \
- __entry->bno = bp->b_bn; \
- __entry->buffer_length = bp->b_buffer_length; \
- __entry->flags = flags; \
- __entry->hold = atomic_read(&bp->b_hold); \
- __entry->pincount = atomic_read(&bp->b_pin_count); \
- __entry->lockval = xfs_buf_lock_value(bp); \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
- "lock %d flags %s caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- (unsigned long long)__entry->bno, \
- __entry->buffer_length, \
- __entry->hold, \
- __entry->pincount, \
- __entry->lockval, \
- __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
- (void *)__entry->caller_ip) \
+DECLARE_EVENT_CLASS(xfs_buf_flags_class,
+ TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
+ TP_ARGS(bp, flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, bno)
+ __field(size_t, buffer_length)
+ __field(int, hold)
+ __field(int, pincount)
+ __field(unsigned, lockval)
+ __field(unsigned, flags)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = bp->b_target->bt_dev;
+ __entry->bno = bp->b_bn;
+ __entry->buffer_length = bp->b_buffer_length;
+ __entry->flags = flags;
+ __entry->hold = atomic_read(&bp->b_hold);
+ __entry->pincount = atomic_read(&bp->b_pin_count);
+ __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->bno,
+ __entry->buffer_length,
+ __entry->hold,
+ __entry->pincount,
+ __entry->lockval,
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
+ (void *)__entry->caller_ip)
)
+
+#define DEFINE_BUF_FLAGS_EVENT(name) \
+DEFINE_EVENT(xfs_buf_flags_class, name, \
+ TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
+ TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
@@ -376,55 +392,58 @@ TRACE_EVENT(xfs_buf_ioerror,
(void *)__entry->caller_ip)
);
-#define DEFINE_BUF_ITEM_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_buf_log_item *bip), \
- TP_ARGS(bip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_daddr_t, buf_bno) \
- __field(size_t, buf_len) \
- __field(int, buf_hold) \
- __field(int, buf_pincount) \
- __field(int, buf_lockval) \
- __field(unsigned, buf_flags) \
- __field(unsigned, bli_recur) \
- __field(int, bli_refcount) \
- __field(unsigned, bli_flags) \
- __field(void *, li_desc) \
- __field(unsigned, li_flags) \
- ), \
- TP_fast_assign( \
- __entry->dev = bip->bli_buf->b_target->bt_dev; \
- __entry->bli_flags = bip->bli_flags; \
- __entry->bli_recur = bip->bli_recur; \
- __entry->bli_refcount = atomic_read(&bip->bli_refcount); \
- __entry->buf_bno = bip->bli_buf->b_bn; \
- __entry->buf_len = bip->bli_buf->b_buffer_length; \
- __entry->buf_flags = bip->bli_buf->b_flags; \
- __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \
- __entry->buf_pincount = \
- atomic_read(&bip->bli_buf->b_pin_count); \
- __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \
- __entry->li_desc = bip->bli_item.li_desc; \
- __entry->li_flags = bip->bli_item.li_flags; \
- ), \
- TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
- "lock %d flags %s recur %d refcount %d bliflags %s " \
- "lidesc 0x%p liflags %s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- (unsigned long long)__entry->buf_bno, \
- __entry->buf_len, \
- __entry->buf_hold, \
- __entry->buf_pincount, \
- __entry->buf_lockval, \
- __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \
- __entry->bli_recur, \
- __entry->bli_refcount, \
- __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \
- __entry->li_desc, \
- __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \
+DECLARE_EVENT_CLASS(xfs_buf_item_class,
+ TP_PROTO(struct xfs_buf_log_item *bip),
+ TP_ARGS(bip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, buf_bno)
+ __field(size_t, buf_len)
+ __field(int, buf_hold)
+ __field(int, buf_pincount)
+ __field(int, buf_lockval)
+ __field(unsigned, buf_flags)
+ __field(unsigned, bli_recur)
+ __field(int, bli_refcount)
+ __field(unsigned, bli_flags)
+ __field(void *, li_desc)
+ __field(unsigned, li_flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = bip->bli_buf->b_target->bt_dev;
+ __entry->bli_flags = bip->bli_flags;
+ __entry->bli_recur = bip->bli_recur;
+ __entry->bli_refcount = atomic_read(&bip->bli_refcount);
+ __entry->buf_bno = bip->bli_buf->b_bn;
+ __entry->buf_len = bip->bli_buf->b_buffer_length;
+ __entry->buf_flags = bip->bli_buf->b_flags;
+ __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
+ __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
+ __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf);
+ __entry->li_desc = bip->bli_item.li_desc;
+ __entry->li_flags = bip->bli_item.li_flags;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d flags %s recur %d refcount %d bliflags %s "
+ "lidesc 0x%p liflags %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->buf_bno,
+ __entry->buf_len,
+ __entry->buf_hold,
+ __entry->buf_pincount,
+ __entry->buf_lockval,
+ __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
+ __entry->bli_recur,
+ __entry->bli_refcount,
+ __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
+ __entry->li_desc,
+ __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)
+
+#define DEFINE_BUF_ITEM_EVENT(name) \
+DEFINE_EVENT(xfs_buf_item_class, name, \
+ TP_PROTO(struct xfs_buf_log_item *bip), \
+ TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
@@ -450,78 +469,90 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
+DECLARE_EVENT_CLASS(xfs_lock_class,
+ TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
+ unsigned long caller_ip),
+ TP_ARGS(ip, lock_flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, lock_flags)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->lock_flags = lock_flags;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
+ (void *)__entry->caller_ip)
+)
+
#define DEFINE_LOCK_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_lock_class, name, \
TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
unsigned long caller_ip), \
- TP_ARGS(ip, lock_flags, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(int, lock_flags) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->lock_flags = lock_flags; \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \
- (void *)__entry->caller_ip) \
-)
-
+ TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
+DECLARE_EVENT_CLASS(xfs_iget_class,
+ TP_PROTO(struct xfs_inode *ip),
+ TP_ARGS(ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino)
+)
+
#define DEFINE_IGET_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_iget_class, name, \
TP_PROTO(struct xfs_inode *ip), \
- TP_ARGS(ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino) \
-)
+ TP_ARGS(ip))
DEFINE_IGET_EVENT(xfs_iget_skip);
DEFINE_IGET_EVENT(xfs_iget_reclaim);
DEFINE_IGET_EVENT(xfs_iget_found);
DEFINE_IGET_EVENT(xfs_iget_alloc);
+DECLARE_EVENT_CLASS(xfs_inode_class,
+ TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
+ TP_ARGS(ip, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, count)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->count = atomic_read(&VFS_I(ip)->i_count);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx count %d caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->count,
+ (char *)__entry->caller_ip)
+)
+
#define DEFINE_INODE_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_inode_class, name, \
TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
- TP_ARGS(ip, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(int, count) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->count = atomic_read(&VFS_I(ip)->i_count); \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->count, \
- (char *)__entry->caller_ip) \
-)
+ TP_ARGS(ip, caller_ip))
DEFINE_INODE_EVENT(xfs_ihold);
DEFINE_INODE_EVENT(xfs_irele);
/* the old xfs_itrace_entry tracer - to be replaced by s.th. in the VFS */
@@ -529,55 +560,59 @@ DEFINE_INODE_EVENT(xfs_inode);
#define xfs_itrace_entry(ip) \
trace_xfs_inode(ip, _THIS_IP_)
-#define DEFINE_DQUOT_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_dquot *dqp), \
- TP_ARGS(dqp), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(__be32, id) \
- __field(unsigned, flags) \
- __field(unsigned, nrefs) \
- __field(unsigned long long, res_bcount) \
- __field(unsigned long long, bcount) \
- __field(unsigned long long, icount) \
- __field(unsigned long long, blk_hardlimit) \
- __field(unsigned long long, blk_softlimit) \
- __field(unsigned long long, ino_hardlimit) \
- __field(unsigned long long, ino_softlimit) \
- ), \
- TP_fast_assign( \
- __entry->dev = dqp->q_mount->m_super->s_dev; \
- __entry->id = dqp->q_core.d_id; \
- __entry->flags = dqp->dq_flags; \
- __entry->nrefs = dqp->q_nrefs; \
- __entry->res_bcount = dqp->q_res_bcount; \
- __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \
- __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \
- __entry->blk_hardlimit = \
- be64_to_cpu(dqp->q_core.d_blk_hardlimit); \
- __entry->blk_softlimit = \
- be64_to_cpu(dqp->q_core.d_blk_softlimit); \
- __entry->ino_hardlimit = \
- be64_to_cpu(dqp->q_core.d_ino_hardlimit); \
- __entry->ino_softlimit = \
- be64_to_cpu(dqp->q_core.d_ino_softlimit); \
+DECLARE_EVENT_CLASS(xfs_dquot_class,
+ TP_PROTO(struct xfs_dquot *dqp),
+ TP_ARGS(dqp),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(__be32, id)
+ __field(unsigned, flags)
+ __field(unsigned, nrefs)
+ __field(unsigned long long, res_bcount)
+ __field(unsigned long long, bcount)
+ __field(unsigned long long, icount)
+ __field(unsigned long long, blk_hardlimit)
+ __field(unsigned long long, blk_softlimit)
+ __field(unsigned long long, ino_hardlimit)
+ __field(unsigned long long, ino_softlimit)
), \
- TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \
- "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \
- "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- be32_to_cpu(__entry->id), \
- __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \
- __entry->nrefs, \
- __entry->res_bcount, \
- __entry->bcount, \
- __entry->blk_hardlimit, \
- __entry->blk_softlimit, \
- __entry->icount, \
- __entry->ino_hardlimit, \
- __entry->ino_softlimit) \
+ TP_fast_assign(
+ __entry->dev = dqp->q_mount->m_super->s_dev;
+ __entry->id = dqp->q_core.d_id;
+ __entry->flags = dqp->dq_flags;
+ __entry->nrefs = dqp->q_nrefs;
+ __entry->res_bcount = dqp->q_res_bcount;
+ __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
+ __entry->icount = be64_to_cpu(dqp->q_core.d_icount);
+ __entry->blk_hardlimit =
+ be64_to_cpu(dqp->q_core.d_blk_hardlimit);
+ __entry->blk_softlimit =
+ be64_to_cpu(dqp->q_core.d_blk_softlimit);
+ __entry->ino_hardlimit =
+ be64_to_cpu(dqp->q_core.d_ino_hardlimit);
+ __entry->ino_softlimit =
+ be64_to_cpu(dqp->q_core.d_ino_softlimit);
+ ),
+ TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx "
+ "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] "
+ "icnt 0x%llx [hard 0x%llx | soft 0x%llx]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ be32_to_cpu(__entry->id),
+ __print_flags(__entry->flags, "|", XFS_DQ_FLAGS),
+ __entry->nrefs,
+ __entry->res_bcount,
+ __entry->bcount,
+ __entry->blk_hardlimit,
+ __entry->blk_softlimit,
+ __entry->icount,
+ __entry->ino_hardlimit,
+ __entry->ino_softlimit)
)
+
+#define DEFINE_DQUOT_EVENT(name) \
+DEFINE_EVENT(xfs_dquot_class, name, \
+ TP_PROTO(struct xfs_dquot *dqp), \
+ TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
@@ -610,72 +645,75 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_done);
DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
+DECLARE_EVENT_CLASS(xfs_loggrant_class,
+ TP_PROTO(struct log *log, struct xlog_ticket *tic),
+ TP_ARGS(log, tic),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned, trans_type)
+ __field(char, ocnt)
+ __field(char, cnt)
+ __field(int, curr_res)
+ __field(int, unit_res)
+ __field(unsigned int, flags)
+ __field(void *, reserve_headq)
+ __field(void *, write_headq)
+ __field(int, grant_reserve_cycle)
+ __field(int, grant_reserve_bytes)
+ __field(int, grant_write_cycle)
+ __field(int, grant_write_bytes)
+ __field(int, curr_cycle)
+ __field(int, curr_block)
+ __field(xfs_lsn_t, tail_lsn)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->trans_type = tic->t_trans_type;
+ __entry->ocnt = tic->t_ocnt;
+ __entry->cnt = tic->t_cnt;
+ __entry->curr_res = tic->t_curr_res;
+ __entry->unit_res = tic->t_unit_res;
+ __entry->flags = tic->t_flags;
+ __entry->reserve_headq = log->l_reserve_headq;
+ __entry->write_headq = log->l_write_headq;
+ __entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
+ __entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
+ __entry->grant_write_cycle = log->l_grant_write_cycle;
+ __entry->grant_write_bytes = log->l_grant_write_bytes;
+ __entry->curr_cycle = log->l_curr_cycle;
+ __entry->curr_block = log->l_curr_block;
+ __entry->tail_lsn = log->l_tail_lsn;
+ ),
+ TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
+ "t_unit_res %u t_flags %s reserve_headq 0x%p "
+ "write_headq 0x%p grant_reserve_cycle %d "
+ "grant_reserve_bytes %d grant_write_cycle %d "
+ "grant_write_bytes %d curr_cycle %d curr_block %d "
+ "tail_cycle %d tail_block %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
+ __entry->ocnt,
+ __entry->cnt,
+ __entry->curr_res,
+ __entry->unit_res,
+ __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
+ __entry->reserve_headq,
+ __entry->write_headq,
+ __entry->grant_reserve_cycle,
+ __entry->grant_reserve_bytes,
+ __entry->grant_write_cycle,
+ __entry->grant_write_bytes,
+ __entry->curr_cycle,
+ __entry->curr_block,
+ CYCLE_LSN(__entry->tail_lsn),
+ BLOCK_LSN(__entry->tail_lsn)
+ )
+)
-#define DEFINE_LOGGRANT_EVENT(tname) \
-TRACE_EVENT(tname, \
+#define DEFINE_LOGGRANT_EVENT(name) \
+DEFINE_EVENT(xfs_loggrant_class, name, \
TP_PROTO(struct log *log, struct xlog_ticket *tic), \
- TP_ARGS(log, tic), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(unsigned, trans_type) \
- __field(char, ocnt) \
- __field(char, cnt) \
- __field(int, curr_res) \
- __field(int, unit_res) \
- __field(unsigned int, flags) \
- __field(void *, reserve_headq) \
- __field(void *, write_headq) \
- __field(int, grant_reserve_cycle) \
- __field(int, grant_reserve_bytes) \
- __field(int, grant_write_cycle) \
- __field(int, grant_write_bytes) \
- __field(int, curr_cycle) \
- __field(int, curr_block) \
- __field(xfs_lsn_t, tail_lsn) \
- ), \
- TP_fast_assign( \
- __entry->dev = log->l_mp->m_super->s_dev; \
- __entry->trans_type = tic->t_trans_type; \
- __entry->ocnt = tic->t_ocnt; \
- __entry->cnt = tic->t_cnt; \
- __entry->curr_res = tic->t_curr_res; \
- __entry->unit_res = tic->t_unit_res; \
- __entry->flags = tic->t_flags; \
- __entry->reserve_headq = log->l_reserve_headq; \
- __entry->write_headq = log->l_write_headq; \
- __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \
- __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \
- __entry->grant_write_cycle = log->l_grant_write_cycle; \
- __entry->grant_write_bytes = log->l_grant_write_bytes; \
- __entry->curr_cycle = log->l_curr_cycle; \
- __entry->curr_block = log->l_curr_block; \
- __entry->tail_lsn = log->l_tail_lsn; \
- ), \
- TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \
- "t_unit_res %u t_flags %s reserve_headq 0x%p " \
- "write_headq 0x%p grant_reserve_cycle %d " \
- "grant_reserve_bytes %d grant_write_cycle %d " \
- "grant_write_bytes %d curr_cycle %d curr_block %d " \
- "tail_cycle %d tail_block %d", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \
- __entry->ocnt, \
- __entry->cnt, \
- __entry->curr_res, \
- __entry->unit_res, \
- __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \
- __entry->reserve_headq, \
- __entry->write_headq, \
- __entry->grant_reserve_cycle, \
- __entry->grant_reserve_bytes, \
- __entry->grant_write_cycle, \
- __entry->grant_write_bytes, \
- __entry->curr_cycle, \
- __entry->curr_block, \
- CYCLE_LSN(__entry->tail_lsn), \
- BLOCK_LSN(__entry->tail_lsn) \
- ) \
-)
+ TP_ARGS(log, tic))
DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
@@ -815,7 +853,7 @@ TRACE_EVENT(name, \
), \
TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
"offset 0x%llx count %zd flags %s " \
- "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
+ "startoff 0x%llx startblock %s blockcount 0x%llx", \
MAJOR(__entry->dev), MINOR(__entry->dev), \
__entry->ino, \
__entry->size, \
@@ -824,7 +862,7 @@ TRACE_EVENT(name, \
__entry->count, \
__print_flags(__entry->flags, "|", BMAPI_FLAGS), \
__entry->startoff, \
- __entry->startblock, \
+ xfs_fmtfsblock(__entry->startblock), \
__entry->blockcount) \
)
DEFINE_IOMAP_EVENT(xfs_iomap_enter);
@@ -897,28 +935,32 @@ TRACE_EVENT(xfs_itruncate_start,
__entry->toss_finish)
);
+DECLARE_EVENT_CLASS(xfs_itrunc_class,
+ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
+ TP_ARGS(ip, new_size),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_fsize_t, new_size)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->new_size = new_size;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->new_size)
+)
+
#define DEFINE_ITRUNC_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_itrunc_class, name, \
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
- TP_ARGS(ip, new_size), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(xfs_fsize_t, size) \
- __field(xfs_fsize_t, new_size) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->size = ip->i_d.di_size; \
- __entry->new_size = new_size; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->size, \
- __entry->new_size) \
-)
+ TP_ARGS(ip, new_size))
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
@@ -1037,28 +1079,28 @@ TRACE_EVENT(xfs_alloc_unbusy,
TRACE_EVENT(xfs_alloc_busysearch,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
- xfs_extlen_t len, int found),
- TP_ARGS(mp, agno, agbno, len, found),
+ xfs_extlen_t len, xfs_lsn_t lsn),
+ TP_ARGS(mp, agno, agbno, len, lsn),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
- __field(int, found)
+ __field(xfs_lsn_t, lsn)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->agbno = agbno;
__entry->len = len;
- __entry->found = found;
+ __entry->lsn = lsn;
),
- TP_printk("dev %d:%d agno %u agbno %u len %u %s",
+ TP_printk("dev %d:%d agno %u agbno %u len %u force lsn 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
__entry->len,
- __print_symbolic(__entry->found, XFS_BUSY_STATES))
+ __entry->lsn)
);
TRACE_EVENT(xfs_agf,
@@ -1152,77 +1194,80 @@ TRACE_EVENT(xfs_free_extent,
);
-#define DEFINE_ALLOC_EVENT(name) \
-TRACE_EVENT(name, \
- TP_PROTO(struct xfs_alloc_arg *args), \
- TP_ARGS(args), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_agnumber_t, agno) \
- __field(xfs_agblock_t, agbno) \
- __field(xfs_extlen_t, minlen) \
- __field(xfs_extlen_t, maxlen) \
- __field(xfs_extlen_t, mod) \
- __field(xfs_extlen_t, prod) \
- __field(xfs_extlen_t, minleft) \
- __field(xfs_extlen_t, total) \
- __field(xfs_extlen_t, alignment) \
- __field(xfs_extlen_t, minalignslop) \
- __field(xfs_extlen_t, len) \
- __field(short, type) \
- __field(short, otype) \
- __field(char, wasdel) \
- __field(char, wasfromfl) \
- __field(char, isfl) \
- __field(char, userdata) \
- __field(xfs_fsblock_t, firstblock) \
- ), \
- TP_fast_assign( \
- __entry->dev = args->mp->m_super->s_dev; \
- __entry->agno = args->agno; \
- __entry->agbno = args->agbno; \
- __entry->minlen = args->minlen; \
- __entry->maxlen = args->maxlen; \
- __entry->mod = args->mod; \
- __entry->prod = args->prod; \
- __entry->minleft = args->minleft; \
- __entry->total = args->total; \
- __entry->alignment = args->alignment; \
- __entry->minalignslop = args->minalignslop; \
- __entry->len = args->len; \
- __entry->type = args->type; \
- __entry->otype = args->otype; \
- __entry->wasdel = args->wasdel; \
- __entry->wasfromfl = args->wasfromfl; \
- __entry->isfl = args->isfl; \
- __entry->userdata = args->userdata; \
- __entry->firstblock = args->firstblock; \
- ), \
- TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
- "prod %u minleft %u total %u alignment %u minalignslop %u " \
- "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
- "userdata %d firstblock 0x%llx", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->agno, \
- __entry->agbno, \
- __entry->minlen, \
- __entry->maxlen, \
- __entry->mod, \
- __entry->prod, \
- __entry->minleft, \
- __entry->total, \
- __entry->alignment, \
- __entry->minalignslop, \
- __entry->len, \
- __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \
- __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \
- __entry->wasdel, \
- __entry->wasfromfl, \
- __entry->isfl, \
- __entry->userdata, \
- __entry->firstblock) \
+DECLARE_EVENT_CLASS(xfs_alloc_class,
+ TP_PROTO(struct xfs_alloc_arg *args),
+ TP_ARGS(args),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, minlen)
+ __field(xfs_extlen_t, maxlen)
+ __field(xfs_extlen_t, mod)
+ __field(xfs_extlen_t, prod)
+ __field(xfs_extlen_t, minleft)
+ __field(xfs_extlen_t, total)
+ __field(xfs_extlen_t, alignment)
+ __field(xfs_extlen_t, minalignslop)
+ __field(xfs_extlen_t, len)
+ __field(short, type)
+ __field(short, otype)
+ __field(char, wasdel)
+ __field(char, wasfromfl)
+ __field(char, isfl)
+ __field(char, userdata)
+ __field(xfs_fsblock_t, firstblock)
+ ),
+ TP_fast_assign(
+ __entry->dev = args->mp->m_super->s_dev;
+ __entry->agno = args->agno;
+ __entry->agbno = args->agbno;
+ __entry->minlen = args->minlen;
+ __entry->maxlen = args->maxlen;
+ __entry->mod = args->mod;
+ __entry->prod = args->prod;
+ __entry->minleft = args->minleft;
+ __entry->total = args->total;
+ __entry->alignment = args->alignment;
+ __entry->minalignslop = args->minalignslop;
+ __entry->len = args->len;
+ __entry->type = args->type;
+ __entry->otype = args->otype;
+ __entry->wasdel = args->wasdel;
+ __entry->wasfromfl = args->wasfromfl;
+ __entry->isfl = args->isfl;
+ __entry->userdata = args->userdata;
+ __entry->firstblock = args->firstblock;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
+ "prod %u minleft %u total %u alignment %u minalignslop %u "
+ "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
+ "userdata %d firstblock 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->minlen,
+ __entry->maxlen,
+ __entry->mod,
+ __entry->prod,
+ __entry->minleft,
+ __entry->total,
+ __entry->alignment,
+ __entry->minalignslop,
+ __entry->len,
+ __print_symbolic(__entry->type, XFS_ALLOC_TYPES),
+ __print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
+ __entry->wasdel,
+ __entry->wasfromfl,
+ __entry->isfl,
+ __entry->userdata,
+ __entry->firstblock)
)
+#define DEFINE_ALLOC_EVENT(name) \
+DEFINE_EVENT(xfs_alloc_class, name, \
+ TP_PROTO(struct xfs_alloc_arg *args), \
+ TP_ARGS(args))
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
@@ -1245,92 +1290,100 @@ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
-#define DEFINE_DIR2_TRACE(tname) \
-TRACE_EVENT(tname, \
+DECLARE_EVENT_CLASS(xfs_dir2_class,
+ TP_PROTO(struct xfs_da_args *args),
+ TP_ARGS(args),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __dynamic_array(char, name, args->namelen)
+ __field(int, namelen)
+ __field(xfs_dahash_t, hashval)
+ __field(xfs_ino_t, inumber)
+ __field(int, op_flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
+ __entry->ino = args->dp->i_ino;
+ if (args->namelen)
+ memcpy(__get_str(name), args->name, args->namelen);
+ __entry->namelen = args->namelen;
+ __entry->hashval = args->hashval;
+ __entry->inumber = args->inumber;
+ __entry->op_flags = args->op_flags;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
+ "inumber 0x%llx op_flags %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->namelen,
+ __entry->namelen ? __get_str(name) : NULL,
+ __entry->namelen,
+ __entry->hashval,
+ __entry->inumber,
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
+)
+
+#define DEFINE_DIR2_EVENT(name) \
+DEFINE_EVENT(xfs_dir2_class, name, \
TP_PROTO(struct xfs_da_args *args), \
- TP_ARGS(args), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __dynamic_array(char, name, args->namelen) \
- __field(int, namelen) \
- __field(xfs_dahash_t, hashval) \
- __field(xfs_ino_t, inumber) \
- __field(int, op_flags) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
- __entry->ino = args->dp->i_ino; \
- if (args->namelen) \
- memcpy(__get_str(name), args->name, args->namelen); \
- __entry->namelen = args->namelen; \
- __entry->hashval = args->hashval; \
- __entry->inumber = args->inumber; \
- __entry->op_flags = args->op_flags; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \
- "inumber 0x%llx op_flags %s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->namelen, \
- __entry->namelen ? __get_str(name) : NULL, \
- __entry->namelen, \
- __entry->hashval, \
- __entry->inumber, \
- __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \
+ TP_ARGS(args))
+DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
+DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
+DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
+DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);
+
+DECLARE_EVENT_CLASS(xfs_dir2_space_class,
+ TP_PROTO(struct xfs_da_args *args, int idx),
+ TP_ARGS(args, idx),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, op_flags)
+ __field(int, idx)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
+ __entry->ino = args->dp->i_ino;
+ __entry->op_flags = args->op_flags;
+ __entry->idx = idx;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
+ __entry->idx)
)
-DEFINE_DIR2_TRACE(xfs_dir2_sf_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_create);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block);
-DEFINE_DIR2_TRACE(xfs_dir2_block_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_block_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_block_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_block_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf);
-DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node);
-DEFINE_DIR2_TRACE(xfs_dir2_node_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_node_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_node_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_node_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf);
-#define DEFINE_DIR2_SPACE_TRACE(tname) \
-TRACE_EVENT(tname, \
+#define DEFINE_DIR2_SPACE_EVENT(name) \
+DEFINE_EVENT(xfs_dir2_space_class, name, \
TP_PROTO(struct xfs_da_args *args, int idx), \
- TP_ARGS(args, idx), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(int, op_flags) \
- __field(int, idx) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
- __entry->ino = args->dp->i_ino; \
- __entry->op_flags = args->op_flags; \
- __entry->idx = idx; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \
- __entry->idx) \
-)
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add);
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove);
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode);
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode);
+ TP_ARGS(args, idx))
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
TRACE_EVENT(xfs_dir2_leafn_moveents,
TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index a1c65fc6d9c4..275b1f4f9430 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2563,43 +2563,41 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_mount_t *mp;
xfs_perag_busy_t *bsy;
xfs_agblock_t uend, bend;
- xfs_lsn_t lsn;
+ xfs_lsn_t lsn = 0;
int cnt;
mp = tp->t_mountp;
spin_lock(&mp->m_perag[agno].pagb_lock);
- cnt = mp->m_perag[agno].pagb_count;
uend = bno + len - 1;
- /* search pagb_list for this slot, skipping open slots */
- for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) {
+ /*
+ * search pagb_list for this slot, skipping open slots. We have to
+ * search the entire array as there may be multiple overlaps and
+ * we have to get the most recent LSN for the log force to push out
+ * all the transactions that span the range.
+ */
+ for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
+ bsy = &mp->m_perag[agno].pagb_list[cnt];
+ if (!bsy->busy_tp)
+ continue;
- /*
- * (start1,length1) within (start2, length2)
- */
- if (bsy->busy_tp != NULL) {
- bend = bsy->busy_start + bsy->busy_length - 1;
- if ((bno > bend) || (uend < bsy->busy_start)) {
- cnt--;
- } else {
- break;
- }
- }
- }
+ bend = bsy->busy_start + bsy->busy_length - 1;
+ if (bno > bend || uend < bsy->busy_start)
+ continue;
- trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt);
+ /* (start1,length1) within (start2, length2) */
+ if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
+ lsn = bsy->busy_tp->t_commit_lsn;
+ }
+ spin_unlock(&mp->m_perag[agno].pagb_lock);
+ trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn);
/*
* If a block was found, force the log through the LSN of the
* transaction that freed the block
*/
- if (cnt) {
- lsn = bsy->busy_tp->t_commit_lsn;
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ if (lsn)
xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
- } else {
- spin_unlock(&mp->m_perag[agno].pagb_lock);
- }
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ce278b3ae7fc..391d36b0e68c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2841,10 +2841,14 @@ xfs_iflush(
mp = ip->i_mount;
/*
- * If the inode isn't dirty, then just release the inode
- * flush lock and do nothing.
+ * If the inode isn't dirty, then just release the inode flush lock and
+ * do nothing. Treat stale inodes the same; we cannot rely on the
+ * backing buffer remaining stale in cache for the remaining life of
+ * the stale inode and so xfs_itobp() below may give us a buffer that
+ * no longer contains inodes below. Doing this stale check here also
+ * avoids forcing the log on pinned, stale inodes.
*/
- if (xfs_inode_clean(ip)) {
+ if (xfs_inode_clean(ip) || xfs_iflags_test(ip, XFS_ISTALE)) {
xfs_ifunlock(ip);
return 0;
}
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 6558ffd8d140..6f268756bf36 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -70,7 +70,6 @@ xfs_setattr(
uint commit_flags=0;
uid_t uid=0, iuid=0;
gid_t gid=0, igid=0;
- int timeflags = 0;
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
int need_iolock = 1;
@@ -135,16 +134,13 @@ xfs_setattr(
if (flags & XFS_ATTR_NOLOCK)
need_iolock = 0;
if (!(mask & ATTR_SIZE)) {
- if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) ||
- (mp->m_flags & XFS_MOUNT_WSYNC)) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- commit_flags = 0;
- if ((code = xfs_trans_reserve(tp, 0,
- XFS_ICHANGE_LOG_RES(mp), 0,
- 0, 0))) {
- lock_flags = 0;
- goto error_return;
- }
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+ commit_flags = 0;
+ code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
+ 0, 0, 0);
+ if (code) {
+ lock_flags = 0;
+ goto error_return;
}
} else {
if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
@@ -295,15 +291,23 @@ xfs_setattr(
* or we are explicitly asked to change it. This handles
* the semantic difference between truncate() and ftruncate()
* as implemented in the VFS.
+ *
+ * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
+ * is a special case where we need to update the times despite
+ * not having these flags set. For all other operations the
+ * VFS set these flags explicitly if it wants a timestamp
+ * update.
*/
- if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME))
- timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ if (iattr->ia_size != ip->i_size &&
+ (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+ iattr->ia_ctime = iattr->ia_mtime =
+ current_fs_time(inode->i_sb);
+ mask |= ATTR_CTIME | ATTR_MTIME;
+ }
if (iattr->ia_size > ip->i_size) {
ip->i_d.di_size = iattr->ia_size;
ip->i_size = iattr->ia_size;
- if (!(flags & XFS_ATTR_DMI))
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
} else if (iattr->ia_size <= ip->i_size ||
(iattr->ia_size == 0 && ip->i_d.di_nextents)) {
@@ -374,9 +378,6 @@ xfs_setattr(
ip->i_d.di_gid = gid;
inode->i_gid = gid;
}
-
- xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
- timeflags |= XFS_ICHGTIME_CHG;
}
/*
@@ -393,51 +394,37 @@ xfs_setattr(
inode->i_mode &= S_IFMT;
inode->i_mode |= mode & ~S_IFMT;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- timeflags |= XFS_ICHGTIME_CHG;
}
/*
* Change file access or modified times.
*/
- if (mask & (ATTR_ATIME|ATTR_MTIME)) {
- if (mask & ATTR_ATIME) {
- inode->i_atime = iattr->ia_atime;
- ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
- ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
- ip->i_update_core = 1;
- }
- if (mask & ATTR_MTIME) {
- inode->i_mtime = iattr->ia_mtime;
- ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
- ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
- timeflags &= ~XFS_ICHGTIME_MOD;
- timeflags |= XFS_ICHGTIME_CHG;
- }
- if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)))
- xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+ if (mask & ATTR_ATIME) {
+ inode->i_atime = iattr->ia_atime;
+ ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
+ ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
+ ip->i_update_core = 1;
}
-
- /*
- * Change file inode change time only if ATTR_CTIME set
- * AND we have been called by a DMI function.
- */
-
- if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) {
+ if (mask & ATTR_CTIME) {
inode->i_ctime = iattr->ia_ctime;
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
ip->i_update_core = 1;
- timeflags &= ~XFS_ICHGTIME_CHG;
+ }
+ if (mask & ATTR_MTIME) {
+ inode->i_mtime = iattr->ia_mtime;
+ ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+ ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+ ip->i_update_core = 1;
}
/*
- * Send out timestamp changes that need to be set to the
- * current time. Not done when called by a DMI function.
+ * And finally, log the inode core if any attribute in it
+ * has been changed.
*/
- if (timeflags && !(flags & XFS_ATTR_DMI))
- xfs_ichgtime(ip, timeflags);
+ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
+ ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
XFS_STATS_INC(xs_ig_attrchg);
@@ -452,12 +439,10 @@ xfs_setattr(
* mix so this probably isn't worth the trouble to optimize.
*/
code = 0;
- if (tp) {
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(tp);
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
- code = xfs_trans_commit(tp, commit_flags);
- }
+ code = xfs_trans_commit(tp, commit_flags);
xfs_iunlock(ip, lock_flags);
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
new file mode 100644
index 000000000000..631a1ad2d108
--- /dev/null
+++ b/include/acpi/apei.h
@@ -0,0 +1,13 @@
+/*
+ * apei.h - ACPI Platform Error Interface
+ */
+
+#ifndef ACPI_APEI_H
+#define ACPI_APEI_H
+
+extern int hest_disable;
+
+typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
+int apei_hest_parse(apei_hest_func_t func, void *data);
+
+#endif
diff --git a/include/acpi/atomicio.h b/include/acpi/atomicio.h
new file mode 100644
index 000000000000..8b9fb4b0b9ce
--- /dev/null
+++ b/include/acpi/atomicio.h
@@ -0,0 +1,10 @@
+#ifndef ACPI_ATOMIC_IO_H
+#define ACPI_ATOMIC_IO_H
+
+int acpi_pre_map_gar(struct acpi_generic_address *reg);
+int acpi_post_unmap_gar(struct acpi_generic_address *reg);
+
+int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg);
+int acpi_atomic_write(u64 val, struct acpi_generic_address *reg);
+
+#endif
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 9d7febde10a1..09469971472f 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -152,7 +152,7 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache)
#include <linux/hardirq.h>
#define ACPI_PREEMPTION_POINT() \
do { \
- if (!in_atomic_preempt_off()) \
+ if (!in_atomic_preempt_off() && !irqs_disabled()) \
cond_resched(); \
} while (0)
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index fcd268ce0674..009bd6149d99 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -3,6 +3,14 @@
#include <linux/types.h>
+/*
+ * FMODE_EXEC is 0x20
+ * FMODE_NONOTIFY is 0x800000
+ * These cannot be used by userspace O_* until internal and external open
+ * flags are split.
+ * -Eric Paris
+ */
+
#define O_ACCMODE 00000003
#define O_RDONLY 00000000
#define O_WRONLY 00000001
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
index e5f234a08540..97e807c8c812 100644
--- a/include/asm-generic/kmap_types.h
+++ b/include/asm-generic/kmap_types.h
@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
KMAP_D(16) KM_IRQ_PTE,
KMAP_D(17) KM_NMI,
KMAP_D(18) KM_NMI_PTE,
-KMAP_D(19) KM_TYPE_NR
+KMAP_D(19) KM_KDB,
+KMAP_D(20) KM_TYPE_NR
};
#undef KMAP_D
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fc218444e315..c8a5d68541d7 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -52,23 +52,4 @@ typedef struct
#define __local_add(i,l) local_set((l), local_read(l) + (i))
#define __local_sub(i,l) local_set((l), local_read(l) - (i))
-/* Use these for per-cpu local_t variables: on some archs they are
- * much more efficient than these naive implementations. Note they take
- * a variable (eg. mystruct.foo), not an address.
- */
-#define cpu_local_read(l) local_read(&__get_cpu_var(l))
-#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
-#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
-#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
-#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
-#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
-
-/* Non-atomic increments, ie. preemption disabled and won't be touched
- * in interrupt, etc. Some archs can optimize this case well.
- */
-#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
-#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
-#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
-#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
-
#endif /* _ASM_GENERIC_LOCAL_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 8087b90d4673..04f91c2d3f7b 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -41,7 +41,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* Only S390 provides its own means of moving the pointer.
*/
#ifndef SHIFT_PERCPU_PTR
-#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+/* Weird cast keeps both GCC and sparse happy. */
+#define SHIFT_PERCPU_PTR(__p, __offset) ({ \
+ __verify_pcpu_ptr((__p)); \
+ RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
+})
#endif
/*
@@ -50,11 +54,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
* offset.
*/
#define per_cpu(var, cpu) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+ (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
#define __get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
#define __raw_get_cpu_var(var) \
- (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+ (*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
#define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
@@ -66,9 +70,9 @@ extern void setup_per_cpu_areas(void);
#else /* ! SMP */
-#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
-#define __get_cpu_var(var) per_cpu_var(var)
-#define __raw_get_cpu_var(var) per_cpu_var(var)
+#define per_cpu(var, cpu) (*((void)(cpu), &(var)))
+#define __get_cpu_var(var) (var)
+#define __raw_get_cpu_var(var) (var)
#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 6a0b30f78a62..fa0156f25415 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -626,9 +626,13 @@ __SYSCALL(__NR_perf_event_open, sys_perf_event_open)
__SYSCALL(__NR_accept4, sys_accept4)
#define __NR_recvmmsg 243
__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
+#define __NR_getprlimit 244
+__SYSCALL(__NR_getprlimit, sys_getprlimit)
+#define __NR_setprlimit 245
+__SYSCALL(__NR_setprlimit, sys_setprlimit)
#undef __NR_syscalls
-#define __NR_syscalls 244
+#define __NR_syscalls 246
/*
* All syscalls below here should go away really,
diff --git a/include/crypto/pcrypt.h b/include/crypto/pcrypt.h
new file mode 100644
index 000000000000..d7d8bd8c6edc
--- /dev/null
+++ b/include/crypto/pcrypt.h
@@ -0,0 +1,51 @@
+/*
+ * pcrypt - Parallel crypto engine.
+ *
+ * Copyright (C) 2009 secunet Security Networks AG
+ * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _CRYPTO_PCRYPT_H
+#define _CRYPTO_PCRYPT_H
+
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/padata.h>
+
+struct pcrypt_request {
+ struct padata_priv padata;
+ void *data;
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+static inline void *pcrypt_request_ctx(struct pcrypt_request *req)
+{
+ return req->__ctx;
+}
+
+static inline
+struct padata_priv *pcrypt_request_padata(struct pcrypt_request *req)
+{
+ return &req->padata;
+}
+
+static inline
+struct pcrypt_request *pcrypt_padata_request(struct padata_priv *padata)
+{
+ return container_of(padata, struct pcrypt_request, padata);
+}
+
+#endif
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 71dafb69cfeb..ffac157fb5b2 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1408,7 +1408,7 @@ extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
struct drm_ati_pcigart_info * gart_info);
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align, dma_addr_t maxaddr);
+ size_t align);
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index b29e20168b5f..4c1231921ba2 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -61,6 +61,8 @@ struct drm_crtc_helper_funcs {
/* Move the crtc on the current fb to the given position *optional* */
int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
+ int (*mode_set_base_atomic)(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int x, int y);
/* reload the current crtc LUT */
void (*load_lut)(struct drm_crtc *crtc);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 58c892a2cbfa..c4f87a51c691 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -30,6 +30,8 @@
#ifndef DRM_FB_HELPER_H
#define DRM_FB_HELPER_H
+#include <linux/kgdb.h>
+
struct drm_fb_helper_crtc {
uint32_t crtc_id;
struct drm_mode_set mode_set;
@@ -63,8 +65,10 @@ struct drm_fb_helper_connector {
struct drm_fb_helper {
struct drm_framebuffer *fb;
+ struct drm_framebuffer *saved_fb;
struct drm_device *dev;
struct drm_display_mode *mode;
+ struct dbg_kms_console_ops kdb_ops;
int crtc_count;
struct drm_fb_helper_crtc *crtc_info;
struct drm_fb_helper_funcs *funcs;
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 43009bc2e757..bc4fdf27bd2e 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -160,6 +160,7 @@ struct drm_mode_get_encoder {
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
+#define DRM_MODE_CONNECTOR_eDP 14
struct drm_mode_get_connector {
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index ec3f5e80a5df..b64a8d7cdf6d 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -188,6 +188,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
+#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -207,6 +208,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -272,6 +274,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
+#define I915_PARAM_HAS_EXECBUF2 9
typedef struct drm_i915_getparam {
int param;
@@ -567,6 +570,57 @@ struct drm_i915_gem_execbuffer {
__u64 cliprects_ptr;
};
+struct drm_i915_gem_exec_object2 {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ __u32 handle;
+
+ /** Number of relocations to be performed on this buffer */
+ __u32 relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ __u64 relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ __u64 alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ __u64 offset;
+
+#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
+ __u64 flags;
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
+struct drm_i915_gem_execbuffer2 {
+ /**
+ * List of gem_exec_object2 structs
+ */
+ __u64 buffers_ptr;
+ __u32 buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ __u32 batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ __u32 batch_len;
+ __u32 DR1;
+ __u32 DR4;
+ __u32 num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ __u64 cliprects_ptr;
+ __u64 flags; /* currently unused */
+ __u64 rsvd1;
+ __u64 rsvd2;
+};
+
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 756f831cbdd5..ca03d9bc5374 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -207,6 +207,7 @@ unifdef-y += ethtool.h
unifdef-y += eventpoll.h
unifdef-y += signalfd.h
unifdef-y += ext2_fs.h
+unifdef-y += fanotify.h
unifdef-y += fb.h
unifdef-y += fcntl.h
unifdef-y += filter.h
@@ -248,6 +249,7 @@ unifdef-y += in.h
unifdef-y += in6.h
unifdef-y += inotify.h
unifdef-y += input.h
+unifdef-y += ioq.h
unifdef-y += ip.h
unifdef-y += ipc.h
unifdef-y += ipmi.h
@@ -337,6 +339,7 @@ unifdef-y += serial_core.h
unifdef-y += serial.h
unifdef-y += serio.h
unifdef-y += shm.h
+unifdef-y += shm_signal.h
unifdef-y += signal.h
unifdef-y += smb_fs.h
unifdef-y += smb.h
@@ -362,6 +365,8 @@ unifdef-y += uio.h
unifdef-y += unistd.h
unifdef-y += usbdevice_fs.h
unifdef-y += utsname.h
+unifdef-y += vbus_pci.h
+unifdef-y += venet.h
unifdef-y += videodev2.h
unifdef-y += videodev.h
unifdef-y += virtio_config.h
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index fcbc26af00e4..8b451663968f 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -36,7 +36,8 @@ enum bdi_state {
typedef int (congested_fn)(void *, int);
enum bdi_stat_item {
- BDI_RECLAIMABLE,
+ BDI_DIRTY,
+ BDI_UNSTABLE,
BDI_WRITEBACK,
NR_BDI_STAT_ITEMS
};
@@ -231,6 +232,7 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
#define BDI_CAP_EXEC_MAP 0x00000040
#define BDI_CAP_NO_ACCT_WB 0x00000080
#define BDI_CAP_SWAP_BACKED 0x00000100
+#define BDI_CAP_ACCT_UNSTABLE 0x00000200
#define BDI_CAP_VMFLAGS \
(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
@@ -310,6 +312,11 @@ static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
return bdi == &default_backing_dev_info;
}
+static inline bool bdi_cap_account_unstable(struct backing_dev_info *bdi)
+{
+ return bdi->capabilities & BDI_CAP_ACCT_UNSTABLE;
+}
+
static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
return bdi_cap_writeback_dirty(mapping->backing_dev_info);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9b98173a8184..ffb13ad35716 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -938,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+ sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1110,18 +1112,13 @@ static inline int queue_alignment_offset(struct request_queue *q)
return q->limits.alignment_offset;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+ unsigned int alignment = (sector << 9) & (granularity - 1);
- offset &= granularity - 1;
- return (granularity + lim->alignment_offset - offset) & (granularity - 1);
-}
-
-static inline int queue_sector_alignment_offset(struct request_queue *q,
- sector_t sector)
-{
- return queue_limit_alignment_offset(&q->limits, sector << 9);
+ return (granularity + lim->alignment_offset - alignment)
+ & (granularity - 1);
}
static inline int bdev_alignment_offset(struct block_device *bdev)
@@ -1145,11 +1142,12 @@ static inline int queue_discard_alignment(struct request_queue *q)
return q->limits.discard_alignment;
}
-static inline int queue_sector_discard_alignment(struct request_queue *q,
- sector_t sector)
+static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
- return ((sector << 9) - q->limits.discard_alignment)
- & (q->limits.discard_granularity - 1);
+ unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+ return (lim->discard_granularity + lim->discard_alignment - alignment)
+ & (lim->discard_granularity - 1);
}
static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
diff --git a/include/linux/btree-128.h b/include/linux/btree-128.h
new file mode 100644
index 000000000000..0b3414c4c928
--- /dev/null
+++ b/include/linux/btree-128.h
@@ -0,0 +1,109 @@
+extern struct btree_geo btree_geo128;
+
+struct btree_head128 { struct btree_head h; };
+
+static inline void btree_init_mempool128(struct btree_head128 *head,
+ mempool_t *mempool)
+{
+ btree_init_mempool(&head->h, mempool);
+}
+
+static inline int btree_init128(struct btree_head128 *head)
+{
+ return btree_init(&head->h);
+}
+
+static inline void btree_destroy128(struct btree_head128 *head)
+{
+ btree_destroy(&head->h);
+}
+
+static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2)
+{
+ u64 key[2] = {k1, k2};
+ return btree_lookup(&head->h, &btree_geo128, (unsigned long *)&key);
+}
+
+static inline void *btree_get_prev128(struct btree_head128 *head,
+ u64 *k1, u64 *k2)
+{
+ u64 key[2] = {*k1, *k2};
+ void *val;
+
+ val = btree_get_prev(&head->h, &btree_geo128,
+ (unsigned long *)&key);
+ *k1 = key[0];
+ *k2 = key[1];
+ return val;
+}
+
+static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2,
+ void *val, gfp_t gfp)
+{
+ u64 key[2] = {k1, k2};
+ return btree_insert(&head->h, &btree_geo128,
+ (unsigned long *)&key, val, gfp);
+}
+
+static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2,
+ void *val)
+{
+ u64 key[2] = {k1, k2};
+ return btree_update(&head->h, &btree_geo128,
+ (unsigned long *)&key, val);
+}
+
+static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2)
+{
+ u64 key[2] = {k1, k2};
+ return btree_remove(&head->h, &btree_geo128, (unsigned long *)&key);
+}
+
+static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2)
+{
+ u64 key[2];
+ void *val;
+
+ val = btree_last(&head->h, &btree_geo128, (unsigned long *)&key[0]);
+ if (val) {
+ *k1 = key[0];
+ *k2 = key[1];
+ }
+
+ return val;
+}
+
+static inline int btree_merge128(struct btree_head128 *target,
+ struct btree_head128 *victim,
+ gfp_t gfp)
+{
+ return btree_merge(&target->h, &victim->h, &btree_geo128, gfp);
+}
+
+void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func);
+
+typedef void (*visitor128_t)(void *elem, unsigned long opaque,
+ u64 key1, u64 key2, size_t index);
+
+static inline size_t btree_visitor128(struct btree_head128 *head,
+ unsigned long opaque,
+ visitor128_t func2)
+{
+ return btree_visitor(&head->h, &btree_geo128, opaque,
+ visitor128, func2);
+}
+
+static inline size_t btree_grim_visitor128(struct btree_head128 *head,
+ unsigned long opaque,
+ visitor128_t func2)
+{
+ return btree_grim_visitor(&head->h, &btree_geo128, opaque,
+ visitor128, func2);
+}
+
+#define btree_for_each_safe128(head, k1, k2, val) \
+ for (val = btree_last128(head, &k1, &k2); \
+ val; \
+ val = btree_get_prev128(head, &k1, &k2))
+
diff --git a/include/linux/btree-type.h b/include/linux/btree-type.h
new file mode 100644
index 000000000000..9a1147ef8563
--- /dev/null
+++ b/include/linux/btree-type.h
@@ -0,0 +1,147 @@
+#define __BTREE_TP(pfx, type, sfx) pfx ## type ## sfx
+#define _BTREE_TP(pfx, type, sfx) __BTREE_TP(pfx, type, sfx)
+#define BTREE_TP(pfx) _BTREE_TP(pfx, BTREE_TYPE_SUFFIX,)
+#define BTREE_FN(name) BTREE_TP(btree_ ## name)
+#define BTREE_TYPE_HEAD BTREE_TP(struct btree_head)
+#define VISITOR_FN BTREE_TP(visitor)
+#define VISITOR_FN_T _BTREE_TP(visitor, BTREE_TYPE_SUFFIX, _t)
+
+BTREE_TYPE_HEAD {
+ struct btree_head h;
+};
+
+static inline void BTREE_FN(init_mempool)(BTREE_TYPE_HEAD *head,
+ mempool_t *mempool)
+{
+ btree_init_mempool(&head->h, mempool);
+}
+
+static inline int BTREE_FN(init)(BTREE_TYPE_HEAD *head)
+{
+ return btree_init(&head->h);
+}
+
+static inline void BTREE_FN(destroy)(BTREE_TYPE_HEAD *head)
+{
+ btree_destroy(&head->h);
+}
+
+static inline int BTREE_FN(merge)(BTREE_TYPE_HEAD *target,
+ BTREE_TYPE_HEAD *victim,
+ gfp_t gfp)
+{
+ return btree_merge(&target->h, &victim->h, BTREE_TYPE_GEO, gfp);
+}
+
+#if (BITS_PER_LONG > BTREE_TYPE_BITS)
+static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ unsigned long _key = key;
+ return btree_lookup(&head->h, BTREE_TYPE_GEO, &_key);
+}
+
+static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val, gfp_t gfp)
+{
+ unsigned long _key = key;
+ return btree_insert(&head->h, BTREE_TYPE_GEO, &_key, val, gfp);
+}
+
+static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val)
+{
+ unsigned long _key = key;
+ return btree_update(&head->h, BTREE_TYPE_GEO, &_key, val);
+}
+
+static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ unsigned long _key = key;
+ return btree_remove(&head->h, BTREE_TYPE_GEO, &_key);
+}
+
+static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ unsigned long _key;
+ void *val = btree_last(&head->h, BTREE_TYPE_GEO, &_key);
+ if (val)
+ *key = _key;
+ return val;
+}
+
+static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ unsigned long _key = *key;
+ void *val = btree_get_prev(&head->h, BTREE_TYPE_GEO, &_key);
+ if (val)
+ *key = _key;
+ return val;
+}
+#else
+static inline void *BTREE_FN(lookup)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ return btree_lookup(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
+}
+
+static inline int BTREE_FN(insert)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val, gfp_t gfp)
+{
+ return btree_insert(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key,
+ val, gfp);
+}
+
+static inline int BTREE_FN(update)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key,
+ void *val)
+{
+ return btree_update(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key, val);
+}
+
+static inline void *BTREE_FN(remove)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE key)
+{
+ return btree_remove(&head->h, BTREE_TYPE_GEO, (unsigned long *)&key);
+}
+
+static inline void *BTREE_FN(last)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ return btree_last(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
+}
+
+static inline void *BTREE_FN(get_prev)(BTREE_TYPE_HEAD *head, BTREE_KEYTYPE *key)
+{
+ return btree_get_prev(&head->h, BTREE_TYPE_GEO, (unsigned long *)key);
+}
+#endif
+
+void VISITOR_FN(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *__func);
+
+typedef void (*VISITOR_FN_T)(void *elem, unsigned long opaque,
+ BTREE_KEYTYPE key, size_t index);
+
+static inline size_t BTREE_FN(visitor)(BTREE_TYPE_HEAD *head,
+ unsigned long opaque,
+ VISITOR_FN_T func2)
+{
+ return btree_visitor(&head->h, BTREE_TYPE_GEO, opaque,
+ visitorl, func2);
+}
+
+static inline size_t BTREE_FN(grim_visitor)(BTREE_TYPE_HEAD *head,
+ unsigned long opaque,
+ VISITOR_FN_T func2)
+{
+ return btree_grim_visitor(&head->h, BTREE_TYPE_GEO, opaque,
+ visitorl, func2);
+}
+
+#undef VISITOR_FN
+#undef VISITOR_FN_T
+#undef __BTREE_TP
+#undef _BTREE_TP
+#undef BTREE_TP
+#undef BTREE_FN
+#undef BTREE_TYPE_HEAD
+#undef BTREE_TYPE_SUFFIX
+#undef BTREE_TYPE_GEO
+#undef BTREE_KEYTYPE
+#undef BTREE_TYPE_BITS
diff --git a/include/linux/btree.h b/include/linux/btree.h
new file mode 100644
index 000000000000..65b5bb058324
--- /dev/null
+++ b/include/linux/btree.h
@@ -0,0 +1,243 @@
+#ifndef BTREE_H
+#define BTREE_H
+
+#include <linux/kernel.h>
+#include <linux/mempool.h>
+
+/**
+ * DOC: B+Tree basics
+ *
+ * A B+Tree is a data structure for looking up arbitrary (currently allowing
+ * unsigned long, u32, u64 and 2 * u64) keys into pointers. The data structure
+ * is described at http://en.wikipedia.org/wiki/B-tree, we currently do not
+ * use binary search to find the key on lookups.
+ *
+ * Each B+Tree consists of a head, that contains bookkeeping information and
+ * a variable number (starting with zero) nodes. Each node contains the keys
+ * and pointers to sub-nodes, or, for leaf nodes, the keys and values for the
+ * tree entries.
+ *
+ * Each node in this implementation has the following layout:
+ * [key1, key2, ..., keyN] [val1, val2, ..., valN]
+ *
+ * Each key here is an array of unsigned longs, geo->no_longs in total. The
+ * number of keys and values (N) is geo->no_pairs.
+ */
+
+/**
+ * struct btree_head - btree head
+ *
+ * @node: the first node in the tree
+ * @mempool: mempool used for node allocations
+ * @height: current of the tree
+ */
+struct btree_head {
+ unsigned long *node;
+ mempool_t *mempool;
+ int height;
+};
+
+/* btree geometry */
+struct btree_geo;
+
+/**
+ * btree_alloc - allocate function for the mempool
+ * @gfp_mask: gfp mask for the allocation
+ * @pool_data: unused
+ */
+void *btree_alloc(gfp_t gfp_mask, void *pool_data);
+
+/**
+ * btree_free - free function for the mempool
+ * @element: the element to free
+ * @pool_data: unused
+ */
+void btree_free(void *element, void *pool_data);
+
+/**
+ * btree_init_mempool - initialise a btree with given mempool
+ *
+ * @head: the btree head to initialise
+ * @mempool: the mempool to use
+ *
+ * When this function is used, there is no need to destroy
+ * the mempool.
+ */
+void btree_init_mempool(struct btree_head *head, mempool_t *mempool);
+
+/**
+ * btree_init - initialise a btree
+ *
+ * @head: the btree head to initialise
+ *
+ * This function allocates the memory pool that the
+ * btree needs. Returns zero or a negative error code
+ * (-%ENOMEM) when memory allocation fails.
+ *
+ */
+int __must_check btree_init(struct btree_head *head);
+
+/**
+ * btree_destroy - destroy mempool
+ *
+ * @head: the btree head to destroy
+ *
+ * This function destroys the internal memory pool, use only
+ * when using btree_init(), not with btree_init_mempool().
+ */
+void btree_destroy(struct btree_head *head);
+
+/**
+ * btree_lookup - look up a key in the btree
+ *
+ * @head: the btree to look in
+ * @geo: the btree geometry
+ * @key: the key to look up
+ *
+ * This function returns the value for the given key, or %NULL.
+ */
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+/**
+ * btree_insert - insert an entry into the btree
+ *
+ * @head: the btree to add to
+ * @geo: the btree geometry
+ * @key: the key to add (must not already be present)
+ * @val: the value to add (must not be %NULL)
+ * @gfp: allocation flags for node allocations
+ *
+ * This function returns 0 if the item could be added, or an
+ * error code if it failed (may fail due to memory pressure).
+ */
+int __must_check btree_insert(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, gfp_t gfp);
+/**
+ * btree_update - update an entry in the btree
+ *
+ * @head: the btree to update
+ * @geo: the btree geometry
+ * @key: the key to update
+ * @val: the value to change it to (must not be %NULL)
+ *
+ * This function returns 0 if the update was successful, or
+ * -%ENOENT if the key could not be found.
+ */
+int btree_update(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val);
+/**
+ * btree_remove - remove an entry from the btree
+ *
+ * @head: the btree to update
+ * @geo: the btree geometry
+ * @key: the key to remove
+ *
+ * This function returns the removed entry, or %NULL if the key
+ * could not be found.
+ */
+void *btree_remove(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+/**
+ * btree_merge - merge two btrees
+ *
+ * @target: the tree that gets all the entries
+ * @victim: the tree that gets merged into @target
+ * @geo: the btree geometry
+ * @gfp: allocation flags
+ *
+ * The two trees @target and @victim may not contain the same keys,
+ * that is a bug and triggers a BUG(). This function returns zero
+ * if the trees were merged successfully, and may return a failure
+ * when memory allocation fails, in which case both trees might have
+ * been partially merged, i.e. some entries have been moved from
+ * @victim to @target.
+ */
+int btree_merge(struct btree_head *target, struct btree_head *victim,
+ struct btree_geo *geo, gfp_t gfp);
+
+/**
+ * btree_last - get last entry in btree
+ *
+ * @head: btree head
+ * @geo: btree geometry
+ * @key: last key
+ *
+ * Returns the last entry in the btree, and sets @key to the key
+ * of that entry; returns NULL if the tree is empty, in that case
+ * key is not changed.
+ */
+void *btree_last(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+/**
+ * btree_get_prev - get previous entry
+ *
+ * @head: btree head
+ * @geo: btree geometry
+ * @key: pointer to key
+ *
+ * The function returns the next item right before the value pointed to by
+ * @key, and updates @key with its key, or returns %NULL when there is no
+ * entry with a key smaller than the given key.
+ */
+void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key);
+
+
+/* internal use, use btree_visitor{l,32,64,128} */
+size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key, size_t index,
+ void *func2),
+ void *func2);
+
+/* internal use, use btree_grim_visitor{l,32,64,128} */
+size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2);
+
+
+#include <linux/btree-128.h>
+
+extern struct btree_geo btree_geo32;
+#define BTREE_TYPE_SUFFIX l
+#define BTREE_TYPE_BITS BITS_PER_LONG
+#define BTREE_TYPE_GEO &btree_geo32
+#define BTREE_KEYTYPE unsigned long
+#include <linux/btree-type.h>
+
+#define btree_for_each_safel(head, key, val) \
+ for (val = btree_lastl(head, &key); \
+ val; \
+ val = btree_get_prevl(head, &key))
+
+#define BTREE_TYPE_SUFFIX 32
+#define BTREE_TYPE_BITS 32
+#define BTREE_TYPE_GEO &btree_geo32
+#define BTREE_KEYTYPE u32
+#include <linux/btree-type.h>
+
+#define btree_for_each_safe32(head, key, val) \
+ for (val = btree_last32(head, &key); \
+ val; \
+ val = btree_get_prev32(head, &key))
+
+extern struct btree_geo btree_geo64;
+#define BTREE_TYPE_SUFFIX 64
+#define BTREE_TYPE_BITS 64
+#define BTREE_TYPE_GEO &btree_geo64
+#define BTREE_KEYTYPE u64
+#include <linux/btree-type.h>
+
+#define btree_for_each_safe64(head, key, val) \
+ for (val = btree_last64(head, &key); \
+ val; \
+ val = btree_get_prev64(head, &key))
+
+#endif
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 3db7767d2a17..7e7c98a3e908 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -60,6 +60,21 @@ struct can_priv {
*/
#define get_can_dlc(i) (min_t(__u8, (i), 8))
+/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
+static inline int can_dropped_invalid_skb(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ const struct can_frame *cf = (struct can_frame *)skb->data;
+
+ if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) {
+ kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ return 1;
+ }
+
+ return 0;
+}
+
struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
void free_candev(struct net_device *dev);
diff --git a/include/linux/can/netlink.h b/include/linux/can/netlink.h
index 9ecbb7871c0e..c818335fbb13 100644
--- a/include/linux/can/netlink.h
+++ b/include/linux/can/netlink.h
@@ -80,6 +80,7 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_LOOPBACK 0x1 /* Loopback mode */
#define CAN_CTRLMODE_LISTENONLY 0x2 /* Listen-only mode */
#define CAN_CTRLMODE_3_SAMPLES 0x4 /* Triple sampling mode */
+#define CAN_CTRLMODE_ONE_SHOT 0x8 /* One-Shot mode */
/*
* CAN device statistics
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 5be3dab4a695..a5a472b10746 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -5,7 +5,7 @@
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
-# define __kernel /* default address space */
+# define __kernel __attribute__((address_space(0)))
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
@@ -15,6 +15,7 @@
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
+# define __percpu __attribute__((noderef, address_space(3)))
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
@@ -32,6 +33,7 @@ extern void __chk_io_ptr(const volatile void __iomem *);
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
+# define __percpu
#endif
#ifdef __KERNEL__
diff --git a/arch/arm/mach-davinci/include/mach/emac.h b/include/linux/davinci_emac.h
index beff4fb7c845..7c930dba477c 100644
--- a/arch/arm/mach-davinci/include/mach/emac.h
+++ b/include/linux/davinci_emac.h
@@ -8,8 +8,8 @@
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
-#ifndef _MACH_DAVINCI_EMAC_H
-#define _MACH_DAVINCI_EMAC_H
+#ifndef _LINUX_DAVINCI_EMAC_H
+#define _LINUX_DAVINCI_EMAC_H
#include <linux/if_ether.h>
#include <linux/memory.h>
@@ -19,12 +19,15 @@ struct emac_platform_data {
u32 ctrl_reg_offset;
u32 ctrl_mod_reg_offset;
u32 ctrl_ram_offset;
+ u32 hw_ram_addr;
u32 mdio_reg_offset;
u32 ctrl_ram_size;
u32 phy_mask;
u32 mdio_max_freq;
u8 rmii_en;
u8 version;
+ void (*interrupt_enable) (void);
+ void (*interrupt_disable) (void);
};
enum {
diff --git a/include/linux/decompress/unlzo.h b/include/linux/decompress/unlzo.h
new file mode 100644
index 000000000000..987229752519
--- /dev/null
+++ b/include/linux/decompress/unlzo.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_UNLZO_H
+#define DECOMPRESS_UNLZO_H
+
+int unlzo(unsigned char *inbuf, int len,
+ int(*fill)(void*, unsigned int),
+ int(*flush)(void*, unsigned int),
+ unsigned char *output,
+ int *pos,
+ void(*error)(char *x));
+#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index a62799f2ab00..b30527db3ac0 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -472,6 +472,23 @@ static inline int device_is_registered(struct device *dev)
return dev->kobj.state_in_sysfs;
}
+static inline void device_enable_async_suspend(struct device *dev)
+{
+ if (dev->power.status == DPM_ON)
+ dev->power.async_suspend = true;
+}
+
+static inline void device_disable_async_suspend(struct device *dev)
+{
+ if (dev->power.status == DPM_ON)
+ dev->power.async_suspend = false;
+}
+
+static inline bool device_async_suspend_enabled(struct device *dev)
+{
+ return !!dev->power.async_suspend;
+}
+
void driver_init(void);
/*
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
index c30879cf93bc..96e87693d933 100644
--- a/include/linux/dm9000.h
+++ b/include/linux/dm9000.h
@@ -23,7 +23,7 @@
#define DM9000_PLATF_NO_EEPROM (0x0010)
#define DM9000_PLATF_SIMPLE_PHY (0x0020) /* Use NSR to find LinkStatus */
-/* platfrom data for platfrom device structure's platfrom_data field */
+/* platform data for platform device structure's platform_data field */
struct dm9000_plat_data {
unsigned int flags;
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index e84f4733cb55..78962272338a 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 91
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index db5721ad50d1..a4d82f895994 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
NL_PACKET(resize, 7,
NL_INT64( 29, T_MAY_IGNORE, resize_size)
+ NL_BIT( 68, T_MAY_IGNORE, resize_force)
)
NL_PACKET(syncer_conf, 8,
diff --git a/include/linux/edac_mce.h b/include/linux/edac_mce.h
new file mode 100644
index 000000000000..f974fc035363
--- /dev/null
+++ b/include/linux/edac_mce.h
@@ -0,0 +1,31 @@
+/* Provides edac interface to mcelog events
+ *
+ * This file may be distributed under the terms of the
+ * GNU General Public License version 2.
+ *
+ * Copyright (c) 2009 by:
+ * Mauro Carvalho Chehab <mchehab@redhat.com>
+ *
+ * Red Hat Inc. http://www.redhat.com
+ */
+
+#if defined(CONFIG_EDAC_MCE) || \
+ (defined(CONFIG_EDAC_MCE_MODULE) && defined(MODULE))
+
+#include <asm/mce.h>
+#include <linux/list.h>
+
+struct edac_mce {
+ struct list_head list;
+
+ void *priv;
+ int (*check_error)(void *priv, struct mce *mce);
+};
+
+int edac_mce_register(struct edac_mce *edac_mce);
+void edac_mce_unregister(struct edac_mce *edac_mce);
+int edac_mce_parse(struct mce *mce);
+
+#else
+#define edac_mce_parse(mce) (0)
+#endif
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index 6b049030fbe6..e6590f8f0b3c 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -202,14 +202,6 @@ static inline __u32 ext3_mask_flags(umode_t mode, __u32 flags)
return flags & EXT3_OTHER_FLMASK;
}
-/*
- * Inode dynamic state flags
- */
-#define EXT3_STATE_JDATA 0x00000001 /* journaled data exists */
-#define EXT3_STATE_NEW 0x00000002 /* inode is newly created */
-#define EXT3_STATE_XATTR 0x00000004 /* has in-inode xattrs */
-#define EXT3_STATE_FLUSH_ON_CLOSE 0x00000008
-
/* Used to pass group descriptor data when online resize is done */
struct ext3_new_group_input {
__u32 group; /* Group number for this data */
@@ -560,6 +552,31 @@ static inline int ext3_valid_inum(struct super_block *sb, unsigned long ino)
(ino >= EXT3_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count));
}
+
+/*
+ * Inode dynamic state flags
+ */
+enum {
+ EXT3_STATE_JDATA, /* journaled data exists */
+ EXT3_STATE_NEW, /* inode is newly created */
+ EXT3_STATE_XATTR, /* has in-inode xattrs */
+ EXT3_STATE_FLUSH_ON_CLOSE, /* flush dirty pages on close */
+};
+
+static inline int ext3_test_inode_state(struct inode *inode, int bit)
+{
+ return test_bit(bit, &EXT3_I(inode)->i_state);
+}
+
+static inline void ext3_set_inode_state(struct inode *inode, int bit)
+{
+ set_bit(bit, &EXT3_I(inode)->i_state);
+}
+
+static inline void ext3_clear_inode_state(struct inode *inode, int bit)
+{
+ clear_bit(bit, &EXT3_I(inode)->i_state);
+}
#else
/* Assume that user mode programs are passing in an ext3fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 93e7428156ba..7679acdb519a 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -87,7 +87,7 @@ struct ext3_inode_info {
* near to their parent directory's inode.
*/
__u32 i_block_group;
- __u32 i_state; /* Dynamic state flags for ext3 */
+ unsigned long i_state; /* Dynamic state flags for ext3 */
/* block reservation info */
struct ext3_block_alloc_info *i_block_alloc_info;
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
new file mode 100644
index 000000000000..f0949a57ca9d
--- /dev/null
+++ b/include/linux/fanotify.h
@@ -0,0 +1,105 @@
+#ifndef _LINUX_FANOTIFY_H
+#define _LINUX_FANOTIFY_H
+
+#include <linux/types.h>
+
+/* the following events that user-space can register for */
+#define FAN_ACCESS 0x00000001 /* File was accessed */
+#define FAN_MODIFY 0x00000002 /* File was modified */
+#define FAN_CLOSE_WRITE 0x00000008 /* Unwrittable file closed */
+#define FAN_CLOSE_NOWRITE 0x00000010 /* Writtable file closed */
+#define FAN_OPEN 0x00000020 /* File was opened */
+
+#define FAN_EVENT_ON_CHILD 0x08000000 /* interested in child events */
+
+/* FIXME currently Q's have no limit.... */
+#define FAN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+
+#define FAN_OPEN_PERM 0x00010000 /* File open in perm check */
+#define FAN_ACCESS_PERM 0x00020000 /* File accessed in perm check */
+
+/* helper events */
+#define FAN_CLOSE (FAN_CLOSE_WRITE | FAN_CLOSE_NOWRITE) /* close */
+
+/* flags used for fanotify_init() */
+#define FAN_CLOEXEC 0x00000001
+#define FAN_NONBLOCK 0x00000002
+
+#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK)
+
+/* flags used for fanotify_modify_mark() */
+#define FAN_MARK_ADD 0x00000001
+#define FAN_MARK_REMOVE 0x00000002
+#define FAN_MARK_DONT_FOLLOW 0x00000004
+#define FAN_MARK_ONLYDIR 0x00000008
+#define FAN_MARK_MOUNT 0x00000010
+#define FAN_MARK_IGNORED_MASK 0x00000020
+#define FAN_MARK_IGNORED_SURV_MODIFY 0x00000040
+#define FAN_MARK_FLUSH 0x00000080
+
+#define FAN_ALL_MARK_FLAGS (FAN_MARK_ADD |\
+ FAN_MARK_REMOVE |\
+ FAN_MARK_DONT_FOLLOW |\
+ FAN_MARK_ONLYDIR |\
+ FAN_MARK_MOUNT |\
+ FAN_MARK_IGNORED_MASK |\
+ FAN_MARK_IGNORED_SURV_MODIFY)
+
+/*
+ * All of the events - we build the list by hand so that we can add flags in
+ * the future and not break backward compatibility. Apps will get only the
+ * events that they originally wanted. Be sure to add new events here!
+ */
+#define FAN_ALL_EVENTS (FAN_ACCESS |\
+ FAN_MODIFY |\
+ FAN_CLOSE |\
+ FAN_OPEN)
+
+/*
+ * All events which require a permission response from userspace
+ */
+#define FAN_ALL_PERM_EVENTS (FAN_OPEN_PERM |\
+ FAN_ACCESS_PERM)
+
+#define FAN_ALL_OUTGOING_EVENTS (FAN_ALL_EVENTS |\
+ FAN_ALL_PERM_EVENTS |\
+ FAN_Q_OVERFLOW)
+
+#define FANOTIFY_METADATA_VERSION 1
+
+struct fanotify_event_metadata {
+ __u32 event_len;
+ __u32 vers;
+ __s32 fd;
+ __u64 mask;
+ __s64 pid;
+} __attribute__ ((packed));
+
+struct fanotify_response {
+ __s32 fd;
+ __u32 response;
+} __attribute__ ((packed));
+
+/* Legit userspace responses to a _PERM event */
+#define FAN_ALLOW 0x01
+#define FAN_DENY 0x02
+
+/* Helper functions to deal with fanotify_event_metadata buffers */
+#define FAN_EVENT_METADATA_LEN (sizeof(struct fanotify_event_metadata))
+
+#define FAN_EVENT_NEXT(meta, len) ((len) -= (meta)->event_len, \
+ (struct fanotify_event_metadata*)(((char *)(meta)) + \
+ (meta)->event_len))
+
+#define FAN_EVENT_OK(meta, len) ((long)(len) >= (long)FAN_EVENT_METADATA_LEN && \
+ (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \
+ (long)(meta)->event_len <= (long)(len))
+
+#ifdef __KERNEL__
+
+struct fanotify_wait {
+ struct fsnotify_event *event;
+ __s32 fd;
+};
+#endif /* __KERNEL__ */
+#endif /* _LINUX_FANOTIFY_H */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index a0e67150a729..4bd94bf5e739 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -65,12 +65,13 @@
#define CSR_DIRECTORY_ID 0x20
struct fw_csr_iterator {
- u32 *p;
- u32 *end;
+ const u32 *p;
+ const u32 *end;
};
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p);
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value);
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size);
extern struct bus_type fw_bus_type;
@@ -162,7 +163,7 @@ struct fw_device {
struct mutex client_list_mutex;
struct list_head client_list;
- u32 *config_rom;
+ const u32 *config_rom;
size_t config_rom_length;
int config_rom_retries;
unsigned is_local:1;
@@ -204,7 +205,7 @@ int fw_device_enable_phys_dma(struct fw_device *device);
*/
struct fw_unit {
struct device device;
- u32 *directory;
+ const u32 *directory;
struct fw_attribute_group attribute_group;
};
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 9147ca88f253..91457acec7de 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -87,6 +87,9 @@ struct inodes_stat_t {
*/
#define FMODE_NOCMTIME ((__force fmode_t)2048)
+/* File was opened by fanotify and shouldn't generate fanotify events */
+#define FMODE_NONOTIFY ((__force fmode_t)8388608)
+
/*
* The below are the various read and write types that we support. Some of
* them include behavioral modifiers that send information down to the
@@ -602,6 +605,8 @@ struct address_space_operations {
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
+ int (*commit_unstable_pages)(struct address_space *,
+ struct writeback_control *);
};
/*
@@ -764,12 +769,7 @@ struct inode {
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
- struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */
-#endif
-
-#ifdef CONFIG_INOTIFY
- struct list_head inotify_watches; /* watches on this inode */
- struct mutex inotify_mutex; /* protects the watches list */
+ struct hlist_head i_fsnotify_marks;
#endif
unsigned long i_state;
@@ -1635,6 +1635,8 @@ struct super_operations {
#define I_CLEAR 64
#define __I_SYNC 7
#define I_SYNC (1 << __I_SYNC)
+#define __I_UNSTABLE_PAGES 8
+#define I_UNSTABLE_PAGES (1 << __I_UNSTABLE_PAGES)
#define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES)
@@ -1649,6 +1651,11 @@ static inline void mark_inode_dirty_sync(struct inode *inode)
__mark_inode_dirty(inode, I_DIRTY_SYNC);
}
+static inline void mark_inode_unstable_pages(struct inode *inode)
+{
+ __mark_inode_dirty(inode, I_UNSTABLE_PAGES);
+}
+
/**
* inc_nlink - directly increment an inode's link count
* @inode: inode
@@ -2464,7 +2471,8 @@ int proc_nr_files(struct ctl_table *table, int write,
int __init get_filesystem_list(char *buf);
#define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE])
-#define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE))
+#define OPEN_FMODE(flag) ((__force fmode_t)(((flag + 1) & O_ACCMODE) | \
+ (flag & FMODE_NONOTIFY)))
#endif /* __KERNEL__ */
#endif /* _LINUX_FS_H */
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 936f9aa8bb97..8db68579e311 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -11,8 +11,6 @@
* (C) Copyright 2005 Robert Love
*/
-#include <linux/dnotify.h>
-#include <linux/inotify.h>
#include <linux/fsnotify_backend.h>
#include <linux/audit.h>
@@ -20,35 +18,51 @@
* fsnotify_d_instantiate - instantiate a dentry for inode
* Called with dcache_lock held.
*/
-static inline void fsnotify_d_instantiate(struct dentry *entry,
- struct inode *inode)
+static inline void fsnotify_d_instantiate(struct dentry *dentry,
+ struct inode *inode)
{
- __fsnotify_d_instantiate(entry, inode);
-
- inotify_d_instantiate(entry, inode);
+ __fsnotify_d_instantiate(dentry, inode);
}
/* Notify this dentry's parent about a child's events. */
-static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
+static inline void fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{
- __fsnotify_parent(dentry, mask);
+ if (!dentry)
+ dentry = path->dentry;
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
+ __fsnotify_parent(path, dentry, mask);
+}
+
+/* simple call site for access decisions */
+static inline int fsnotify_perm(struct file *file, int mask)
+{
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
+ __u32 fsnotify_mask;
+
+ if (file->f_mode & FMODE_NONOTIFY)
+ return 0;
+ if (!(mask & (MAY_READ | MAY_OPEN)))
+ return 0;
+ if (mask & MAY_READ)
+ fsnotify_mask = FS_ACCESS_PERM;
+ if (mask & MAY_OPEN)
+ fsnotify_mask = FS_OPEN_PERM;
+
+ return fsnotify(inode, fsnotify_mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
}
/*
- * fsnotify_d_move - entry has been moved
- * Called with dcache_lock and entry->d_lock held.
+ * fsnotify_d_move - dentry has been moved
+ * Called with dcache_lock and dentry->d_lock held.
*/
-static inline void fsnotify_d_move(struct dentry *entry)
+static inline void fsnotify_d_move(struct dentry *dentry)
{
/*
- * On move we need to update entry->d_flags to indicate if the new parent
- * cares about events from this entry.
+ * On move we need to update dentry->d_flags to indicate if the new parent
+ * cares about events from this dentry.
*/
- __fsnotify_update_dcache_flags(entry);
-
- inotify_d_move(entry);
+ __fsnotify_update_dcache_flags(dentry);
}
/*
@@ -56,8 +70,6 @@ static inline void fsnotify_d_move(struct dentry *entry)
*/
static inline void fsnotify_link_count(struct inode *inode)
{
- inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
-
fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
@@ -69,7 +81,6 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
- u32 in_cookie = inotify_get_cookie();
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
__u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
@@ -78,31 +89,19 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
old_dir_mask |= FS_DN_RENAME;
if (isdir) {
- isdir = IN_ISDIR;
old_dir_mask |= FS_IN_ISDIR;
new_dir_mask |= FS_IN_ISDIR;
}
- inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
- source);
- inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
- source);
-
fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
- if (target) {
- inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
- inotify_inode_is_dead(target);
-
- /* this is really a link_count change not a removal */
+ if (target)
fsnotify_link_count(target);
- }
- if (source) {
- inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
+ if (source)
fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
- }
+
audit_inode_child(new_name, moved, new_dir);
}
@@ -115,6 +114,14 @@ static inline void fsnotify_inode_delete(struct inode *inode)
}
/*
+ * fsnotify_vfsmount_delete - a vfsmount is being destroyed, clean up is needed
+ */
+static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{
+ __fsnotify_vfsmount_delete(mnt);
+}
+
+/*
* fsnotify_nameremove - a filename was removed from a directory
*/
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
@@ -124,7 +131,7 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
if (isdir)
mask |= FS_IN_ISDIR;
- fsnotify_parent(dentry, mask);
+ fsnotify_parent(NULL, dentry, mask);
}
/*
@@ -132,9 +139,6 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
*/
static inline void fsnotify_inoderemove(struct inode *inode)
{
- inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
- inotify_inode_is_dead(inode);
-
fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
__fsnotify_inode_delete(inode);
}
@@ -144,8 +148,6 @@ static inline void fsnotify_inoderemove(struct inode *inode)
*/
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
- inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
- dentry->d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@@ -158,8 +160,6 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
{
- inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
- inode);
fsnotify_link_count(inode);
audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
@@ -174,7 +174,6 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
__u32 mask = (FS_CREATE | FS_IN_ISDIR);
struct inode *d_inode = dentry->d_inode;
- inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
@@ -183,52 +182,55 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
/*
* fsnotify_access - file was read
*/
-static inline void fsnotify_access(struct dentry *dentry)
+static inline void fsnotify_access(struct file *file)
{
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
* fsnotify_modify - file was modified
*/
-static inline void fsnotify_modify(struct dentry *dentry)
+static inline void fsnotify_modify(struct file *file)
{
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
* fsnotify_open - file was opened
*/
-static inline void fsnotify_open(struct dentry *dentry)
+static inline void fsnotify_open(struct file *file)
{
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = path->dentry->d_inode;
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
@@ -236,18 +238,18 @@ static inline void fsnotify_open(struct dentry *dentry)
*/
static inline void fsnotify_close(struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct path *path = &file->f_path;
+ struct inode *inode = file->f_path.dentry->d_inode;
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
- fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
+ if (!(file->f_mode & FMODE_NONOTIFY)) {
+ fsnotify_parent(path, NULL, mask);
+ fsnotify(inode, mask, path, FSNOTIFY_EVENT_PATH, NULL, 0);
+ }
}
/*
@@ -261,9 +263,7 @@ static inline void fsnotify_xattr(struct dentry *dentry)
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
-
- fsnotify_parent(dentry, mask);
+ fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
@@ -297,14 +297,13 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
if (mask) {
if (S_ISDIR(inode->i_mode))
mask |= FS_IN_ISDIR;
- inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
- fsnotify_parent(dentry, mask);
+ fsnotify_parent(NULL, dentry, mask);
fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
}
-#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */
+#if defined(CONFIG_FSNOTIFY) /* notify helpers */
/*
* fsnotify_oldname_init - save off the old filename before we change it
@@ -322,7 +321,7 @@ static inline void fsnotify_oldname_free(const char *old_name)
kfree(old_name);
}
-#else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */
+#else /* CONFIG_FSNOTIFY */
static inline const char *fsnotify_oldname_init(const char *name)
{
@@ -333,6 +332,6 @@ static inline void fsnotify_oldname_free(const char *old_name)
{
}
-#endif /* ! CONFIG_INOTIFY */
+#endif /* CONFIG_FSNOTIFY */
#endif /* _LINUX_FS_NOTIFY_H */
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 4d6f47b51189..e5325a8eeeb4 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -41,6 +41,9 @@
#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
+#define FS_OPEN_PERM 0x00010000 /* open event in an permission hook */
+#define FS_ACCESS_PERM 0x00020000 /* access event in a permissions hook */
+
#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */
#define FS_IN_ONESHOT 0x80000000 /* only send event once */
@@ -58,13 +61,11 @@
FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
FS_DELETE)
-/* listeners that hard code group numbers near the top */
-#define DNOTIFY_GROUP_NUM UINT_MAX
-#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
+#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
struct fsnotify_group;
struct fsnotify_event;
-struct fsnotify_mark_entry;
+struct fsnotify_mark;
struct fsnotify_event_private_data;
/*
@@ -80,10 +81,12 @@ struct fsnotify_event_private_data;
* valid group and inode to use to clean up.
*/
struct fsnotify_ops {
- bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
+ bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type);
int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
void (*free_group_priv)(struct fsnotify_group *group);
- void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
+ void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event_priv)(struct fsnotify_event_private_data *priv);
};
@@ -96,10 +99,14 @@ struct fsnotify_ops {
struct fsnotify_group {
/*
* global list of all groups receiving events from fsnotify.
- * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex
+ * anchored by fsnotify_inode_groups and protected by either fsnotify_grp_mutex
* or fsnotify_grp_srcu depending on write vs read.
*/
- struct list_head group_list;
+ struct list_head inode_group_list;
+ /*
+ * same as above except anchored by fsnotify_vfsmount_groups
+ */
+ struct list_head vfsmount_group_list;
/*
* Defines all of the event types in which this group is interested.
@@ -119,7 +126,6 @@ struct fsnotify_group {
* closed.
*/
atomic_t refcnt; /* things with interest in this group */
- unsigned int group_num; /* simply prevents accidental group collision */
const struct fsnotify_ops *ops; /* how this group handles things */
@@ -130,15 +136,17 @@ struct fsnotify_group {
unsigned int q_len; /* events on the queue */
unsigned int max_events; /* maximum events allowed on the list */
- /* stores all fastapth entries assoc with this group so they can be cleaned on unregister */
- spinlock_t mark_lock; /* protect mark_entries list */
- atomic_t num_marks; /* 1 for each mark entry and 1 for not being
+ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */
+ spinlock_t mark_lock; /* protect marks_list */
+ atomic_t num_marks; /* 1 for each mark and 1 for not being
* past the point of no return when freeing
* a group */
- struct list_head mark_entries; /* all inode mark entries for this group */
+ struct list_head marks_list; /* all inode marks for this group */
+ unsigned int priority; /* order of this group compared to others */
/* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */
- bool on_group_list;
+ bool on_inode_group_list;
+ bool on_vfsmount_group_list;
/* groups can define private fields here or use the void *private */
union {
@@ -152,6 +160,14 @@ struct fsnotify_group {
struct user_struct *user;
} inotify_data;
#endif
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ struct fanotify_group_private_data {
+ /* allows a group to block waiting for a userspace response */
+ struct mutex access_mutex;
+ struct list_head access_list;
+ wait_queue_head_t access_waitq;
+ } fanotify_data;
+#endif
};
};
@@ -210,7 +226,6 @@ struct fsnotify_event {
#define FSNOTIFY_EVENT_NONE 0
#define FSNOTIFY_EVENT_PATH 1
#define FSNOTIFY_EVENT_INODE 2
-#define FSNOTIFY_EVENT_FILE 3
int data_type; /* which of the above union we have */
atomic_t refcnt; /* how many groups still are using/need to send this event */
__u32 mask; /* the type of access, bitwise OR for FS_* event types */
@@ -218,12 +233,35 @@ struct fsnotify_event {
u32 sync_cookie; /* used to corrolate events, namely inotify mv events */
char *file_name;
size_t name_len;
+ struct pid *tgid;
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ __u32 response; /* userspace answer to question */
+#endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */
struct list_head private_data_list; /* groups can store private data here */
};
/*
- * a mark is simply an entry attached to an in core inode which allows an
+ * Inode specific fields in an fsnotify_mark
+ */
+struct fsnotify_inode_mark {
+ struct inode *inode; /* inode this mark is associated with */
+ struct hlist_node i_list; /* list of marks by inode->i_fsnotify_marks */
+ struct list_head free_i_list; /* tmp list used when freeing this mark */
+};
+
+/*
+ * Mount point specific fields in an fsnotify_mark
+ */
+struct fsnotify_vfsmount_mark {
+ struct vfsmount *mnt; /* vfsmount this mark is associated with */
+ struct hlist_node m_list; /* list of marks by inode->i_fsnotify_marks */
+ struct list_head free_m_list; /* tmp list used when freeing this mark */
+};
+
+/*
+ * a mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
*
@@ -232,19 +270,26 @@ struct fsnotify_event {
* (such as dnotify) will flush these when the open fd is closed and not at
* inode eviction or modification.
*/
-struct fsnotify_mark_entry {
- __u32 mask; /* mask this mark entry is for */
+struct fsnotify_mark {
+ __u32 mask; /* mask this mark is for */
/* we hold ref for each i_list and g_list. also one ref for each 'thing'
* in kernel that found and may be using this mark. */
atomic_t refcnt; /* active things looking at this mark */
- struct inode *inode; /* inode this entry is associated with */
- struct fsnotify_group *group; /* group this mark entry is for */
- struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */
- struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */
- spinlock_t lock; /* protect group, inode, and killme */
- struct list_head free_i_list; /* tmp list used when freeing this mark */
+ struct fsnotify_group *group; /* group this mark is for */
+ struct list_head g_list; /* list of marks by group->i_fsnotify_marks */
+ spinlock_t lock; /* protect group and inode */
+ union {
+ struct fsnotify_inode_mark i;
+ struct fsnotify_vfsmount_mark m;
+ };
+ __u32 ignored_mask; /* events types to ignore */
struct list_head free_g_list; /* tmp list used when freeing this mark */
- void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
+#define FSNOTIFY_MARK_FLAG_INODE 0x01
+#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
+#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
+ unsigned int flags; /* vfsmount or inode mark? */
+ void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
#ifdef CONFIG_FSNOTIFY
@@ -252,10 +297,11 @@ struct fsnotify_mark_entry {
/* called from the vfs helpers */
/* main fsnotify call to send events */
-extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
const char *name, u32 cookie);
-extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
+extern void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask);
extern void __fsnotify_inode_delete(struct inode *inode);
+extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt);
extern u32 fsnotify_get_cookie(void);
static inline int fsnotify_inode_watches_children(struct inode *inode)
@@ -307,12 +353,10 @@ static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode
/* must call when a group changes its ->mask */
extern void fsnotify_recalc_global_mask(void);
/* get a reference to an existing or create a new group */
-extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
- __u32 mask,
- const struct fsnotify_ops *ops);
+extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops);
/* run all marks associated with this group and update group->mask */
extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
-/* drop reference on a group from fsnotify_obtain_group */
+/* drop reference on a group from fsnotify_alloc_group */
extern void fsnotify_put_group(struct fsnotify_group *group);
/* take a reference to an event */
@@ -323,8 +367,13 @@ extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struc
struct fsnotify_event *event);
/* attach the event to the group notification queue */
-extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
- struct fsnotify_event_private_data *priv);
+extern int fsnotify_add_notify_event(struct fsnotify_group *group,
+ struct fsnotify_event *event,
+ struct fsnotify_event_private_data *priv,
+ int (*merge)(struct list_head *,
+ struct fsnotify_event *,
+ void **),
+ void **arg);
/* true if the group notification queue is empty */
extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
/* return, but do not dequeue the first event on the notification queue */
@@ -334,19 +383,36 @@ extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group
/* functions used to manipulate the marks attached to inodes */
+/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */
+extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt);
/* run all marks associated with an inode and update inode->i_fsnotify_mask */
extern void fsnotify_recalc_inode_mask(struct inode *inode);
-extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
+extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark));
/* find (and take a reference) to a mark associated with group and inode */
-extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
+extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
+/* find (and take a reference) to a mark associated with group and vfsmount */
+extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
+/* copy the values from old into new */
+extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old);
+/* set the ignored_mask of a mark */
+extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
+/* set the mask of a mark (might pin the object into memory */
+extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask);
/* attach the mark to both the group and the inode */
-extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
+extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
+ struct inode *inode, struct vfsmount *mnt, int allow_dups);
/* given a mark, flag it to be freed when all references are dropped */
-extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
+extern void fsnotify_destroy_mark(struct fsnotify_mark *mark);
+/* run all the marks in a group, and clear all of the vfsmount marks */
+extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
+/* run all the marks in a group, and clear all of the inode marks */
+extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
+/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/
+extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
/* run all the marks in a group, and flag them to be freed */
extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
-extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
-extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
+extern void fsnotify_get_mark(struct fsnotify_mark *mark);
+extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct list_head *list);
/* put here because inotify does some weird stuff when destroying watches */
@@ -354,18 +420,28 @@ extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32
void *data, int data_is, const char *name,
u32 cookie, gfp_t gfp);
+/* fanotify likes to change events after they are on lists... */
+extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event);
+extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
+ struct fsnotify_event *new_event);
+
#else
-static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
- const char *name, u32 cookie)
-{}
+static inline int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const char *name, u32 cookie)
+{
+ return 0;
+}
-static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+static inline void __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
{}
static inline void __fsnotify_inode_delete(struct inode *inode)
{}
+static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt)
+{}
+
static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
{}
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 2233c98d80df..0a09e758c7d3 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -188,7 +188,7 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
struct perf_event;
extern int ftrace_profile_enable(int event_id);
extern void ftrace_profile_disable(int event_id);
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 1bc08541c2b9..48e68da097f6 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -46,7 +46,6 @@ struct gameport {
struct mutex drv_mutex; /* protects serio->drv so attributes can pin driver */
struct device dev;
- unsigned int registered; /* port has been fully registered with driver core */
struct list_head node;
};
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index c6c0c41af35f..9717081c75ad 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
#define part_stat_read(part, field) \
({ \
typeof((part)->dkstats->field) res = 0; \
- int i; \
- for_each_possible_cpu(i) \
- res += per_cpu_ptr((part)->dkstats, i)->field; \
+ unsigned int _cpu; \
+ for_each_possible_cpu(_cpu) \
+ res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
res; \
})
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 557bdad320b6..f53e9b868c26 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -220,7 +220,7 @@ static inline enum zone_type gfp_zone(gfp_t flags)
((1 << ZONES_SHIFT) - 1);
if (__builtin_constant_p(bit))
- MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+ BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
else {
#ifdef CONFIG_DEBUG_VM
BUG_ON((GFP_ZONE_BAD >> bit) & 1);
diff --git a/include/linux/gfs2_ondisk.h b/include/linux/gfs2_ondisk.h
index 81f90a59cda6..4f4462974c14 100644
--- a/include/linux/gfs2_ondisk.h
+++ b/include/linux/gfs2_ondisk.h
@@ -180,33 +180,6 @@ struct gfs2_rgrp {
};
/*
- * quota linked list: user quotas and group quotas form two separate
- * singly linked lists. ll_next stores uids or gids of next quotas in the
- * linked list.
-
-Given the uid/gid, how to calculate the quota file offsets for the corresponding
-gfs2_quota structures on disk:
-
-for user quotas, given uid,
-offset = uid * sizeof(struct gfs2_quota);
-
-for group quotas, given gid,
-offset = (gid * sizeof(struct gfs2_quota)) + sizeof(struct gfs2_quota);
-
-
- uid:0 gid:0 uid:12 gid:12 uid:17 gid:17 uid:5142 gid:5142
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-| valid | valid | :: | valid | valid | :: | valid | inval | :: | inval | valid |
-+-------+-------+ +-------+-------+ +-------+- - - -+ +- - - -+-------+
-next:12 next:12 next:17 next:5142 next:NULL next:NULL
- | | | | |<-- user quota list |
- \______|___________/ \______|___________/ group quota list -->|
- | | |
- \__________________/ \_______________________________________/
-
-*/
-
-/*
* quota structure
*/
@@ -214,8 +187,7 @@ struct gfs2_quota {
__be64 qu_limit;
__be64 qu_warn;
__be64 qu_value;
- __be32 qu_ll_next; /* location of next quota in list */
- __u8 qu_reserved[60];
+ __u8 qu_reserved[64];
};
/*
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 211ff4497269..ab2cc20e21a5 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -46,7 +46,7 @@ void kmap_flush_unused(void);
static inline unsigned int nr_free_highpages(void) { return 0; }
-#define totalhigh_pages 0
+#define totalhigh_pages 0UL
#ifndef ARCH_HAS_KMAP
static inline void *kmap(struct page *page)
diff --git a/include/linux/i2c-pnx.h b/include/linux/i2c-pnx.h
index 9eb07bbc6522..a87124d4d533 100644
--- a/include/linux/i2c-pnx.h
+++ b/include/linux/i2c-pnx.h
@@ -12,9 +12,8 @@
#ifndef __I2C_PNX_H__
#define __I2C_PNX_H__
-#include <linux/pm.h>
-
struct platform_device;
+struct clk;
struct i2c_pnx_mif {
int ret; /* Return value */
@@ -26,20 +25,18 @@ struct i2c_pnx_mif {
};
struct i2c_pnx_algo_data {
- u32 base;
- u32 ioaddr;
- int irq;
+ void __iomem *ioaddr;
struct i2c_pnx_mif mif;
int last;
+ struct clk *clk;
+ struct i2c_pnx_data *i2c_pnx;
+ struct i2c_adapter adapter;
};
struct i2c_pnx_data {
- int (*suspend) (struct platform_device *pdev, pm_message_t state);
- int (*resume) (struct platform_device *pdev);
- u32 (*calculate_input_freq) (struct platform_device *pdev);
- int (*set_clock_run) (struct platform_device *pdev);
- int (*set_clock_stop) (struct platform_device *pdev);
- struct i2c_adapter *adapter;
+ const char *name;
+ u32 base;
+ int irq;
};
#endif /* __I2C_PNX_H__ */
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index fc5db826b48e..02c9af374741 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -89,4 +89,16 @@ struct adp5588_kpad_platform_data {
unsigned short unlock_key2; /* Unlock Key 2 */
};
+struct adp5588_gpio_platform_data {
+ unsigned gpio_start; /* GPIO Chip base # */
+ unsigned pullup_dis_mask; /* Pull-Up Disable Mask */
+ int (*setup)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ int (*teardown)(struct i2c_client *client,
+ int gpio, unsigned ngpio,
+ void *context);
+ void *context;
+};
+
#endif
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 163c840437d6..842701906ae9 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -120,6 +120,24 @@
#define IEEE80211_QOS_CTL_TID_MASK 0x000F
#define IEEE80211_QOS_CTL_TAG1D_MASK 0x0007
+/* U-APSD queue for WMM IEs sent by AP */
+#define IEEE80211_WMM_IE_AP_QOSINFO_UAPSD (1<<7)
+
+/* U-APSD queues for WMM IEs sent by STA */
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VO (1<<0)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_VI (1<<1)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BK (1<<2)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_BE (1<<3)
+#define IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK 0x0f
+
+/* U-APSD max SP length for WMM IEs sent by STA */
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL 0x00
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_2 0x01
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_4 0x02
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_6 0x03
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK 0x03
+#define IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT 5
+
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
@@ -130,6 +148,25 @@ struct ieee80211_hdr {
u8 addr4[6];
} __attribute__ ((packed));
+struct ieee80211_hdr_3addr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ __le16 seq_ctrl;
+} __attribute__ ((packed));
+
+struct ieee80211_qos_hdr {
+ __le16 frame_control;
+ __le16 duration_id;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ __le16 seq_ctrl;
+ __le16 qos_ctrl;
+} __attribute__ ((packed));
+
/**
* ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
* @fc: frame control bytes in little-endian byteorder
@@ -707,6 +744,10 @@ struct ieee80211_mgmt {
u8 action;
u8 trans_id[WLAN_SA_QUERY_TR_ID_LEN];
} __attribute__ ((packed)) sa_query;
+ struct {
+ u8 action;
+ u8 smps_control;
+ } __attribute__ ((packed)) ht_smps;
} u;
} __attribute__ ((packed)) action;
} u;
@@ -771,7 +812,10 @@ struct ieee80211_bar {
/**
* struct ieee80211_mcs_info - MCS information
* @rx_mask: RX mask
- * @rx_highest: highest supported RX rate
+ * @rx_highest: highest supported RX rate. If set represents
+ * the highest supported RX data rate in units of 1 Mbps.
+ * If this field is 0 this value should not be used to
+ * consider the highest RX data rate supported.
* @tx_params: TX parameters
*/
struct ieee80211_mcs_info {
@@ -824,6 +868,7 @@ struct ieee80211_ht_cap {
#define IEEE80211_HT_CAP_LDPC_CODING 0x0001
#define IEEE80211_HT_CAP_SUP_WIDTH_20_40 0x0002
#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_SM_PS_SHIFT 2
#define IEEE80211_HT_CAP_GRN_FLD 0x0010
#define IEEE80211_HT_CAP_SGI_20 0x0020
#define IEEE80211_HT_CAP_SGI_40 0x0040
@@ -839,6 +884,7 @@ struct ieee80211_ht_cap {
/* 802.11n HT capability AMPDU settings (for ampdu_params_info) */
#define IEEE80211_HT_AMPDU_PARM_FACTOR 0x03
#define IEEE80211_HT_AMPDU_PARM_DENSITY 0x1C
+#define IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT 2
/*
* Maximum length of AMPDU that the STA can receive.
@@ -922,12 +968,17 @@ struct ieee80211_ht_info {
#define IEEE80211_MAX_AMPDU_BUF 0x40
-/* Spatial Multiplexing Power Save Modes */
+/* Spatial Multiplexing Power Save Modes (for capability) */
#define WLAN_HT_CAP_SM_PS_STATIC 0
#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
#define WLAN_HT_CAP_SM_PS_INVALID 2
#define WLAN_HT_CAP_SM_PS_DISABLED 3
+/* for SM power control field lower two bits */
+#define WLAN_HT_SMPS_CONTROL_DISABLED 0
+#define WLAN_HT_SMPS_CONTROL_STATIC 1
+#define WLAN_HT_SMPS_CONTROL_DYNAMIC 3
+
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -1071,12 +1122,12 @@ enum ieee80211_eid {
WLAN_EID_TIM = 5,
WLAN_EID_IBSS_PARAMS = 6,
WLAN_EID_CHALLENGE = 16,
- /* 802.11d */
+
WLAN_EID_COUNTRY = 7,
WLAN_EID_HP_PARAMS = 8,
WLAN_EID_HP_TABLE = 9,
WLAN_EID_REQUEST = 10,
- /* 802.11e */
+
WLAN_EID_QBSS_LOAD = 11,
WLAN_EID_EDCA_PARAM_SET = 12,
WLAN_EID_TSPEC = 13,
@@ -1099,7 +1150,7 @@ enum ieee80211_eid {
WLAN_EID_PREP = 69,
WLAN_EID_PERR = 70,
WLAN_EID_RANN = 49, /* compatible with FreeBSD */
- /* 802.11h */
+
WLAN_EID_PWR_CONSTRAINT = 32,
WLAN_EID_PWR_CAPABILITY = 33,
WLAN_EID_TPC_REQUEST = 34,
@@ -1110,20 +1161,41 @@ enum ieee80211_eid {
WLAN_EID_MEASURE_REPORT = 39,
WLAN_EID_QUIET = 40,
WLAN_EID_IBSS_DFS = 41,
- /* 802.11g */
+
WLAN_EID_ERP_INFO = 42,
WLAN_EID_EXT_SUPP_RATES = 50,
- /* 802.11n */
+
WLAN_EID_HT_CAPABILITY = 45,
WLAN_EID_HT_INFORMATION = 61,
- /* 802.11i */
+
WLAN_EID_RSN = 48,
- WLAN_EID_TIMEOUT_INTERVAL = 56,
- WLAN_EID_MMIE = 76 /* 802.11w */,
+ WLAN_EID_MMIE = 76,
WLAN_EID_WPA = 221,
WLAN_EID_GENERIC = 221,
WLAN_EID_VENDOR_SPECIFIC = 221,
- WLAN_EID_QOS_PARAMETER = 222
+ WLAN_EID_QOS_PARAMETER = 222,
+
+ WLAN_EID_AP_CHAN_REPORT = 51,
+ WLAN_EID_NEIGHBOR_REPORT = 52,
+ WLAN_EID_RCPI = 53,
+ WLAN_EID_BSS_AVG_ACCESS_DELAY = 63,
+ WLAN_EID_ANTENNA_INFO = 64,
+ WLAN_EID_RSNI = 65,
+ WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66,
+ WLAN_EID_BSS_AVAILABLE_CAPACITY = 67,
+ WLAN_EID_BSS_AC_ACCESS_DELAY = 68,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES = 70,
+ WLAN_EID_MULTIPLE_BSSID = 71,
+
+ WLAN_EID_MOBILITY_DOMAIN = 54,
+ WLAN_EID_FAST_BSS_TRANSITION = 55,
+ WLAN_EID_TIMEOUT_INTERVAL = 56,
+ WLAN_EID_RIC_DATA = 57,
+ WLAN_EID_RIC_DESCRIPTOR = 75,
+
+ WLAN_EID_DSE_REGISTERED_LOCATION = 58,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
+ WLAN_EID_EXT_CHANSWITCH_ANN = 60,
};
/* Action category code */
@@ -1150,6 +1222,18 @@ enum ieee80211_spectrum_mgmt_actioncode {
WLAN_ACTION_SPCT_CHL_SWITCH = 4,
};
+/* HT action codes */
+enum ieee80211_ht_actioncode {
+ WLAN_HT_ACTION_NOTIFY_CHANWIDTH = 0,
+ WLAN_HT_ACTION_SMPS = 1,
+ WLAN_HT_ACTION_PSMP = 2,
+ WLAN_HT_ACTION_PCO_PHASE = 3,
+ WLAN_HT_ACTION_CSI = 4,
+ WLAN_HT_ACTION_NONCOMPRESSED_BF = 5,
+ WLAN_HT_ACTION_COMPRESSED_BF = 6,
+ WLAN_HT_ACTION_ASEL_IDX_FEEDBACK = 7,
+};
+
/* Security key length */
enum ieee80211_key_len {
WLAN_KEY_LEN_WEP40 = 5,
diff --git a/include/linux/in.h b/include/linux/in.h
index b615649db129..583c76f9c30f 100644
--- a/include/linux/in.h
+++ b/include/linux/in.h
@@ -84,6 +84,8 @@ struct in_addr {
#define IP_ORIGDSTADDR 20
#define IP_RECVORIGDSTADDR IP_ORIGDSTADDR
+#define IP_MINTTL 21
+
/* IP_MTU_DISCOVER values */
#define IP_PMTUDISC_DONT 0 /* Never send DF frames */
#define IP_PMTUDISC_WANT 1 /* Use per route hints */
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b2304929434e..cf257809771b 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -89,6 +89,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_LOG_MARTIANS(in_dev) IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
#define IN_DEV_PROXY_ARP(in_dev) IN_DEV_ORCONF((in_dev), PROXY_ARP)
+#define IN_DEV_PROXY_ARP_PVLAN(in_dev) IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
#define IN_DEV_SHARED_MEDIA(in_dev) IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
#define IN_DEV_TX_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
#define IN_DEV_SEC_REDIRECTS(in_dev) IN_DEV_ORCONF((in_dev), \
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 37ea2894b3c0..959a38b8f75d 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -69,178 +69,4 @@ struct inotify_event {
#define IN_CLOEXEC O_CLOEXEC
#define IN_NONBLOCK O_NONBLOCK
-#ifdef __KERNEL__
-
-#include <linux/dcache.h>
-#include <linux/fs.h>
-
-/*
- * struct inotify_watch - represents a watch request on a specific inode
- *
- * h_list is protected by ih->mutex of the associated inotify_handle.
- * i_list, mask are protected by inode->inotify_mutex of the associated inode.
- * ih, inode, and wd are never written to once the watch is created.
- *
- * Callers must use the established inotify interfaces to access inotify_watch
- * contents. The content of this structure is private to the inotify
- * implementation.
- */
-struct inotify_watch {
- struct list_head h_list; /* entry in inotify_handle's list */
- struct list_head i_list; /* entry in inode's list */
- atomic_t count; /* reference count */
- struct inotify_handle *ih; /* associated inotify handle */
- struct inode *inode; /* associated inode */
- __s32 wd; /* watch descriptor */
- __u32 mask; /* event mask for this watch */
-};
-
-struct inotify_operations {
- void (*handle_event)(struct inotify_watch *, u32, u32, u32,
- const char *, struct inode *);
- void (*destroy_watch)(struct inotify_watch *);
-};
-
-#ifdef CONFIG_INOTIFY
-
-/* Kernel API for producing events */
-
-extern void inotify_d_instantiate(struct dentry *, struct inode *);
-extern void inotify_d_move(struct dentry *);
-extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
- const char *, struct inode *);
-extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32,
- const char *);
-extern void inotify_unmount_inodes(struct list_head *);
-extern void inotify_inode_is_dead(struct inode *);
-extern u32 inotify_get_cookie(void);
-
-/* Kernel Consumer API */
-
-extern struct inotify_handle *inotify_init(const struct inotify_operations *);
-extern void inotify_init_watch(struct inotify_watch *);
-extern void inotify_destroy(struct inotify_handle *);
-extern __s32 inotify_find_watch(struct inotify_handle *, struct inode *,
- struct inotify_watch **);
-extern __s32 inotify_find_update_watch(struct inotify_handle *, struct inode *,
- u32);
-extern __s32 inotify_add_watch(struct inotify_handle *, struct inotify_watch *,
- struct inode *, __u32);
-extern __s32 inotify_clone_watch(struct inotify_watch *, struct inotify_watch *);
-extern void inotify_evict_watch(struct inotify_watch *);
-extern int inotify_rm_watch(struct inotify_handle *, struct inotify_watch *);
-extern int inotify_rm_wd(struct inotify_handle *, __u32);
-extern void inotify_remove_watch_locked(struct inotify_handle *,
- struct inotify_watch *);
-extern void get_inotify_watch(struct inotify_watch *);
-extern void put_inotify_watch(struct inotify_watch *);
-extern int pin_inotify_watch(struct inotify_watch *);
-extern void unpin_inotify_watch(struct inotify_watch *);
-
-#else
-
-static inline void inotify_d_instantiate(struct dentry *dentry,
- struct inode *inode)
-{
-}
-
-static inline void inotify_d_move(struct dentry *dentry)
-{
-}
-
-static inline void inotify_inode_queue_event(struct inode *inode,
- __u32 mask, __u32 cookie,
- const char *filename,
- struct inode *n_inode)
-{
-}
-
-static inline void inotify_dentry_parent_queue_event(struct dentry *dentry,
- __u32 mask, __u32 cookie,
- const char *filename)
-{
-}
-
-static inline void inotify_unmount_inodes(struct list_head *list)
-{
-}
-
-static inline void inotify_inode_is_dead(struct inode *inode)
-{
-}
-
-static inline u32 inotify_get_cookie(void)
-{
- return 0;
-}
-
-static inline struct inotify_handle *inotify_init(const struct inotify_operations *ops)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void inotify_init_watch(struct inotify_watch *watch)
-{
-}
-
-static inline void inotify_destroy(struct inotify_handle *ih)
-{
-}
-
-static inline __s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
- struct inotify_watch **watchp)
-{
- return -EOPNOTSUPP;
-}
-
-static inline __s32 inotify_find_update_watch(struct inotify_handle *ih,
- struct inode *inode, u32 mask)
-{
- return -EOPNOTSUPP;
-}
-
-static inline __s32 inotify_add_watch(struct inotify_handle *ih,
- struct inotify_watch *watch,
- struct inode *inode, __u32 mask)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int inotify_rm_watch(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
- return -EOPNOTSUPP;
-}
-
-static inline int inotify_rm_wd(struct inotify_handle *ih, __u32 wd)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void inotify_remove_watch_locked(struct inotify_handle *ih,
- struct inotify_watch *watch)
-{
-}
-
-static inline void get_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-static inline void put_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-extern inline int pin_inotify_watch(struct inotify_watch *watch)
-{
- return 0;
-}
-
-extern inline void unpin_inotify_watch(struct inotify_watch *watch)
-{
-}
-
-#endif /* CONFIG_INOTIFY */
-
-#endif /* __KERNEL __ */
-
#endif /* _LINUX_INOTIFY_H */
diff --git a/include/linux/input.h b/include/linux/input.h
index 7be8a6537b57..26e1d6ddcadd 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -597,6 +597,48 @@ struct input_absinfo {
#define KEY_CAMERA_FOCUS 0x210
+#define BTN_TRIGGER_HAPPY 0x2c0
+#define BTN_TRIGGER_HAPPY1 0x2c0
+#define BTN_TRIGGER_HAPPY2 0x2c1
+#define BTN_TRIGGER_HAPPY3 0x2c2
+#define BTN_TRIGGER_HAPPY4 0x2c3
+#define BTN_TRIGGER_HAPPY5 0x2c4
+#define BTN_TRIGGER_HAPPY6 0x2c5
+#define BTN_TRIGGER_HAPPY7 0x2c6
+#define BTN_TRIGGER_HAPPY8 0x2c7
+#define BTN_TRIGGER_HAPPY9 0x2c8
+#define BTN_TRIGGER_HAPPY10 0x2c9
+#define BTN_TRIGGER_HAPPY11 0x2ca
+#define BTN_TRIGGER_HAPPY12 0x2cb
+#define BTN_TRIGGER_HAPPY13 0x2cc
+#define BTN_TRIGGER_HAPPY14 0x2cd
+#define BTN_TRIGGER_HAPPY15 0x2ce
+#define BTN_TRIGGER_HAPPY16 0x2cf
+#define BTN_TRIGGER_HAPPY17 0x2d0
+#define BTN_TRIGGER_HAPPY18 0x2d1
+#define BTN_TRIGGER_HAPPY19 0x2d2
+#define BTN_TRIGGER_HAPPY20 0x2d3
+#define BTN_TRIGGER_HAPPY21 0x2d4
+#define BTN_TRIGGER_HAPPY22 0x2d5
+#define BTN_TRIGGER_HAPPY23 0x2d6
+#define BTN_TRIGGER_HAPPY24 0x2d7
+#define BTN_TRIGGER_HAPPY25 0x2d8
+#define BTN_TRIGGER_HAPPY26 0x2d9
+#define BTN_TRIGGER_HAPPY27 0x2da
+#define BTN_TRIGGER_HAPPY28 0x2db
+#define BTN_TRIGGER_HAPPY29 0x2dc
+#define BTN_TRIGGER_HAPPY30 0x2dd
+#define BTN_TRIGGER_HAPPY31 0x2de
+#define BTN_TRIGGER_HAPPY32 0x2df
+#define BTN_TRIGGER_HAPPY33 0x2e0
+#define BTN_TRIGGER_HAPPY34 0x2e1
+#define BTN_TRIGGER_HAPPY35 0x2e2
+#define BTN_TRIGGER_HAPPY36 0x2e3
+#define BTN_TRIGGER_HAPPY37 0x2e4
+#define BTN_TRIGGER_HAPPY38 0x2e5
+#define BTN_TRIGGER_HAPPY39 0x2e6
+#define BTN_TRIGGER_HAPPY40 0x2e7
+
/* We avoid low common keys in module aliases so they don't get huge. */
#define KEY_MIN_INTERESTING KEY_MUTE
#define KEY_MAX 0x2ff
@@ -1230,6 +1272,9 @@ struct input_handler {
int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id);
void (*disconnect)(struct input_handle *handle);
void (*start)(struct input_handle *handle);
+#ifdef CONFIG_KGDB_KDB
+ void (*dbg_clear_keys)(void);
+#endif
const struct file_operations *fops;
int minor;
@@ -1315,6 +1360,13 @@ int input_flush_device(struct input_handle* handle, struct file* file);
void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
+#ifdef CONFIG_KGDB_KDB
+void input_dbg_clear_keys(void);
+#else
+static inline void input_dbg_clear_keys(void)
+{}
+#endif
+
static inline void input_report_key(struct input_dev *dev, unsigned int code, int value)
{
input_event(dev, EV_KEY, code, !!value);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index a63235996309..78ef023227d4 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -4,32 +4,6 @@
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
- spinlock_t lock;
-
- void (*dtor)(struct as_io_context *aic); /* destructor */
- void (*exit)(struct as_io_context *aic); /* called on task exit */
-
- unsigned long state;
- atomic_t nr_queued; /* queued reads & sync writes */
- atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
- /* IO History tracking */
- /* Thinktime */
- unsigned long last_end_request;
- unsigned long ttime_total;
- unsigned long ttime_samples;
- unsigned long ttime_mean;
- /* Layout pattern */
- unsigned int seek_samples;
- sector_t last_request_pos;
- u64 seek_total;
- sector_t seek_mean;
-};
-
struct cfq_queue;
struct cfq_io_context {
void *key;
@@ -78,7 +52,6 @@ struct io_context {
unsigned long last_waited; /* Time last woken after wait for request */
int nr_batch_requests; /* Number of requests left in the batch */
- struct as_io_context *aic;
struct radix_tree_root radix_root;
struct hlist_head cic_list;
void *ioc_data;
diff --git a/include/linux/ioq.h b/include/linux/ioq.h
new file mode 100644
index 000000000000..7c6d6cad83c7
--- /dev/null
+++ b/include/linux/ioq.h
@@ -0,0 +1,414 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * IOQ is a generic shared-memory, lockless queue mechanism. It can be used
+ * in a variety of ways, though its intended purpose is to become the
+ * asynchronous communication path for virtual-bus drivers.
+ *
+ * The following are a list of key design points:
+ *
+ * #) All shared-memory is always allocated on explicitly one side of the
+ * link. This typically would be the guest side in a VM/VMM scenario.
+ * #) Each IOQ has the concept of "north" and "south" locales, where
+ * north denotes the memory-owner side (e.g. guest).
+ * #) An IOQ is manipulated using an iterator idiom.
+ * #) Provides a bi-directional signaling/notification infrastructure on
+ * a per-queue basis, which includes an event mitigation strategy
+ * to reduce boundary switching.
+ * #) The signaling path is abstracted so that various technologies and
+ * topologies can define their own specific implementation while sharing
+ * the basic structures and code.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_IOQ_H
+#define _LINUX_IOQ_H
+
+#include <linux/types.h>
+#include <linux/shm_signal.h>
+
+/*
+ *---------
+ * The following structures represent data that is shared across boundaries
+ * which may be quite disparate from one another (e.g. Windows vs Linux,
+ * 32 vs 64 bit, etc). Therefore, care has been taken to make sure they
+ * present data in a manner that is independent of the environment.
+ *-----------
+ */
+struct ioq_ring_desc {
+ __u64 cookie; /* for arbitrary use by north-side */
+ __le64 ptr;
+ __le64 len;
+ __u8 valid;
+ __u8 sown; /* South owned = 1, North owned = 0 */
+};
+
+#define IOQ_RING_MAGIC cpu_to_le32(0x47fa2fe4)
+#define IOQ_RING_VER cpu_to_le32(4)
+
+struct ioq_ring_idx {
+ __le32 head; /* 0 based index to head of ptr array */
+ __le32 tail; /* 0 based index to tail of ptr array */
+ __u8 full;
+};
+
+enum ioq_locality {
+ ioq_locality_north,
+ ioq_locality_south,
+};
+
+struct ioq_ring_head {
+ __le32 magic;
+ __le32 ver;
+ struct shm_signal_desc signal;
+ struct ioq_ring_idx idx[2];
+ __le32 count;
+ struct ioq_ring_desc ring[1]; /* "count" elements will be allocated */
+};
+
+#define IOQ_HEAD_DESC_SIZE(count) \
+ (sizeof(struct ioq_ring_head) + sizeof(struct ioq_ring_desc) * (count - 1))
+
+/* --- END SHARED STRUCTURES --- */
+
+#ifdef __KERNEL__
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/kref.h>
+
+enum ioq_idx_type {
+ ioq_idxtype_valid,
+ ioq_idxtype_inuse,
+ ioq_idxtype_both,
+ ioq_idxtype_invalid,
+};
+
+enum ioq_seek_type {
+ ioq_seek_tail,
+ ioq_seek_next,
+ ioq_seek_head,
+ ioq_seek_set
+};
+
+struct ioq_iterator {
+ struct ioq *ioq;
+ struct ioq_ring_idx *idx;
+ u32 pos;
+ struct ioq_ring_desc *desc;
+ bool update;
+ bool dualidx;
+ bool flipowner;
+};
+
+struct ioq_notifier {
+ void (*signal)(struct ioq_notifier *);
+};
+
+struct ioq_ops {
+ void (*release)(struct ioq *ioq);
+};
+
+struct ioq {
+ struct ioq_ops *ops;
+
+ struct kref kref;
+ enum ioq_locality locale;
+ struct ioq_ring_head *head_desc;
+ struct ioq_ring_desc *ring;
+ struct shm_signal *signal;
+ wait_queue_head_t wq;
+ struct ioq_notifier *notifier;
+ size_t count;
+ struct shm_signal_notifier shm_notifier;
+};
+
+#define IOQ_ITER_AUTOUPDATE (1 << 0)
+#define IOQ_ITER_NOFLIPOWNER (1 << 1)
+
+/**
+ * ioq_init() - initialize an IOQ
+ * @ioq: IOQ context
+ *
+ * Initializes IOQ context before first use
+ *
+ **/
+void ioq_init(struct ioq *ioq,
+ struct ioq_ops *ops,
+ enum ioq_locality locale,
+ struct ioq_ring_head *head,
+ struct shm_signal *signal,
+ size_t count);
+
+/**
+ * ioq_get() - acquire an IOQ context reference
+ * @ioq: IOQ context
+ *
+ **/
+static inline struct ioq *ioq_get(struct ioq *ioq)
+{
+ kref_get(&ioq->kref);
+
+ return ioq;
+}
+
+static inline void _ioq_kref_release(struct kref *kref)
+{
+ struct ioq *ioq = container_of(kref, struct ioq, kref);
+
+ shm_signal_put(ioq->signal);
+ ioq->ops->release(ioq);
+}
+
+/**
+ * ioq_put() - release an IOQ context reference
+ * @ioq: IOQ context
+ *
+ **/
+static inline void ioq_put(struct ioq *ioq)
+{
+ kref_put(&ioq->kref, _ioq_kref_release);
+}
+
+/**
+ * ioq_notify_enable() - enables local notifications on an IOQ
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Enables/unmasks the registered ioq_notifier (if applicable) and waitq to
+ * receive wakeups whenever the remote side performs an ioq_signal() operation.
+ * A notification will be dispatched immediately if any pending signals have
+ * already been issued prior to invoking this call.
+ *
+ * This is synonymous with unmasking an interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_notify_enable(struct ioq *ioq, int flags)
+{
+ return shm_signal_enable(ioq->signal, 0);
+}
+
+/**
+ * ioq_notify_disable() - disable local notifications on an IOQ
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Disables/masks the registered ioq_notifier (if applicable) and waitq
+ * from receiving any further notifications. Any subsequent calls to
+ * ioq_signal() by the remote side will update the ring as dirty, but
+ * will not traverse the locale boundary and will not invoke the notifier
+ * callback or wakeup the waitq. Signals delivered while masked will
+ * be deferred until ioq_notify_enable() is invoked
+ *
+ * This is synonymous with masking an interrupt
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_notify_disable(struct ioq *ioq, int flags)
+{
+ return shm_signal_disable(ioq->signal, 0);
+}
+
+/**
+ * ioq_signal() - notify the remote side about ring changes
+ * @ioq: IOQ context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Marks the ring state as "dirty" and, if enabled, will traverse
+ * a locale boundary to invoke a remote notification. The remote
+ * side controls whether the notification should be delivered via
+ * the ioq_notify_enable/disable() interface.
+ *
+ * The specifics of how to traverse a locale boundary are abstracted
+ * by the ioq_ops->signal() interface and provided by a particular
+ * implementation. However, typically going north to south would be
+ * something like a syscall/hypercall, and going south to north would be
+ * something like a posix-signal/guest-interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+static inline int ioq_signal(struct ioq *ioq, int flags)
+{
+ return shm_signal_inject(ioq->signal, 0);
+}
+
+/**
+ * ioq_count() - counts the number of outstanding descriptors in an index
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) >=0: # of descriptors outstanding in the index
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_count(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_remain() - counts the number of remaining descriptors in an index
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * This is the converse of ioq_count(). This function returns the number
+ * of "free" descriptors left in a particular index
+ *
+ * Returns:
+ * (*) >=0: # of descriptors remaining in the index
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_remain(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_size() - counts the maximum number of descriptors in an ring
+ * @ioq: IOQ context
+ *
+ * This function returns the maximum number of descriptors supported in
+ * a ring, regardless of their current state (free or inuse).
+ *
+ * Returns:
+ * (*) >=0: total # of descriptors in the ring
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_size(struct ioq *ioq);
+
+/**
+ * ioq_full() - determines if a specific index is "full"
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) 0: index is not full
+ * (*) 1: index is full
+ * (*) <0 = ERRNO
+ *
+ **/
+int ioq_full(struct ioq *ioq, enum ioq_idx_type type);
+
+/**
+ * ioq_empty() - determines if a specific index is "empty"
+ * @ioq: IOQ context
+ * @type: Specifies the index type
+ * (*) valid: the descriptor is valid. This is usually
+ * used to keep track of descriptors that may not
+ * be carrying a useful payload, but still need to
+ * be tracked carefully.
+ * (*) inuse: Descriptors that carry useful payload
+ *
+ * Returns:
+ * (*) 0: index is not empty
+ * (*) 1: index is empty
+ * (*) <0 = ERRNO
+ *
+ **/
+static inline int ioq_empty(struct ioq *ioq, enum ioq_idx_type type)
+{
+ return !ioq_count(ioq, type);
+}
+
+/**
+ * ioq_iter_init() - initialize an iterator for IOQ descriptor traversal
+ * @ioq: IOQ context to iterate on
+ * @iter: Iterator context to init (usually from stack)
+ * @type: Specifies the index type to iterate against
+ * (*) valid: iterate against the "valid" index
+ * (*) inuse: iterate against the "inuse" index
+ * (*) both: iterate against both indexes simultaneously
+ * @flags: Bitfield with 0 or more bits set to alter behavior
+ * (*) autoupdate: automatically signal the remote side
+ * whenever the iterator pushes/pops to a new desc
+ * (*) noflipowner: do not flip the ownership bit during
+ * a push/pop operation
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_init(struct ioq *ioq, struct ioq_iterator *iter,
+ enum ioq_idx_type type, int flags);
+
+/**
+ * ioq_iter_seek() - seek to a specific location in the IOQ ring
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @type: Specifies the type of seek operation
+ * (*) tail: seek to the absolute tail, offset is ignored
+ * (*) next: seek to the relative next, offset is ignored
+ * (*) head: seek to the absolute head, offset is ignored
+ * (*) set: seek to the absolute offset
+ * @offset: Offset for ioq_seek_set operations
+ * @flags: Reserved for future use, must be 0
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_seek(struct ioq_iterator *iter, enum ioq_seek_type type,
+ long offset, int flags);
+
+/**
+ * ioq_iter_push() - push the tail pointer forward
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @flags: Reserved for future use, must be 0
+ *
+ * This function will simultaneously advance the tail ptr in the current
+ * index (valid/inuse, as specified in the ioq_iter_init) as well as
+ * perform a seek(next) operation. This effectively "pushes" a new pointer
+ * onto the tail of the index.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_push(struct ioq_iterator *iter, int flags);
+
+/**
+ * ioq_iter_pop() - pop the head pointer from the ring
+ * @iter: Iterator context (must be initialized with ioq_iter_init)
+ * @flags: Reserved for future use, must be 0
+ *
+ * This function will simultaneously advance the head ptr in the current
+ * index (valid/inuse, as specified in the ioq_iter_init) as well as
+ * perform a seek(next) operation. This effectively "pops" a pointer
+ * from the head of the index.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int ioq_iter_pop(struct ioq_iterator *iter, int flags);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_IOQ_H */
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
new file mode 100644
index 000000000000..75d8777d51d9
--- /dev/null
+++ b/include/linux/kdb.h
@@ -0,0 +1,118 @@
+#ifndef _KDB_H
+#define _KDB_H
+
+/*
+ * Kernel Debugger Architecture Independent Global Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2007 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
+ */
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#define KDB_POLL_FUNC_MAX 5
+extern int kdb_poll_idx;
+
+/*
+ * kdb_initial_cpu is initialized to -1, and is set to the cpu
+ * number whenever the kernel debugger is entered.
+ */
+extern int kdb_initial_cpu;
+extern atomic_t kdb_event;
+
+#define KDB_IS_RUNNING() (kdb_initial_cpu != -1)
+
+/*
+ * kdb_diemsg
+ *
+ * Contains a pointer to the last string supplied to the
+ * kernel 'die' panic function.
+ */
+extern const char *kdb_diemsg;
+
+#define KDB_FLAG_EARLYKDB (1 << 0) /* set from boot parameter kdb=early */
+#define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */
+#define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */
+#define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */
+#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when
+ * kdb is off */
+#define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available,
+ * kdb is disabled */
+#define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do
+ * not use keyboard */
+#define KDB_FLAG_NO_I8042 (1 << 7) /* No i8042 chip is available, do
+ * not use keyboard */
+
+extern int kdb_flags; /* Global flags, see kdb_state for per cpu state */
+
+extern void kdb_save_flags(void);
+extern void kdb_restore_flags(void);
+
+#define KDB_FLAG(flag) (kdb_flags & KDB_FLAG_##flag)
+#define KDB_FLAG_SET(flag) ((void)(kdb_flags |= KDB_FLAG_##flag))
+#define KDB_FLAG_CLEAR(flag) ((void)(kdb_flags &= ~KDB_FLAG_##flag))
+
+/*
+ * External entry point for the kernel debugger. The pt_regs
+ * at the time of entry are supplied along with the reason for
+ * entry to the kernel debugger.
+ */
+
+typedef enum {
+ KDB_REASON_ENTER = 1, /* KDB_ENTER() trap/fault - regs valid */
+ KDB_REASON_ENTER_SLAVE, /* KDB_ENTER_SLAVE() trap/fault - regs valid */
+ KDB_REASON_BREAK, /* Breakpoint inst. - regs valid */
+ KDB_REASON_DEBUG, /* Debug Fault - regs valid */
+ KDB_REASON_OOPS, /* Kernel Oops - regs valid */
+ KDB_REASON_SWITCH, /* CPU switch - regs valid*/
+ KDB_REASON_KEYBOARD, /* Keyboard entry - regs valid */
+ KDB_REASON_NMI, /* Non-maskable interrupt; regs valid */
+ KDB_REASON_RECURSE, /* Recursive entry to kdb;
+ * regs probably valid */
+ KDB_REASON_SSTEP, /* Single Step trap. - regs valid */
+} kdb_reason_t;
+
+extern int kdb_trap_printk;
+extern int vkdb_printf(const char *fmt, va_list args)
+ __attribute__ ((format (printf, 1, 0)));
+extern int kdb_printf(const char *, ...)
+ __attribute__ ((format (printf, 1, 2)));
+typedef int (*kdb_printf_t)(const char *, ...)
+ __attribute__ ((format (printf, 1, 2)));
+extern void kdb_init(void);
+
+/* Access to kdb specific polling devices */
+typedef int (*get_char_func)(void);
+extern get_char_func kdb_poll_funcs[];
+extern int kdb_get_kbd_char(void);
+
+static inline
+int kdb_process_cpu(const struct task_struct *p)
+{
+ unsigned int cpu = task_thread_info(p)->cpu;
+ if (cpu > num_possible_cpus())
+ cpu = 0;
+ return cpu;
+}
+
+/* kdb access functions for non-kdb files*/
+#ifdef CONFIG_SWAP
+extern void kdb_si_swapinfo(struct sysinfo *);
+#else
+#include <linux/swap.h>
+#define kdb_si_swapinfo(x) si_swapinfo(x)
+#endif
+
+#else /* ! CONFIG_KGDB_KDB */
+#define KDB_IS_RUNNING() (0)
+#define kdb_printf(...)
+#endif /* CONFIG_KGDB_KDB */
+#endif /* !_KDB_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3fc9f5aab5f8..abfb4d494106 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -124,7 +124,7 @@ extern int _cond_resched(void);
#endif
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
- void __might_sleep(char *file, int line, int preempt_offset);
+ void __might_sleep(const char *file, int line, int preempt_offset);
/**
* might_sleep - annotation for functions that can sleep
*
@@ -138,7 +138,8 @@ extern int _cond_resched(void);
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
#else
- static inline void __might_sleep(char *file, int line, int preempt_offset) { }
+ static inline void __might_sleep(const char *file, int line,
+ int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
#endif
@@ -702,13 +703,59 @@ static inline void ftrace_dump(void) { }
struct sysinfo;
extern int do_sysinfo(struct sysinfo *info);
-#endif /* __KERNEL__ */
+/* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or where-ever else comma expressions
+ aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
+
+/**
+ * BUILD_BUG_ON - break compile if a condition is true.
+ * @cond: the condition which the compiler should know is false.
+ *
+ * If you have some code which relies on certain constants being equal, or
+ * other compile-time-evaluated condition, you should use BUILD_BUG_ON to
+ * detect if someone changes it.
+ *
+ * The implementation uses gcc's reluctance to create a negative array, but
+ * gcc (as of 4.4) only emits that error for obvious cases (eg. not arguments
+ * to inline functions). So as a fallback we use the optimizer; if it can't
+ * prove the condition is false, it will cause a link error on the undefined
+ * "__build_bug_on_failed". This error message can be harder to track down
+ * though, hence the two different methods.
+ */
+#ifndef __OPTIMIZE__
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int __build_bug_on_failed;
+#define BUILD_BUG_ON(condition) \
+ do { \
+ ((void)sizeof(char[1 - 2*!!(condition)])); \
+ if (condition) __build_bug_on_failed = 1; \
+ } while(0)
+#endif
+
+/* Trap pasters of __FUNCTION__ at compile-time */
+#define __FUNCTION__ (__func__)
+
+/* This helps us to avoid #ifdef CONFIG_NUMA */
+#ifdef CONFIG_NUMA
+#define NUMA_BUILD 1
+#else
+#define NUMA_BUILD 0
+#endif
+
+/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+#endif
+#else /* __KERNEL__ */
#ifndef __EXPORTED_HEADERS__
-#ifndef __KERNEL__
#warning Attempt to use kernel headers from user space, see http://kernelnewbies.org/KernelHeaders
-#endif /* __KERNEL__ */
#endif /* __EXPORTED_HEADERS__ */
+#endif /* !__KERNEL__ */
#define SI_LOAD_SHIFT 16
struct sysinfo {
@@ -728,32 +775,4 @@ struct sysinfo {
char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
};
-/* Force a compilation error if condition is true */
-#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
-
-/* Force a compilation error if condition is constant and true */
-#define MAYBE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
-
-/* Force a compilation error if condition is true, but also produce a
- result (of value 0 and type size_t), so the expression can be used
- e.g. in a structure initializer (or where-ever else comma expressions
- aren't permitted). */
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
-#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
-
-/* Trap pasters of __FUNCTION__ at compile-time */
-#define __FUNCTION__ (__func__)
-
-/* This helps us to avoid #ifdef CONFIG_NUMA */
-#ifdef CONFIG_NUMA
-#define NUMA_BUILD 1
-#else
-#define NUMA_BUILD 0
-#endif
-
-/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
-#endif
-
#endif
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6adcc297e354..c34a9951808a 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -16,10 +16,12 @@
#include <linux/serial_8250.h>
#include <linux/linkage.h>
#include <linux/init.h>
-
#include <asm/atomic.h>
+#ifdef CONFIG_HAVE_ARCH_KGDB
#include <asm/kgdb.h>
+#endif
+#ifdef CONFIG_KGDB
struct pt_regs;
/**
@@ -29,26 +31,11 @@ struct pt_regs;
*
* On some architectures it is required to skip a breakpoint
* exception when it occurs after a breakpoint has been removed.
- * This can be implemented in the architecture specific portion of
- * for kgdb.
+ * This can be implemented in the architecture specific portion of kgdb.
*/
extern int kgdb_skipexception(int exception, struct pt_regs *regs);
/**
- * kgdb_post_primary_code - (optional) Save error vector/code numbers.
- * @regs: Original pt_regs.
- * @e_vector: Original error vector.
- * @err_code: Original error code.
- *
- * This is usually needed on architectures which support SMP and
- * KGDB. This function is called after all the secondary cpus have
- * been put to a know spin state and the primary CPU has control over
- * KGDB.
- */
-extern void kgdb_post_primary_code(struct pt_regs *regs, int e_vector,
- int err_code);
-
-/**
* kgdb_disable_hw_debug - (optional) Disable hardware debugging hook
* @regs: Current &struct pt_regs.
*
@@ -65,7 +52,7 @@ struct uart_port;
/**
* kgdb_breakpoint - compiled in breakpoint
*
- * This will be impelmented a static inline per architecture. This
+ * This will be implemented as a static inline per architecture. This
* function is called by the kgdb core to execute an architecture
* specific trap to cause kgdb to enter the exception processing.
*
@@ -73,6 +60,7 @@ struct uart_port;
void kgdb_breakpoint(void);
extern int kgdb_connected;
+extern int kgdb_io_module_registered;
extern atomic_t kgdb_setting_breakpoint;
extern atomic_t kgdb_cpu_doing_single_step;
@@ -190,7 +178,7 @@ kgdb_arch_handle_exception(int vector, int signo, int err_code,
* @flags: Current IRQ state
*
* On SMP systems, we need to get the attention of the other CPUs
- * and get them be in a known state. This should do what is needed
+ * and get them into a known state. This should do what is needed
* to get the other CPUs to call kgdb_wait(). Note that on some arches,
* the NMI approach is not used for rounding up all the CPUs. For example,
* in case of MIPS, smp_call_function() is used to roundup CPUs. In
@@ -248,6 +236,8 @@ struct kgdb_arch {
* the I/O driver.
* @post_exception: Pointer to a function that will do any cleanup work
* for the I/O driver.
+ * @is_console: 1 if the end device is a console 0 if the I/O device is
+ * not a console
*/
struct kgdb_io {
const char *name;
@@ -257,12 +247,14 @@ struct kgdb_io {
int (*init) (void);
void (*pre_exception) (void);
void (*post_exception) (void);
+ int is_console;
};
extern struct kgdb_arch arch_kgdb_ops;
extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
+extern void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc);
extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
@@ -271,6 +263,7 @@ extern int kgdb_mem2hex(char *mem, char *buf, int count);
extern int kgdb_hex2mem(char *buf, char *mem, int count);
extern int kgdb_isremovedbreak(unsigned long addr);
+extern void kgdb_schedule_breakpoint(void);
extern int
kgdb_handle_exception(int ex_vector, int signo, int err_code,
@@ -279,5 +272,48 @@ extern int kgdb_nmicallback(int cpu, void *regs);
extern int kgdb_single_step;
extern atomic_t kgdb_active;
+#ifdef CONFIG_KGDB_SERIAL_CONSOLE
+extern void __init early_kgdboc_init(void);
+#endif /* CONFIG_KGDB_SERIAL_CONSOLE */
+#endif /* CONFIG_KGDB */
+
+/* Common to all that include kgdb.h */
+#ifdef CONFIG_VT
+extern void dbg_pre_vt_hook(void);
+extern void dbg_post_vt_hook(void);
+#else /* ! CONFIG_VT */
+#define dbg_pre_vt_hook()
+#define dbg_post_vt_hook()
+#endif /* CONFIG_VT */
+
+struct dbg_kms_console_ops {
+ int (*activate_console) (struct dbg_kms_console_ops *ops);
+ int (*restore_console) (struct dbg_kms_console_ops *ops);
+};
+#ifdef CONFIG_KGDB
+extern struct dbg_kms_console_ops *dbg_kms_console_core;
+extern int dbg_kms_console_ops_register(struct dbg_kms_console_ops *ops);
+extern int dbg_kms_console_ops_unregister(struct dbg_kms_console_ops *ops);
+#define in_dbg_master() \
+ (raw_smp_processor_id() == atomic_read(&kgdb_active))
+#define dbg_safe_mutex_lock(x) \
+ if (!in_dbg_master()) \
+ mutex_lock(x)
+#define dbg_safe_mutex_unlock(x) \
+ if (!in_dbg_master()) \
+ mutex_unlock(x)
+#else /* ! CONFIG_KGDB */
+static inline int dbg_kms_console_ops_register(struct dbg_kms_console_ops *ops)
+{
+ return 0;
+}
+static inline int dbg_kms_console_ops_unregister(struct dbg_kms_console_ops *ops)
+{
+ return 0;
+}
+#define in_dbg_master() (0)
+#define dbg_safe_mutex_lock(x) mutex_lock(x)
+#define dbg_safe_mutex_unlock(x) mutex_unlock(x)
+#endif /* ! CONFIG_KGDB */
#endif /* _KGDB_H_ */
diff --git a/include/linux/kmemcheck.h b/include/linux/kmemcheck.h
index e880d4cf9e22..39f8453239f7 100644
--- a/include/linux/kmemcheck.h
+++ b/include/linux/kmemcheck.h
@@ -36,6 +36,56 @@ int kmemcheck_hide_addr(unsigned long address);
bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size);
+/*
+ * Bitfield annotations
+ *
+ * How to use: If you have a struct using bitfields, for example
+ *
+ * struct a {
+ * int x:8, y:8;
+ * };
+ *
+ * then this should be rewritten as
+ *
+ * struct a {
+ * kmemcheck_bitfield_begin(flags);
+ * int x:8, y:8;
+ * kmemcheck_bitfield_end(flags);
+ * };
+ *
+ * Now the "flags_begin" and "flags_end" members may be used to refer to the
+ * beginning and end, respectively, of the bitfield (and things like
+ * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
+ * fields should be annotated:
+ *
+ * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
+ * kmemcheck_annotate_bitfield(a, flags);
+ */
+#define kmemcheck_bitfield_begin(name) \
+ int name##_begin[0];
+
+#define kmemcheck_bitfield_end(name) \
+ int name##_end[0];
+
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ int _n; \
+ \
+ if (!ptr) \
+ break; \
+ \
+ _n = (long) &((ptr)->name##_end) \
+ - (long) &((ptr)->name##_begin); \
+ BUILD_BUG_ON(_n < 0); \
+ \
+ kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+ } while (0)
+
+#define kmemcheck_annotate_variable(var) \
+ do { \
+ kmemcheck_mark_initialized(&(var), sizeof(var)); \
+ } while (0) \
+
#else
#define kmemcheck_enabled 0
@@ -106,60 +156,16 @@ static inline bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size)
return true;
}
-#endif /* CONFIG_KMEMCHECK */
-
-/*
- * Bitfield annotations
- *
- * How to use: If you have a struct using bitfields, for example
- *
- * struct a {
- * int x:8, y:8;
- * };
- *
- * then this should be rewritten as
- *
- * struct a {
- * kmemcheck_bitfield_begin(flags);
- * int x:8, y:8;
- * kmemcheck_bitfield_end(flags);
- * };
- *
- * Now the "flags_begin" and "flags_end" members may be used to refer to the
- * beginning and end, respectively, of the bitfield (and things like
- * &x.flags_begin is allowed). As soon as the struct is allocated, the bit-
- * fields should be annotated:
- *
- * struct a *a = kmalloc(sizeof(struct a), GFP_KERNEL);
- * kmemcheck_annotate_bitfield(a, flags);
- *
- * Note: We provide the same definitions for both kmemcheck and non-
- * kmemcheck kernels. This makes it harder to introduce accidental errors. It
- * is also allowed to pass NULL pointers to kmemcheck_annotate_bitfield().
- */
-#define kmemcheck_bitfield_begin(name) \
- int name##_begin[0];
-
-#define kmemcheck_bitfield_end(name) \
- int name##_end[0];
+#define kmemcheck_bitfield_begin(name)
+#define kmemcheck_bitfield_end(name)
+#define kmemcheck_annotate_bitfield(ptr, name) \
+ do { \
+ } while (0)
-#define kmemcheck_annotate_bitfield(ptr, name) \
- do { \
- int _n; \
- \
- if (!ptr) \
- break; \
- \
- _n = (long) &((ptr)->name##_end) \
- - (long) &((ptr)->name##_begin); \
- MAYBE_BUILD_BUG_ON(_n < 0); \
- \
- kmemcheck_mark_initialized(&((ptr)->name##_begin), _n); \
+#define kmemcheck_annotate_variable(var) \
+ do { \
} while (0)
-#define kmemcheck_annotate_variable(var) \
- do { \
- kmemcheck_mark_initialized(&(var), sizeof(var)); \
- } while (0) \
+#endif /* CONFIG_KMEMCHECK */
#endif /* LINUX_KMEMCHECK_H */
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index e32aa268efac..24b44145a886 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -17,6 +17,7 @@
enum kmsg_dump_reason {
KMSG_DUMP_OOPS,
KMSG_DUMP_PANIC,
+ KMSG_DUMP_KEXEC,
};
/**
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index a24de0b1858e..f2feef68ffd6 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -103,7 +103,7 @@ struct kvm_userspace_memory_region {
/* for kvm_memory_region::flags */
#define KVM_MEM_LOG_DIRTY_PAGES 1UL
-
+#define KVM_MEMSLOT_INVALID (1UL << 1)
/* for KVM_IRQ_LINE */
struct kvm_irq_level {
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index bd5a616d9373..bb0314ea9267 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -57,20 +57,20 @@ struct kvm_io_bus {
struct kvm_io_device *devs[NR_IOBUS_DEVS];
};
-void kvm_io_bus_init(struct kvm_io_bus *bus);
-void kvm_io_bus_destroy(struct kvm_io_bus *bus);
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr, int len,
- const void *val);
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len,
+enum kvm_bus {
+ KVM_MMIO_BUS,
+ KVM_PIO_BUS,
+ KVM_NR_BUSES
+};
+
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, const void *val);
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
void *val);
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev);
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
-void kvm_io_bus_unregister_dev(struct kvm *kvm, struct kvm_io_bus *bus,
- struct kvm_io_device *dev);
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
struct kvm_vcpu {
struct kvm *kvm;
@@ -83,6 +83,8 @@ struct kvm_vcpu {
struct kvm_run *run;
unsigned long requests;
unsigned long guest_debug;
+ int srcu_idx;
+
int fpu_active;
int guest_fpu_loaded;
wait_queue_head_t wq;
@@ -150,14 +152,19 @@ struct kvm_irq_routing_table {};
#endif
+struct kvm_memslots {
+ int nmemslots;
+ struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
+ KVM_PRIVATE_MEM_SLOTS];
+};
+
struct kvm {
spinlock_t mmu_lock;
spinlock_t requests_lock;
- struct rw_semaphore slots_lock;
+ struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
- int nmemslots;
- struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
- KVM_PRIVATE_MEM_SLOTS];
+ struct kvm_memslots *memslots;
+ struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
u32 bsp_vcpu_id;
struct kvm_vcpu *bsp_vcpu;
@@ -166,8 +173,7 @@ struct kvm {
atomic_t online_vcpus;
struct list_head vm_list;
struct mutex lock;
- struct kvm_io_bus mmio_bus;
- struct kvm_io_bus pio_bus;
+ struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
struct {
spinlock_t lock;
@@ -249,13 +255,20 @@ int kvm_set_memory_region(struct kvm *kvm,
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc);
-int kvm_arch_set_memory_region(struct kvm *kvm,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ struct kvm_memory_slot old,
+ struct kvm_userspace_memory_region *mem,
+ int user_alloc);
+void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn);
+
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
@@ -264,6 +277,9 @@ void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
@@ -429,8 +445,7 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
#define KVM_IOMMU_CACHE_COHERENCY 0x1
#ifdef CONFIG_IOMMU_API
-int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
- unsigned long npages);
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
@@ -480,11 +495,6 @@ static inline void kvm_guest_exit(void)
current->flags &= ~PF_VCPU;
}
-static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- return slot - kvm->memslots;
-}
-
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
return (gpa_t)gfn << PAGE_SHIFT;
@@ -532,6 +542,10 @@ static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_se
}
#endif
+#ifndef KVM_ARCH_HAS_UNALIAS_INSTANTIATION
+#define unalias_gfn_instantiation unalias_gfn
+#endif
+
#ifdef CONFIG_HAVE_KVM_IRQCHIP
#define KVM_MAX_IRQ_ROUTES 1024
diff --git a/include/linux/llc.h b/include/linux/llc.h
index 7733585603f1..ad7074ba81af 100644
--- a/include/linux/llc.h
+++ b/include/linux/llc.h
@@ -36,6 +36,7 @@ enum llc_sockopts {
LLC_OPT_BUSY_TMR_EXP, /* busy state expire time (secs). */
LLC_OPT_TX_WIN, /* tx window size. */
LLC_OPT_RX_WIN, /* rx window size. */
+ LLC_OPT_PKTINFO, /* ancillary packet information. */
LLC_OPT_MAX
};
@@ -70,6 +71,12 @@ enum llc_sockopts {
#define LLC_SAP_RM 0xD4 /* Resource Management */
#define LLC_SAP_GLOBAL 0xFF /* Global SAP. */
+struct llc_pktinfo {
+ int lpi_ifindex;
+ unsigned char lpi_sap;
+ unsigned char lpi_mac[IFHWADDRLEN];
+};
+
#ifdef __KERNEL__
#define LLC_SAP_DYN_START 0xC0
#define LLC_SAP_DYN_STOP 0xDE
diff --git a/include/linux/mfd/88pm8607.h b/include/linux/mfd/88pm8607.h
deleted file mode 100644
index f41b428d2cec..000000000000
--- a/include/linux/mfd/88pm8607.h
+++ /dev/null
@@ -1,217 +0,0 @@
-/*
- * Marvell 88PM8607 Interface
- *
- * Copyright (C) 2009 Marvell International Ltd.
- * Haojian Zhuang <haojian.zhuang@marvell.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __LINUX_MFD_88PM8607_H
-#define __LINUX_MFD_88PM8607_H
-
-enum {
- PM8607_ID_BUCK1 = 0,
- PM8607_ID_BUCK2,
- PM8607_ID_BUCK3,
-
- PM8607_ID_LDO1,
- PM8607_ID_LDO2,
- PM8607_ID_LDO3,
- PM8607_ID_LDO4,
- PM8607_ID_LDO5,
- PM8607_ID_LDO6,
- PM8607_ID_LDO7,
- PM8607_ID_LDO8,
- PM8607_ID_LDO9,
- PM8607_ID_LDO10,
- PM8607_ID_LDO12,
- PM8607_ID_LDO14,
-
- PM8607_ID_RG_MAX,
-};
-
-#define CHIP_ID (0x40)
-#define CHIP_ID_MASK (0xF8)
-
-/* Interrupt Registers */
-#define PM8607_STATUS_1 (0x01)
-#define PM8607_STATUS_2 (0x02)
-#define PM8607_INT_STATUS1 (0x03)
-#define PM8607_INT_STATUS2 (0x04)
-#define PM8607_INT_STATUS3 (0x05)
-#define PM8607_INT_MASK_1 (0x06)
-#define PM8607_INT_MASK_2 (0x07)
-#define PM8607_INT_MASK_3 (0x08)
-
-/* Regulator Control Registers */
-#define PM8607_LDO1 (0x10)
-#define PM8607_LDO2 (0x11)
-#define PM8607_LDO3 (0x12)
-#define PM8607_LDO4 (0x13)
-#define PM8607_LDO5 (0x14)
-#define PM8607_LDO6 (0x15)
-#define PM8607_LDO7 (0x16)
-#define PM8607_LDO8 (0x17)
-#define PM8607_LDO9 (0x18)
-#define PM8607_LDO10 (0x19)
-#define PM8607_LDO12 (0x1A)
-#define PM8607_LDO14 (0x1B)
-#define PM8607_SLEEP_MODE1 (0x1C)
-#define PM8607_SLEEP_MODE2 (0x1D)
-#define PM8607_SLEEP_MODE3 (0x1E)
-#define PM8607_SLEEP_MODE4 (0x1F)
-#define PM8607_GO (0x20)
-#define PM8607_SLEEP_BUCK1 (0x21)
-#define PM8607_SLEEP_BUCK2 (0x22)
-#define PM8607_SLEEP_BUCK3 (0x23)
-#define PM8607_BUCK1 (0x24)
-#define PM8607_BUCK2 (0x25)
-#define PM8607_BUCK3 (0x26)
-#define PM8607_BUCK_CONTROLS (0x27)
-#define PM8607_SUPPLIES_EN11 (0x2B)
-#define PM8607_SUPPLIES_EN12 (0x2C)
-#define PM8607_GROUP1 (0x2D)
-#define PM8607_GROUP2 (0x2E)
-#define PM8607_GROUP3 (0x2F)
-#define PM8607_GROUP4 (0x30)
-#define PM8607_GROUP5 (0x31)
-#define PM8607_GROUP6 (0x32)
-#define PM8607_SUPPLIES_EN21 (0x33)
-#define PM8607_SUPPLIES_EN22 (0x34)
-
-/* RTC Control Registers */
-#define PM8607_RTC1 (0xA0)
-#define PM8607_RTC_COUNTER1 (0xA1)
-#define PM8607_RTC_COUNTER2 (0xA2)
-#define PM8607_RTC_COUNTER3 (0xA3)
-#define PM8607_RTC_COUNTER4 (0xA4)
-#define PM8607_RTC_EXPIRE1 (0xA5)
-#define PM8607_RTC_EXPIRE2 (0xA6)
-#define PM8607_RTC_EXPIRE3 (0xA7)
-#define PM8607_RTC_EXPIRE4 (0xA8)
-#define PM8607_RTC_TRIM1 (0xA9)
-#define PM8607_RTC_TRIM2 (0xAA)
-#define PM8607_RTC_TRIM3 (0xAB)
-#define PM8607_RTC_TRIM4 (0xAC)
-#define PM8607_RTC_MISC1 (0xAD)
-#define PM8607_RTC_MISC2 (0xAE)
-#define PM8607_RTC_MISC3 (0xAF)
-
-/* Misc Registers */
-#define PM8607_CHIP_ID (0x00)
-#define PM8607_LDO1 (0x10)
-#define PM8607_DVC3 (0x26)
-#define PM8607_MISC1 (0x40)
-
-/* bit definitions for PM8607 events */
-#define PM8607_EVENT_ONKEY (1 << 0)
-#define PM8607_EVENT_EXTON (1 << 1)
-#define PM8607_EVENT_CHG (1 << 2)
-#define PM8607_EVENT_BAT (1 << 3)
-#define PM8607_EVENT_RTC (1 << 4)
-#define PM8607_EVENT_CC (1 << 5)
-#define PM8607_EVENT_VBAT (1 << 8)
-#define PM8607_EVENT_VCHG (1 << 9)
-#define PM8607_EVENT_VSYS (1 << 10)
-#define PM8607_EVENT_TINT (1 << 11)
-#define PM8607_EVENT_GPADC0 (1 << 12)
-#define PM8607_EVENT_GPADC1 (1 << 13)
-#define PM8607_EVENT_GPADC2 (1 << 14)
-#define PM8607_EVENT_GPADC3 (1 << 15)
-#define PM8607_EVENT_AUDIO_SHORT (1 << 16)
-#define PM8607_EVENT_PEN (1 << 17)
-#define PM8607_EVENT_HEADSET (1 << 18)
-#define PM8607_EVENT_HOOK (1 << 19)
-#define PM8607_EVENT_MICIN (1 << 20)
-#define PM8607_EVENT_CHG_TIMEOUT (1 << 21)
-#define PM8607_EVENT_CHG_DONE (1 << 22)
-#define PM8607_EVENT_CHG_FAULT (1 << 23)
-
-/* bit definitions of Status Query Interface */
-#define PM8607_STATUS_CC (1 << 3)
-#define PM8607_STATUS_PEN (1 << 4)
-#define PM8607_STATUS_HEADSET (1 << 5)
-#define PM8607_STATUS_HOOK (1 << 6)
-#define PM8607_STATUS_MICIN (1 << 7)
-#define PM8607_STATUS_ONKEY (1 << 8)
-#define PM8607_STATUS_EXTON (1 << 9)
-#define PM8607_STATUS_CHG (1 << 10)
-#define PM8607_STATUS_BAT (1 << 11)
-#define PM8607_STATUS_VBUS (1 << 12)
-#define PM8607_STATUS_OV (1 << 13)
-
-/* bit definitions of BUCK3 */
-#define PM8607_BUCK3_DOUBLE (1 << 6)
-
-/* bit definitions of Misc1 */
-#define PM8607_MISC1_PI2C (1 << 0)
-
-/* Interrupt Number in 88PM8607 */
-enum {
- PM8607_IRQ_ONKEY = 0,
- PM8607_IRQ_EXTON,
- PM8607_IRQ_CHG,
- PM8607_IRQ_BAT,
- PM8607_IRQ_RTC,
- PM8607_IRQ_VBAT = 8,
- PM8607_IRQ_VCHG,
- PM8607_IRQ_VSYS,
- PM8607_IRQ_TINT,
- PM8607_IRQ_GPADC0,
- PM8607_IRQ_GPADC1,
- PM8607_IRQ_GPADC2,
- PM8607_IRQ_GPADC3,
- PM8607_IRQ_AUDIO_SHORT = 16,
- PM8607_IRQ_PEN,
- PM8607_IRQ_HEADSET,
- PM8607_IRQ_HOOK,
- PM8607_IRQ_MICIN,
- PM8607_IRQ_CHG_FAIL,
- PM8607_IRQ_CHG_DONE,
- PM8607_IRQ_CHG_FAULT,
-};
-
-enum {
- PM8607_CHIP_A0 = 0x40,
- PM8607_CHIP_A1 = 0x41,
- PM8607_CHIP_B0 = 0x48,
-};
-
-
-struct pm8607_chip {
- struct device *dev;
- struct mutex io_lock;
- struct i2c_client *client;
-
- int (*read)(struct pm8607_chip *chip, int reg, int bytes, void *dest);
- int (*write)(struct pm8607_chip *chip, int reg, int bytes, void *src);
-
- int buck3_double; /* DVC ramp slope double */
- unsigned char chip_id;
-
-};
-
-#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
-
-enum {
- GI2C_PORT = 0,
- PI2C_PORT,
-};
-
-struct pm8607_platform_data {
- int i2c_port; /* Controlled by GI2C or PI2C */
- struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
-};
-
-extern int pm8607_reg_read(struct pm8607_chip *, int);
-extern int pm8607_reg_write(struct pm8607_chip *, int, unsigned char);
-extern int pm8607_bulk_read(struct pm8607_chip *, int, int,
- unsigned char *);
-extern int pm8607_bulk_write(struct pm8607_chip *, int, int,
- unsigned char *);
-extern int pm8607_set_bits(struct pm8607_chip *, int, unsigned char,
- unsigned char);
-#endif /* __LINUX_MFD_88PM8607_H */
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
new file mode 100644
index 000000000000..80bc82a7ac96
--- /dev/null
+++ b/include/linux/mfd/88pm860x.h
@@ -0,0 +1,381 @@
+/*
+ * Marvell 88PM860x Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_88PM860X_H
+#define __LINUX_MFD_88PM860X_H
+
+#include <linux/interrupt.h>
+
+#define MFD_NAME_SIZE (40)
+
+enum {
+ CHIP_INVALID = 0,
+ CHIP_PM8606,
+ CHIP_PM8607,
+ CHIP_MAX,
+};
+
+enum {
+ PM8606_ID_INVALID,
+ PM8606_ID_BACKLIGHT,
+ PM8606_ID_LED,
+ PM8606_ID_VIBRATOR,
+ PM8606_ID_TOUCH,
+ PM8606_ID_SOUND,
+ PM8606_ID_CHARGER,
+ PM8606_ID_MAX,
+};
+
+enum {
+ PM8606_BACKLIGHT1 = 0,
+ PM8606_BACKLIGHT2,
+ PM8606_BACKLIGHT3,
+};
+
+enum {
+ PM8606_LED1_RED = 0,
+ PM8606_LED1_GREEN,
+ PM8606_LED1_BLUE,
+ PM8606_LED2_RED,
+ PM8606_LED2_GREEN,
+ PM8606_LED2_BLUE,
+ PM8607_LED_VIBRATOR,
+};
+
+
+/* 8606 Registers */
+#define PM8606_DCM_BOOST (0x00)
+#define PM8606_PWM (0x01)
+
+/* Backlight Registers */
+#define PM8606_WLED1A (0x02)
+#define PM8606_WLED1B (0x03)
+#define PM8606_WLED2A (0x04)
+#define PM8606_WLED2B (0x05)
+#define PM8606_WLED3A (0x06)
+#define PM8606_WLED3B (0x07)
+
+/* LED Registers */
+#define PM8606_RGB2A (0x08)
+#define PM8606_RGB2B (0x09)
+#define PM8606_RGB2C (0x0A)
+#define PM8606_RGB2D (0x0B)
+#define PM8606_RGB1A (0x0C)
+#define PM8606_RGB1B (0x0D)
+#define PM8606_RGB1C (0x0E)
+#define PM8606_RGB1D (0x0F)
+
+#define PM8606_PREREGULATORA (0x10)
+#define PM8606_PREREGULATORB (0x11)
+#define PM8606_VIBRATORA (0x12)
+#define PM8606_VIBRATORB (0x13)
+#define PM8606_VCHG (0x14)
+#define PM8606_VSYS (0x15)
+#define PM8606_MISC (0x16)
+#define PM8606_CHIP_ID (0x17)
+#define PM8606_STATUS (0x18)
+#define PM8606_FLAGS (0x19)
+#define PM8606_PROTECTA (0x1A)
+#define PM8606_PROTECTB (0x1B)
+#define PM8606_PROTECTC (0x1C)
+
+/* Bit definitions of PM8606 registers */
+#define PM8606_DCM_500MA (0x0) /* current limit */
+#define PM8606_DCM_750MA (0x1)
+#define PM8606_DCM_1000MA (0x2)
+#define PM8606_DCM_1250MA (0x3)
+#define PM8606_DCM_250MV (0x0 << 2)
+#define PM8606_DCM_300MV (0x1 << 2)
+#define PM8606_DCM_350MV (0x2 << 2)
+#define PM8606_DCM_400MV (0x3 << 2)
+
+#define PM8606_PWM_31200HZ (0x0)
+#define PM8606_PWM_15600HZ (0x1)
+#define PM8606_PWM_7800HZ (0x2)
+#define PM8606_PWM_3900HZ (0x3)
+#define PM8606_PWM_1950HZ (0x4)
+#define PM8606_PWM_976HZ (0x5)
+#define PM8606_PWM_488HZ (0x6)
+#define PM8606_PWM_244HZ (0x7)
+#define PM8606_PWM_FREQ_MASK (0x7)
+
+#define PM8606_WLED_ON (1 << 0)
+#define PM8606_WLED_CURRENT(x) ((x & 0x1F) << 1)
+
+#define PM8606_LED_CURRENT(x) (((x >> 2) & 0x07) << 5)
+
+#define PM8606_VSYS_EN (1 << 1)
+
+#define PM8606_MISC_OSC_EN (1 << 4)
+
+enum {
+ PM8607_ID_BUCK1 = 0,
+ PM8607_ID_BUCK2,
+ PM8607_ID_BUCK3,
+
+ PM8607_ID_LDO1,
+ PM8607_ID_LDO2,
+ PM8607_ID_LDO3,
+ PM8607_ID_LDO4,
+ PM8607_ID_LDO5,
+ PM8607_ID_LDO6,
+ PM8607_ID_LDO7,
+ PM8607_ID_LDO8,
+ PM8607_ID_LDO9,
+ PM8607_ID_LDO10,
+ PM8607_ID_LDO12,
+ PM8607_ID_LDO14,
+
+ PM8607_ID_RG_MAX,
+};
+
+#define PM8607_VERSION (0x40) /* 8607 chip ID */
+#define PM8607_VERSION_MASK (0xF0) /* 8607 chip ID mask */
+
+/* Interrupt Registers */
+#define PM8607_STATUS_1 (0x01)
+#define PM8607_STATUS_2 (0x02)
+#define PM8607_INT_STATUS1 (0x03)
+#define PM8607_INT_STATUS2 (0x04)
+#define PM8607_INT_STATUS3 (0x05)
+#define PM8607_INT_MASK_1 (0x06)
+#define PM8607_INT_MASK_2 (0x07)
+#define PM8607_INT_MASK_3 (0x08)
+
+/* Regulator Control Registers */
+#define PM8607_LDO1 (0x10)
+#define PM8607_LDO2 (0x11)
+#define PM8607_LDO3 (0x12)
+#define PM8607_LDO4 (0x13)
+#define PM8607_LDO5 (0x14)
+#define PM8607_LDO6 (0x15)
+#define PM8607_LDO7 (0x16)
+#define PM8607_LDO8 (0x17)
+#define PM8607_LDO9 (0x18)
+#define PM8607_LDO10 (0x19)
+#define PM8607_LDO12 (0x1A)
+#define PM8607_LDO14 (0x1B)
+#define PM8607_SLEEP_MODE1 (0x1C)
+#define PM8607_SLEEP_MODE2 (0x1D)
+#define PM8607_SLEEP_MODE3 (0x1E)
+#define PM8607_SLEEP_MODE4 (0x1F)
+#define PM8607_GO (0x20)
+#define PM8607_SLEEP_BUCK1 (0x21)
+#define PM8607_SLEEP_BUCK2 (0x22)
+#define PM8607_SLEEP_BUCK3 (0x23)
+#define PM8607_BUCK1 (0x24)
+#define PM8607_BUCK2 (0x25)
+#define PM8607_BUCK3 (0x26)
+#define PM8607_BUCK_CONTROLS (0x27)
+#define PM8607_SUPPLIES_EN11 (0x2B)
+#define PM8607_SUPPLIES_EN12 (0x2C)
+#define PM8607_GROUP1 (0x2D)
+#define PM8607_GROUP2 (0x2E)
+#define PM8607_GROUP3 (0x2F)
+#define PM8607_GROUP4 (0x30)
+#define PM8607_GROUP5 (0x31)
+#define PM8607_GROUP6 (0x32)
+#define PM8607_SUPPLIES_EN21 (0x33)
+#define PM8607_SUPPLIES_EN22 (0x34)
+
+/* Vibrator Control Registers */
+#define PM8607_VIBRATOR_SET (0x28)
+#define PM8607_VIBRATOR_PWM (0x29)
+
+/* GPADC Registers */
+#define PM8607_GP_BIAS1 (0x4F)
+#define PM8607_MEAS_EN1 (0x50)
+#define PM8607_MEAS_EN2 (0x51)
+#define PM8607_MEAS_EN3 (0x52)
+#define PM8607_MEAS_OFF_TIME1 (0x53)
+#define PM8607_MEAS_OFF_TIME2 (0x54)
+#define PM8607_TSI_PREBIAS (0x55) /* prebias time */
+#define PM8607_PD_PREBIAS (0x56) /* prebias time */
+#define PM8607_GPADC_MISC1 (0x57)
+
+/* RTC Control Registers */
+#define PM8607_RTC1 (0xA0)
+#define PM8607_RTC_COUNTER1 (0xA1)
+#define PM8607_RTC_COUNTER2 (0xA2)
+#define PM8607_RTC_COUNTER3 (0xA3)
+#define PM8607_RTC_COUNTER4 (0xA4)
+#define PM8607_RTC_EXPIRE1 (0xA5)
+#define PM8607_RTC_EXPIRE2 (0xA6)
+#define PM8607_RTC_EXPIRE3 (0xA7)
+#define PM8607_RTC_EXPIRE4 (0xA8)
+#define PM8607_RTC_TRIM1 (0xA9)
+#define PM8607_RTC_TRIM2 (0xAA)
+#define PM8607_RTC_TRIM3 (0xAB)
+#define PM8607_RTC_TRIM4 (0xAC)
+#define PM8607_RTC_MISC1 (0xAD)
+#define PM8607_RTC_MISC2 (0xAE)
+#define PM8607_RTC_MISC3 (0xAF)
+
+/* Misc Registers */
+#define PM8607_CHIP_ID (0x00)
+#define PM8607_B0_MISC1 (0x0C)
+#define PM8607_LDO1 (0x10)
+#define PM8607_DVC3 (0x26)
+#define PM8607_A1_MISC1 (0x40)
+
+/* bit definitions of Status Query Interface */
+#define PM8607_STATUS_CC (1 << 3)
+#define PM8607_STATUS_PEN (1 << 4)
+#define PM8607_STATUS_HEADSET (1 << 5)
+#define PM8607_STATUS_HOOK (1 << 6)
+#define PM8607_STATUS_MICIN (1 << 7)
+#define PM8607_STATUS_ONKEY (1 << 8)
+#define PM8607_STATUS_EXTON (1 << 9)
+#define PM8607_STATUS_CHG (1 << 10)
+#define PM8607_STATUS_BAT (1 << 11)
+#define PM8607_STATUS_VBUS (1 << 12)
+#define PM8607_STATUS_OV (1 << 13)
+
+/* bit definitions of BUCK3 */
+#define PM8607_BUCK3_DOUBLE (1 << 6)
+
+/* bit definitions of Misc1 */
+#define PM8607_A1_MISC1_PI2C (1 << 0)
+#define PM8607_B0_MISC1_INV_INT (1 << 0)
+#define PM8607_B0_MISC1_INT_CLEAR (1 << 1)
+#define PM8607_B0_MISC1_INT_MASK (1 << 2)
+#define PM8607_B0_MISC1_PI2C (1 << 3)
+#define PM8607_B0_MISC1_RESET (1 << 6)
+
+/* bits definitions of GPADC */
+#define PM8607_GPADC_EN (1 << 0)
+#define PM8607_GPADC_PREBIAS_MASK (3 << 1)
+#define PM8607_GPADC_SLOT_CYCLE_MASK (3 << 3) /* slow mode */
+#define PM8607_GPADC_OFF_SCALE_MASK (3 << 5) /* GP sleep mode */
+#define PM8607_GPADC_SW_CAL_MASK (1 << 7)
+
+#define PM8607_PD_PREBIAS_MASK (0x1F << 0)
+#define PM8607_PD_PRECHG_MASK (7 << 5)
+
+/* Interrupt Number in 88PM8607 */
+enum {
+ PM8607_IRQ_ONKEY = 0,
+ PM8607_IRQ_EXTON,
+ PM8607_IRQ_CHG,
+ PM8607_IRQ_BAT,
+ PM8607_IRQ_RTC,
+ PM8607_IRQ_VBAT = 8,
+ PM8607_IRQ_VCHG,
+ PM8607_IRQ_VSYS,
+ PM8607_IRQ_TINT,
+ PM8607_IRQ_GPADC0,
+ PM8607_IRQ_GPADC1,
+ PM8607_IRQ_GPADC2,
+ PM8607_IRQ_GPADC3,
+ PM8607_IRQ_AUDIO_SHORT = 16,
+ PM8607_IRQ_PEN,
+ PM8607_IRQ_HEADSET,
+ PM8607_IRQ_HOOK,
+ PM8607_IRQ_MICIN,
+ PM8607_IRQ_CHG_FAIL,
+ PM8607_IRQ_CHG_DONE,
+ PM8607_IRQ_CHG_FAULT,
+};
+
+enum {
+ PM8607_CHIP_A0 = 0x40,
+ PM8607_CHIP_A1 = 0x41,
+ PM8607_CHIP_B0 = 0x48,
+};
+
+#define PM860X_NUM_IRQ 24
+
+struct pm860x_irq {
+ irq_handler_t handler;
+ void *data;
+};
+
+struct pm860x_chip {
+ struct device *dev;
+ struct mutex io_lock;
+ struct mutex irq_lock;
+ struct i2c_client *client;
+ struct i2c_client *companion; /* companion chip client */
+ struct pm860x_irq irq[PM860X_NUM_IRQ];
+
+ int buck3_double; /* DVC ramp slope double */
+ unsigned short companion_addr;
+ int id;
+ int irq_mode;
+ int chip_irq;
+ unsigned char chip_version;
+
+};
+
+#define PM8607_MAX_REGULATOR 15 /* 3 Bucks, 12 LDOs */
+
+enum {
+ GI2C_PORT = 0,
+ PI2C_PORT,
+};
+
+struct pm860x_backlight_pdata {
+ int id;
+ int pwm;
+ int iset;
+ unsigned long flags;
+};
+
+struct pm860x_led_pdata {
+ int id;
+ int iset;
+ unsigned long flags;
+};
+
+struct pm860x_touch_pdata {
+ int gpadc_prebias;
+ int slot_cycle;
+ int off_scale;
+ int sw_cal;
+ int tsi_prebias; /* time, slot */
+ int pen_prebias; /* time, slot */
+ int pen_prechg; /* time, slot */
+ int res_x; /* resistor of Xplate */
+ unsigned long flags;
+};
+
+struct pm860x_platform_data {
+ struct pm860x_backlight_pdata *backlight;
+ struct pm860x_led_pdata *led;
+ struct pm860x_touch_pdata *touch;
+
+ unsigned short companion_addr; /* I2C address of companion chip */
+ int i2c_port; /* Controlled by GI2C or PI2C */
+ int irq_mode; /* Clear interrupt by read/write(0/1) */
+ struct regulator_init_data *regulator[PM8607_MAX_REGULATOR];
+};
+
+extern char pm860x_backlight_name[][MFD_NAME_SIZE];
+extern char pm860x_led_name[][MFD_NAME_SIZE];
+
+extern int pm860x_reg_read(struct i2c_client *, int);
+extern int pm860x_reg_write(struct i2c_client *, int, unsigned char);
+extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
+extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
+extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
+ unsigned char);
+
+extern int pm860x_mask_irq(struct pm860x_chip *, int);
+extern int pm860x_unmask_irq(struct pm860x_chip *, int);
+extern int pm860x_request_irq(struct pm860x_chip *, int,
+ irq_handler_t handler, void *);
+extern int pm860x_free_irq(struct pm860x_chip *, int);
+
+extern int pm860x_device_init(struct pm860x_chip *chip,
+ struct pm860x_platform_data *pdata);
+extern void pm860x_device_exit(struct pm860x_chip *chip);
+
+#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/ab3100.h b/include/linux/mfd/ab3100.h
index e9aa4c9d749d..9a881c305a50 100644
--- a/include/linux/mfd/ab3100.h
+++ b/include/linux/mfd/ab3100.h
@@ -6,7 +6,6 @@
*/
#include <linux/device.h>
-#include <linux/workqueue.h>
#include <linux/regulator/machine.h>
#ifndef MFD_AB3100_H
@@ -74,7 +73,6 @@
* @testreg_client: secondary client for test registers
* @chip_name: name of this chip variant
* @chip_id: 8 bit chip ID for this chip variant
- * @work: an event handling worker
* @event_subscribers: event subscribers are listed here
* @startup_events: a copy of the first reading of the event registers
* @startup_events_read: whether the first events have been read
@@ -90,7 +88,6 @@ struct ab3100 {
struct i2c_client *testreg_client;
char chip_name[32];
u8 chip_id;
- struct work_struct work;
struct blocking_notifier_head event_subscribers;
u32 startup_events;
bool startup_events_read;
diff --git a/include/linux/mfd/max8925.h b/include/linux/mfd/max8925.h
new file mode 100644
index 000000000000..b72dbe174d51
--- /dev/null
+++ b/include/linux/mfd/max8925.h
@@ -0,0 +1,215 @@
+/*
+ * Maxim8925 Interface
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_MFD_MAX8925_H
+#define __LINUX_MFD_MAX8925_H
+
+#include <linux/interrupt.h>
+
+/* Unified sub device IDs for MAX8925 */
+enum {
+ MAX8925_ID_SD1,
+ MAX8925_ID_SD2,
+ MAX8925_ID_SD3,
+ MAX8925_ID_LDO1,
+ MAX8925_ID_LDO2,
+ MAX8925_ID_LDO3,
+ MAX8925_ID_LDO4,
+ MAX8925_ID_LDO5,
+ MAX8925_ID_LDO6,
+ MAX8925_ID_LDO7,
+ MAX8925_ID_LDO8,
+ MAX8925_ID_LDO9,
+ MAX8925_ID_LDO10,
+ MAX8925_ID_LDO11,
+ MAX8925_ID_LDO12,
+ MAX8925_ID_LDO13,
+ MAX8925_ID_LDO14,
+ MAX8925_ID_LDO15,
+ MAX8925_ID_LDO16,
+ MAX8925_ID_LDO17,
+ MAX8925_ID_LDO18,
+ MAX8925_ID_LDO19,
+ MAX8925_ID_LDO20,
+};
+
+/* Charger registers */
+#define MAX8925_CHG_IRQ1 (0x7e)
+#define MAX8925_CHG_IRQ2 (0x7f)
+#define MAX8925_CHG_IRQ1_MASK (0x80)
+#define MAX8925_CHG_IRQ2_MASK (0x81)
+
+/* GPM registers */
+#define MAX8925_SYSENSEL (0x00)
+#define MAX8925_ON_OFF_IRQ1 (0x01)
+#define MAX8925_ON_OFF_IRQ1_MASK (0x02)
+#define MAX8925_ON_OFF_STAT (0x03)
+#define MAX8925_ON_OFF_IRQ2 (0x0d)
+#define MAX8925_ON_OFF_IRQ2_MASK (0x0e)
+#define MAX8925_RESET_CNFG (0x0f)
+
+/* Touch registers */
+#define MAX8925_TSC_IRQ (0x00)
+#define MAX8925_TSC_IRQ_MASK (0x01)
+#define MAX8925_ADC_RES_END (0x6f)
+
+/* RTC registers */
+#define MAX8925_RTC_STATUS (0x1a)
+#define MAX8925_RTC_IRQ (0x1c)
+#define MAX8925_RTC_IRQ_MASK (0x1d)
+
+/* WLED registers */
+#define MAX8925_WLED_MODE_CNTL (0x84)
+#define MAX8925_WLED_CNTL (0x85)
+
+/* MAX8925 Registers */
+#define MAX8925_SDCTL1 (0x04)
+#define MAX8925_SDCTL2 (0x07)
+#define MAX8925_SDCTL3 (0x0A)
+#define MAX8925_SDV1 (0x06)
+#define MAX8925_SDV2 (0x09)
+#define MAX8925_SDV3 (0x0C)
+#define MAX8925_LDOCTL1 (0x18)
+#define MAX8925_LDOCTL2 (0x1C)
+#define MAX8925_LDOCTL3 (0x20)
+#define MAX8925_LDOCTL4 (0x24)
+#define MAX8925_LDOCTL5 (0x28)
+#define MAX8925_LDOCTL6 (0x2C)
+#define MAX8925_LDOCTL7 (0x30)
+#define MAX8925_LDOCTL8 (0x34)
+#define MAX8925_LDOCTL9 (0x38)
+#define MAX8925_LDOCTL10 (0x3C)
+#define MAX8925_LDOCTL11 (0x40)
+#define MAX8925_LDOCTL12 (0x44)
+#define MAX8925_LDOCTL13 (0x48)
+#define MAX8925_LDOCTL14 (0x4C)
+#define MAX8925_LDOCTL15 (0x50)
+#define MAX8925_LDOCTL16 (0x10)
+#define MAX8925_LDOCTL17 (0x14)
+#define MAX8925_LDOCTL18 (0x72)
+#define MAX8925_LDOCTL19 (0x5C)
+#define MAX8925_LDOCTL20 (0x9C)
+#define MAX8925_LDOVOUT1 (0x1A)
+#define MAX8925_LDOVOUT2 (0x1E)
+#define MAX8925_LDOVOUT3 (0x22)
+#define MAX8925_LDOVOUT4 (0x26)
+#define MAX8925_LDOVOUT5 (0x2A)
+#define MAX8925_LDOVOUT6 (0x2E)
+#define MAX8925_LDOVOUT7 (0x32)
+#define MAX8925_LDOVOUT8 (0x36)
+#define MAX8925_LDOVOUT9 (0x3A)
+#define MAX8925_LDOVOUT10 (0x3E)
+#define MAX8925_LDOVOUT11 (0x42)
+#define MAX8925_LDOVOUT12 (0x46)
+#define MAX8925_LDOVOUT13 (0x4A)
+#define MAX8925_LDOVOUT14 (0x4E)
+#define MAX8925_LDOVOUT15 (0x52)
+#define MAX8925_LDOVOUT16 (0x12)
+#define MAX8925_LDOVOUT17 (0x16)
+#define MAX8925_LDOVOUT18 (0x74)
+#define MAX8925_LDOVOUT19 (0x5E)
+#define MAX8925_LDOVOUT20 (0x9E)
+
+/* bit definitions */
+#define CHG_IRQ1_MASK (0x07)
+#define CHG_IRQ2_MASK (0xff)
+#define ON_OFF_IRQ1_MASK (0xff)
+#define ON_OFF_IRQ2_MASK (0x03)
+#define TSC_IRQ_MASK (0x03)
+#define RTC_IRQ_MASK (0x0c)
+
+#define MAX8925_NUM_IRQ (32)
+
+#define MAX8925_NAME_SIZE (32)
+
+enum {
+ MAX8925_INVALID = 0,
+ MAX8925_RTC,
+ MAX8925_ADC,
+ MAX8925_GPM, /* general power management */
+ MAX8925_MAX,
+};
+
+#define MAX8925_IRQ_VCHG_OVP (0)
+#define MAX8925_IRQ_VCHG_F (1)
+#define MAX8925_IRQ_VCHG_R (2)
+#define MAX8925_IRQ_VCHG_THM_OK_R (8)
+#define MAX8925_IRQ_VCHG_THM_OK_F (9)
+#define MAX8925_IRQ_VCHG_BATTLOW_F (10)
+#define MAX8925_IRQ_VCHG_BATTLOW_R (11)
+#define MAX8925_IRQ_VCHG_RST (12)
+#define MAX8925_IRQ_VCHG_DONE (13)
+#define MAX8925_IRQ_VCHG_TOPOFF (14)
+#define MAX8925_IRQ_VCHG_TMR_FAULT (15)
+#define MAX8925_IRQ_GPM_RSTIN (16)
+#define MAX8925_IRQ_GPM_MPL (17)
+#define MAX8925_IRQ_GPM_SW_3SEC (18)
+#define MAX8925_IRQ_GPM_EXTON_F (19)
+#define MAX8925_IRQ_GPM_EXTON_R (20)
+#define MAX8925_IRQ_GPM_SW_1SEC (21)
+#define MAX8925_IRQ_GPM_SW_F (22)
+#define MAX8925_IRQ_GPM_SW_R (23)
+#define MAX8925_IRQ_GPM_SYSCKEN_F (24)
+#define MAX8925_IRQ_GPM_SYSCKEN_R (25)
+
+#define MAX8925_IRQ_TSC_STICK (0)
+#define MAX8925_IRQ_TSC_NSTICK (1)
+
+#define MAX8925_MAX_REGULATOR (23)
+
+struct max8925_irq {
+ irq_handler_t handler;
+ void *data;
+};
+
+struct max8925_chip {
+ struct device *dev;
+ struct mutex io_lock;
+ struct mutex irq_lock;
+ struct i2c_client *i2c;
+ struct max8925_irq irq[MAX8925_NUM_IRQ];
+
+ const char *name;
+ int chip_id;
+ int chip_irq;
+};
+
+struct max8925_backlight_pdata {
+ int lxw_scl; /* 0/1 -- 0.8Ohm/0.4Ohm */
+ int lxw_freq; /* 700KHz ~ 1400KHz */
+ int dual_string; /* 0/1 -- single/dual string */
+};
+
+struct max8925_touch_pdata {
+ unsigned int flags;
+};
+
+struct max8925_platform_data {
+ struct max8925_backlight_pdata *backlight;
+ struct max8925_touch_pdata *touch;
+ struct regulator_init_data *regulator[MAX8925_MAX_REGULATOR];
+
+ int chip_id;
+ int chip_irq;
+};
+
+extern int max8925_reg_read(struct i2c_client *, int);
+extern int max8925_reg_write(struct i2c_client *, int, unsigned char);
+extern int max8925_bulk_read(struct i2c_client *, int, int, unsigned char *);
+extern int max8925_bulk_write(struct i2c_client *, int, int, unsigned char *);
+extern int max8925_set_bits(struct i2c_client *, int, unsigned char,
+ unsigned char);
+
+extern int max8925_device_init(struct max8925_chip *,
+ struct max8925_platform_data *);
+extern void max8925_device_exit(struct max8925_chip *);
+#endif /* __LINUX_MFD_MAX8925_H */
+
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 6b9c5d06690c..9cb1834deffa 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -2,6 +2,8 @@
#define MFD_TMIO_H
#include <linux/fb.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
#define tmio_ioread8(addr) readb(addr)
#define tmio_ioread16(addr) readw(addr)
@@ -18,11 +20,48 @@
writew((val) >> 16, (addr) + 2); \
} while (0)
+#define CNF_CMD 0x04
+#define CNF_CTL_BASE 0x10
+#define CNF_INT_PIN 0x3d
+#define CNF_STOP_CLK_CTL 0x40
+#define CNF_GCLK_CTL 0x41
+#define CNF_SD_CLK_MODE 0x42
+#define CNF_PIN_STATUS 0x44
+#define CNF_PWR_CTL_1 0x48
+#define CNF_PWR_CTL_2 0x49
+#define CNF_PWR_CTL_3 0x4a
+#define CNF_CARD_DETECT_MODE 0x4c
+#define CNF_SD_SLOT 0x50
+#define CNF_EXT_GCLK_CTL_1 0xf0
+#define CNF_EXT_GCLK_CTL_2 0xf1
+#define CNF_EXT_GCLK_CTL_3 0xf9
+#define CNF_SD_LED_EN_1 0xfa
+#define CNF_SD_LED_EN_2 0xfe
+
+#define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/
+
+#define sd_config_write8(base, shift, reg, val) \
+ tmio_iowrite8((val), (base) + ((reg) << (shift)))
+#define sd_config_write16(base, shift, reg, val) \
+ tmio_iowrite16((val), (base) + ((reg) << (shift)))
+#define sd_config_write32(base, shift, reg, val) \
+ do { \
+ tmio_iowrite16((val), (base) + ((reg) << (shift))); \
+ tmio_iowrite16((val) >> 16, (base) + ((reg + 2) << (shift))); \
+ } while (0)
+
+int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
+int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
+void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
+void tmio_core_mmc_clk_div(void __iomem *cnf, int shift, int state);
+
/*
* data for the MMC controller
*/
struct tmio_mmc_data {
const unsigned int hclk;
+ void (*set_pwr)(struct platform_device *host, int state);
+ void (*set_clk_div)(struct platform_device *host, int state);
};
/*
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 43868899bf49..fae08aa65413 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -579,6 +579,8 @@
#define WM8350_NUM_IRQ 63
+#define WM8350_NUM_IRQ_REGS 7
+
struct wm8350_reg_access {
u16 readable; /* Mask of readable bits */
u16 writable; /* Mask of writable bits */
@@ -600,11 +602,6 @@ extern const u16 wm8352_mode3_defaults[];
struct wm8350;
-struct wm8350_irq {
- irq_handler_t handler;
- void *data;
-};
-
struct wm8350_hwmon {
struct platform_device *pdev;
struct device *classdev;
@@ -626,9 +623,10 @@ struct wm8350 {
struct mutex auxadc_mutex;
/* Interrupt handling */
- struct mutex irq_mutex; /* IRQ table mutex */
- struct wm8350_irq irq[WM8350_NUM_IRQ];
+ struct mutex irq_lock;
int chip_irq;
+ int irq_base;
+ u16 irq_masks[WM8350_NUM_IRQ_REGS];
/* Client devices */
struct wm8350_codec codec;
@@ -647,11 +645,13 @@ struct wm8350 {
* used by the platform to configure GPIO functions and similar.
* @irq_high: Set if WM8350 IRQ is active high.
* @irq_base: Base IRQ for genirq (not currently used).
+ * @gpio_base: Base for gpiolib.
*/
struct wm8350_platform_data {
int (*init)(struct wm8350 *wm8350);
int irq_high;
int irq_base;
+ int gpio_base;
};
@@ -677,12 +677,33 @@ int wm8350_block_write(struct wm8350 *wm8350, int reg, int size, u16 *src);
/*
* WM8350 internal interrupts
*/
-int wm8350_register_irq(struct wm8350 *wm8350, int irq,
- irq_handler_t handler, unsigned long flags,
- const char *name, void *data);
-int wm8350_free_irq(struct wm8350 *wm8350, int irq);
-int wm8350_mask_irq(struct wm8350 *wm8350, int irq);
-int wm8350_unmask_irq(struct wm8350 *wm8350, int irq);
+static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq,
+ irq_handler_t handler,
+ unsigned long flags,
+ const char *name, void *data)
+{
+ if (!wm8350->irq_base)
+ return -ENODEV;
+
+ return request_threaded_irq(irq + wm8350->irq_base, NULL,
+ handler, flags, name, data);
+}
+
+static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data)
+{
+ free_irq(irq + wm8350->irq_base, data);
+}
+
+static inline void wm8350_mask_irq(struct wm8350 *wm8350, int irq)
+{
+ disable_irq(irq + wm8350->irq_base);
+}
+
+static inline void wm8350_unmask_irq(struct wm8350 *wm8350, int irq)
+{
+ enable_irq(irq + wm8350->irq_base);
+}
+
int wm8350_irq_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata);
int wm8350_irq_exit(struct wm8350 *wm8350);
diff --git a/include/linux/mfd/wm8350/gpio.h b/include/linux/mfd/wm8350/gpio.h
index 71af3d6ebe9d..d657bcd6d955 100644
--- a/include/linux/mfd/wm8350/gpio.h
+++ b/include/linux/mfd/wm8350/gpio.h
@@ -29,6 +29,7 @@
#define WM8350_GPIO_FUNCTION_SELECT_2 0x8D
#define WM8350_GPIO_FUNCTION_SELECT_3 0x8E
#define WM8350_GPIO_FUNCTION_SELECT_4 0x8F
+#define WM8350_GPIO_LEVEL 0xE6
/*
* GPIO Functions
diff --git a/include/linux/mfd/wm8350/pmic.h b/include/linux/mfd/wm8350/pmic.h
index be3264e286e0..e786fe9841ef 100644
--- a/include/linux/mfd/wm8350/pmic.h
+++ b/include/linux/mfd/wm8350/pmic.h
@@ -666,20 +666,20 @@
#define WM8350_ISINK_FLASH_DUR_64MS (1 << 8)
#define WM8350_ISINK_FLASH_DUR_96MS (2 << 8)
#define WM8350_ISINK_FLASH_DUR_1024MS (3 << 8)
-#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 4)
-#define WM8350_ISINK_FLASH_ON_0_25S (1 << 4)
-#define WM8350_ISINK_FLASH_ON_0_50S (2 << 4)
-#define WM8350_ISINK_FLASH_ON_1_00S (3 << 4)
-#define WM8350_ISINK_FLASH_ON_1_95S (1 << 4)
-#define WM8350_ISINK_FLASH_ON_3_91S (2 << 4)
-#define WM8350_ISINK_FLASH_ON_7_80S (3 << 4)
-#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 0)
-#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 0)
-#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 0)
-#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 0)
-#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 0)
-#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 0)
-#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 0)
+#define WM8350_ISINK_FLASH_ON_INSTANT (0 << 0)
+#define WM8350_ISINK_FLASH_ON_0_25S (1 << 0)
+#define WM8350_ISINK_FLASH_ON_0_50S (2 << 0)
+#define WM8350_ISINK_FLASH_ON_1_00S (3 << 0)
+#define WM8350_ISINK_FLASH_ON_1_95S (1 << 0)
+#define WM8350_ISINK_FLASH_ON_3_91S (2 << 0)
+#define WM8350_ISINK_FLASH_ON_7_80S (3 << 0)
+#define WM8350_ISINK_FLASH_OFF_INSTANT (0 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_25S (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_0_50S (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_00S (3 << 4)
+#define WM8350_ISINK_FLASH_OFF_1_95S (1 << 4)
+#define WM8350_ISINK_FLASH_OFF_3_91S (2 << 4)
+#define WM8350_ISINK_FLASH_OFF_7_80S (3 << 4)
/*
* Regulator Interrupts.
diff --git a/include/linux/mfd/wm8350/rtc.h b/include/linux/mfd/wm8350/rtc.h
index 24add2bef6c9..ebd72ffc62d1 100644
--- a/include/linux/mfd/wm8350/rtc.h
+++ b/include/linux/mfd/wm8350/rtc.h
@@ -263,6 +263,7 @@ struct wm8350_rtc {
struct platform_device *pdev;
struct rtc_device *rtc;
int alarm_enabled; /* used over suspend/resume */
+ int update_enabled;
};
#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 2265f28eb47a..554fa395aac9 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1079,11 +1079,7 @@ extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
extern int after_bootmem;
-#ifdef CONFIG_NUMA
extern void setup_per_cpu_pageset(void);
-#else
-static inline void setup_per_cpu_pageset(void) {}
-#endif
extern void zone_pcp_update(struct zone *zone);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 84a524afb3dc..84d020bed083 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -123,6 +123,8 @@ struct vm_region {
struct file *vm_file; /* the backing file or NULL */
atomic_t vm_usage; /* region usage count */
+ bool vm_icache_flushed : 1; /* true if the icache has been flushed for
+ * this region */
};
/*
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668c2542..90890c5471c4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
s8 stat_threshold;
s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
+};
#endif /* !__GENERATING_BOUNDS.H */
@@ -306,10 +300,8 @@ struct zone {
*/
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
- struct per_cpu_pageset *pageset[NR_CPUS];
-#else
- struct per_cpu_pageset pageset[NR_CPUS];
#endif
+ struct per_cpu_pageset *pageset;
/*
* free areas of different sizes
*/
@@ -349,7 +341,7 @@ struct zone {
* prev_priority holds the scanning priority for this zone. It is
* defined as the scanning priority at which we achieved our reclaim
* target at the previous try_to_free_pages() or balance_pgdat()
- * invokation.
+ * invocation.
*
* We use prev_priority as a measure of how much stress page reclaim is
* under - it drives the swappiness decision: whether to unmap mapped
diff --git a/include/linux/module.h b/include/linux/module.h
index 6cb1a3cab5d3..7e74ae0051cc 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -17,7 +17,7 @@
#include <linux/moduleparam.h>
#include <linux/tracepoint.h>
-#include <asm/local.h>
+#include <linux/percpu.h>
#include <asm/module.h>
#include <trace/events/module.h>
@@ -363,11 +363,9 @@ struct module
/* Destruction function. */
void (*exit)(void);
-#ifdef CONFIG_SMP
- char *refptr;
-#else
- local_t ref;
-#endif
+ struct module_ref {
+ int count;
+ } *refptr;
#endif
#ifdef CONFIG_CONSTRUCTORS
@@ -454,25 +452,16 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr);
-static inline local_t *__module_ref_addr(struct module *mod, int cpu)
-{
-#ifdef CONFIG_SMP
- return (local_t *) (mod->refptr + per_cpu_offset(cpu));
-#else
- return &mod->ref;
-#endif
-}
-
/* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_inc(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
- put_cpu();
+ __this_cpu_read(module->refptr->count));
+ preempt_enable();
}
}
@@ -481,15 +470,17 @@ static inline int try_module_get(struct module *module)
int ret = 1;
if (module) {
- unsigned int cpu = get_cpu();
+ preempt_disable();
+
if (likely(module_is_live(module))) {
- local_inc(__module_ref_addr(module, cpu));
+ __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
}
else
ret = 0;
- put_cpu();
+
+ preempt_enable();
}
return ret;
}
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5d5275364867..9d542fc88e6c 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -45,7 +45,11 @@ struct vfsmount {
struct list_head mnt_mounts; /* list of children, anchored here */
struct list_head mnt_child; /* and going through their mnt_child */
int mnt_flags;
- /* 4 bytes hole on 64bits arches */
+ /* 4 bytes hole on 64bits arches without fsnotify */
+#ifdef CONFIG_FSNOTIFY
+ __u32 mnt_fsnotify_mask;
+ struct hlist_head mnt_fsnotify_marks;
+#endif
const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
struct list_head mnt_list;
struct list_head mnt_expire; /* link in fs-specific expiry list */
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index da8ea2e19273..127a73015760 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -270,6 +270,35 @@
* @NL80211_CMD_SET_WIPHY_NETNS: Set a wiphy's netns. Note that all devices
* associated with this wiphy must be down and will follow.
*
+ * @NL80211_CMD_REMAIN_ON_CHANNEL: Request to remain awake on the specified
+ * channel for the specified amount of time. This can be used to do
+ * off-channel operations like transmit a Public Action frame and wait for
+ * a response while being associated to an AP on another channel.
+ * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify which
+ * radio is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the
+ * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be
+ * optionally used to specify additional channel parameters.
+ * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds
+ * to remain on the channel. This command is also used as an event to
+ * notify when the requested duration starts (it may take a while for the
+ * driver to schedule this time due to other concurrent needs for the
+ * radio).
+ * When called, this operation returns a cookie (%NL80211_ATTR_COOKIE)
+ * that will be included with any events pertaining to this request;
+ * the cookie is also used to cancel the request.
+ * @NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL: This command can be used to cancel a
+ * pending remain-on-channel duration if the desired operation has been
+ * completed prior to expiration of the originally requested duration.
+ * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify the
+ * radio. The %NL80211_ATTR_COOKIE attribute must be given as well to
+ * uniquely identify the request.
+ * This command is also used as an event to notify when a requested
+ * remain-on-channel duration has expired.
+ *
+ * @NL80211_CMD_SET_TX_BITRATE_MASK: Set the mask of rates to be used in TX
+ * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface
+ * and @NL80211_ATTR_TX_RATES the set of allowed rates.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -353,6 +382,11 @@ enum nl80211_commands {
NL80211_CMD_DEL_PMKSA,
NL80211_CMD_FLUSH_PMKSA,
+ NL80211_CMD_REMAIN_ON_CHANNEL,
+ NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
+
+ NL80211_CMD_SET_TX_BITRATE_MASK,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -402,6 +436,8 @@ enum nl80211_commands {
* @NL80211_ATTR_WIPHY_RTS_THRESHOLD: RTS threshold (TX frames with length
* larger than or equal to this use RTS/CTS handshake); allowed range:
* 0..65536, disable with (u32)-1; dot11RTSThreshold; u32
+ * @NL80211_ATTR_WIPHY_COVERAGE_CLASS: Coverage Class as defined by IEEE 802.11
+ * section 7.3.2.9; dot11CoverageClass; u8
*
* @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
* @NL80211_ATTR_IFNAME: network interface name
@@ -606,6 +642,17 @@ enum nl80211_commands {
* @NL80211_ATTR_MAX_NUM_PMKIDS: maximum number of PMKIDs a firmware can
* cache, a wiphy attribute.
*
+ * @NL80211_ATTR_DURATION: Duration of an operation in milliseconds, u32.
+ *
+ * @NL80211_ATTR_COOKIE: Generic 64-bit cookie to identify objects.
+ *
+ * @NL80211_ATTR_TX_RATES: Nested set of attributes
+ * (enum nl80211_tx_rate_attributes) describing TX rates per band. The
+ * enum nl80211_band value is used as the index (nla_type() of the nested
+ * data. If a band is not included, it will be configured to allow all
+ * rates based on negotiated supported rates information. This attribute
+ * is used with %NL80211_CMD_SET_TX_BITRATE_MASK.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -743,6 +790,14 @@ enum nl80211_attrs {
NL80211_ATTR_PMKID,
NL80211_ATTR_MAX_NUM_PMKIDS,
+ NL80211_ATTR_DURATION,
+
+ NL80211_ATTR_COOKIE,
+
+ NL80211_ATTR_WIPHY_COVERAGE_CLASS,
+
+ NL80211_ATTR_TX_RATES,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -1323,13 +1378,20 @@ enum nl80211_channel_type {
* @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16)
* @NL80211_BSS_CAPABILITY: capability field (CPU order, u16)
* @NL80211_BSS_INFORMATION_ELEMENTS: binary attribute containing the
- * raw information elements from the probe response/beacon (bin)
+ * raw information elements from the probe response/beacon (bin);
+ * if the %NL80211_BSS_BEACON_IES attribute is present, the IEs here are
+ * from a Probe Response frame; otherwise they are from a Beacon frame.
+ * However, if the driver does not indicate the source of the IEs, these
+ * IEs may be from either frame subtype.
* @NL80211_BSS_SIGNAL_MBM: signal strength of probe response/beacon
* in mBm (100 * dBm) (s32)
* @NL80211_BSS_SIGNAL_UNSPEC: signal strength of the probe response/beacon
* in unspecified units, scaled to 0..100 (u8)
* @NL80211_BSS_STATUS: status, if this BSS is "used"
* @NL80211_BSS_SEEN_MS_AGO: age of this BSS entry in ms
+ * @NL80211_BSS_BEACON_IES: binary attribute containing the raw information
+ * elements from a Beacon frame (bin); not present if no Beacon frame has
+ * yet been received
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -1345,6 +1407,7 @@ enum nl80211_bss {
NL80211_BSS_SIGNAL_UNSPEC,
NL80211_BSS_STATUS,
NL80211_BSS_SEEN_MS_AGO,
+ NL80211_BSS_BEACON_IES,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -1442,4 +1505,33 @@ enum nl80211_key_attributes {
NL80211_KEY_MAX = __NL80211_KEY_AFTER_LAST - 1
};
+/**
+ * enum nl80211_tx_rate_attributes - TX rate set attributes
+ * @__NL80211_TXRATE_INVALID: invalid
+ * @NL80211_TXRATE_LEGACY: Legacy (non-MCS) rates allowed for TX rate selection
+ * in an array of rates as defined in IEEE 802.11 7.3.2.2 (u8 values with
+ * 1 = 500 kbps) but without the IE length restriction (at most
+ * %NL80211_MAX_SUPP_RATES in a single array).
+ * @__NL80211_TXRATE_AFTER_LAST: internal
+ * @NL80211_TXRATE_MAX: highest TX rate attribute
+ */
+enum nl80211_tx_rate_attributes {
+ __NL80211_TXRATE_INVALID,
+ NL80211_TXRATE_LEGACY,
+
+ /* keep last */
+ __NL80211_TXRATE_AFTER_LAST,
+ NL80211_TXRATE_MAX = __NL80211_TXRATE_AFTER_LAST - 1
+};
+
+/**
+ * enum nl80211_band - Frequency band
+ * @NL80211_BAND_2GHZ - 2.4 GHz ISM band
+ * @NL80211_BAND_5GHZ - around 5 GHz band (4.9 - 5.7 GHz)
+ */
+enum nl80211_band {
+ NL80211_BAND_2GHZ,
+ NL80211_BAND_5GHZ,
+};
+
#endif /* __LINUX_NL80211_H */
diff --git a/include/linux/padata.h b/include/linux/padata.h
new file mode 100644
index 000000000000..51611da9c498
--- /dev/null
+++ b/include/linux/padata.h
@@ -0,0 +1,88 @@
+/*
+ * padata.h - header for the padata parallelization interface
+ *
+ * Copyright (C) 2008, 2009 secunet Security Networks AG
+ * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef PADATA_H
+#define PADATA_H
+
+#include <linux/workqueue.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+struct padata_priv {
+ struct list_head list;
+ struct parallel_data *pd;
+ int cb_cpu;
+ int seq_nr;
+ int info;
+ void (*parallel)(struct padata_priv *padata);
+ void (*serial)(struct padata_priv *padata);
+};
+
+struct padata_list {
+ struct list_head list;
+ spinlock_t lock;
+};
+
+struct padata_queue {
+ struct padata_list parallel;
+ struct padata_list reorder;
+ struct padata_list serial;
+ struct work_struct pwork;
+ struct work_struct swork;
+ struct parallel_data *pd;
+ atomic_t num_obj;
+ int cpu_index;
+};
+
+struct parallel_data {
+ struct padata_instance *pinst;
+ struct padata_queue *queue;
+ atomic_t seq_nr;
+ atomic_t reorder_objects;
+ atomic_t refcnt;
+ unsigned int max_seq_nr;
+ cpumask_var_t cpumask;
+ spinlock_t lock;
+};
+
+struct padata_instance {
+ struct notifier_block cpu_notifier;
+ struct workqueue_struct *wq;
+ struct parallel_data *pd;
+ cpumask_var_t cpumask;
+ struct mutex lock;
+ u8 flags;
+#define PADATA_INIT 1
+#define PADATA_RESET 2
+};
+
+extern struct padata_instance *padata_alloc(const struct cpumask *cpumask,
+ struct workqueue_struct *wq);
+extern void padata_free(struct padata_instance *pinst);
+extern int padata_do_parallel(struct padata_instance *pinst,
+ struct padata_priv *padata, int cb_cpu);
+extern void padata_do_serial(struct padata_priv *padata);
+extern int padata_set_cpumask(struct padata_instance *pinst,
+ cpumask_var_t cpumask);
+extern int padata_add_cpu(struct padata_instance *pinst, int cpu);
+extern int padata_remove_cpu(struct padata_instance *pinst, int cpu);
+extern void padata_start(struct padata_instance *pinst);
+extern void padata_stop(struct padata_instance *pinst);
+#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5da0690d9cee..1d7bde4097a1 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -187,6 +187,33 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
};
+/* Based on the PCI Hotplug Spec, but some values are made up by us */
+enum pci_bus_speed {
+ PCI_SPEED_33MHz = 0x00,
+ PCI_SPEED_66MHz = 0x01,
+ PCI_SPEED_66MHz_PCIX = 0x02,
+ PCI_SPEED_100MHz_PCIX = 0x03,
+ PCI_SPEED_133MHz_PCIX = 0x04,
+ PCI_SPEED_66MHz_PCIX_ECC = 0x05,
+ PCI_SPEED_100MHz_PCIX_ECC = 0x06,
+ PCI_SPEED_133MHz_PCIX_ECC = 0x07,
+ PCI_SPEED_66MHz_PCIX_266 = 0x09,
+ PCI_SPEED_100MHz_PCIX_266 = 0x0a,
+ PCI_SPEED_133MHz_PCIX_266 = 0x0b,
+ AGP_UNKNOWN = 0x0c,
+ AGP_1X = 0x0d,
+ AGP_2X = 0x0e,
+ AGP_4X = 0x0f,
+ AGP_8X = 0x10,
+ PCI_SPEED_66MHz_PCIX_533 = 0x11,
+ PCI_SPEED_100MHz_PCIX_533 = 0x12,
+ PCI_SPEED_133MHz_PCIX_533 = 0x13,
+ PCIE_SPEED_2_5GT = 0x14,
+ PCIE_SPEED_5_0GT = 0x15,
+ PCIE_SPEED_8_0GT = 0x16,
+ PCI_SPEED_UNKNOWN = 0xff,
+};
+
struct pci_cap_saved_state {
struct hlist_node next;
char cap_nr;
@@ -243,6 +270,7 @@ struct pci_dev {
unsigned int d2_support:1; /* Low power state D2 is supported */
unsigned int no_d1d2:1; /* Only allow D0 and D3 */
unsigned int wakeup_prepared:1;
+ unsigned int d3_delay; /* D3->D0 transition time in ms */
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state. */
@@ -358,6 +386,8 @@ struct pci_bus {
unsigned char primary; /* number of primary bridge */
unsigned char secondary; /* number of secondary bridge */
unsigned char subordinate; /* max number of subordinate buses */
+ unsigned char max_bus_speed; /* enum pci_bus_speed */
+ unsigned char cur_bus_speed; /* enum pci_bus_speed */
char name[48];
@@ -571,6 +601,7 @@ void pci_fixup_cardbus(struct pci_bus *);
/* Generic PCI functions used internally */
+void pcibios_scan_specific_bus(int busn);
extern struct pci_bus *pci_find_bus(int domain, int busnr);
void pci_bus_add_devices(const struct pci_bus *bus);
struct pci_bus *pci_scan_bus_parented(struct device *parent, int bus,
@@ -588,6 +619,7 @@ struct pci_bus *pci_create_bus(struct device *parent, int bus,
struct pci_ops *ops, void *sysdata);
struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
int busnr);
+void pcie_update_link_speed(struct pci_bus *bus, u16 link_status);
struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr,
const char *name,
struct hotplug_slot *hotplug);
@@ -1236,8 +1268,12 @@ enum pci_fixup_pass {
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
suspend##vendor##device##hook, vendor, device, hook)
-
+#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
+#else
+static inline void pci_fixup_device(enum pci_fixup_pass pass,
+ struct pci_dev *dev) {}
+#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 652ba797696d..5d09cbafa7db 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -28,26 +28,6 @@
#ifndef _PCI_HOTPLUG_H
#define _PCI_HOTPLUG_H
-
-/* These values come from the PCI Hotplug Spec */
-enum pci_bus_speed {
- PCI_SPEED_33MHz = 0x00,
- PCI_SPEED_66MHz = 0x01,
- PCI_SPEED_66MHz_PCIX = 0x02,
- PCI_SPEED_100MHz_PCIX = 0x03,
- PCI_SPEED_133MHz_PCIX = 0x04,
- PCI_SPEED_66MHz_PCIX_ECC = 0x05,
- PCI_SPEED_100MHz_PCIX_ECC = 0x06,
- PCI_SPEED_133MHz_PCIX_ECC = 0x07,
- PCI_SPEED_66MHz_PCIX_266 = 0x09,
- PCI_SPEED_100MHz_PCIX_266 = 0x0a,
- PCI_SPEED_133MHz_PCIX_266 = 0x0b,
- PCI_SPEED_66MHz_PCIX_533 = 0x11,
- PCI_SPEED_100MHz_PCIX_533 = 0x12,
- PCI_SPEED_133MHz_PCIX_533 = 0x13,
- PCI_SPEED_UNKNOWN = 0xff,
-};
-
/* These values come from the PCI Express Spec */
enum pcie_link_width {
PCIE_LNK_WIDTH_RESRV = 0x00,
@@ -61,12 +41,6 @@ enum pcie_link_width {
PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
};
-enum pcie_link_speed {
- PCIE_2_5GB = 0x14,
- PCIE_5_0GB = 0x15,
- PCIE_LNK_SPEED_UNKNOWN = 0xFF,
-};
-
/**
* struct hotplug_slot_ops -the callbacks that the hotplug pci core can use
* @owner: The module owner of this structure
@@ -89,12 +63,6 @@ enum pcie_link_speed {
* @get_adapter_status: Called to get see if an adapter is present in the slot or not.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
- * @get_max_bus_speed: Called to get the max bus speed for a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
- * @get_cur_bus_speed: Called to get the current bus speed for a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
*
* The table of function pointers that is passed to the hotplug pci core by a
* hotplug pci driver. These functions are called by the hotplug pci core when
@@ -112,17 +80,14 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
- int (*get_max_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
- int (*get_cur_bus_speed) (struct hotplug_slot *slot, enum pci_bus_speed *value);
};
/**
* struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
- * @power: if power is enabled or not (1/0)
+ * @power_status: if power is enabled or not (1/0)
* @attention_status: if the attention light is enabled or not (1/0)
* @latch_status: if the latch (if any) is open or closed (1/0)
- * @adapter_present: if there is a pci board present in the slot or not (1/0)
- * @address: (domain << 16 | bus << 8 | dev)
+ * @adapter_status: if there is a pci board present in the slot or not (1/0)
*
* Used to notify the hotplug pci core of the status of a specific slot.
*/
@@ -131,8 +96,6 @@ struct hotplug_slot_info {
u8 attention_status;
u8 latch_status;
u8 adapter_status;
- enum pci_bus_speed max_bus_speed;
- enum pci_bus_speed cur_bus_speed;
};
/**
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 8c181738d552..a12f25a35450 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2530,11 +2530,30 @@
#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
+#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
+#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
+#define PCI_DEVICE_ID_INTEL_I7_MC_TEST 0x2c1c
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_CTRL 0x2c20
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_ADDR 0x2c21
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_RANK 0x2c22
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH0_TC 0x2c23
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_CTRL 0x2c28
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_ADDR 0x2c29
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_RANK 0x2c2a
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH1_TC 0x2c2b
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_CTRL 0x2c30
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
+#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
#define PCI_DEVICE_ID_INTEL_IOAT_TBG6 0x342b
#define PCI_DEVICE_ID_INTEL_IOAT_TBG7 0x342c
+#define PCI_DEVICE_ID_INTEL_X58_HUB_MGMT 0x342e
#define PCI_DEVICE_ID_INTEL_IOAT_TBG0 0x3430
#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 5a5d6ce4bd55..68567c0b3a5d 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -2,12 +2,6 @@
#define _LINUX_PERCPU_DEFS_H
/*
- * Determine the real variable name from the name visible in the
- * kernel sources.
- */
-#define per_cpu_var(var) per_cpu__##var
-
-/*
* Base implementations of per-CPU variable declarations and definitions, where
* the section in which the variable is to be placed is provided by the
* 'sec' argument. This may be used to affect the parameters governing the
@@ -18,13 +12,23 @@
* that section.
*/
#define __PCPU_ATTRS(sec) \
- __attribute__((section(PER_CPU_BASE_SECTION sec))) \
+ __percpu __attribute__((section(PER_CPU_BASE_SECTION sec))) \
PER_CPU_ATTRIBUTES
#define __PCPU_DUMMY_ATTRS \
__attribute__((section(".discard"), unused))
/*
+ * Macro which verifies @ptr is a percpu pointer without evaluating
+ * @ptr. This is to be used in percpu accessors to verify that the
+ * input parameter is a percpu pointer.
+ */
+#define __verify_pcpu_ptr(ptr) do { \
+ const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
+ (void)__vpp_verify; \
+} while (0)
+
+/*
* s390 and alpha modules require percpu variables to be defined as
* weak to force the compiler to generate GOT based external
* references for them. This is necessary because percpu sections
@@ -56,24 +60,24 @@
*/
#define DECLARE_PER_CPU_SECTION(type, name, sec) \
extern __PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
- extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
- __typeof__(type) per_cpu__##name
+ __typeof__(type) name
#else
/*
* Normal declaration and definition macros.
*/
#define DECLARE_PER_CPU_SECTION(type, name, sec) \
- extern __PCPU_ATTRS(sec) __typeof__(type) per_cpu__##name
+ extern __PCPU_ATTRS(sec) __typeof__(type) name
#define DEFINE_PER_CPU_SECTION(type, name, sec) \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES \
- __typeof__(type) per_cpu__##name
+ __typeof__(type) name
#endif
/*
@@ -135,10 +139,16 @@
__aligned(PAGE_SIZE)
/*
- * Intermodule exports for per-CPU variables.
+ * Intermodule exports for per-CPU variables. sparse forgets about
+ * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
+ * noop if __CHECKER__.
*/
-#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
-#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
-
+#ifndef __CHECKER__
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(var)
+#else
+#define EXPORT_PER_CPU_SYMBOL(var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var)
+#endif
#endif /* _LINUX_PERCPU_DEFS_H */
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index cf5efbcf716c..a93e5bfdccb8 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -27,10 +27,17 @@
* we force a syntax error here if it isn't.
*/
#define get_cpu_var(var) (*({ \
- extern int simple_identifier_##var(void); \
preempt_disable(); \
&__get_cpu_var(var); }))
-#define put_cpu_var(var) preempt_enable()
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var) do { \
+ (void)&(var); \
+ preempt_enable(); \
+} while (0)
#ifdef CONFIG_SMP
@@ -127,9 +134,9 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
*/
#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
-extern void *__alloc_reserved_percpu(size_t size, size_t align);
-extern void *__alloc_percpu(size_t size, size_t align);
-extern void free_percpu(void *__pdata);
+extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern void __percpu *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void __percpu *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
@@ -140,7 +147,7 @@ extern void __init setup_per_cpu_areas(void);
#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
-static inline void *__alloc_percpu(size_t size, size_t align)
+static inline void __percpu *__alloc_percpu(size_t size, size_t align)
{
/*
* Can't easily make larger alignment work with kmalloc. WARN
@@ -151,7 +158,7 @@ static inline void *__alloc_percpu(size_t size, size_t align)
return kzalloc(size, GFP_KERNEL);
}
-static inline void free_percpu(void *p)
+static inline void free_percpu(void __percpu *p)
{
kfree(p);
}
@@ -171,7 +178,7 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#endif /* CONFIG_SMP */
#define alloc_percpu(type) \
- (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
+ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
/*
* Optional methods for optimized non-lvalue per-cpu variable access.
@@ -188,17 +195,19 @@ static inline void *pcpu_lpage_remapped(void *kaddr)
#ifndef percpu_read
# define percpu_read(var) \
({ \
- typeof(per_cpu_var(var)) __tmp_var__; \
- __tmp_var__ = get_cpu_var(var); \
- put_cpu_var(var); \
- __tmp_var__; \
+ typeof(var) *pr_ptr__ = &(var); \
+ typeof(var) pr_ret__; \
+ pr_ret__ = get_cpu_var(*pr_ptr__); \
+ put_cpu_var(*pr_ptr__); \
+ pr_ret__; \
})
#endif
#define __percpu_generic_to_op(var, val, op) \
do { \
- get_cpu_var(var) op val; \
- put_cpu_var(var); \
+ typeof(var) *pgto_ptr__ = &(var); \
+ get_cpu_var(*pgto_ptr__) op val; \
+ put_cpu_var(*pgto_ptr__); \
} while (0)
#ifndef percpu_write
@@ -234,6 +243,7 @@ extern void __bad_size_call_parameter(void);
#define __pcpu_size_call_return(stem, variable) \
({ typeof(variable) pscr_ret__; \
+ __verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: pscr_ret__ = stem##1(variable);break; \
case 2: pscr_ret__ = stem##2(variable);break; \
@@ -247,6 +257,7 @@ extern void __bad_size_call_parameter(void);
#define __pcpu_size_call(stem, variable, ...) \
do { \
+ __verify_pcpu_ptr(&(variable)); \
switch(sizeof(variable)) { \
case 1: stem##1(variable, __VA_ARGS__);break; \
case 2: stem##2(variable, __VA_ARGS__);break; \
@@ -259,8 +270,7 @@ do { \
/*
* Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables (can be determined
- * using per_cpu_var(xx).
+ * allocator or for addresses of per cpu variables.
*
* These operation guarantee exclusivity of access for other operations
* on the *same* processor. The assumption is that per cpu data is only
@@ -311,7 +321,7 @@ do { \
#define _this_cpu_generic_to_op(pcp, val, op) \
do { \
preempt_disable(); \
- *__this_cpu_ptr(&pcp) op val; \
+ *__this_cpu_ptr(&(pcp)) op val; \
preempt_enable(); \
} while (0)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c66b34f75eea..9a1d276db754 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -658,7 +658,7 @@ struct perf_event {
perf_overflow_handler_t overflow_handler;
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
struct event_filter *filter;
#endif
@@ -746,10 +746,10 @@ extern int perf_max_events;
extern const struct pmu *hw_perf_event_init(struct perf_event *event);
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
extern void perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+ struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
@@ -870,12 +870,12 @@ extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
#else
static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu) { }
+perf_event_task_sched_in(struct task_struct *task) { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu) { }
+ struct task_struct *next) { }
static inline void
-perf_event_task_tick(struct task_struct *task, int cpu) { }
+perf_event_task_tick(struct task_struct *task) { }
static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
diff --git a/include/linux/phy.h b/include/linux/phy.h
index b1368b8f6572..7968defd2fa7 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -447,6 +447,7 @@ struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
int phy_device_register(struct phy_device *phy);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+int phy_init_hw(struct phy_device *phydev);
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 198b8f9fe05e..20aeee88fd54 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -26,6 +26,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/timer.h>
+#include <linux/completion.h>
/*
* Callbacks for platform drivers to implement.
@@ -412,9 +413,11 @@ struct dev_pm_info {
pm_message_t power_state;
unsigned int can_wakeup:1;
unsigned int should_wakeup:1;
+ unsigned async_suspend:1;
enum dpm_state status; /* Owned by the PM core */
#ifdef CONFIG_PM_SLEEP
struct list_head entry;
+ struct completion completion;
#endif
#ifdef CONFIG_PM_RUNTIME
struct timer_list suspend_timer;
diff --git a/include/linux/pm_qos_params.h b/include/linux/pm_qos_params.h
index d74f75ed1e47..091c13c742f0 100644
--- a/include/linux/pm_qos_params.h
+++ b/include/linux/pm_qos_params.h
@@ -10,8 +10,9 @@
#define PM_QOS_CPU_DMA_LATENCY 1
#define PM_QOS_NETWORK_LATENCY 2
#define PM_QOS_NETWORK_THROUGHPUT 3
+#define PM_QOS_SYSTEM_BUS_FREQ 4
-#define PM_QOS_NUM_CLASSES 4
+#define PM_QOS_NUM_CLASSES 5
#define PM_QOS_DEFAULT_VALUE -1
int pm_qos_add_requirement(int qos, char *name, s32 value);
diff --git a/include/linux/poison.h b/include/linux/poison.h
index 7fc194aef8c2..2110a81c5e2a 100644
--- a/include/linux/poison.h
+++ b/include/linux/poison.h
@@ -2,13 +2,25 @@
#define _LINUX_POISON_H
/********** include/linux/list.h **********/
+
+/*
+ * Architectures might want to move the poison pointer offset
+ * into some well-recognized area such as 0xdead000000000000,
+ * that is also not mappable by user-space exploits:
+ */
+#ifdef CONFIG_ILLEGAL_POINTER_VALUE
+# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
+#else
+# define POISON_POINTER_DELTA 0
+#endif
+
/*
* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
+#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
+#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
/********** include/linux/timer.h **********/
/*
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 4f71bf4e628c..3e23844a6990 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -117,6 +117,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
long clock_nanosleep_restart(struct restart_block *restart_block);
-void update_rlimit_cpu(unsigned long rlim_new);
+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 030d92255c7a..28c9fd020d39 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -89,8 +89,9 @@
* REGULATION_OUT Regulator output is out of regulation.
* FAIL Regulator output has failed.
* OVER_TEMP Regulator over temp.
- * FORCE_DISABLE Regulator shut down by software.
+ * FORCE_DISABLE Regulator forcibly shut down by software.
* VOLTAGE_CHANGE Regulator voltage changed.
+ * DISABLE Regulator was disabled.
*
* NOTE: These events can be OR'ed together when passed into handler.
*/
@@ -102,6 +103,7 @@
#define REGULATOR_EVENT_OVER_TEMP 0x10
#define REGULATOR_EVENT_FORCE_DISABLE 0x20
#define REGULATOR_EVENT_VOLTAGE_CHANGE 0x40
+#define REGULATOR_EVENT_DISABLE 0x80
struct regulator;
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 31f2055eae28..592cd7c642c2 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -58,6 +58,9 @@ enum regulator_status {
* @get_optimum_mode: Get the most efficient operating mode for the regulator
* when running with the specified parameters.
*
+ * @enable_time: Time taken for the regulator voltage output voltage to
+ * stabalise after being enabled, in microseconds.
+ *
* @set_suspend_voltage: Set the voltage for the regulator when the system
* is suspended.
* @set_suspend_enable: Mark the regulator as enabled when the system is
@@ -93,6 +96,9 @@ struct regulator_ops {
int (*set_mode) (struct regulator_dev *, unsigned int mode);
unsigned int (*get_mode) (struct regulator_dev *);
+ /* Time taken to enable the regulator */
+ int (*enable_time) (struct regulator_dev *);
+
/* report regulator status ... most other accessors report
* control inputs, this reports results of combining inputs
* from Linux (and other sources) with the actual load.
diff --git a/include/linux/resource.h b/include/linux/resource.h
index f1e914eefeab..cf8dc96653ee 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -73,6 +73,8 @@ struct rlimit {
struct task_struct;
int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
+int do_setrlimit(struct task_struct *tsk, unsigned int resource,
+ struct rlimit *new_rlim);
#endif /* __KERNEL__ */
diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h
index c9ba2fdf807d..bc8c3881c729 100644
--- a/include/linux/resume-trace.h
+++ b/include/linux/resume-trace.h
@@ -6,6 +6,11 @@
extern int pm_trace_enabled;
+static inline int pm_trace_is_enabled(void)
+{
+ return pm_trace_enabled;
+}
+
struct device;
extern void set_trace_device(struct device *);
extern void generate_resume_trace(const void *tracedata, unsigned int user);
@@ -17,6 +22,8 @@ extern void generate_resume_trace(const void *tracedata, unsigned int user);
#else
+static inline int pm_trace_is_enabled(void) { return 0; }
+
#define TRACE_DEVICE(dev) do { } while (0)
#define TRACE_RESUME(dev) do { } while (0)
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 05330fc5b436..9590364fe8b5 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -362,6 +362,8 @@ enum {
#define RTAX_FEATURES RTAX_FEATURES
RTAX_RTO_MIN,
#define RTAX_RTO_MIN RTAX_RTO_MIN
+ RTAX_INITRWND,
+#define RTAX_INITRWND RTAX_INITRWND
__RTAX_MAX
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8d4991be9d53..112c621ebbf4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
static inline void touch_softlockup_watchdog(void)
{
}
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
static inline void touch_all_softlockup_watchdogs(void)
{
}
diff --git a/include/linux/security.h b/include/linux/security.h
index 2c627d361c02..8e8bff8aaa9d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -23,6 +23,7 @@
#define __LINUX_SECURITY_H
#include <linux/fs.h>
+#include <linux/fsnotify.h>
#include <linux/binfmts.h>
#include <linux/signal.h>
#include <linux/resource.h>
@@ -985,6 +986,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Check permissions on incoming network packets. This hook is distinct
* from Netfilter's IP input hooks since it is the first time that the
* incoming sk_buff @skb has been associated with a particular socket, @sk.
+ * Must not sleep inside this hook because some callers hold spinlocks.
* @sk contains the sock (not socket) associated with the incoming sk_buff.
* @skb contains the incoming network data.
* @socket_getpeersec_stream:
@@ -1598,7 +1600,8 @@ struct security_operations {
int (*task_setnice) (struct task_struct *p, int nice);
int (*task_setioprio) (struct task_struct *p, int ioprio);
int (*task_getioprio) (struct task_struct *p);
- int (*task_setrlimit) (unsigned int resource, struct rlimit *new_rlim);
+ int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim);
int (*task_setscheduler) (struct task_struct *p, int policy,
struct sched_param *lp);
int (*task_getscheduler) (struct task_struct *p);
@@ -1863,7 +1866,8 @@ int security_task_setgroups(struct group_info *group_info);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
-int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim);
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim);
int security_task_setscheduler(struct task_struct *p,
int policy, struct sched_param *lp);
int security_task_getscheduler(struct task_struct *p);
@@ -2479,7 +2483,8 @@ static inline int security_task_getioprio(struct task_struct *p)
return 0;
}
-static inline int security_task_setrlimit(unsigned int resource,
+static inline int security_task_setrlimit(struct task_struct *p,
+ unsigned int resource,
struct rlimit *new_rlim)
{
return 0;
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 8c3dd36fe91a..d40db835cb05 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -246,6 +246,7 @@ struct uart_ops {
#endif
};
+#define NO_POLL_CHAR 0x00ff0000
#define UART_CONFIG_TYPE (1 << 0)
#define UART_CONFIG_IRQ (1 << 1)
diff --git a/include/linux/serio.h b/include/linux/serio.h
index e2f3044d4a4a..64b473066b9a 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -30,7 +30,6 @@ struct serio {
char phys[32];
bool manual_bind;
- bool registered; /* port has been fully registered with driver core */
struct serio_device_id id;
@@ -136,25 +135,6 @@ static inline void serio_continue_rx(struct serio *serio)
spin_unlock_irq(&serio->lock);
}
-/*
- * Use the following functions to pin serio's driver in process context
- */
-static inline int serio_pin_driver(struct serio *serio)
-{
- return mutex_lock_interruptible(&serio->drv_mutex);
-}
-
-static inline void serio_pin_driver_uninterruptible(struct serio *serio)
-{
- mutex_lock(&serio->drv_mutex);
-}
-
-static inline void serio_unpin_driver(struct serio *serio)
-{
- mutex_unlock(&serio->drv_mutex);
-}
-
-
#endif
/*
diff --git a/include/linux/shm_signal.h b/include/linux/shm_signal.h
new file mode 100644
index 000000000000..b2efd72669fb
--- /dev/null
+++ b/include/linux/shm_signal.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_SHM_SIGNAL_H
+#define _LINUX_SHM_SIGNAL_H
+
+#include <linux/types.h>
+
+/*
+ *---------
+ * The following structures represent data that is shared across boundaries
+ * which may be quite disparate from one another (e.g. Windows vs Linux,
+ * 32 vs 64 bit, etc). Therefore, care has been taken to make sure they
+ * present data in a manner that is independent of the environment.
+ *-----------
+ */
+
+#define SHM_SIGNAL_MAGIC cpu_to_le32(0x58fa39df)
+#define SHM_SIGNAL_VER cpu_to_le32(1)
+
+struct shm_signal_irq {
+ __u8 enabled;
+ __u8 pending;
+ __u8 dirty;
+};
+
+enum shm_signal_locality {
+ shm_locality_north,
+ shm_locality_south,
+};
+
+struct shm_signal_desc {
+ __le32 magic;
+ __le32 ver;
+ struct shm_signal_irq irq[2];
+};
+
+/* --- END SHARED STRUCTURES --- */
+
+#ifdef __KERNEL__
+
+#include <linux/kref.h>
+#include <linux/interrupt.h>
+
+struct shm_signal_notifier {
+ void (*signal)(struct shm_signal_notifier *);
+};
+
+struct shm_signal;
+
+struct shm_signal_ops {
+ int (*inject)(struct shm_signal *s);
+ void (*fault)(struct shm_signal *s, const char *fmt, ...);
+ void (*release)(struct shm_signal *s);
+};
+
+enum {
+ shm_signal_in_wakeup,
+};
+
+struct shm_signal {
+ struct kref kref;
+ spinlock_t lock;
+ enum shm_signal_locality locale;
+ unsigned long flags;
+ struct shm_signal_ops *ops;
+ struct shm_signal_desc *desc;
+ struct shm_signal_notifier *notifier;
+ struct tasklet_struct deferred_notify;
+};
+
+#define SHM_SIGNAL_FAULT(s, fmt, args...) \
+ ((s)->ops->fault ? (s)->ops->fault((s), fmt, ## args) : panic(fmt, ## args))
+
+ /*
+ * These functions should only be used internally
+ */
+void _shm_signal_release(struct kref *kref);
+void _shm_signal_wakeup(struct shm_signal *s);
+
+/**
+ * shm_signal_init() - initialize an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ *
+ * Initializes SHM_SIGNAL context before first use
+ *
+ **/
+void shm_signal_init(struct shm_signal *s, enum shm_signal_locality locale,
+ struct shm_signal_ops *ops, struct shm_signal_desc *desc);
+
+/**
+ * shm_signal_get() - acquire an SHM_SIGNAL context reference
+ * @s: SHM_SIGNAL context
+ *
+ **/
+static inline struct shm_signal *shm_signal_get(struct shm_signal *s)
+{
+ kref_get(&s->kref);
+
+ return s;
+}
+
+/**
+ * shm_signal_put() - release an SHM_SIGNAL context reference
+ * @s: SHM_SIGNAL context
+ *
+ **/
+static inline void shm_signal_put(struct shm_signal *s)
+{
+ kref_put(&s->kref, _shm_signal_release);
+}
+
+/**
+ * shm_signal_enable() - enables local notifications on an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Enables/unmasks the registered notifier (if applicable) to receive wakeups
+ * whenever the remote side performs an shm_signal() operation. A notification
+ * will be dispatched immediately if any pending signals have already been
+ * issued prior to invoking this call.
+ *
+ * This is synonymous with unmasking an interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_enable(struct shm_signal *s, int flags);
+
+/**
+ * shm_signal_disable() - disable local notifications on an SHM_SIGNAL
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Disables/masks the registered shm_signal_notifier (if applicable) from
+ * receiving any further notifications. Any subsequent calls to shm_signal()
+ * by the remote side will update the shm as dirty, but will not traverse the
+ * locale boundary and will not invoke the notifier callback. Signals
+ * delivered while masked will be deferred until shm_signal_enable() is
+ * invoked.
+ *
+ * This is synonymous with masking an interrupt
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_disable(struct shm_signal *s, int flags);
+
+/**
+ * shm_signal_inject() - notify the remote side about shm changes
+ * @s: SHM_SIGNAL context
+ * @flags: Reserved for future use, must be 0
+ *
+ * Marks the shm state as "dirty" and, if enabled, will traverse
+ * a locale boundary to inject a remote notification. The remote
+ * side controls whether the notification should be delivered via
+ * the shm_signal_enable/disable() interface.
+ *
+ * The specifics of how to traverse a locale boundary are abstracted
+ * by the shm_signal_ops->signal() interface and provided by a particular
+ * implementation. However, typically going north to south would be
+ * something like a syscall/hypercall, and going south to north would be
+ * something like a posix-signal/guest-interrupt.
+ *
+ * Returns: success = 0, <0 = ERRNO
+ *
+ **/
+int shm_signal_inject(struct shm_signal *s, int flags);
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_SHM_SIGNAL_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 1e14beb23f9b..0249d4175bac 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -38,8 +38,6 @@ struct kmem_cache_cpu {
void **freelist; /* Pointer to first free per cpu object */
struct page *page; /* The slab from which we are allocating */
int node; /* The node of the page (or -1 for debug) */
- unsigned int offset; /* Freepointer offset (in word units) */
- unsigned int objsize; /* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
@@ -69,6 +67,7 @@ struct kmem_cache_order_objects {
* Slab cache management.
*/
struct kmem_cache {
+ struct kmem_cache_cpu *cpu_slab;
/* Used for retriving partial slabs etc */
unsigned long flags;
int size; /* The size of an object including meta data */
@@ -104,11 +103,6 @@ struct kmem_cache {
int remote_node_defrag_ratio;
struct kmem_cache_node *node[MAX_NUMNODES];
#endif
-#ifdef CONFIG_SMP
- struct kmem_cache_cpu *cpu_slab[NR_CPUS];
-#else
- struct kmem_cache_cpu cpu_slab;
-#endif
};
/*
@@ -135,11 +129,21 @@ struct kmem_cache {
#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+#ifdef CONFIG_ZONE_DMA
+#define SLUB_DMA __GFP_DMA
+/* Reserve extra caches for potential DMA use */
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#else
+/* Disable DMA functionality */
+#define SLUB_DMA (__force gfp_t)0
+#define KMALLOC_CACHES SLUB_PAGE_SHIFT
+#endif
+
/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
+extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
/*
* Sorry that the following has to be that ugly but some versions of GCC
@@ -207,13 +211,6 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
return &kmalloc_caches[index];
}
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
new file mode 100644
index 000000000000..32bfd1a8a48d
--- /dev/null
+++ b/include/linux/stmmac.h
@@ -0,0 +1,53 @@
+/*******************************************************************************
+
+ Header file for stmmac platform data
+
+ Copyright (C) 2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __STMMAC_PLATFORM_DATA
+#define __STMMAC_PLATFORM_DATA
+
+/* platfrom data for platfrom device structure's platfrom_data field */
+
+/* Private data for the STM on-board ethernet driver */
+struct plat_stmmacenet_data {
+ int bus_id;
+ int pbl;
+ int has_gmac;
+ void (*fix_mac_speed)(void *priv, unsigned int speed);
+ void (*bus_setup)(unsigned long ioaddr);
+#ifdef CONFIG_STM_DRIVERS
+ struct stm_pad_config *pad_config;
+#endif
+ void *bsp_priv;
+};
+
+struct plat_stmmacphy_data {
+ int bus_id;
+ int phy_addr;
+ unsigned int phy_mask;
+ int interface;
+ int (*phy_reset)(void *priv);
+ void *priv;
+};
+#endif
+
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a2602a8207a6..c326282c4623 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -312,6 +312,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
/* linux/mm/swapfile.c */
extern long nr_swap_pages;
extern long total_swap_pages;
+extern void __si_swapinfo(struct sysinfo *);
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
@@ -377,6 +378,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
+#define __si_swapinfo(val) si_swapinfo(val)
/* only sparc can not include linux/pagemap.h in this file
* so leave page_cache_release and release_pages undeclared... */
#define free_page_and_swap_cache(page) \
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 207466a49f3d..ee79a8de66d9 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -99,7 +99,7 @@ struct perf_event_attr;
#define __SC_TEST5(t5, a5, ...) __SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
#define __SC_TEST6(t6, a6, ...) __SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
#define TRACE_SYS_ENTER_PROFILE_INIT(sname) \
.profile_enable = prof_sysenter_enable, \
@@ -113,7 +113,7 @@ struct perf_event_attr;
#define TRACE_SYS_ENTER_PROFILE_INIT(sname)
#define TRACE_SYS_EXIT_PROFILE(sname)
#define TRACE_SYS_EXIT_PROFILE_INIT(sname)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
#ifdef CONFIG_FTRACE_SYSCALLS
#define __SC_STR_ADECL1(t, a) #a
@@ -655,11 +655,15 @@ asmlinkage long sys_newuname(struct new_utsname __user *name);
asmlinkage long sys_getrlimit(unsigned int resource,
struct rlimit __user *rlim);
+asmlinkage long sys_getprlimit(pid_t pid, unsigned int resource,
+ struct rlimit __user *rlim);
#if defined(COMPAT_RLIM_OLD_INFINITY) || !(defined(CONFIG_IA64))
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim);
#endif
asmlinkage long sys_setrlimit(unsigned int resource,
struct rlimit __user *rlim);
+asmlinkage long sys_setprlimit(pid_t pid, unsigned int resource,
+ struct rlimit __user *rlim);
asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
asmlinkage long sys_umask(int mask);
@@ -825,6 +829,11 @@ asmlinkage long sys_pselect6(int, fd_set __user *, fd_set __user *,
asmlinkage long sys_ppoll(struct pollfd __user *, unsigned int,
struct timespec __user *, const sigset_t __user *,
size_t);
+asmlinkage long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags,
+ unsigned int priority);
+asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags,
+ u64 mask, int fd,
+ const char __user *pathname);
int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index bd27fbc9db62..9f236cdcf3fe 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -483,6 +483,7 @@ enum
NET_IPV4_CONF_ARP_NOTIFY=22,
NET_IPV4_CONF_ACCEPT_LOCAL=23,
NET_IPV4_CONF_SRC_VMARK=24,
+ NET_IPV4_CONF_PROXY_ARP_PVLAN=25,
__NET_IPV4_CONF_MAX
};
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 6b58367d145e..d512d98dfb7d 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -94,6 +94,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, void *src, size_t size);
+extern long __probe_kernel_read(void *dst, void *src, size_t size);
/*
* probe_kernel_write(): safely attempt to write to a location
@@ -104,6 +105,7 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-extern long probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
+extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
#endif /* __LINUX_UACCESS_H__ */
diff --git a/include/linux/vbus_driver.h b/include/linux/vbus_driver.h
new file mode 100644
index 000000000000..8a7acb1a7a05
--- /dev/null
+++ b/include/linux/vbus_driver.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Mediates access to a host VBUS from a guest kernel by providing a
+ * global view of all VBUS devices
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VBUS_DRIVER_H
+#define _LINUX_VBUS_DRIVER_H
+
+#include <linux/device.h>
+#include <linux/shm_signal.h>
+#include <linux/ioq.h>
+
+struct vbus_device_proxy;
+struct vbus_driver;
+
+struct vbus_device_proxy_ops {
+ int (*open)(struct vbus_device_proxy *dev, int version, int flags);
+ int (*close)(struct vbus_device_proxy *dev, int flags);
+ int (*shm)(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio,
+ void *ptr, size_t len,
+ struct shm_signal_desc *sigdesc, struct shm_signal **signal,
+ int flags);
+ int (*call)(struct vbus_device_proxy *dev, u32 func,
+ void *data, size_t len, int flags);
+ void (*release)(struct vbus_device_proxy *dev);
+};
+
+struct vbus_device_proxy {
+ char *type;
+ u64 id;
+ void *priv; /* Used by drivers */
+ struct vbus_device_proxy_ops *ops;
+ struct device dev;
+};
+
+int vbus_device_proxy_register(struct vbus_device_proxy *dev);
+void vbus_device_proxy_unregister(struct vbus_device_proxy *dev);
+
+struct vbus_device_proxy *vbus_device_proxy_find(u64 id);
+
+struct vbus_driver_ops {
+ int (*probe)(struct vbus_device_proxy *dev);
+ int (*remove)(struct vbus_device_proxy *dev);
+};
+
+struct vbus_driver {
+ char *type;
+ struct module *owner;
+ struct vbus_driver_ops *ops;
+ struct device_driver drv;
+};
+
+int vbus_driver_register(struct vbus_driver *drv);
+void vbus_driver_unregister(struct vbus_driver *drv);
+
+/*
+ * driver-side IOQ helper - allocates device-shm and maps an IOQ on it
+ */
+int vbus_driver_ioq_alloc(struct vbus_device_proxy *dev, const char *name,
+ int id, int prio, size_t ringsize, struct ioq **ioq);
+
+#define VBUS_DRIVER_AUTOPROBE(name) MODULE_ALIAS("vbus-proxy:" name)
+
+#endif /* _LINUX_VBUS_DRIVER_H */
diff --git a/include/linux/vbus_pci.h b/include/linux/vbus_pci.h
new file mode 100644
index 000000000000..fe337590e644
--- /dev/null
+++ b/include/linux/vbus_pci.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * PCI to Virtual-Bus Bridge
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VBUS_PCI_H
+#define _LINUX_VBUS_PCI_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define VBUS_PCI_ABI_MAGIC 0xbf53eef5
+#define VBUS_PCI_ABI_VERSION 2
+#define VBUS_PCI_HC_VERSION 1
+
+enum {
+ VBUS_PCI_BRIDGE_NEGOTIATE,
+ VBUS_PCI_BRIDGE_QREG,
+ VBUS_PCI_BRIDGE_SLOWCALL,
+ VBUS_PCI_BRIDGE_FASTCALL_ADD,
+ VBUS_PCI_BRIDGE_FASTCALL_DROP,
+
+ VBUS_PCI_BRIDGE_MAX, /* must be last */
+};
+
+enum {
+ VBUS_PCI_HC_DEVOPEN,
+ VBUS_PCI_HC_DEVCLOSE,
+ VBUS_PCI_HC_DEVCALL,
+ VBUS_PCI_HC_DEVSHM,
+
+ VBUS_PCI_HC_MAX, /* must be last */
+};
+
+struct vbus_pci_bridge_negotiate {
+ __u32 magic;
+ __u32 version;
+ __u64 capabilities;
+};
+
+struct vbus_pci_deviceopen {
+ __u32 devid;
+ __u32 version; /* device ABI version */
+ __u64 handle; /* return value for devh */
+};
+
+struct vbus_pci_devicecall {
+ __u64 devh; /* device-handle (returned from DEVICEOPEN */
+ __u32 func;
+ __u32 len;
+ __u32 flags;
+ __u64 datap;
+};
+
+struct vbus_pci_deviceshm {
+ __u64 devh; /* device-handle (returned from DEVICEOPEN */
+ __u32 id;
+ __u32 len;
+ __u32 flags;
+ struct {
+ __u32 offset;
+ __u32 prio;
+ __u64 cookie; /* token to pass back when signaling client */
+ } signal;
+ __u64 datap;
+};
+
+struct vbus_pci_call_desc {
+ __u32 vector;
+ __u32 len;
+ __u64 datap;
+};
+
+struct vbus_pci_fastcall_desc {
+ struct vbus_pci_call_desc call;
+ __u32 result;
+};
+
+struct vbus_pci_regs {
+ struct vbus_pci_call_desc bridgecall;
+ __u8 pad[48];
+};
+
+struct vbus_pci_signals {
+ __u32 eventq;
+ __u32 fastcall;
+ __u32 shmsignal;
+ __u8 pad[20];
+};
+
+struct vbus_pci_eventqreg {
+ __u32 count;
+ __u64 ring;
+ __u64 data;
+};
+
+struct vbus_pci_busreg {
+ __u32 count; /* supporting multiple queues allows for prio, etc */
+ struct vbus_pci_eventqreg eventq[1];
+};
+
+enum vbus_pci_eventid {
+ VBUS_PCI_EVENT_DEVADD,
+ VBUS_PCI_EVENT_DEVDROP,
+ VBUS_PCI_EVENT_SHMSIGNAL,
+ VBUS_PCI_EVENT_SHMCLOSE,
+};
+
+#define VBUS_MAX_DEVTYPE_LEN 128
+
+struct vbus_pci_add_event {
+ __u64 id;
+ char type[VBUS_MAX_DEVTYPE_LEN];
+};
+
+struct vbus_pci_handle_event {
+ __u64 handle;
+};
+
+struct vbus_pci_event {
+ __u32 eventid;
+ union {
+ struct vbus_pci_add_event add;
+ struct vbus_pci_handle_event handle;
+ } data;
+};
+
+#endif /* _LINUX_VBUS_PCI_H */
diff --git a/include/linux/venet.h b/include/linux/venet.h
new file mode 100644
index 000000000000..0578d797c973
--- /dev/null
+++ b/include/linux/venet.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * Virtual-Ethernet adapter
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef _LINUX_VENET_H
+#define _LINUX_VENET_H
+
+#include <linux/types.h>
+
+#define VENET_VERSION 1
+
+#define VENET_TYPE "virtual-ethernet"
+
+#define VENET_QUEUE_RX 0
+#define VENET_QUEUE_TX 1
+
+struct venet_capabilities {
+ __u32 gid;
+ __u32 bits;
+};
+
+#define VENET_CAP_GROUP_SG 0
+#define VENET_CAP_GROUP_EVENTQ 1
+#define VENET_CAP_GROUP_L4RO 2 /* layer-4 reassem offloading */
+
+/* CAPABILITIES-GROUP SG */
+#define VENET_CAP_SG (1 << 0)
+#define VENET_CAP_TSO4 (1 << 1)
+#define VENET_CAP_TSO6 (1 << 2)
+#define VENET_CAP_ECN (1 << 3)
+#define VENET_CAP_UFO (1 << 4)
+#define VENET_CAP_PMTD (1 << 5) /* pre-mapped tx desc */
+
+/* CAPABILITIES-GROUP EVENTQ */
+#define VENET_CAP_EVQ_LINKSTATE (1 << 0)
+#define VENET_CAP_EVQ_TXC (1 << 1) /* tx-complete */
+
+struct venet_iov {
+ __u32 len;
+ __u64 ptr;
+};
+
+#define VENET_SG_FLAG_NEEDS_CSUM (1 << 0)
+#define VENET_SG_FLAG_GSO (1 << 1)
+#define VENET_SG_FLAG_ECN (1 << 2)
+
+struct venet_sg {
+ __u64 cookie;
+ __u32 flags;
+ __u32 len; /* total length of all iovs */
+ struct {
+ __u16 start; /* csum starting position */
+ __u16 offset; /* offset to place csum */
+ } csum;
+ struct {
+#define VENET_GSO_TYPE_TCPV4 0 /* IPv4 TCP (TSO) */
+#define VENET_GSO_TYPE_UDP 1 /* IPv4 UDP (UFO) */
+#define VENET_GSO_TYPE_TCPV6 2 /* IPv6 TCP */
+ __u8 type;
+ __u16 hdrlen;
+ __u16 size;
+ } gso;
+ __u32 count; /* nr of iovs */
+ struct venet_iov iov[1];
+};
+
+struct venet_eventq_query {
+ __u32 flags;
+ __u32 evsize; /* size of each event */
+ __u32 dpid; /* descriptor pool-id */
+ __u32 qid;
+ __u8 pad[16];
+};
+
+#define VENET_EVENT_LINKSTATE 0
+#define VENET_EVENT_TXC 1
+
+struct venet_event_header {
+ __u32 flags;
+ __u32 size;
+ __u32 id;
+};
+
+struct venet_event_linkstate {
+ struct venet_event_header header;
+ __u8 state; /* 0 = down, 1 = up */
+};
+
+struct venet_event_txc {
+ struct venet_event_header header;
+ __u32 txqid;
+ __u64 cookie;
+};
+
+struct venet_l4ro_query {
+ __u32 flags;
+ __u32 dpid; /* descriptor pool-id */
+ __u32 pqid; /* page queue-id */
+ __u8 pad[20];
+};
+
+
+#define VSG_DESC_SIZE(count) (sizeof(struct venet_sg) + \
+ sizeof(struct venet_iov) * ((count) - 1))
+
+#define VENET_FUNC_LINKUP 0
+#define VENET_FUNC_LINKDOWN 1
+#define VENET_FUNC_MACQUERY 2
+#define VENET_FUNC_NEGCAP 3 /* negotiate capabilities */
+#define VENET_FUNC_FLUSHRX 4
+#define VENET_FUNC_PMTDQUERY 5
+#define VENET_FUNC_EVQQUERY 6
+#define VENET_FUNC_L4ROQUERY 7
+
+#endif /* _LINUX_VENET_H */
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index d4962a782b8a..c17fa68c94da 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -367,6 +367,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_OV511 v4l2_fourcc('O', '5', '1', '1') /* ov511 JPEG */
#define V4L2_PIX_FMT_OV518 v4l2_fourcc('O', '5', '1', '8') /* ov518 JPEG */
#define V4L2_PIX_FMT_STV0680 v4l2_fourcc('S', '6', '8', '0') /* stv0680 bayer */
+#define V4L2_PIX_FMT_TM6000 v4l2_fourcc('T', 'M', '6', '0') /* tm5600/tm60x0 */
/*
* F O R M A T E N U M E R A T I O N
diff --git a/include/linux/virtio_balloon.h b/include/linux/virtio_balloon.h
index 1418f048cb34..a50ecd1b81a2 100644
--- a/include/linux/virtio_balloon.h
+++ b/include/linux/virtio_balloon.h
@@ -7,6 +7,7 @@
/* The feature bitmap for virtio balloon */
#define VIRTIO_BALLOON_F_MUST_TELL_HOST 0 /* Tell before reclaiming pages */
+#define VIRTIO_BALLOON_F_STATS_VQ 1 /* Memory Stats virtqueue */
/* Size of a PFN in the balloon interface. */
#define VIRTIO_BALLOON_PFN_SHIFT 12
@@ -18,4 +19,18 @@ struct virtio_balloon_config
/* Number of pages we've actually got in balloon. */
__le32 actual;
};
+
+#define VIRTIO_BALLOON_S_SWAP_IN 0 /* Amount of memory swapped in */
+#define VIRTIO_BALLOON_S_SWAP_OUT 1 /* Amount of memory swapped out */
+#define VIRTIO_BALLOON_S_MAJFLT 2 /* Number of major faults */
+#define VIRTIO_BALLOON_S_MINFLT 3 /* Number of minor faults */
+#define VIRTIO_BALLOON_S_MEMFREE 4 /* Total amount of free memory */
+#define VIRTIO_BALLOON_S_MEMTOT 5 /* Total amount of memory */
+#define VIRTIO_BALLOON_S_NR 6
+
+struct virtio_balloon_stat {
+ u16 tag;
+ u64 val;
+} __attribute__((packed));
+
#endif /* _LINUX_VIRTIO_BALLOON_H */
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 0093dd7c1d6f..800617b4ddd5 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -109,7 +109,10 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
unsigned int fbit)
{
/* Did you forget to fix assumptions on max features? */
- MAYBE_BUILD_BUG_ON(fbit >= 32);
+ if (__builtin_constant_p(fbit))
+ BUILD_BUG_ON(fbit >= 32);
+ else
+ BUG_ON(fbit >= 32);
if (fbit < VIRTIO_TRANSPORT_F_START)
virtio_check_driver_offered_feature(vdev, fbit);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index ee03bba9c5df..117f0dd8ad03 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -78,22 +78,22 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
static inline void __count_vm_event(enum vm_event_item item)
{
- __this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+ __this_cpu_inc(vm_event_states.event[item]);
}
static inline void count_vm_event(enum vm_event_item item)
{
- this_cpu_inc(per_cpu_var(vm_event_states).event[item]);
+ this_cpu_inc(vm_event_states.event[item]);
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
- __this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+ __this_cpu_add(vm_event_states.event[item], delta);
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
- this_cpu_add(per_cpu_var(vm_event_states).event[item], delta);
+ this_cpu_add(vm_event_states.event[item], delta);
}
extern void all_vm_events(unsigned long *);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 76e8903cd204..822913968482 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -62,6 +62,11 @@ struct writeback_control {
* so we use a single control to update them
*/
unsigned no_nrwrite_index_update:1;
+ /*
+ * The following is used by balance_dirty_pages() to
+ * force NFS to commit unstable pages.
+ */
+ unsigned force_commit_unstable:1;
};
/*
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index f456534dcaf9..fd882261225e 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -29,7 +29,7 @@
_FP_FRAC_DECL_##wc(X)
/*
- * Finish truely unpacking a native fp value by classifying the kind
+ * Finish truly unpacking a native fp value by classifying the kind
* of fp value and normalizing both the exponent and the fraction.
*/
diff --git a/include/media/ir-common.h b/include/media/ir-common.h
index 2c6af24b905e..015db75b42f6 100644
--- a/include/media/ir-common.h
+++ b/include/media/ir-common.h
@@ -35,7 +35,7 @@
struct ir_input_state {
/* configuration */
- int ir_type;
+ u64 ir_type;
/* key info */
u32 ir_key; /* ir scancode */
@@ -84,7 +84,7 @@ struct card_ir {
/* Routines from ir-functions.c */
int ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
- int ir_type);
+ const u64 ir_type);
void ir_input_nokey(struct input_dev *dev, struct ir_input_state *ir);
void ir_input_keydown(struct input_dev *dev, struct ir_input_state *ir,
u32 ir_key);
diff --git a/include/media/ir-core.h b/include/media/ir-core.h
index 299d201e1339..61c223bc3953 100644
--- a/include/media/ir-core.h
+++ b/include/media/ir-core.h
@@ -21,13 +21,11 @@ extern int ir_core_debug;
#define IR_dprintk(level, fmt, arg...) if (ir_core_debug >= level) \
printk(KERN_DEBUG "%s: " fmt , __func__, ## arg)
-enum ir_type {
- IR_TYPE_UNKNOWN = 0,
- IR_TYPE_RC5 = 1,
- IR_TYPE_PD = 2, /* Pulse distance encoded IR */
- IR_TYPE_NEC = 3,
- IR_TYPE_OTHER = 99,
-};
+#define IR_TYPE_UNKNOWN 0
+#define IR_TYPE_RC5 (1 << 0) /* Philips RC5 protocol */
+#define IR_TYPE_PD (1 << 1) /* Pulse distance encoded IR */
+#define IR_TYPE_NEC (1 << 2)
+#define IR_TYPE_OTHER (((u64)1) << 63l)
struct ir_scancode {
u16 scancode;
@@ -37,26 +35,40 @@ struct ir_scancode {
struct ir_scancode_table {
struct ir_scancode *scan;
int size;
- enum ir_type ir_type;
+ u64 ir_type;
spinlock_t lock;
};
+struct ir_dev_props {
+ unsigned long allowed_protos;
+ void *priv;
+ int (*change_protocol)(void *priv, u64 ir_type);
+};
+
+
struct ir_input_dev {
- struct input_dev *dev;
- struct ir_scancode_table rc_tab;
+ struct input_dev *dev; /* Input device*/
+ struct ir_scancode_table rc_tab; /* scan/key table */
+ unsigned long devno; /* device number */
+ struct attribute_group attr; /* IR attributes */
+ struct device *class_dev; /* virtual class dev */
+ const struct ir_dev_props *props; /* Device properties */
};
+#define to_ir_input_dev(_attr) container_of(_attr, struct ir_input_dev, attr)
/* Routines from ir-keytable.c */
u32 ir_g_keycode_from_table(struct input_dev *input_dev,
u32 scancode);
-int ir_set_keycode_table(struct input_dev *input_dev,
- struct ir_scancode_table *rc_tab);
-
-int ir_roundup_tablesize(int n_elems);
int ir_input_register(struct input_dev *dev,
- struct ir_scancode_table *ir_codes);
+ const struct ir_scancode_table *ir_codes,
+ const struct ir_dev_props *props);
void ir_input_unregister(struct input_dev *input_dev);
+/* Routines from ir-sysfs.c */
+
+int ir_register_class(struct input_dev *input_dev);
+void ir_unregister_class(struct input_dev *input_dev);
+
#endif
diff --git a/include/media/ir-kbd-i2c.h b/include/media/ir-kbd-i2c.h
index aaf65e8b1a40..9142936603cc 100644
--- a/include/media/ir-kbd-i2c.h
+++ b/include/media/ir-kbd-i2c.h
@@ -36,7 +36,7 @@ enum ir_kbd_get_key_fn {
struct IR_i2c_init_data {
struct ir_scancode_table *ir_codes;
const char *name;
- int type; /* IR_TYPE_RC5, IR_TYPE_PD, etc */
+ u64 type; /* IR_TYPE_RC5, IR_TYPE_PD, etc */
/*
* Specify either a function pointer or a value indicating one of
* ir_kbd_i2c's internal get_key functions
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 0884b9a0f778..2af52704e670 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -39,8 +39,8 @@
* @IEEE80211_BAND_5GHZ: around 5GHz band (4.9-5.7)
*/
enum ieee80211_band {
- IEEE80211_BAND_2GHZ,
- IEEE80211_BAND_5GHZ,
+ IEEE80211_BAND_2GHZ = NL80211_BAND_2GHZ,
+ IEEE80211_BAND_5GHZ = NL80211_BAND_5GHZ,
/* keep last */
IEEE80211_NUM_BANDS
@@ -626,8 +626,14 @@ enum cfg80211_signal_type {
* @beacon_interval: the beacon interval as from the frame
* @capability: the capability field in host byte order
* @information_elements: the information elements (Note that there
- * is no guarantee that these are well-formed!)
+ * is no guarantee that these are well-formed!); this is a pointer to
+ * either the beacon_ies or proberesp_ies depending on whether Probe
+ * Response frame has been received
* @len_information_elements: total length of the information elements
+ * @beacon_ies: the information elements from the last Beacon frame
+ * @len_beacon_ies: total length of the beacon_ies
+ * @proberesp_ies: the information elements from the last Probe Response frame
+ * @len_proberesp_ies: total length of the proberesp_ies
* @signal: signal strength value (type depends on the wiphy's signal_type)
* @free_priv: function pointer to free private data
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
@@ -641,6 +647,10 @@ struct cfg80211_bss {
u16 capability;
u8 *information_elements;
size_t len_information_elements;
+ u8 *beacon_ies;
+ size_t len_beacon_ies;
+ u8 *proberesp_ies;
+ size_t len_proberesp_ies;
s32 signal;
@@ -837,6 +847,7 @@ enum wiphy_params_flags {
WIPHY_PARAM_RETRY_LONG = 1 << 1,
WIPHY_PARAM_FRAG_THRESHOLD = 1 << 2,
WIPHY_PARAM_RTS_THRESHOLD = 1 << 3,
+ WIPHY_PARAM_COVERAGE_CLASS = 1 << 4,
};
/**
@@ -856,20 +867,11 @@ enum tx_power_setting {
* cfg80211_bitrate_mask - masks for bitrate control
*/
struct cfg80211_bitrate_mask {
-/*
- * As discussed in Berlin, this struct really
- * should look like this:
-
struct {
u32 legacy;
- u8 mcs[IEEE80211_HT_MCS_MASK_LEN];
+ /* TODO: add support for masking MCS rates; e.g.: */
+ /* u8 mcs[IEEE80211_HT_MCS_MASK_LEN]; */
} control[IEEE80211_NUM_BANDS];
-
- * Since we can always fix in-kernel users, let's keep
- * it simpler for now:
- */
- u32 fixed; /* fixed bitrate, 0 == not fixed */
- u32 maxrate; /* in kbps, 0 == no limit */
};
/**
* struct cfg80211_pmksa - PMK Security Association
@@ -988,6 +990,15 @@ struct cfg80211_pmksa {
*
* @dump_survey: get site survey information.
*
+ * @remain_on_channel: Request the driver to remain awake on the specified
+ * channel for the specified duration to complete an off-channel
+ * operation (e.g., public action frame exchange). When the driver is
+ * ready on the requested channel, it must indicate this with an event
+ * notification by calling cfg80211_ready_on_channel().
+ * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation.
+ * This allows the operation to be terminated prior to timeout based on
+ * the duration value.
+ *
* @testmode_cmd: run a test mode command
*
* @set_pmksa: Cache a PMKID for a BSSID. This is mostly useful for fullmac
@@ -1123,6 +1134,16 @@ struct cfg80211_ops {
struct cfg80211_pmksa *pmksa);
int (*flush_pmksa)(struct wiphy *wiphy, struct net_device *netdev);
+ int (*remain_on_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration,
+ u64 *cookie);
+ int (*cancel_remain_on_channel)(struct wiphy *wiphy,
+ struct net_device *dev,
+ u64 cookie);
+
/* some temporary stuff to finish wext */
int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout);
@@ -1217,6 +1238,7 @@ struct wiphy {
u8 retry_long;
u32 frag_threshold;
u32 rts_threshold;
+ u8 coverage_class;
char fw_version[ETHTOOL_BUSINFO_LEN];
u32 hw_version;
@@ -1578,7 +1600,7 @@ unsigned int ieee80211_hdrlen(__le16 fc);
* @addr: the device MAC address
* @iftype: the virtual interface type
*/
-int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
+int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype);
/**
@@ -1589,10 +1611,28 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
* @bssid: the network bssid (used only for iftype STATION and ADHOC)
* @qos: build 802.11 QoS data frame
*/
-int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
+int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype, u8 *bssid, bool qos);
/**
+ * ieee80211_amsdu_to_8023s - decode an IEEE 802.11n A-MSDU frame
+ *
+ * Decode an IEEE 802.11n A-MSDU frame and convert it to a list of
+ * 802.3 frames. The @list will be empty if the decode fails. The
+ * @skb is consumed after the function returns.
+ *
+ * @skb: The input IEEE 802.11n A-MSDU frame.
+ * @list: The output list of 802.3 frames. It must be allocated and
+ * initialized by by the caller.
+ * @addr: The device MAC address.
+ * @iftype: The device interface type.
+ * @extra_headroom: The hardware extra headroom for SKBs in the @list.
+ */
+void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ const u8 *addr, enum nl80211_iftype iftype,
+ const unsigned int extra_headroom);
+
+/**
* cfg80211_classify8021d - determine the 802.1p/1d tag for a data frame
* @skb: the data frame
*/
@@ -2129,5 +2169,45 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
void cfg80211_disconnected(struct net_device *dev, u16 reason,
u8 *ie, size_t ie_len, gfp_t gfp);
+/**
+ * cfg80211_ready_on_channel - notification of remain_on_channel start
+ * @dev: network device
+ * @cookie: the request cookie
+ * @chan: The current channel (from remain_on_channel request)
+ * @channel_type: Channel type
+ * @duration: Duration in milliseconds that the driver intents to remain on the
+ * channel
+ * @gfp: allocation flags
+ */
+void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp);
+
+/**
+ * cfg80211_remain_on_channel_expired - remain_on_channel duration expired
+ * @dev: network device
+ * @cookie: the request cookie
+ * @chan: The current channel (from remain_on_channel request)
+ * @channel_type: Channel type
+ * @gfp: allocation flags
+ */
+void cfg80211_remain_on_channel_expired(struct net_device *dev,
+ u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ gfp_t gfp);
+
+
+/**
+ * cfg80211_new_sta - notify userspace about station
+ *
+ * @dev: the netdev
+ * @mac_addr: the station's address
+ * @sinfo: the station information
+ * @gfp: allocation flags
+ */
+void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp);
#endif /* __NET_CFG80211_H */
diff --git a/include/net/dst.h b/include/net/dst.h
index 39c4a5963e12..ce078cda6b74 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -83,8 +83,6 @@ struct dst_entry {
* (L1_CACHE_SIZE would be too much)
*/
#ifdef CONFIG_64BIT
- long __pad_to_align_refcnt[2];
-#else
long __pad_to_align_refcnt[1];
#endif
/*
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index bd4c53f75ac0..83fd34437cf1 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -122,10 +122,12 @@ struct inet_sock {
__be32 inet_saddr;
__s16 uc_ttl;
__u16 cmsg_flags;
- struct ip_options *opt;
__be16 inet_sport;
__u16 inet_id;
+
+ struct ip_options *opt;
__u8 tos;
+ __u8 min_ttl;
__u8 mc_ttl;
__u8 pmtudisc;
__u8 recverr:1,
diff --git a/include/net/ip.h b/include/net/ip.h
index 85108cfbb1ae..d9a0e74d8923 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -326,6 +326,22 @@ static __inline__ void inet_reset_saddr(struct sock *sk)
#endif
+static inline int sk_mc_loop(struct sock *sk)
+{
+ if (!sk)
+ return 1;
+ switch (sk->sk_family) {
+ case AF_INET:
+ return inet_sk(sk)->mc_loop;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6:
+ return inet6_sk(sk)->mc_loop;
+#endif
+ }
+ __WARN();
+ return 1;
+}
+
extern int ip_call_ra_chain(struct sk_buff *skb);
/*
diff --git a/include/net/llc.h b/include/net/llc.h
index 7940da1606e7..5503b74ab170 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -16,6 +16,9 @@
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/rculist_nulls.h>
+#include <linux/hash.h>
+#include <linux/jhash.h>
#include <asm/atomic.h>
@@ -31,6 +34,12 @@ struct llc_addr {
#define LLC_SAP_STATE_INACTIVE 1
#define LLC_SAP_STATE_ACTIVE 2
+#define LLC_SK_DEV_HASH_BITS 6
+#define LLC_SK_DEV_HASH_ENTRIES (1<<LLC_SK_DEV_HASH_BITS)
+
+#define LLC_SK_LADDR_HASH_BITS 6
+#define LLC_SK_LADDR_HASH_ENTRIES (1<<LLC_SK_LADDR_HASH_BITS)
+
/**
* struct llc_sap - Defines the SAP component
*
@@ -53,18 +62,38 @@ struct llc_sap {
struct net_device *orig_dev);
struct llc_addr laddr;
struct list_head node;
- struct {
- rwlock_t lock;
- struct hlist_head list;
- } sk_list;
+ spinlock_t sk_lock;
+ int sk_count;
+ struct hlist_nulls_head sk_laddr_hash[LLC_SK_LADDR_HASH_ENTRIES];
+ struct hlist_head sk_dev_hash[LLC_SK_DEV_HASH_ENTRIES];
};
+static inline
+struct hlist_head *llc_sk_dev_hash(struct llc_sap *sap, int ifindex)
+{
+ return &sap->sk_dev_hash[ifindex % LLC_SK_DEV_HASH_ENTRIES];
+}
+
+static inline
+u32 llc_sk_laddr_hashfn(struct llc_sap *sap, const struct llc_addr *laddr)
+{
+ return hash_32(jhash(laddr->mac, sizeof(laddr->mac), 0),
+ LLC_SK_LADDR_HASH_BITS);
+}
+
+static inline
+struct hlist_nulls_head *llc_sk_laddr_hash(struct llc_sap *sap,
+ const struct llc_addr *laddr)
+{
+ return &sap->sk_laddr_hash[llc_sk_laddr_hashfn(sap, laddr)];
+}
+
#define LLC_DEST_INVALID 0 /* Invalid LLC PDU type */
#define LLC_DEST_SAP 1 /* Type 1 goes here */
#define LLC_DEST_CONN 2 /* Type 2 goes here */
extern struct list_head llc_sap_list;
-extern rwlock_t llc_sap_list_lock;
+extern spinlock_t llc_sap_list_lock;
extern int llc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index e2374e34989f..2f97d8ddce92 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -76,6 +76,8 @@ struct llc_sock {
u32 rx_pdu_hdr; /* used for saving header of last pdu
received and caused sending FRMR.
Used for resending FRMR */
+ u32 cmsg_flags;
+ struct hlist_node dev_hash_node;
};
static inline struct llc_sock *llc_sk(const struct sock *sk)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 0bf369752274..f313a3cbabda 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -113,6 +113,7 @@ struct ieee80211_tx_queue_params {
u16 cw_min;
u16 cw_max;
u8 aifs;
+ bool uapsd;
};
/**
@@ -255,9 +256,6 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_CTL_RATE_CTRL_PROBE: internal to mac80211, can be
* set by rate control algorithms to indicate probe rate, will
* be cleared for fragmented frames (except on the last fragment)
- * @IEEE80211_TX_INTFL_RCALGO: mac80211 internal flag, do not test or
- * set this flag in the driver; indicates that the rate control
- * algorithm was used and should be notified of TX status
* @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
* used to indicate that a pending frame requires TX processing before
* it can be sent out.
@@ -287,7 +285,6 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_STAT_AMPDU = BIT(10),
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
- IEEE80211_TX_INTFL_RCALGO = BIT(13),
IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
@@ -595,8 +592,10 @@ enum ieee80211_conf_flags {
* @IEEE80211_CONF_CHANGE_CHANNEL: the channel/channel_type changed
* @IEEE80211_CONF_CHANGE_RETRY_LIMITS: retry limits changed
* @IEEE80211_CONF_CHANGE_IDLE: Idle flag changed
+ * @IEEE80211_CONF_CHANGE_SMPS: Spatial multiplexing powersave mode changed
*/
enum ieee80211_conf_changed {
+ IEEE80211_CONF_CHANGE_SMPS = BIT(1),
IEEE80211_CONF_CHANGE_LISTEN_INTERVAL = BIT(2),
IEEE80211_CONF_CHANGE_MONITOR = BIT(3),
IEEE80211_CONF_CHANGE_PS = BIT(4),
@@ -607,6 +606,21 @@ enum ieee80211_conf_changed {
};
/**
+ * enum ieee80211_smps_mode - spatial multiplexing power save mode
+ *
+ * @
+ */
+enum ieee80211_smps_mode {
+ IEEE80211_SMPS_AUTOMATIC,
+ IEEE80211_SMPS_OFF,
+ IEEE80211_SMPS_STATIC,
+ IEEE80211_SMPS_DYNAMIC,
+
+ /* keep last */
+ IEEE80211_SMPS_NUM_MODES,
+};
+
+/**
* struct ieee80211_conf - configuration of the device
*
* This struct indicates how the driver shall configure the hardware.
@@ -634,6 +648,10 @@ enum ieee80211_conf_changed {
* @short_frame_max_tx_count: Maximum number of transmissions for a "short"
* frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
* number of transmissions not the number of retries
+ *
+ * @smps_mode: spatial multiplexing powersave mode; note that
+ * %IEEE80211_SMPS_STATIC is used when the device is not
+ * configured for an HT channel
*/
struct ieee80211_conf {
u32 flags;
@@ -646,6 +664,7 @@ struct ieee80211_conf {
struct ieee80211_channel *channel;
enum nl80211_channel_type channel_type;
+ enum ieee80211_smps_mode smps_mode;
};
/**
@@ -657,12 +676,14 @@ struct ieee80211_conf {
* @type: type of this virtual interface
* @bss_conf: BSS configuration for this interface, either our own
* or the BSS we're associated to
+ * @addr: address of this interface
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *).
*/
struct ieee80211_vif {
enum nl80211_iftype type;
struct ieee80211_bss_conf bss_conf;
+ u8 addr[ETH_ALEN];
/* must be last */
u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
};
@@ -676,33 +697,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
}
/**
- * struct ieee80211_if_init_conf - initial configuration of an interface
- *
- * @vif: pointer to a driver-use per-interface structure. The pointer
- * itself is also used for various functions including
- * ieee80211_beacon_get() and ieee80211_get_buffered_bc().
- * @type: one of &enum nl80211_iftype constants. Determines the type of
- * added/removed interface.
- * @mac_addr: pointer to MAC address of the interface. This pointer is valid
- * until the interface is removed (i.e. it cannot be used after
- * remove_interface() callback was called for this interface).
- *
- * This structure is used in add_interface() and remove_interface()
- * callbacks of &struct ieee80211_hw.
- *
- * When you allow multiple interfaces to be added to your PHY, take care
- * that the hardware can actually handle multiple MAC addresses. However,
- * also take care that when there's no interface left with mac_addr != %NULL
- * you remove the MAC address from the device to avoid acknowledging packets
- * in pure monitor mode.
- */
-struct ieee80211_if_init_conf {
- enum nl80211_iftype type;
- struct ieee80211_vif *vif;
- void *mac_addr;
-};
-
-/**
* enum ieee80211_key_alg - key algorithm
* @ALG_WEP: WEP40 or WEP104
* @ALG_TKIP: TKIP
@@ -926,6 +920,21 @@ enum ieee80211_tkip_key_type {
* @IEEE80211_HW_BEACON_FILTER:
* Hardware supports dropping of irrelevant beacon frames to
* avoid waking up cpu.
+ *
+ * @IEEE80211_HW_SUPPORTS_STATIC_SMPS:
+ * Hardware supports static spatial multiplexing powersave,
+ * ie. can turn off all but one chain even on HT connections
+ * that should be using more chains.
+ *
+ * @IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS:
+ * Hardware supports dynamic spatial multiplexing powersave,
+ * ie. can turn off all but one chain and then wake the rest
+ * up as required after, for example, rts/cts handshake.
+ *
+ * @IEEE80211_HW_SUPPORTS_UAPSD:
+ * Hardware supports Unscheduled Automatic Power Save Delivery
+ * (U-APSD) in managed mode. The mode is configured with
+ * conf_tx() operation.
*/
enum ieee80211_hw_flags {
IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -943,6 +952,9 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
IEEE80211_HW_MFP_CAPABLE = 1<<13,
IEEE80211_HW_BEACON_FILTER = 1<<14,
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15,
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
+ IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
};
/**
@@ -1211,6 +1223,31 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
*/
/**
+ * DOC: Spatial multiplexing power save
+ *
+ * SMPS (Spatial multiplexing power save) is a mechanism to conserve
+ * power in an 802.11n implementation. For details on the mechanism
+ * and rationale, please refer to 802.11 (as amended by 802.11n-2009)
+ * "11.2.3 SM power save".
+ *
+ * The mac80211 implementation is capable of sending action frames
+ * to update the AP about the station's SMPS mode, and will instruct
+ * the driver to enter the specific mode. It will also announce the
+ * requested SMPS mode during the association handshake. Hardware
+ * support for this feature is required, and can be indicated by
+ * hardware flags.
+ *
+ * The default mode will be "automatic", which nl80211/cfg80211
+ * defines to be dynamic SMPS in (regular) powersave, and SMPS
+ * turned off otherwise.
+ *
+ * To support this feature, the driver must set the appropriate
+ * hardware support flags, and handle the SMPS flag to the config()
+ * operation. It will then with this mechanism be instructed to
+ * enter the requested SMPS mode while associated to an HT AP.
+ */
+
+/**
* DOC: Frame filtering
*
* mac80211 requires to see many management frames for proper
@@ -1347,7 +1384,7 @@ enum ieee80211_ampdu_mlme_action {
* When the device is started it should not have a MAC address
* to avoid acknowledging frames before a non-monitor device
* is added.
- * Must be implemented.
+ * Must be implemented and can sleep.
*
* @stop: Called after last netdevice attached to the hardware
* is disabled. This should turn off the hardware (at least
@@ -1355,7 +1392,7 @@ enum ieee80211_ampdu_mlme_action {
* May be called right after add_interface if that rejects
* an interface. If you added any work onto the mac80211 workqueue
* you should ensure to cancel it on this callback.
- * Must be implemented.
+ * Must be implemented and can sleep.
*
* @add_interface: Called when a netdevice attached to the hardware is
* enabled. Because it is not called for monitor mode devices, @start
@@ -1365,7 +1402,7 @@ enum ieee80211_ampdu_mlme_action {
* interface is given in the conf parameter.
* The callback may refuse to add an interface by returning a
* negative error code (which will be seen in userspace.)
- * Must be implemented.
+ * Must be implemented and can sleep.
*
* @remove_interface: Notifies a driver that an interface is going down.
* The @stop callback is called after this if it is the last interface
@@ -1374,19 +1411,20 @@ enum ieee80211_ampdu_mlme_action {
* must be cleared so the device no longer acknowledges packets,
* the mac_addr member of the conf structure is, however, set to the
* MAC address of the device going away.
- * Hence, this callback must be implemented.
+ * Hence, this callback must be implemented. It can sleep.
*
* @config: Handler for configuration requests. IEEE 802.11 code calls this
* function to change hardware configuration, e.g., channel.
* This function should never fail but returns a negative error code
- * if it does.
+ * if it does. The callback can sleep.
*
* @bss_info_changed: Handler for configuration requests related to BSS
* parameters that may vary during BSS's lifespan, and may affect low
* level driver (e.g. assoc/disassoc status, erp parameters).
* This function should not be used if no BSS has been set, unless
* for association indication. The @changed parameter indicates which
- * of the bss parameters has changed when a call is made.
+ * of the bss parameters has changed when a call is made. The callback
+ * can sleep.
*
* @prepare_multicast: Prepare for multicast filter configuration.
* This callback is optional, and its return value is passed
@@ -1394,20 +1432,22 @@ enum ieee80211_ampdu_mlme_action {
*
* @configure_filter: Configure the device's RX filter.
* See the section "Frame filtering" for more information.
- * This callback must be implemented.
+ * This callback must be implemented and can sleep.
*
* @set_tim: Set TIM bit. mac80211 calls this function when a TIM bit
* must be set or cleared for a given STA. Must be atomic.
*
* @set_key: See the section "Hardware crypto acceleration"
- * This callback can sleep, and is only called between add_interface
- * and remove_interface calls, i.e. while the given virtual interface
+ * This callback is only called between add_interface and
+ * remove_interface calls, i.e. while the given virtual interface
* is enabled.
* Returns a negative error code if the key can't be added.
+ * The callback can sleep.
*
* @update_tkip_key: See the section "Hardware crypto acceleration"
* This callback will be called in the context of Rx. Called for drivers
* which set IEEE80211_KEY_FLAG_TKIP_REQ_RX_P1_KEY.
+ * The callback can sleep.
*
* @hw_scan: Ask the hardware to service the scan request, no need to start
* the scan state machine in stack. The scan must honour the channel
@@ -1421,21 +1461,28 @@ enum ieee80211_ampdu_mlme_action {
* When the scan finishes, ieee80211_scan_completed() must be called;
* note that it also must be called when the scan cannot finish due to
* any error unless this callback returned a negative error code.
+ * The callback can sleep.
*
* @sw_scan_start: Notifier function that is called just before a software scan
* is started. Can be NULL, if the driver doesn't need this notification.
+ * The callback can sleep.
*
- * @sw_scan_complete: Notifier function that is called just after a software scan
- * finished. Can be NULL, if the driver doesn't need this notification.
+ * @sw_scan_complete: Notifier function that is called just after a
+ * software scan finished. Can be NULL, if the driver doesn't need
+ * this notification.
+ * The callback can sleep.
*
* @get_stats: Return low-level statistics.
* Returns zero if statistics are available.
+ * The callback can sleep.
*
* @get_tkip_seq: If your device implements TKIP encryption in hardware this
* callback should be provided to read the TKIP transmit IVs (both IV32
* and IV16) for the given key from hardware.
+ * The callback must be atomic.
*
* @set_rts_threshold: Configuration of RTS threshold (if device needs it)
+ * The callback can sleep.
*
* @sta_notify: Notifies low level driver about addition, removal or power
* state transition of an associated station, AP, IBSS/WDS/mesh peer etc.
@@ -1444,30 +1491,36 @@ enum ieee80211_ampdu_mlme_action {
* @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
* bursting) for a hardware TX queue.
* Returns a negative error code on failure.
+ * The callback can sleep.
*
* @get_tx_stats: Get statistics of the current TX queue status. This is used
* to get number of currently queued packets (queue length), maximum queue
* size (limit), and total number of packets sent using each TX queue
* (count). The 'stats' pointer points to an array that has hw->queues
* items.
+ * The callback must be atomic.
*
* @get_tsf: Get the current TSF timer value from firmware/hardware. Currently,
* this is only used for IBSS mode BSSID merging and debugging. Is not a
* required function.
+ * The callback can sleep.
*
* @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
* Currently, this is only used for IBSS mode debugging. Is not a
* required function.
+ * The callback can sleep.
*
* @reset_tsf: Reset the TSF timer and allow firmware/hardware to synchronize
* with other STAs in the IBSS. This is only used in IBSS mode. This
* function is optional if the firmware/hardware takes full care of
* TSF synchronization.
+ * The callback can sleep.
*
* @tx_last_beacon: Determine whether the last IBSS beacon was sent by us.
* This is needed only for IBSS mode and the result of this function is
* used to determine whether to reply to Probe Requests.
* Returns non-zero if this device sent the last beacon.
+ * The callback can sleep.
*
* @ampdu_action: Perform a certain A-MPDU action
* The RA/TID combination determines the destination and TID we want
@@ -1476,21 +1529,32 @@ enum ieee80211_ampdu_mlme_action {
* is the first frame we expect to perform the action on. Notice
* that TX/RX_STOP can pass NULL for this parameter.
* Returns a negative error code on failure.
+ * The callback must be atomic.
*
* @rfkill_poll: Poll rfkill hardware state. If you need this, you also
* need to set wiphy->rfkill_poll to %true before registration,
* and need to call wiphy_rfkill_set_hw_state() in the callback.
+ * The callback can sleep.
+ *
+ * @set_coverage_class: Set slot time for given coverage class as specified
+ * in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
+ * accordingly. This callback is not required and may sleep.
*
* @testmode_cmd: Implement a cfg80211 test mode command.
+ * The callback can sleep.
+ *
+ * @flush: Flush all pending frames from the hardware queue, making sure
+ * that the hardware queues are empty. If the parameter @drop is set
+ * to %true, pending frames may be dropped. The callback can sleep.
*/
struct ieee80211_ops {
int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb);
int (*start)(struct ieee80211_hw *hw);
void (*stop)(struct ieee80211_hw *hw);
int (*add_interface)(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void (*remove_interface)(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int (*config)(struct ieee80211_hw *hw, u32 changed);
void (*bss_info_changed)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -1535,9 +1599,11 @@ struct ieee80211_ops {
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
void (*rfkill_poll)(struct ieee80211_hw *hw);
+ void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len);
#endif
+ void (*flush)(struct ieee80211_hw *hw, bool drop);
};
/**
@@ -1777,7 +1843,7 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
/**
* ieee80211_beacon_get_tim - beacon generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @tim_offset: pointer to variable that will receive the TIM IE offset.
* Set to 0 if invalid (in non-AP modes).
* @tim_length: pointer to variable that will receive the TIM IE length,
@@ -1805,7 +1871,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
/**
* ieee80211_beacon_get - beacon generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* See ieee80211_beacon_get_tim().
*/
@@ -1816,9 +1882,56 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
}
/**
+ * ieee80211_pspoll_get - retrieve a PS Poll template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a PS Poll a template which can, for example, uploaded to
+ * hardware. The template must be updated after association so that correct
+ * AID, BSSID and MAC address is used.
+ *
+ * Note: Caller (or hardware) is responsible for setting the
+ * &IEEE80211_FCTL_PM bit.
+ */
+struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_nullfunc_get - retrieve a nullfunc template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * Creates a Nullfunc template which can, for example, uploaded to
+ * hardware. The template must be updated after association so that correct
+ * BSSID and address is used.
+ *
+ * Note: Caller (or hardware) is responsible for setting the
+ * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
+ */
+struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_probereq_get - retrieve a Probe Request template
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @ssid: SSID buffer
+ * @ssid_len: length of SSID
+ * @ie: buffer containing all IEs except SSID for the template
+ * @ie_len: length of the IE buffer
+ *
+ * Creates a Probe Request template which can, for example, be uploaded to
+ * hardware.
+ */
+struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len);
+
+/**
* ieee80211_rts_get - RTS frame generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @frame: pointer to the frame that is going to be protected by the RTS.
* @frame_len: the frame length (in octets).
* @frame_txctl: &struct ieee80211_tx_info of the frame.
@@ -1837,7 +1950,7 @@ void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/**
* ieee80211_rts_duration - Get the duration field for an RTS frame
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @frame_len: the length of the frame that is going to be protected by the RTS.
* @frame_txctl: &struct ieee80211_tx_info of the frame.
*
@@ -1852,7 +1965,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
/**
* ieee80211_ctstoself_get - CTS-to-self frame generation function
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @frame: pointer to the frame that is going to be protected by the CTS-to-self.
* @frame_len: the frame length (in octets).
* @frame_txctl: &struct ieee80211_tx_info of the frame.
@@ -1872,7 +1985,7 @@ void ieee80211_ctstoself_get(struct ieee80211_hw *hw,
/**
* ieee80211_ctstoself_duration - Get the duration field for a CTS-to-self frame
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @frame_len: the length of the frame that is going to be protected by the CTS-to-self.
* @frame_txctl: &struct ieee80211_tx_info of the frame.
*
@@ -1888,7 +2001,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
/**
* ieee80211_generic_frame_duration - Calculate the duration field for a frame
* @hw: pointer obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @frame_len: the length of the frame.
* @rate: the rate at which the frame is going to be transmitted.
*
@@ -1903,7 +2016,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
/**
* ieee80211_get_buffered_bc - accessing buffered broadcast and multicast frames
* @hw: pointer as obtained from ieee80211_alloc_hw().
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* Function for accessing buffered broadcast and multicast frames. If
* hardware/firmware does not implement buffering of broadcast/multicast
@@ -2071,7 +2184,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *sta, u16 tid);
/**
* ieee80211_start_tx_ba_cb - low level driver ready to aggregate.
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the TID to BA on.
*
@@ -2082,7 +2195,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid);
/**
* ieee80211_start_tx_ba_cb_irqsafe - low level driver ready to aggregate.
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the TID to BA on.
*
@@ -2110,7 +2223,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *sta, u16 tid,
/**
* ieee80211_stop_tx_ba_cb - low level driver ready to stop aggregate.
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the desired TID to BA on.
*
@@ -2121,7 +2234,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid);
/**
* ieee80211_stop_tx_ba_cb_irqsafe - low level driver ready to stop aggregate.
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback
* @ra: receiver address of the BA session recipient.
* @tid: the desired TID to BA on.
*
@@ -2200,7 +2313,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
/**
* ieee80211_beacon_loss - inform hardware does not receive beacons
*
- * @vif: &struct ieee80211_vif pointer from &struct ieee80211_if_init_conf.
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and
* IEEE80211_CONF_PS is set, the driver needs to inform whenever the
@@ -2234,8 +2347,12 @@ enum rate_control_changed {
* @short_preamble: whether mac80211 will request short-preamble transmission
* if the selected rate supports it
* @max_rate_idx: user-requested maximum rate (not MCS for now)
+ * (deprecated; this will be removed once drivers get updated to use
+ * rate_idx_mask)
+ * @rate_idx_mask: user-requested rate mask (not MCS for now)
* @skb: the skb that will be transmitted, the control information in it needs
* to be filled in
+ * @ap: whether this frame is sent out in AP mode
*/
struct ieee80211_tx_rate_control {
struct ieee80211_hw *hw;
@@ -2245,6 +2362,8 @@ struct ieee80211_tx_rate_control {
struct ieee80211_tx_rate reported_rate;
bool rts, short_preamble;
u8 max_rate_idx;
+ u32 rate_idx_mask;
+ bool ap;
};
struct rate_control_ops {
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h
index 4c61cdce4e5f..35672b1cf44a 100644
--- a/include/net/phonet/pep.h
+++ b/include/net/phonet/pep.h
@@ -44,6 +44,7 @@ struct pep_sock {
u8 rx_fc; /* RX flow control */
u8 tx_fc; /* TX flow control */
u8 init_enable; /* auto-enable at creation */
+ u8 aligned;
};
static inline struct pep_sock *pep_sk(struct sock *sk)
@@ -77,6 +78,7 @@ static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb)
enum {
PNS_PIPE_DATA = 0x20,
+ PNS_PIPE_ALIGNED_DATA,
PNS_PEP_CONNECT_REQ = 0x40,
PNS_PEP_CONNECT_RESP,
@@ -138,6 +140,7 @@ enum {
PN_PIPE_SB_NEGOTIATED_FC,
PN_PIPE_SB_REQUIRED_FC_TX,
PN_PIPE_SB_PREFERRED_FC_RX,
+ PN_PIPE_SB_ALIGNED_DATA,
};
/* Phonet pipe flow control models */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 34f5cc24d903..788c99f98597 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -856,13 +856,6 @@ static inline void tcp_check_probe_timer(struct sock *sk)
icsk->icsk_rto, TCP_RTO_MAX);
}
-static inline void tcp_push_pending_frames(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
-}
-
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
tp->snd_wl1 = seq;
@@ -972,7 +965,8 @@ static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
/* Determine a window scaling and initial window to offer. */
extern void tcp_select_initial_window(int __space, __u32 mss,
__u32 *rcv_wnd, __u32 *window_clamp,
- int wscale_ok, __u8 *rcv_wscale);
+ int wscale_ok, __u8 *rcv_wscale,
+ __u32 init_rcv_wnd);
static inline int tcp_win_from_space(int space)
{
@@ -1342,6 +1336,15 @@ static inline int tcp_write_queue_empty(struct sock *sk)
return skb_queue_empty(&sk->sk_write_queue);
}
+static inline void tcp_push_pending_frames(struct sock *sk)
+{
+ if (tcp_send_head(sk)) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
+ }
+}
+
/* Start sequence of the highest skb with SACKed bit, valid only if
* sacked > 0 or when the caller has ensured validity by itself.
*/
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index cbfba885eb85..f5b1031dccbe 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -137,6 +137,7 @@ struct pcmcia_socket {
spinlock_t lock;
socket_state_t socket;
u_int state;
+ u_int suspended_state; /* state before suspend */
u_short functions;
u_short lock_count;
pccard_mem_map cis_mem;
@@ -243,12 +244,6 @@ struct pcmcia_socket {
#endif /* CONFIG_PCMCIA_IOCTL */
#endif /* CONFIG_PCMCIA */
- /* cardbus (32-bit) */
-#ifdef CONFIG_CARDBUS
- struct resource *cb_cis_res;
- void __iomem *cb_cis_virt;
-#endif /* CONFIG_CARDBUS */
-
/* socket device */
struct device dev;
/* data internal to the socket driver */
@@ -263,13 +258,25 @@ struct pcmcia_socket {
* - pccard_static_ops iomem and ioport areas are assigned statically
* - pccard_iodyn_ops iomem areas is assigned statically, ioport
* areas dynamically
+ * If this option is selected, use
+ * "select PCCARD_IODYN" in Kconfig.
* - pccard_nonstatic_ops iomem and ioport areas are assigned dynamically.
* If this option is selected, use
* "select PCCARD_NONSTATIC" in Kconfig.
+ *
*/
extern struct pccard_resource_ops pccard_static_ops;
+#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE)
extern struct pccard_resource_ops pccard_iodyn_ops;
extern struct pccard_resource_ops pccard_nonstatic_ops;
+#else
+/* If PCMCIA is not used, but only CARDBUS, these functions are not used
+ * at all. Therefore, do not use the large (240K!) rsrc_nonstatic module
+ */
+#define pccard_iodyn_ops pccard_static_ops
+#define pccard_nonstatic_ops pccard_static_ops
+#endif
+
/* socket drivers are expected to use these callbacks in their .drv struct */
extern int pcmcia_socket_dev_suspend(struct device *dev);
diff --git a/include/sound/cs46xx_dsp_spos.h b/include/sound/cs46xx_dsp_spos.h
index 7c44667e79a6..49b03c9e5e55 100644
--- a/include/sound/cs46xx_dsp_spos.h
+++ b/include/sound/cs46xx_dsp_spos.h
@@ -118,9 +118,11 @@ struct dsp_scb_descriptor {
struct snd_info_entry *proc_info;
int ref_count;
- spinlock_t lock;
- int deleted;
+ u16 volume[2];
+ unsigned int deleted :1;
+ unsigned int updated :1;
+ unsigned int volume_set :1;
};
struct dsp_task_descriptor {
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c83a4a79f16b..1d4ca2aae50d 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -262,6 +262,8 @@ struct snd_pcm_hw_constraint_list {
unsigned int mask;
};
+struct snd_pcm_hwptr_log;
+
struct snd_pcm_runtime {
/* -- Status -- */
struct snd_pcm_substream *trigger_master;
@@ -269,7 +271,6 @@ struct snd_pcm_runtime {
int overrange;
snd_pcm_uframes_t avail_max;
snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
- snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */
unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */
snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */
@@ -310,6 +311,7 @@ struct snd_pcm_runtime {
struct snd_pcm_mmap_control *control;
/* -- locking / scheduling -- */
+ unsigned int nowake: 1; /* no wakeup (data-copy in progress) */
wait_queue_head_t sleep;
struct fasync_struct *fasync;
@@ -340,6 +342,10 @@ struct snd_pcm_runtime {
/* -- OSS things -- */
struct snd_pcm_oss_runtime oss;
#endif
+
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ struct snd_pcm_hwptr_log *hwptr_log;
+#endif
};
struct snd_pcm_group { /* keep linked substreams */
@@ -834,6 +840,8 @@ void snd_pcm_set_sync(struct snd_pcm_substream *substream);
int snd_pcm_lib_interleave_len(struct snd_pcm_substream *substream);
int snd_pcm_lib_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
+int snd_pcm_update_state(struct snd_pcm_substream *substream,
+ struct snd_pcm_runtime *runtime);
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream);
int snd_pcm_playback_xrun_check(struct snd_pcm_substream *substream);
int snd_pcm_capture_xrun_check(struct snd_pcm_substream *substream);
@@ -905,6 +913,44 @@ int snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
+int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
+ size_t size, gfp_t gfp_flags);
+int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
+struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
+ unsigned long offset);
+#if 0 /* for kernel-doc */
+/**
+ * snd_pcm_lib_alloc_vmalloc_buffer - allocate virtual DMA buffer
+ * @substream: the substream to allocate the buffer to
+ * @size: the requested buffer size, in bytes
+ *
+ * Allocates the PCM substream buffer using vmalloc(), i.e., the memory is
+ * contiguous in kernel virtual space, but not in physical memory. Use this
+ * if the buffer is accessed by kernel code but not by device DMA.
+ *
+ * Returns 1 if the buffer was changed, 0 if not changed, or a negative error
+ * code.
+ */
+static int snd_pcm_lib_alloc_vmalloc_buffer
+ (struct snd_pcm_substream *substream, size_t size);
+/**
+ * snd_pcm_lib_alloc_vmalloc_32_buffer - allocate 32-bit-addressable buffer
+ * @substream: the substream to allocate the buffer to
+ * @size: the requested buffer size, in bytes
+ *
+ * This function works like snd_pcm_lib_alloc_vmalloc_buffer(), but uses
+ * vmalloc_32(), i.e., the pages are allocated from 32-bit-addressable memory.
+ */
+static int snd_pcm_lib_alloc_vmalloc_32_buffer
+ (struct snd_pcm_substream *substream, size_t size);
+#endif
+#define snd_pcm_lib_alloc_vmalloc_buffer(subs, size) \
+ _snd_pcm_lib_alloc_vmalloc_buffer \
+ (subs, size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)
+#define snd_pcm_lib_alloc_vmalloc_32_buffer(subs, size) \
+ _snd_pcm_lib_alloc_vmalloc_buffer \
+ (subs, size, GFP_KERNEL | GFP_DMA32 | __GFP_ZERO)
+
#ifdef CONFIG_SND_DMA_SGBUF
/*
* SG-buffer handling
diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
index cc4e226f35fd..760c969d885d 100644
--- a/include/sound/pcm_oss.h
+++ b/include/sound/pcm_oss.h
@@ -61,7 +61,7 @@ struct snd_pcm_oss_runtime {
struct snd_pcm_plugin *plugin_first;
struct snd_pcm_plugin *plugin_last;
#endif
- unsigned int prev_hw_ptr_interrupt;
+ unsigned int prev_hw_ptr_period;
};
struct snd_pcm_oss_file {
diff --git a/include/sound/sb.h b/include/sound/sb.h
index 4e62ee1e4115..95353542256a 100644
--- a/include/sound/sb.h
+++ b/include/sound/sb.h
@@ -33,6 +33,7 @@ enum sb_hw_type {
SB_HW_20,
SB_HW_201,
SB_HW_PRO,
+ SB_HW_JAZZ16, /* Media Vision Jazz16 */
SB_HW_16,
SB_HW_16CSP, /* SB16 with CSP chip */
SB_HW_ALS100, /* Avance Logic ALS100 chip */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 0d7718f9280d..08909ccd235b 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -253,6 +253,9 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
/* codec register bit access */
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
unsigned int mask, unsigned int value);
+int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
+ unsigned short reg, unsigned int mask,
+ unsigned int value);
int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned short reg,
unsigned int mask, unsigned int value);
diff --git a/include/sound/tpa6130a2-plat.h b/include/sound/tpa6130a2-plat.h
index e8c901e749d8..e29fde6b5cbe 100644
--- a/include/sound/tpa6130a2-plat.h
+++ b/include/sound/tpa6130a2-plat.h
@@ -23,7 +23,13 @@
#ifndef TPA6130A2_PLAT_H
#define TPA6130A2_PLAT_H
+enum tpa_model {
+ TPA6130A2,
+ TPA6140A2,
+};
+
struct tpa6130a2_platform_data {
+ enum tpa_model id;
int power_gpio;
};
diff --git a/include/sound/version.h b/include/sound/version.h
index 22939142dd23..7fed23442db8 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
/* include/version.h */
-#define CONFIG_SND_VERSION "1.0.21"
+#define CONFIG_SND_VERSION "1.0.22.1"
#define CONFIG_SND_DATE ""
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
new file mode 100644
index 000000000000..d66575a601be
--- /dev/null
+++ b/include/sound/wm8904.h
@@ -0,0 +1,57 @@
+/*
+ * Platform data for WM8904
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM8994_PDATA_H__
+#define __MFD_WM8994_PDATA_H__
+
+#define WM8904_DRC_REGS 4
+#define WM8904_EQ_REGS 25
+
+/**
+ * DRC configurations are specified with a label and a set of register
+ * values to write (the enable bits will be ignored). At runtime an
+ * enumerated control will be presented for each DRC block allowing
+ * the user to choose the configration to use.
+ *
+ * Configurations may be generated by hand or by using the DRC control
+ * panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
+ * for details.
+ */
+struct wm8904_drc_cfg {
+ const char *name;
+ u16 regs[WM8904_DRC_REGS];
+};
+
+/**
+ * ReTune Mobile configurations are specified with a label, sample
+ * rate and set of values to write (the enable bits will be ignored).
+ *
+ * Configurations are expected to be generated using the ReTune Mobile
+ * control panel in WISCE - see http://www.wolfsonmicro.com/wisce/
+ */
+struct wm8904_retune_mobile_cfg {
+ const char *name;
+ unsigned int rate;
+ u16 regs[WM8904_EQ_REGS];
+};
+
+struct wm8904_pdata {
+ int num_drc_cfgs;
+ struct wm8904_drc_cfg *drc_cfgs;
+
+ int num_retune_mobile_cfgs;
+ struct wm8904_retune_mobile_cfg *retune_mobile_cfgs;
+};
+
+#endif
diff --git a/include/sound/wm8955.h b/include/sound/wm8955.h
new file mode 100644
index 000000000000..5074ef499f40
--- /dev/null
+++ b/include/sound/wm8955.h
@@ -0,0 +1,26 @@
+/*
+ * Platform data for WM8955
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __WM8955_PDATA_H__
+#define __WM8955_PDATA_H__
+
+struct wm8955_pdata {
+ /* Configure LOUT2/ROUT2 to drive a speaker */
+ unsigned int out2_speaker:1;
+
+ /* Configure MONOIN+/- in differential mode */
+ unsigned int monoin_diff:1;
+};
+
+#endif
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index c6fe03e902ca..4a46a60c2077 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -499,7 +499,7 @@ static inline int ftrace_get_offsets_##call( \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
/*
* Generate the functions needed for tracepoint perf_event support.
@@ -542,7 +542,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
/*
* Stage 4 of the trace events.
@@ -627,7 +627,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
*
*/
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
#define _TRACE_PROFILE_INIT(call) \
.profile_enable = ftrace_profile_enable_##call, \
@@ -635,7 +635,7 @@ static void ftrace_profile_disable_##name(struct ftrace_event_call *unused)\
#else
#define _TRACE_PROFILE_INIT(call)
-#endif
+#endif /* CONFIG_PERF_EVENTS */
#undef __entry
#define __entry entry
@@ -835,7 +835,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
* }
*/
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
#undef __perf_addr
#define __perf_addr(a) __addr = (a)
@@ -927,7 +927,7 @@ static void ftrace_profile_##call(proto) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
#undef _TRACE_PROFILE_INIT
diff --git a/include/trace/syscall.h b/include/trace/syscall.h
index 961fda3556bb..3d463dcef298 100644
--- a/include/trace/syscall.h
+++ b/include/trace/syscall.h
@@ -49,12 +49,12 @@ ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
#endif
-#ifdef CONFIG_EVENT_PROFILE
+
+#ifdef CONFIG_PERF_EVENTS
int prof_sysenter_enable(struct ftrace_event_call *call);
void prof_sysenter_disable(struct ftrace_event_call *call);
int prof_sysexit_enable(struct ftrace_event_call *call);
void prof_sysexit_disable(struct ftrace_event_call *call);
-
#endif
#endif /* _TRACE_SYSCALL_H */
diff --git a/init/Kconfig b/init/Kconfig
index a23da9f01803..8db90673e5ce 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -76,6 +76,14 @@ config INIT_ENV_ARG_LIMIT
variables passed to init from the kernel command line.
+config CROSS_COMPILE
+ string "Cross-compiler tool prefix"
+ help
+ Same as running 'make CROSS_COMPILE=prefix-' but stored for
+ default make runs in this kernel build directory. You don't
+ need to set this unless you want the configured kernel build
+ directory to select the cross-compiler automatically.
+
config LOCALVERSION
string "Local version - append to kernel release"
help
@@ -115,10 +123,13 @@ config HAVE_KERNEL_BZIP2
config HAVE_KERNEL_LZMA
bool
+config HAVE_KERNEL_LZO
+ bool
+
choice
prompt "Kernel compression mode"
default KERNEL_GZIP
- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA
+ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
help
The linux kernel is a kind of self-extracting executable.
Several compression algorithms are available, which differ
@@ -141,9 +152,8 @@ config KERNEL_GZIP
bool "Gzip"
depends on HAVE_KERNEL_GZIP
help
- The old and tried gzip compression. Its compression ratio is
- the poorest among the 3 choices; however its speed (both
- compression and decompression) is the fastest.
+ The old and tried gzip compression. It provides a good balance
+ between compression ratio and decompression speed.
config KERNEL_BZIP2
bool "Bzip2"
@@ -164,6 +174,14 @@ config KERNEL_LZMA
two. Compression is slowest. The kernel size is about 33%
smaller with LZMA in comparison to gzip.
+config KERNEL_LZO
+ bool "LZO"
+ depends on HAVE_KERNEL_LZO
+ help
+ Its compression ratio is the poorest among the 4. The kernel
+ size is about about 10% bigger than gzip; however its speed
+ (both compression and decompression) is the fastest.
+
endchoice
config SWAP
@@ -302,13 +320,17 @@ config AUDITSYSCALL
help
Enable low-overhead system-call auditing infrastructure that
can be used independently or with another kernel subsystem,
- such as SELinux. To use audit's filesystem watch feature, please
- ensure that INOTIFY is configured.
+ such as SELinux.
+
+config AUDIT_WATCH
+ def_bool y
+ depends on AUDITSYSCALL
+ select FSNOTIFY
config AUDIT_TREE
def_bool y
depends on AUDITSYSCALL
- select INOTIFY
+ select FSNOTIFY
menu "RCU Subsystem"
@@ -966,19 +988,6 @@ config PERF_EVENTS
Say Y if unsure.
-config EVENT_PROFILE
- bool "Tracepoint profiling sources"
- depends on PERF_EVENTS && EVENT_TRACING
- default y
- help
- Allow the use of tracepoints as software performance events.
-
- When this is enabled, you can create perf events based on
- tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
- found in debugfs://tracing/events/*/*/id. (The -e/--events
- option to the perf tool can parse and interpret symbolic
- tracepoints, in the subsystem:tracepoint_name format.)
-
config PERF_COUNTERS
bool "Kernel performance counters (old config option)"
depends on HAVE_PERF_EVENTS
@@ -1252,4 +1261,8 @@ source "block/Kconfig"
config PREEMPT_NOTIFIERS
bool
+config PADATA
+ depends on SMP
+ bool
+
source "kernel/Kconfig.locks"
diff --git a/init/main.c b/init/main.c
index dac44a9356a5..52a24e566e07 100644
--- a/init/main.c
+++ b/init/main.c
@@ -63,6 +63,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/idr.h>
+#include <linux/kdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
@@ -647,6 +648,11 @@ asmlinkage void __init start_kernel(void)
calibrate_delay();
pidmap_init();
anon_vma_init();
+
+#ifdef CONFIG_KGDB_KDB
+ kdb_init();
+#endif /* CONFIG_KGDB_KDB */
+
#ifdef CONFIG_X86
if (efi_enabled)
efi_enter_virtual_mode();
diff --git a/kernel/Makefile b/kernel/Makefile
index 864ff75d65f2..702260a4a9f9 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -69,12 +69,13 @@ obj-$(CONFIG_IKCONFIG) += configs.o
obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
-obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
+obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
-obj-$(CONFIG_GCOV_KERNEL) += gcov/
+obj-$(CONFIG_AUDIT_WATCH) += audit_watch.o
obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
+obj-$(CONFIG_GCOV_KERNEL) += gcov/
obj-$(CONFIG_KPROBES) += kprobes.o
-obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
@@ -100,6 +101,7 @@ obj-$(CONFIG_SLOW_WORK_DEBUG) += slow-work-debugfs.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_USER_RETURN_NOTIFIER) += user-return-notifier.o
+obj-$(CONFIG_PADATA) += padata.o
ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/audit.c b/kernel/audit.c
index 5feed232be9d..c3b6cb5da9eb 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -55,7 +55,6 @@
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
-#include <linux/inotify.h>
#include <linux/freezer.h>
#include <linux/tty.h>
@@ -398,7 +397,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
skb_get(skb);
err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0);
if (err < 0) {
- BUG_ON(err != -ECONNREFUSED); /* Shoudn't happen */
+ BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
audit_log_lost("auditd dissapeared\n");
audit_pid = 0;
diff --git a/kernel/audit.h b/kernel/audit.h
index 208687be4f30..f7206db4e13d 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -103,21 +103,27 @@ extern struct mutex audit_filter_mutex;
extern void audit_free_rule_rcu(struct rcu_head *);
extern struct list_head audit_filter_list[];
+extern struct audit_entry *audit_dupe_rule(struct audit_krule *old);
+
/* audit watch functions */
-extern unsigned long audit_watch_inode(struct audit_watch *watch);
-extern dev_t audit_watch_dev(struct audit_watch *watch);
+#ifdef CONFIG_AUDIT_WATCH
extern void audit_put_watch(struct audit_watch *watch);
extern void audit_get_watch(struct audit_watch *watch);
extern int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op);
-extern int audit_add_watch(struct audit_krule *krule);
-extern void audit_remove_watch(struct audit_watch *watch);
-extern void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list);
-extern void audit_inotify_unregister(struct list_head *in_list);
+extern int audit_add_watch(struct audit_krule *krule, struct list_head **list);
+extern void audit_remove_watch_rule(struct audit_krule *krule);
extern char *audit_watch_path(struct audit_watch *watch);
-extern struct list_head *audit_watch_rules(struct audit_watch *watch);
-
-extern struct audit_entry *audit_dupe_rule(struct audit_krule *old,
- struct audit_watch *watch);
+extern int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev);
+#else
+#define audit_put_watch(w) {}
+#define audit_get_watch(w) {}
+#define audit_to_watch(k, p, l, o) (-EINVAL)
+#define audit_add_watch(k, l) (-EINVAL)
+#define audit_remove_watch_rule(k) BUG()
+#define audit_watch_path(w) ""
+#define audit_watch_compare(w, i, d) 0
+
+#endif /* CONFIG_AUDIT_WATCH */
#ifdef CONFIG_AUDIT_TREE
extern struct audit_chunk *audit_tree_lookup(const struct inode *);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 4b05bd9479db..a9167f9ec215 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -1,5 +1,5 @@
#include "audit.h"
-#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
@@ -21,7 +21,7 @@ struct audit_tree {
struct audit_chunk {
struct list_head hash;
- struct inotify_watch watch;
+ struct fsnotify_mark mark;
struct list_head trees; /* with root here */
int dead;
int count;
@@ -58,7 +58,7 @@ static LIST_HEAD(prune_list);
* tree is refcounted; one reference for "some rules on rules_list refer to
* it", one for each chunk with pointer to it.
*
- * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
+ * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
* of watch contributes 1 to .refs).
*
* node.index allows to get from node.list to containing chunk.
@@ -67,7 +67,7 @@ static LIST_HEAD(prune_list);
* that makes a difference. Some.
*/
-static struct inotify_handle *rtree_ih;
+static struct fsnotify_group *audit_tree_group;
static struct audit_tree *alloc_tree(const char *s)
{
@@ -110,29 +110,6 @@ const char *audit_tree_path(struct audit_tree *tree)
return tree->pathname;
}
-static struct audit_chunk *alloc_chunk(int count)
-{
- struct audit_chunk *chunk;
- size_t size;
- int i;
-
- size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
- chunk = kzalloc(size, GFP_KERNEL);
- if (!chunk)
- return NULL;
-
- INIT_LIST_HEAD(&chunk->hash);
- INIT_LIST_HEAD(&chunk->trees);
- chunk->count = count;
- atomic_long_set(&chunk->refs, 1);
- for (i = 0; i < count; i++) {
- INIT_LIST_HEAD(&chunk->owners[i].list);
- chunk->owners[i].index = i;
- }
- inotify_init_watch(&chunk->watch);
- return chunk;
-}
-
static void free_chunk(struct audit_chunk *chunk)
{
int i;
@@ -156,6 +133,35 @@ static void __put_chunk(struct rcu_head *rcu)
audit_put_chunk(chunk);
}
+static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
+{
+ struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+ call_rcu(&chunk->head, __put_chunk);
+}
+
+static struct audit_chunk *alloc_chunk(int count)
+{
+ struct audit_chunk *chunk;
+ size_t size;
+ int i;
+
+ size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
+ chunk = kzalloc(size, GFP_KERNEL);
+ if (!chunk)
+ return NULL;
+
+ INIT_LIST_HEAD(&chunk->hash);
+ INIT_LIST_HEAD(&chunk->trees);
+ chunk->count = count;
+ atomic_long_set(&chunk->refs, 1);
+ for (i = 0; i < count; i++) {
+ INIT_LIST_HEAD(&chunk->owners[i].list);
+ chunk->owners[i].index = i;
+ }
+ fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+ return chunk;
+}
+
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
@@ -166,10 +172,15 @@ static inline struct list_head *chunk_hash(const struct inode *inode)
return chunk_hash_heads + n % HASH_SIZE;
}
-/* hash_lock is held by caller */
+/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
- struct list_head *list = chunk_hash(chunk->watch.inode);
+ struct fsnotify_mark *entry = &chunk->mark;
+ struct list_head *list;
+
+ if (!entry->i.inode)
+ return;
+ list = chunk_hash(entry->i.inode);
list_add_rcu(&chunk->hash, list);
}
@@ -180,7 +191,8 @@ struct audit_chunk *audit_tree_lookup(const struct inode *inode)
struct audit_chunk *p;
list_for_each_entry_rcu(p, list, hash) {
- if (p->watch.inode == inode) {
+ /* mark.inode may have gone NULL, but who cares? */
+ if (p->mark.i.inode == inode) {
atomic_long_inc(&p->refs);
return p;
}
@@ -209,38 +221,19 @@ static struct audit_chunk *find_chunk(struct node *p)
static void untag_chunk(struct node *p)
{
struct audit_chunk *chunk = find_chunk(p);
+ struct fsnotify_mark *entry = &chunk->mark;
struct audit_chunk *new;
struct audit_tree *owner;
int size = chunk->count - 1;
int i, j;
- if (!pin_inotify_watch(&chunk->watch)) {
- /*
- * Filesystem is shutting down; all watches are getting
- * evicted, just take it off the node list for this
- * tree and let the eviction logics take care of the
- * rest.
- */
- owner = p->owner;
- if (owner->root == chunk) {
- list_del_init(&owner->same_root);
- owner->root = NULL;
- }
- list_del_init(&p->list);
- p->owner = NULL;
- put_tree(owner);
- return;
- }
+ fsnotify_get_mark(entry);
spin_unlock(&hash_lock);
- /*
- * pin_inotify_watch() succeeded, so the watch won't go away
- * from under us.
- */
- mutex_lock(&chunk->watch.inode->inotify_mutex);
- if (chunk->dead) {
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ spin_lock(&entry->lock);
+ if (chunk->dead || !entry->i.inode) {
+ spin_unlock(&entry->lock);
goto out;
}
@@ -255,16 +248,17 @@ static void untag_chunk(struct node *p)
list_del_init(&p->list);
list_del_rcu(&chunk->hash);
spin_unlock(&hash_lock);
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
goto out;
}
new = alloc_chunk(size);
if (!new)
goto Fallback;
- if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) {
+ fsnotify_duplicate_mark(&new->mark, entry);
+ if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
free_chunk(new);
goto Fallback;
}
@@ -297,9 +291,9 @@ static void untag_chunk(struct node *p)
list_for_each_entry(owner, &new->trees, same_root)
owner->root = new;
spin_unlock(&hash_lock);
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
goto out;
Fallback:
@@ -313,31 +307,33 @@ Fallback:
p->owner = NULL;
put_tree(owner);
spin_unlock(&hash_lock);
- mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ spin_unlock(&entry->lock);
out:
- unpin_inotify_watch(&chunk->watch);
+ fsnotify_put_mark(entry);
spin_lock(&hash_lock);
}
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
+ struct fsnotify_mark *entry;
struct audit_chunk *chunk = alloc_chunk(1);
if (!chunk)
return -ENOMEM;
- if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | IN_DELETE_SELF) < 0) {
+ entry = &chunk->mark;
+ if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
free_chunk(chunk);
return -ENOSPC;
}
- mutex_lock(&inode->inotify_mutex);
+ spin_lock(&entry->lock);
spin_lock(&hash_lock);
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&entry->lock);
+ fsnotify_destroy_mark(entry);
+ fsnotify_put_mark(entry);
return 0;
}
chunk->owners[0].index = (1U << 31);
@@ -350,30 +346,31 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree)
}
insert_hash(chunk);
spin_unlock(&hash_lock);
- mutex_unlock(&inode->inotify_mutex);
+ spin_unlock(&entry->lock);
return 0;
}
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
- struct inotify_watch *watch;
+ struct fsnotify_mark *old_entry, *chunk_entry;
struct audit_tree *owner;
struct audit_chunk *chunk, *old;
struct node *p;
int n;
- if (inotify_find_watch(rtree_ih, inode, &watch) < 0)
+ old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
+ if (!old_entry)
return create_chunk(inode, tree);
- old = container_of(watch, struct audit_chunk, watch);
+ old = container_of(old_entry, struct audit_chunk, mark);
/* are we already there? */
spin_lock(&hash_lock);
for (n = 0; n < old->count; n++) {
if (old->owners[n].owner == tree) {
spin_unlock(&hash_lock);
- put_inotify_watch(&old->watch);
+ fsnotify_put_mark(old_entry);
return 0;
}
}
@@ -381,25 +378,44 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
chunk = alloc_chunk(old->count + 1);
if (!chunk) {
- put_inotify_watch(&old->watch);
+ fsnotify_put_mark(old_entry);
return -ENOMEM;
}
- mutex_lock(&inode->inotify_mutex);
- if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) {
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
+ chunk_entry = &chunk->mark;
+
+ spin_lock(&old_entry->lock);
+ if (!old_entry->i.inode) {
+ /* old_entry is being shot, lets just lie */
+ spin_unlock(&old_entry->lock);
+ fsnotify_put_mark(old_entry);
free_chunk(chunk);
+ return -ENOENT;
+ }
+
+ fsnotify_duplicate_mark(chunk_entry, old_entry);
+ if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
+ spin_unlock(&old_entry->lock);
+ free_chunk(chunk);
+ fsnotify_put_mark(old_entry);
return -ENOSPC;
}
+
+ /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
+ spin_lock(&chunk_entry->lock);
spin_lock(&hash_lock);
+
+ /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
if (tree->goner) {
spin_unlock(&hash_lock);
chunk->dead = 1;
- inotify_evict_watch(&chunk->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch);
- put_inotify_watch(&chunk->watch);
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+
+ fsnotify_destroy_mark(chunk_entry);
+
+ fsnotify_put_mark(chunk_entry);
+ fsnotify_put_mark(old_entry);
return 0;
}
list_replace_init(&old->trees, &chunk->trees);
@@ -425,10 +441,11 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
list_add(&tree->same_root, &chunk->trees);
}
spin_unlock(&hash_lock);
- inotify_evict_watch(&old->watch);
- mutex_unlock(&inode->inotify_mutex);
- put_inotify_watch(&old->watch); /* pair to inotify_find_watch */
- put_inotify_watch(&old->watch); /* and kill it */
+ spin_unlock(&chunk_entry->lock);
+ spin_unlock(&old_entry->lock);
+ fsnotify_destroy_mark(old_entry);
+ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
+ fsnotify_put_mark(old_entry); /* and kill it */
return 0;
}
@@ -581,7 +598,8 @@ void audit_trim_trees(void)
spin_lock(&hash_lock);
list_for_each_entry(node, &tree->chunks, list) {
struct audit_chunk *chunk = find_chunk(node);
- struct inode *inode = chunk->watch.inode;
+ /* this could be NULL if the watch is dieing else where... */
+ struct inode *inode = chunk->mark.i.inode;
struct vfsmount *mnt;
node->index |= 1U<<31;
list_for_each_entry(mnt, &list, mnt_list) {
@@ -889,7 +907,6 @@ void audit_kill_trees(struct list_head *list)
* Here comes the stuff asynchronous to auditctl operations
*/
-/* inode->inotify_mutex is locked */
static void evict_chunk(struct audit_chunk *chunk)
{
struct audit_tree *owner;
@@ -928,35 +945,42 @@ static void evict_chunk(struct audit_chunk *chunk)
mutex_unlock(&audit_filter_mutex);
}
-static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask,
- u32 cookie, const char *dname, struct inode *inode)
+static int audit_tree_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
{
- struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+ BUG();
+ return -EOPNOTSUPP;
+}
- if (mask & IN_IGNORED) {
- evict_chunk(chunk);
- put_inotify_watch(watch);
- }
+static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
+{
+ struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
+
+ evict_chunk(chunk);
+ fsnotify_put_mark(entry);
}
-static void destroy_watch(struct inotify_watch *watch)
+static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
{
- struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
- call_rcu(&chunk->head, __put_chunk);
+ return 0;
}
-static const struct inotify_operations rtree_inotify_ops = {
- .handle_event = handle_event,
- .destroy_watch = destroy_watch,
+static const struct fsnotify_ops audit_tree_ops = {
+ .handle_event = audit_tree_handle_event,
+ .should_send_event = audit_tree_send_event,
+ .free_group_priv = NULL,
+ .free_event_priv = NULL,
+ .freeing_mark = audit_tree_freeing_mark,
};
static int __init audit_tree_init(void)
{
int i;
- rtree_ih = inotify_init(&rtree_inotify_ops);
- if (IS_ERR(rtree_ih))
- audit_panic("cannot initialize inotify handle for rectree watches");
+ audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
+ if (IS_ERR(audit_tree_group))
+ audit_panic("cannot initialize fsnotify group for rectree watches");
for (i = 0; i < HASH_SIZE; i++)
INIT_LIST_HEAD(&chunk_hash_heads[i]);
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index cc7e87936cbc..31f9be8b62eb 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -24,17 +24,17 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/fs.h>
+#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/netlink.h>
#include <linux/sched.h>
-#include <linux/inotify.h>
#include <linux/security.h>
#include "audit.h"
/*
* Reference counting:
*
- * audit_parent: lifetime is from audit_init_parent() to receipt of an IN_IGNORED
+ * audit_parent: lifetime is from audit_init_parent() to receipt of an FS_IGNORED
* event. Each audit_watch holds a reference to its associated parent.
*
* audit_watch: if added to lists, lifetime is from audit_init_watch() to
@@ -50,40 +50,61 @@ struct audit_watch {
unsigned long ino; /* associated inode number */
struct audit_parent *parent; /* associated parent */
struct list_head wlist; /* entry in parent->watches list */
- struct list_head rules; /* associated rules */
+ struct list_head rules; /* anchor for krule->rlist */
};
struct audit_parent {
- struct list_head ilist; /* entry in inotify registration list */
- struct list_head watches; /* associated watches */
- struct inotify_watch wdata; /* inotify watch data */
- unsigned flags; /* status flags */
+ struct list_head watches; /* anchor for audit_watch->wlist */
+ struct fsnotify_mark mark; /* fsnotify mark on the inode */
};
-/* Inotify handle. */
-struct inotify_handle *audit_ih;
+/* fsnotify handle. */
+struct fsnotify_group *audit_watch_group;
-/*
- * audit_parent status flags:
- *
- * AUDIT_PARENT_INVALID - set anytime rules/watches are auto-removed due to
- * a filesystem event to ensure we're adding audit watches to a valid parent.
- * Technically not needed for IN_DELETE_SELF or IN_UNMOUNT events, as we cannot
- * receive them while we have nameidata, but must be used for IN_MOVE_SELF which
- * we can receive while holding nameidata.
- */
-#define AUDIT_PARENT_INVALID 0x001
+/* fsnotify events we care about. */
+#define AUDIT_FS_WATCH (FS_MOVE | FS_CREATE | FS_DELETE | FS_DELETE_SELF |\
+ FS_MOVE_SELF | FS_EVENT_ON_CHILD)
-/* Inotify events we care about. */
-#define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
+static void audit_free_parent(struct audit_parent *parent)
+{
+ WARN_ON(!list_empty(&parent->watches));
+ kfree(parent);
+}
-static void audit_free_parent(struct inotify_watch *i_watch)
+static void audit_watch_free_mark(struct fsnotify_mark *entry)
{
struct audit_parent *parent;
- parent = container_of(i_watch, struct audit_parent, wdata);
- WARN_ON(!list_empty(&parent->watches));
- kfree(parent);
+ parent = container_of(entry, struct audit_parent, mark);
+ audit_free_parent(parent);
+}
+
+static void audit_get_parent(struct audit_parent *parent)
+{
+ if (likely(parent))
+ fsnotify_get_mark(&parent->mark);
+}
+
+static void audit_put_parent(struct audit_parent *parent)
+{
+ if (likely(parent))
+ fsnotify_put_mark(&parent->mark);
+}
+
+/*
+ * Find and return the audit_parent on the given inode. If found a reference
+ * is taken on this parent.
+ */
+static inline struct audit_parent *audit_find_parent(struct inode *inode)
+{
+ struct audit_parent *parent = NULL;
+ struct fsnotify_mark *entry;
+
+ entry = fsnotify_find_inode_mark(audit_watch_group, inode);
+ if (entry)
+ parent = container_of(entry, struct audit_parent, mark);
+
+ return parent;
}
void audit_get_watch(struct audit_watch *watch)
@@ -104,7 +125,7 @@ void audit_put_watch(struct audit_watch *watch)
void audit_remove_watch(struct audit_watch *watch)
{
list_del(&watch->wlist);
- put_inotify_watch(&watch->parent->wdata);
+ audit_put_parent(watch->parent);
watch->parent = NULL;
audit_put_watch(watch); /* match initial get */
}
@@ -114,44 +135,36 @@ char *audit_watch_path(struct audit_watch *watch)
return watch->path;
}
-struct list_head *audit_watch_rules(struct audit_watch *watch)
+int audit_watch_compare(struct audit_watch *watch, unsigned long ino, dev_t dev)
{
- return &watch->rules;
-}
-
-unsigned long audit_watch_inode(struct audit_watch *watch)
-{
- return watch->ino;
-}
-
-dev_t audit_watch_dev(struct audit_watch *watch)
-{
- return watch->dev;
+ return (watch->ino != (unsigned long)-1) &&
+ (watch->ino == ino) &&
+ (watch->dev == dev);
}
/* Initialize a parent watch entry. */
static struct audit_parent *audit_init_parent(struct nameidata *ndp)
{
+ struct inode *inode = ndp->path.dentry->d_inode;
struct audit_parent *parent;
- s32 wd;
+ int ret;
parent = kzalloc(sizeof(*parent), GFP_KERNEL);
if (unlikely(!parent))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&parent->watches);
- parent->flags = 0;
-
- inotify_init_watch(&parent->wdata);
- /* grab a ref so inotify watch hangs around until we take audit_filter_mutex */
- get_inotify_watch(&parent->wdata);
- wd = inotify_add_watch(audit_ih, &parent->wdata,
- ndp->path.dentry->d_inode, AUDIT_IN_WATCH);
- if (wd < 0) {
- audit_free_parent(&parent->wdata);
- return ERR_PTR(wd);
+
+ fsnotify_init_mark(&parent->mark, audit_watch_free_mark);
+ parent->mark.mask = AUDIT_FS_WATCH;
+ ret = fsnotify_add_mark(&parent->mark, audit_watch_group, inode, NULL, 0);
+ if (ret < 0) {
+ audit_free_parent(parent);
+ return ERR_PTR(ret);
}
+ fsnotify_recalc_group_mask(audit_watch_group);
+
return parent;
}
@@ -178,7 +191,7 @@ int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)
{
struct audit_watch *watch;
- if (!audit_ih)
+ if (!audit_watch_group)
return -EOPNOTSUPP;
if (path[0] != '/' || path[len-1] == '/' ||
@@ -216,7 +229,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old)
new->dev = old->dev;
new->ino = old->ino;
- get_inotify_watch(&old->parent->wdata);
+ audit_get_parent(old->parent);
new->parent = old->parent;
out:
@@ -250,15 +263,19 @@ static void audit_update_watch(struct audit_parent *parent,
struct audit_entry *oentry, *nentry;
mutex_lock(&audit_filter_mutex);
+ /* Run all of the watches on this parent looking for the one that
+ * matches the given dname */
list_for_each_entry_safe(owatch, nextw, &parent->watches, wlist) {
if (audit_compare_dname_path(dname, owatch->path, NULL))
continue;
/* If the update involves invalidating rules, do the inode-based
* filtering now, so we don't omit records. */
- if (invalidating && current->audit_context)
+ if (invalidating && !audit_dummy_context())
audit_filter_inodes(current, current->audit_context);
+ /* updating ino will likely change which audit_hash_list we
+ * are on so we need a new watch for the new list */
nwatch = audit_dupe_watch(owatch);
if (IS_ERR(nwatch)) {
mutex_unlock(&audit_filter_mutex);
@@ -274,12 +291,21 @@ static void audit_update_watch(struct audit_parent *parent,
list_del(&oentry->rule.rlist);
list_del_rcu(&oentry->list);
- nentry = audit_dupe_rule(&oentry->rule, nwatch);
+ nentry = audit_dupe_rule(&oentry->rule);
if (IS_ERR(nentry)) {
list_del(&oentry->rule.list);
audit_panic("error updating watch, removing");
} else {
int h = audit_hash_ino((u32)ino);
+
+ /*
+ * nentry->rule.watch == oentry->rule.watch so
+ * we must drop that reference and set it to our
+ * new watch.
+ */
+ audit_put_watch(nentry->rule.watch);
+ audit_get_watch(nwatch);
+ nentry->rule.watch = nwatch;
list_add(&nentry->rule.rlist, &nwatch->rules);
list_add_rcu(&nentry->list, &audit_inode_hash[h]);
list_replace(&oentry->rule.list,
@@ -311,7 +337,6 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
struct audit_entry *e;
mutex_lock(&audit_filter_mutex);
- parent->flags |= AUDIT_PARENT_INVALID;
list_for_each_entry_safe(w, nextw, &parent->watches, wlist) {
list_for_each_entry_safe(r, nextr, &w->rules, rlist) {
e = container_of(r, struct audit_entry, rule);
@@ -324,20 +349,11 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
audit_remove_watch(w);
}
mutex_unlock(&audit_filter_mutex);
-}
-/* Unregister inotify watches for parents on in_list.
- * Generates an IN_IGNORED event. */
-void audit_inotify_unregister(struct list_head *in_list)
-{
- struct audit_parent *p, *n;
+ fsnotify_destroy_mark(&parent->mark);
+
+ fsnotify_recalc_group_mask(audit_watch_group);
- list_for_each_entry_safe(p, n, in_list, ilist) {
- list_del(&p->ilist);
- inotify_rm_watch(audit_ih, &p->wdata);
- /* the unpin matching the pin in audit_do_del_rule() */
- unpin_inotify_watch(&p->wdata);
- }
}
/* Get path information necessary for adding watches. */
@@ -388,7 +404,7 @@ static void audit_put_nd(struct nameidata *ndp, struct nameidata *ndw)
}
}
-/* Associate the given rule with an existing parent inotify_watch.
+/* Associate the given rule with an existing parent.
* Caller must hold audit_filter_mutex. */
static void audit_add_to_parent(struct audit_krule *krule,
struct audit_parent *parent)
@@ -396,6 +412,8 @@ static void audit_add_to_parent(struct audit_krule *krule,
struct audit_watch *w, *watch = krule->watch;
int watch_found = 0;
+ BUG_ON(!mutex_is_locked(&audit_filter_mutex));
+
list_for_each_entry(w, &parent->watches, wlist) {
if (strcmp(watch->path, w->path))
continue;
@@ -412,7 +430,7 @@ static void audit_add_to_parent(struct audit_krule *krule,
}
if (!watch_found) {
- get_inotify_watch(&parent->wdata);
+ audit_get_parent(parent);
watch->parent = parent;
list_add(&watch->wlist, &parent->watches);
@@ -422,13 +440,12 @@ static void audit_add_to_parent(struct audit_krule *krule,
/* Find a matching watch entry, or add this one.
* Caller must hold audit_filter_mutex. */
-int audit_add_watch(struct audit_krule *krule)
+int audit_add_watch(struct audit_krule *krule, struct list_head **list)
{
struct audit_watch *watch = krule->watch;
- struct inotify_watch *i_watch;
struct audit_parent *parent;
struct nameidata *ndp = NULL, *ndw = NULL;
- int ret = 0;
+ int h, ret = 0;
mutex_unlock(&audit_filter_mutex);
@@ -440,47 +457,38 @@ int audit_add_watch(struct audit_krule *krule)
goto error;
}
+ mutex_lock(&audit_filter_mutex);
+
/* update watch filter fields */
if (ndw) {
watch->dev = ndw->path.dentry->d_inode->i_sb->s_dev;
watch->ino = ndw->path.dentry->d_inode->i_ino;
}
- /* The audit_filter_mutex must not be held during inotify calls because
- * we hold it during inotify event callback processing. If an existing
- * inotify watch is found, inotify_find_watch() grabs a reference before
- * returning.
- */
- if (inotify_find_watch(audit_ih, ndp->path.dentry->d_inode,
- &i_watch) < 0) {
+ /* either find an old parent or attach a new one */
+ parent = audit_find_parent(ndp->path.dentry->d_inode);
+ if (!parent) {
parent = audit_init_parent(ndp);
if (IS_ERR(parent)) {
- /* caller expects mutex locked */
- mutex_lock(&audit_filter_mutex);
ret = PTR_ERR(parent);
goto error;
}
- } else
- parent = container_of(i_watch, struct audit_parent, wdata);
-
- mutex_lock(&audit_filter_mutex);
+ }
- /* parent was moved before we took audit_filter_mutex */
- if (parent->flags & AUDIT_PARENT_INVALID)
- ret = -ENOENT;
- else
- audit_add_to_parent(krule, parent);
+ audit_add_to_parent(krule, parent);
- /* match get in audit_init_parent or inotify_find_watch */
- put_inotify_watch(&parent->wdata);
+ /* match get in audit_find_parent or audit_init_parent */
+ audit_put_parent(parent);
+ h = audit_hash_ino((u32)watch->ino);
+ *list = &audit_inode_hash[h];
error:
audit_put_nd(ndp, ndw); /* NULL args OK */
return ret;
}
-void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
+void audit_remove_watch_rule(struct audit_krule *krule)
{
struct audit_watch *watch = krule->watch;
struct audit_parent *parent = watch->parent;
@@ -491,53 +499,92 @@ void audit_remove_watch_rule(struct audit_krule *krule, struct list_head *list)
audit_remove_watch(watch);
if (list_empty(&parent->watches)) {
- /* Put parent on the inotify un-registration
- * list. Grab a reference before releasing
- * audit_filter_mutex, to be released in
- * audit_inotify_unregister().
- * If filesystem is going away, just leave
- * the sucker alone, eviction will take
- * care of it. */
- if (pin_inotify_watch(&parent->wdata))
- list_add(&parent->ilist, list);
+ audit_get_parent(parent);
+ fsnotify_destroy_mark(&parent->mark);
+ audit_put_parent(parent);
}
}
+
+ fsnotify_recalc_group_mask(audit_watch_group);
+
}
-/* Update watch data in audit rules based on inotify events. */
-static void audit_handle_ievent(struct inotify_watch *i_watch, u32 wd, u32 mask,
- u32 cookie, const char *dname, struct inode *inode)
+static bool audit_watch_should_send_event(struct fsnotify_group *group, struct inode *inode,
+ struct vfsmount *mnt, __u32 mask, void *data,
+ int data_type)
{
+ struct fsnotify_mark *entry;
+ bool send;
+
+ entry = fsnotify_find_inode_mark(group, inode);
+ if (!entry)
+ return false;
+
+ mask = (mask & ~FS_EVENT_ON_CHILD);
+ send = (entry->mask & mask);
+
+ /* find took a reference */
+ fsnotify_put_mark(entry);
+
+ return send;
+}
+
+/* Update watch data in audit rules based on fsnotify events. */
+static int audit_watch_handle_event(struct fsnotify_group *group, struct fsnotify_event *event)
+{
+ struct inode *inode;
+ __u32 mask = event->mask;
+ const char *dname = event->file_name;
struct audit_parent *parent;
- parent = container_of(i_watch, struct audit_parent, wdata);
+ BUG_ON(group != audit_watch_group);
- if (mask & (IN_CREATE|IN_MOVED_TO) && inode)
- audit_update_watch(parent, dname, inode->i_sb->s_dev,
- inode->i_ino, 0);
- else if (mask & (IN_DELETE|IN_MOVED_FROM))
+ parent = audit_find_parent(event->to_tell);
+ if (unlikely(!parent))
+ return 0;
+
+ switch (event->data_type) {
+ case (FSNOTIFY_EVENT_PATH):
+ inode = event->path.dentry->d_inode;
+ break;
+ case (FSNOTIFY_EVENT_INODE):
+ inode = event->inode;
+ break;
+ default:
+ BUG();
+ inode = NULL;
+ break;
+ };
+
+ if (mask & (FS_CREATE|FS_MOVED_TO) && inode)
+ audit_update_watch(parent, dname, inode->i_sb->s_dev, inode->i_ino, 0);
+ else if (mask & (FS_DELETE|FS_MOVED_FROM))
audit_update_watch(parent, dname, (dev_t)-1, (unsigned long)-1, 1);
- /* inotify automatically removes the watch and sends IN_IGNORED */
- else if (mask & (IN_DELETE_SELF|IN_UNMOUNT))
+ else if (mask & (FS_DELETE_SELF|FS_UNMOUNT|FS_MOVE_SELF))
audit_remove_parent_watches(parent);
- /* inotify does not remove the watch, so remove it manually */
- else if(mask & IN_MOVE_SELF) {
- audit_remove_parent_watches(parent);
- inotify_remove_watch_locked(audit_ih, i_watch);
- } else if (mask & IN_IGNORED)
- put_inotify_watch(i_watch);
+ /* moved put_inotify_watch to freeing mark */
+
+ /* matched the ref taken by audit_find_parent */
+ audit_put_parent(parent);
+
+ return 0;
}
-static const struct inotify_operations audit_inotify_ops = {
- .handle_event = audit_handle_ievent,
- .destroy_watch = audit_free_parent,
+static const struct fsnotify_ops audit_watch_fsnotify_ops = {
+ .should_send_event = audit_watch_should_send_event,
+ .handle_event = audit_watch_handle_event,
+ .free_group_priv = NULL,
+ .freeing_mark = NULL,
+ .free_event_priv = NULL,
};
static int __init audit_watch_init(void)
{
- audit_ih = inotify_init(&audit_inotify_ops);
- if (IS_ERR(audit_ih))
- audit_panic("cannot initialize inotify handle");
+ audit_watch_group = fsnotify_alloc_group(&audit_watch_fsnotify_ops);
+ if (IS_ERR(audit_watch_group)) {
+ audit_watch_group = NULL;
+ audit_panic("cannot create audit fsnotify group");
+ }
return 0;
}
-subsys_initcall(audit_watch_init);
+device_initcall(audit_watch_init);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a70604047f3c..f5e4cae5ad82 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -70,6 +70,7 @@ static inline void audit_free_rule(struct audit_entry *e)
{
int i;
struct audit_krule *erule = &e->rule;
+
/* some rules don't have associated watches */
if (erule->watch)
audit_put_watch(erule->watch);
@@ -745,8 +746,7 @@ static inline int audit_dupe_lsm_field(struct audit_field *df,
* rule with the new rule in the filterlist, then free the old rule.
* The rlist element is undefined; list manipulations are handled apart from
* the initial copy. */
-struct audit_entry *audit_dupe_rule(struct audit_krule *old,
- struct audit_watch *watch)
+struct audit_entry *audit_dupe_rule(struct audit_krule *old)
{
u32 fcount = old->field_count;
struct audit_entry *entry;
@@ -768,8 +768,8 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
new->prio = old->prio;
new->buflen = old->buflen;
new->inode_f = old->inode_f;
- new->watch = NULL;
new->field_count = old->field_count;
+
/*
* note that we are OK with not refcounting here; audit_match_tree()
* never dereferences tree and we can't get false positives there
@@ -810,9 +810,9 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old,
}
}
- if (watch) {
- audit_get_watch(watch);
- new->watch = watch;
+ if (old->watch) {
+ audit_get_watch(old->watch);
+ new->watch = old->watch;
}
return entry;
@@ -865,7 +865,7 @@ static inline int audit_add_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
- int h, err;
+ int err;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
@@ -888,15 +888,11 @@ static inline int audit_add_rule(struct audit_entry *entry)
if (watch) {
/* audit_filter_mutex is dropped and re-taken during this call */
- err = audit_add_watch(&entry->rule);
+ err = audit_add_watch(&entry->rule, &list);
if (err) {
mutex_unlock(&audit_filter_mutex);
goto error;
}
- /* entry->rule.watch may have changed during audit_add_watch() */
- watch = entry->rule.watch;
- h = audit_hash_ino((u32)audit_watch_inode(watch));
- list = &audit_inode_hash[h];
}
if (tree) {
err = audit_add_tree_rule(&entry->rule);
@@ -948,7 +944,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
struct audit_watch *watch = entry->rule.watch;
struct audit_tree *tree = entry->rule.tree;
struct list_head *list;
- LIST_HEAD(inotify_list);
int ret = 0;
#ifdef CONFIG_AUDITSYSCALL
int dont_count = 0;
@@ -968,7 +963,7 @@ static inline int audit_del_rule(struct audit_entry *entry)
}
if (e->rule.watch)
- audit_remove_watch_rule(&e->rule, &inotify_list);
+ audit_remove_watch_rule(&e->rule);
if (e->rule.tree)
audit_remove_tree_rule(&e->rule);
@@ -986,9 +981,6 @@ static inline int audit_del_rule(struct audit_entry *entry)
#endif
mutex_unlock(&audit_filter_mutex);
- if (!list_empty(&inotify_list))
- audit_inotify_unregister(&inotify_list);
-
out:
if (watch)
audit_put_watch(watch); /* match initial get */
@@ -1322,30 +1314,23 @@ static int update_lsm_rule(struct audit_krule *r)
{
struct audit_entry *entry = container_of(r, struct audit_entry, rule);
struct audit_entry *nentry;
- struct audit_watch *watch;
- struct audit_tree *tree;
int err = 0;
if (!security_audit_rule_known(r))
return 0;
- watch = r->watch;
- tree = r->tree;
- nentry = audit_dupe_rule(r, watch);
+ nentry = audit_dupe_rule(r);
if (IS_ERR(nentry)) {
/* save the first error encountered for the
* return value */
err = PTR_ERR(nentry);
audit_panic("error updating LSM filters");
- if (watch)
+ if (r->watch)
list_del(&r->rlist);
list_del_rcu(&entry->list);
list_del(&r->list);
} else {
- if (watch) {
- list_add(&nentry->rule.rlist, audit_watch_rules(watch));
- list_del(&r->rlist);
- } else if (tree)
+ if (r->watch || r->tree)
list_replace_init(&r->rlist, &nentry->rule.rlist);
list_replace_rcu(&entry->list, &nentry->list);
list_replace(&r->list, &nentry->rule.list);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index fc0f928167e7..b58fd4bae28f 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -64,7 +64,6 @@
#include <linux/binfmts.h>
#include <linux/highmem.h>
#include <linux/syscalls.h>
-#include <linux/inotify.h>
#include <linux/capability.h>
#include <linux/fs_struct.h>
@@ -548,9 +547,8 @@ static int audit_filter_rules(struct task_struct *tsk,
}
break;
case AUDIT_WATCH:
- if (name && audit_watch_inode(rule->watch) != (unsigned long)-1)
- result = (name->dev == audit_watch_dev(rule->watch) &&
- name->ino == audit_watch_inode(rule->watch));
+ if (name)
+ result = audit_watch_compare(rule->watch, name->ino, name->dev);
break;
case AUDIT_DIR:
if (ctx)
@@ -1725,7 +1723,7 @@ static inline void handle_one(const struct inode *inode)
struct audit_tree_refs *p;
struct audit_chunk *chunk;
int count;
- if (likely(list_empty(&inode->inotify_watches)))
+ if (likely(hlist_empty(&inode->i_fsnotify_marks)))
return;
context = current->audit_context;
p = context->trees;
@@ -1768,7 +1766,7 @@ retry:
seq = read_seqbegin(&rename_lock);
for(;;) {
struct inode *inode = d->d_inode;
- if (inode && unlikely(!list_empty(&inode->inotify_watches))) {
+ if (inode && unlikely(!hlist_empty(&inode->i_fsnotify_marks))) {
struct audit_chunk *chunk;
chunk = audit_tree_lookup(inode);
if (chunk) {
diff --git a/kernel/capability.c b/kernel/capability.c
index 7f876e60521f..9e4697e9b276 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -135,7 +135,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
if (pid && (pid != task_pid_vnr(current))) {
struct task_struct *target;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
target = find_task_by_vpid(pid);
if (!target)
@@ -143,7 +143,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
else
ret = security_capget(target, pEp, pIp, pPp);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
} else
ret = security_capget(current, pEp, pIp, pPp);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0249f4be9b5c..1fbcc748044a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2468,7 +2468,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
/* make sure l doesn't vanish out from under us */
down_write(&l->mutex);
mutex_unlock(&cgrp->pidlist_mutex);
- l->use_count++;
return l;
}
}
diff --git a/kernel/compat.c b/kernel/compat.c
index f6c204f07ea6..c820552d0dc4 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -274,29 +274,50 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
return ret;
}
+static int get_compat_rlimit(struct rlimit *dst,
+ const struct compat_rlimit __user *src)
+{
+ if (!access_ok(VERIFY_READ, src, sizeof(*src)) ||
+ __get_user(dst->rlim_cur, &src->rlim_cur) ||
+ __get_user(dst->rlim_max, &src->rlim_max))
+ return -EFAULT;
+
+ if (dst->rlim_cur == COMPAT_RLIM_INFINITY)
+ dst->rlim_cur = RLIM_INFINITY;
+ if (dst->rlim_max == COMPAT_RLIM_INFINITY)
+ dst->rlim_max = RLIM_INFINITY;
+ return 0;
+}
+
+static int put_compat_rlimit(const struct rlimit *src,
+ struct compat_rlimit __user *dst)
+{
+ struct rlimit r = *src;
+
+ if (r.rlim_cur > COMPAT_RLIM_INFINITY)
+ r.rlim_cur = COMPAT_RLIM_INFINITY;
+ if (r.rlim_max > COMPAT_RLIM_INFINITY)
+ r.rlim_max = COMPAT_RLIM_INFINITY;
+
+ if (!access_ok(VERIFY_WRITE, dst, sizeof(*dst)) ||
+ __put_user(r.rlim_cur, &dst->rlim_cur) ||
+ __put_user(r.rlim_max, &dst->rlim_max))
+ return -EFAULT;
+
+ return 0;
+}
+
asmlinkage long compat_sys_setrlimit(unsigned int resource,
struct compat_rlimit __user *rlim)
{
struct rlimit r;
int ret;
- mm_segment_t old_fs = get_fs ();
- if (resource >= RLIM_NLIMITS)
- return -EINVAL;
-
- if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
- __get_user(r.rlim_cur, &rlim->rlim_cur) ||
- __get_user(r.rlim_max, &rlim->rlim_max))
- return -EFAULT;
+ ret = get_compat_rlimit(&r, rlim);
+ if (ret)
+ return ret;
- if (r.rlim_cur == COMPAT_RLIM_INFINITY)
- r.rlim_cur = RLIM_INFINITY;
- if (r.rlim_max == COMPAT_RLIM_INFINITY)
- r.rlim_max = RLIM_INFINITY;
- set_fs(KERNEL_DS);
- ret = sys_setrlimit(resource, (struct rlimit __user *) &r);
- set_fs(old_fs);
- return ret;
+ return do_setrlimit(current, resource, &r);
}
#ifdef COMPAT_RLIM_OLD_INFINITY
@@ -336,19 +357,42 @@ asmlinkage long compat_sys_getrlimit (unsigned int resource,
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
- ret = sys_getrlimit(resource, (struct rlimit __user *) &r);
+ ret = sys_getrlimit(resource, (struct rlimit __force __user *)&r);
set_fs(old_fs);
- if (!ret) {
- if (r.rlim_cur > COMPAT_RLIM_INFINITY)
- r.rlim_cur = COMPAT_RLIM_INFINITY;
- if (r.rlim_max > COMPAT_RLIM_INFINITY)
- r.rlim_max = COMPAT_RLIM_INFINITY;
+ if (!ret)
+ ret = put_compat_rlimit(&r, rlim);
+ return ret;
+}
- if (!access_ok(VERIFY_WRITE, rlim, sizeof(*rlim)) ||
- __put_user(r.rlim_cur, &rlim->rlim_cur) ||
- __put_user(r.rlim_max, &rlim->rlim_max))
- return -EFAULT;
- }
+asmlinkage long compat_sys_setprlimit(pid_t pid, unsigned int resource,
+ struct compat_rlimit __user *rlim)
+{
+ mm_segment_t old_fs = get_fs();
+ struct rlimit r;
+ int ret;
+
+ ret = get_compat_rlimit(&r, rlim);
+ if (ret)
+ return ret;
+
+ set_fs(KERNEL_DS);
+ ret = sys_setprlimit(pid, resource, (struct rlimit __force __user *)&r);
+ set_fs(old_fs);
+ return ret;
+}
+
+asmlinkage long compat_sys_getprlimit(pid_t pid, unsigned int resource,
+ struct compat_rlimit __user *rlim)
+{
+ mm_segment_t old_fs = get_fs();
+ struct rlimit r;
+ int ret;
+
+ set_fs(KERNEL_DS);
+ ret = sys_getprlimit(pid, resource, (struct rlimit __force __user *)&r);
+ set_fs(old_fs);
+ if (!ret)
+ ret = put_compat_rlimit(&r, rlim);
return ret;
}
diff --git a/kernel/debug/Makefile b/kernel/debug/Makefile
new file mode 100644
index 000000000000..fe342c07b154
--- /dev/null
+++ b/kernel/debug/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the linux kernel debugger
+#
+
+obj-$(CONFIG_KGDB) += debug_core.o gdbstub.o
+obj-$(CONFIG_VT) += kms_hooks.o
+obj-$(CONFIG_KGDB_KDB) += kdb/
+
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
new file mode 100644
index 000000000000..e0ccaf06a659
--- /dev/null
+++ b/kernel/debug/debug_core.c
@@ -0,0 +1,986 @@
+/*
+ * Kernel Debug Core
+ *
+ * Maintainer: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (C) 2000-2001 VERITAS Software Corporation.
+ * Copyright (C) 2002-2004 Timesys Corporation
+ * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
+ * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
+ * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
+ * Copyright (C) 2007 MontaVista Software, Inc.
+ * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *
+ * Contributors at various stages not listed above:
+ * Jason Wessel ( jason.wessel@windriver.com )
+ * George Anzinger <george@mvista.com>
+ * Anurekh Saxena (anurekh.saxena@timesys.com)
+ * Lake Stevens Instrument Division (Glenn Engel)
+ * Jim Kingdon, Cygnus Support.
+ *
+ * Original KGDB stub: David Grothe <dave@gcom.com>,
+ * Tigran Aivazian <tigran@sco.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+#include <linux/pid_namespace.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/console.h>
+#include <linux/threads.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/pid.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/kdb.h>
+
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+#include "debug_core.h"
+
+static int kgdb_break_asap;
+
+struct debuggerinfo_struct kgdb_info[NR_CPUS];
+
+/**
+ * kgdb_connected - Is a host GDB connected to us?
+ */
+int kgdb_connected;
+EXPORT_SYMBOL_GPL(kgdb_connected);
+
+/* All the KGDB handlers are installed */
+int kgdb_io_module_registered;
+
+/* Guard for recursive entry */
+static int exception_level;
+
+struct kgdb_io *dbg_io_ops;
+static DEFINE_SPINLOCK(kgdb_registration_lock);
+
+/* kgdb console driver is loaded */
+static int kgdb_con_registered;
+/* determine if kgdb console output should be used */
+static int kgdb_use_con;
+/* Next cpu to become the master debug core */
+int dbg_switch_cpu;
+
+/* Use kdb or gdbserver mode */
+int dbg_kdb_mode = 1;
+
+static int __init opt_kgdb_con(char *str)
+{
+ kgdb_use_con = 1;
+ return 0;
+}
+
+early_param("kgdbcon", opt_kgdb_con);
+
+module_param(kgdb_use_con, int, 0644);
+
+/*
+ * Holds information about breakpoints in a kernel. These breakpoints are
+ * added and removed by gdb.
+ */
+static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
+ [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
+};
+
+/*
+ * The CPU# of the active CPU, or -1 if none:
+ */
+atomic_t kgdb_active = ATOMIC_INIT(-1);
+EXPORT_SYMBOL_GPL(kgdb_active);
+
+/*
+ * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
+ * bootup code (which might not have percpu set up yet):
+ */
+static atomic_t passive_cpu_wait[NR_CPUS];
+static atomic_t cpu_in_kgdb[NR_CPUS];
+static atomic_t kgdb_break_tasklet_var;
+atomic_t kgdb_setting_breakpoint;
+
+struct task_struct *kgdb_usethread;
+struct task_struct *kgdb_contthread;
+
+int kgdb_single_step;
+static pid_t kgdb_sstep_pid;
+
+/* to keep track of the CPU which is doing the single stepping*/
+atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
+
+/*
+ * If you are debugging a problem where roundup (the collection of
+ * all other CPUs) is a problem [this should be extremely rare],
+ * then use the nokgdbroundup option to avoid roundup. In that case
+ * the other CPUs might interfere with your debugging context, so
+ * use this with care:
+ */
+static int kgdb_do_roundup = 1;
+
+static int __init opt_nokgdbroundup(char *str)
+{
+ kgdb_do_roundup = 0;
+
+ return 0;
+}
+
+early_param("nokgdbroundup", opt_nokgdbroundup);
+
+/*
+ * Finally, some KGDB code :-)
+ */
+
+/*
+ * Weak aliases for breakpoint management,
+ * can be overriden by architectures when needed:
+ */
+int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
+{
+ int err;
+
+ err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+
+ return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
+{
+ return probe_kernel_write((char *)addr,
+ (char *)bundle, BREAK_INSTR_SIZE);
+}
+
+int __weak kgdb_validate_break_address(unsigned long addr)
+{
+ char tmp_variable[BREAK_INSTR_SIZE];
+ int err;
+ /* Validate setting the breakpoint and then removing it. In the
+ * remove fails, the kernel needs to emit a bad message because we
+ * are deep trouble not being able to put things back the way we
+ * found them.
+ */
+ err = kgdb_arch_set_breakpoint(addr, tmp_variable);
+ if (err)
+ return err;
+ err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
+ if (err)
+ printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
+ "memory destroyed at: %lx", addr);
+ return err;
+}
+
+unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int __weak kgdb_arch_init(void)
+{
+ return 0;
+}
+
+int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
+{
+ return 0;
+}
+
+/**
+ * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
+ * @regs: Current &struct pt_regs.
+ *
+ * This function will be called if the particular architecture must
+ * disable hardware debugging while it is processing gdb packets or
+ * handling exception.
+ */
+void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
+{
+}
+
+/*
+ * Some architectures need cache flushes when we set/clear a
+ * breakpoint:
+ */
+static void kgdb_flush_swbreak_addr(unsigned long addr)
+{
+ if (!CACHE_FLUSH_IS_SAFE)
+ return;
+
+ if (current->mm && current->mm->mmap_cache) {
+ flush_cache_range(current->mm->mmap_cache,
+ addr, addr + BREAK_INSTR_SIZE);
+ }
+ /* Force flush instruction cache if it was outside the mm */
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+}
+
+/*
+ * SW breakpoint management:
+ */
+int dbg_activate_sw_breakpoints(void)
+{
+ unsigned long addr;
+ int error;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_SET)
+ continue;
+
+ addr = kgdb_break[i].bpt_addr;
+ error = kgdb_arch_set_breakpoint(addr,
+ kgdb_break[i].saved_instr);
+ if (error) {
+ ret = error;
+ printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
+ continue;
+ }
+
+ kgdb_flush_swbreak_addr(addr);
+ kgdb_break[i].state = BP_ACTIVE;
+ }
+ return ret;
+}
+
+int dbg_set_sw_break(unsigned long addr)
+{
+ int err = kgdb_validate_break_address(addr);
+ int breakno = -1;
+ int i;
+
+ if (err)
+ return err;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if ((kgdb_break[i].state == BP_SET) &&
+ (kgdb_break[i].bpt_addr == addr))
+ return -EEXIST;
+ }
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state == BP_REMOVED &&
+ kgdb_break[i].bpt_addr == addr) {
+ breakno = i;
+ break;
+ }
+ }
+
+ if (breakno == -1) {
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state == BP_UNDEFINED) {
+ breakno = i;
+ break;
+ }
+ }
+ }
+
+ if (breakno == -1)
+ return -E2BIG;
+
+ kgdb_break[breakno].state = BP_SET;
+ kgdb_break[breakno].type = BP_BREAKPOINT;
+ kgdb_break[breakno].bpt_addr = addr;
+
+ return 0;
+}
+
+int dbg_deactivate_sw_breakpoints(void)
+{
+ unsigned long addr;
+ int error;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_ACTIVE)
+ continue;
+ addr = kgdb_break[i].bpt_addr;
+ error = kgdb_arch_remove_breakpoint(addr,
+ kgdb_break[i].saved_instr);
+ if (error) {
+ printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
+ ret = error;
+ }
+
+ kgdb_flush_swbreak_addr(addr);
+ kgdb_break[i].state = BP_SET;
+ }
+ return ret;
+}
+
+int dbg_remove_sw_break(unsigned long addr)
+{
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if ((kgdb_break[i].state == BP_SET) &&
+ (kgdb_break[i].bpt_addr == addr)) {
+ kgdb_break[i].state = BP_REMOVED;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int kgdb_isremovedbreak(unsigned long addr)
+{
+ int i;
+
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if ((kgdb_break[i].state == BP_REMOVED) &&
+ (kgdb_break[i].bpt_addr == addr))
+ return 1;
+ }
+ return 0;
+}
+
+int dbg_remove_all_break(void)
+{
+ unsigned long addr;
+ int error;
+ int i;
+
+ /* Clear memory breakpoints. */
+ for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
+ if (kgdb_break[i].state != BP_ACTIVE)
+ goto setundefined;
+ addr = kgdb_break[i].bpt_addr;
+ error = kgdb_arch_remove_breakpoint(addr,
+ kgdb_break[i].saved_instr);
+ if (error)
+ printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+ addr);
+setundefined:
+ kgdb_break[i].state = BP_UNDEFINED;
+ }
+
+ /* Clear hardware breakpoints. */
+ if (arch_kgdb_ops.remove_all_hw_break)
+ arch_kgdb_ops.remove_all_hw_break();
+
+ return 0;
+}
+
+/*
+ * Return true if there is a valid kgdb I/O module. Also if no
+ * debugger is attached a message can be printed to the console about
+ * waiting for the debugger to attach.
+ *
+ * The print_wait argument is only to be true when called from inside
+ * the core kgdb_handle_exception, because it will wait for the
+ * debugger to attach.
+ */
+static int kgdb_io_ready(int print_wait)
+{
+ if (!dbg_io_ops)
+ return 0;
+ if (kgdb_connected)
+ return 1;
+ if (atomic_read(&kgdb_setting_breakpoint))
+ return 1;
+ if (print_wait) {
+#ifdef CONFIG_KGDB_KDB
+ if (!dbg_kdb_mode)
+ printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+#else
+ printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+#endif
+ }
+ return 1;
+}
+
+static int kgdb_reenter_check(struct kgdb_state *ks)
+{
+ unsigned long addr;
+
+ if (atomic_read(&kgdb_active) != raw_smp_processor_id())
+ return 0;
+
+ /* Panic on recursive debugger calls: */
+ exception_level++;
+ addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+ dbg_deactivate_sw_breakpoints();
+
+ /*
+ * If the break point removed ok at the place exception
+ * occurred, try to recover and print a warning to the end
+ * user because the user planted a breakpoint in a place that
+ * KGDB needs in order to function.
+ */
+ if (dbg_remove_sw_break(addr) == 0) {
+ exception_level = 0;
+ kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+ dbg_activate_sw_breakpoints();
+ printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
+ addr);
+ WARN_ON_ONCE(1);
+
+ return 1;
+ }
+ dbg_remove_all_break();
+ kgdb_skipexception(ks->ex_vector, ks->linux_regs);
+
+ if (exception_level > 1) {
+ dump_stack();
+ panic("Recursive entry to debugger");
+ }
+
+ printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+#ifdef CONFIG_KGDB_KDB
+ /* Allow kdb to debug itself one level */
+ return 0;
+#endif
+ dump_stack();
+ panic("Recursive entry to debugger");
+
+ return 1;
+}
+
+static void dbg_cpu_switch(int cpu, int next_cpu)
+{
+ /* Mark the cpu we are switching away from as a slave when it
+ * holds the kgdb_active token. This must be done so that the
+ * that all the cpus wait in for the debug core will not enter
+ * again as the master. */
+ if (cpu == atomic_read(&kgdb_active)) {
+ kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+ kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
+ }
+ kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
+}
+
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
+{
+ unsigned long flags;
+ int sstep_tries = 100;
+ int error;
+ int i, cpu;
+ int trace_on = 0;
+acquirelock:
+ /*
+ * Interrupts will be restored by the 'trap return' code, except when
+ * single stepping.
+ */
+ local_irq_save(flags);
+
+ cpu = ks->cpu;
+ kgdb_info[cpu].debuggerinfo = regs;
+ kgdb_info[cpu].task = current;
+ kgdb_info[cpu].ret_state = 0;
+ kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
+ /*
+ * Make sure the above info reaches the primary CPU before
+ * our cpu_in_kgdb[] flag setting does:
+ */
+ smp_wmb();
+ atomic_set(&cpu_in_kgdb[cpu], 1);
+
+ if (exception_level == 1)
+ goto cpu_master_loop;
+
+ /*
+ * CPU will loop if it is a slave or request to become a kgdb
+ * master cpu and acquire the kgdb_active lock:
+ */
+ while (1) {
+cpu_loop:
+ if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
+ kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
+ goto cpu_master_loop;
+ } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+ if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+ break;
+ } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+ if (!atomic_read(&passive_cpu_wait[cpu]))
+ goto return_normal;
+ } else {
+return_normal:
+ /* Return to normal operation by executing any
+ * hw breakpoint fixup.
+ */
+ if (arch_kgdb_ops.correct_hw_break)
+ arch_kgdb_ops.correct_hw_break();
+ if (trace_on)
+ tracing_on();
+ atomic_set(&cpu_in_kgdb[cpu], 0);
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+ return 0;
+ }
+ cpu_relax();
+ }
+
+ /*
+ * For single stepping, try to only enter on the processor
+ * that was single stepping. To gaurd against a deadlock, the
+ * kernel will only try for the value of sstep_tries before
+ * giving up and continuing on.
+ */
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
+ (kgdb_info[cpu].task &&
+ kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
+ atomic_set(&kgdb_active, -1);
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+
+ goto acquirelock;
+ }
+
+ if (!kgdb_io_ready(1)) {
+ kgdb_info[cpu].ret_state = 1;
+ goto kgdb_restore; /* No I/O connection, resume the system */
+ }
+
+ /*
+ * Don't enter if we have hit a removed breakpoint.
+ */
+ if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
+ goto kgdb_restore;
+
+ /* Call the I/O driver's pre_exception routine */
+ if (dbg_io_ops->pre_exception)
+ dbg_io_ops->pre_exception();
+
+ kgdb_disable_hw_debug(ks->linux_regs);
+
+ /*
+ * Get the passive CPU lock which will hold all the non-primary
+ * CPU in a spin state while the debugger is active
+ */
+ if (!kgdb_single_step) {
+ for (i = 0; i < NR_CPUS; i++)
+ atomic_set(&passive_cpu_wait[i], 1);
+ }
+
+#ifdef CONFIG_SMP
+ /* Signal the other CPUs to enter kgdb_wait() */
+ if ((!kgdb_single_step) && kgdb_do_roundup)
+ kgdb_roundup_cpus(flags);
+#endif
+
+ /*
+ * Wait for the other CPUs to be notified and be waiting for us:
+ */
+ for_each_online_cpu(i) {
+ while (!atomic_read(&cpu_in_kgdb[i]))
+ cpu_relax();
+ }
+
+ /*
+ * At this point the primary processor is completely
+ * in the debugger and all secondary CPUs are quiescent
+ */
+ dbg_deactivate_sw_breakpoints();
+ kgdb_single_step = 0;
+ kgdb_contthread = current;
+ exception_level = 0;
+ trace_on = tracing_is_on();
+ if (trace_on)
+ tracing_off();
+
+ while (1) {
+cpu_master_loop:
+ if (dbg_kdb_mode) {
+ error = kdb_stub(ks);
+ } else {
+ /* Talk to debugger with gdbserial protocol */
+ error = gdb_serial_stub(ks);
+ }
+ if (error == DBG_PASS_EVENT) {
+ dbg_kdb_mode = !dbg_kdb_mode;
+ } else if (error == DBG_SWITCH_CPU_EVENT) {
+ dbg_cpu_switch(cpu, dbg_switch_cpu);
+ goto cpu_loop;
+ } else {
+ kgdb_info[cpu].ret_state = error;
+ break;
+ }
+ }
+
+ /* Call the I/O driver's post_exception routine */
+ if (dbg_io_ops->post_exception)
+ dbg_io_ops->post_exception();
+
+ atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+
+ if (!kgdb_single_step) {
+ for (i = NR_CPUS-1; i >= 0; i--)
+ atomic_set(&passive_cpu_wait[i], 0);
+ /*
+ * Wait till all the CPUs have quit from the debugger,
+ * but allow a CPU that hit an exception and is
+ * waiting to become the master to remain in the debug
+ * core.
+ */
+ for_each_online_cpu(i) {
+ while (atomic_read(&cpu_in_kgdb[i]) &&
+ !(kgdb_info[i].exception_state &
+ DCPU_WANT_MASTER))
+ cpu_relax();
+ }
+ }
+
+kgdb_restore:
+ if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
+ int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
+ if (kgdb_info[sstep_cpu].task)
+ kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
+ else
+ kgdb_sstep_pid = 0;
+ }
+ if (trace_on)
+ tracing_on();
+ /* Free kgdb_active */
+ atomic_set(&kgdb_active, -1);
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+
+ return kgdb_info[cpu].ret_state;
+}
+
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ * interface locks, if any (begin_session)
+ * kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+ struct kgdb_state kgdb_var;
+ struct kgdb_state *ks = &kgdb_var;
+ int ret;
+
+ ks->cpu = raw_smp_processor_id();
+ ks->ex_vector = evector;
+ ks->signo = signo;
+ ks->err_code = ecode;
+ ks->kgdb_usethreadid = 0;
+ ks->linux_regs = regs;
+
+ if (kgdb_reenter_check(ks))
+ return 0; /* Ouch, double exception ! */
+ kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+ ret = kgdb_cpu_enter(ks, regs);
+ kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
+ DCPU_IS_SLAVE);
+ return ret;
+}
+
+int kgdb_nmicallback(int cpu, void *regs)
+{
+#ifdef CONFIG_SMP
+ struct kgdb_state kgdb_var;
+ struct kgdb_state *ks = &kgdb_var;
+
+ memset(ks, 0, sizeof(struct kgdb_state));
+ ks->cpu = cpu;
+ ks->linux_regs = regs;
+
+ if (!atomic_read(&cpu_in_kgdb[cpu]) &&
+ atomic_read(&kgdb_active) != -1 &&
+ atomic_read(&kgdb_active) != cpu) {
+ kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+ kgdb_cpu_enter(ks, regs);
+ kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
+ return 0;
+ }
+#endif
+ return 1;
+}
+
+static void kgdb_console_write(struct console *co, const char *s,
+ unsigned count)
+{
+ unsigned long flags;
+
+ /* If we're debugging, or KGDB has not connected, don't try
+ * and print. */
+ if (!kgdb_connected || atomic_read(&kgdb_active) != -1)
+ return;
+
+ local_irq_save(flags);
+ gdbstub_msg_write(s, count);
+ local_irq_restore(flags);
+}
+
+static struct console kgdbcons = {
+ .name = "kgdb",
+ .write = kgdb_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+};
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+{
+ if (!dbg_io_ops) {
+ printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+ return;
+ }
+ if (!kgdb_connected) {
+#ifdef CONFIG_KGDB_KDB
+ if (!dbg_kdb_mode)
+ printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+#else
+ printk(KERN_CRIT "Entering KGDB\n");
+#endif
+ }
+
+ kgdb_breakpoint();
+}
+
+static struct sysrq_key_op sysrq_dbg_op = {
+ .handler = sysrq_handle_dbg,
+ .help_msg = "debug(G)",
+ .action_msg = "DEBUG",
+};
+#endif
+
+static int kgdb_panic_event(struct notifier_block *self,
+ unsigned long val,
+ void *data)
+{
+ if (dbg_kdb_mode)
+ kdb_printf("PANIC: %s\n", (char *)data);
+ kgdb_breakpoint();
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kgdb_panic_event_nb = {
+ .notifier_call = kgdb_panic_event,
+};
+
+static void kgdb_register_callbacks(void)
+{
+ if (!kgdb_io_module_registered) {
+ kgdb_io_module_registered = 1;
+ kgdb_arch_init();
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kgdb_panic_event_nb);
+#ifdef CONFIG_MAGIC_SYSRQ
+ register_sysrq_key('g', &sysrq_dbg_op);
+#endif
+ if (kgdb_use_con && !kgdb_con_registered) {
+ register_console(&kgdbcons);
+ kgdb_con_registered = 1;
+ }
+ }
+}
+
+struct dbg_kms_console_ops *dbg_kms_console_core;
+EXPORT_SYMBOL_GPL(dbg_kms_console_core);
+
+int dbg_kms_console_ops_register(struct dbg_kms_console_ops *ops)
+{
+ if (dbg_kms_console_core) {
+ printk(KERN_ERR "dbg_core: KMS ops already in use\n");
+ return -1;
+ }
+ dbg_kms_console_core = ops;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dbg_kms_console_ops_register);
+
+int dbg_kms_console_ops_unregister(struct dbg_kms_console_ops *ops)
+{
+ if (dbg_kms_console_core != ops)
+ printk(KERN_ERR "dbg_core: KMS ops do not match\n");
+ dbg_kms_console_core = NULL;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dbg_kms_console_ops_unregister);
+
+static void kgdb_unregister_callbacks(void)
+{
+ /*
+ * When this routine is called KGDB should unregister from the
+ * panic handler and clean up, making sure it is not handling any
+ * break exceptions at the time.
+ */
+ if (kgdb_io_module_registered) {
+ kgdb_io_module_registered = 0;
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &kgdb_panic_event_nb);
+ kgdb_arch_exit();
+#ifdef CONFIG_MAGIC_SYSRQ
+ unregister_sysrq_key('g', &sysrq_dbg_op);
+#endif
+ if (kgdb_con_registered) {
+ unregister_console(&kgdbcons);
+ kgdb_con_registered = 0;
+ }
+ }
+}
+
+/*
+ * There are times a tasklet needs to be used vs a compiled in
+ * break point so as to cause an exception outside a kgdb I/O module,
+ * such as is the case with kgdboe, where calling a breakpoint in the
+ * I/O driver itself would be fatal.
+ */
+static void kgdb_tasklet_bpt(unsigned long ing)
+{
+ kgdb_breakpoint();
+ atomic_set(&kgdb_break_tasklet_var, 0);
+}
+
+static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
+
+void kgdb_schedule_breakpoint(void)
+{
+ if (atomic_read(&kgdb_break_tasklet_var) ||
+ atomic_read(&kgdb_active) != -1 ||
+ atomic_read(&kgdb_setting_breakpoint))
+ return;
+ atomic_inc(&kgdb_break_tasklet_var);
+ tasklet_schedule(&kgdb_tasklet_breakpoint);
+}
+EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
+
+static void kgdb_initial_breakpoint(void)
+{
+ kgdb_break_asap = 0;
+
+ printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+ kgdb_breakpoint();
+}
+
+/**
+ * kgdb_register_io_module - register KGDB IO module
+ * @new_dbg_io_ops: the io ops vector
+ *
+ * Register it with the KGDB core.
+ */
+int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
+{
+ int err;
+
+ spin_lock(&kgdb_registration_lock);
+
+ if (dbg_io_ops) {
+ spin_unlock(&kgdb_registration_lock);
+
+ printk(KERN_ERR "kgdb: Another I/O driver is already "
+ "registered with KGDB.\n");
+ return -EBUSY;
+ }
+
+ if (new_dbg_io_ops->init) {
+ err = new_dbg_io_ops->init();
+ if (err) {
+ spin_unlock(&kgdb_registration_lock);
+ return err;
+ }
+ }
+
+ dbg_io_ops = new_dbg_io_ops;
+
+ spin_unlock(&kgdb_registration_lock);
+
+ printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
+ new_dbg_io_ops->name);
+
+ /* Arm KGDB now. */
+ kgdb_register_callbacks();
+
+ if (kgdb_break_asap)
+ kgdb_initial_breakpoint();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kgdb_register_io_module);
+
+/**
+ * kkgdb_unregister_io_module - unregister KGDB IO module
+ * @old_dbg_io_ops: the io ops vector
+ *
+ * Unregister it with the KGDB core.
+ */
+void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
+{
+ BUG_ON(kgdb_connected);
+
+ /*
+ * KGDB is no longer able to communicate out, so
+ * unregister our callbacks and reset state.
+ */
+ kgdb_unregister_callbacks();
+
+ spin_lock(&kgdb_registration_lock);
+
+ WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
+ dbg_io_ops = NULL;
+
+ spin_unlock(&kgdb_registration_lock);
+
+ printk(KERN_INFO
+ "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+ old_dbg_io_ops->name);
+}
+EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
+
+int dbg_io_get_char(void)
+{
+ int ret = dbg_io_ops->read_char();
+ if (ret == NO_POLL_CHAR)
+ return -1;
+ if (!dbg_kdb_mode)
+ return ret;
+ if (ret == 127)
+ return 8;
+ return ret;
+}
+
+/**
+ * kgdb_breakpoint - generate breakpoint exception
+ *
+ * This function will generate a breakpoint exception. It is used at the
+ * beginning of a program to sync up with a debugger and can be used
+ * otherwise as a quick means to stop program execution and "break" into
+ * the debugger.
+ */
+void kgdb_breakpoint(void)
+{
+ atomic_set(&kgdb_setting_breakpoint, 1);
+ wmb(); /* Sync point before breakpoint */
+ arch_kgdb_breakpoint();
+ wmb(); /* Sync point after breakpoint */
+ atomic_set(&kgdb_setting_breakpoint, 0);
+}
+EXPORT_SYMBOL_GPL(kgdb_breakpoint);
+
+static int __init opt_kgdb_wait(char *str)
+{
+ kgdb_break_asap = 1;
+
+ if (kgdb_io_module_registered)
+ kgdb_initial_breakpoint();
+
+ return 0;
+}
+
+early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
new file mode 100644
index 000000000000..7d374c0d59fb
--- /dev/null
+++ b/kernel/debug/debug_core.h
@@ -0,0 +1,82 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _DEBUG_CORE_H_
+#define _DEBUG_CORE_H_
+/*
+ * These are the private implementation headers between the kernel
+ * debugger core and the debugger front end code.
+ */
+
+/* kernel debug core data structures */
+struct kgdb_state {
+ int ex_vector;
+ int signo;
+ int err_code;
+ int cpu;
+ int pass_exception;
+ unsigned long thr_query;
+ unsigned long threadid;
+ long kgdb_usethreadid;
+ struct pt_regs *linux_regs;
+};
+
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE 0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP 0x8 /* CPU is single stepping */
+
+struct debuggerinfo_struct {
+ void *debuggerinfo;
+ struct task_struct *task;
+ int exception_state;
+ int ret_state;
+ int irq_depth;
+};
+
+extern struct debuggerinfo_struct kgdb_info[];
+extern struct kgdb_io *dbg_io_ops;
+
+/* kernel debug core break point routines */
+extern int dbg_remove_all_break(void);
+extern int dbg_set_sw_break(unsigned long addr);
+extern int dbg_remove_sw_break(unsigned long addr);
+extern int dbg_activate_sw_breakpoints(void);
+extern int dbg_deactivate_sw_breakpoints(void);
+
+/* polled character access to i/o module */
+extern int dbg_io_get_char(void);
+
+/* stub return value for switching between the gdbstub and kdb */
+#define DBG_PASS_EVENT -12345
+/* Switch from one cpu to another */
+#define DBG_SWITCH_CPU_EVENT -123456
+extern int dbg_switch_cpu;
+
+/* gdbstub interface functions */
+extern int gdb_serial_stub(struct kgdb_state *ks);
+extern void gdbstub_msg_write(const char *s, int len);
+
+/* gdbstub functions used for kdb <-> gdbstub transition */
+extern int gdbstub_state(struct kgdb_state *ks, char *cmd);
+extern int dbg_kdb_mode;
+
+#ifdef CONFIG_KGDB_KDB
+extern int kdb_stub(struct kgdb_state *ks);
+extern int kdb_parse(const char *cmdstr);
+#else /* ! CONFIG_KGDB_KDB */
+static inline int kdb_stub(struct kgdb_state *ks)
+{
+ return DBG_PASS_EVENT;
+}
+#endif /* CONFIG_KGDB_KDB */
+
+#endif /* _DEBUG_CORE_H_ */
diff --git a/kernel/kgdb.c b/kernel/debug/gdbstub.c
index 2eb517e23514..576ff07718ea 100644
--- a/kernel/kgdb.c
+++ b/kernel/debug/gdbstub.c
@@ -1,5 +1,5 @@
/*
- * KGDB stub.
+ * Kernel Debug Core
*
* Maintainer: Jason Wessel <jason.wessel@windriver.com>
*
@@ -9,7 +9,7 @@
* Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
* Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
* Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
- * Copyright (C) 2005-2008 Wind River Systems, Inc.
+ * Copyright (C) 2005-2009 Wind River Systems, Inc.
* Copyright (C) 2007 MontaVista Software, Inc.
* Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
@@ -27,109 +27,17 @@
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
-#include <linux/pid_namespace.h>
-#include <linux/clocksource.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/console.h>
-#include <linux/threads.h>
-#include <linux/uaccess.h>
+
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/ptrace.h>
-#include <linux/reboot.h>
-#include <linux/string.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/sysrq.h>
-#include <linux/init.h>
#include <linux/kgdb.h>
-#include <linux/pid.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-
+#include <linux/kdb.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
#include <asm/cacheflush.h>
-#include <asm/byteorder.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
#include <asm/unaligned.h>
-
-static int kgdb_break_asap;
+#include "debug_core.h"
#define KGDB_MAX_THREAD_QUERY 17
-struct kgdb_state {
- int ex_vector;
- int signo;
- int err_code;
- int cpu;
- int pass_exception;
- unsigned long thr_query;
- unsigned long threadid;
- long kgdb_usethreadid;
- struct pt_regs *linux_regs;
-};
-
-static struct debuggerinfo_struct {
- void *debuggerinfo;
- struct task_struct *task;
-} kgdb_info[NR_CPUS];
-
-/**
- * kgdb_connected - Is a host GDB connected to us?
- */
-int kgdb_connected;
-EXPORT_SYMBOL_GPL(kgdb_connected);
-
-/* All the KGDB handlers are installed */
-static int kgdb_io_module_registered;
-
-/* Guard for recursive entry */
-static int exception_level;
-
-static struct kgdb_io *kgdb_io_ops;
-static DEFINE_SPINLOCK(kgdb_registration_lock);
-
-/* kgdb console driver is loaded */
-static int kgdb_con_registered;
-/* determine if kgdb console output should be used */
-static int kgdb_use_con;
-
-static int __init opt_kgdb_con(char *str)
-{
- kgdb_use_con = 1;
- return 0;
-}
-
-early_param("kgdbcon", opt_kgdb_con);
-
-module_param(kgdb_use_con, int, 0644);
-
-/*
- * Holds information about breakpoints in a kernel. These breakpoints are
- * added and removed by gdb.
- */
-static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
- [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
-};
-
-/*
- * The CPU# of the active CPU, or -1 if none:
- */
-atomic_t kgdb_active = ATOMIC_INIT(-1);
-
-/*
- * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
- * bootup code (which might not have percpu set up yet):
- */
-static atomic_t passive_cpu_wait[NR_CPUS];
-static atomic_t cpu_in_kgdb[NR_CPUS];
-atomic_t kgdb_setting_breakpoint;
-
-struct task_struct *kgdb_usethread;
-struct task_struct *kgdb_contthread;
-
-int kgdb_single_step;
-pid_t kgdb_sstep_pid;
/* Our I/O buffers. */
static char remcom_in_buffer[BUFMAX];
@@ -140,105 +48,6 @@ static unsigned long gdb_regs[(NUMREGBYTES +
sizeof(unsigned long) - 1) /
sizeof(unsigned long)];
-/* to keep track of the CPU which is doing the single stepping*/
-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
-
-/*
- * If you are debugging a problem where roundup (the collection of
- * all other CPUs) is a problem [this should be extremely rare],
- * then use the nokgdbroundup option to avoid roundup. In that case
- * the other CPUs might interfere with your debugging context, so
- * use this with care:
- */
-static int kgdb_do_roundup = 1;
-
-static int __init opt_nokgdbroundup(char *str)
-{
- kgdb_do_roundup = 0;
-
- return 0;
-}
-
-early_param("nokgdbroundup", opt_nokgdbroundup);
-
-/*
- * Finally, some KGDB code :-)
- */
-
-/*
- * Weak aliases for breakpoint management,
- * can be overriden by architectures when needed:
- */
-int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
-{
- int err;
-
- err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
-{
- return probe_kernel_write((char *)addr,
- (char *)bundle, BREAK_INSTR_SIZE);
-}
-
-int __weak kgdb_validate_break_address(unsigned long addr)
-{
- char tmp_variable[BREAK_INSTR_SIZE];
- int err;
- /* Validate setting the breakpoint and then removing it. In the
- * remove fails, the kernel needs to emit a bad message because we
- * are deep trouble not being able to put things back the way we
- * found them.
- */
- err = kgdb_arch_set_breakpoint(addr, tmp_variable);
- if (err)
- return err;
- err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
- if (err)
- printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
- "memory destroyed at: %lx", addr);
- return err;
-}
-
-unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
-{
- return instruction_pointer(regs);
-}
-
-int __weak kgdb_arch_init(void)
-{
- return 0;
-}
-
-int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
-{
- return 0;
-}
-
-void __weak
-kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
-{
- return;
-}
-
-/**
- * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
- * @regs: Current &struct pt_regs.
- *
- * This function will be called if the particular architecture must
- * disable hardware debugging while it is processing gdb packets or
- * handling exception.
- */
-void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
-{
-}
-
/*
* GDB remote protocol parser:
*/
@@ -254,6 +63,30 @@ static int hex(char ch)
return -1;
}
+#ifdef CONFIG_KGDB_KDB
+static int gdbstub_read_wait(void)
+{
+ int ret = -1;
+ int i;
+
+ /* poll any additional I/O interfaces that are defined */
+ while (ret < 0)
+ for (i = 0; kdb_poll_funcs[i] != NULL; i++) {
+ ret = kdb_poll_funcs[i]();
+ if (ret > 0)
+ break;
+ }
+ return ret;
+}
+#else
+static int gdbstub_read_wait(void)
+{
+ int ret = dbg_io_ops->read_char();
+ while (ret == NO_POLL_CHAR)
+ ret = dbg_io_ops->read_char();
+ return ret;
+}
+#endif
/* scan for the sequence $<data>#<checksum> */
static void get_packet(char *buffer)
{
@@ -267,7 +100,7 @@ static void get_packet(char *buffer)
* Spin and wait around for the start character, ignore all
* other characters:
*/
- while ((ch = (kgdb_io_ops->read_char())) != '$')
+ while ((ch = (gdbstub_read_wait())) != '$')
/* nothing */;
kgdb_connected = 1;
@@ -280,7 +113,7 @@ static void get_packet(char *buffer)
* now, read until a # or end of buffer is found:
*/
while (count < (BUFMAX - 1)) {
- ch = kgdb_io_ops->read_char();
+ ch = gdbstub_read_wait();
if (ch == '#')
break;
checksum = checksum + ch;
@@ -290,17 +123,17 @@ static void get_packet(char *buffer)
buffer[count] = 0;
if (ch == '#') {
- xmitcsum = hex(kgdb_io_ops->read_char()) << 4;
- xmitcsum += hex(kgdb_io_ops->read_char());
+ xmitcsum = hex(gdbstub_read_wait()) << 4;
+ xmitcsum += hex(gdbstub_read_wait());
if (checksum != xmitcsum)
/* failed checksum */
- kgdb_io_ops->write_char('-');
+ dbg_io_ops->write_char('-');
else
/* successful transfer */
- kgdb_io_ops->write_char('+');
- if (kgdb_io_ops->flush)
- kgdb_io_ops->flush();
+ dbg_io_ops->write_char('+');
+ if (dbg_io_ops->flush)
+ dbg_io_ops->flush();
}
} while (checksum != xmitcsum);
}
@@ -319,27 +152,27 @@ static void put_packet(char *buffer)
* $<packet info>#<checksum>.
*/
while (1) {
- kgdb_io_ops->write_char('$');
+ dbg_io_ops->write_char('$');
checksum = 0;
count = 0;
while ((ch = buffer[count])) {
- kgdb_io_ops->write_char(ch);
+ dbg_io_ops->write_char(ch);
checksum += ch;
count++;
}
- kgdb_io_ops->write_char('#');
- kgdb_io_ops->write_char(hex_asc_hi(checksum));
- kgdb_io_ops->write_char(hex_asc_lo(checksum));
- if (kgdb_io_ops->flush)
- kgdb_io_ops->flush();
+ dbg_io_ops->write_char('#');
+ dbg_io_ops->write_char(hex_asc_hi(checksum));
+ dbg_io_ops->write_char(hex_asc_lo(checksum));
+ if (dbg_io_ops->flush)
+ dbg_io_ops->flush();
/* Now see what we get in reply. */
- ch = kgdb_io_ops->read_char();
+ ch = gdbstub_read_wait();
if (ch == 3)
- ch = kgdb_io_ops->read_char();
+ ch = gdbstub_read_wait();
/* If we get an ACK, we are done. */
if (ch == '+')
@@ -352,17 +185,56 @@ static void put_packet(char *buffer)
* packet.
*/
if (ch == '$') {
- kgdb_io_ops->write_char('-');
- if (kgdb_io_ops->flush)
- kgdb_io_ops->flush();
+ dbg_io_ops->write_char('-');
+ if (dbg_io_ops->flush)
+ dbg_io_ops->flush();
return;
}
}
}
+static char gdbmsgbuf[BUFMAX + 1];
+
+void gdbstub_msg_write(const char *s, int len)
+{
+ char *bufptr;
+ int wcount;
+ int i;
+
+ if (len == 0)
+ len = strlen(s);
+
+ /* 'O'utput */
+ gdbmsgbuf[0] = 'O';
+
+ /* Fill and send buffers... */
+ while (len > 0) {
+ bufptr = gdbmsgbuf + 1;
+
+ /* Calculate how many this time */
+ if ((len << 1) > (BUFMAX - 2))
+ wcount = (BUFMAX - 2) >> 1;
+ else
+ wcount = len;
+
+ /* Pack in hex chars */
+ for (i = 0; i < wcount; i++)
+ bufptr = pack_hex_byte(bufptr, s[i]);
+ *bufptr = '\0';
+
+ /* Move up */
+ s += wcount;
+ len -= wcount;
+
+ /* Write packet */
+ put_packet(gdbmsgbuf);
+ }
+}
+
/*
- * Convert the memory pointed to by mem into hex, placing result in buf.
- * Return a pointer to the last char put in buf (null). May return an error.
+ * Convert the memory pointed to by mem into hex, placing result in
+ * buf. Return a pointer to the last char put in buf (null). May
+ * return an error.
*/
int kgdb_mem2hex(char *mem, char *buf, int count)
{
@@ -390,34 +262,9 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
}
/*
- * Copy the binary array pointed to by buf into mem. Fix $, #, and
- * 0x7d escaped with 0x7d. Return a pointer to the character after
- * the last byte written.
- */
-static int kgdb_ebin2mem(char *buf, char *mem, int count)
-{
- int err = 0;
- char c;
-
- while (count-- > 0) {
- c = *buf++;
- if (c == 0x7d)
- c = *buf++ ^ 0x20;
-
- err = probe_kernel_write(mem, &c, 1);
- if (err)
- break;
-
- mem++;
- }
-
- return err;
-}
-
-/*
- * Convert the hex array pointed to by buf into binary to be placed in mem.
- * Return a pointer to the character AFTER the last byte written.
- * May return an error.
+ * Convert the hex array pointed to by buf into binary to be placed in
+ * mem. Return a pointer to the character AFTER the last byte
+ * written. May return an error.
*/
int kgdb_hex2mem(char *buf, char *mem, int count)
{
@@ -472,6 +319,31 @@ int kgdb_hex2long(char **ptr, unsigned long *long_val)
return num;
}
+/*
+ * Copy the binary array pointed to by buf into mem. Fix $, #, and
+ * 0x7d escaped with 0x7d. Return a pointer to the character after
+ * the last byte written.
+ */
+static int kgdb_ebin2mem(char *buf, char *mem, int count)
+{
+ int err = 0;
+ char c;
+
+ while (count-- > 0) {
+ c = *buf++;
+ if (c == 0x7d)
+ c = *buf++ ^ 0x20;
+
+ err = probe_kernel_write(mem, &c, 1);
+ if (err)
+ break;
+
+ mem++;
+ }
+
+ return err;
+}
+
/* Write memory due to an 'M' or 'X' packet. */
static int write_mem_msg(int binary)
{
@@ -562,209 +434,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
return find_task_by_pid_ns(tid, &init_pid_ns);
}
-/*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
- unsigned long flags;
- int cpu;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- kgdb_info[cpu].debuggerinfo = regs;
- kgdb_info[cpu].task = current;
- /*
- * Make sure the above info reaches the primary CPU before
- * our cpu_in_kgdb[] flag setting does:
- */
- smp_wmb();
- atomic_set(&cpu_in_kgdb[cpu], 1);
-
- /* Wait till primary CPU is done with debugging */
- while (atomic_read(&passive_cpu_wait[cpu]))
- cpu_relax();
-
- kgdb_info[cpu].debuggerinfo = NULL;
- kgdb_info[cpu].task = NULL;
-
- /* fix up hardware debug registers on local cpu */
- if (arch_kgdb_ops.correct_hw_break)
- arch_kgdb_ops.correct_hw_break();
-
- /* Signal the primary CPU that we are done: */
- atomic_set(&cpu_in_kgdb[cpu], 0);
- touch_softlockup_watchdog();
- clocksource_touch_watchdog();
- local_irq_restore(flags);
-}
-#endif
-
-/*
- * Some architectures need cache flushes when we set/clear a
- * breakpoint:
- */
-static void kgdb_flush_swbreak_addr(unsigned long addr)
-{
- if (!CACHE_FLUSH_IS_SAFE)
- return;
-
- if (current->mm && current->mm->mmap_cache) {
- flush_cache_range(current->mm->mmap_cache,
- addr, addr + BREAK_INSTR_SIZE);
- }
- /* Force flush instruction cache if it was outside the mm */
- flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
-}
-
-/*
- * SW breakpoint management:
- */
-static int kgdb_activate_sw_breakpoints(void)
-{
- unsigned long addr;
- int error;
- int ret = 0;
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state != BP_SET)
- continue;
-
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_set_breakpoint(addr,
- kgdb_break[i].saved_instr);
- if (error) {
- ret = error;
- printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
- continue;
- }
-
- kgdb_flush_swbreak_addr(addr);
- kgdb_break[i].state = BP_ACTIVE;
- }
- return ret;
-}
-
-static int kgdb_set_sw_break(unsigned long addr)
-{
- int err = kgdb_validate_break_address(addr);
- int breakno = -1;
- int i;
-
- if (err)
- return err;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if ((kgdb_break[i].state == BP_SET) &&
- (kgdb_break[i].bpt_addr == addr))
- return -EEXIST;
- }
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state == BP_REMOVED &&
- kgdb_break[i].bpt_addr == addr) {
- breakno = i;
- break;
- }
- }
-
- if (breakno == -1) {
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state == BP_UNDEFINED) {
- breakno = i;
- break;
- }
- }
- }
-
- if (breakno == -1)
- return -E2BIG;
-
- kgdb_break[breakno].state = BP_SET;
- kgdb_break[breakno].type = BP_BREAKPOINT;
- kgdb_break[breakno].bpt_addr = addr;
-
- return 0;
-}
-
-static int kgdb_deactivate_sw_breakpoints(void)
-{
- unsigned long addr;
- int error;
- int ret = 0;
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state != BP_ACTIVE)
- continue;
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_remove_breakpoint(addr,
- kgdb_break[i].saved_instr);
- if (error) {
- printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
- ret = error;
- }
-
- kgdb_flush_swbreak_addr(addr);
- kgdb_break[i].state = BP_SET;
- }
- return ret;
-}
-
-static int kgdb_remove_sw_break(unsigned long addr)
-{
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if ((kgdb_break[i].state == BP_SET) &&
- (kgdb_break[i].bpt_addr == addr)) {
- kgdb_break[i].state = BP_REMOVED;
- return 0;
- }
- }
- return -ENOENT;
-}
-
-int kgdb_isremovedbreak(unsigned long addr)
-{
- int i;
-
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if ((kgdb_break[i].state == BP_REMOVED) &&
- (kgdb_break[i].bpt_addr == addr))
- return 1;
- }
- return 0;
-}
-
-static int remove_all_break(void)
-{
- unsigned long addr;
- int error;
- int i;
-
- /* Clear memory breakpoints. */
- for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
- if (kgdb_break[i].state != BP_ACTIVE)
- goto setundefined;
- addr = kgdb_break[i].bpt_addr;
- error = kgdb_arch_remove_breakpoint(addr,
- kgdb_break[i].saved_instr);
- if (error)
- printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
- addr);
-setundefined:
- kgdb_break[i].state = BP_UNDEFINED;
- }
-
- /* Clear hardware breakpoints. */
- if (arch_kgdb_ops.remove_all_hw_break)
- arch_kgdb_ops.remove_all_hw_break();
-
- return 0;
-}
/*
* Remap normal tasks to their real PID,
@@ -778,63 +447,6 @@ static inline int shadow_pid(int realpid)
return -raw_smp_processor_id() - 2;
}
-static char gdbmsgbuf[BUFMAX + 1];
-
-static void kgdb_msg_write(const char *s, int len)
-{
- char *bufptr;
- int wcount;
- int i;
-
- /* 'O'utput */
- gdbmsgbuf[0] = 'O';
-
- /* Fill and send buffers... */
- while (len > 0) {
- bufptr = gdbmsgbuf + 1;
-
- /* Calculate how many this time */
- if ((len << 1) > (BUFMAX - 2))
- wcount = (BUFMAX - 2) >> 1;
- else
- wcount = len;
-
- /* Pack in hex chars */
- for (i = 0; i < wcount; i++)
- bufptr = pack_hex_byte(bufptr, s[i]);
- *bufptr = '\0';
-
- /* Move up */
- s += wcount;
- len -= wcount;
-
- /* Write packet */
- put_packet(gdbmsgbuf);
- }
-}
-
-/*
- * Return true if there is a valid kgdb I/O module. Also if no
- * debugger is attached a message can be printed to the console about
- * waiting for the debugger to attach.
- *
- * The print_wait argument is only to be true when called from inside
- * the core kgdb_handle_exception, because it will wait for the
- * debugger to attach.
- */
-static int kgdb_io_ready(int print_wait)
-{
- if (!kgdb_io_ops)
- return 0;
- if (kgdb_connected)
- return 1;
- if (atomic_read(&kgdb_setting_breakpoint))
- return 1;
- if (print_wait)
- printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
- return 1;
-}
-
/*
* All the functions that start with gdb_cmd are the various
* operations to implement the handlers for the gdbserial protocol
@@ -850,7 +462,7 @@ static void gdb_cmd_status(struct kgdb_state *ks)
* we clear out our breakpoints now in case
* GDB is reconnecting.
*/
- remove_all_break();
+ dbg_remove_all_break();
remcom_out_buffer[0] = 'S';
pack_hex_byte(&remcom_out_buffer[1], ks->signo);
@@ -961,7 +573,7 @@ static void gdb_cmd_detachkill(struct kgdb_state *ks)
/* The detach case */
if (remcom_in_buffer[0] == 'D') {
- error = remove_all_break();
+ error = dbg_remove_all_break();
if (error < 0) {
error_packet(remcom_out_buffer, error);
} else {
@@ -974,7 +586,7 @@ static void gdb_cmd_detachkill(struct kgdb_state *ks)
* Assume the kill case, with no exit code checking,
* trying to force detach the debugger:
*/
- remove_all_break();
+ dbg_remove_all_break();
kgdb_connected = 0;
}
}
@@ -1081,6 +693,25 @@ static void gdb_cmd_query(struct kgdb_state *ks)
kgdb_mem2hex(tmpstr, remcom_out_buffer, strlen(tmpstr));
}
break;
+#ifdef CONFIG_KGDB_KDB
+ case 'R':
+ if (strncmp(remcom_in_buffer, "qRcmd,", 6) == 0) {
+ int len = strlen(remcom_in_buffer + 6);
+
+ if ((len % 2) != 0) {
+ strcpy(remcom_out_buffer, "E01");
+ break;
+ }
+ kgdb_hex2mem(remcom_in_buffer + 6,
+ remcom_out_buffer, len);
+ len = len / 2;
+ remcom_out_buffer[len++] = 0;
+
+ kdb_parse(remcom_out_buffer);
+ strcpy(remcom_out_buffer, "OK");
+ }
+ break;
+#endif
}
}
@@ -1181,9 +812,9 @@ static void gdb_cmd_break(struct kgdb_state *ks)
}
if (remcom_in_buffer[0] == 'Z' && *bpt_type == '0')
- error = kgdb_set_sw_break(addr);
+ error = dbg_set_sw_break(addr);
else if (remcom_in_buffer[0] == 'z' && *bpt_type == '0')
- error = kgdb_remove_sw_break(addr);
+ error = dbg_remove_sw_break(addr);
else if (remcom_in_buffer[0] == 'Z')
error = arch_kgdb_ops.set_hw_breakpoint(addr,
(int)length, *bpt_type - '0');
@@ -1212,12 +843,12 @@ static int gdb_cmd_exception_pass(struct kgdb_state *ks)
ks->pass_exception = 1;
remcom_in_buffer[0] = 'D';
- remove_all_break();
+ dbg_remove_all_break();
kgdb_connected = 0;
return 1;
} else {
- kgdb_msg_write("KGDB only knows signal 9 (pass)"
+ gdbstub_msg_write("KGDB only knows signal 9 (pass)"
" and 15 (pass and disconnect)\n"
"Executing a continue without signal passing\n", 0);
remcom_in_buffer[0] = 'c';
@@ -1230,7 +861,7 @@ static int gdb_cmd_exception_pass(struct kgdb_state *ks)
/*
* This function performs all gdbserial command procesing
*/
-static int gdb_serial_stub(struct kgdb_state *ks)
+int gdb_serial_stub(struct kgdb_state *ks)
{
int error = 0;
int tmp;
@@ -1308,6 +939,13 @@ static int gdb_serial_stub(struct kgdb_state *ks)
case 'Z': /* Break point set */
gdb_cmd_break(ks);
break;
+#ifdef CONFIG_KGDB_KDB
+ case '3': /* Escape into the comm_passthrough */
+ if (remcom_in_buffer[1] == '\0') {
+ gdb_cmd_detachkill(ks);
+ return DBG_PASS_EVENT;
+ }
+#endif
case 'C': /* Exception passing */
tmp = gdb_cmd_exception_pass(ks);
if (tmp > 0)
@@ -1322,7 +960,7 @@ static int gdb_serial_stub(struct kgdb_state *ks)
error_packet(remcom_out_buffer, -EINVAL);
break;
}
- kgdb_activate_sw_breakpoints();
+ dbg_activate_sw_breakpoints();
/* Fall through to default processing */
default:
default_handle:
@@ -1354,407 +992,31 @@ kgdb_exit:
return error;
}
-static int kgdb_reenter_check(struct kgdb_state *ks)
+int gdbstub_state(struct kgdb_state *ks, char *cmd)
{
- unsigned long addr;
-
- if (atomic_read(&kgdb_active) != raw_smp_processor_id())
- return 0;
-
- /* Panic on recursive debugger calls: */
- exception_level++;
- addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
- kgdb_deactivate_sw_breakpoints();
-
- /*
- * If the break point removed ok at the place exception
- * occurred, try to recover and print a warning to the end
- * user because the user planted a breakpoint in a place that
- * KGDB needs in order to function.
- */
- if (kgdb_remove_sw_break(addr) == 0) {
- exception_level = 0;
- kgdb_skipexception(ks->ex_vector, ks->linux_regs);
- kgdb_activate_sw_breakpoints();
- printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
- addr);
- WARN_ON_ONCE(1);
-
- return 1;
- }
- remove_all_break();
- kgdb_skipexception(ks->ex_vector, ks->linux_regs);
-
- if (exception_level > 1) {
- dump_stack();
- panic("Recursive entry to debugger");
- }
-
- printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
- dump_stack();
- panic("Recursive entry to debugger");
-
- return 1;
-}
-
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- * interface locks, if any (begin_session)
- * kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
-{
- struct kgdb_state kgdb_var;
- struct kgdb_state *ks = &kgdb_var;
- unsigned long flags;
- int sstep_tries = 100;
- int error = 0;
- int i, cpu;
-
- ks->cpu = raw_smp_processor_id();
- ks->ex_vector = evector;
- ks->signo = signo;
- ks->ex_vector = evector;
- ks->err_code = ecode;
- ks->kgdb_usethreadid = 0;
- ks->linux_regs = regs;
-
- if (kgdb_reenter_check(ks))
- return 0; /* Ouch, double exception ! */
-
-acquirelock:
- /*
- * Interrupts will be restored by the 'trap return' code, except when
- * single stepping.
- */
- local_irq_save(flags);
-
- cpu = raw_smp_processor_id();
-
- /*
- * Acquire the kgdb_active lock:
- */
- while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
- cpu_relax();
-
- /*
- * For single stepping, try to only enter on the processor
- * that was single stepping. To gaurd against a deadlock, the
- * kernel will only try for the value of sstep_tries before
- * giving up and continuing on.
- */
- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
- (kgdb_info[cpu].task &&
- kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
- atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog();
- clocksource_touch_watchdog();
- local_irq_restore(flags);
-
- goto acquirelock;
- }
-
- if (!kgdb_io_ready(1)) {
- error = 1;
- goto kgdb_restore; /* No I/O connection, so resume the system */
- }
-
- /*
- * Don't enter if we have hit a removed breakpoint.
- */
- if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
- goto kgdb_restore;
-
- /* Call the I/O driver's pre_exception routine */
- if (kgdb_io_ops->pre_exception)
- kgdb_io_ops->pre_exception();
-
- kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
- kgdb_info[ks->cpu].task = current;
-
- kgdb_disable_hw_debug(ks->linux_regs);
-
- /*
- * Get the passive CPU lock which will hold all the non-primary
- * CPU in a spin state while the debugger is active
- */
- if (!kgdb_single_step) {
- for (i = 0; i < NR_CPUS; i++)
- atomic_set(&passive_cpu_wait[i], 1);
- }
-
- /*
- * spin_lock code is good enough as a barrier so we don't
- * need one here:
- */
- atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
-#ifdef CONFIG_SMP
- /* Signal the other CPUs to enter kgdb_wait() */
- if ((!kgdb_single_step) && kgdb_do_roundup)
- kgdb_roundup_cpus(flags);
-#endif
-
- /*
- * Wait for the other CPUs to be notified and be waiting for us:
- */
- for_each_online_cpu(i) {
- while (!atomic_read(&cpu_in_kgdb[i]))
- cpu_relax();
- }
-
- /*
- * At this point the primary processor is completely
- * in the debugger and all secondary CPUs are quiescent
- */
- kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
- kgdb_deactivate_sw_breakpoints();
- kgdb_single_step = 0;
- kgdb_contthread = current;
- exception_level = 0;
-
- /* Talk to debugger with gdbserial protocol */
- error = gdb_serial_stub(ks);
-
- /* Call the I/O driver's post_exception routine */
- if (kgdb_io_ops->post_exception)
- kgdb_io_ops->post_exception();
-
- kgdb_info[ks->cpu].debuggerinfo = NULL;
- kgdb_info[ks->cpu].task = NULL;
- atomic_set(&cpu_in_kgdb[ks->cpu], 0);
-
- if (!kgdb_single_step) {
- for (i = NR_CPUS-1; i >= 0; i--)
- atomic_set(&passive_cpu_wait[i], 0);
- /*
- * Wait till all the CPUs have quit
- * from the debugger.
- */
- for_each_online_cpu(i) {
- while (atomic_read(&cpu_in_kgdb[i]))
- cpu_relax();
- }
- }
-
-kgdb_restore:
- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
- if (kgdb_info[sstep_cpu].task)
- kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
- else
- kgdb_sstep_pid = 0;
- }
- /* Free kgdb_active */
- atomic_set(&kgdb_active, -1);
- touch_softlockup_watchdog();
- clocksource_touch_watchdog();
- local_irq_restore(flags);
-
- return error;
-}
+ int error;
-int kgdb_nmicallback(int cpu, void *regs)
-{
-#ifdef CONFIG_SMP
- if (!atomic_read(&cpu_in_kgdb[cpu]) &&
- atomic_read(&kgdb_active) != cpu &&
- atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
- kgdb_wait((struct pt_regs *)regs);
+ switch (cmd[0]) {
+ case 'e':
+ error = kgdb_arch_handle_exception(ks->ex_vector,
+ ks->signo,
+ ks->err_code,
+ remcom_in_buffer,
+ remcom_out_buffer,
+ ks->linux_regs);
+ return error;
+ case 's':
+ case 'c':
+ strcpy(remcom_in_buffer, cmd);
return 0;
+ case '?':
+ gdb_cmd_status(ks);
+ break;
+ case '\0':
+ strcpy(remcom_out_buffer, "");
+ break;
}
-#endif
- return 1;
-}
-
-static void kgdb_console_write(struct console *co, const char *s,
- unsigned count)
-{
- unsigned long flags;
-
- /* If we're debugging, or KGDB has not connected, don't try
- * and print. */
- if (!kgdb_connected || atomic_read(&kgdb_active) != -1)
- return;
-
- local_irq_save(flags);
- kgdb_msg_write(s, count);
- local_irq_restore(flags);
-}
-
-static struct console kgdbcons = {
- .name = "kgdb",
- .write = kgdb_console_write,
- .flags = CON_PRINTBUFFER | CON_ENABLED,
- .index = -1,
-};
-
-#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_gdb(int key, struct tty_struct *tty)
-{
- if (!kgdb_io_ops) {
- printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
- return;
- }
- if (!kgdb_connected)
- printk(KERN_CRIT "Entering KGDB\n");
-
- kgdb_breakpoint();
-}
-
-static struct sysrq_key_op sysrq_gdb_op = {
- .handler = sysrq_handle_gdb,
- .help_msg = "debug(G)",
- .action_msg = "DEBUG",
-};
-#endif
-
-static void kgdb_register_callbacks(void)
-{
- if (!kgdb_io_module_registered) {
- kgdb_io_module_registered = 1;
- kgdb_arch_init();
-#ifdef CONFIG_MAGIC_SYSRQ
- register_sysrq_key('g', &sysrq_gdb_op);
-#endif
- if (kgdb_use_con && !kgdb_con_registered) {
- register_console(&kgdbcons);
- kgdb_con_registered = 1;
- }
- }
-}
-
-static void kgdb_unregister_callbacks(void)
-{
- /*
- * When this routine is called KGDB should unregister from the
- * panic handler and clean up, making sure it is not handling any
- * break exceptions at the time.
- */
- if (kgdb_io_module_registered) {
- kgdb_io_module_registered = 0;
- kgdb_arch_exit();
-#ifdef CONFIG_MAGIC_SYSRQ
- unregister_sysrq_key('g', &sysrq_gdb_op);
-#endif
- if (kgdb_con_registered) {
- unregister_console(&kgdbcons);
- kgdb_con_registered = 0;
- }
- }
-}
-
-static void kgdb_initial_breakpoint(void)
-{
- kgdb_break_asap = 0;
-
- printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
- kgdb_breakpoint();
-}
-
-/**
- * kgdb_register_io_module - register KGDB IO module
- * @new_kgdb_io_ops: the io ops vector
- *
- * Register it with the KGDB core.
- */
-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
-{
- int err;
-
- spin_lock(&kgdb_registration_lock);
-
- if (kgdb_io_ops) {
- spin_unlock(&kgdb_registration_lock);
-
- printk(KERN_ERR "kgdb: Another I/O driver is already "
- "registered with KGDB.\n");
- return -EBUSY;
- }
-
- if (new_kgdb_io_ops->init) {
- err = new_kgdb_io_ops->init();
- if (err) {
- spin_unlock(&kgdb_registration_lock);
- return err;
- }
- }
-
- kgdb_io_ops = new_kgdb_io_ops;
-
- spin_unlock(&kgdb_registration_lock);
-
- printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
- new_kgdb_io_ops->name);
-
- /* Arm KGDB now. */
- kgdb_register_callbacks();
-
- if (kgdb_break_asap)
- kgdb_initial_breakpoint();
-
+ dbg_io_ops->write_char('+');
+ put_packet(remcom_out_buffer);
return 0;
}
-EXPORT_SYMBOL_GPL(kgdb_register_io_module);
-
-/**
- * kkgdb_unregister_io_module - unregister KGDB IO module
- * @old_kgdb_io_ops: the io ops vector
- *
- * Unregister it with the KGDB core.
- */
-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
-{
- BUG_ON(kgdb_connected);
-
- /*
- * KGDB is no longer able to communicate out, so
- * unregister our callbacks and reset state.
- */
- kgdb_unregister_callbacks();
-
- spin_lock(&kgdb_registration_lock);
-
- WARN_ON_ONCE(kgdb_io_ops != old_kgdb_io_ops);
- kgdb_io_ops = NULL;
-
- spin_unlock(&kgdb_registration_lock);
-
- printk(KERN_INFO
- "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
- old_kgdb_io_ops->name);
-}
-EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
-
-/**
- * kgdb_breakpoint - generate breakpoint exception
- *
- * This function will generate a breakpoint exception. It is used at the
- * beginning of a program to sync up with a debugger and can be used
- * otherwise as a quick means to stop program execution and "break" into
- * the debugger.
- */
-void kgdb_breakpoint(void)
-{
- atomic_set(&kgdb_setting_breakpoint, 1);
- wmb(); /* Sync point before breakpoint */
- arch_kgdb_breakpoint();
- wmb(); /* Sync point after breakpoint */
- atomic_set(&kgdb_setting_breakpoint, 0);
-}
-EXPORT_SYMBOL_GPL(kgdb_breakpoint);
-
-static int __init opt_kgdb_wait(char *str)
-{
- kgdb_break_asap = 1;
-
- if (kgdb_io_module_registered)
- kgdb_initial_breakpoint();
-
- return 0;
-}
-
-early_param("kgdbwait", opt_kgdb_wait);
diff --git a/kernel/debug/kdb/.gitignore b/kernel/debug/kdb/.gitignore
new file mode 100644
index 000000000000..396d12eda9e8
--- /dev/null
+++ b/kernel/debug/kdb/.gitignore
@@ -0,0 +1 @@
+gen-kdb_cmds.c
diff --git a/kernel/debug/kdb/Makefile b/kernel/debug/kdb/Makefile
new file mode 100644
index 000000000000..93f9e643abf2
--- /dev/null
+++ b/kernel/debug/kdb/Makefile
@@ -0,0 +1,24 @@
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+# Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+#
+
+CCVERSION := $(shell $(CC) -v 2>&1 | sed -ne '$$p')
+obj-y := kdb_io.o kdb_main.o kdb_support.o kdb_bt.o gen-kdb_cmds.o kdb_bp.o kdb_debugger.o
+
+clean-files := gen-kdb_cmds.c
+
+quiet_cmd_gen-kdb = GENKDB $@
+ cmd_gen-kdb = $(AWK) 'BEGIN {print "\#include <linux/stddef.h>"; print "\#include <linux/init.h>"} \
+ /^\#/{next} \
+ /^[ \t]*$$/{next} \
+ {gsub(/"/, "\\\"", $$0); \
+ print "static __initdata char kdb_cmd" cmds++ "[] = \"" $$0 "\\n\";"} \
+ END {print "extern char *kdb_cmds[]; char __initdata *kdb_cmds[] = {"; for (i = 0; i < cmds; ++i) {print " kdb_cmd" i ","}; print(" NULL\n};");}' \
+ $(filter-out %/Makefile,$^) > $@#
+
+$(obj)/gen-kdb_cmds.c: $(src)/kdb_cmds $(if $(KDB_CMDS),(wildcard $(TOPDIR)/arch/$(KDB_CMDS))) $(src)/Makefile
+ $(call cmd,gen-kdb)
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
new file mode 100644
index 000000000000..2e3b1469276c
--- /dev/null
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -0,0 +1,567 @@
+/*
+ * Kernel Debugger Architecture Independent Breakpoint Handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdb.h>
+#include <linux/kgdb.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include "kdb_private.h"
+
+/*
+ * Table of kdb_breakpoints
+ */
+kdb_bp_t kdb_breakpoints[KDB_MAXBPT];
+
+static void kdb_setsinglestep(struct pt_regs *regs)
+{
+ KDB_STATE_SET(DOING_SS);
+}
+
+static char *kdb_rwtypes[] = {
+ "Instruction(i)",
+ "Instruction(Register)",
+ "Data Write",
+ "I/O",
+ "Data Access"
+};
+
+static char *kdb_bptype(kdb_bp_t *bp)
+{
+ if (bp->bp_type < 0 || bp->bp_type > 4)
+ return "";
+
+ return kdb_rwtypes[bp->bp_type];
+}
+
+static int kdb_parsebp(int argc, const char **argv, int *nextargp, kdb_bp_t *bp)
+{
+ int nextarg = *nextargp;
+ int diag;
+
+ bp->bph_length = 1;
+ if ((argc + 1) != nextarg) {
+ if (strnicmp(argv[nextarg], "datar", sizeof("datar")) == 0)
+ bp->bp_type = BP_ACCESS_WATCHPOINT;
+ else if (strnicmp(argv[nextarg], "dataw", sizeof("dataw")) == 0)
+ bp->bp_type = BP_WRITE_WATCHPOINT;
+ else if (strnicmp(argv[nextarg], "inst", sizeof("inst")) == 0)
+ bp->bp_type = BP_HARDWARE_BREAKPOINT;
+ else
+ return KDB_ARGCOUNT;
+
+ bp->bph_length = 1;
+
+ nextarg++;
+
+ if ((argc + 1) != nextarg) {
+ unsigned long len;
+
+ diag = kdbgetularg((char *)argv[nextarg],
+ &len);
+ if (diag)
+ return diag;
+
+
+ if (len > 8)
+ return KDB_BADLENGTH;
+
+ bp->bph_length = len;
+ nextarg++;
+ }
+
+ if ((argc + 1) != nextarg)
+ return KDB_ARGCOUNT;
+ }
+
+ *nextargp = nextarg;
+ return 0;
+}
+
+static int _kdb_bp_remove(kdb_bp_t *bp)
+{
+ int ret = 1;
+ if (!bp->bp_installed)
+ return ret;
+ if (!bp->bp_type)
+ ret = dbg_remove_sw_break(bp->bp_addr);
+ else
+ ret = arch_kgdb_ops.remove_hw_breakpoint(bp->bp_addr,
+ bp->bph_length,
+ bp->bp_type);
+ if (ret == 0)
+ bp->bp_installed = 0;
+ return ret;
+}
+
+static void kdb_handle_bp(struct pt_regs *regs, kdb_bp_t *bp)
+{
+ if (KDB_NULL_REGS(regs))
+ return;
+
+ if (KDB_DEBUG(BP))
+ kdb_printf("regs->ip = 0x%lx\n", instruction_pointer(regs));
+
+ /*
+ * Setup single step
+ */
+ kdb_setsinglestep(regs);
+
+ /*
+ * Reset delay attribute
+ */
+ bp->bp_delay = 0;
+ bp->bp_delayed = 1;
+}
+
+static int _kdb_bp_install(struct pt_regs *regs, kdb_bp_t *bp)
+{
+ int ret;
+ /*
+ * Install the breakpoint, if it is not already installed.
+ */
+
+ if (KDB_DEBUG(BP))
+ kdb_printf("%s: bp_installed %d\n",
+ __func__, bp->bp_installed);
+ if (!KDB_STATE(SSBPT))
+ bp->bp_delay = 0;
+ if (bp->bp_installed)
+ return 1;
+ if (bp->bp_delay || (bp->bp_delayed && KDB_STATE(DOING_SS))) {
+ if (KDB_DEBUG(BP))
+ kdb_printf("%s: delayed bp\n", __func__);
+ kdb_handle_bp(regs, bp);
+ return 0;
+ }
+ if (!bp->bp_type)
+ ret = dbg_set_sw_break(bp->bp_addr);
+ else
+ ret = arch_kgdb_ops.set_hw_breakpoint(bp->bp_addr,
+ bp->bph_length,
+ bp->bp_type);
+ if (ret == 0) {
+ bp->bp_installed = 1;
+ } else {
+ kdb_printf("%s: failed to set breakpoint at 0x%lx\n",
+ __func__, bp->bp_addr);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * kdb_bp_install
+ *
+ * Install kdb_breakpoints prior to returning from the
+ * kernel debugger. This allows the kdb_breakpoints to be set
+ * upon functions that are used internally by kdb, such as
+ * printk(). This function is only called once per kdb session.
+ */
+void kdb_bp_install(struct pt_regs *regs)
+{
+ int i;
+
+ for (i = 0; i < KDB_MAXBPT; i++) {
+ kdb_bp_t *bp = &kdb_breakpoints[i];
+
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("%s: bp %d bp_enabled %d\n",
+ __func__, i, bp->bp_enabled);
+ }
+ if (bp->bp_enabled)
+ _kdb_bp_install(regs, bp);
+ }
+}
+
+/*
+ * kdb_bp_remove
+ *
+ * Remove kdb_breakpoints upon entry to the kernel debugger.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+void kdb_bp_remove(void)
+{
+ int i;
+
+ for (i = KDB_MAXBPT - 1; i >= 0; i--) {
+ kdb_bp_t *bp = &kdb_breakpoints[i];
+
+ if (KDB_DEBUG(BP)) {
+ kdb_printf("%s: bp %d bp_enabled %d\n",
+ __func__, i, bp->bp_enabled);
+ }
+ if (bp->bp_enabled)
+ _kdb_bp_remove(bp);
+ }
+}
+
+
+/*
+ * kdb_printbp
+ *
+ * Internal function to format and print a breakpoint entry.
+ *
+ * Parameters:
+ * None.
+ * Outputs:
+ * None.
+ * Returns:
+ * None.
+ * Locking:
+ * None.
+ * Remarks:
+ */
+
+static void kdb_printbp(kdb_bp_t *bp, int i)
+{
+ kdb_printf("%s ", kdb_bptype(bp));
+ kdb_printf("BP #%d at ", i);
+ kdb_symbol_print(bp->bp_addr, NULL, KDB_SP_DEFAULT);
+
+ if (bp->bp_enabled)
+ kdb_printf("\n is enabled");
+ else
+ kdb_printf("\n is disabled");
+
+ kdb_printf("\taddr at %016lx, hardtype=%d installed=%d\n",
+ bp->bp_addr, bp->bp_type, bp->bp_installed);
+
+ kdb_printf("\n");
+}
+
+/*
+ * kdb_bp
+ *
+ * Handle the bp commands.
+ *
+ * [bp|bph] <addr-expression> [DATAR|DATAW]
+ *
+ * Parameters:
+ * argc Count of arguments in argv
+ * argv Space delimited command line arguments
+ * Outputs:
+ * None.
+ * Returns:
+ * Zero for success, a kdb diagnostic if failure.
+ * Locking:
+ * None.
+ * Remarks:
+ *
+ * bp Set breakpoint on all cpus. Only use hardware assist if need.
+ * bph Set breakpoint on all cpus. Force hardware register
+ */
+
+static int kdb_bp(int argc, const char **argv)
+{
+ int i, bpno;
+ kdb_bp_t *bp, *bp_check;
+ int diag;
+ int free;
+ char *symname = NULL;
+ long offset = 0ul;
+ int nextarg;
+ kdb_bp_t template = {0};
+
+ if (argc == 0) {
+ /*
+ * Display breakpoint table
+ */
+ for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT;
+ bpno++, bp++) {
+ if (bp->bp_free)
+ continue;
+ kdb_printbp(bp, bpno);
+ }
+
+ return 0;
+ }
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &template.bp_addr,
+ &offset, &symname);
+ if (diag)
+ return diag;
+ if (!template.bp_addr)
+ return KDB_BADINT;
+
+ /*
+ * Find an empty bp structure to allocate
+ */
+ free = KDB_MAXBPT;
+ for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
+ if (bp->bp_free)
+ break;
+ }
+
+ if (bpno == KDB_MAXBPT)
+ return KDB_TOOMANYBPT;
+
+ if (strcmp(argv[0], "bph") == 0) {
+ template.bp_type = BP_HARDWARE_BREAKPOINT;
+ diag = kdb_parsebp(argc, argv, &nextarg, &template);
+ if (diag)
+ return diag;
+ } else {
+ template.bp_type = BP_BREAKPOINT;
+ }
+
+ /*
+ * Check for clashing breakpoints.
+ *
+ * Note, in this design we can't have hardware breakpoints
+ * enabled for both read and write on the same address.
+ */
+ for (i = 0, bp_check = kdb_breakpoints; i < KDB_MAXBPT;
+ i++, bp_check++) {
+ if (!bp_check->bp_free &&
+ bp_check->bp_addr == template.bp_addr) {
+ kdb_printf("You already have a breakpoint at "
+ kdb_bfd_vma_fmt0 "\n", template.bp_addr);
+ return KDB_DUPBPT;
+ }
+ }
+
+ template.bp_enabled = 1;
+
+ /*
+ * Actually allocate the breakpoint found earlier
+ */
+ *bp = template;
+ bp->bp_free = 0;
+
+ kdb_printbp(bp, bpno);
+
+ return 0;
+}
+
+/*
+ * kdb_bc
+ *
+ * Handles the 'bc', 'be', and 'bd' commands
+ *
+ * [bd|bc|be] <breakpoint-number>
+ * [bd|bc|be] *
+ *
+ * Parameters:
+ * argc Count of arguments in argv
+ * argv Space delimited command line arguments
+ * Outputs:
+ * None.
+ * Returns:
+ * Zero for success, a kdb diagnostic for failure
+ * Locking:
+ * None.
+ * Remarks:
+ */
+static int kdb_bc(int argc, const char **argv)
+{
+ unsigned long addr;
+ kdb_bp_t *bp = NULL;
+ int lowbp = KDB_MAXBPT;
+ int highbp = 0;
+ int done = 0;
+ int i;
+ int diag = 0;
+
+ int cmd; /* KDBCMD_B? */
+#define KDBCMD_BC 0
+#define KDBCMD_BE 1
+#define KDBCMD_BD 2
+
+ if (strcmp(argv[0], "be") == 0)
+ cmd = KDBCMD_BE;
+ else if (strcmp(argv[0], "bd") == 0)
+ cmd = KDBCMD_BD;
+ else
+ cmd = KDBCMD_BC;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ if (strcmp(argv[1], "*") == 0) {
+ lowbp = 0;
+ highbp = KDB_MAXBPT;
+ } else {
+ diag = kdbgetularg(argv[1], &addr);
+ if (diag)
+ return diag;
+
+ /*
+ * For addresses less than the maximum breakpoint number,
+ * assume that the breakpoint number is desired.
+ */
+ if (addr < KDB_MAXBPT) {
+ bp = &kdb_breakpoints[addr];
+ lowbp = highbp = addr;
+ highbp++;
+ } else {
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT;
+ i++, bp++) {
+ if (bp->bp_addr == addr) {
+ lowbp = highbp = i;
+ highbp++;
+ break;
+ }
+ }
+ }
+ }
+
+ /*
+ * Now operate on the set of breakpoints matching the input
+ * criteria (either '*' for all, or an individual breakpoint).
+ */
+ for (bp = &kdb_breakpoints[lowbp], i = lowbp;
+ i < highbp;
+ i++, bp++) {
+ if (bp->bp_free)
+ continue;
+
+ done++;
+
+ switch (cmd) {
+ case KDBCMD_BC:
+ bp->bp_enabled = 0;
+
+ kdb_printf("Breakpoint %d at "
+ kdb_bfd_vma_fmt " cleared\n",
+ i, bp->bp_addr);
+
+ bp->bp_addr = 0;
+ bp->bp_free = 1;
+
+ break;
+ case KDBCMD_BE:
+ bp->bp_enabled = 1;
+
+ kdb_printf("Breakpoint %d at "
+ kdb_bfd_vma_fmt " enabled",
+ i, bp->bp_addr);
+
+ kdb_printf("\n");
+ break;
+ case KDBCMD_BD:
+ if (!bp->bp_enabled)
+ break;
+
+ bp->bp_enabled = 0;
+
+ kdb_printf("Breakpoint %d at "
+ kdb_bfd_vma_fmt " disabled\n",
+ i, bp->bp_addr);
+
+ break;
+ }
+ if (bp->bp_delay && (cmd == KDBCMD_BC || cmd == KDBCMD_BD)) {
+ bp->bp_delay = 0;
+ KDB_STATE_CLEAR(SSBPT);
+ }
+ }
+
+ return (!done) ? KDB_BPTNOTFOUND : 0;
+}
+
+/*
+ * kdb_ss
+ *
+ * Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
+ * commands.
+ *
+ * ss
+ * ssb
+ *
+ * Parameters:
+ * argc Argument count
+ * argv Argument vector
+ * Outputs:
+ * None.
+ * Returns:
+ * KDB_CMD_SS[B] for success, a kdb error if failure.
+ * Locking:
+ * None.
+ * Remarks:
+ *
+ * Set the arch specific option to trigger a debug trap after the next
+ * instruction.
+ *
+ * For 'ssb', set the trace flag in the debug trap handler
+ * after printing the current insn and return directly without
+ * invoking the kdb command processor, until a branch instruction
+ * is encountered.
+ */
+
+static int kdb_ss(int argc, const char **argv)
+{
+ int ssb = 0;
+
+ ssb = (strcmp(argv[0], "ssb") == 0);
+ if (argc != 0)
+ return KDB_ARGCOUNT;
+ /*
+ * Set trace flag and go.
+ */
+ KDB_STATE_SET(DOING_SS);
+ if (ssb) {
+ KDB_STATE_SET(DOING_SSB);
+ return KDB_CMD_SSB;
+ }
+ return KDB_CMD_SS;
+}
+
+/* Initialize the breakpoint table and register breakpoint commands. */
+
+void __init kdb_initbptab(void)
+{
+ int i;
+ kdb_bp_t *bp;
+
+ /*
+ * First time initialization.
+ */
+ memset(&kdb_breakpoints, '\0', sizeof(kdb_breakpoints));
+
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
+ bp->bp_free = 1;
+
+ kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
+ "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
+ "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+ if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
+ kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
+ "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("bc", kdb_bc, "<bpnum>",
+ "Clear Breakpoint", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("be", kdb_bc, "<bpnum>",
+ "Enable Breakpoint", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bd", kdb_bc, "<bpnum>",
+ "Disable Breakpoint", 0, KDB_REPEAT_NONE);
+
+ kdb_register_repeat("ss", kdb_ss, "",
+ "Single Step", 1, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("ssb", kdb_ss, "",
+ "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
+ /*
+ * Architecture dependent initialization.
+ */
+}
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
new file mode 100644
index 000000000000..176da1a707e6
--- /dev/null
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -0,0 +1,217 @@
+/*
+ * Kernel Debugger Architecture Independent Stack Traceback
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kdb.h>
+#include <linux/nmi.h>
+#include <asm/system.h>
+#include "kdb_private.h"
+
+
+static void kdb_show_stack(struct task_struct *p, void *addr)
+{
+ int old_lvl = console_loglevel;
+ console_loglevel = 15;
+ kdb_trap_printk++;
+ kdb_set_current_task(p);
+ if (addr) {
+ show_stack((struct task_struct *)p, addr);
+ } else if (kdb_current_regs) {
+#ifdef CONFIG_X86
+ show_stack(p, &kdb_current_regs->sp);
+#else
+ show_stack(p, NULL);
+#endif
+ } else {
+ show_stack(p, NULL);
+ }
+ console_loglevel = old_lvl;
+ kdb_trap_printk--;
+}
+
+/*
+ * kdb_bt
+ *
+ * This function implements the 'bt' command. Print a stack
+ * traceback.
+ *
+ * bt [<address-expression>] (addr-exp is for alternate stacks)
+ * btp <pid> Kernel stack for <pid>
+ * btt <address-expression> Kernel stack for task structure at
+ * <address-expression>
+ * bta [DRSTCZEUIMA] All useful processes, optionally
+ * filtered by state
+ * btc [<cpu>] The current process on one cpu,
+ * default is all cpus
+ *
+ * bt <address-expression> refers to a address on the stack, that location
+ * is assumed to contain a return address.
+ *
+ * btt <address-expression> refers to the address of a struct task.
+ *
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Outputs:
+ * None.
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ * Locking:
+ * none.
+ * Remarks:
+ * Backtrack works best when the code uses frame pointers. But even
+ * without frame pointers we should get a reasonable trace.
+ *
+ * mds comes in handy when examining the stack to do a manual traceback or
+ * to get a starting point for bt <address-expression>.
+ */
+
+static int
+kdb_bt1(struct task_struct *p, unsigned long mask,
+ int argcount, int btaprompt)
+{
+ char buffer[2];
+ if (kdb_getarea(buffer[0], (unsigned long)p) ||
+ kdb_getarea(buffer[0], (unsigned long)(p+1)-1))
+ return KDB_BADADDR;
+ if (!kdb_task_state(p, mask))
+ return 0;
+ kdb_printf("Stack traceback for pid %d\n", p->pid);
+ kdb_ps1(p);
+ kdb_show_stack(p, NULL);
+ if (btaprompt) {
+ kdb_getstr(buffer, sizeof(buffer),
+ "Enter <q> to end, <cr> to continue:");
+ if (buffer[0] == 'q') {
+ kdb_printf("\n");
+ return 1;
+ }
+ }
+ touch_nmi_watchdog();
+ return 0;
+}
+
+int
+kdb_bt(int argc, const char **argv)
+{
+ int diag;
+ int argcount = 5;
+ int btaprompt = 1;
+ int nextarg;
+ unsigned long addr;
+ long offset;
+
+ kdbgetintenv("BTARGS", &argcount); /* Arguments to print */
+ kdbgetintenv("BTAPROMPT", &btaprompt); /* Prompt after each
+ * proc in bta */
+
+ if (strcmp(argv[0], "bta") == 0) {
+ struct task_struct *g, *p;
+ unsigned long cpu;
+ unsigned long mask = kdb_task_state_string(argc ? argv[1] :
+ NULL);
+ if (argc == 0)
+ kdb_ps_suppressed();
+ /* Run the active tasks first */
+ for_each_online_cpu(cpu) {
+ p = kdb_curr_task(cpu);
+ if (kdb_bt1(p, mask, argcount, btaprompt))
+ return 0;
+ }
+ /* Now the inactive tasks */
+ kdb_do_each_thread(g, p) {
+ if (task_curr(p))
+ continue;
+ if (kdb_bt1(p, mask, argcount, btaprompt))
+ return 0;
+ } kdb_while_each_thread(g, p);
+ } else if (strcmp(argv[0], "btp") == 0) {
+ struct task_struct *p;
+ unsigned long pid;
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+ diag = kdbgetularg((char *)argv[1], &pid);
+ if (diag)
+ return diag;
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (p) {
+ kdb_set_current_task(p);
+ return kdb_bt1(p, ~0UL, argcount, 0);
+ }
+ kdb_printf("No process with pid == %ld found\n", pid);
+ return 0;
+ } else if (strcmp(argv[0], "btt") == 0) {
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+ diag = kdbgetularg((char *)argv[1], &addr);
+ if (diag)
+ return diag;
+ kdb_set_current_task((struct task_struct *)addr);
+ return kdb_bt1((struct task_struct *)addr, ~0UL, argcount, 0);
+ } else if (strcmp(argv[0], "btc") == 0) {
+ unsigned long cpu = ~0;
+ struct kdb_running_process *krp;
+ struct task_struct *save_current_task = kdb_current_task;
+ char buf[80];
+ if (argc > 1)
+ return KDB_ARGCOUNT;
+ if (argc == 1) {
+ diag = kdbgetularg((char *)argv[1], &cpu);
+ if (diag)
+ return diag;
+ }
+ /* Recursive use of kdb_parse, do not use argv after
+ * this point */
+ argv = NULL;
+ if (cpu != ~0) {
+ krp = kdb_running_process + cpu;
+ if (cpu >= num_possible_cpus() || !krp->seqno ||
+ !cpu_online(cpu)) {
+ kdb_printf("no process for cpu %ld\n", cpu);
+ return 0;
+ }
+ sprintf(buf, "btt 0x%p\n", krp->p);
+ kdb_parse(buf);
+ return 0;
+ }
+ kdb_printf("btc: cpu status: ");
+ kdb_parse("cpu\n");
+ for (cpu = 0, krp = kdb_running_process;
+ cpu < num_possible_cpus();
+ cpu++, krp++) {
+ if (!cpu_online(cpu) || !krp->seqno)
+ continue;
+ sprintf(buf, "btt 0x%p\n", krp->p);
+ kdb_parse(buf);
+ touch_nmi_watchdog();
+ }
+ kdb_set_current_task(save_current_task);
+ return 0;
+ } else {
+ if (argc) {
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+ &offset, NULL);
+ if (diag)
+ return diag;
+ kdb_show_stack(kdb_current_task, (void *)addr);
+ return 0;
+ } else {
+ return kdb_bt1(kdb_current_task, ~0UL, argcount, 0);
+ }
+ }
+
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/kernel/debug/kdb/kdb_cmds b/kernel/debug/kdb/kdb_cmds
new file mode 100644
index 000000000000..343955ca6a21
--- /dev/null
+++ b/kernel/debug/kdb/kdb_cmds
@@ -0,0 +1,32 @@
+# Initial commands for kdb, alter to suit your needs.
+# These commands are executed in kdb_init() context, no SMP, no
+# processes. Commands that require process data (including stack or
+# registers) are not reliable this early. set and bp commands should
+# be safe. Global breakpoint commands affect each cpu as it is booted.
+
+# Standard debugging information for first level support, just type archkdb
+# or archkdbcpu or archkdbshort at the kdb prompt.
+
+defcmd archkdb "" "First line arch debugging"
+ set BTSYMARG 1
+ set BTARGS 9
+ pid R
+ -archkdbcommon
+ -bta
+endefcmd
+
+defcmd archkdbcpu "" "archkdb with only tasks on cpus"
+ set BTSYMARG 1
+ set BTARGS 9
+ pid R
+ -archkdbcommon
+ -btc
+endefcmd
+
+defcmd archkdbshort "" "archkdb with less detailed backtrace"
+ set BTSYMARG 0
+ set BTARGS 0
+ pid R
+ -archkdbcommon
+ -bta
+endefcmd
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
new file mode 100644
index 000000000000..966ea367b7fd
--- /dev/null
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -0,0 +1,167 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kdebug.h>
+#include "kdb_private.h"
+#include "../debug_core.h"
+
+/*
+ * KDB interface to KGDB internals
+ */
+get_char_func kdb_poll_funcs[] = {
+ dbg_io_get_char,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(kdb_poll_funcs);
+
+int kdb_poll_idx = 1;
+EXPORT_SYMBOL_GPL(kdb_poll_idx);
+
+int kdb_stub(struct kgdb_state *ks)
+{
+ int error = 0;
+ kdb_bp_t *bp;
+ unsigned long addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
+ kdb_reason_t reason = KDB_REASON_OOPS;
+ kdb_dbtrap_t db_result = KDB_DB_NOBPT;
+ int i;
+
+ if (KDB_STATE(REENTRY)) {
+ reason = KDB_REASON_SWITCH;
+ KDB_STATE_CLEAR(REENTRY);
+ addr = instruction_pointer(ks->linux_regs);
+ }
+ ks->pass_exception = 0;
+ if (atomic_read(&kgdb_setting_breakpoint))
+ reason = KDB_REASON_KEYBOARD;
+
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+ if ((bp->bp_enabled) && (bp->bp_addr == addr)) {
+ reason = KDB_REASON_BREAK;
+ db_result = KDB_DB_BPT;
+ if (addr != instruction_pointer(ks->linux_regs))
+ kgdb_arch_set_pc(ks->linux_regs, addr);
+ break;
+ }
+ }
+ if (reason == KDB_REASON_BREAK || reason == KDB_REASON_SWITCH) {
+ for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) {
+ if (bp->bp_free)
+ continue;
+ if (bp->bp_addr == addr) {
+ bp->bp_delay = 1;
+ bp->bp_delayed = 1;
+ /* SSBPT is set when the kernel debugger must
+ * single step a task in order to re-establish
+ * an instruction breakpoint which uses the
+ * instruction replacement mechanism. It is
+ * cleared by any action that removes the need
+ * to single-step the breakpoint.
+ */
+ reason = KDB_REASON_BREAK;
+ db_result = KDB_DB_BPT;
+ KDB_STATE_SET(SSBPT);
+ break;
+ }
+ }
+ }
+
+ if (reason != KDB_REASON_BREAK && ks->ex_vector == 0 &&
+ ks->signo == SIGTRAP) {
+ reason = KDB_REASON_SSTEP;
+ db_result = KDB_DB_BPT;
+ }
+ /* Set initial kdb state variables */
+ KDB_STATE_CLEAR(KGDB_TRANS);
+ kdb_initial_cpu = ks->cpu;
+ kdb_current_task = kgdb_info[ks->cpu].task;
+ kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+ /* Remove any breakpoints as needed by kdb and clear single step*/
+ kdb_bp_remove();
+ KDB_STATE_CLEAR(DOING_SS);
+ KDB_STATE_CLEAR(DOING_SSB);
+ KDB_STATE_SET(PAGER);
+ for_each_online_cpu(i) {
+ kdb_save_running_cpu(kgdb_info[i].debuggerinfo,
+ kgdb_info[i].task, i);
+ }
+ if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
+ ks->pass_exception = 1;
+ KDB_FLAG_SET(CATASTROPHIC);
+ }
+ kdb_initial_cpu = ks->cpu;
+ if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
+ KDB_STATE_CLEAR(SSBPT);
+ KDB_STATE_CLEAR(DOING_SS);
+ } else {
+ /* Start kdb main loop */
+ error = kdb_main_loop(KDB_REASON_ENTER, reason,
+ ks->err_code, db_result, ks->linux_regs);
+ }
+ /* Upon exit from the kdb main loop setup break points and restart
+ * the system based on the requested continue state
+ */
+ kdb_initial_cpu = -1;
+ kdb_current_task = NULL;
+ kdb_current_regs = NULL;
+ KDB_STATE_CLEAR(PAGER);
+ kdbnearsym_cleanup();
+ if (error == KDB_CMD_KGDB) {
+ if (KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)) {
+ /* This inteface glue which allows kdb to
+ * transition in into the gdb stub. In order
+ * to do this the '?' gdb serial packet
+ * response is processed here. Or the empty
+ * packet response is sent to the connected
+ * debugger to complete the initial gdb
+ * handshake.
+ */
+ if (KDB_STATE(DOING_KGDB))
+ gdbstub_state(ks, "?");
+ else
+ gdbstub_state(ks, "");
+ KDB_STATE_CLEAR(DOING_KGDB);
+ KDB_STATE_CLEAR(DOING_KGDB2);
+ }
+ return DBG_PASS_EVENT;
+ }
+ kdb_bp_install(ks->linux_regs);
+ dbg_activate_sw_breakpoints();
+ /* Set the exit state to a single step or a continue */
+ if (KDB_STATE(DOING_SS))
+ gdbstub_state(ks, "s");
+ else
+ gdbstub_state(ks, "c");
+
+ KDB_FLAG_CLEAR(CATASTROPHIC);
+
+ /* Invoke any final arch specific exception handling before
+ * resuming the system
+ */
+ kgdb_info[ks->cpu].ret_state = gdbstub_state(ks, "e");
+ if (ks->pass_exception)
+ kgdb_info[ks->cpu].ret_state = 1;
+ if (error == KDB_CMD_CPU) {
+ KDB_STATE_SET_CPU(REENTRY, dbg_switch_cpu);
+ /* Force clear the single step bit because kdb emulates this
+ * differently vs the gdbstub */
+ kgdb_single_step = 0;
+ dbg_deactivate_sw_breakpoints();
+ return DBG_SWITCH_CPU_EVENT;
+ }
+ return kgdb_info[ks->cpu].ret_state;
+}
+
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
new file mode 100644
index 000000000000..dd75c20b2ec2
--- /dev/null
+++ b/kernel/debug/kdb/kdb_io.c
@@ -0,0 +1,825 @@
+/*
+ * Kernel Debugger Architecture Independent Console I/O handler
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2006 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kdev_t.h>
+#include <linux/console.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/kallsyms.h>
+#include "kdb_private.h"
+
+#define CMD_BUFLEN 256
+char kdb_prompt_str[CMD_BUFLEN];
+
+int kdb_trap_printk;
+
+static void kgdb_transition_check(char *buffer)
+{
+ int slen = strlen(buffer);
+ if (strncmp(buffer, "$?#3f", slen) != 0 &&
+ strncmp(buffer, "$qSupported#37", slen) != 0 &&
+ strncmp(buffer, "+$qSupported#37", slen) != 0) {
+ KDB_STATE_SET(KGDB_TRANS);
+ kdb_printf("%s", buffer);
+ }
+}
+
+static int kdb_read_get_key(char *buffer, size_t bufsize)
+{
+#define ESCAPE_UDELAY 1000
+#define ESCAPE_DELAY (2*1000000/ESCAPE_UDELAY) /* 2 seconds worth of udelays */
+ char escape_data[5]; /* longest vt100 escape sequence is 4 bytes */
+ char *ped = escape_data;
+ int escape_delay = 0;
+ get_char_func *f, *f_escape = NULL;
+ int key;
+
+ for (f = &kdb_poll_funcs[0]; ; ++f) {
+ if (*f == NULL) {
+ /* Reset NMI watchdog once per poll loop */
+ touch_nmi_watchdog();
+ f = &kdb_poll_funcs[0];
+ }
+ if (escape_delay == 2) {
+ *ped = '\0';
+ ped = escape_data;
+ --escape_delay;
+ }
+ if (escape_delay == 1) {
+ key = *ped++;
+ if (!*ped)
+ --escape_delay;
+ break;
+ }
+ key = (*f)();
+ if (key == -1) {
+ if (escape_delay) {
+ udelay(ESCAPE_UDELAY);
+ --escape_delay;
+ }
+ continue;
+ }
+ if (bufsize <= 2) {
+ if (key == '\r')
+ key = '\n';
+ *buffer++ = key;
+ *buffer = '\0';
+ return -1;
+ }
+ if (escape_delay == 0 && key == '\e') {
+ escape_delay = ESCAPE_DELAY;
+ ped = escape_data;
+ f_escape = f;
+ }
+ if (escape_delay) {
+ *ped++ = key;
+ if (f_escape != f) {
+ escape_delay = 2;
+ continue;
+ }
+ if (ped - escape_data == 1) {
+ /* \e */
+ continue;
+ } else if (ped - escape_data == 2) {
+ /* \e<something> */
+ if (key != '[')
+ escape_delay = 2;
+ continue;
+ } else if (ped - escape_data == 3) {
+ /* \e[<something> */
+ int mapkey = 0;
+ switch (key) {
+ case 'A': /* \e[A, up arrow */
+ mapkey = 16;
+ break;
+ case 'B': /* \e[B, down arrow */
+ mapkey = 14;
+ break;
+ case 'C': /* \e[C, right arrow */
+ mapkey = 6;
+ break;
+ case 'D': /* \e[D, left arrow */
+ mapkey = 2;
+ break;
+ case '1': /* dropthrough */
+ case '3': /* dropthrough */
+ /* \e[<1,3,4>], may be home, del, end */
+ case '4':
+ mapkey = -1;
+ break;
+ }
+ if (mapkey != -1) {
+ if (mapkey > 0) {
+ escape_data[0] = mapkey;
+ escape_data[1] = '\0';
+ }
+ escape_delay = 2;
+ }
+ continue;
+ } else if (ped - escape_data == 4) {
+ /* \e[<1,3,4><something> */
+ int mapkey = 0;
+ if (key == '~') {
+ switch (escape_data[2]) {
+ case '1': /* \e[1~, home */
+ mapkey = 1;
+ break;
+ case '3': /* \e[3~, del */
+ mapkey = 4;
+ break;
+ case '4': /* \e[4~, end */
+ mapkey = 5;
+ break;
+ }
+ }
+ if (mapkey > 0) {
+ escape_data[0] = mapkey;
+ escape_data[1] = '\0';
+ }
+ escape_delay = 2;
+ continue;
+ }
+ }
+ break; /* A key to process */
+ }
+ return key;
+}
+
+/*
+ * kdb_read
+ *
+ * This function reads a string of characters, terminated by
+ * a newline, or by reaching the end of the supplied buffer,
+ * from the current kernel debugger console device.
+ * Parameters:
+ * buffer - Address of character buffer to receive input characters.
+ * bufsize - size, in bytes, of the character buffer
+ * Returns:
+ * Returns a pointer to the buffer containing the received
+ * character string. This string will be terminated by a
+ * newline character.
+ * Locking:
+ * No locks are required to be held upon entry to this
+ * function. It is not reentrant - it relies on the fact
+ * that while kdb is running on only one "master debug" cpu.
+ * Remarks:
+ *
+ * The buffer size must be >= 2. A buffer size of 2 means that the caller only
+ * wants a single key.
+ *
+ * An escape key could be the start of a vt100 control sequence such as \e[D
+ * (left arrow) or it could be a character in its own right. The standard
+ * method for detecting the difference is to wait for 2 seconds to see if there
+ * are any other characters. kdb is complicated by the lack of a timer service
+ * (interrupts are off), by multiple input sources and by the need to sometimes
+ * return after just one key. Escape sequence processing has to be done as
+ * states in the polling loop.
+ */
+
+static char *kdb_read(char *buffer, size_t bufsize)
+{
+ char *cp = buffer;
+ char *bufend = buffer+bufsize-2; /* Reserve space for newline
+ * and null byte */
+ char *lastchar;
+ char *p_tmp;
+ char tmp;
+ static char tmpbuffer[CMD_BUFLEN];
+ int len = strlen(buffer);
+ int len_tmp;
+ int tab = 0;
+ int count;
+ int i;
+ int diag, dtab_count;
+ int key;
+
+
+ diag = kdbgetintenv("DTABCOUNT", &dtab_count);
+ if (diag)
+ dtab_count = 30;
+
+ if (len > 0) {
+ cp += len;
+ if (*(buffer+len-1) == '\n')
+ cp--;
+ }
+
+ lastchar = cp;
+ *cp = '\0';
+ kdb_printf("%s", buffer);
+poll_again:
+ key = kdb_read_get_key(buffer, bufsize);
+ if (key == -1)
+ return buffer;
+ if (key != 9)
+ tab = 0;
+ switch (key) {
+ case 8: /* backspace */
+ if (cp > buffer) {
+ if (cp < lastchar) {
+ memcpy(tmpbuffer, cp, lastchar - cp);
+ memcpy(cp-1, tmpbuffer, lastchar - cp);
+ }
+ *(--lastchar) = '\0';
+ --cp;
+ kdb_printf("\b%s \r", cp);
+ tmp = *cp;
+ *cp = '\0';
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ *cp = tmp;
+ }
+ break;
+ case 13: /* enter */
+ *lastchar++ = '\n';
+ *lastchar++ = '\0';
+ kdb_printf("\n");
+ return buffer;
+ case 4: /* Del */
+ if (cp < lastchar) {
+ memcpy(tmpbuffer, cp+1, lastchar - cp - 1);
+ memcpy(cp, tmpbuffer, lastchar - cp - 1);
+ *(--lastchar) = '\0';
+ kdb_printf("%s \r", cp);
+ tmp = *cp;
+ *cp = '\0';
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ *cp = tmp;
+ }
+ break;
+ case 1: /* Home */
+ if (cp > buffer) {
+ kdb_printf("\r");
+ kdb_printf(kdb_prompt_str);
+ cp = buffer;
+ }
+ break;
+ case 5: /* End */
+ if (cp < lastchar) {
+ kdb_printf("%s", cp);
+ cp = lastchar;
+ }
+ break;
+ case 2: /* Left */
+ if (cp > buffer) {
+ kdb_printf("\b");
+ --cp;
+ }
+ break;
+ case 14: /* Down */
+ memset(tmpbuffer, ' ',
+ strlen(kdb_prompt_str) + (lastchar-buffer));
+ *(tmpbuffer+strlen(kdb_prompt_str) +
+ (lastchar-buffer)) = '\0';
+ kdb_printf("\r%s\r", tmpbuffer);
+ *lastchar = (char)key;
+ *(lastchar+1) = '\0';
+ return lastchar;
+ case 6: /* Right */
+ if (cp < lastchar) {
+ kdb_printf("%c", *cp);
+ ++cp;
+ }
+ break;
+ case 16: /* Up */
+ memset(tmpbuffer, ' ',
+ strlen(kdb_prompt_str) + (lastchar-buffer));
+ *(tmpbuffer+strlen(kdb_prompt_str) +
+ (lastchar-buffer)) = '\0';
+ kdb_printf("\r%s\r", tmpbuffer);
+ *lastchar = (char)key;
+ *(lastchar+1) = '\0';
+ return lastchar;
+ case 9: /* Tab */
+ if (tab < 2)
+ ++tab;
+ p_tmp = buffer;
+ while (*p_tmp == ' ')
+ p_tmp++;
+ if (p_tmp > cp)
+ break;
+ memcpy(tmpbuffer, p_tmp, cp-p_tmp);
+ *(tmpbuffer + (cp-p_tmp)) = '\0';
+ p_tmp = strrchr(tmpbuffer, ' ');
+ if (p_tmp)
+ ++p_tmp;
+ else
+ p_tmp = tmpbuffer;
+ len = strlen(p_tmp);
+ count = kallsyms_symbol_complete(p_tmp,
+ sizeof(tmpbuffer) -
+ (p_tmp - tmpbuffer));
+ if (tab == 2 && count > 0) {
+ kdb_printf("\n%d symbols are found.", count);
+ if (count > dtab_count) {
+ count = dtab_count;
+ kdb_printf(" But only first %d symbols will"
+ " be printed.\nYou can change the"
+ " environment variable DTABCOUNT.",
+ count);
+ }
+ kdb_printf("\n");
+ for (i = 0; i < count; i++) {
+ if (kallsyms_symbol_next(p_tmp, i) < 0)
+ break;
+ kdb_printf("%s ", p_tmp);
+ *(p_tmp + len) = '\0';
+ }
+ if (i >= dtab_count)
+ kdb_printf("...");
+ kdb_printf("\n");
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ } else if (tab != 2 && count > 0) {
+ len_tmp = strlen(p_tmp);
+ strncpy(p_tmp+len_tmp, cp, lastchar-cp+1);
+ len_tmp = strlen(p_tmp);
+ strncpy(cp, p_tmp+len, len_tmp-len + 1);
+ len = len_tmp - len;
+ kdb_printf("%s", cp);
+ cp += len;
+ lastchar += len;
+ }
+ kdb_nextline = 1; /* reset output line number */
+ break;
+ default:
+ if (key >= 32 && lastchar < bufend) {
+ if (cp < lastchar) {
+ memcpy(tmpbuffer, cp, lastchar - cp);
+ memcpy(cp+1, tmpbuffer, lastchar - cp);
+ *++lastchar = '\0';
+ *cp = key;
+ kdb_printf("%s\r", cp);
+ ++cp;
+ tmp = *cp;
+ *cp = '\0';
+ kdb_printf(kdb_prompt_str);
+ kdb_printf("%s", buffer);
+ *cp = tmp;
+ } else {
+ *++lastchar = '\0';
+ *cp++ = key;
+ /* The kgdb transition check will hide
+ * printed characters if we think that
+ * kgdb is connecting, until the check
+ * fails */
+ if (!KDB_STATE(KGDB_TRANS))
+ kgdb_transition_check(buffer);
+ else
+ kdb_printf("%c", key);
+ }
+ /* Special escape to kgdb */
+ if (lastchar - buffer >= 5 &&
+ strcmp(lastchar - 5, "$?#3f") == 0) {
+ strcpy(buffer, "kgdb");
+ KDB_STATE_SET(DOING_KGDB);
+ return buffer;
+ }
+ if (lastchar - buffer >= 14 &&
+ strcmp(lastchar - 14, "$qSupported#37") == 0) {
+ strcpy(buffer, "kgdb");
+ KDB_STATE_SET(DOING_KGDB2);
+ return buffer;
+ }
+ }
+ break;
+ }
+ goto poll_again;
+}
+
+/*
+ * kdb_getstr
+ *
+ * Print the prompt string and read a command from the
+ * input device.
+ *
+ * Parameters:
+ * buffer Address of buffer to receive command
+ * bufsize Size of buffer in bytes
+ * prompt Pointer to string to use as prompt string
+ * Returns:
+ * Pointer to command buffer.
+ * Locking:
+ * None.
+ * Remarks:
+ * For SMP kernels, the processor number will be
+ * substituted for %d, %x or %o in the prompt.
+ */
+
+char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+{
+ if (prompt && kdb_prompt_str != prompt)
+ strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
+ kdb_printf(kdb_prompt_str);
+ kdb_nextline = 1; /* Prompt and input resets line number */
+ return kdb_read(buffer, bufsize);
+}
+
+/*
+ * kdb_input_flush
+ *
+ * Get rid of any buffered console input.
+ *
+ * Parameters:
+ * none
+ * Returns:
+ * nothing
+ * Locking:
+ * none
+ * Remarks:
+ * Call this function whenever you want to flush input. If there is any
+ * outstanding input, it ignores all characters until there has been no
+ * data for approximately half a second.
+ */
+
+#define FLUSH_UDELAY 100
+#define FLUSH_DELAY (500000/FLUSH_UDELAY) /* 0.5 seconds worth of udelays */
+
+static void kdb_input_flush(void)
+{
+ get_char_func *f;
+ int flush_delay = 1;
+ while (flush_delay--) {
+ touch_nmi_watchdog();
+ for (f = &kdb_poll_funcs[0]; *f; ++f) {
+ if ((*f)() != -1) {
+ flush_delay = FLUSH_DELAY;
+ break;
+ }
+ }
+ if (flush_delay)
+ udelay(FLUSH_UDELAY);
+ }
+}
+
+/*
+ * kdb_printf
+ *
+ * Print a string to the output device(s).
+ *
+ * Parameters:
+ * printf-like format and optional args.
+ * Returns:
+ * 0
+ * Locking:
+ * None.
+ * Remarks:
+ * use 'kdbcons->write()' to avoid polluting 'log_buf' with
+ * kdb output.
+ *
+ * If the user is doing a cmd args | grep srch
+ * then kdb_grepping_flag is set.
+ * In that case we need to accumulate full lines (ending in \n) before
+ * searching for the pattern.
+ */
+
+static char kdb_buffer[256]; /* A bit too big to go on stack */
+static char *next_avail = kdb_buffer;
+static int size_avail;
+static int suspend_grep;
+
+/*
+ * search arg1 to see if it contains arg2
+ * (kdmain.c provides flags for ^pat and pat$)
+ *
+ * return 1 for found, 0 for not found
+ */
+static int kdb_search_string(char *searched, char *searchfor)
+{
+ char firstchar, *cp;
+ int len1, len2;
+
+ /* not counting the newline at the end of "searched" */
+ len1 = strlen(searched)-1;
+ len2 = strlen(searchfor);
+ if (len1 < len2)
+ return 0;
+ if (kdb_grep_leading && kdb_grep_trailing && len1 != len2)
+ return 0;
+ if (kdb_grep_leading) {
+ if (!strncmp(searched, searchfor, len2))
+ return 1;
+ } else if (kdb_grep_trailing) {
+ if (!strncmp(searched+len1-len2, searchfor, len2))
+ return 1;
+ } else {
+ firstchar = *searchfor;
+ cp = searched;
+ while ((cp = strchr(cp, firstchar))) {
+ if (!strncmp(cp, searchfor, len2))
+ return 1;
+ cp++;
+ }
+ }
+ return 0;
+}
+
+int vkdb_printf(const char *fmt, va_list ap)
+{
+ int diag;
+ int linecount;
+ int logging, saved_loglevel = 0;
+ int saved_trap_printk;
+ int got_printf_lock = 0;
+ int retlen = 0;
+ int fnd, len;
+ char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
+ char *moreprompt = "more> ";
+ struct console *c = console_drivers;
+ static DEFINE_SPINLOCK(kdb_printf_lock);
+ unsigned long uninitialized_var(flags);
+
+ preempt_disable();
+ saved_trap_printk = kdb_trap_printk;
+ kdb_trap_printk = 0;
+
+ /* Serialize kdb_printf if multiple cpus try to write at once.
+ * But if any cpu goes recursive in kdb, just print the output,
+ * even if it is interleaved with any other text.
+ */
+ if (!KDB_STATE(PRINTF_LOCK)) {
+ KDB_STATE_SET(PRINTF_LOCK);
+ spin_lock_irqsave(&kdb_printf_lock, flags);
+ got_printf_lock = 1;
+ atomic_inc(&kdb_event);
+ } else {
+ __acquire(kdb_printf_lock);
+ }
+
+ diag = kdbgetintenv("LINES", &linecount);
+ if (diag || linecount <= 1)
+ linecount = 24;
+
+ diag = kdbgetintenv("LOGGING", &logging);
+ if (diag)
+ logging = 0;
+
+ if (!kdb_grepping_flag || suspend_grep) {
+ /* normally, every vsnprintf starts a new buffer */
+ next_avail = kdb_buffer;
+ size_avail = sizeof(kdb_buffer);
+ }
+ vsnprintf(next_avail, size_avail, fmt, ap);
+
+ /*
+ * If kdb_parse() found that the command was cmd xxx | grep yyy
+ * then kdb_grepping_flag is set, and kdb_grep_string contains yyy
+ *
+ * Accumulate the print data up to a newline before searching it.
+ * (vsnprintf does null-terminate the string that it generates)
+ */
+
+ /* skip the search if prints are temporarily unconditional */
+ if (!suspend_grep && kdb_grepping_flag) {
+ cp = strchr(kdb_buffer, '\n');
+ if (!cp) {
+ /*
+ * Special cases that don't end with newlines
+ * but should be written without one:
+ * The "[nn]kdb> " prompt should
+ * appear at the front of the buffer.
+ *
+ * The "[nn]more " prompt should also be
+ * (MOREPROMPT -> moreprompt)
+ * written * but we print that ourselves,
+ * we set the suspend_grep flag to make
+ * it unconditional.
+ *
+ */
+ if (next_avail == kdb_buffer) {
+ /*
+ * these should occur after a newline,
+ * so they will be at the front of the
+ * buffer
+ */
+ cp2 = kdb_buffer;
+ len = strlen(kdb_prompt_str);
+ if (!strncmp(cp2, kdb_prompt_str, len)) {
+ /*
+ * We're about to start a new
+ * command, so we can go back
+ * to normal mode.
+ */
+ kdb_grepping_flag = 0;
+ goto kdb_printit;
+ }
+ }
+ /* no newline; don't search/write the buffer
+ until one is there */
+ len = strlen(kdb_buffer);
+ next_avail = kdb_buffer + len;
+ size_avail = sizeof(kdb_buffer) - len;
+ goto kdb_print_out;
+ }
+
+ /*
+ * The newline is present; print through it or discard
+ * it, depending on the results of the search.
+ */
+ cp++; /* to byte after the newline */
+ replaced_byte = *cp; /* remember what/where it was */
+ cphold = cp;
+ *cp = '\0'; /* end the string for our search */
+
+ /*
+ * We now have a newline at the end of the string
+ * Only continue with this output if it contains the
+ * search string.
+ */
+ fnd = kdb_search_string(kdb_buffer, kdb_grep_string);
+ if (!fnd) {
+ /*
+ * At this point the complete line at the start
+ * of kdb_buffer can be discarded, as it does
+ * not contain what the user is looking for.
+ * Shift the buffer left.
+ */
+ *cphold = replaced_byte;
+ strcpy(kdb_buffer, cphold);
+ len = strlen(kdb_buffer);
+ next_avail = kdb_buffer + len;
+ size_avail = sizeof(kdb_buffer) - len;
+ goto kdb_print_out;
+ }
+ /*
+ * at this point the string is a full line and
+ * should be printed, up to the null.
+ */
+ }
+kdb_printit:
+
+ /*
+ * Write to all consoles.
+ */
+ retlen = strlen(kdb_buffer);
+ if (!dbg_kdb_mode && kgdb_connected) {
+ gdbstub_msg_write(kdb_buffer, retlen);
+ } else {
+ if (!dbg_io_ops->is_console) {
+ len = strlen(kdb_buffer);
+ cp = kdb_buffer;
+ while (len--) {
+ dbg_io_ops->write_char(*cp);
+ cp++;
+ }
+ }
+ while (c) {
+ c->write(c, kdb_buffer, retlen);
+ touch_nmi_watchdog();
+ c = c->next;
+ }
+ }
+ if (logging) {
+ saved_loglevel = console_loglevel;
+ console_loglevel = 0;
+ printk(KERN_INFO "%s", kdb_buffer);
+ }
+
+ if (KDB_STATE(PAGER) && strchr(kdb_buffer, '\n'))
+ kdb_nextline++;
+
+ /* check for having reached the LINES number of printed lines */
+ if (kdb_nextline == linecount) {
+ char buf1[16] = "";
+#if defined(CONFIG_SMP)
+ char buf2[32];
+#endif
+
+ /* Watch out for recursion here. Any routine that calls
+ * kdb_printf will come back through here. And kdb_read
+ * uses kdb_printf to echo on serial consoles ...
+ */
+ kdb_nextline = 1; /* In case of recursion */
+
+ /*
+ * Pause until cr.
+ */
+ moreprompt = kdbgetenv("MOREPROMPT");
+ if (moreprompt == NULL)
+ moreprompt = "more> ";
+
+#if defined(CONFIG_SMP)
+ if (strchr(moreprompt, '%')) {
+ sprintf(buf2, moreprompt, get_cpu());
+ put_cpu();
+ moreprompt = buf2;
+ }
+#endif
+
+ kdb_input_flush();
+ c = console_drivers;
+
+ if (!dbg_io_ops->is_console) {
+ len = strlen(moreprompt);
+ cp = moreprompt;
+ while (len--) {
+ dbg_io_ops->write_char(*cp);
+ cp++;
+ }
+ }
+ while (c) {
+ c->write(c, moreprompt, strlen(moreprompt));
+ touch_nmi_watchdog();
+ c = c->next;
+ }
+
+ if (logging)
+ printk("%s", moreprompt);
+
+ kdb_read(buf1, 2); /* '2' indicates to return
+ * immediately after getting one key. */
+ kdb_nextline = 1; /* Really set output line 1 */
+
+ /* empty and reset the buffer: */
+ kdb_buffer[0] = '\0';
+ next_avail = kdb_buffer;
+ size_avail = sizeof(kdb_buffer);
+ if ((buf1[0] == 'q') || (buf1[0] == 'Q')) {
+ /* user hit q or Q */
+ KDB_FLAG_SET(CMD_INTERRUPT); /* command interrupted */
+ KDB_STATE_CLEAR(PAGER);
+ /* end of command output; back to normal mode */
+ kdb_grepping_flag = 0;
+ kdb_printf("\n");
+ } else if (buf1[0] == ' ') {
+ kdb_printf("\n");
+ suspend_grep = 1; /* for this recursion */
+ } else if (buf1[0] == '\n') {
+ kdb_nextline = linecount - 1;
+ kdb_printf("\r");
+ suspend_grep = 1; /* for this recursion */
+ } else if (buf1[0] && buf1[0] != '\n') {
+ /* user hit something other than enter */
+ suspend_grep = 1; /* for this recursion */
+ kdb_printf("\nOnly 'q' or 'Q' are processed at more "
+ "prompt, input ignored\n");
+ } else if (kdb_grepping_flag) {
+ /* user hit enter */
+ suspend_grep = 1; /* for this recursion */
+ kdb_printf("\n");
+ }
+ kdb_input_flush();
+ }
+
+ /*
+ * For grep searches, shift the printed string left.
+ * replaced_byte contains the character that was overwritten with
+ * the terminating null, and cphold points to the null.
+ * Then adjust the notion of available space in the buffer.
+ */
+ if (kdb_grepping_flag && !suspend_grep) {
+ *cphold = replaced_byte;
+ strcpy(kdb_buffer, cphold);
+ len = strlen(kdb_buffer);
+ next_avail = kdb_buffer + len;
+ size_avail = sizeof(kdb_buffer) - len;
+ }
+
+kdb_print_out:
+ suspend_grep = 0; /* end of what may have been a recursive call */
+ if (logging)
+ console_loglevel = saved_loglevel;
+ if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
+ got_printf_lock = 0;
+ spin_unlock_irqrestore(&kdb_printf_lock, flags);
+ KDB_STATE_CLEAR(PRINTF_LOCK);
+ atomic_dec(&kdb_event);
+ } else {
+ __release(kdb_printf_lock);
+ }
+ kdb_trap_printk = saved_trap_printk;
+ preempt_enable();
+ return retlen;
+}
+
+int kdb_printf(const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vkdb_printf(fmt, ap);
+ va_end(ap);
+
+ return r;
+}
+
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
new file mode 100644
index 000000000000..852e003923b0
--- /dev/null
+++ b/kernel/debug/kdb/kdb_main.c
@@ -0,0 +1,2857 @@
+/*
+ * Kernel Debugger Architecture Independent Main Code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2000 Stephane Eranian <eranian@hpl.hp.com>
+ * Xscale (R) modifications copyright (C) 2003 Intel Corporation.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/sysrq.h>
+#include <linux/smp.h>
+#include <linux/utsname.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/nmi.h>
+#include <linux/time.h>
+#include <linux/ptrace.h>
+#include <linux/sysctl.h>
+#include <linux/cpu.h>
+#include <linux/kdebug.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/swap.h>
+#include "kdb_private.h"
+
+#define GREP_LEN 256
+char kdb_grep_string[GREP_LEN];
+int kdb_grepping_flag;
+EXPORT_SYMBOL(kdb_grepping_flag);
+int kdb_grep_leading;
+int kdb_grep_trailing;
+
+/*
+ * Kernel debugger state flags
+ */
+int kdb_flags;
+atomic_t kdb_event;
+
+/*
+ * kdb_lock protects updates to kdb_initial_cpu. Used to
+ * single thread processors through the kernel debugger.
+ */
+int kdb_initial_cpu = -1; /* cpu number that owns kdb */
+int kdb_seqno = 2; /* how many times kdb has been entered */
+
+int kdb_nextline = 1;
+int kdb_state[NR_CPUS]; /* Per cpu state */
+
+struct task_struct *kdb_current_task;
+EXPORT_SYMBOL(kdb_current_task);
+struct pt_regs *kdb_current_regs;
+
+const char *kdb_diemsg;
+static int kdb_go_count;
+#ifdef CONFIG_KDB_CONTINUE_CATASTROPHIC
+static unsigned int kdb_continue_catastrophic =
+ CONFIG_KDB_CONTINUE_CATASTROPHIC;
+#else
+static unsigned int kdb_continue_catastrophic;
+#endif
+
+ /*
+ * kdb_commands describes the available commands.
+ */
+static kdbtab_t *kdb_commands;
+static int kdb_max_commands;
+
+typedef struct _kdbmsg {
+ int km_diag; /* kdb diagnostic */
+ char *km_msg; /* Corresponding message text */
+} kdbmsg_t;
+
+#define KDBMSG(msgnum, text) \
+ { KDB_##msgnum, text }
+
+static kdbmsg_t kdbmsgs[] = {
+ KDBMSG(NOTFOUND, "Command Not Found"),
+ KDBMSG(ARGCOUNT, "Improper argument count, see usage."),
+ KDBMSG(BADWIDTH, "Illegal value for BYTESPERWORD use 1, 2, 4 or 8, "
+ "8 is only allowed on 64 bit systems"),
+ KDBMSG(BADRADIX, "Illegal value for RADIX use 8, 10 or 16"),
+ KDBMSG(NOTENV, "Cannot find environment variable"),
+ KDBMSG(NOENVVALUE, "Environment variable should have value"),
+ KDBMSG(NOTIMP, "Command not implemented"),
+ KDBMSG(ENVFULL, "Environment full"),
+ KDBMSG(ENVBUFFULL, "Environment buffer full"),
+ KDBMSG(TOOMANYBPT, "Too many breakpoints defined"),
+#ifdef CONFIG_CPU_XSCALE
+ KDBMSG(TOOMANYDBREGS, "More breakpoints than ibcr registers defined"),
+#else
+ KDBMSG(TOOMANYDBREGS, "More breakpoints than db registers defined"),
+#endif
+ KDBMSG(DUPBPT, "Duplicate breakpoint address"),
+ KDBMSG(BPTNOTFOUND, "Breakpoint not found"),
+ KDBMSG(BADMODE, "Invalid IDMODE"),
+ KDBMSG(BADINT, "Illegal numeric value"),
+ KDBMSG(INVADDRFMT, "Invalid symbolic address format"),
+ KDBMSG(BADREG, "Invalid register name"),
+ KDBMSG(BADCPUNUM, "Invalid cpu number"),
+ KDBMSG(BADLENGTH, "Invalid length field"),
+ KDBMSG(NOBP, "No Breakpoint exists"),
+ KDBMSG(BADADDR, "Invalid address"),
+};
+#undef KDBMSG
+
+static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
+
+
+/*
+ * Initial environment. This is all kept static and local to
+ * this file. We don't want to rely on the memory allocation
+ * mechanisms in the kernel, so we use a very limited allocate-only
+ * heap for new and altered environment variables. The entire
+ * environment is limited to a fixed number of entries (add more
+ * to __env[] if required) and a fixed amount of heap (add more to
+ * KDB_ENVBUFSIZE if required).
+ */
+
+static char *__env[] = {
+#if defined(CONFIG_SMP)
+ "PROMPT=[%d]kdb> ",
+ "MOREPROMPT=[%d]more> ",
+#else
+ "PROMPT=kdb> ",
+ "MOREPROMPT=more> ",
+#endif
+ "RADIX=16",
+ "MDCOUNT=8", /* lines of md output */
+ "BTARGS=9", /* 9 possible args in bt */
+ KDB_PLATFORM_ENV,
+ "DTABCOUNT=30",
+ "NOSECT=1",
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+ (char *)0,
+};
+
+static const int __nenv = (sizeof(__env) / sizeof(char *));
+
+struct task_struct *kdb_curr_task(int cpu)
+{
+ struct task_struct *p = curr_task(cpu);
+#ifdef _TIF_MCA_INIT
+ struct kdb_running_process *krp = kdb_running_process + cpu;
+ if ((task_thread_info(p)->flags & _TIF_MCA_INIT) && krp->p)
+ p = krp->p;
+#endif
+ return p;
+}
+
+/*
+ * kdbgetenv - This function will return the character string value of
+ * an environment variable.
+ * Parameters:
+ * match A character string representing an environment variable.
+ * Returns:
+ * NULL No environment variable matches 'match'
+ * char* Pointer to string value of environment variable.
+ */
+char *kdbgetenv(const char *match)
+{
+ char **ep = __env;
+ int matchlen = strlen(match);
+ int i;
+
+ for (i = 0; i < __nenv; i++) {
+ char *e = *ep++;
+
+ if (!e)
+ continue;
+
+ if ((strncmp(match, e, matchlen) == 0)
+ && ((e[matchlen] == '\0')
+ || (e[matchlen] == '='))) {
+ char *cp = strchr(e, '=');
+ return cp ? ++cp : "";
+ }
+ }
+ return NULL;
+}
+
+/*
+ * kdballocenv - This function is used to allocate bytes for
+ * environment entries.
+ * Parameters:
+ * match A character string representing a numeric value
+ * Outputs:
+ * *value the unsigned long representation of the env variable 'match'
+ * Returns:
+ * Zero on success, a kdb diagnostic on failure.
+ * Remarks:
+ * We use a static environment buffer (envbuffer) to hold the values
+ * of dynamically generated environment variables (see kdb_set). Buffer
+ * space once allocated is never free'd, so over time, the amount of space
+ * (currently 512 bytes) will be exhausted if env variables are changed
+ * frequently.
+ */
+static char *kdballocenv(size_t bytes)
+{
+#define KDB_ENVBUFSIZE 512
+ static char envbuffer[KDB_ENVBUFSIZE];
+ static int envbufsize;
+ char *ep = NULL;
+
+ if ((KDB_ENVBUFSIZE - envbufsize) >= bytes) {
+ ep = &envbuffer[envbufsize];
+ envbufsize += bytes;
+ }
+ return ep;
+}
+
+/*
+ * kdbgetulenv - This function will return the value of an unsigned
+ * long-valued environment variable.
+ * Parameters:
+ * match A character string representing a numeric value
+ * Outputs:
+ * *value the unsigned long represntation of the env variable 'match'
+ * Returns:
+ * Zero on success, a kdb diagnostic on failure.
+ */
+static int kdbgetulenv(const char *match, unsigned long *value)
+{
+ char *ep;
+
+ ep = kdbgetenv(match);
+ if (!ep)
+ return KDB_NOTENV;
+ if (strlen(ep) == 0)
+ return KDB_NOENVVALUE;
+
+ *value = simple_strtoul(ep, NULL, 0);
+
+ return 0;
+}
+
+/*
+ * kdbgetintenv - This function will return the value of an
+ * integer-valued environment variable.
+ * Parameters:
+ * match A character string representing an integer-valued env variable
+ * Outputs:
+ * *value the integer representation of the environment variable 'match'
+ * Returns:
+ * Zero on success, a kdb diagnostic on failure.
+ */
+int kdbgetintenv(const char *match, int *value)
+{
+ unsigned long val;
+ int diag;
+
+ diag = kdbgetulenv(match, &val);
+ if (!diag)
+ *value = (int) val;
+ return diag;
+}
+
+/*
+ * kdbgetularg - This function will convert a numeric string into an
+ * unsigned long value.
+ * Parameters:
+ * arg A character string representing a numeric value
+ * Outputs:
+ * *value the unsigned long represntation of arg.
+ * Returns:
+ * Zero on success, a kdb diagnostic on failure.
+ */
+int kdbgetularg(const char *arg, unsigned long *value)
+{
+ char *endp;
+ unsigned long val;
+
+ val = simple_strtoul(arg, &endp, 0);
+
+ if (endp == arg) {
+ /*
+ * Try base 16, for us folks too lazy to type the
+ * leading 0x...
+ */
+ val = simple_strtoul(arg, &endp, 16);
+ if (endp == arg)
+ return KDB_BADINT;
+ }
+
+ *value = val;
+
+ return 0;
+}
+
+/*
+ * kdb_set - This function implements the 'set' command. Alter an
+ * existing environment variable or create a new one.
+ */
+int kdb_set(int argc, const char **argv)
+{
+ int i;
+ char *ep;
+ size_t varlen, vallen;
+
+ /*
+ * we can be invoked two ways:
+ * set var=value argv[1]="var", argv[2]="value"
+ * set var = value argv[1]="var", argv[2]="=", argv[3]="value"
+ * - if the latter, shift 'em down.
+ */
+ if (argc == 3) {
+ argv[2] = argv[3];
+ argc--;
+ }
+
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+
+ /*
+ * Check for internal variables
+ */
+ if (strcmp(argv[1], "KDBDEBUG") == 0) {
+ unsigned int debugflags;
+ char *cp;
+
+ debugflags = simple_strtoul(argv[2], &cp, 0);
+ if (cp == argv[2] || debugflags & ~KDB_DEBUG_FLAG_MASK) {
+ kdb_printf("kdb: illegal debug flags '%s'\n",
+ argv[2]);
+ return 0;
+ }
+ kdb_flags = (kdb_flags &
+ ~(KDB_DEBUG_FLAG_MASK << KDB_DEBUG_FLAG_SHIFT))
+ | (debugflags << KDB_DEBUG_FLAG_SHIFT);
+
+ return 0;
+ }
+
+ /*
+ * Tokenizer squashed the '=' sign. argv[1] is variable
+ * name, argv[2] = value.
+ */
+ varlen = strlen(argv[1]);
+ vallen = strlen(argv[2]);
+ ep = kdballocenv(varlen + vallen + 2);
+ if (ep == (char *)0)
+ return KDB_ENVBUFFULL;
+
+ sprintf(ep, "%s=%s", argv[1], argv[2]);
+
+ ep[varlen+vallen+1] = '\0';
+
+ for (i = 0; i < __nenv; i++) {
+ if (__env[i]
+ && ((strncmp(__env[i], argv[1], varlen) == 0)
+ && ((__env[i][varlen] == '\0')
+ || (__env[i][varlen] == '=')))) {
+ __env[i] = ep;
+ return 0;
+ }
+ }
+
+ /*
+ * Wasn't existing variable. Fit into slot.
+ */
+ for (i = 0; i < __nenv-1; i++) {
+ if (__env[i] == (char *)0) {
+ __env[i] = ep;
+ return 0;
+ }
+ }
+
+ return KDB_ENVFULL;
+}
+
+static int kdb_check_regs(void)
+{
+ if (!kdb_current_regs) {
+ kdb_printf("No current kdb registers."
+ " You may need to select another task\n");
+ return KDB_BADREG;
+ }
+ return 0;
+}
+
+/*
+ * kdbgetaddrarg - This function is responsible for parsing an
+ * address-expression and returning the value of the expression,
+ * symbol name, and offset to the caller.
+ *
+ * The argument may consist of a numeric value (decimal or
+ * hexidecimal), a symbol name, a register name (preceeded by the
+ * percent sign), an environment variable with a numeric value
+ * (preceeded by a dollar sign) or a simple arithmetic expression
+ * consisting of a symbol name, +/-, and a numeric constant value
+ * (offset).
+ * Parameters:
+ * argc - count of arguments in argv
+ * argv - argument vector
+ * *nextarg - index to next unparsed argument in argv[]
+ * regs - Register state at time of KDB entry
+ * Outputs:
+ * *value - receives the value of the address-expression
+ * *offset - receives the offset specified, if any
+ * *name - receives the symbol name, if any
+ * *nextarg - index to next unparsed argument in argv[]
+ * Returns:
+ * zero is returned on success, a kdb diagnostic code is
+ * returned on error.
+ */
+int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
+ unsigned long *value, long *offset,
+ char **name)
+{
+ unsigned long addr;
+ unsigned long off = 0;
+ int positive;
+ int diag;
+ int found = 0;
+ char *symname;
+ char symbol = '\0';
+ char *cp;
+ kdb_symtab_t symtab;
+
+ /*
+ * Process arguments which follow the following syntax:
+ *
+ * symbol | numeric-address [+/- numeric-offset]
+ * %register
+ * $environment-variable
+ */
+
+ if (*nextarg > argc)
+ return KDB_ARGCOUNT;
+
+ symname = (char *)argv[*nextarg];
+
+ /*
+ * If there is no whitespace between the symbol
+ * or address and the '+' or '-' symbols, we
+ * remember the character and replace it with a
+ * null so the symbol/value can be properly parsed
+ */
+ cp = strpbrk(symname, "+-");
+ if (cp != NULL) {
+ symbol = *cp;
+ *cp++ = '\0';
+ }
+
+ if (symname[0] == '$') {
+ diag = kdbgetulenv(&symname[1], &addr);
+ if (diag)
+ return diag;
+ } else if (symname[0] == '%') {
+ diag = kdb_check_regs();
+ if (diag)
+ return diag;
+ /* Implement register values with % at a later time as it is
+ * arch optional.
+ */
+ return KDB_NOTIMP;
+ } else {
+ found = kdbgetsymval(symname, &symtab);
+ if (found) {
+ addr = symtab.sym_start;
+ } else {
+ diag = kdbgetularg(argv[*nextarg], &addr);
+ if (diag)
+ return diag;
+ }
+ }
+
+ if (!found)
+ found = kdbnearsym(addr, &symtab);
+
+ (*nextarg)++;
+
+ if (name)
+ *name = symname;
+ if (value)
+ *value = addr;
+ if (offset && name && *name)
+ *offset = addr - symtab.sym_start;
+
+ if ((*nextarg > argc)
+ && (symbol == '\0'))
+ return 0;
+
+ /*
+ * check for +/- and offset
+ */
+
+ if (symbol == '\0') {
+ if ((argv[*nextarg][0] != '+')
+ && (argv[*nextarg][0] != '-')) {
+ /*
+ * Not our argument. Return.
+ */
+ return 0;
+ } else {
+ positive = (argv[*nextarg][0] == '+');
+ (*nextarg)++;
+ }
+ } else
+ positive = (symbol == '+');
+
+ /*
+ * Now there must be an offset!
+ */
+ if ((*nextarg > argc)
+ && (symbol == '\0')) {
+ return KDB_INVADDRFMT;
+ }
+
+ if (!symbol) {
+ cp = (char *)argv[*nextarg];
+ (*nextarg)++;
+ }
+
+ diag = kdbgetularg(cp, &off);
+ if (diag)
+ return diag;
+
+ if (!positive)
+ off = -off;
+
+ if (offset)
+ *offset += off;
+
+ if (value)
+ *value += off;
+
+ return 0;
+}
+
+static void kdb_cmderror(int diag)
+{
+ int i;
+
+ if (diag >= 0) {
+ kdb_printf("no error detected (diagnostic is %d)\n", diag);
+ return;
+ }
+
+ for (i = 0; i < __nkdb_err; i++) {
+ if (kdbmsgs[i].km_diag == diag) {
+ kdb_printf("diag: %d: %s\n", diag, kdbmsgs[i].km_msg);
+ return;
+ }
+ }
+
+ kdb_printf("Unknown diag %d\n", -diag);
+}
+
+/*
+ * kdb_defcmd, kdb_defcmd2 - This function implements the 'defcmd'
+ * command which defines one command as a set of other commands,
+ * terminated by endefcmd. kdb_defcmd processes the initial
+ * 'defcmd' command, kdb_defcmd2 is invoked from kdb_parse for
+ * the following commands until 'endefcmd'.
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ */
+struct defcmd_set {
+ int count;
+ int usable;
+ char *name;
+ char *usage;
+ char *help;
+ char **command;
+};
+static struct defcmd_set *defcmd_set;
+static int defcmd_set_count;
+static int defcmd_in_progress;
+
+/* Forward references */
+static int kdb_exec_defcmd(int argc, const char **argv);
+
+static int kdb_defcmd2(const char *cmdstr, const char *argv0)
+{
+ struct defcmd_set *s = defcmd_set + defcmd_set_count - 1;
+ char **save_command = s->command;
+ if (strcmp(argv0, "endefcmd") == 0) {
+ defcmd_in_progress = 0;
+ if (!s->count)
+ s->usable = 0;
+ if (s->usable)
+ kdb_register(s->name, kdb_exec_defcmd,
+ s->usage, s->help, 0);
+ return 0;
+ }
+ if (!s->usable)
+ return KDB_NOTIMP;
+ s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+ if (!s->command) {
+ kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
+ cmdstr);
+ s->usable = 0;
+ return KDB_NOTIMP;
+ }
+ memcpy(s->command, save_command, s->count * sizeof(*(s->command)));
+ s->command[s->count++] = kdb_strdup(cmdstr, GFP_KDB);
+ kfree(save_command);
+ return 0;
+}
+
+static int kdb_defcmd(int argc, const char **argv)
+{
+ struct defcmd_set *save_defcmd_set = defcmd_set, *s;
+ if (defcmd_in_progress) {
+ kdb_printf("kdb: nested defcmd detected, assuming missing "
+ "endefcmd\n");
+ kdb_defcmd2("endefcmd", "endefcmd");
+ }
+ if (argc == 0) {
+ int i;
+ for (s = defcmd_set; s < defcmd_set + defcmd_set_count; ++s) {
+ kdb_printf("defcmd %s \"%s\" \"%s\"\n", s->name,
+ s->usage, s->help);
+ for (i = 0; i < s->count; ++i)
+ kdb_printf("%s", s->command[i]);
+ kdb_printf("endefcmd\n");
+ }
+ return 0;
+ }
+ if (argc != 3)
+ return KDB_ARGCOUNT;
+ defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
+ GFP_KDB);
+ if (!defcmd_set) {
+ kdb_printf("Could not allocate new defcmd_set entry for %s\n",
+ argv[1]);
+ defcmd_set = save_defcmd_set;
+ return KDB_NOTIMP;
+ }
+ memcpy(defcmd_set, save_defcmd_set,
+ defcmd_set_count * sizeof(*defcmd_set));
+ kfree(save_defcmd_set);
+ s = defcmd_set + defcmd_set_count;
+ memset(s, 0, sizeof(*s));
+ s->usable = 1;
+ s->name = kdb_strdup(argv[1], GFP_KDB);
+ s->usage = kdb_strdup(argv[2], GFP_KDB);
+ s->help = kdb_strdup(argv[3], GFP_KDB);
+ if (s->usage[0] == '"') {
+ strcpy(s->usage, s->usage+1);
+ s->usage[strlen(s->usage)-1] = '\0';
+ }
+ if (s->help[0] == '"') {
+ strcpy(s->help, s->help+1);
+ s->help[strlen(s->help)-1] = '\0';
+ }
+ ++defcmd_set_count;
+ defcmd_in_progress = 1;
+ return 0;
+}
+
+/*
+ * kdb_exec_defcmd - Execute the set of commands associated with this
+ * defcmd name.
+ * Inputs:
+ * argc argument count
+ * argv argument vector
+ * Returns:
+ * zero for success, a kdb diagnostic if error
+ */
+static int kdb_exec_defcmd(int argc, const char **argv)
+{
+ int i, ret;
+ struct defcmd_set *s;
+ if (argc != 0)
+ return KDB_ARGCOUNT;
+ for (s = defcmd_set, i = 0; i < defcmd_set_count; ++i, ++s) {
+ if (strcmp(s->name, argv[0]) == 0)
+ break;
+ }
+ if (i == defcmd_set_count) {
+ kdb_printf("kdb_exec_defcmd: could not find commands for %s\n",
+ argv[0]);
+ return KDB_NOTIMP;
+ }
+ for (i = 0; i < s->count; ++i) {
+ /* Recursive use of kdb_parse, do not use argv after
+ * this point */
+ argv = NULL;
+ kdb_printf("[%s]kdb> %s\n", s->name, s->command[i]);
+ ret = kdb_parse(s->command[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/* Command history */
+#define KDB_CMD_HISTORY_COUNT 32
+#define CMD_BUFLEN 200 /* kdb_printf: max printline
+ * size == 256 */
+static unsigned int cmd_head, cmd_tail;
+static unsigned int cmdptr;
+static char cmd_hist[KDB_CMD_HISTORY_COUNT][CMD_BUFLEN];
+static char cmd_cur[CMD_BUFLEN];
+
+/*
+ * The "str" argument may point to something like | grep xyz
+ */
+static void parse_grep(const char *str)
+{
+ int len;
+ char *cp = (char *)str, *cp2;
+
+ /* sanity check: we should have been called with the \ first */
+ if (*cp != '|')
+ return;
+ cp++;
+ while (isspace(*cp))
+ cp++;
+ if (strncmp(cp, "grep ", 5)) {
+ kdb_printf("invalid 'pipe', see grephelp\n");
+ return;
+ }
+ cp += 5;
+ while (isspace(*cp))
+ cp++;
+ cp2 = strchr(cp, '\n');
+ if (cp2)
+ *cp2 = '\0'; /* remove the trailing newline */
+ len = strlen(cp);
+ if (len == 0) {
+ kdb_printf("invalid 'pipe', see grephelp\n");
+ return;
+ }
+ /* now cp points to a nonzero length search string */
+ if (*cp == '"') {
+ /* allow it be "x y z" by removing the "'s - there must
+ be two of them */
+ cp++;
+ cp2 = strchr(cp, '"');
+ if (!cp2) {
+ kdb_printf("invalid quoted string, see grephelp\n");
+ return;
+ }
+ *cp2 = '\0'; /* end the string where the 2nd " was */
+ }
+ kdb_grep_leading = 0;
+ if (*cp == '^') {
+ kdb_grep_leading = 1;
+ cp++;
+ }
+ len = strlen(cp);
+ kdb_grep_trailing = 0;
+ if (*(cp+len-1) == '$') {
+ kdb_grep_trailing = 1;
+ *(cp+len-1) = '\0';
+ }
+ len = strlen(cp);
+ if (!len)
+ return;
+ if (len >= GREP_LEN) {
+ kdb_printf("search string too long\n");
+ return;
+ }
+ strcpy(kdb_grep_string, cp);
+ kdb_grepping_flag++;
+ return;
+}
+
+/*
+ * kdb_parse - Parse the command line, search the command table for a
+ * matching command and invoke the command function. This
+ * function may be called recursively, if it is, the second call
+ * will overwrite argv and cbuf. It is the caller's
+ * responsibility to save their argv if they recursively call
+ * kdb_parse().
+ * Parameters:
+ * cmdstr The input command line to be parsed.
+ * regs The registers at the time kdb was entered.
+ * Returns:
+ * Zero for success, a kdb diagnostic if failure.
+ * Remarks:
+ * Limited to 20 tokens.
+ *
+ * Real rudimentary tokenization. Basically only whitespace
+ * is considered a token delimeter (but special consideration
+ * is taken of the '=' sign as used by the 'set' command).
+ *
+ * The algorithm used to tokenize the input string relies on
+ * there being at least one whitespace (or otherwise useless)
+ * character between tokens as the character immediately following
+ * the token is altered in-place to a null-byte to terminate the
+ * token string.
+ */
+
+#define MAXARGC 20
+
+int kdb_parse(const char *cmdstr)
+{
+ static char *argv[MAXARGC];
+ static int argc;
+ static char cbuf[CMD_BUFLEN+2];
+ char *cp;
+ char *cpp, quoted;
+ kdbtab_t *tp;
+ int i, escaped, ignore_errors = 0, check_grep;
+
+ /*
+ * First tokenize the command string.
+ */
+ cp = (char *)cmdstr;
+ kdb_grepping_flag = check_grep = 0;
+
+ if (KDB_FLAG(CMD_INTERRUPT)) {
+ /* Previous command was interrupted, newline must not
+ * repeat the command */
+ KDB_FLAG_CLEAR(CMD_INTERRUPT);
+ KDB_STATE_SET(PAGER);
+ argc = 0; /* no repeat */
+ }
+
+ if (*cp != '\n' && *cp != '\0') {
+ argc = 0;
+ cpp = cbuf;
+ while (*cp) {
+ /* skip whitespace */
+ while (isspace(*cp))
+ cp++;
+ if ((*cp == '\0') || (*cp == '\n') ||
+ (*cp == '#' && !defcmd_in_progress))
+ break;
+ /* special case: check for | grep pattern */
+ if (*cp == '|') {
+ check_grep++;
+ break;
+ }
+ if (cpp >= cbuf + CMD_BUFLEN) {
+ kdb_printf("kdb_parse: command buffer "
+ "overflow, command ignored\n%s\n",
+ cmdstr);
+ return KDB_NOTFOUND;
+ }
+ if (argc >= MAXARGC - 1) {
+ kdb_printf("kdb_parse: too many arguments, "
+ "command ignored\n%s\n", cmdstr);
+ return KDB_NOTFOUND;
+ }
+ argv[argc++] = cpp;
+ escaped = 0;
+ quoted = '\0';
+ /* Copy to next unquoted and unescaped
+ * whitespace or '=' */
+ while (*cp && *cp != '\n' &&
+ (escaped || quoted || !isspace(*cp))) {
+ if (cpp >= cbuf + CMD_BUFLEN)
+ break;
+ if (escaped) {
+ escaped = 0;
+ *cpp++ = *cp++;
+ continue;
+ }
+ if (*cp == '\\') {
+ escaped = 1;
+ ++cp;
+ continue;
+ }
+ if (*cp == quoted)
+ quoted = '\0';
+ else if (*cp == '\'' || *cp == '"')
+ quoted = *cp;
+ *cpp = *cp++;
+ if (*cpp == '=' && !quoted)
+ break;
+ ++cpp;
+ }
+ *cpp++ = '\0'; /* Squash a ws or '=' character */
+ }
+ }
+ if (!argc)
+ return 0;
+ if (check_grep)
+ parse_grep(cp);
+ if (defcmd_in_progress) {
+ int result = kdb_defcmd2(cmdstr, argv[0]);
+ if (!defcmd_in_progress) {
+ argc = 0; /* avoid repeat on endefcmd */
+ *(argv[0]) = '\0';
+ }
+ return result;
+ }
+ if (argv[0][0] == '-' && argv[0][1] &&
+ (argv[0][1] < '0' || argv[0][1] > '9')) {
+ ignore_errors = 1;
+ ++argv[0];
+ }
+
+ for (tp = kdb_commands, i = 0; i < kdb_max_commands; i++, tp++) {
+ if (tp->cmd_name) {
+ /*
+ * If this command is allowed to be abbreviated,
+ * check to see if this is it.
+ */
+
+ if (tp->cmd_minlen
+ && (strlen(argv[0]) <= tp->cmd_minlen)) {
+ if (strncmp(argv[0],
+ tp->cmd_name,
+ tp->cmd_minlen) == 0) {
+ break;
+ }
+ }
+
+ if (strcmp(argv[0], tp->cmd_name) == 0)
+ break;
+ }
+ }
+
+ /*
+ * If we don't find a command by this name, see if the first
+ * few characters of this match any of the known commands.
+ * e.g., md1c20 should match md.
+ */
+ if (i == kdb_max_commands) {
+ for (tp = kdb_commands, i = 0; i < kdb_max_commands;
+ i++, tp++) {
+ if (tp->cmd_name) {
+ if (strncmp(argv[0],
+ tp->cmd_name,
+ strlen(tp->cmd_name)) == 0) {
+ break;
+ }
+ }
+ }
+ }
+
+ if (i < kdb_max_commands) {
+ int result;
+ KDB_STATE_SET(CMD);
+ result = (*tp->cmd_func)(argc-1, (const char **)argv);
+ if (result && ignore_errors && result > KDB_CMD_GO)
+ result = 0;
+ KDB_STATE_CLEAR(CMD);
+ switch (tp->cmd_repeat) {
+ case KDB_REPEAT_NONE:
+ argc = 0;
+ if (argv[0])
+ *(argv[0]) = '\0';
+ break;
+ case KDB_REPEAT_NO_ARGS:
+ argc = 1;
+ if (argv[1])
+ *(argv[1]) = '\0';
+ break;
+ case KDB_REPEAT_WITH_ARGS:
+ break;
+ }
+ return result;
+ }
+
+ /*
+ * If the input with which we were presented does not
+ * map to an existing command, attempt to parse it as an
+ * address argument and display the result. Useful for
+ * obtaining the address of a variable, or the nearest symbol
+ * to an address contained in a register.
+ */
+ {
+ unsigned long value;
+ char *name = NULL;
+ long offset;
+ int nextarg = 0;
+
+ if (kdbgetaddrarg(0, (const char **)argv, &nextarg,
+ &value, &offset, &name)) {
+ return KDB_NOTFOUND;
+ }
+
+ kdb_printf("%s = ", argv[0]);
+ kdb_symbol_print(value, NULL, KDB_SP_DEFAULT);
+ kdb_printf("\n");
+ return 0;
+ }
+}
+
+
+static int handle_ctrl_cmd(char *cmd)
+{
+#define CTRL_P 16
+#define CTRL_N 14
+
+ /* initial situation */
+ if (cmd_head == cmd_tail)
+ return 0;
+ switch (*cmd) {
+ case CTRL_P:
+ if (cmdptr != cmd_tail)
+ cmdptr = (cmdptr-1) % KDB_CMD_HISTORY_COUNT;
+ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ return 1;
+ case CTRL_N:
+ if (cmdptr != cmd_head)
+ cmdptr = (cmdptr+1) % KDB_CMD_HISTORY_COUNT;
+ strncpy(cmd_cur, cmd_hist[cmdptr], CMD_BUFLEN);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * kdb_reboot - This function implements the 'reboot' command. Reboot
+ * the system immediately, or loop for ever on failure.
+ */
+static int kdb_reboot(int argc, const char **argv)
+{
+ emergency_restart();
+ kdb_printf("Hmm, kdb_reboot did not reboot, spinning here\n");
+ while (1)
+ cpu_relax();
+ /* NOTREACHED */
+ return 0;
+}
+
+static void kdb_dumpregs(struct pt_regs *regs)
+{
+ int old_lvl = console_loglevel;
+ console_loglevel = 15;
+ kdb_trap_printk++;
+ show_regs(regs);
+ kdb_trap_printk--;
+ kdb_printf("\n");
+ console_loglevel = old_lvl;
+}
+
+void kdb_set_current_task(struct task_struct *p)
+{
+ kdb_current_task = p;
+
+ if (kdb_task_has_cpu(p)) {
+ struct kdb_running_process *krp = kdb_running_process +
+ kdb_process_cpu(p);
+ kdb_current_regs = krp->regs;
+ return;
+ }
+ kdb_current_regs = NULL;
+}
+
+/*
+ * kdb_local - The main code for kdb. This routine is invoked on a
+ * specific processor, it is not global. The main kdb() routine
+ * ensures that only one processor at a time is in this routine.
+ * This code is called with the real reason code on the first
+ * entry to a kdb session, thereafter it is called with reason
+ * SWITCH, even if the user goes back to the original cpu.
+ * Inputs:
+ * reason The reason KDB was invoked
+ * error The hardware-defined error code
+ * regs The exception frame at time of fault/breakpoint.
+ * db_result Result code from the break or debug point.
+ * Returns:
+ * 0 KDB was invoked for an event which it wasn't responsible
+ * 1 KDB handled the event for which it was invoked.
+ * KDB_CMD_GO User typed 'go'.
+ * KDB_CMD_CPU User switched to another cpu.
+ * KDB_CMD_SS Single step.
+ * KDB_CMD_SSB Single step until branch.
+ */
+static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
+ kdb_dbtrap_t db_result)
+{
+ char *cmdbuf;
+ int diag;
+ struct task_struct *kdb_current =
+ kdb_curr_task(raw_smp_processor_id());
+
+ KDB_DEBUG_STATE("kdb_local 1", reason);
+ kdb_go_count = 0;
+ if (reason == KDB_REASON_DEBUG) {
+ /* special case below */
+ } else {
+ kdb_printf("\nEntering kdb (current=0x%p, pid %d) ",
+ kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+ kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+ }
+
+ switch (reason) {
+ case KDB_REASON_DEBUG:
+ {
+ /*
+ * If re-entering kdb after a single step
+ * command, don't print the message.
+ */
+ switch (db_result) {
+ case KDB_DB_BPT:
+ kdb_printf("\nEntering kdb (0x%p, pid %d) ",
+ kdb_current, kdb_current->pid);
+#if defined(CONFIG_SMP)
+ kdb_printf("on processor %d ", raw_smp_processor_id());
+#endif
+ kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ break;
+ case KDB_DB_SSB:
+ /*
+ * In the midst of ssb command. Just return.
+ */
+ KDB_DEBUG_STATE("kdb_local 3", reason);
+ return KDB_CMD_SSB; /* Continue with SSB command */
+
+ break;
+ case KDB_DB_SS:
+ break;
+ case KDB_DB_SSBPT:
+ KDB_DEBUG_STATE("kdb_local 4", reason);
+ return 1; /* kdba_db_trap did the work */
+ default:
+ kdb_printf("kdb: Bad result from kdba_db_trap: %d\n",
+ db_result);
+ break;
+ }
+
+ }
+ break;
+ case KDB_REASON_ENTER:
+ if (KDB_STATE(KEYBOARD))
+ kdb_printf("due to Keyboard Entry\n");
+ else
+ kdb_printf("due to KDB_ENTER()\n");
+ break;
+ case KDB_REASON_KEYBOARD:
+ KDB_STATE_SET(KEYBOARD);
+ kdb_printf("due to Keyboard Entry\n");
+ break;
+ case KDB_REASON_ENTER_SLAVE:
+ /* drop through, slaves only get released via cpu switch */
+ case KDB_REASON_SWITCH:
+ kdb_printf("due to cpu switch\n");
+ break;
+ case KDB_REASON_OOPS:
+ kdb_printf("Oops: %s\n", kdb_diemsg);
+ kdb_printf("due to oops @ " kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ kdb_dumpregs(regs);
+ break;
+ case KDB_REASON_NMI:
+ kdb_printf("due to NonMaskable Interrupt @ "
+ kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ kdb_dumpregs(regs);
+ break;
+ case KDB_REASON_SSTEP:
+ case KDB_REASON_BREAK:
+ kdb_printf("due to %s @ " kdb_machreg_fmt "\n",
+ reason == KDB_REASON_BREAK ?
+ "Breakpoint" : "SS trap", instruction_pointer(regs));
+ /*
+ * Determine if this breakpoint is one that we
+ * are interested in.
+ */
+ if (db_result != KDB_DB_BPT) {
+ kdb_printf("kdb: error return from kdba_bp_trap: %d\n",
+ db_result);
+ KDB_DEBUG_STATE("kdb_local 6", reason);
+ return 0; /* Not for us, dismiss it */
+ }
+ break;
+ case KDB_REASON_RECURSE:
+ kdb_printf("due to Recursion @ " kdb_machreg_fmt "\n",
+ instruction_pointer(regs));
+ break;
+ default:
+ kdb_printf("kdb: unexpected reason code: %d\n", reason);
+ KDB_DEBUG_STATE("kdb_local 8", reason);
+ return 0; /* Not for us, dismiss it */
+ }
+
+ while (1) {
+ /*
+ * Initialize pager context.
+ */
+ kdb_nextline = 1;
+ KDB_STATE_CLEAR(SUPPRESS);
+
+ cmdbuf = cmd_cur;
+ *cmdbuf = '\0';
+ *(cmd_hist[cmd_head]) = '\0';
+
+ if (KDB_FLAG(ONLY_DO_DUMP)) {
+ /* kdb is off but a catastrophic error requires a dump.
+ * Take the dump and reboot.
+ * Turn on logging so the kdb output appears in the log
+ * buffer in the dump.
+ */
+ const char *setargs[] = { "set", "LOGGING", "1" };
+ kdb_set(2, setargs);
+ kdb_reboot(0, NULL);
+ /*NOTREACHED*/
+ }
+
+do_full_getstr:
+#if defined(CONFIG_SMP)
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"),
+ raw_smp_processor_id());
+#else
+ snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"));
+#endif
+ if (defcmd_in_progress)
+ strncat(kdb_prompt_str, "[defcmd]", CMD_BUFLEN);
+
+ /*
+ * Fetch command from keyboard
+ */
+ cmdbuf = kdb_getstr(cmdbuf, CMD_BUFLEN, kdb_prompt_str);
+ if (*cmdbuf != '\n') {
+ if (*cmdbuf < 32) {
+ if (cmdptr == cmd_head) {
+ strncpy(cmd_hist[cmd_head], cmd_cur,
+ CMD_BUFLEN);
+ *(cmd_hist[cmd_head] +
+ strlen(cmd_hist[cmd_head])-1) = '\0';
+ }
+ if (!handle_ctrl_cmd(cmdbuf))
+ *(cmd_cur+strlen(cmd_cur)-1) = '\0';
+ cmdbuf = cmd_cur;
+ goto do_full_getstr;
+ } else {
+ strncpy(cmd_hist[cmd_head], cmd_cur,
+ CMD_BUFLEN);
+ }
+
+ cmd_head = (cmd_head+1) % KDB_CMD_HISTORY_COUNT;
+ if (cmd_head == cmd_tail)
+ cmd_tail = (cmd_tail+1) % KDB_CMD_HISTORY_COUNT;
+ }
+
+ cmdptr = cmd_head;
+ diag = kdb_parse(cmdbuf);
+ if (diag == KDB_NOTFOUND) {
+ kdb_printf("Unknown kdb command: '%s'\n", cmdbuf);
+ diag = 0;
+ }
+ if (diag == KDB_CMD_GO
+ || diag == KDB_CMD_CPU
+ || diag == KDB_CMD_SS
+ || diag == KDB_CMD_SSB
+ || diag == KDB_CMD_KGDB)
+ break;
+
+ if (diag)
+ kdb_cmderror(diag);
+ }
+ KDB_DEBUG_STATE("kdb_local 9", diag);
+ return diag;
+}
+
+
+/*
+ * kdb_print_state - Print the state data for the current processor
+ * for debugging.
+ * Inputs:
+ * text Identifies the debug point
+ * value Any integer value to be printed, e.g. reason code.
+ */
+void kdb_print_state(const char *text, int value)
+{
+ kdb_printf("state: %s cpu %d value %d initial %d state %x\n",
+ text, raw_smp_processor_id(), value, kdb_initial_cpu,
+ kdb_state[raw_smp_processor_id()]);
+}
+
+/*
+ * kdb_main_loop - After initial setup and assignment of the
+ * controlling cpu, all cpus are in this loop. One cpu is in
+ * control and will issue the kdb prompt, the others will spin
+ * until 'go' or cpu switch.
+ *
+ * To get a consistent view of the kernel stacks for all
+ * processes, this routine is invoked from the main kdb code via
+ * an architecture specific routine. kdba_main_loop is
+ * responsible for making the kernel stacks consistent for all
+ * processes, there should be no difference between a blocked
+ * process and a running process as far as kdb is concerned.
+ * Inputs:
+ * reason The reason KDB was invoked
+ * error The hardware-defined error code
+ * reason2 kdb's current reason code.
+ * Initially error but can change
+ * acording to kdb state.
+ * db_result Result code from break or debug point.
+ * regs The exception frame at time of fault/breakpoint.
+ * should always be valid.
+ * Returns:
+ * 0 KDB was invoked for an event which it wasn't responsible
+ * 1 KDB handled the event for which it was invoked.
+ */
+int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
+ kdb_dbtrap_t db_result, struct pt_regs *regs)
+{
+ int result = 1;
+ /* Stay in kdb() until 'go', 'ss[b]' or an error */
+ while (1) {
+ /*
+ * All processors except the one that is in control
+ * will spin here.
+ */
+ KDB_DEBUG_STATE("kdb_main_loop 1", reason);
+ while (KDB_STATE(HOLD_CPU)) {
+ /* state KDB is turned off by kdb_cpu to see if the
+ * other cpus are still live, each cpu in this loop
+ * turns it back on.
+ */
+ if (!KDB_STATE(KDB))
+ KDB_STATE_SET(KDB);
+ }
+
+ KDB_STATE_CLEAR(SUPPRESS);
+ KDB_DEBUG_STATE("kdb_main_loop 2", reason);
+ if (KDB_STATE(LEAVING))
+ break; /* Another cpu said 'go' */
+ /* Still using kdb, this processor is in control */
+ result = kdb_local(reason2, error, regs, db_result);
+ KDB_DEBUG_STATE("kdb_main_loop 3", result);
+
+ if (result == KDB_CMD_CPU)
+ break;
+
+ if (result == KDB_CMD_SS) {
+ KDB_STATE_SET(DOING_SS);
+ break;
+ }
+
+ if (result == KDB_CMD_SSB) {
+ KDB_STATE_SET(DOING_SS);
+ KDB_STATE_SET(DOING_SSB);
+ break;
+ }
+
+ if (result == KDB_CMD_KGDB) {
+ if (!(KDB_STATE(DOING_KGDB) || KDB_STATE(DOING_KGDB2)))
+ kdb_printf("Entering please attach debugger "
+ "or use $D#44+ or $3#33\n");
+ break;
+ }
+ if (result && result != 1 && result != KDB_CMD_GO)
+ kdb_printf("\nUnexpected kdb_local return code %d\n",
+ result);
+ KDB_DEBUG_STATE("kdb_main_loop 4", reason);
+ break;
+ }
+ if (KDB_STATE(DOING_SS))
+ KDB_STATE_CLEAR(SSBPT);
+
+ return result;
+}
+
+/*
+ * kdb_mdr - This function implements the guts of the 'mdr', memory
+ * read command.
+ * mdr <addr arg>,<byte count>
+ * Inputs:
+ * addr Start address
+ * count Number of bytes
+ * Returns:
+ * Always 0. Any errors are detected and printed by kdb_getarea.
+ */
+static int kdb_mdr(unsigned long addr, unsigned int count)
+{
+ unsigned char c;
+ while (count--) {
+ if (kdb_getarea(c, addr))
+ return 0;
+ kdb_printf("%02x", c);
+ addr++;
+ }
+ kdb_printf("\n");
+ return 0;
+}
+
+/*
+ * kdb_md - This function implements the 'md', 'md1', 'md2', 'md4',
+ * 'md8' 'mdr' and 'mds' commands.
+ *
+ * md|mds [<addr arg> [<line count> [<radix>]]]
+ * mdWcN [<addr arg> [<line count> [<radix>]]]
+ * where W = is the width (1, 2, 4 or 8) and N is the count.
+ * for eg., md1c20 reads 20 bytes, 1 at a time.
+ * mdr <addr arg>,<byte count>
+ */
+static void kdb_md_line(const char *fmtstr, unsigned long addr,
+ int symbolic, int nosect, int bytesperword,
+ int num, int repeat, int phys)
+{
+ /* print just one line of data */
+ kdb_symtab_t symtab;
+ char cbuf[32];
+ char *c = cbuf;
+ int i;
+ unsigned long word;
+
+ memset(cbuf, '\0', sizeof(cbuf));
+ if (phys)
+ kdb_printf("phys " kdb_machreg_fmt0 " ", addr);
+ else
+ kdb_printf(kdb_machreg_fmt0 " ", addr);
+
+ for (i = 0; i < num && repeat--; i++) {
+ if (phys) {
+ if (kdb_getphysword(&word, addr, bytesperword))
+ break;
+ } else if (kdb_getword(&word, addr, bytesperword))
+ break;
+ kdb_printf(fmtstr, word);
+ if (symbolic)
+ kdbnearsym(word, &symtab);
+ else
+ memset(&symtab, 0, sizeof(symtab));
+ if (symtab.sym_name) {
+ kdb_symbol_print(word, &symtab, 0);
+ if (!nosect) {
+ kdb_printf("\n");
+ kdb_printf(" %s %s "
+ kdb_machreg_fmt " "
+ kdb_machreg_fmt " "
+ kdb_machreg_fmt, symtab.mod_name,
+ symtab.sec_name, symtab.sec_start,
+ symtab.sym_start, symtab.sym_end);
+ }
+ addr += bytesperword;
+ } else {
+ union {
+ u64 word;
+ unsigned char c[8];
+ } wc;
+ unsigned char *cp;
+#ifdef __BIG_ENDIAN
+ cp = wc.c + 8 - bytesperword;
+#else
+ cp = wc.c;
+#endif
+ wc.word = word;
+#define printable_char(c) \
+ ({unsigned char __c = c; isascii(__c) && isprint(__c) ? __c : '.'; })
+ switch (bytesperword) {
+ case 8:
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ addr += 4;
+ case 4:
+ *c++ = printable_char(*cp++);
+ *c++ = printable_char(*cp++);
+ addr += 2;
+ case 2:
+ *c++ = printable_char(*cp++);
+ addr++;
+ case 1:
+ *c++ = printable_char(*cp++);
+ addr++;
+ break;
+ }
+#undef printable_char
+ }
+ }
+ kdb_printf("%*s %s\n", (int)((num-i)*(2*bytesperword + 1)+1),
+ " ", cbuf);
+}
+
+static int kdb_md(int argc, const char **argv)
+{
+ static unsigned long last_addr;
+ static int last_radix, last_bytesperword, last_repeat;
+ int radix = 16, mdcount = 8, bytesperword = KDB_WORD_SIZE, repeat;
+ int nosect = 0;
+ char fmtchar, fmtstr[64];
+ unsigned long addr;
+ unsigned long word;
+ long offset = 0;
+ int symbolic = 0;
+ int valid = 0;
+ int phys = 0;
+
+ kdbgetintenv("MDCOUNT", &mdcount);
+ kdbgetintenv("RADIX", &radix);
+ kdbgetintenv("BYTESPERWORD", &bytesperword);
+
+ /* Assume 'md <addr>' and start with environment values */
+ repeat = mdcount * 16 / bytesperword;
+
+ if (strcmp(argv[0], "mdr") == 0) {
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+ valid = 1;
+ } else if (isdigit(argv[0][2])) {
+ bytesperword = (int)(argv[0][2] - '0');
+ if (bytesperword == 0) {
+ bytesperword = last_bytesperword;
+ if (bytesperword == 0)
+ bytesperword = 4;
+ }
+ last_bytesperword = bytesperword;
+ repeat = mdcount * 16 / bytesperword;
+ if (!argv[0][3])
+ valid = 1;
+ else if (argv[0][3] == 'c' && argv[0][4]) {
+ char *p;
+ repeat = simple_strtoul(argv[0] + 4, &p, 10);
+ mdcount = ((repeat * bytesperword) + 15) / 16;
+ valid = !*p;
+ }
+ last_repeat = repeat;
+ } else if (strcmp(argv[0], "md") == 0)
+ valid = 1;
+ else if (strcmp(argv[0], "mds") == 0)
+ valid = 1;
+ else if (strcmp(argv[0], "mdp") == 0) {
+ phys = valid = 1;
+ }
+ if (!valid)
+ return KDB_NOTFOUND;
+
+ if (argc == 0) {
+ if (last_addr == 0)
+ return KDB_ARGCOUNT;
+ addr = last_addr;
+ radix = last_radix;
+ bytesperword = last_bytesperword;
+ repeat = last_repeat;
+ mdcount = ((repeat * bytesperword) + 15) / 16;
+ }
+
+ if (argc) {
+ unsigned long val;
+ int diag, nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr,
+ &offset, NULL);
+ if (diag)
+ return diag;
+ if (argc > nextarg+2)
+ return KDB_ARGCOUNT;
+
+ if (argc >= nextarg) {
+ diag = kdbgetularg(argv[nextarg], &val);
+ if (!diag) {
+ mdcount = (int) val;
+ repeat = mdcount * 16 / bytesperword;
+ }
+ }
+ if (argc >= nextarg+1) {
+ diag = kdbgetularg(argv[nextarg+1], &val);
+ if (!diag)
+ radix = (int) val;
+ }
+ }
+
+ if (strcmp(argv[0], "mdr") == 0)
+ return kdb_mdr(addr, mdcount);
+
+ switch (radix) {
+ case 10:
+ fmtchar = 'd';
+ break;
+ case 16:
+ fmtchar = 'x';
+ break;
+ case 8:
+ fmtchar = 'o';
+ break;
+ default:
+ return KDB_BADRADIX;
+ }
+
+ last_radix = radix;
+
+ if (bytesperword > KDB_WORD_SIZE)
+ return KDB_BADWIDTH;
+
+ switch (bytesperword) {
+ case 8:
+ sprintf(fmtstr, "%%16.16l%c ", fmtchar);
+ break;
+ case 4:
+ sprintf(fmtstr, "%%8.8l%c ", fmtchar);
+ break;
+ case 2:
+ sprintf(fmtstr, "%%4.4l%c ", fmtchar);
+ break;
+ case 1:
+ sprintf(fmtstr, "%%2.2l%c ", fmtchar);
+ break;
+ default:
+ return KDB_BADWIDTH;
+ }
+
+ last_repeat = repeat;
+ last_bytesperword = bytesperword;
+
+ if (strcmp(argv[0], "mds") == 0) {
+ symbolic = 1;
+ /* Do not save these changes as last_*, they are temporary mds
+ * overrides.
+ */
+ bytesperword = KDB_WORD_SIZE;
+ repeat = mdcount;
+ kdbgetintenv("NOSECT", &nosect);
+ }
+
+ /* Round address down modulo BYTESPERWORD */
+
+ addr &= ~(bytesperword-1);
+
+ while (repeat > 0) {
+ unsigned long a;
+ int n, z, num = (symbolic ? 1 : (16 / bytesperword));
+
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ for (a = addr, z = 0; z < repeat; a += bytesperword, ++z) {
+ if (phys) {
+ if (kdb_getphysword(&word, a, bytesperword)
+ || word)
+ break;
+ } else if (kdb_getword(&word, a, bytesperword) || word)
+ break;
+ }
+ n = min(num, repeat);
+ kdb_md_line(fmtstr, addr, symbolic, nosect, bytesperword,
+ num, repeat, phys);
+ addr += bytesperword * n;
+ repeat -= n;
+ z = (z + num - 1) / num;
+ if (z > 2) {
+ int s = num * (z-2);
+ kdb_printf(kdb_machreg_fmt0 "-" kdb_machreg_fmt0
+ " zero suppressed\n",
+ addr, addr + bytesperword * s - 1);
+ addr += bytesperword * s;
+ repeat -= s;
+ }
+ }
+ last_addr = addr;
+
+ return 0;
+}
+
+/*
+ * kdb_mm - This function implements the 'mm' command.
+ * mm address-expression new-value
+ * Remarks:
+ * mm works on machine words, mmW works on bytes.
+ */
+static int kdb_mm(int argc, const char **argv)
+{
+ int diag;
+ unsigned long addr;
+ long offset = 0;
+ unsigned long contents;
+ int nextarg;
+ int width;
+
+ if (argv[0][2] && !isdigit(argv[0][2]))
+ return KDB_NOTFOUND;
+
+ if (argc < 2)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+
+ if (nextarg > argc)
+ return KDB_ARGCOUNT;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &contents, NULL, NULL);
+ if (diag)
+ return diag;
+
+ if (nextarg != argc + 1)
+ return KDB_ARGCOUNT;
+
+ width = argv[0][2] ? (argv[0][2] - '0') : (KDB_WORD_SIZE);
+ diag = kdb_putword(addr, contents, width);
+ if (diag)
+ return diag;
+
+ kdb_printf(kdb_machreg_fmt " = " kdb_machreg_fmt "\n", addr, contents);
+
+ return 0;
+}
+
+/*
+ * kdb_go - This function implements the 'go' command.
+ * go [address-expression]
+ */
+static int kdb_go(int argc, const char **argv)
+{
+ unsigned long addr;
+ int diag;
+ int nextarg;
+ long offset;
+
+ if (argc == 1) {
+ if (raw_smp_processor_id() != kdb_initial_cpu) {
+ kdb_printf("go <address> must be issued from the "
+ "initial cpu, do cpu %d first\n",
+ kdb_initial_cpu);
+ return KDB_ARGCOUNT;
+ }
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg,
+ &addr, &offset, NULL);
+ if (diag)
+ return diag;
+ } else if (argc) {
+ return KDB_ARGCOUNT;
+ }
+
+ diag = KDB_CMD_GO;
+ if (KDB_FLAG(CATASTROPHIC)) {
+ kdb_printf("Catastrophic error detected\n");
+ kdb_printf("kdb_continue_catastrophic=%d, ",
+ kdb_continue_catastrophic);
+ if (kdb_continue_catastrophic == 0 && kdb_go_count++ == 0) {
+ kdb_printf("type go a second time if you really want "
+ "to continue\n");
+ return 0;
+ }
+ if (kdb_continue_catastrophic == 2) {
+ kdb_printf("forcing reboot\n");
+ kdb_reboot(0, NULL);
+ }
+ kdb_printf("attempting to continue\n");
+ }
+ if (raw_smp_processor_id() != kdb_initial_cpu) {
+ char buf[80];
+ kdb_printf("go was not issued from initial cpu, switching "
+ "back to cpu %d\n", kdb_initial_cpu);
+ sprintf(buf, "cpu %d\n", kdb_initial_cpu);
+ /* Recursive use of kdb_parse, do not use argv after
+ * this point */
+ argv = NULL;
+ diag = kdb_parse(buf);
+ if (diag == KDB_CMD_CPU)
+ KDB_STATE_SET_CPU(GO_SWITCH, kdb_initial_cpu);
+ }
+ return diag;
+}
+
+/*
+ * kdb_rd - This function implements the 'rd' command.
+ */
+static int kdb_rd(int argc, const char **argv)
+{
+ int diag = kdb_check_regs();
+ if (diag)
+ return diag;
+
+ kdb_dumpregs(kdb_current_regs);
+ return 0;
+}
+
+/*
+ * kdb_rm - This function implements the 'rm' (register modify) command.
+ * rm register-name new-contents
+ * Remarks:
+ * Currently doesn't allow modification of control or
+ * debug registers.
+ */
+static int kdb_rm(int argc, const char **argv)
+{
+ int diag;
+ int ind = 0;
+ unsigned long contents;
+
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+ /*
+ * Allow presence or absence of leading '%' symbol.
+ */
+ if (argv[1][0] == '%')
+ ind = 1;
+
+ diag = kdbgetularg(argv[2], &contents);
+ if (diag)
+ return diag;
+
+ diag = kdb_check_regs();
+ if (diag)
+ return diag;
+ kdb_printf("ERROR: Register set currently not implemented\n");
+ return 0;
+}
+
+#if defined(CONFIG_MAGIC_SYSRQ)
+/*
+ * kdb_sr - This function implements the 'sr' (SYSRQ key) command
+ * which interfaces to the soi-disant MAGIC SYSRQ functionality.
+ * sr <magic-sysrq-code>
+ */
+static int kdb_sr(int argc, const char **argv)
+{
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+ if (!__sysrq_enabled) {
+ kdb_printf("Auto activating sysrq\n");
+ __sysrq_enabled = 1;
+ }
+
+ kdb_trap_printk++;
+ handle_sysrq(*argv[1], NULL);
+ kdb_trap_printk--;
+
+ return 0;
+}
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+/*
+ * kdb_ef - This function implements the 'regs' (display exception
+ * frame) command. This command takes an address and expects to
+ * find an exception frame at that address, formats and prints
+ * it.
+ * regs address-expression
+ * Remarks:
+ * Not done yet.
+ */
+static int kdb_ef(int argc, const char **argv)
+{
+ int diag;
+ unsigned long addr;
+ long offset;
+ int nextarg;
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+ show_regs((struct pt_regs *)addr);
+ return 0;
+}
+
+#if defined(CONFIG_MODULES)
+/* modules using other modules */
+struct module_use {
+ struct list_head list;
+ struct module *module_which_uses;
+};
+
+/*
+ * kdb_lsmod - This function implements the 'lsmod' command. Lists
+ * currently loaded kernel modules.
+ * Mostly taken from userland lsmod.
+ */
+static int kdb_lsmod(int argc, const char **argv)
+{
+ struct module *mod;
+
+ if (argc != 0)
+ return KDB_ARGCOUNT;
+
+ kdb_printf("Module Size modstruct Used by\n");
+ list_for_each_entry(mod, kdb_modules, list) {
+
+ kdb_printf("%-20s%8u 0x%p ", mod->name,
+ mod->core_size, (void *)mod);
+#ifdef CONFIG_MODULE_UNLOAD
+ kdb_printf("%4d ", module_refcount(mod));
+#endif
+ if (mod->state == MODULE_STATE_GOING)
+ kdb_printf(" (Unloading)");
+ else if (mod->state == MODULE_STATE_COMING)
+ kdb_printf(" (Loading)");
+ else
+ kdb_printf(" (Live)");
+
+#ifdef CONFIG_MODULE_UNLOAD
+ {
+ struct module_use *use;
+ kdb_printf(" [ ");
+ list_for_each_entry(use, &mod->modules_which_use_me,
+ list)
+ kdb_printf("%s ", use->module_which_uses->name);
+ kdb_printf("]\n");
+ }
+#endif
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_MODULES */
+
+/*
+ * kdb_env - This function implements the 'env' command. Display the
+ * current environment variables.
+ */
+
+static int kdb_env(int argc, const char **argv)
+{
+ int i;
+
+ for (i = 0; i < __nenv; i++) {
+ if (__env[i])
+ kdb_printf("%s\n", __env[i]);
+ }
+
+ if (KDB_DEBUG(MASK))
+ kdb_printf("KDBFLAGS=0x%x\n", kdb_flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_PRINTK
+/*
+ * kdb_dmesg - This function implements the 'dmesg' command to display
+ * the contents of the syslog buffer.
+ * dmesg [lines] [adjust]
+ */
+static int kdb_dmesg(int argc, const char **argv)
+{
+ char *syslog_data[4], *start, *end, c = '\0', *p;
+ int diag, logging, logsize, lines = 0, adjust = 0, n;
+
+ if (argc > 2)
+ return KDB_ARGCOUNT;
+ if (argc) {
+ char *cp;
+ lines = simple_strtol(argv[1], &cp, 0);
+ if (*cp)
+ lines = 0;
+ if (argc > 1) {
+ adjust = simple_strtoul(argv[2], &cp, 0);
+ if (*cp || adjust < 0)
+ adjust = 0;
+ }
+ }
+
+ /* disable LOGGING if set */
+ diag = kdbgetintenv("LOGGING", &logging);
+ if (!diag && logging) {
+ const char *setargs[] = { "set", "LOGGING", "0" };
+ kdb_set(2, setargs);
+ }
+
+ /* syslog_data[0,1] physical start, end+1. syslog_data[2,3]
+ * logical start, end+1. */
+ kdb_syslog_data(syslog_data);
+ if (syslog_data[2] == syslog_data[3])
+ return 0;
+ logsize = syslog_data[1] - syslog_data[0];
+ start = syslog_data[2];
+ end = syslog_data[3];
+#define KDB_WRAP(p) (((p - syslog_data[0]) % logsize) + syslog_data[0])
+ for (n = 0, p = start; p < end; ++p) {
+ c = *KDB_WRAP(p);
+ if (c == '\n')
+ ++n;
+ }
+ if (c != '\n')
+ ++n;
+ if (lines < 0) {
+ if (adjust >= n)
+ kdb_printf("buffer only contains %d lines, nothing "
+ "printed\n", n);
+ else if (adjust - lines >= n)
+ kdb_printf("buffer only contains %d lines, last %d "
+ "lines printed\n", n, n - adjust);
+ if (adjust) {
+ for (; start < end && adjust; ++start) {
+ if (*KDB_WRAP(start) == '\n')
+ --adjust;
+ }
+ if (start < end)
+ ++start;
+ }
+ for (p = start; p < end && lines; ++p) {
+ if (*KDB_WRAP(p) == '\n')
+ ++lines;
+ }
+ end = p;
+ } else if (lines > 0) {
+ int skip = n - (adjust + lines);
+ if (adjust >= n) {
+ kdb_printf("buffer only contains %d lines, "
+ "nothing printed\n", n);
+ skip = n;
+ } else if (skip < 0) {
+ lines += skip;
+ skip = 0;
+ kdb_printf("buffer only contains %d lines, first "
+ "%d lines printed\n", n, lines);
+ }
+ for (; start < end && skip; ++start) {
+ if (*KDB_WRAP(start) == '\n')
+ --skip;
+ }
+ for (p = start; p < end && lines; ++p) {
+ if (*KDB_WRAP(p) == '\n')
+ --lines;
+ }
+ end = p;
+ }
+ /* Do a line at a time (max 200 chars) to reduce protocol overhead */
+ c = '\n';
+ while (start != end) {
+ char buf[201];
+ p = buf;
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ while (start < end && (c = *KDB_WRAP(start)) &&
+ (p - buf) < sizeof(buf)-1) {
+ ++start;
+ *p++ = c;
+ if (c == '\n')
+ break;
+ }
+ *p = '\0';
+ kdb_printf("%s", buf);
+ }
+ if (c != '\n')
+ kdb_printf("\n");
+
+ return 0;
+}
+#endif /* CONFIG_PRINTK */
+/*
+ * kdb_cpu - This function implements the 'cpu' command.
+ * cpu [<cpunum>]
+ * Returns:
+ * KDB_CMD_CPU for success, a kdb diagnostic if error
+ */
+static void kdb_cpu_status(void)
+{
+ int i, start_cpu, first_print = 1;
+ char state, prev_state = '?';
+
+ kdb_printf("Currently on cpu %d\n", raw_smp_processor_id());
+ kdb_printf("Available cpus: ");
+ for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
+ if (!cpu_online(i)) {
+ state = 'F'; /* cpu is offline */
+ } else {
+ struct kdb_running_process *krp =
+ kdb_running_process + i;
+ state = ' '; /* cpu is responding to kdb */
+ if (kdb_task_state_char(krp->p) == 'I')
+ state = 'I'; /* idle task */
+ }
+ if (state != prev_state) {
+ if (prev_state != '?') {
+ if (!first_print)
+ kdb_printf(", ");
+ first_print = 0;
+ kdb_printf("%d", start_cpu);
+ if (start_cpu < i-1)
+ kdb_printf("-%d", i-1);
+ if (prev_state != ' ')
+ kdb_printf("(%c)", prev_state);
+ }
+ prev_state = state;
+ start_cpu = i;
+ }
+ }
+ /* print the trailing cpus, ignoring them if they are all offline */
+ if (prev_state != 'F') {
+ if (!first_print)
+ kdb_printf(", ");
+ kdb_printf("%d", start_cpu);
+ if (start_cpu < i-1)
+ kdb_printf("-%d", i-1);
+ if (prev_state != ' ')
+ kdb_printf("(%c)", prev_state);
+ }
+ kdb_printf("\n");
+}
+
+static int kdb_cpu(int argc, const char **argv)
+{
+ unsigned long cpunum;
+ int diag;
+
+ if (argc == 0) {
+ kdb_cpu_status();
+ return 0;
+ }
+
+ if (argc != 1)
+ return KDB_ARGCOUNT;
+
+ diag = kdbgetularg(argv[1], &cpunum);
+ if (diag)
+ return diag;
+
+ /*
+ * Validate cpunum
+ */
+ if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+ return KDB_BADCPUNUM;
+
+ dbg_switch_cpu = cpunum;
+
+ /*
+ * Switch to other cpu
+ */
+ return KDB_CMD_CPU;
+}
+
+/* The user may not realize that ps/bta with no parameters does not print idle
+ * or sleeping system daemon processes, so tell them how many were suppressed.
+ */
+void kdb_ps_suppressed(void)
+{
+ int idle = 0, daemon = 0;
+ unsigned long mask_I = kdb_task_state_string("I"),
+ mask_M = kdb_task_state_string("M");
+ unsigned long cpu;
+ const struct task_struct *p, *g;
+ for_each_online_cpu(cpu) {
+ p = kdb_curr_task(cpu);
+ if (kdb_task_state(p, mask_I))
+ ++idle;
+ }
+ kdb_do_each_thread(g, p) {
+ if (kdb_task_state(p, mask_M))
+ ++daemon;
+ } kdb_while_each_thread(g, p);
+ if (idle || daemon) {
+ if (idle)
+ kdb_printf("%d idle process%s (state I)%s\n",
+ idle, idle == 1 ? "" : "es",
+ daemon ? " and " : "");
+ if (daemon)
+ kdb_printf("%d sleeping system daemon (state M) "
+ "process%s", daemon,
+ daemon == 1 ? "" : "es");
+ kdb_printf(" suppressed,\nuse 'ps A' to see all.\n");
+ }
+}
+
+/*
+ * kdb_ps - This function implements the 'ps' command which shows a
+ * list of the active processes.
+ * ps [DRSTCZEUIMA] All processes, optionally filtered by state
+ */
+void kdb_ps1(const struct task_struct *p)
+{
+ struct kdb_running_process *krp = kdb_running_process +
+ kdb_process_cpu(p);
+ kdb_printf("0x%p %8d %8d %d %4d %c 0x%p %c%s\n",
+ (void *)p, p->pid, p->parent->pid,
+ kdb_task_has_cpu(p), kdb_process_cpu(p),
+ kdb_task_state_char(p),
+ (void *)(&p->thread),
+ p == kdb_curr_task(raw_smp_processor_id()) ? '*' : ' ',
+ p->comm);
+ if (kdb_task_has_cpu(p)) {
+ if (!krp->seqno || !krp->p)
+ kdb_printf(" Error: no saved data for this cpu\n");
+ else {
+ if (krp->seqno < kdb_seqno - 1)
+ kdb_printf(" Warning: process state is "
+ "stale\n");
+ if (krp->p != p)
+ kdb_printf(" Error: does not match running "
+ "process table (0x%p)\n", krp->p);
+ }
+ }
+}
+
+static int kdb_ps(int argc, const char **argv)
+{
+ struct task_struct *g, *p;
+ unsigned long mask, cpu;
+
+ if (argc == 0)
+ kdb_ps_suppressed();
+ kdb_printf("%-*s Pid Parent [*] cpu State %-*s Command\n",
+ (int)(2*sizeof(void *))+2, "Task Addr",
+ (int)(2*sizeof(void *))+2, "Thread");
+ mask = kdb_task_state_string(argc ? argv[1] : NULL);
+ /* Run the active tasks first */
+ for_each_online_cpu(cpu) {
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ p = kdb_curr_task(cpu);
+ if (kdb_task_state(p, mask))
+ kdb_ps1(p);
+ }
+ kdb_printf("\n");
+ /* Now the real tasks */
+ kdb_do_each_thread(g, p) {
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return 0;
+ if (kdb_task_state(p, mask))
+ kdb_ps1(p);
+ } kdb_while_each_thread(g, p);
+
+ return 0;
+}
+
+/*
+ * kdb_pid - This function implements the 'pid' command which switches
+ * the currently active process.
+ * pid [<pid> | R]
+ */
+static int kdb_pid(int argc, const char **argv)
+{
+ struct task_struct *p;
+ unsigned long val;
+ int diag;
+
+ if (argc > 1)
+ return KDB_ARGCOUNT;
+
+ if (argc) {
+ if (strcmp(argv[1], "R") == 0) {
+ p = KDB_RUNNING_PROCESS_ORIGINAL[kdb_initial_cpu].p;
+ } else {
+ diag = kdbgetularg(argv[1], &val);
+ if (diag)
+ return KDB_BADINT;
+
+ p = find_task_by_pid_ns((pid_t)val, &init_pid_ns);
+ if (!p) {
+ kdb_printf("No task with pid=%d\n", (pid_t)val);
+ return 0;
+ }
+ }
+ kdb_set_current_task(p);
+ }
+ kdb_printf("KDB current process is %s(pid=%d)\n",
+ kdb_current_task->comm,
+ kdb_current_task->pid);
+
+ return 0;
+}
+
+/*
+ * kdb_ll - This function implements the 'll' command which follows a
+ * linked list and executes an arbitrary command for each
+ * element.
+ */
+static int kdb_ll(int argc, const char **argv)
+{
+ int diag;
+ unsigned long addr;
+ long offset = 0;
+ unsigned long va;
+ unsigned long linkoffset;
+ int nextarg;
+ const char *command;
+
+ if (argc != 3)
+ return KDB_ARGCOUNT;
+
+ nextarg = 1;
+ diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
+ if (diag)
+ return diag;
+
+ diag = kdbgetularg(argv[2], &linkoffset);
+ if (diag)
+ return diag;
+
+ /*
+ * Using the starting address as
+ * the first element in the list, and assuming that
+ * the list ends with a null pointer.
+ */
+
+ va = addr;
+ command = kdb_strdup(argv[3], GFP_KDB);
+ if (!command) {
+ kdb_printf("%s: cannot duplicate command\n", __func__);
+ return 0;
+ }
+ /* Recursive use of kdb_parse, do not use argv after this point */
+ argv = NULL;
+
+ while (va) {
+ char buf[80];
+
+ sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
+ diag = kdb_parse(buf);
+ if (diag)
+ return diag;
+
+ addr = va + linkoffset;
+ if (kdb_getword(&va, addr, sizeof(va)))
+ return 0;
+ }
+ kfree(command);
+
+ return 0;
+}
+
+static int kdb_kgdb(int argc, const char **argv)
+{
+ return KDB_CMD_KGDB;
+}
+
+/*
+ * kdb_help - This function implements the 'help' and '?' commands.
+ */
+static int kdb_help(int argc, const char **argv)
+{
+ kdbtab_t *kt;
+ int i;
+
+ kdb_printf("%-15.15s %-20.20s %s\n", "Command", "Usage", "Description");
+ kdb_printf("-----------------------------"
+ "-----------------------------\n");
+ for (i = 0, kt = kdb_commands; i < kdb_max_commands; i++, kt++) {
+ if (kt->cmd_name)
+ kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
+ kt->cmd_usage, kt->cmd_help);
+ }
+ return 0;
+}
+
+/*
+ * kdb_kill - This function implements the 'kill' commands.
+ */
+static int kdb_kill(int argc, const char **argv)
+{
+ long sig, pid;
+ char *endp;
+ struct task_struct *p;
+ struct siginfo info;
+
+ if (argc != 2)
+ return KDB_ARGCOUNT;
+
+ sig = simple_strtol(argv[1], &endp, 0);
+ if (*endp)
+ return KDB_BADINT;
+ if (sig >= 0) {
+ kdb_printf("Invalid signal parameter.<-signal>\n");
+ return 0;
+ }
+ sig = -sig;
+
+ pid = simple_strtol(argv[2], &endp, 0);
+ if (*endp)
+ return KDB_BADINT;
+ if (pid <= 0) {
+ kdb_printf("Process ID must be large than 0.\n");
+ return 0;
+ }
+
+ /* Find the process. */
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ if (!p) {
+ kdb_printf("The specified process isn't found.\n");
+ return 0;
+ }
+ p = p->group_leader;
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_code = SI_USER;
+ info.si_pid = pid; /* same capabilities as process being signalled */
+ info.si_uid = 0; /* kdb has root authority */
+ kdb_send_sig_info(p, &info, kdb_seqno);
+ return 0;
+}
+
+struct kdb_tm {
+ int tm_sec; /* seconds */
+ int tm_min; /* minutes */
+ int tm_hour; /* hours */
+ int tm_mday; /* day of the month */
+ int tm_mon; /* month */
+ int tm_year; /* year */
+};
+
+static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
+{
+ /* This will work from 1970-2099, 2100 is not a leap year */
+ static int mon_day[] = { 31, 29, 31, 30, 31, 30, 31,
+ 31, 30, 31, 30, 31 };
+ memset(tm, 0, sizeof(*tm));
+ tm->tm_sec = tv->tv_sec % (24 * 60 * 60);
+ tm->tm_mday = tv->tv_sec / (24 * 60 * 60) +
+ (2 * 365 + 1); /* shift base from 1970 to 1968 */
+ tm->tm_min = tm->tm_sec / 60 % 60;
+ tm->tm_hour = tm->tm_sec / 60 / 60;
+ tm->tm_sec = tm->tm_sec % 60;
+ tm->tm_year = 68 + 4*(tm->tm_mday / (4*365+1));
+ tm->tm_mday %= (4*365+1);
+ mon_day[1] = 29;
+ while (tm->tm_mday >= mon_day[tm->tm_mon]) {
+ tm->tm_mday -= mon_day[tm->tm_mon];
+ if (++tm->tm_mon == 12) {
+ tm->tm_mon = 0;
+ ++tm->tm_year;
+ mon_day[1] = 28;
+ }
+ }
+ ++tm->tm_mday;
+}
+
+/*
+ * Most of this code has been lifted from kernel/timer.c::sys_sysinfo().
+ * I cannot call that code directly from kdb, it has an unconditional
+ * cli()/sti() and calls routines that take locks which can stop the debugger.
+ */
+static void kdb_sysinfo(struct sysinfo *val)
+{
+ struct timespec uptime;
+ do_posix_clock_monotonic_gettime(&uptime);
+ memset(val, 0, sizeof(*val));
+ val->uptime = uptime.tv_sec;
+ val->loads[0] = avenrun[0];
+ val->loads[1] = avenrun[1];
+ val->loads[2] = avenrun[2];
+ val->procs = nr_threads-1;
+ si_meminfo(val);
+ __si_swapinfo(val);
+
+ return;
+}
+
+/*
+ * kdb_summary - This function implements the 'summary' command.
+ */
+static int kdb_summary(int argc, const char **argv)
+{
+ struct kdb_tm tm;
+ struct sysinfo val;
+
+ if (argc)
+ return KDB_ARGCOUNT;
+
+ kdb_printf("sysname %s\n", init_uts_ns.name.sysname);
+ kdb_printf("release %s\n", init_uts_ns.name.release);
+ kdb_printf("version %s\n", init_uts_ns.name.version);
+ kdb_printf("machine %s\n", init_uts_ns.name.machine);
+ kdb_printf("nodename %s\n", init_uts_ns.name.nodename);
+ kdb_printf("domainname %s\n", init_uts_ns.name.domainname);
+ kdb_printf("ccversion %s\n", __stringify(CCVERSION));
+
+ kdb_gmtime(&xtime, &tm);
+ kdb_printf("date %04d-%02d-%02d %02d:%02d:%02d "
+ "tz_minuteswest %d\n",
+ 1900+tm.tm_year, tm.tm_mon+1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec,
+ sys_tz.tz_minuteswest);
+
+ kdb_sysinfo(&val);
+ kdb_printf("uptime ");
+ if (val.uptime > (24*60*60)) {
+ int days = val.uptime / (24*60*60);
+ val.uptime %= (24*60*60);
+ kdb_printf("%d day%s ", days, days == 1 ? "" : "s");
+ }
+ kdb_printf("%02ld:%02ld\n", val.uptime/(60*60), (val.uptime/60)%60);
+
+ /* lifted from fs/proc/proc_misc.c::loadavg_read_proc() */
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+ kdb_printf("load avg %ld.%02ld %ld.%02ld %ld.%02ld\n",
+ LOAD_INT(val.loads[0]), LOAD_FRAC(val.loads[0]),
+ LOAD_INT(val.loads[1]), LOAD_FRAC(val.loads[1]),
+ LOAD_INT(val.loads[2]), LOAD_FRAC(val.loads[2]));
+ kdb_printf("\n");
+#undef LOAD_INT
+#undef LOAD_FRAC
+
+ kdb_seq_file_reset();
+ _meminfo_proc_show(&kdb_seq_file, NULL, 0); /* in fs/proc/meminfo.c */
+ kdb_seq_file_print();
+
+ return 0;
+}
+
+/*
+ * kdb_per_cpu - This function implements the 'per_cpu' command.
+ */
+static int kdb_per_cpu(int argc, const char **argv)
+{
+ char buf[256], fmtstr[64];
+ kdb_symtab_t symtab;
+ cpumask_t suppress = CPU_MASK_NONE;
+ int cpu, diag;
+ unsigned long addr, val, bytesperword = 0, whichcpu = ~0UL;
+
+ if (argc < 1 || argc > 3)
+ return KDB_ARGCOUNT;
+
+ snprintf(buf, sizeof(buf), "per_cpu__%s", argv[1]);
+ if (!kdbgetsymval(buf, &symtab)) {
+ kdb_printf("%s is not a per_cpu variable\n", argv[1]);
+ return KDB_BADADDR;
+ }
+ if (argc >= 2) {
+ diag = kdbgetularg(argv[2], &bytesperword);
+ if (diag)
+ return diag;
+ }
+ if (!bytesperword)
+ bytesperword = KDB_WORD_SIZE;
+ else if (bytesperword > KDB_WORD_SIZE)
+ return KDB_BADWIDTH;
+ sprintf(fmtstr, "%%0%dlx ", (int)(2*bytesperword));
+ if (argc >= 3) {
+ diag = kdbgetularg(argv[3], &whichcpu);
+ if (diag)
+ return diag;
+ if (!cpu_online(whichcpu)) {
+ kdb_printf("cpu %ld is not online\n", whichcpu);
+ return KDB_BADCPUNUM;
+ }
+ }
+
+ /* Most architectures use __per_cpu_offset[cpu], some use
+ * __per_cpu_offset(cpu), smp has no __per_cpu_offset.
+ */
+#ifdef __per_cpu_offset
+#define KDB_PCU(cpu) __per_cpu_offset(cpu)
+#else
+#ifdef CONFIG_SMP
+#define KDB_PCU(cpu) __per_cpu_offset[cpu]
+#else
+#define KDB_PCU(cpu) 0
+#endif
+#endif
+
+ for_each_online_cpu(cpu) {
+ if (whichcpu != ~0UL && whichcpu != cpu)
+ continue;
+ addr = symtab.sym_start + KDB_PCU(cpu);
+ diag = kdb_getword(&val, addr, bytesperword);
+ if (diag) {
+ kdb_printf("%5d " kdb_bfd_vma_fmt0 " - unable to "
+ "read, diag=%d\n", cpu, addr, diag);
+ continue;
+ }
+#ifdef CONFIG_SMP
+ if (!val) {
+ cpu_set(cpu, suppress);
+ continue;
+ }
+#endif /* CONFIG_SMP */
+ kdb_printf("%5d ", cpu);
+ kdb_md_line(fmtstr, addr,
+ bytesperword == KDB_WORD_SIZE,
+ 1, bytesperword, 1, 1, 0);
+ }
+ if (cpus_weight(suppress) == 0)
+ return 0;
+ kdb_printf("Zero suppressed cpu(s):");
+ for (cpu = first_cpu(suppress); cpu < num_possible_cpus();
+ cpu = next_cpu(cpu, suppress)) {
+ kdb_printf(" %d", cpu);
+ if (cpu == num_possible_cpus() - 1 ||
+ next_cpu(cpu, suppress) != cpu + 1)
+ continue;
+ while (cpu < num_possible_cpus() &&
+ next_cpu(cpu, suppress) == cpu + 1)
+ ++cpu;
+ kdb_printf("-%d", cpu);
+ }
+ kdb_printf("\n");
+
+#undef KDB_PCU
+
+ return 0;
+}
+
+/*
+ * display help for the use of cmd | grep pattern
+ */
+static int kdb_grep_help(int argc, const char **argv)
+{
+ kdb_printf("Usage of cmd args | grep pattern:\n");
+ kdb_printf(" Any command's output may be filtered through an ");
+ kdb_printf("emulated 'pipe'.\n");
+ kdb_printf(" 'grep' is just a key word.\n");
+ kdb_printf(" The pattern may include a very limited set of "
+ "metacharacters:\n");
+ kdb_printf(" pattern or ^pattern or pattern$ or ^pattern$\n");
+ kdb_printf(" And if there are spaces in the pattern, you may "
+ "quote it:\n");
+ kdb_printf(" \"pat tern\" or \"^pat tern\" or \"pat tern$\""
+ " or \"^pat tern$\"\n");
+ return 0;
+}
+
+/*
+ * kdb_register_repeat - This function is used to register a kernel
+ * debugger command.
+ * Inputs:
+ * cmd Command name
+ * func Function to execute the command
+ * usage A simple usage string showing arguments
+ * help A simple help string describing command
+ * repeat Does the command auto repeat on enter?
+ * Returns:
+ * zero for success, one if a duplicate command.
+ */
+#define kdb_command_extend 50 /* arbitrary */
+int kdb_register_repeat(char *cmd,
+ kdb_func_t func,
+ char *usage,
+ char *help,
+ short minlen,
+ kdb_repeat_t repeat)
+{
+ int i;
+ kdbtab_t *kp;
+
+ /*
+ * Brute force method to determine duplicates
+ */
+ for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) {
+ if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+ kdb_printf("Duplicate kdb command registered: "
+ "%s, func %p help %s\n", cmd, func, help);
+ return 1;
+ }
+ }
+
+ /*
+ * Insert command into first available location in table
+ */
+ for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) {
+ if (kp->cmd_name == NULL)
+ break;
+ }
+
+ if (i >= kdb_max_commands) {
+ kdbtab_t *new = kmalloc((kdb_max_commands +
+ kdb_command_extend) * sizeof(*new), GFP_KDB);
+ if (!new) {
+ kdb_printf("Could not allocate new kdb_command "
+ "table\n");
+ return 1;
+ }
+ if (kdb_commands) {
+ memcpy(new, kdb_commands,
+ kdb_max_commands * sizeof(*new));
+ kfree(kdb_commands);
+ }
+ memset(new + kdb_max_commands, 0,
+ kdb_command_extend * sizeof(*new));
+ kdb_commands = new;
+ kp = kdb_commands + kdb_max_commands;
+ kdb_max_commands += kdb_command_extend;
+ }
+
+ kp->cmd_name = cmd;
+ kp->cmd_func = func;
+ kp->cmd_usage = usage;
+ kp->cmd_help = help;
+ kp->cmd_flags = 0;
+ kp->cmd_minlen = minlen;
+ kp->cmd_repeat = repeat;
+
+ return 0;
+}
+
+/*
+ * kdb_register - Compatibility register function for commands that do
+ * not need to specify a repeat state. Equivalent to
+ * kdb_register_repeat with KDB_REPEAT_NONE.
+ * Inputs:
+ * cmd Command name
+ * func Function to execute the command
+ * usage A simple usage string showing arguments
+ * help A simple help string describing command
+ * Returns:
+ * zero for success, one if a duplicate command.
+ */
+int kdb_register(char *cmd,
+ kdb_func_t func,
+ char *usage,
+ char *help,
+ short minlen)
+{
+ return kdb_register_repeat(cmd, func, usage, help, minlen,
+ KDB_REPEAT_NONE);
+}
+
+/*
+ * kdb_unregister - This function is used to unregister a kernel
+ * debugger command. It is generally called when a module which
+ * implements kdb commands is unloaded.
+ * Inputs:
+ * cmd Command name
+ * Returns:
+ * zero for success, one command not registered.
+ */
+int kdb_unregister(char *cmd)
+{
+ int i;
+ kdbtab_t *kp;
+
+ /*
+ * find the command.
+ */
+ for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++) {
+ if (kp->cmd_name && (strcmp(kp->cmd_name, cmd) == 0)) {
+ kp->cmd_name = NULL;
+ return 0;
+ }
+ }
+
+ /* Couldn't find it. */
+ return 1;
+}
+
+/* Initialize the kdb command table. */
+static void __init kdb_inittab(void)
+{
+ int i;
+ kdbtab_t *kp;
+
+ for (i = 0, kp = kdb_commands; i < kdb_max_commands; i++, kp++)
+ kp->cmd_name = NULL;
+
+ kdb_register_repeat("md", kdb_md, "<vaddr>",
+ "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
+ KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
+ "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
+ "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mds", kdb_md, "<vaddr>",
+ "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
+ "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_repeat("go", kdb_go, "[<vaddr>]",
+ "Continue Execution", 1, KDB_REPEAT_NONE);
+ kdb_register_repeat("rd", kdb_rd, "",
+ "Display Registers", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
+ "Modify Registers", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("ef", kdb_ef, "<vaddr>",
+ "Display exception frame", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
+ "Stack traceback", 1, KDB_REPEAT_NONE);
+ kdb_register_repeat("btp", kdb_bt, "<pid>",
+ "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
+ "Display stack all processes", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("btc", kdb_bt, "",
+ "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+ "Backtrace process given its struct task address", 0,
+ KDB_REPEAT_NONE);
+ kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
+ "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("env", kdb_env, "",
+ "Show environment variables", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("set", kdb_set, "",
+ "Set environment variables", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("help", kdb_help, "",
+ "Display Help Message", 1, KDB_REPEAT_NONE);
+ kdb_register_repeat("?", kdb_help, "",
+ "Display Help Message", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
+ "Switch to new cpu", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("kgdb", kdb_kgdb, "",
+ "Enter kgdb mode", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
+ "Display active task list", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("pid", kdb_pid, "<pidnum>",
+ "Switch to another task", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("reboot", kdb_reboot, "",
+ "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+#if defined(CONFIG_MODULES)
+ kdb_register_repeat("lsmod", kdb_lsmod, "",
+ "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_MAGIC_SYSRQ)
+ kdb_register_repeat("sr", kdb_sr, "<key>",
+ "Magic SysRq key", 0, KDB_REPEAT_NONE);
+#endif
+#if defined(CONFIG_PRINTK)
+ kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
+ "Display syslog buffer", 0, KDB_REPEAT_NONE);
+#endif
+ kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+ "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
+ "Send a signal to a process", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("summary", kdb_summary, "",
+ "Summarize the system", 4, KDB_REPEAT_NONE);
+ kdb_register_repeat("per_cpu", kdb_per_cpu, "",
+ "Display per_cpu variables", 3, KDB_REPEAT_NONE);
+ kdb_register_repeat("grephelp", kdb_grep_help, "",
+ "Display help on | grep", 0, KDB_REPEAT_NONE);
+}
+
+/* Execute any commands defined in kdb_cmds. */
+static void __init kdb_cmd_init(void)
+{
+ int i, diag;
+ for (i = 0; kdb_cmds[i]; ++i) {
+ diag = kdb_parse(kdb_cmds[i]);
+ if (diag)
+ kdb_printf("kdb command %s failed, kdb diag %d\n",
+ kdb_cmds[i], diag);
+ }
+ if (defcmd_in_progress) {
+ kdb_printf("Incomplete 'defcmd' set, forcing endefcmd\n");
+ kdb_parse("endefcmd");
+ }
+}
+
+/* Intialize kdb_printf, breakpoint tables and kdb state */
+void __init kdb_init(void)
+{
+ kdb_inittab(); /* Initialize Command Table */
+ kdb_initbptab(); /* Initialize Breakpoint Table */
+ kdb_cmd_init(); /* Preset commands from kdb_cmds */
+ kdb_initial_cpu = -1; /* Avoid recursion problems */
+#if defined(CONFIG_KDB_KEYBOARD) && defined(CONFIG_KGDB_SERIAL_CONSOLE)
+ early_kgdboc_init();
+#endif /* CONFIG_KDB_KEYBOARD && CONFIG_KGDB_SERIAL_CONSOLE */
+}
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
new file mode 100644
index 000000000000..2db38ffd649a
--- /dev/null
+++ b/kernel/debug/kdb/kdb_private.h
@@ -0,0 +1,399 @@
+#ifndef _KDBPRIVATE_H
+#define _KDBPRIVATE_H
+
+/*
+ * Kernel Debugger Architecture Independent Private Headers
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2000-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ */
+
+/*
+ * Kernel Debugger Error codes. Must not overlap with command codes.
+ */
+
+#include <linux/kgdb.h>
+#include "../debug_core.h"
+#define KDB_NOTFOUND (-1)
+#define KDB_ARGCOUNT (-2)
+#define KDB_BADWIDTH (-3)
+#define KDB_BADRADIX (-4)
+#define KDB_NOTENV (-5)
+#define KDB_NOENVVALUE (-6)
+#define KDB_NOTIMP (-7)
+#define KDB_ENVFULL (-8)
+#define KDB_ENVBUFFULL (-9)
+#define KDB_TOOMANYBPT (-10)
+#define KDB_TOOMANYDBREGS (-11)
+#define KDB_DUPBPT (-12)
+#define KDB_BPTNOTFOUND (-13)
+#define KDB_BADMODE (-14)
+#define KDB_BADINT (-15)
+#define KDB_INVADDRFMT (-16)
+#define KDB_BADREG (-17)
+#define KDB_BADCPUNUM (-18)
+#define KDB_BADLENGTH (-19)
+#define KDB_NOBP (-20)
+#define KDB_BADADDR (-21)
+
+ /*
+ * Kernel Debugger Command codes. Must not overlap with error codes.
+ */
+#define KDB_CMD_GO (-1001)
+#define KDB_CMD_CPU (-1002)
+#define KDB_CMD_SS (-1003)
+#define KDB_CMD_SSB (-1004)
+#define KDB_CMD_KGDB (-1005)
+#define KDB_CMD_KGDB2 (-1006)
+
+ /*
+ * Internal debug flags
+ */
+#define KDB_DEBUG_FLAG_BP 0x0002 /* Breakpoint subsystem debug */
+#define KDB_DEBUG_FLAG_BB_SUMM 0x0004 /* Basic block analysis, summary only */
+#define KDB_DEBUG_FLAG_AR 0x0008 /* Activation record, generic */
+#define KDB_DEBUG_FLAG_ARA 0x0010 /* Activation record, arch specific */
+#define KDB_DEBUG_FLAG_BB 0x0020 /* All basic block analysis */
+#define KDB_DEBUG_FLAG_STATE 0x0040 /* State flags */
+#define KDB_DEBUG_FLAG_MASK 0xffff /* All debug flags */
+#define KDB_DEBUG_FLAG_SHIFT 16 /* Shift factor for dbflags */
+
+#define KDB_DEBUG(flag) (kdb_flags & \
+ (KDB_DEBUG_FLAG_##flag << KDB_DEBUG_FLAG_SHIFT))
+#define KDB_DEBUG_STATE(text, value) if (KDB_DEBUG(STATE)) \
+ kdb_print_state(text, value)
+
+#if BITS_PER_LONG == 32
+
+#define KDB_PLATFORM_ENV "BYTESPERWORD=4"
+
+#define kdb_machreg_fmt "0x%lx"
+#define kdb_machreg_fmt0 "0x%08lx"
+#define kdb_bfd_vma_fmt "0x%lx"
+#define kdb_bfd_vma_fmt0 "0x%08lx"
+#define kdb_elfw_addr_fmt "0x%x"
+#define kdb_elfw_addr_fmt0 "0x%08x"
+#define kdb_f_count_fmt "%d"
+
+#elif BITS_PER_LONG == 64
+
+#define KDB_PLATFORM_ENV "BYTESPERWORD=8"
+
+#define kdb_machreg_fmt "0x%lx"
+#define kdb_machreg_fmt0 "0x%016lx"
+#define kdb_bfd_vma_fmt "0x%lx"
+#define kdb_bfd_vma_fmt0 "0x%016lx"
+#define kdb_elfw_addr_fmt "0x%x"
+#define kdb_elfw_addr_fmt0 "0x%016x"
+#define kdb_f_count_fmt "%ld"
+
+#endif
+
+/*
+ * KDB_MAXBPT describes the total number of breakpoints
+ * supported by this architecure.
+ */
+#define KDB_MAXBPT 16
+
+/* Maximum number of arguments to a function */
+#define KDB_MAXARGS 16
+
+typedef enum {
+ KDB_REPEAT_NONE = 0, /* Do not repeat this command */
+ KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
+ KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
+} kdb_repeat_t;
+
+typedef int (*kdb_func_t)(int, const char **);
+
+ /*
+ * Symbol table format returned by kallsyms.
+ */
+
+typedef struct __ksymtab {
+ unsigned long value; /* Address of symbol */
+ const char *mod_name; /* Module containing symbol or
+ * "kernel" */
+ unsigned long mod_start;
+ unsigned long mod_end;
+ const char *sec_name; /* Section containing symbol */
+ unsigned long sec_start;
+ unsigned long sec_end;
+ const char *sym_name; /* Full symbol name, including
+ * any version */
+ unsigned long sym_start;
+ unsigned long sym_end;
+ } kdb_symtab_t;
+extern int kallsyms_symbol_next(char *prefix_name, int flag);
+extern int kallsyms_symbol_complete(char *prefix_name, int max_len);
+
+ /*
+ * Exported Symbols for kernel loadable modules to use.
+ */
+extern int kdb_register(char *, kdb_func_t, char *, char *, short);
+extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
+ short, kdb_repeat_t);
+extern int kdb_unregister(char *);
+
+extern int kdb_getarea_size(void *, unsigned long, size_t);
+extern int kdb_putarea_size(unsigned long, void *, size_t);
+
+/* Like get_user and put_user, kdb_getarea and kdb_putarea take variable
+ * names, not pointers. The underlying *_size functions take pointers.
+ */
+#define kdb_getarea(x, addr) kdb_getarea_size(&(x), addr, sizeof((x)))
+#define kdb_putarea(addr, x) kdb_putarea_size(addr, &(x), sizeof((x)))
+
+extern int kdb_getphysword(unsigned long *word,
+ unsigned long addr, size_t size);
+extern int kdb_getword(unsigned long *, unsigned long, size_t);
+extern int kdb_putword(unsigned long, unsigned long, size_t);
+
+extern int kdbgetularg(const char *, unsigned long *);
+extern int kdb_set(int, const char **);
+extern char *kdbgetenv(const char *);
+extern int kdbgetintenv(const char *, int *);
+extern int kdbgetaddrarg(int, const char **, int*, unsigned long *,
+ long *, char **);
+extern int kdbgetsymval(const char *, kdb_symtab_t *);
+extern int kdbnearsym(unsigned long, kdb_symtab_t *);
+extern void kdbnearsym_cleanup(void);
+extern char *kdb_strdup(const char *str, gfp_t type);
+extern void kdb_symbol_print(unsigned long, const kdb_symtab_t *, unsigned int);
+
+ /*
+ * Do we have a set of registers?
+ */
+
+#define KDB_NULL_REGS(regs) \
+ (regs == (struct pt_regs *)NULL ? \
+ kdb_printf("%s: null regs - should never happen\n", __func__), \
+ 1 : 0)
+
+ /*
+ * Routine for debugging the debugger state.
+ */
+
+extern void kdb_print_state(const char *, int);
+
+ /*
+ * Per cpu kdb state. A cpu can be under kdb control but outside kdb,
+ * for example when doing single step.
+ */
+extern int kdb_state[]; /* [NR_CPUS] */
+#define KDB_STATE_KDB 0x00000001 /* Cpu is inside kdb */
+#define KDB_STATE_LEAVING 0x00000002 /* Cpu is leaving kdb */
+#define KDB_STATE_CMD 0x00000004 /* Running a kdb command */
+#define KDB_STATE_KDB_CONTROL 0x00000008 /* This cpu is under
+ * kdb control */
+#define KDB_STATE_HOLD_CPU 0x00000010 /* Hold this cpu inside kdb */
+#define KDB_STATE_DOING_SS 0x00000020 /* Doing ss command */
+#define KDB_STATE_DOING_SSB 0x00000040 /* Doing ssb command,
+ * DOING_SS is also set */
+#define KDB_STATE_SSBPT 0x00000080 /* Install breakpoint
+ * after one ss, independent of
+ * DOING_SS */
+#define KDB_STATE_REENTRY 0x00000100 /* Valid re-entry into kdb */
+#define KDB_STATE_SUPPRESS 0x00000200 /* Suppress error messages */
+#define KDB_STATE_PAGER 0x00000400 /* pager is available */
+#define KDB_STATE_GO_SWITCH 0x00000800 /* go is switching
+ * back to initial cpu */
+#define KDB_STATE_PRINTF_LOCK 0x00001000 /* Holds kdb_printf lock */
+#define KDB_STATE_WAIT_IPI 0x00002000 /* Waiting for kdb_ipi() NMI */
+#define KDB_STATE_RECURSE 0x00004000 /* Recursive entry to kdb */
+#define KDB_STATE_IP_ADJUSTED 0x00008000 /* Restart IP has been
+ * adjusted */
+#define KDB_STATE_GO1 0x00010000 /* go only releases one cpu */
+#define KDB_STATE_KEYBOARD 0x00020000 /* kdb entered via
+ * keyboard on this cpu */
+#define KDB_STATE_KEXEC 0x00040000 /* kexec issued */
+#define KDB_STATE_DOING_KGDB 0x00080000 /* kgdb enter now issued */
+#define KDB_STATE_DOING_KGDB2 0x00100000 /* kgdb enter now issued */
+#define KDB_STATE_KGDB_TRANS 0x00200000 /* Transition to kgdb */
+#define KDB_STATE_ARCH 0xff000000 /* Reserved for arch
+ * specific use */
+
+#define KDB_STATE_CPU(flag, cpu) (kdb_state[cpu] & KDB_STATE_##flag)
+#define KDB_STATE_SET_CPU(flag, cpu) \
+ ((void)(kdb_state[cpu] |= KDB_STATE_##flag))
+#define KDB_STATE_CLEAR_CPU(flag, cpu) \
+ ((void)(kdb_state[cpu] &= ~KDB_STATE_##flag))
+
+#define KDB_STATE(flag) KDB_STATE_CPU(flag, raw_smp_processor_id())
+#define KDB_STATE_SET(flag) KDB_STATE_SET_CPU(flag, raw_smp_processor_id())
+#define KDB_STATE_CLEAR(flag) KDB_STATE_CLEAR_CPU(flag, raw_smp_processor_id())
+
+ /*
+ * kdb_nextline
+ *
+ * Contains the current line number on the screen. Used
+ * to handle the built-in pager (LINES env variable)
+ */
+extern int kdb_nextline;
+
+ /*
+ * Breakpoint state
+ *
+ * Each active and inactive breakpoint is represented by
+ * an instance of the following data structure.
+ */
+
+typedef struct _kdb_bp {
+ unsigned long bp_addr; /* Address breakpoint is present at */
+ unsigned int bp_free:1; /* This entry is available */
+ unsigned int bp_enabled:1; /* Breakpoint is active in register */
+ unsigned int bp_type:4; /* Uses hardware register */
+ unsigned int bp_installed:1; /* Breakpoint is installed */
+ unsigned int bp_delay:1; /* Do delayed bp handling */
+ unsigned int bp_delayed:1; /* Delayed breakpoint */
+ unsigned int bph_length; /* HW break length */
+} kdb_bp_t;
+
+#ifdef CONFIG_KGDB_KDB
+ /*
+ * Breakpoint handling subsystem global variables
+ */
+extern kdb_bp_t kdb_breakpoints[/* KDB_MAXBPT */];
+
+ /*
+ * KDB Command Table
+ */
+
+typedef struct _kdbtab {
+ char *cmd_name; /* Command name */
+ kdb_func_t cmd_func; /* Function to execute command */
+ char *cmd_usage; /* Usage String for this command */
+ char *cmd_help; /* Help message for this command */
+ short cmd_flags; /* Parsing flags */
+ short cmd_minlen; /* Minimum legal # command
+ * chars required */
+ kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */
+} kdbtab_t;
+
+ /*
+ * External command function declarations
+ */
+extern int kdb_bt(int, const char **);
+
+ /*
+ * External utility function declarations
+ */
+extern char *kdb_getstr(char *, size_t, char *);
+
+ /*
+ * Breakpoint handling - External interfaces
+ */
+extern void kdb_initbptab(void);
+extern void kdb_bp_install(struct pt_regs *);
+extern void kdb_bp_remove(void);
+
+typedef enum {
+ KDB_DB_BPT, /* Breakpoint */
+ KDB_DB_SS, /* Single-step trap */
+ KDB_DB_SSB, /* Single step to branch */
+ KDB_DB_SSBPT, /* Single step over breakpoint */
+ KDB_DB_NOBPT /* Spurious breakpoint */
+} kdb_dbtrap_t;
+
+extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
+ int, kdb_dbtrap_t, struct pt_regs *);
+
+ /*
+ * Miscellaneous functions and data areas
+ */
+extern int kdb_grepping_flag;
+extern char kdb_grep_string[];
+extern int kdb_grep_leading;
+extern int kdb_grep_trailing;
+extern char *kdb_cmds[];
+extern void kdb_syslog_data(char *syslog_data[]);
+extern unsigned long kdb_task_state_string(const char *);
+extern char kdb_task_state_char (const struct task_struct *);
+extern unsigned long kdb_task_state(const struct task_struct *p,
+ unsigned long mask);
+extern void kdb_ps_suppressed(void);
+extern void kdb_ps1(const struct task_struct *p);
+extern void kdb_print_nameval(const char *name, unsigned long val);
+extern void kdb_send_sig_info(struct task_struct *p,
+ struct siginfo *info, int seqno);
+extern void kdb_meminfo_proc_show(void);
+extern const char *kdb_walk_kallsyms(loff_t *pos);
+
+ /*
+ * Defines for kdb_symbol_print.
+ */
+#define KDB_SP_SPACEB 0x0001 /* Space before string */
+#define KDB_SP_SPACEA 0x0002 /* Space after string */
+#define KDB_SP_PAREN 0x0004 /* Parenthesis around string */
+#define KDB_SP_VALUE 0x0008 /* Print the value of the address */
+#define KDB_SP_SYMSIZE 0x0010 /* Print the size of the symbol */
+#define KDB_SP_NEWLINE 0x0020 /* Newline after string */
+#define KDB_SP_DEFAULT (KDB_SP_VALUE|KDB_SP_PAREN)
+
+/* Save data about running processes */
+
+struct kdb_running_process {
+ struct task_struct *p;
+ struct pt_regs *regs;
+ int seqno; /* kdb sequence number */
+ int irq_depth; /* irq count */
+ /*struct kdba_running_process arch; */ /* arch dependent save data */
+};
+
+extern struct kdb_running_process kdb_running_process[/* NR_CPUS */];
+
+extern void kdb_save_running_cpu(struct pt_regs *, struct task_struct *, int);
+extern int kdb_save_running(struct pt_regs *, kdb_reason_t,
+ kdb_reason_t, int, kdb_dbtrap_t);
+extern struct task_struct *kdb_curr_task(int);
+
+/* Incremented each time the main kdb loop is entered on the initial cpu,
+ * it gives some indication of how old the saved data is.
+ */
+extern int kdb_seqno;
+
+#define kdb_task_has_cpu(p) (task_curr(p))
+
+/* Simplify coexistence with NPTL */
+#define kdb_do_each_thread(g, p) do_each_thread(g, p)
+#define kdb_while_each_thread(g, p) while_each_thread(g, p)
+
+#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
+
+extern void *debug_kmalloc(size_t size, gfp_t flags);
+extern void debug_kfree(void *);
+extern void debug_kusage(void);
+
+extern void kdb_set_current_task(struct task_struct *);
+extern struct task_struct *kdb_current_task;
+extern struct pt_regs *kdb_current_regs;
+#ifdef CONFIG_MODULES
+extern struct list_head *kdb_modules;
+#endif /* CONFIG_MODULES */
+
+#ifndef KDB_RUNNING_PROCESS_ORIGINAL
+#define KDB_RUNNING_PROCESS_ORIGINAL kdb_running_process
+#endif
+
+extern char kdb_prompt_str[];
+extern struct seq_file kdb_seq_file;
+extern void kdb_seq_file_reset(void);
+extern void kdb_seq_file_print(void);
+/* Access to un-exported kernel internals */
+#ifdef CONFIG_PROC_FS
+extern int _meminfo_proc_show(struct seq_file *m, void *v, int lock);
+#else
+static inline int _meminfo_proc_show(struct seq_file *m, void *v, int lock)
+{
+ return 0;
+}
+#endif
+
+#define KDB_WORD_SIZE ((int)sizeof(unsigned long))
+
+#endif /* CONFIG_KGDB_KDB */
+#endif /* !_KDBPRIVATE_H */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
new file mode 100644
index 000000000000..132438706db6
--- /dev/null
+++ b/kernel/debug/kdb/kdb_support.c
@@ -0,0 +1,1007 @@
+/*
+ * Kernel Debugger Architecture Independent Support Functions
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1999-2004 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ * 03/02/13 added new 2.5 kallsyms <xavier.bru@bull.net>
+ */
+
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kallsyms.h>
+#include <linux/stddef.h>
+#include <linux/vmalloc.h>
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/hardirq.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/kdb.h>
+#include "kdb_private.h"
+
+#define KDB_SEQ_BUF_SIZE 2048
+static char kdb_seq_buf[KDB_SEQ_BUF_SIZE];
+
+static void *kdb_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ (*pos)++;
+ if (*pos >= KDB_SEQ_BUF_SIZE)
+ return NULL;
+ return pos;
+}
+
+static void *kdb_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return (*pos < KDB_SEQ_BUF_SIZE) ? pos : NULL;
+}
+
+static void kdb_seq_stop(struct seq_file *seq, void *v)
+{
+ seq->count = 0;
+}
+
+static int kdb_seq_show(struct seq_file *seq, void *v)
+{
+ return 0;
+}
+
+static struct seq_operations kdb_seq_ops = {
+ .start = kdb_seq_start,
+ .next = kdb_seq_next,
+ .stop = kdb_seq_stop,
+ .show = kdb_seq_show,
+};
+
+struct seq_file kdb_seq_file = {
+ .buf = kdb_seq_buf,
+ .size = KDB_SEQ_BUF_SIZE,
+ .op = &kdb_seq_ops
+};
+
+void kdb_seq_file_reset(void)
+{
+ kdb_seq_file.count = 0;
+}
+
+void kdb_seq_file_print(void)
+{
+ char *ptr = kdb_seq_file.buf;
+ char *tmp;
+
+ if (kdb_seq_file.count < kdb_seq_file.size)
+ kdb_seq_file.buf[kdb_seq_file.count] = '\0';
+ else
+ kdb_seq_file.buf[kdb_seq_file.size - 1] = '\0';
+ while (ptr < &kdb_seq_file.buf[kdb_seq_file.count]) {
+ if (KDB_FLAG(CMD_INTERRUPT))
+ return;
+ tmp = strchr(ptr, '\n');
+ if (tmp)
+ *tmp = '\0';
+ kdb_printf("%s\n", ptr);
+ ptr += strlen(ptr) + 1;
+ }
+}
+
+/*
+ * kdbgetsymval - Return the address of the given symbol.
+ *
+ * Parameters:
+ * symname Character string containing symbol name
+ * symtab Structure to receive results
+ * Returns:
+ * 0 Symbol not found, symtab zero filled
+ * 1 Symbol mapped to module/symbol/section, data in symtab
+ */
+int kdbgetsymval(const char *symname, kdb_symtab_t *symtab)
+{
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbgetsymval: symname=%s, symtab=%p\n", symname,
+ symtab);
+ memset(symtab, 0, sizeof(*symtab));
+ symtab->sym_start = kallsyms_lookup_name(symname);
+ if (symtab->sym_start) {
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbgetsymval: returns 1, "
+ "symtab->sym_start=0x%lx\n",
+ symtab->sym_start);
+ return 1;
+ }
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbgetsymval: returns 0\n");
+ return 0;
+}
+EXPORT_SYMBOL(kdbgetsymval);
+
+static char *kdb_name_table[100]; /* arbitrary size */
+
+/*
+ * kdbnearsym - Return the name of the symbol with the nearest address
+ * less than 'addr'.
+ *
+ * Parameters:
+ * addr Address to check for symbol near
+ * symtab Structure to receive results
+ * Returns:
+ * 0 No sections contain this address, symtab zero filled
+ * 1 Address mapped to module/symbol/section, data in symtab
+ * Remarks:
+ * 2.6 kallsyms has a "feature" where it unpacks the name into a
+ * string. If that string is reused before the caller expects it
+ * then the caller sees its string change without warning. To
+ * avoid cluttering up the main kdb code with lots of kdb_strdup,
+ * tests and kfree calls, kdbnearsym maintains an LRU list of the
+ * last few unique strings. The list is sized large enough to
+ * hold active strings, no kdb caller of kdbnearsym makes more
+ * than ~20 later calls before using a saved value.
+ */
+int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
+{
+ int ret = 0;
+ unsigned long symbolsize;
+ unsigned long offset;
+#define knt1_size 128 /* must be >= kallsyms table size */
+ char *knt1 = NULL;
+
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
+ memset(symtab, 0, sizeof(*symtab));
+
+ if (addr < 4096)
+ goto out;
+ knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
+ if (!knt1) {
+ kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n",
+ addr);
+ goto out;
+ }
+ symtab->sym_name = kallsyms_lookup(addr, &symbolsize , &offset,
+ (char **)(&symtab->mod_name), knt1);
+ if (offset > 8*1024*1024) {
+ symtab->sym_name = NULL;
+ addr = offset = symbolsize = 0;
+ }
+ symtab->sym_start = addr - offset;
+ symtab->sym_end = symtab->sym_start + symbolsize;
+ ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';
+
+ if (ret) {
+ int i;
+ /* Another 2.6 kallsyms "feature". Sometimes the sym_name is
+ * set but the buffer passed into kallsyms_lookup is not used,
+ * so it contains garbage. The caller has to work out which
+ * buffer needs to be saved.
+ *
+ * What was Rusty smoking when he wrote that code?
+ */
+ if (symtab->sym_name != knt1) {
+ strncpy(knt1, symtab->sym_name, knt1_size);
+ knt1[knt1_size-1] = '\0';
+ }
+ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+ if (kdb_name_table[i] &&
+ strcmp(kdb_name_table[i], knt1) == 0)
+ break;
+ }
+ if (i >= ARRAY_SIZE(kdb_name_table)) {
+ debug_kfree(kdb_name_table[0]);
+ memcpy(kdb_name_table, kdb_name_table+1,
+ sizeof(kdb_name_table[0]) *
+ (ARRAY_SIZE(kdb_name_table)-1));
+ } else {
+ debug_kfree(knt1);
+ knt1 = kdb_name_table[i];
+ memcpy(kdb_name_table+i, kdb_name_table+i+1,
+ sizeof(kdb_name_table[0]) *
+ (ARRAY_SIZE(kdb_name_table)-i-1));
+ }
+ i = ARRAY_SIZE(kdb_name_table) - 1;
+ kdb_name_table[i] = knt1;
+ symtab->sym_name = kdb_name_table[i];
+ knt1 = NULL;
+ }
+
+ if (symtab->mod_name == NULL)
+ symtab->mod_name = "kernel";
+ if (KDB_DEBUG(AR))
+ kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
+ "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n", ret,
+ symtab->sym_start, symtab->mod_name, symtab->sym_name,
+ symtab->sym_name);
+
+out:
+ debug_kfree(knt1);
+ return ret;
+}
+
+void kdbnearsym_cleanup(void)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
+ if (kdb_name_table[i]) {
+ debug_kfree(kdb_name_table[i]);
+ kdb_name_table[i] = NULL;
+ }
+ }
+}
+
+static char ks_namebuf[KSYM_NAME_LEN+1], ks_namebuf_prev[KSYM_NAME_LEN+1];
+
+/*
+ * kallsyms_symbol_complete
+ *
+ * Parameters:
+ * prefix_name prefix of a symbol name to lookup
+ * max_len maximum length that can be returned
+ * Returns:
+ * Number of symbols which match the given prefix.
+ * Notes:
+ * prefix_name is changed to contain the longest unique prefix that
+ * starts with this prefix (tab completion).
+ */
+int kallsyms_symbol_complete(char *prefix_name, int max_len)
+{
+ loff_t pos = 0;
+ int prefix_len = strlen(prefix_name), prev_len = 0;
+ int i, number = 0;
+ const char *name;
+
+ while ((name = kdb_walk_kallsyms(&pos))) {
+ if (strncmp(name, prefix_name, prefix_len) == 0) {
+ strcpy(ks_namebuf, name);
+ /* Work out the longest name that matches the prefix */
+ if (++number == 1) {
+ prev_len = min_t(int, max_len-1,
+ strlen(ks_namebuf));
+ memcpy(ks_namebuf_prev, ks_namebuf, prev_len);
+ ks_namebuf_prev[prev_len] = '\0';
+ continue;
+ }
+ for (i = 0; i < prev_len; i++) {
+ if (ks_namebuf[i] != ks_namebuf_prev[i]) {
+ prev_len = i;
+ ks_namebuf_prev[i] = '\0';
+ break;
+ }
+ }
+ }
+ }
+ if (prev_len > prefix_len)
+ memcpy(prefix_name, ks_namebuf_prev, prev_len+1);
+ return number;
+}
+
+/*
+ * kallsyms_symbol_next
+ *
+ * Parameters:
+ * prefix_name prefix of a symbol name to lookup
+ * flag 0 means search from the head, 1 means continue search.
+ * Returns:
+ * 1 if a symbol matches the given prefix.
+ * 0 if no string found
+ */
+int kallsyms_symbol_next(char *prefix_name, int flag)
+{
+ int prefix_len = strlen(prefix_name);
+ static loff_t pos;
+ const char *name;
+
+ if (!flag)
+ pos = 0;
+
+ while ((name = kdb_walk_kallsyms(&pos))) {
+ if (strncmp(name, prefix_name, prefix_len) == 0) {
+ strncpy(prefix_name, name, strlen(name)+1);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * kdb_symbol_print - Standard method for printing a symbol name and offset.
+ * Inputs:
+ * addr Address to be printed.
+ * symtab Address of symbol data, if NULL this routine does its
+ * own lookup.
+ * punc Punctuation for string, bit field.
+ * Remarks:
+ * The string and its punctuation is only printed if the address
+ * is inside the kernel, except that the value is always printed
+ * when requested.
+ */
+void kdb_symbol_print(unsigned long addr, const kdb_symtab_t *symtab_p,
+ unsigned int punc)
+{
+ kdb_symtab_t symtab, *symtab_p2;
+ if (symtab_p) {
+ symtab_p2 = (kdb_symtab_t *)symtab_p;
+ } else {
+ symtab_p2 = &symtab;
+ kdbnearsym(addr, symtab_p2);
+ }
+ if (!(symtab_p2->sym_name || (punc & KDB_SP_VALUE)))
+ return;
+ if (punc & KDB_SP_SPACEB)
+ kdb_printf(" ");
+ if (punc & KDB_SP_VALUE)
+ kdb_printf(kdb_machreg_fmt0, addr);
+ if (symtab_p2->sym_name) {
+ if (punc & KDB_SP_VALUE)
+ kdb_printf(" ");
+ if (punc & KDB_SP_PAREN)
+ kdb_printf("(");
+ if (strcmp(symtab_p2->mod_name, "kernel"))
+ kdb_printf("[%s]", symtab_p2->mod_name);
+ kdb_printf("%s", symtab_p2->sym_name);
+ if (addr != symtab_p2->sym_start)
+ kdb_printf("+0x%lx", addr - symtab_p2->sym_start);
+ if (punc & KDB_SP_SYMSIZE)
+ kdb_printf("/0x%lx",
+ symtab_p2->sym_end - symtab_p2->sym_start);
+ if (punc & KDB_SP_PAREN)
+ kdb_printf(")");
+ }
+ if (punc & KDB_SP_SPACEA)
+ kdb_printf(" ");
+ if (punc & KDB_SP_NEWLINE)
+ kdb_printf("\n");
+}
+
+/*
+ * kdb_strdup - kdb equivalent of strdup, for disasm code.
+ * Inputs:
+ * str The string to duplicate.
+ * type Flags to kmalloc for the new string.
+ * Returns:
+ * Address of the new string, NULL if storage could not be allocated.
+ * Remarks:
+ * This is not in lib/string.c because it uses kmalloc which is not
+ * available when string.o is used in boot loaders.
+ */
+char *kdb_strdup(const char *str, gfp_t type)
+{
+ int n = strlen(str)+1;
+ char *s = kmalloc(n, type);
+ if (!s)
+ return NULL;
+ return strcpy(s, str);
+}
+
+/*
+ * kdb_getarea_size - Read an area of data. The kdb equivalent of
+ * copy_from_user, with kdb messages for invalid addresses.
+ * Inputs:
+ * res Pointer to the area to receive the result.
+ * addr Address of the area to copy.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_getarea_size(void *res, unsigned long addr, size_t size)
+{
+ int ret = probe_kernel_read((char *)res, (char *)addr, size);
+ if (ret) {
+ if (!KDB_STATE(SUPPRESS)) {
+ kdb_printf("kdb_getarea: Bad address 0x%lx\n", addr);
+ KDB_STATE_SET(SUPPRESS);
+ }
+ ret = KDB_BADADDR;
+ } else {
+ KDB_STATE_CLEAR(SUPPRESS);
+ }
+ return ret;
+}
+
+/*
+ * kdb_putarea_size - Write an area of data. The kdb equivalent of
+ * copy_to_user, with kdb messages for invalid addresses.
+ * Inputs:
+ * addr Address of the area to write to.
+ * res Pointer to the area holding the data.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_putarea_size(unsigned long addr, void *res, size_t size)
+{
+ int ret = probe_kernel_read((char *)addr, (char *)res, size);
+ if (ret) {
+ if (!KDB_STATE(SUPPRESS)) {
+ kdb_printf("kdb_putarea: Bad address 0x%lx\n", addr);
+ KDB_STATE_SET(SUPPRESS);
+ }
+ ret = KDB_BADADDR;
+ } else {
+ KDB_STATE_CLEAR(SUPPRESS);
+ }
+ return ret;
+}
+
+/*
+ * kdb_getphys - Read data from a physical address. Validate the
+ * address is in range, use kmap_atomic() to get data
+ * similar to kdb_getarea() - but for phys addresses
+ * Inputs:
+ * res Pointer to the word to receive the result
+ * addr Physical address of the area to copy
+ * size Size of the area
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+static int kdb_getphys(void *res, unsigned long addr, size_t size)
+{
+ unsigned long pfn;
+ void *vaddr;
+ struct page *page;
+
+ pfn = (addr >> PAGE_SHIFT);
+ if (!pfn_valid(pfn))
+ return 1;
+ page = pfn_to_page(pfn);
+ vaddr = kmap_atomic(page, KM_KDB);
+ memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
+ kunmap_atomic(vaddr, KM_KDB);
+
+ return 0;
+}
+
+/*
+ * kdb_getphysword
+ * Inputs:
+ * word Pointer to the word to receive the result.
+ * addr Address of the area to copy.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_getphysword(unsigned long *word, unsigned long addr, size_t size)
+{
+ int diag;
+ __u8 w1;
+ __u16 w2;
+ __u32 w4;
+ __u64 w8;
+ *word = 0; /* Default value if addr or size is invalid */
+
+ switch (size) {
+ case 1:
+ diag = kdb_getphys(&w1, addr, sizeof(w1));
+ if (!diag)
+ *word = w1;
+ break;
+ case 2:
+ diag = kdb_getphys(&w2, addr, sizeof(w2));
+ if (!diag)
+ *word = w2;
+ break;
+ case 4:
+ diag = kdb_getphys(&w4, addr, sizeof(w4));
+ if (!diag)
+ *word = w4;
+ break;
+ case 8:
+ if (size <= sizeof(*word)) {
+ diag = kdb_getphys(&w8, addr, sizeof(w8));
+ if (!diag)
+ *word = w8;
+ break;
+ }
+ /* drop through */
+ default:
+ diag = KDB_BADWIDTH;
+ kdb_printf("kdb_getphysword: bad width %ld\n", (long) size);
+ }
+ return diag;
+}
+
+/*
+ * kdb_getword - Read a binary value. Unlike kdb_getarea, this treats
+ * data as numbers.
+ * Inputs:
+ * word Pointer to the word to receive the result.
+ * addr Address of the area to copy.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_getword(unsigned long *word, unsigned long addr, size_t size)
+{
+ int diag;
+ __u8 w1;
+ __u16 w2;
+ __u32 w4;
+ __u64 w8;
+ *word = 0; /* Default value if addr or size is invalid */
+ switch (size) {
+ case 1:
+ diag = kdb_getarea(w1, addr);
+ if (!diag)
+ *word = w1;
+ break;
+ case 2:
+ diag = kdb_getarea(w2, addr);
+ if (!diag)
+ *word = w2;
+ break;
+ case 4:
+ diag = kdb_getarea(w4, addr);
+ if (!diag)
+ *word = w4;
+ break;
+ case 8:
+ if (size <= sizeof(*word)) {
+ diag = kdb_getarea(w8, addr);
+ if (!diag)
+ *word = w8;
+ break;
+ }
+ /* drop through */
+ default:
+ diag = KDB_BADWIDTH;
+ kdb_printf("kdb_getword: bad width %ld\n", (long) size);
+ }
+ return diag;
+}
+
+/*
+ * kdb_putword - Write a binary value. Unlike kdb_putarea, this
+ * treats data as numbers.
+ * Inputs:
+ * addr Address of the area to write to..
+ * word The value to set.
+ * size Size of the area.
+ * Returns:
+ * 0 for success, < 0 for error.
+ */
+int kdb_putword(unsigned long addr, unsigned long word, size_t size)
+{
+ int diag;
+ __u8 w1;
+ __u16 w2;
+ __u32 w4;
+ __u64 w8;
+ switch (size) {
+ case 1:
+ w1 = word;
+ diag = kdb_putarea(addr, w1);
+ break;
+ case 2:
+ w2 = word;
+ diag = kdb_putarea(addr, w2);
+ break;
+ case 4:
+ w4 = word;
+ diag = kdb_putarea(addr, w4);
+ break;
+ case 8:
+ if (size <= sizeof(word)) {
+ w8 = word;
+ diag = kdb_putarea(addr, w8);
+ break;
+ }
+ /* drop through */
+ default:
+ diag = KDB_BADWIDTH;
+ kdb_printf("kdb_putword: bad width %ld\n", (long) size);
+ }
+ return diag;
+}
+
+/*
+ * kdb_task_state_string - Convert a string containing any of the
+ * letters DRSTCZEUIMA to a mask for the process state field and
+ * return the value. If no argument is supplied, return the mask
+ * that corresponds to environment variable PS, DRSTCZEU by
+ * default.
+ * Inputs:
+ * s String to convert
+ * Returns:
+ * Mask for process state.
+ * Notes:
+ * The mask folds data from several sources into a single long value, so
+ * be carefull not to overlap the bits. TASK_* bits are in the LSB,
+ * special cases like UNRUNNABLE are in the MSB. As of 2.6.10-rc1 there
+ * is no overlap between TASK_* and EXIT_* but that may not always be
+ * true, so EXIT_* bits are shifted left 16 bits before being stored in
+ * the mask.
+ */
+
+/* unrunnable is < 0 */
+#define UNRUNNABLE (1UL << (8*sizeof(unsigned long) - 1))
+#define RUNNING (1UL << (8*sizeof(unsigned long) - 2))
+#define IDLE (1UL << (8*sizeof(unsigned long) - 3))
+#define DAEMON (1UL << (8*sizeof(unsigned long) - 4))
+
+unsigned long kdb_task_state_string(const char *s)
+{
+ long res = 0;
+ if (!s) {
+ s = kdbgetenv("PS");
+ if (!s)
+ s = "DRSTCZEU"; /* default value for ps */
+ }
+ while (*s) {
+ switch (*s) {
+ case 'D':
+ res |= TASK_UNINTERRUPTIBLE;
+ break;
+ case 'R':
+ res |= RUNNING;
+ break;
+ case 'S':
+ res |= TASK_INTERRUPTIBLE;
+ break;
+ case 'T':
+ res |= TASK_STOPPED;
+ break;
+ case 'C':
+ res |= TASK_TRACED;
+ break;
+ case 'Z':
+ res |= EXIT_ZOMBIE << 16;
+ break;
+ case 'E':
+ res |= EXIT_DEAD << 16;
+ break;
+ case 'U':
+ res |= UNRUNNABLE;
+ break;
+ case 'I':
+ res |= IDLE;
+ break;
+ case 'M':
+ res |= DAEMON;
+ break;
+ case 'A':
+ res = ~0UL;
+ break;
+ default:
+ kdb_printf("%s: unknown flag '%c' ignored\n",
+ __func__, *s);
+ break;
+ }
+ ++s;
+ }
+ return res;
+}
+
+/*
+ * kdb_task_state_char - Return the character that represents the task state.
+ * Inputs:
+ * p struct task for the process
+ * Returns:
+ * One character to represent the task state.
+ */
+char kdb_task_state_char (const struct task_struct *p)
+{
+ int cpu = kdb_process_cpu(p);
+ struct kdb_running_process *krp = kdb_running_process + cpu;
+ char state = (p->state == 0) ? 'R' :
+ (p->state < 0) ? 'U' :
+ (p->state & TASK_UNINTERRUPTIBLE) ? 'D' :
+ (p->state & TASK_STOPPED) ? 'T' :
+ (p->state & TASK_TRACED) ? 'C' :
+ (p->exit_state & EXIT_ZOMBIE) ? 'Z' :
+ (p->exit_state & EXIT_DEAD) ? 'E' :
+ (p->state & TASK_INTERRUPTIBLE) ? 'S' : '?';
+ if (p->pid == 0) {
+ /* Idle task. Is it really idle, apart from the kdb
+ * interrupt? */
+ if (!kdb_task_has_cpu(p) || krp->irq_depth == 1) {
+ /* There is a corner case when the idle task takes an
+ * interrupt and dies in the interrupt code. It has an
+ * interrupt count of 1 but that did not come from kdb.
+ * This corner case can only occur on the initial cpu,
+ * all the others were entered via the kdb IPI.
+ */
+ if (cpu != kdb_initial_cpu ||
+ KDB_STATE_CPU(KEYBOARD, cpu))
+ state = 'I'; /* idle task */
+ }
+ } else if (!p->mm && state == 'S') {
+ state = 'M'; /* sleeping system daemon */
+ }
+ return state;
+}
+
+/*
+ * kdb_task_state - Return true if a process has the desired state
+ * given by the mask.
+ * Inputs:
+ * p struct task for the process
+ * mask mask from kdb_task_state_string to select processes
+ * Returns:
+ * True if the process matches at least one criteria defined by the mask.
+ */
+unsigned long kdb_task_state(const struct task_struct *p, unsigned long mask)
+{
+ char state[] = { kdb_task_state_char(p), '\0' };
+ return (mask & kdb_task_state_string(state)) != 0;
+}
+
+struct kdb_running_process kdb_running_process[NR_CPUS];
+
+/* Save the state of a running process and invoke kdb_main_loop. This is
+ * invoked on the current process on each cpu (assuming the cpu is responding).
+ */
+void
+kdb_save_running_cpu(struct pt_regs *regs, struct task_struct *task, int cpu)
+{
+ struct kdb_running_process *krp = kdb_running_process + cpu;
+ krp->p = task;
+ krp->regs = regs;
+ krp->seqno = kdb_seqno;
+ krp->irq_depth = kgdb_info[cpu].irq_depth;
+}
+
+/*
+ * kdb_print_nameval - Print a name and its value, converting the
+ * value to a symbol lookup if possible.
+ * Inputs:
+ * name field name to print
+ * val value of field
+ */
+void kdb_print_nameval(const char *name, unsigned long val)
+{
+ kdb_symtab_t symtab;
+ kdb_printf(" %-11.11s ", name);
+ if (kdbnearsym(val, &symtab))
+ kdb_symbol_print(val, &symtab,
+ KDB_SP_VALUE|KDB_SP_SYMSIZE|KDB_SP_NEWLINE);
+ else
+ kdb_printf("0x%lx\n", val);
+}
+
+/* Last ditch allocator for debugging, so we can still debug even when
+ * the GFP_ATOMIC pool has been exhausted. The algorithms are tuned
+ * for space usage, not for speed. One smallish memory pool, the free
+ * chain is always in ascending address order to allow coalescing,
+ * allocations are done in brute force best fit.
+ */
+
+struct debug_alloc_header {
+ u32 next; /* offset of next header from start of pool */
+ u32 size;
+ void *caller;
+};
+
+/* The memory returned by this allocator must be aligned, which means
+ * so must the header size. Do not assume that sizeof(struct
+ * debug_alloc_header) is a multiple of the alignment, explicitly
+ * calculate the overhead of this header, including the alignment.
+ * The rest of this code must not use sizeof() on any header or
+ * pointer to a header.
+ */
+#define dah_align 8
+#define dah_overhead ALIGN(sizeof(struct debug_alloc_header), dah_align)
+
+static u64 debug_alloc_pool_aligned[256*1024/dah_align]; /* 256K pool */
+static char *debug_alloc_pool = (char *)debug_alloc_pool_aligned;
+static u32 dah_first, dah_first_call = 1, dah_used, dah_used_max;
+
+/* Locking is awkward. The debug code is called from all contexts,
+ * including non maskable interrupts. A normal spinlock is not safe
+ * in NMI context. Try to get the debug allocator lock, if it cannot
+ * be obtained after a second then give up. If the lock could not be
+ * previously obtained on this cpu then only try once.
+ *
+ * sparse has no annotation for "this function _sometimes_ acquires a
+ * lock", so fudge the acquire/release notation.
+ */
+static DEFINE_SPINLOCK(dap_lock);
+static int get_dap_lock(void)
+ __acquires(dap_lock)
+{
+ static int dap_locked = -1;
+ int count;
+ if (dap_locked == smp_processor_id())
+ count = 1;
+ else
+ count = 1000;
+ while (1) {
+ if (spin_trylock(&dap_lock)) {
+ dap_locked = -1;
+ return 1;
+ }
+ if (!count--)
+ break;
+ udelay(1000);
+ }
+ dap_locked = smp_processor_id();
+ __acquire(dap_lock);
+ return 0;
+}
+
+void *debug_kmalloc(size_t size, gfp_t flags)
+{
+ unsigned int rem, h_offset;
+ struct debug_alloc_header *best, *bestprev, *prev, *h;
+ void *p = NULL;
+ if (!get_dap_lock()) {
+ __release(dap_lock); /* we never actually got it */
+ return NULL;
+ }
+ h = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+ if (dah_first_call) {
+ h->size = sizeof(debug_alloc_pool_aligned) - dah_overhead;
+ dah_first_call = 0;
+ }
+ size = ALIGN(size, dah_align);
+ prev = best = bestprev = NULL;
+ while (1) {
+ if (h->size >= size && (!best || h->size < best->size)) {
+ best = h;
+ bestprev = prev;
+ if (h->size == size)
+ break;
+ }
+ if (!h->next)
+ break;
+ prev = h;
+ h = (struct debug_alloc_header *)(debug_alloc_pool + h->next);
+ }
+ if (!best)
+ goto out;
+ rem = best->size - size;
+ /* The pool must always contain at least one header */
+ if (best->next == 0 && bestprev == NULL && rem < dah_overhead)
+ goto out;
+ if (rem >= dah_overhead) {
+ best->size = size;
+ h_offset = ((char *)best - debug_alloc_pool) +
+ dah_overhead + best->size;
+ h = (struct debug_alloc_header *)(debug_alloc_pool + h_offset);
+ h->size = rem - dah_overhead;
+ h->next = best->next;
+ } else
+ h_offset = best->next;
+ best->caller = __builtin_return_address(0);
+ dah_used += best->size;
+ dah_used_max = max(dah_used, dah_used_max);
+ if (bestprev)
+ bestprev->next = h_offset;
+ else
+ dah_first = h_offset;
+ p = (char *)best + dah_overhead;
+ memset(p, POISON_INUSE, best->size - 1);
+ *((char *)p + best->size - 1) = POISON_END;
+out:
+ spin_unlock(&dap_lock);
+ return p;
+}
+
+void debug_kfree(void *p)
+{
+ struct debug_alloc_header *h;
+ unsigned int h_offset;
+ if (!p)
+ return;
+ if ((char *)p < debug_alloc_pool ||
+ (char *)p >= debug_alloc_pool + sizeof(debug_alloc_pool_aligned)) {
+ kfree(p);
+ return;
+ }
+ if (!get_dap_lock()) {
+ __release(dap_lock); /* we never actually got it */
+ return; /* memory leak, cannot be helped */
+ }
+ h = (struct debug_alloc_header *)((char *)p - dah_overhead);
+ memset(p, POISON_FREE, h->size - 1);
+ *((char *)p + h->size - 1) = POISON_END;
+ h->caller = NULL;
+ dah_used -= h->size;
+ h_offset = (char *)h - debug_alloc_pool;
+ if (h_offset < dah_first) {
+ h->next = dah_first;
+ dah_first = h_offset;
+ } else {
+ struct debug_alloc_header *prev;
+ unsigned int prev_offset;
+ prev = (struct debug_alloc_header *)(debug_alloc_pool +
+ dah_first);
+ while (1) {
+ if (!prev->next || prev->next > h_offset)
+ break;
+ prev = (struct debug_alloc_header *)
+ (debug_alloc_pool + prev->next);
+ }
+ prev_offset = (char *)prev - debug_alloc_pool;
+ if (prev_offset + dah_overhead + prev->size == h_offset) {
+ prev->size += dah_overhead + h->size;
+ memset(h, POISON_FREE, dah_overhead - 1);
+ *((char *)h + dah_overhead - 1) = POISON_END;
+ h = prev;
+ h_offset = prev_offset;
+ } else {
+ h->next = prev->next;
+ prev->next = h_offset;
+ }
+ }
+ if (h_offset + dah_overhead + h->size == h->next) {
+ struct debug_alloc_header *next;
+ next = (struct debug_alloc_header *)
+ (debug_alloc_pool + h->next);
+ h->size += dah_overhead + next->size;
+ h->next = next->next;
+ memset(next, POISON_FREE, dah_overhead - 1);
+ *((char *)next + dah_overhead - 1) = POISON_END;
+ }
+ spin_unlock(&dap_lock);
+}
+
+void debug_kusage(void)
+{
+ struct debug_alloc_header *h_free, *h_used;
+#ifdef CONFIG_IA64
+ /* FIXME: using dah for ia64 unwind always results in a memory leak.
+ * Fix that memory leak first, then set debug_kusage_one_time = 1 for
+ * all architectures.
+ */
+ static int debug_kusage_one_time;
+#else
+ static int debug_kusage_one_time = 1;
+#endif
+ if (!get_dap_lock()) {
+ __release(dap_lock); /* we never actually got it */
+ return;
+ }
+ h_free = (struct debug_alloc_header *)(debug_alloc_pool + dah_first);
+ if (dah_first == 0 &&
+ (h_free->size == sizeof(debug_alloc_pool_aligned) - dah_overhead ||
+ dah_first_call))
+ goto out;
+ if (!debug_kusage_one_time)
+ goto out;
+ debug_kusage_one_time = 0;
+ kdb_printf("%s: debug_kmalloc memory leak dah_first %d\n",
+ __func__, dah_first);
+ if (dah_first) {
+ h_used = (struct debug_alloc_header *)debug_alloc_pool;
+ kdb_printf("%s: h_used %p size %d\n", __func__, h_used,
+ h_used->size);
+ }
+ do {
+ h_used = (struct debug_alloc_header *)
+ ((char *)h_free + dah_overhead + h_free->size);
+ kdb_printf("%s: h_used %p size %d caller %p\n",
+ __func__, h_used, h_used->size, h_used->caller);
+ h_free = (struct debug_alloc_header *)
+ (debug_alloc_pool + h_free->next);
+ } while (h_free->next);
+ h_used = (struct debug_alloc_header *)
+ ((char *)h_free + dah_overhead + h_free->size);
+ if ((char *)h_used - debug_alloc_pool !=
+ sizeof(debug_alloc_pool_aligned))
+ kdb_printf("%s: h_used %p size %d caller %p\n",
+ __func__, h_used, h_used->size, h_used->caller);
+out:
+ spin_unlock(&dap_lock);
+}
+
+/* Maintain a small stack of kdb_flags to allow recursion without disturbing
+ * the global kdb state.
+ */
+
+static int kdb_flags_stack[4], kdb_flags_index;
+
+void kdb_save_flags(void)
+{
+ BUG_ON(kdb_flags_index >= ARRAY_SIZE(kdb_flags_stack));
+ kdb_flags_stack[kdb_flags_index++] = kdb_flags;
+}
+
+void kdb_restore_flags(void)
+{
+ BUG_ON(kdb_flags_index <= 0);
+ kdb_flags = kdb_flags_stack[--kdb_flags_index];
+}
diff --git a/kernel/debug/kms_hooks.c b/kernel/debug/kms_hooks.c
new file mode 100644
index 000000000000..c56b7ce28989
--- /dev/null
+++ b/kernel/debug/kms_hooks.c
@@ -0,0 +1,62 @@
+/*
+ * Created by: Jason Wessel <jason.wessel@windriver.com>
+ *
+ * Copyright (c) 2009 Wind River Systems, Inc. All Rights Reserved.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifdef CONFIG_VT
+#include <linux/kgdb.h>
+#include <linux/console.h>
+#include <linux/vt_kern.h>
+#include <linux/selection.h>
+#include <linux/kdb.h>
+#include "kdb/kdb_private.h"
+
+static int dbg_orig_vc_mode;
+static int saved_fg_con;
+static int saved_last_con;
+static int saved_want_con;
+
+void dbg_pre_vt_hook(void)
+{
+ struct vc_data *vc = vc_cons[fg_console].d;
+ saved_fg_con = fg_console;
+ saved_last_con = last_console;
+ saved_want_con = want_console;
+ dbg_orig_vc_mode = vc->vc_mode;
+ vc->vc_mode = KD_TEXT;
+ console_blanked = 0;
+ vc->vc_sw->con_blank(vc, 0, 1);
+ vc->vc_sw->con_set_palette(vc, color_table);
+#ifdef CONFIG_KGDB_KDB
+ /* Set the initial LINES variable if it is not already set */
+ if (vc->vc_rows < 999) {
+ int linecount;
+ char lns[4];
+ const char *setargs[3] = {
+ "set",
+ "LINES",
+ lns,
+ };
+ if (kdbgetintenv(setargs[0], &linecount)) {
+ snprintf(lns, 4, "%i", vc->vc_rows);
+ kdb_set(2, setargs);
+ }
+ }
+#endif /* CONFIG_KGDB_KDB */
+}
+EXPORT_SYMBOL_GPL(dbg_pre_vt_hook);
+
+void dbg_post_vt_hook(void)
+{
+ fg_console = saved_fg_con;
+ last_console = saved_last_con;
+ want_console = saved_want_con;
+ vc_cons[fg_console].d->vc_mode = dbg_orig_vc_mode;
+}
+EXPORT_SYMBOL_GPL(dbg_post_vt_hook);
+#endif /* CONFIG_VT */
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 235716556bf1..d49afb2395e5 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -146,7 +146,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
struct task_struct *p;
ret = -ESRCH;
- read_lock(&tasklist_lock);
+ rcu_read_lock();
p = find_task_by_vpid(pid);
if (!p)
goto err_unlock;
@@ -157,7 +157,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
!capable(CAP_SYS_PTRACE))
goto err_unlock;
head = p->compat_robust_list;
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
}
if (put_user(sizeof(*head), len_ptr))
@@ -165,7 +165,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
return put_user(ptr_to_compat(head), head_ptr);
err_unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
return ret;
}
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 8e5288a8a355..dc08f8ba9fc5 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
+#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h> /* for cond_resched */
@@ -515,6 +516,26 @@ static int kallsyms_open(struct inode *inode, struct file *file)
return ret;
}
+#ifdef CONFIG_KGDB_KDB
+const char *kdb_walk_kallsyms(loff_t *pos)
+{
+ static struct kallsym_iter kdb_walk_kallsyms_iter;
+ if (*pos == 0) {
+ memset(&kdb_walk_kallsyms_iter, 0,
+ sizeof(kdb_walk_kallsyms_iter));
+ reset_iter(&kdb_walk_kallsyms_iter, 0);
+ }
+ while (1) {
+ if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
+ return NULL;
+ ++*pos;
+ /* Some debugging symbols have no name. Ignore them. */
+ if (kdb_walk_kallsyms_iter.name[0])
+ return kdb_walk_kallsyms_iter.name;
+ }
+}
+#endif /* CONFIG_KGDB_KDB */
+
static const struct file_operations kallsyms_operations = {
.open = kallsyms_open,
.read = seq_read,
diff --git a/kernel/kexec.c b/kernel/kexec.c
index a9a93d9ee7a7..ef077fb73155 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -32,6 +32,7 @@
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
+#include <linux/kmsg_dump.h>
#include <asm/page.h>
#include <asm/uaccess.h>
@@ -1074,6 +1075,9 @@ void crash_kexec(struct pt_regs *regs)
if (mutex_trylock(&kexec_mutex)) {
if (kexec_crash_image) {
struct pt_regs fixed_regs;
+
+ kmsg_dump(KMSG_DUMP_KEXEC);
+
crash_setup_regs(&fixed_regs, regs);
crash_save_vmcoreinfo();
machine_crash_shutdown(&fixed_regs);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 25b103190364..bf0e231d9702 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -520,13 +520,15 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
return -ENOMEM;
ret = call_usermodehelper_stdinpipe(sub_info, filp);
- if (ret < 0)
- goto out;
+ if (ret < 0) {
+ call_usermodehelper_freeinfo(sub_info);
+ return ret;
+ }
- return call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+ ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+ if (ret < 0) /* Failed to execute helper, close pipe */
+ filp_close(*filp, NULL);
- out:
- call_usermodehelper_freeinfo(sub_info);
return ret;
}
EXPORT_SYMBOL(call_usermodehelper_pipe);
diff --git a/kernel/module.c b/kernel/module.c
index e96b8ed1cb6a..1fb7a0cfd82d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -79,6 +79,10 @@ EXPORT_TRACEPOINT_SYMBOL(module_get);
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
+#ifdef CONFIG_KGDB_KDB
+struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
+#endif /* CONFIG_KGDB_KDB */
+
/* Block module loading/unloading? */
int modules_disabled = 0;
@@ -474,9 +478,10 @@ static void module_unload_init(struct module *mod)
INIT_LIST_HEAD(&mod->modules_which_use_me);
for_each_possible_cpu(cpu)
- local_set(__module_ref_addr(mod, cpu), 0);
+ per_cpu_ptr(mod->refptr, cpu)->count = 0;
+
/* Hold reference count during initialization. */
- local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
+ __this_cpu_write(mod->refptr->count, 1);
/* Backwards compatibility macros put refcount during init. */
mod->waiter = current;
}
@@ -619,7 +624,7 @@ unsigned int module_refcount(struct module *mod)
int cpu;
for_each_possible_cpu(cpu)
- total += local_read(__module_ref_addr(mod, cpu));
+ total += per_cpu_ptr(mod->refptr, cpu)->count;
return total;
}
EXPORT_SYMBOL(module_refcount);
@@ -796,14 +801,15 @@ static struct module_attribute refcnt = {
void module_put(struct module *module)
{
if (module) {
- unsigned int cpu = get_cpu();
- local_dec(__module_ref_addr(module, cpu));
+ preempt_disable();
+ __this_cpu_dec(module->refptr->count);
+
trace_module_put(module, _RET_IP_,
- local_read(__module_ref_addr(module, cpu)));
+ __this_cpu_read(module->refptr->count));
/* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter);
- put_cpu();
+ preempt_enable();
}
}
EXPORT_SYMBOL(module_put);
@@ -1010,6 +1016,12 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
* J. Corbet <corbet@lwn.net>
*/
#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+
+static inline bool sect_empty(const Elf_Shdr *sect)
+{
+ return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
+}
+
struct module_sect_attr
{
struct module_attribute mattr;
@@ -1051,8 +1063,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
/* Count loaded sections and allocate structures */
for (i = 0; i < nsect; i++)
- if (sechdrs[i].sh_flags & SHF_ALLOC
- && sechdrs[i].sh_size)
+ if (!sect_empty(&sechdrs[i]))
nloaded++;
size[0] = ALIGN(sizeof(*sect_attrs)
+ nloaded * sizeof(sect_attrs->attrs[0]),
@@ -1070,9 +1081,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
sattr = &sect_attrs->attrs[0];
gattr = &sect_attrs->grp.attrs[0];
for (i = 0; i < nsect; i++) {
- if (! (sechdrs[i].sh_flags & SHF_ALLOC))
- continue;
- if (!sechdrs[i].sh_size)
+ if (sect_empty(&sechdrs[i]))
continue;
sattr->address = sechdrs[i].sh_addr;
sattr->name = kstrdup(secstrings + sechdrs[i].sh_name,
@@ -1156,7 +1165,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
/* Count notes sections and allocate structures. */
notes = 0;
for (i = 0; i < nsect; i++)
- if ((sechdrs[i].sh_flags & SHF_ALLOC) &&
+ if (!sect_empty(&sechdrs[i]) &&
(sechdrs[i].sh_type == SHT_NOTE))
++notes;
@@ -1172,7 +1181,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
notes_attrs->notes = notes;
nattr = &notes_attrs->attrs[0];
for (loaded = i = 0; i < nsect; ++i) {
- if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ if (sect_empty(&sechdrs[i]))
continue;
if (sechdrs[i].sh_type == SHT_NOTE) {
nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
@@ -1394,9 +1403,9 @@ static void free_module(struct module *mod)
kfree(mod->args);
if (mod->percpu)
percpu_modfree(mod->percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+#if defined(CONFIG_MODULE_UNLOAD)
if (mod->refptr)
- percpu_modfree(mod->refptr);
+ free_percpu(mod->refptr);
#endif
/* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size);
@@ -2159,9 +2168,8 @@ static noinline struct module *load_module(void __user *umod,
mod = (void *)sechdrs[modindex].sh_addr;
kmemleak_load_module(mod, hdr, sechdrs, secstrings);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
- mod->name);
+#if defined(CONFIG_MODULE_UNLOAD)
+ mod->refptr = alloc_percpu(struct module_ref);
if (!mod->refptr) {
err = -ENOMEM;
goto free_init;
@@ -2393,8 +2401,8 @@ static noinline struct module *load_module(void __user *umod,
kobject_put(&mod->mkobj.kobj);
free_unload:
module_unload_free(mod);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
- percpu_modfree(mod->refptr);
+#if defined(CONFIG_MODULE_UNLOAD)
+ free_percpu(mod->refptr);
free_init:
#endif
module_free(mod, mod->module_init);
diff --git a/kernel/padata.c b/kernel/padata.c
new file mode 100644
index 000000000000..6f9bcb8313d6
--- /dev/null
+++ b/kernel/padata.c
@@ -0,0 +1,690 @@
+/*
+ * padata.c - generic interface to process data streams in parallel
+ *
+ * Copyright (C) 2008, 2009 secunet Security Networks AG
+ * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/padata.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/rcupdate.h>
+
+#define MAX_SEQ_NR INT_MAX - NR_CPUS
+#define MAX_OBJ_NUM 10000 * NR_CPUS
+
+static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
+{
+ int cpu, target_cpu;
+
+ target_cpu = cpumask_first(pd->cpumask);
+ for (cpu = 0; cpu < cpu_index; cpu++)
+ target_cpu = cpumask_next(target_cpu, pd->cpumask);
+
+ return target_cpu;
+}
+
+static int padata_cpu_hash(struct padata_priv *padata)
+{
+ int cpu_index;
+ struct parallel_data *pd;
+
+ pd = padata->pd;
+
+ /*
+ * Hash the sequence numbers to the cpus by taking
+ * seq_nr mod. number of cpus in use.
+ */
+ cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);
+
+ return padata_index_to_cpu(pd, cpu_index);
+}
+
+static void padata_parallel_worker(struct work_struct *work)
+{
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+ struct padata_instance *pinst;
+ LIST_HEAD(local_list);
+
+ local_bh_disable();
+ queue = container_of(work, struct padata_queue, pwork);
+ pd = queue->pd;
+ pinst = pd->pinst;
+
+ spin_lock(&queue->parallel.lock);
+ list_replace_init(&queue->parallel.list, &local_list);
+ spin_unlock(&queue->parallel.lock);
+
+ while (!list_empty(&local_list)) {
+ struct padata_priv *padata;
+
+ padata = list_entry(local_list.next,
+ struct padata_priv, list);
+
+ list_del_init(&padata->list);
+
+ padata->parallel(padata);
+ }
+
+ local_bh_enable();
+}
+
+/*
+ * padata_do_parallel - padata parallelization function
+ *
+ * @pinst: padata instance
+ * @padata: object to be parallelized
+ * @cb_cpu: cpu the serialization callback function will run on,
+ * must be in the cpumask of padata.
+ *
+ * The parallelization callback function will run with BHs off.
+ * Note: Every object which is parallelized by padata_do_parallel
+ * must be seen by padata_do_serial.
+ */
+int padata_do_parallel(struct padata_instance *pinst,
+ struct padata_priv *padata, int cb_cpu)
+{
+ int target_cpu, err;
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+
+ rcu_read_lock_bh();
+
+ pd = rcu_dereference(pinst->pd);
+
+ err = 0;
+ if (!(pinst->flags & PADATA_INIT))
+ goto out;
+
+ err = -EBUSY;
+ if ((pinst->flags & PADATA_RESET))
+ goto out;
+
+ if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
+ goto out;
+
+ err = -EINVAL;
+ if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
+ goto out;
+
+ err = -EINPROGRESS;
+ atomic_inc(&pd->refcnt);
+ padata->pd = pd;
+ padata->cb_cpu = cb_cpu;
+
+ if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
+ atomic_set(&pd->seq_nr, -1);
+
+ padata->seq_nr = atomic_inc_return(&pd->seq_nr);
+
+ target_cpu = padata_cpu_hash(padata);
+ queue = per_cpu_ptr(pd->queue, target_cpu);
+
+ spin_lock(&queue->parallel.lock);
+ list_add_tail(&padata->list, &queue->parallel.list);
+ spin_unlock(&queue->parallel.lock);
+
+ queue_work_on(target_cpu, pinst->wq, &queue->pwork);
+
+out:
+ rcu_read_unlock_bh();
+
+ return err;
+}
+EXPORT_SYMBOL(padata_do_parallel);
+
+static struct padata_priv *padata_get_next(struct parallel_data *pd)
+{
+ int cpu, num_cpus, empty, calc_seq_nr;
+ int seq_nr, next_nr, overrun, next_overrun;
+ struct padata_queue *queue, *next_queue;
+ struct padata_priv *padata;
+ struct padata_list *reorder;
+
+ empty = 0;
+ next_nr = -1;
+ next_overrun = 0;
+ next_queue = NULL;
+
+ num_cpus = cpumask_weight(pd->cpumask);
+
+ for_each_cpu(cpu, pd->cpumask) {
+ queue = per_cpu_ptr(pd->queue, cpu);
+ reorder = &queue->reorder;
+
+ /*
+ * Calculate the seq_nr of the object that should be
+ * next in this queue.
+ */
+ overrun = 0;
+ calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
+ + queue->cpu_index;
+
+ if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
+ calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
+ overrun = 1;
+ }
+
+ if (!list_empty(&reorder->list)) {
+ padata = list_entry(reorder->list.next,
+ struct padata_priv, list);
+
+ seq_nr = padata->seq_nr;
+ BUG_ON(calc_seq_nr != seq_nr);
+ } else {
+ seq_nr = calc_seq_nr;
+ empty++;
+ }
+
+ if (next_nr < 0 || seq_nr < next_nr
+ || (next_overrun && !overrun)) {
+ next_nr = seq_nr;
+ next_overrun = overrun;
+ next_queue = queue;
+ }
+ }
+
+ padata = NULL;
+
+ if (empty == num_cpus)
+ goto out;
+
+ reorder = &next_queue->reorder;
+
+ if (!list_empty(&reorder->list)) {
+ padata = list_entry(reorder->list.next,
+ struct padata_priv, list);
+
+ if (unlikely(next_overrun)) {
+ for_each_cpu(cpu, pd->cpumask) {
+ queue = per_cpu_ptr(pd->queue, cpu);
+ atomic_set(&queue->num_obj, 0);
+ }
+ }
+
+ spin_lock(&reorder->lock);
+ list_del_init(&padata->list);
+ atomic_dec(&pd->reorder_objects);
+ spin_unlock(&reorder->lock);
+
+ atomic_inc(&next_queue->num_obj);
+
+ goto out;
+ }
+
+ if (next_nr % num_cpus == next_queue->cpu_index) {
+ padata = ERR_PTR(-ENODATA);
+ goto out;
+ }
+
+ padata = ERR_PTR(-EINPROGRESS);
+out:
+ return padata;
+}
+
+static void padata_reorder(struct parallel_data *pd)
+{
+ struct padata_priv *padata;
+ struct padata_queue *queue;
+ struct padata_instance *pinst = pd->pinst;
+
+try_again:
+ if (!spin_trylock_bh(&pd->lock))
+ goto out;
+
+ while (1) {
+ padata = padata_get_next(pd);
+
+ if (!padata || PTR_ERR(padata) == -EINPROGRESS)
+ break;
+
+ if (PTR_ERR(padata) == -ENODATA) {
+ spin_unlock_bh(&pd->lock);
+ goto out;
+ }
+
+ queue = per_cpu_ptr(pd->queue, padata->cb_cpu);
+
+ spin_lock(&queue->serial.lock);
+ list_add_tail(&padata->list, &queue->serial.list);
+ spin_unlock(&queue->serial.lock);
+
+ queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
+ }
+
+ spin_unlock_bh(&pd->lock);
+
+ if (atomic_read(&pd->reorder_objects))
+ goto try_again;
+
+out:
+ return;
+}
+
+static void padata_serial_worker(struct work_struct *work)
+{
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+ LIST_HEAD(local_list);
+
+ local_bh_disable();
+ queue = container_of(work, struct padata_queue, swork);
+ pd = queue->pd;
+
+ spin_lock(&queue->serial.lock);
+ list_replace_init(&queue->serial.list, &local_list);
+ spin_unlock(&queue->serial.lock);
+
+ while (!list_empty(&local_list)) {
+ struct padata_priv *padata;
+
+ padata = list_entry(local_list.next,
+ struct padata_priv, list);
+
+ list_del_init(&padata->list);
+
+ padata->serial(padata);
+ atomic_dec(&pd->refcnt);
+ }
+ local_bh_enable();
+}
+
+/*
+ * padata_do_serial - padata serialization function
+ *
+ * @padata: object to be serialized.
+ *
+ * padata_do_serial must be called for every parallelized object.
+ * The serialization callback function will run with BHs off.
+ */
+void padata_do_serial(struct padata_priv *padata)
+{
+ int cpu;
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+
+ pd = padata->pd;
+
+ cpu = get_cpu();
+ queue = per_cpu_ptr(pd->queue, cpu);
+
+ spin_lock(&queue->reorder.lock);
+ atomic_inc(&pd->reorder_objects);
+ list_add_tail(&padata->list, &queue->reorder.list);
+ spin_unlock(&queue->reorder.lock);
+
+ put_cpu();
+
+ padata_reorder(pd);
+}
+EXPORT_SYMBOL(padata_do_serial);
+
+static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
+ const struct cpumask *cpumask)
+{
+ int cpu, cpu_index, num_cpus;
+ struct padata_queue *queue;
+ struct parallel_data *pd;
+
+ cpu_index = 0;
+
+ pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
+ if (!pd)
+ goto err;
+
+ pd->queue = alloc_percpu(struct padata_queue);
+ if (!pd->queue)
+ goto err_free_pd;
+
+ if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
+ goto err_free_queue;
+
+ for_each_possible_cpu(cpu) {
+ queue = per_cpu_ptr(pd->queue, cpu);
+
+ queue->pd = pd;
+
+ if (cpumask_test_cpu(cpu, cpumask)
+ && cpumask_test_cpu(cpu, cpu_active_mask)) {
+ queue->cpu_index = cpu_index;
+ cpu_index++;
+ } else
+ queue->cpu_index = -1;
+
+ INIT_LIST_HEAD(&queue->reorder.list);
+ INIT_LIST_HEAD(&queue->parallel.list);
+ INIT_LIST_HEAD(&queue->serial.list);
+ spin_lock_init(&queue->reorder.lock);
+ spin_lock_init(&queue->parallel.lock);
+ spin_lock_init(&queue->serial.lock);
+
+ INIT_WORK(&queue->pwork, padata_parallel_worker);
+ INIT_WORK(&queue->swork, padata_serial_worker);
+ atomic_set(&queue->num_obj, 0);
+ }
+
+ cpumask_and(pd->cpumask, cpumask, cpu_active_mask);
+
+ num_cpus = cpumask_weight(pd->cpumask);
+ pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;
+
+ atomic_set(&pd->seq_nr, -1);
+ atomic_set(&pd->reorder_objects, 0);
+ atomic_set(&pd->refcnt, 0);
+ pd->pinst = pinst;
+ spin_lock_init(&pd->lock);
+
+ return pd;
+
+err_free_queue:
+ free_percpu(pd->queue);
+err_free_pd:
+ kfree(pd);
+err:
+ return NULL;
+}
+
+static void padata_free_pd(struct parallel_data *pd)
+{
+ free_cpumask_var(pd->cpumask);
+ free_percpu(pd->queue);
+ kfree(pd);
+}
+
+static void padata_replace(struct padata_instance *pinst,
+ struct parallel_data *pd_new)
+{
+ struct parallel_data *pd_old = pinst->pd;
+
+ pinst->flags |= PADATA_RESET;
+
+ rcu_assign_pointer(pinst->pd, pd_new);
+
+ synchronize_rcu();
+
+ while (atomic_read(&pd_old->refcnt) != 0)
+ yield();
+
+ flush_workqueue(pinst->wq);
+
+ padata_free_pd(pd_old);
+
+ pinst->flags &= ~PADATA_RESET;
+}
+
+/*
+ * padata_set_cpumask - set the cpumask that padata should use
+ *
+ * @pinst: padata instance
+ * @cpumask: the cpumask to use
+ */
+int padata_set_cpumask(struct padata_instance *pinst,
+ cpumask_var_t cpumask)
+{
+ struct parallel_data *pd;
+ int err = 0;
+
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+
+ pd = padata_alloc_pd(pinst, cpumask);
+ if (!pd) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ cpumask_copy(pinst->cpumask, cpumask);
+
+ padata_replace(pinst, pd);
+
+out:
+ mutex_unlock(&pinst->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(padata_set_cpumask);
+
+static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+ struct parallel_data *pd;
+
+ if (cpumask_test_cpu(cpu, cpu_active_mask)) {
+ pd = padata_alloc_pd(pinst, pinst->cpumask);
+ if (!pd)
+ return -ENOMEM;
+
+ padata_replace(pinst, pd);
+ }
+
+ return 0;
+}
+
+/*
+ * padata_add_cpu - add a cpu to the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to add
+ */
+int padata_add_cpu(struct padata_instance *pinst, int cpu)
+{
+ int err;
+
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+
+ cpumask_set_cpu(cpu, pinst->cpumask);
+ err = __padata_add_cpu(pinst, cpu);
+
+ mutex_unlock(&pinst->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(padata_add_cpu);
+
+static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+ struct parallel_data *pd;
+
+ if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+ pd = padata_alloc_pd(pinst, pinst->cpumask);
+ if (!pd)
+ return -ENOMEM;
+
+ padata_replace(pinst, pd);
+ }
+
+ return 0;
+}
+
+/*
+ * padata_remove_cpu - remove a cpu from the padata cpumask
+ *
+ * @pinst: padata instance
+ * @cpu: cpu to remove
+ */
+int padata_remove_cpu(struct padata_instance *pinst, int cpu)
+{
+ int err;
+
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+
+ cpumask_clear_cpu(cpu, pinst->cpumask);
+ err = __padata_remove_cpu(pinst, cpu);
+
+ mutex_unlock(&pinst->lock);
+
+ return err;
+}
+EXPORT_SYMBOL(padata_remove_cpu);
+
+/*
+ * padata_start - start the parallel processing
+ *
+ * @pinst: padata instance to start
+ */
+void padata_start(struct padata_instance *pinst)
+{
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+ pinst->flags |= PADATA_INIT;
+ mutex_unlock(&pinst->lock);
+}
+EXPORT_SYMBOL(padata_start);
+
+/*
+ * padata_stop - stop the parallel processing
+ *
+ * @pinst: padata instance to stop
+ */
+void padata_stop(struct padata_instance *pinst)
+{
+ might_sleep();
+
+ mutex_lock(&pinst->lock);
+ pinst->flags &= ~PADATA_INIT;
+ mutex_unlock(&pinst->lock);
+}
+EXPORT_SYMBOL(padata_stop);
+
+static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ int err;
+ struct padata_instance *pinst;
+ int cpu = (unsigned long)hcpu;
+
+ pinst = container_of(nfb, struct padata_instance, cpu_notifier);
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ if (!cpumask_test_cpu(cpu, pinst->cpumask))
+ break;
+ mutex_lock(&pinst->lock);
+ err = __padata_add_cpu(pinst, cpu);
+ mutex_unlock(&pinst->lock);
+ if (err)
+ return NOTIFY_BAD;
+ break;
+
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ if (!cpumask_test_cpu(cpu, pinst->cpumask))
+ break;
+ mutex_lock(&pinst->lock);
+ err = __padata_remove_cpu(pinst, cpu);
+ mutex_unlock(&pinst->lock);
+ if (err)
+ return NOTIFY_BAD;
+ break;
+
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ if (!cpumask_test_cpu(cpu, pinst->cpumask))
+ break;
+ mutex_lock(&pinst->lock);
+ __padata_remove_cpu(pinst, cpu);
+ mutex_unlock(&pinst->lock);
+
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ if (!cpumask_test_cpu(cpu, pinst->cpumask))
+ break;
+ mutex_lock(&pinst->lock);
+ __padata_add_cpu(pinst, cpu);
+ mutex_unlock(&pinst->lock);
+ }
+
+ return NOTIFY_OK;
+}
+
+/*
+ * padata_alloc - allocate and initialize a padata instance
+ *
+ * @cpumask: cpumask that padata uses for parallelization
+ * @wq: workqueue to use for the allocated padata instance
+ */
+struct padata_instance *padata_alloc(const struct cpumask *cpumask,
+ struct workqueue_struct *wq)
+{
+ int err;
+ struct padata_instance *pinst;
+ struct parallel_data *pd;
+
+ pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
+ if (!pinst)
+ goto err;
+
+ pd = padata_alloc_pd(pinst, cpumask);
+ if (!pd)
+ goto err_free_inst;
+
+ rcu_assign_pointer(pinst->pd, pd);
+
+ pinst->wq = wq;
+
+ cpumask_copy(pinst->cpumask, cpumask);
+
+ pinst->flags = 0;
+
+ pinst->cpu_notifier.notifier_call = padata_cpu_callback;
+ pinst->cpu_notifier.priority = 0;
+ err = register_hotcpu_notifier(&pinst->cpu_notifier);
+ if (err)
+ goto err_free_pd;
+
+ mutex_init(&pinst->lock);
+
+ return pinst;
+
+err_free_pd:
+ padata_free_pd(pd);
+err_free_inst:
+ kfree(pinst);
+err:
+ return NULL;
+}
+EXPORT_SYMBOL(padata_alloc);
+
+/*
+ * padata_free - free a padata instance
+ *
+ * @ padata_inst: padata instance to free
+ */
+void padata_free(struct padata_instance *pinst)
+{
+ padata_stop(pinst);
+
+ synchronize_rcu();
+
+ while (atomic_read(&pinst->pd->refcnt) != 0)
+ yield();
+
+ unregister_hotcpu_notifier(&pinst->cpu_notifier);
+ padata_free_pd(pinst->pd);
+ kfree(pinst);
+}
+EXPORT_SYMBOL(padata_free);
diff --git a/kernel/panic.c b/kernel/panic.c
index 5827f7b97254..428d15b42e23 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -66,16 +66,18 @@ NORET_TYPE void panic(const char * fmt, ...)
*/
preempt_disable();
- bust_spinlocks(1);
va_start(args, fmt);
vsnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
+
+ atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+
+ bust_spinlocks(1);
printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
dump_stack();
#endif
- kmsg_dump(KMSG_DUMP_PANIC);
/*
* If we have crashed and we have a crash kernel loaded let it handle
* everything else.
@@ -83,6 +85,8 @@ NORET_TYPE void panic(const char * fmt, ...)
*/
crash_kexec(NULL);
+ kmsg_dump(KMSG_DUMP_PANIC);
+
/*
* Note smp_send_stop is the usual smp shutdown function, which
* unfortunately means it may not be hardened to work in a panic
@@ -90,8 +94,6 @@ NORET_TYPE void panic(const char * fmt, ...)
*/
smp_send_stop();
- atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
-
bust_spinlocks(0);
if (!panic_blink)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ec0bfe5ca1d6..faaa9e6d6692 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1170,9 +1170,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
* not restart the event.
*/
void perf_event_task_sched_out(struct task_struct *task,
- struct task_struct *next, int cpu)
+ struct task_struct *next)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event_context *ctx = task->perf_event_ctxp;
struct perf_event_context *next_ctx;
struct perf_event_context *parent;
@@ -1252,8 +1252,9 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
static void
__perf_event_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx, int cpu)
+ struct perf_cpu_context *cpuctx)
{
+ int cpu = smp_processor_id();
struct perf_event *event;
int can_add_hw = 1;
@@ -1326,24 +1327,24 @@ __perf_event_sched_in(struct perf_event_context *ctx,
* accessing the event control register. If a NMI hits, then it will
* keep the event running.
*/
-void perf_event_task_sched_in(struct task_struct *task, int cpu)
+void perf_event_task_sched_in(struct task_struct *task)
{
- struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+ struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
struct perf_event_context *ctx = task->perf_event_ctxp;
if (likely(!ctx))
return;
if (cpuctx->task_ctx == ctx)
return;
- __perf_event_sched_in(ctx, cpuctx, cpu);
+ __perf_event_sched_in(ctx, cpuctx);
cpuctx->task_ctx = ctx;
}
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = &cpuctx->ctx;
- __perf_event_sched_in(ctx, cpuctx, cpu);
+ __perf_event_sched_in(ctx, cpuctx);
}
#define MAX_INTERRUPTS (~0ULL)
@@ -1461,7 +1462,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
raw_spin_unlock(&ctx->lock);
}
-void perf_event_task_tick(struct task_struct *curr, int cpu)
+void perf_event_task_tick(struct task_struct *curr)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
@@ -1469,7 +1470,7 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
if (!atomic_read(&nr_events))
return;
- cpuctx = &per_cpu(perf_cpu_context, cpu);
+ cpuctx = &__get_cpu_var(perf_cpu_context);
ctx = curr->perf_event_ctxp;
perf_ctx_adjust_freq(&cpuctx->ctx);
@@ -1484,9 +1485,9 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
if (ctx)
rotate_ctx(ctx);
- perf_event_cpu_sched_in(cpuctx, cpu);
+ perf_event_cpu_sched_in(cpuctx);
if (ctx)
- perf_event_task_sched_in(curr, cpu);
+ perf_event_task_sched_in(curr);
}
/*
@@ -1527,7 +1528,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
raw_spin_unlock(&ctx->lock);
- perf_event_task_sched_in(task, smp_processor_id());
+ perf_event_task_sched_in(task);
out:
local_irq_restore(flags);
}
@@ -4176,7 +4177,7 @@ static const struct pmu perf_ops_task_clock = {
.read = task_clock_perf_event_read,
};
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_EVENT_TRACING
void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
int entry_size)
@@ -4281,7 +4282,7 @@ static void perf_event_free_filter(struct perf_event *event)
{
}
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
static void bp_perf_event_destroy(struct perf_event *event)
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 3db49b9ca374..8576f40f8470 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -102,12 +102,24 @@ static struct pm_qos_object network_throughput_pm_qos = {
.comparitor = max_compare
};
+static BLOCKING_NOTIFIER_HEAD(system_bus_freq_notifier);
+static struct pm_qos_object system_bus_freq_pm_qos = {
+ .requirements =
+ {LIST_HEAD_INIT(system_bus_freq_pm_qos.requirements.list)},
+ .notifiers = &system_bus_freq_notifier,
+ .name = "system_bus_freq",
+ .default_value = 0,
+ .target_value = ATOMIC_INIT(0),
+ .comparitor = max_compare
+};
+
-static struct pm_qos_object *pm_qos_array[] = {
- &null_pm_qos,
- &cpu_dma_pm_qos,
- &network_lat_pm_qos,
- &network_throughput_pm_qos
+static struct pm_qos_object *pm_qos_array[PM_QOS_NUM_CLASSES] = {
+ [PM_QOS_RESERVED] = &null_pm_qos,
+ [PM_QOS_CPU_DMA_LATENCY] = &cpu_dma_pm_qos,
+ [PM_QOS_NETWORK_LATENCY] = &network_lat_pm_qos,
+ [PM_QOS_NETWORK_THROUGHPUT] = &network_throughput_pm_qos,
+ [PM_QOS_SYSTEM_BUS_FREQ] = &system_bus_freq_pm_qos,
};
static DEFINE_SPINLOCK(pm_qos_lock);
@@ -313,7 +325,7 @@ EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
* will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
- int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
@@ -409,9 +421,15 @@ static int __init pm_qos_power_init(void)
return ret;
}
ret = register_pm_qos_misc(&network_throughput_pm_qos);
- if (ret < 0)
+ if (ret < 0) {
printk(KERN_ERR
"pm_qos_param: network_throughput setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&system_bus_freq_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: system_bus_freq setup failed\n");
return ret;
}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 438ff4523513..d8e543cfeddf 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -13,16 +13,16 @@
/*
* Called after updating RLIMIT_CPU to set timer expiration if necessary.
*/
-void update_rlimit_cpu(unsigned long rlim_new)
+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
{
cputime_t cputime = secs_to_cputime(rlim_new);
- struct signal_struct *const sig = current->signal;
+ struct signal_struct *const sig = task->signal;
if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
- spin_lock_irq(&current->sighand->siglock);
- set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
- spin_unlock_irq(&current->sighand->siglock);
+ spin_lock_irq(&task->sighand->siglock);
+ set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
+ spin_unlock_irq(&task->sighand->siglock);
}
}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 91e09d3b2eb2..39263f41c534 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -27,6 +27,15 @@ config PM_DEBUG
code. This is helpful when debugging and reporting PM bugs, like
suspend support.
+config PM_ADVANCED_DEBUG
+ bool "Extra PM attributes in sysfs for low-level debugging/testing"
+ depends on PM_DEBUG
+ default n
+ ---help---
+ Add extra sysfs attributes allowing one to access some Power Management
+ fields of device objects from user space. If you are not a kernel
+ developer interested in debugging/testing Power Management, say "no".
+
config PM_VERBOSE
bool "Verbose Power Management debugging"
depends on PM_DEBUG
@@ -85,6 +94,11 @@ config PM_SLEEP
depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
default y
+config PM_SLEEP_ADVANCED_DEBUG
+ bool
+ depends on PM_ADVANCED_DEBUG
+ default n
+
config SUSPEND
bool "Suspend to RAM and standby"
depends on PM && ARCH_SUSPEND_POSSIBLE
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 0998c7139053..b58800b21fc0 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -44,6 +44,32 @@ int pm_notifier_call_chain(unsigned long val)
== NOTIFY_BAD) ? -EINVAL : 0;
}
+/* If set, devices may be suspended and resumed asynchronously. */
+int pm_async_enabled = 1;
+
+static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "%d\n", pm_async_enabled);
+}
+
+static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val))
+ return -EINVAL;
+
+ if (val > 1)
+ return -EINVAL;
+
+ pm_async_enabled = val;
+ return n;
+}
+
+power_attr(pm_async);
+
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;
@@ -208,9 +234,12 @@ static struct attribute * g[] = {
#ifdef CONFIG_PM_TRACE
&pm_trace_attr.attr,
#endif
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_DEBUG)
+#ifdef CONFIG_PM_SLEEP
+ &pm_async_attr.attr,
+#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
+#endif
NULL,
};
diff --git a/kernel/printk.c b/kernel/printk.c
index 17463ca2e229..be9bd706e513 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -33,6 +33,7 @@
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/kexec.h>
+#include <linux/kdb.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
@@ -420,6 +421,22 @@ SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
return do_syslog(type, buf, len);
}
+#ifdef CONFIG_KGDB_KDB
+/* kdb dmesg command needs access to the syslog buffer. do_syslog()
+ * uses locks so it cannot be used during debugging. Just tell kdb
+ * where the start and end of the physical and logical logs are. This
+ * is equivalent to do_syslog(3).
+ */
+void kdb_syslog_data(char *syslog_data[4])
+{
+ syslog_data[0] = log_buf;
+ syslog_data[1] = log_buf + log_buf_len;
+ syslog_data[2] = log_buf + log_end -
+ (logged_chars < log_buf_len ? logged_chars : log_buf_len);
+ syslog_data[3] = log_buf + log_end;
+}
+#endif /* CONFIG_KGDB_KDB */
+
/*
* Call the console drivers on a range of log_buf
*/
@@ -593,6 +610,14 @@ asmlinkage int printk(const char *fmt, ...)
va_list args;
int r;
+#ifdef CONFIG_KGDB_KDB
+ if (unlikely(kdb_trap_printk)) {
+ va_start(args, fmt);
+ r = vkdb_printf(fmt, args);
+ va_end(args);
+ return r;
+ }
+#endif
va_start(args, fmt);
r = vprintk(fmt, args);
va_end(args);
@@ -1467,6 +1492,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
static const char const *kmsg_reasons[] = {
[KMSG_DUMP_OOPS] = "oops",
[KMSG_DUMP_PANIC] = "panic",
+ [KMSG_DUMP_KEXEC] = "kexec",
};
static const char *kmsg_to_str(enum kmsg_dump_reason reason)
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 9bb52177af02..0b5217535f71 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+ __this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}
@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
+ __this_cpu_inc(rcu_torture_count[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
- __this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
+ __this_cpu_inc(rcu_torture_batch[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();
diff --git a/kernel/sched.c b/kernel/sched.c
index 9f251d8fea21..faefd5b29f0d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2783,7 +2783,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
*/
prev_state = prev->state;
finish_arch_switch(prev);
- perf_event_task_sched_in(current, cpu_of(rq));
+ perf_event_task_sched_in(current);
finish_lock_switch(rq, prev);
fire_sched_in_preempt_notifiers(current);
@@ -5298,7 +5298,7 @@ void scheduler_tick(void)
curr->sched_class->task_tick(rq, curr, 0);
raw_spin_unlock(&rq->lock);
- perf_event_task_tick(curr, cpu);
+ perf_event_task_tick(curr);
#ifdef CONFIG_SMP
rq->idle_at_tick = idle_cpu(cpu);
@@ -5512,7 +5512,7 @@ need_resched_nonpreemptible:
if (likely(prev != next)) {
sched_info_switch(prev, next);
- perf_event_task_sched_out(prev, next, cpu);
+ perf_event_task_sched_out(prev, next);
rq->nr_switches++;
rq->curr = next;
@@ -9698,7 +9698,7 @@ static inline int preempt_count_equals(int preempt_offset)
return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
}
-void __might_sleep(char *file, int line, int preempt_offset)
+void __might_sleep(const char *file, int line, int preempt_offset)
{
#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
@@ -9788,9 +9788,9 @@ void normalize_rt_tasks(void)
#endif /* CONFIG_MAGIC_SYSRQ */
-#ifdef CONFIG_IA64
+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
/*
- * These functions are only useful for the IA64 MCA handling.
+ * These functions are only useful for the IA64 MCA handling, or kdb.
*
* They can only be called when the whole system has been
* stopped - every CPU needs to be quiescent, and no scheduling
@@ -9810,6 +9810,9 @@ struct task_struct *curr_task(int cpu)
return cpu_curr(cpu);
}
+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
+
+#ifdef CONFIG_IA64
/**
* set_curr_task - set the current task for a given cpu.
* @cpu: the processor in question.
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 597b33099dfa..3db4b1a0e921 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -58,7 +58,7 @@ static int convert_prio(int prio)
* @lowest_mask: A mask to fill in with selected CPUs (or NULL)
*
* Note: This function returns the recommended CPUs as calculated during the
- * current invokation. By the time the call returns, the CPUs may have in
+ * current invocation. By the time the call returns, the CPUs may have in
* fact changed priorities any number of times. While not ideal, it is not
* an issue of correctness since the normal rebalancer logic will correct
* any discrepancies created by racing against the uncertainty of the current
diff --git a/kernel/signal.c b/kernel/signal.c
index d09692b40376..4a2df1a958fb 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -979,7 +979,8 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
for (i = 0; i < 16; i++) {
unsigned char insn;
- __get_user(insn, (unsigned char *)(regs->ip + i));
+ if (get_user(insn, (unsigned char *)(regs->ip + i)))
+ break;
printk("%02x ", insn);
}
}
@@ -2717,3 +2718,45 @@ void __init signals_init(void)
{
sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
+
+#ifdef CONFIG_KGDB_KDB
+#include <linux/kdb.h>
+/*
+ * kdb_send_sig_info - Allows kdb to send signals without exposing
+ * signal internals. This function checks if the required locks are
+ * available before calling the main signal code, to avoid kdb
+ * deadlocks.
+ */
+void
+kdb_send_sig_info(struct task_struct *t, struct siginfo *info, int seqno)
+{
+ static struct task_struct *kdb_prev_t;
+ static int kdb_prev_seqno;
+ int sig, new_t;
+ if (!spin_trylock(&t->sighand->siglock)) {
+ kdb_printf("Can't do kill command now.\n"
+ "The sigmask lock is held somewhere else in "
+ "kernel, try again later\n");
+ return;
+ }
+ spin_unlock(&t->sighand->siglock);
+ new_t = kdb_prev_t != t || kdb_prev_seqno != seqno;
+ kdb_prev_t = t;
+ kdb_prev_seqno = seqno;
+ if (t->state != TASK_RUNNING && new_t) {
+ kdb_printf("Process is not RUNNING, sending a signal from "
+ "kdb risks deadlock\n"
+ "on the run queue locks. "
+ "The signal has _not_ been sent.\n"
+ "Reissue the kill command if you want to risk "
+ "the deadlock.\n");
+ return;
+ }
+ sig = info->si_signo;
+ if (send_sig_info(sig, info, t))
+ kdb_printf("Fail to deliver Signal %d to process %d.\n",
+ sig, t->pid);
+ else
+ kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
+}
+#endif /* CONFIG_KGDB_KDB */
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..57f129533a53 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -79,6 +79,14 @@ void touch_softlockup_watchdog(void)
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
+static int softlock_touch_sync[NR_CPUS];
+
+void touch_softlockup_watchdog_sync(void)
+{
+ softlock_touch_sync[raw_smp_processor_id()] = 1;
+ __raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
void touch_all_softlockup_watchdogs(void)
{
int cpu;
@@ -118,6 +126,14 @@ void softlockup_tick(void)
}
if (touch_ts == 0) {
+ if (unlikely(softlock_touch_sync[this_cpu])) {
+ /*
+ * If the time stamp was touched atomically
+ * make sure the scheduler tick is up to date.
+ */
+ softlock_touch_sync[this_cpu] = 0;
+ sched_clock_tick();
+ }
__touch_softlockup_watchdog();
return;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index 26a6b73a6b85..c801a55c169f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1214,6 +1214,61 @@ SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
}
}
+static int check_prlimit_permission(struct task_struct *task)
+{
+ const struct cred *cred = current_cred(), *tcred;
+ int ret = 0;
+
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if ((cred->uid != tcred->euid ||
+ cred->uid != tcred->suid ||
+ cred->uid != tcred->uid ||
+ cred->gid != tcred->egid ||
+ cred->gid != tcred->sgid ||
+ cred->gid != tcred->gid) &&
+ !capable(CAP_SYS_RESOURCE)) {
+ ret = -EPERM;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+SYSCALL_DEFINE3(getprlimit, pid_t, pid, unsigned int, resource,
+ struct rlimit __user *, rlim)
+{
+ struct rlimit val;
+ struct task_struct *tsk;
+ int ret;
+
+ if (resource >= RLIM_NLIMITS)
+ return -EINVAL;
+
+ read_lock(&tasklist_lock);
+
+ tsk = find_task_by_vpid(pid);
+ if (!tsk || !tsk->sighand) {
+ ret = -ESRCH;
+ goto err_unlock;
+ }
+
+ ret = check_prlimit_permission(tsk);
+ if (ret)
+ goto err_unlock;
+
+ task_lock(tsk->group_leader);
+ val = tsk->signal->rlim[resource];
+ task_unlock(tsk->group_leader);
+
+ read_unlock(&tasklist_lock);
+
+ return copy_to_user(rlim, &val, sizeof(*rlim)) ? -EFAULT : 0;
+err_unlock:
+ read_unlock(&tasklist_lock);
+ return ret;
+}
+
+
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
/*
@@ -1239,43 +1294,52 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
#endif
-SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
+/* make sure you are allowed to change @tsk limits before calling this */
+int do_setrlimit(struct task_struct *tsk, unsigned int resource,
+ struct rlimit *new_rlim)
{
- struct rlimit new_rlim, *old_rlim;
- int retval;
+ struct rlimit *old_rlim;
+ int retval = 0;
if (resource >= RLIM_NLIMITS)
return -EINVAL;
- if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
- return -EFAULT;
- if (new_rlim.rlim_cur > new_rlim.rlim_max)
+ if (new_rlim->rlim_cur > new_rlim->rlim_max)
return -EINVAL;
- old_rlim = current->signal->rlim + resource;
- if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
- !capable(CAP_SYS_RESOURCE))
- return -EPERM;
- if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
+ if (resource == RLIMIT_NOFILE && new_rlim->rlim_max > sysctl_nr_open)
return -EPERM;
- retval = security_task_setrlimit(resource, &new_rlim);
- if (retval)
- return retval;
+ /* optimization: 'current' doesn't need locking, e.g. setrlimit */
+ if (tsk != current) {
+ /* protect tsk->signal and tsk->sighand from disappearing */
+ read_lock(&tasklist_lock);
+ if (!tsk->sighand) {
+ retval = -ESRCH;
+ goto out;
+ }
+ }
- if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
+ if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
/*
* The caller is asking for an immediate RLIMIT_CPU
* expiry. But we use the zero value to mean "it was
* never set". So let's cheat and make it one second
* instead
*/
- new_rlim.rlim_cur = 1;
+ new_rlim->rlim_cur = 1;
}
- task_lock(current->group_leader);
- *old_rlim = new_rlim;
- task_unlock(current->group_leader);
-
- if (resource != RLIMIT_CPU)
+ old_rlim = tsk->signal->rlim + resource;
+ task_lock(tsk->group_leader);
+ if ((new_rlim->rlim_max > old_rlim->rlim_max) &&
+ !capable(CAP_SYS_RESOURCE))
+ retval = -EPERM;
+ if (!retval)
+ retval = security_task_setrlimit(tsk, resource, new_rlim);
+ if (!retval)
+ *old_rlim = *new_rlim;
+ task_unlock(tsk->group_leader);
+
+ if (retval || resource != RLIMIT_CPU)
goto out;
/*
@@ -1284,12 +1348,51 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
* very long-standing error, and fixing it now risks breakage of
* applications, so we live with it
*/
- if (new_rlim.rlim_cur == RLIM_INFINITY)
+ if (new_rlim->rlim_cur == RLIM_INFINITY)
goto out;
- update_rlimit_cpu(new_rlim.rlim_cur);
+ update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
- return 0;
+ if (tsk != current)
+ read_unlock(&tasklist_lock);
+ return retval;
+}
+
+SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
+{
+ struct rlimit new_rlim;
+
+ if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
+ return -EFAULT;
+ return do_setrlimit(current, resource, &new_rlim);
+}
+
+SYSCALL_DEFINE3(setprlimit, pid_t, pid, unsigned int, resource,
+ struct rlimit __user *, rlim)
+{
+ struct task_struct *tsk;
+ struct rlimit new_rlim;
+ int ret;
+
+ if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
+ return -EFAULT;
+
+ rcu_read_lock();
+ tsk = find_task_by_vpid(pid);
+ if (!tsk) {
+ rcu_read_unlock();
+ return -ESRCH;
+ }
+ get_task_struct(tsk);
+ rcu_read_unlock();
+
+ ret = check_prlimit_permission(tsk);
+ if (!ret)
+ ret = do_setrlimit(tsk, resource, &new_rlim);
+
+ put_task_struct(tsk);
+
+ return ret;
}
/*
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 695384f12a7d..96ed4e6c43e5 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -180,3 +180,7 @@ cond_syscall(sys_eventfd2);
/* performance counters: */
cond_syscall(sys_perf_event_open);
+
+/* fanotify! */
+cond_syscall(sys_fanotify_init);
+cond_syscall(sys_fanotify_mark);
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index cd9ecd89ec77..3b3881cae406 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -51,10 +51,15 @@ endif
obj-$(CONFIG_EVENT_TRACING) += trace_events.o
obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
-obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
+ifeq ($(CONFIG_PERF_EVENTS),y)
+obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o
+endif
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o
obj-$(CONFIG_EVENT_TRACING) += power-traces.o
+ifeq ($(CONFIG_TRACING),y)
+obj-$(CONFIG_KGDB_KDB) += trace_kdb.o
+endif
libftrace-y := ftrace.o
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2326b04c95c4..a38bcf0a7722 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -20,6 +20,7 @@
#include <linux/cpu.h>
#include <linux/fs.h>
+#include <asm/local.h>
#include "trace.h"
/*
@@ -2539,7 +2540,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
* @buffer: The ring buffer to enable writes
*
* Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
@@ -2575,7 +2576,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
* @cpu: The CPU to enable.
*
* Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
*/
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
{
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c
index b2477caf09c2..df74c7982255 100644
--- a/kernel/trace/ring_buffer_benchmark.c
+++ b/kernel/trace/ring_buffer_benchmark.c
@@ -8,6 +8,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>
+#include <asm/local.h>
struct rb_page {
u64 ts;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0df1b0f2cb9e..a473697607f3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -91,23 +91,20 @@ DEFINE_PER_CPU(int, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void)
{
preempt_disable();
- __this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_inc(ftrace_cpu_disabled);
}
static inline void ftrace_enable_cpu(void)
{
- __this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
+ __this_cpu_dec(ftrace_cpu_disabled);
preempt_enable();
}
-static cpumask_var_t __read_mostly tracing_buffer_mask;
+cpumask_var_t __read_mostly tracing_buffer_mask;
/* Define which cpu buffers are currently read in trace_pipe */
static cpumask_var_t tracing_reader_cpumask;
-#define for_each_tracing_cpu(cpu) \
- for_each_cpu(cpu, tracing_buffer_mask)
-
/*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops
*
@@ -1084,7 +1081,7 @@ trace_function(struct trace_array *tr,
struct ftrace_entry *entry;
/* If we are reading the ring buffer, don't trace */
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1409,11 +1406,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
}
EXPORT_SYMBOL_GPL(trace_vprintk);
-enum trace_file_type {
- TRACE_FILE_LAT_FMT = 1,
- TRACE_FILE_ANNOTATE = 2,
-};
-
static void trace_iterator_increment(struct trace_iterator *iter)
{
/* Don't allow ftrace to trace into the ring buffers */
@@ -1503,7 +1495,7 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
}
/* Find the next real entry, and increment the iterator to the next entry */
-static void *find_next_entry_inc(struct trace_iterator *iter)
+void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
@@ -1536,12 +1528,12 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
return NULL;
if (iter->idx < 0)
- ent = find_next_entry_inc(iter);
+ ent = trace_find_next_entry_inc(iter);
else
ent = iter;
while (ent && iter->idx < i)
- ent = find_next_entry_inc(iter);
+ ent = trace_find_next_entry_inc(iter);
iter->pos = *pos;
@@ -1878,7 +1870,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
}
-static int trace_empty(struct trace_iterator *iter)
+int trace_empty(struct trace_iterator *iter)
{
int cpu;
@@ -1909,7 +1901,7 @@ static int trace_empty(struct trace_iterator *iter)
}
/* Called with trace_event_read_lock() held. */
-static enum print_line_t print_trace_line(struct trace_iterator *iter)
+enum print_line_t print_trace_line(struct trace_iterator *iter)
{
enum print_line_t ret;
@@ -3074,7 +3066,7 @@ waitagain:
iter->pos = -1;
trace_event_read_lock();
- while (find_next_entry_inc(iter) != NULL) {
+ while (trace_find_next_entry_inc(iter) != NULL) {
enum print_line_t ret;
int len = iter->seq.len;
@@ -3156,7 +3148,7 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
if (ret != TRACE_TYPE_NO_CONSUME)
trace_consume(iter);
rem -= count;
- if (!find_next_entry_inc(iter)) {
+ if (!trace_find_next_entry_inc(iter)) {
rem = 0;
iter->ent = NULL;
break;
@@ -3209,7 +3201,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
if (ret <= 0)
goto out_err;
- if (!iter->ent && !find_next_entry_inc(iter)) {
+ if (!iter->ent && !trace_find_next_entry_inc(iter)) {
ret = -EFAULT;
goto out_err;
}
@@ -4262,7 +4254,7 @@ static struct notifier_block trace_die_notifier = {
*/
#define KERN_TRACE KERN_EMERG
-static void
+void
trace_printk_seq(struct trace_seq *s)
{
/* Probably should print a warning here. */
@@ -4277,6 +4269,13 @@ trace_printk_seq(struct trace_seq *s)
trace_seq_init(s);
}
+void trace_init_global_iter(struct trace_iterator *iter)
+{
+ iter->tr = &global_trace;
+ iter->trace = current_trace;
+ iter->cpu_file = TRACE_PIPE_ALL_CPU;
+}
+
static void __ftrace_dump(bool disable_tracing)
{
static arch_spinlock_t ftrace_dump_lock =
@@ -4301,8 +4300,10 @@ static void __ftrace_dump(bool disable_tracing)
if (disable_tracing)
ftrace_kill();
+ trace_init_global_iter(&iter);
+
for_each_tracing_cpu(cpu) {
- atomic_inc(&global_trace.data[cpu]->disabled);
+ atomic_inc(&iter.tr->data[cpu]->disabled);
}
old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
@@ -4312,11 +4313,6 @@ static void __ftrace_dump(bool disable_tracing)
printk(KERN_TRACE "Dumping ftrace buffer:\n");
- /* Simulate the iterator */
- iter.tr = &global_trace;
- iter.trace = current_trace;
- iter.cpu_file = TRACE_PIPE_ALL_CPU;
-
/*
* We need to stop all tracing on all CPUS to read the
* the next buffer. This is a bit expensive, but is
@@ -4338,7 +4334,7 @@ static void __ftrace_dump(bool disable_tracing)
iter.iter_flags |= TRACE_FILE_LAT_FMT;
iter.pos = -1;
- if (find_next_entry_inc(&iter) != NULL) {
+ if (trace_find_next_entry_inc(&iter) != NULL) {
int ret;
ret = print_trace_line(&iter);
@@ -4359,7 +4355,7 @@ static void __ftrace_dump(bool disable_tracing)
trace_flags |= old_userobj;
for_each_tracing_cpu(cpu) {
- atomic_dec(&global_trace.data[cpu]->disabled);
+ atomic_dec(&iter.tr->data[cpu]->disabled);
}
tracing_on();
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 4df6a77eb196..563e6f8d1fb0 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -352,6 +352,12 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts);
+int trace_empty(struct trace_iterator *iter);
+
+void *trace_find_next_entry_inc(struct trace_iterator *iter);
+
+void trace_init_global_iter(struct trace_iterator *iter);
+
void default_wait_pipe(struct trace_iterator *iter);
void poll_wait_pipe(struct trace_iterator *iter);
@@ -391,6 +397,15 @@ void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type);
void unregister_tracer(struct tracer *type);
int is_tracing_stopped(void);
+enum trace_file_type {
+ TRACE_FILE_LAT_FMT = 1,
+ TRACE_FILE_ANNOTATE = 2,
+};
+
+extern cpumask_var_t __read_mostly tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu) \
+ for_each_cpu(cpu, tracing_buffer_mask)
extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr);
@@ -483,6 +498,8 @@ trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...);
+void trace_printk_seq(struct trace_seq *s);
+enum print_line_t print_trace_line(struct trace_iterator *iter);
extern unsigned long trace_flags;
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 50504cb228de..74563d7e102e 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1360,7 +1360,7 @@ out_unlock:
return err;
}
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
void ftrace_profile_free_filter(struct perf_event *event)
{
@@ -1428,5 +1428,5 @@ out_unlock:
return err;
}
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b1342c5d37cf..9d976f3249a3 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ent_entry *entry;
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return 0;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
struct ring_buffer *buffer = tr->buffer;
struct ftrace_graph_ret_entry *entry;
- if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
+ if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
return;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
new file mode 100644
index 000000000000..9227ff3038cb
--- /dev/null
+++ b/kernel/trace/trace_kdb.c
@@ -0,0 +1,116 @@
+/*
+ * kdb helper for dumping the ftrace buffer
+ *
+ * Copyright (C) 2010 Jason Wessel <jason.wessel@windriver.com>
+ *
+ * ftrace_dump_buf based on ftrace_dump:
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ */
+#include <linux/init.h>
+#include <linux/kgdb.h>
+#include <linux/kdb.h>
+#include <linux/ftrace.h>
+
+#include "../debug/kdb/kdb_private.h"
+#include "trace.h"
+#include "trace_output.h"
+
+static void ftrace_dump_buf(int skip_lines)
+{
+ /* use static because iter can be a bit big for the stack */
+ static struct trace_iterator iter;
+ unsigned int old_userobj;
+ int cnt = 0, cpu;
+
+ trace_init_global_iter(&iter);
+
+ for_each_tracing_cpu(cpu) {
+ atomic_inc(&iter.tr->data[cpu]->disabled);
+ }
+
+ old_userobj = trace_flags;
+
+ /* don't look at user memory in panic mode */
+ trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
+
+ kdb_printf("Dumping ftrace buffer:\n");
+
+ /* reset all but tr, trace, and overruns */
+ memset(&iter.seq, 0,
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
+ iter.iter_flags |= TRACE_FILE_LAT_FMT;
+ iter.pos = -1;
+
+ for_each_tracing_cpu(cpu)
+ iter.buffer_iter[cpu] =
+ ring_buffer_read_start(iter.tr->buffer, cpu);
+
+ if (!trace_empty(&iter))
+ trace_find_next_entry_inc(&iter);
+ while (!trace_empty(&iter)) {
+ if (!cnt)
+ kdb_printf("---------------------------------\n");
+ cnt++;
+
+ if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines)
+ print_trace_line(&iter);
+ if (!skip_lines)
+ trace_printk_seq(&iter.seq);
+ else
+ skip_lines--;
+ if (KDB_FLAG(CMD_INTERRUPT))
+ goto out;
+ }
+
+ if (!cnt)
+ kdb_printf(" (ftrace buffer empty)\n");
+ else
+ kdb_printf("---------------------------------\n");
+
+out:
+ trace_flags = old_userobj;
+
+ for_each_tracing_cpu(cpu) {
+ atomic_dec(&iter.tr->data[cpu]->disabled);
+ }
+
+ for_each_tracing_cpu(cpu)
+ if (iter.buffer_iter[cpu])
+ ring_buffer_read_finish(iter.buffer_iter[cpu]);
+}
+
+/*
+ * kdb_ftdump - Dump the ftrace log buffer
+ */
+static int kdb_ftdump(int argc, const char **argv)
+{
+ int skip_lines = 0;
+ char *cp;
+
+ if (argc > 1)
+ return KDB_ARGCOUNT;
+
+ if (argc) {
+ skip_lines = simple_strtol(argv[1], &cp, 0);
+ if (*cp)
+ skip_lines = 0;
+ }
+
+ kdb_trap_printk++;
+ ftrace_dump_buf(skip_lines);
+ kdb_trap_printk--;
+
+ return 0;
+}
+
+static __init int kdb_ftrace_register(void)
+{
+ kdb_register_repeat("ftdump", kdb_ftdump, "", "Dump ftrace log",
+ 0, KDB_REPEAT_NONE);
+ return 0;
+}
+
+late_initcall(kdb_ftrace_register);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6ea90c0e2c96..47f54ab57b68 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1250,7 +1250,7 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call,
", REC->" FIELD_STRING_RETIP);
}
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
/* Kprobe profile handler */
static __kprobes int kprobe_profile_func(struct kprobe *kp,
@@ -1408,7 +1408,7 @@ static void probe_profile_disable(struct ftrace_event_call *call)
disable_kprobe(&tp->rp.kp);
}
}
-#endif /* CONFIG_EVENT_PROFILE */
+#endif /* CONFIG_PERF_EVENTS */
static __kprobes
@@ -1418,10 +1418,10 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
if (tp->flags & TP_FLAG_TRACE)
kprobe_trace_func(kp, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
if (tp->flags & TP_FLAG_PROFILE)
kprobe_profile_func(kp, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
return 0; /* We don't tweek kernel, so just return 0 */
}
@@ -1432,10 +1432,10 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
if (tp->flags & TP_FLAG_TRACE)
kretprobe_trace_func(ri, regs);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
if (tp->flags & TP_FLAG_PROFILE)
kretprobe_profile_func(ri, regs);
-#endif /* CONFIG_EVENT_PROFILE */
+#endif
return 0; /* We don't tweek kernel, so just return 0 */
}
@@ -1464,7 +1464,7 @@ static int register_probe_event(struct trace_probe *tp)
call->regfunc = probe_event_enable;
call->unregfunc = probe_event_disable;
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
call->profile_enable = probe_profile_enable;
call->profile_disable = probe_profile_disable;
#endif
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 75289f372dd2..f694f66d75b0 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -421,7 +421,7 @@ int __init init_ftrace_syscalls(void)
}
core_initcall(init_ftrace_syscalls);
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
@@ -626,6 +626,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call)
mutex_unlock(&syscall_trace_lock);
}
-#endif
-
+#endif /* CONFIG_PERF_EVENTS */
diff --git a/lib/Kconfig b/lib/Kconfig
index 1cfe51628e1b..af12831f2eea 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -117,6 +117,10 @@ config DECOMPRESS_BZIP2
config DECOMPRESS_LZMA
tristate
+config DECOMPRESS_LZO
+ select LZO_DECOMPRESS
+ tristate
+
#
# Generic allocator support is selected if needed
#
@@ -156,6 +160,9 @@ config TEXTSEARCH_BM
config TEXTSEARCH_FSM
tristate
+config BTREE
+ boolean
+
config HAS_IOMEM
boolean
depends on !NO_IOMEM
@@ -203,4 +210,25 @@ config GENERIC_ATOMIC64
config LRU_CACHE
tristate
+config SHM_SIGNAL
+ tristate "SHM Signal - Generic shared-memory signaling mechanism"
+ default n
+ help
+ Provides a shared-memory based signaling mechanism to indicate
+ memory-dirty notifications between two end-points.
+
+ If unsure, say N
+
+config IOQ
+ tristate "IO-Queue library - Generic shared-memory queue"
+ select SHM_SIGNAL
+ default n
+ help
+ IOQ is a generic shared-memory-queue mechanism that happens to be
+ friendly to virtualization boundaries. It can be used in a variety
+ of ways, though its intended purpose is to become a low-level
+ communication path for paravirtualized drivers.
+
+ If unsure, say N
+
endmenu
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 9b5d1d7f2ef7..ce4fa6b9e98f 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -57,4 +57,26 @@ config KGDB_TESTS_BOOT_STRING
information about other strings you could use beyond the
default of V1F100.
+config KGDB_LOW_LEVEL_TRAP
+ bool "KGDB: Allow debugging with traps in notifiers"
+ depends on X86 || PPC || MIPS
+ default n
+ help
+ This will add an extra call back to kgdb for the breakpoint
+ exception handler on which will will allow kgdb to step
+ through a notify handler.
+
+config KGDB_KDB
+ bool "KGDB_KDB: include kdb frontend for kgdb"
+ default n
+ help
+ KDB frontend for kernel
+
+config KDB_KEYBOARD
+ bool "KGDB_KDB: keyboard as input device"
+ depends on VT && KGDB_KDB
+ default y
+ help
+ KDB can use a PS/2 type keyboard for an input device
+
endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 347ad8db29d3..68d01a58ba4c 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -41,6 +41,7 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_BTREE) += btree.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
@@ -69,6 +70,7 @@ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/
lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o
lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o
lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o
+lib-$(CONFIG_DECOMPRESS_LZO) += decompress_unlzo.o
obj-$(CONFIG_TEXTSEARCH) += textsearch.o
obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
@@ -76,6 +78,8 @@ obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+obj-$(CONFIG_SHM_SIGNAL) += shm_signal.o
+obj-$(CONFIG_IOQ) += ioq.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
diff --git a/lib/btree.c b/lib/btree.c
new file mode 100644
index 000000000000..41859a820218
--- /dev/null
+++ b/lib/btree.c
@@ -0,0 +1,797 @@
+/*
+ * lib/btree.c - Simple In-memory B+Tree
+ *
+ * As should be obvious for Linux kernel code, license is GPLv2
+ *
+ * Copyright (c) 2007-2008 Joern Engel <joern@logfs.org>
+ * Bits and pieces stolen from Peter Zijlstra's code, which is
+ * Copyright 2007, Red Hat Inc. Peter Zijlstra <pzijlstr@redhat.com>
+ * GPLv2
+ *
+ * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch
+ *
+ * A relatively simple B+Tree implementation. I have written it as a learning
+ * excercise to understand how B+Trees work. Turned out to be useful as well.
+ *
+ * B+Trees can be used similar to Linux radix trees (which don't have anything
+ * in common with textbook radix trees, beware). Prerequisite for them working
+ * well is that access to a random tree node is much faster than a large number
+ * of operations within each node.
+ *
+ * Disks have fulfilled the prerequisite for a long time. More recently DRAM
+ * has gained similar properties, as memory access times, when measured in cpu
+ * cycles, have increased. Cacheline sizes have increased as well, which also
+ * helps B+Trees.
+ *
+ * Compared to radix trees, B+Trees are more efficient when dealing with a
+ * sparsely populated address space. Between 25% and 50% of the memory is
+ * occupied with valid pointers. When densely populated, radix trees contain
+ * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2%
+ * pointers.
+ *
+ * This particular implementation stores pointers identified by a long value.
+ * Storing NULL pointers is illegal, lookup will return NULL when no entry
+ * was found.
+ *
+ * A tricks was used that is not commonly found in textbooks. The lowest
+ * values are to the right, not to the left. All used slots within a node
+ * are on the left, all unused slots contain NUL values. Most operations
+ * simply loop once over all slots and terminate on the first NUL.
+ */
+
+#include <linux/btree.h>
+#include <linux/cache.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#define NODESIZE MAX(L1_CACHE_BYTES, 128)
+
+struct btree_geo {
+ int keylen;
+ int no_pairs;
+ int no_longs;
+};
+
+struct btree_geo btree_geo32 = {
+ .keylen = 1,
+ .no_pairs = NODESIZE / sizeof(long) / 2,
+ .no_longs = NODESIZE / sizeof(long) / 2,
+};
+EXPORT_SYMBOL_GPL(btree_geo32);
+
+#define LONG_PER_U64 (64 / BITS_PER_LONG)
+struct btree_geo btree_geo64 = {
+ .keylen = LONG_PER_U64,
+ .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64),
+ .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)),
+};
+EXPORT_SYMBOL_GPL(btree_geo64);
+
+struct btree_geo btree_geo128 = {
+ .keylen = 2 * LONG_PER_U64,
+ .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64),
+ .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)),
+};
+EXPORT_SYMBOL_GPL(btree_geo128);
+
+static struct kmem_cache *btree_cachep;
+
+void *btree_alloc(gfp_t gfp_mask, void *pool_data)
+{
+ return kmem_cache_alloc(btree_cachep, gfp_mask);
+}
+EXPORT_SYMBOL_GPL(btree_alloc);
+
+void btree_free(void *element, void *pool_data)
+{
+ kmem_cache_free(btree_cachep, element);
+}
+EXPORT_SYMBOL_GPL(btree_free);
+
+static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp)
+{
+ unsigned long *node;
+
+ node = mempool_alloc(head->mempool, gfp);
+ memset(node, 0, NODESIZE);
+ return node;
+}
+
+static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++) {
+ if (l1[i] < l2[i])
+ return -1;
+ if (l1[i] > l2[i])
+ return 1;
+ }
+ return 0;
+}
+
+static unsigned long *longcpy(unsigned long *dest, const unsigned long *src,
+ size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ dest[i] = src[i];
+ return dest;
+}
+
+static unsigned long *longset(unsigned long *s, unsigned long c, size_t n)
+{
+ size_t i;
+
+ for (i = 0; i < n; i++)
+ s[i] = c;
+ return s;
+}
+
+static void dec_key(struct btree_geo *geo, unsigned long *key)
+{
+ unsigned long val;
+ int i;
+
+ for (i = geo->keylen - 1; i >= 0; i--) {
+ val = key[i];
+ key[i] = val - 1;
+ if (val)
+ break;
+ }
+}
+
+static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n)
+{
+ return &node[n * geo->keylen];
+}
+
+static void *bval(struct btree_geo *geo, unsigned long *node, int n)
+{
+ return (void *)node[geo->no_longs + n];
+}
+
+static void setkey(struct btree_geo *geo, unsigned long *node, int n,
+ unsigned long *key)
+{
+ longcpy(bkey(geo, node, n), key, geo->keylen);
+}
+
+static void setval(struct btree_geo *geo, unsigned long *node, int n,
+ void *val)
+{
+ node[geo->no_longs + n] = (unsigned long) val;
+}
+
+static void clearpair(struct btree_geo *geo, unsigned long *node, int n)
+{
+ longset(bkey(geo, node, n), 0, geo->keylen);
+ node[geo->no_longs + n] = 0;
+}
+
+static inline void __btree_init(struct btree_head *head)
+{
+ head->node = NULL;
+ head->height = 0;
+}
+
+void btree_init_mempool(struct btree_head *head, mempool_t *mempool)
+{
+ __btree_init(head);
+ head->mempool = mempool;
+}
+EXPORT_SYMBOL_GPL(btree_init_mempool);
+
+int btree_init(struct btree_head *head)
+{
+ __btree_init(head);
+ head->mempool = mempool_create(0, btree_alloc, btree_free, NULL);
+ if (!head->mempool)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btree_init);
+
+void btree_destroy(struct btree_head *head)
+{
+ mempool_destroy(head->mempool);
+ head->mempool = NULL;
+}
+EXPORT_SYMBOL_GPL(btree_destroy);
+
+void *btree_last(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return NULL;
+
+ for ( ; height > 1; height--)
+ node = bval(geo, node, 0);
+
+ longcpy(key, bkey(geo, node, 0), geo->keylen);
+ return bval(geo, node, 0);
+}
+EXPORT_SYMBOL_GPL(btree_last);
+
+static int keycmp(struct btree_geo *geo, unsigned long *node, int pos,
+ unsigned long *key)
+{
+ return longcmp(bkey(geo, node, pos), key, geo->keylen);
+}
+
+static int keyzero(struct btree_geo *geo, unsigned long *key)
+{
+ int i;
+
+ for (i = 0; i < geo->keylen; i++)
+ if (key[i])
+ return 0;
+
+ return 1;
+}
+
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ int i, height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return NULL;
+
+ for ( ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ return NULL;
+ node = bval(geo, node, i);
+ if (!node)
+ return NULL;
+ }
+
+ if (!node)
+ return NULL;
+
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) == 0)
+ return bval(geo, node, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(btree_lookup);
+
+int btree_update(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val)
+{
+ int i, height = head->height;
+ unsigned long *node = head->node;
+
+ if (height == 0)
+ return -ENOENT;
+
+ for ( ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ return -ENOENT;
+ node = bval(geo, node, i);
+ if (!node)
+ return -ENOENT;
+ }
+
+ if (!node)
+ return -ENOENT;
+
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) == 0) {
+ setval(geo, node, i, val);
+ return 0;
+ }
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(btree_update);
+
+/*
+ * Usually this function is quite similar to normal lookup. But the key of
+ * a parent node may be smaller than the smallest key of all its siblings.
+ * In such a case we cannot just return NULL, as we have only proven that no
+ * key smaller than __key, but larger than this parent key exists.
+ * So we set __key to the parent key and retry. We have to use the smallest
+ * such parent key, which is the last parent key we encountered.
+ */
+void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *__key)
+{
+ int i, height;
+ unsigned long *node, *oldnode;
+ unsigned long *retry_key = NULL, key[geo->keylen];
+
+ if (keyzero(geo, __key))
+ return NULL;
+
+ if (head->height == 0)
+ return NULL;
+retry:
+ longcpy(key, __key, geo->keylen);
+ dec_key(geo, key);
+
+ node = head->node;
+ for (height = head->height ; height > 1; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ if (i == geo->no_pairs)
+ goto miss;
+ oldnode = node;
+ node = bval(geo, node, i);
+ if (!node)
+ goto miss;
+ retry_key = bkey(geo, oldnode, i);
+ }
+
+ if (!node)
+ goto miss;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ if (keycmp(geo, node, i, key) <= 0) {
+ if (bval(geo, node, i)) {
+ longcpy(__key, bkey(geo, node, i), geo->keylen);
+ return bval(geo, node, i);
+ } else
+ goto miss;
+ }
+ }
+miss:
+ if (retry_key) {
+ __key = retry_key;
+ retry_key = NULL;
+ goto retry;
+ }
+ return NULL;
+}
+
+static int getpos(struct btree_geo *geo, unsigned long *node,
+ unsigned long *key)
+{
+ int i;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+ }
+ return i;
+}
+
+static int getfill(struct btree_geo *geo, unsigned long *node, int start)
+{
+ int i;
+
+ for (i = start; i < geo->no_pairs; i++)
+ if (!bval(geo, node, i))
+ break;
+ return i;
+}
+
+/*
+ * locate the correct leaf node in the btree
+ */
+static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level)
+{
+ unsigned long *node = head->node;
+ int i, height;
+
+ for (height = head->height; height > level; height--) {
+ for (i = 0; i < geo->no_pairs; i++)
+ if (keycmp(geo, node, i, key) <= 0)
+ break;
+
+ if ((i == geo->no_pairs) || !bval(geo, node, i)) {
+ /* right-most key is too large, update it */
+ /* FIXME: If the right-most key on higher levels is
+ * always zero, this wouldn't be necessary. */
+ i--;
+ setkey(geo, node, i, key);
+ }
+ BUG_ON(i < 0);
+ node = bval(geo, node, i);
+ }
+ BUG_ON(!node);
+ return node;
+}
+
+static int btree_grow(struct btree_head *head, struct btree_geo *geo,
+ gfp_t gfp)
+{
+ unsigned long *node;
+ int fill;
+
+ node = btree_node_alloc(head, gfp);
+ if (!node)
+ return -ENOMEM;
+ if (head->node) {
+ fill = getfill(geo, head->node, 0);
+ setkey(geo, node, 0, bkey(geo, head->node, fill - 1));
+ setval(geo, node, 0, head->node);
+ }
+ head->node = node;
+ head->height++;
+ return 0;
+}
+
+static void btree_shrink(struct btree_head *head, struct btree_geo *geo)
+{
+ unsigned long *node;
+ int fill;
+
+ if (head->height <= 1)
+ return;
+
+ node = head->node;
+ fill = getfill(geo, node, 0);
+ BUG_ON(fill > 1);
+ head->node = bval(geo, node, 0);
+ head->height--;
+ mempool_free(node, head->mempool);
+}
+
+static int btree_insert_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, int level,
+ gfp_t gfp)
+{
+ unsigned long *node;
+ int i, pos, fill, err;
+
+ BUG_ON(!val);
+ if (head->height < level) {
+ err = btree_grow(head, geo, gfp);
+ if (err)
+ return err;
+ }
+
+retry:
+ node = find_level(head, geo, key, level);
+ pos = getpos(geo, node, key);
+ fill = getfill(geo, node, pos);
+ /* two identical keys are not allowed */
+ BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0);
+
+ if (fill == geo->no_pairs) {
+ /* need to split node */
+ unsigned long *new;
+
+ new = btree_node_alloc(head, gfp);
+ if (!new)
+ return -ENOMEM;
+ err = btree_insert_level(head, geo,
+ bkey(geo, node, fill / 2 - 1),
+ new, level + 1, gfp);
+ if (err) {
+ mempool_free(new, head->mempool);
+ return err;
+ }
+ for (i = 0; i < fill / 2; i++) {
+ setkey(geo, new, i, bkey(geo, node, i));
+ setval(geo, new, i, bval(geo, node, i));
+ setkey(geo, node, i, bkey(geo, node, i + fill / 2));
+ setval(geo, node, i, bval(geo, node, i + fill / 2));
+ clearpair(geo, node, i + fill / 2);
+ }
+ if (fill & 1) {
+ setkey(geo, node, i, bkey(geo, node, fill - 1));
+ setval(geo, node, i, bval(geo, node, fill - 1));
+ clearpair(geo, node, fill - 1);
+ }
+ goto retry;
+ }
+ BUG_ON(fill >= geo->no_pairs);
+
+ /* shift and insert */
+ for (i = fill; i > pos; i--) {
+ setkey(geo, node, i, bkey(geo, node, i - 1));
+ setval(geo, node, i, bval(geo, node, i - 1));
+ }
+ setkey(geo, node, pos, key);
+ setval(geo, node, pos, val);
+
+ return 0;
+}
+
+int btree_insert(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, void *val, gfp_t gfp)
+{
+ return btree_insert_level(head, geo, key, val, 1, gfp);
+}
+EXPORT_SYMBOL_GPL(btree_insert);
+
+static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level);
+static void merge(struct btree_head *head, struct btree_geo *geo, int level,
+ unsigned long *left, int lfill,
+ unsigned long *right, int rfill,
+ unsigned long *parent, int lpos)
+{
+ int i;
+
+ for (i = 0; i < rfill; i++) {
+ /* Move all keys to the left */
+ setkey(geo, left, lfill + i, bkey(geo, right, i));
+ setval(geo, left, lfill + i, bval(geo, right, i));
+ }
+ /* Exchange left and right child in parent */
+ setval(geo, parent, lpos, right);
+ setval(geo, parent, lpos + 1, left);
+ /* Remove left (formerly right) child from parent */
+ btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1);
+ mempool_free(right, head->mempool);
+}
+
+static void rebalance(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level, unsigned long *child, int fill)
+{
+ unsigned long *parent, *left = NULL, *right = NULL;
+ int i, no_left, no_right;
+
+ if (fill == 0) {
+ /* Because we don't steal entries from a neigbour, this case
+ * can happen. Parent node contains a single child, this
+ * node, so merging with a sibling never happens.
+ */
+ btree_remove_level(head, geo, key, level + 1);
+ mempool_free(child, head->mempool);
+ return;
+ }
+
+ parent = find_level(head, geo, key, level + 1);
+ i = getpos(geo, parent, key);
+ BUG_ON(bval(geo, parent, i) != child);
+
+ if (i > 0) {
+ left = bval(geo, parent, i - 1);
+ no_left = getfill(geo, left, 0);
+ if (fill + no_left <= geo->no_pairs) {
+ merge(head, geo, level,
+ left, no_left,
+ child, fill,
+ parent, i - 1);
+ return;
+ }
+ }
+ if (i + 1 < getfill(geo, parent, i)) {
+ right = bval(geo, parent, i + 1);
+ no_right = getfill(geo, right, 0);
+ if (fill + no_right <= geo->no_pairs) {
+ merge(head, geo, level,
+ child, fill,
+ right, no_right,
+ parent, i);
+ return;
+ }
+ }
+ /*
+ * We could also try to steal one entry from the left or right
+ * neighbor. By not doing so we changed the invariant from
+ * "all nodes are at least half full" to "no two neighboring
+ * nodes can be merged". Which means that the average fill of
+ * all nodes is still half or better.
+ */
+}
+
+static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key, int level)
+{
+ unsigned long *node;
+ int i, pos, fill;
+ void *ret;
+
+ if (level > head->height) {
+ /* we recursed all the way up */
+ head->height = 0;
+ head->node = NULL;
+ return NULL;
+ }
+
+ node = find_level(head, geo, key, level);
+ pos = getpos(geo, node, key);
+ fill = getfill(geo, node, pos);
+ if ((level == 1) && (keycmp(geo, node, pos, key) != 0))
+ return NULL;
+ ret = bval(geo, node, pos);
+
+ /* remove and shift */
+ for (i = pos; i < fill - 1; i++) {
+ setkey(geo, node, i, bkey(geo, node, i + 1));
+ setval(geo, node, i, bval(geo, node, i + 1));
+ }
+ clearpair(geo, node, fill - 1);
+
+ if (fill - 1 < geo->no_pairs / 2) {
+ if (level < head->height)
+ rebalance(head, geo, key, level, node, fill - 1);
+ else if (fill - 1 == 1)
+ btree_shrink(head, geo);
+ }
+
+ return ret;
+}
+
+void *btree_remove(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *key)
+{
+ if (head->height == 0)
+ return NULL;
+
+ return btree_remove_level(head, geo, key, 1);
+}
+EXPORT_SYMBOL_GPL(btree_remove);
+
+int btree_merge(struct btree_head *target, struct btree_head *victim,
+ struct btree_geo *geo, gfp_t gfp)
+{
+ unsigned long key[geo->keylen];
+ unsigned long dup[geo->keylen];
+ void *val;
+ int err;
+
+ BUG_ON(target == victim);
+
+ if (!(target->node)) {
+ /* target is empty, just copy fields over */
+ target->node = victim->node;
+ target->height = victim->height;
+ __btree_init(victim);
+ return 0;
+ }
+
+ /* TODO: This needs some optimizations. Currently we do three tree
+ * walks to remove a single object from the victim.
+ */
+ for (;;) {
+ if (!btree_last(victim, geo, key))
+ break;
+ val = btree_lookup(victim, geo, key);
+ err = btree_insert(target, geo, key, val, gfp);
+ if (err)
+ return err;
+ /* We must make a copy of the key, as the original will get
+ * mangled inside btree_remove. */
+ longcpy(dup, key, geo->keylen);
+ btree_remove(victim, geo, dup);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btree_merge);
+
+static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo,
+ unsigned long *node, unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key, size_t index,
+ void *func2),
+ void *func2, int reap, int height, size_t count)
+{
+ int i;
+ unsigned long *child;
+
+ for (i = 0; i < geo->no_pairs; i++) {
+ child = bval(geo, node, i);
+ if (!child)
+ break;
+ if (height > 1)
+ count = __btree_for_each(head, geo, child, opaque,
+ func, func2, reap, height - 1, count);
+ else
+ func(child, opaque, bkey(geo, node, i), count++,
+ func2);
+ }
+ if (reap)
+ mempool_free(node, head->mempool);
+ return count;
+}
+
+static void empty(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *func2)
+{
+}
+
+void visitorl(void *elem, unsigned long opaque, unsigned long *key,
+ size_t index, void *__func)
+{
+ visitorl_t func = __func;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitorl);
+
+void visitor32(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor32_t func = __func;
+ u32 *key = (void *)__key;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitor32);
+
+void visitor64(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor64_t func = __func;
+ u64 *key = (void *)__key;
+
+ func(elem, opaque, *key, index);
+}
+EXPORT_SYMBOL_GPL(visitor64);
+
+void visitor128(void *elem, unsigned long opaque, unsigned long *__key,
+ size_t index, void *__func)
+{
+ visitor128_t func = __func;
+ u64 *key = (void *)__key;
+
+ func(elem, opaque, key[0], key[1], index);
+}
+EXPORT_SYMBOL_GPL(visitor128);
+
+size_t btree_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2)
+{
+ size_t count = 0;
+
+ if (!func2)
+ func = empty;
+ if (head->node)
+ count = __btree_for_each(head, geo, head->node, opaque, func,
+ func2, 0, head->height, 0);
+ return count;
+}
+EXPORT_SYMBOL_GPL(btree_visitor);
+
+size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo,
+ unsigned long opaque,
+ void (*func)(void *elem, unsigned long opaque,
+ unsigned long *key,
+ size_t index, void *func2),
+ void *func2)
+{
+ size_t count = 0;
+
+ if (!func2)
+ func = empty;
+ if (head->node)
+ count = __btree_for_each(head, geo, head->node, opaque, func,
+ func2, 1, head->height, 0);
+ __btree_init(head);
+ return count;
+}
+EXPORT_SYMBOL_GPL(btree_grim_visitor);
+
+static int __init btree_module_init(void)
+{
+ btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ return 0;
+}
+
+static void __exit btree_module_exit(void)
+{
+ kmem_cache_destroy(btree_cachep);
+}
+
+/* If core code starts using btree, initialization should happen even earlier */
+module_init(btree_module_init);
+module_exit(btree_module_exit);
+
+MODULE_AUTHOR("Joern Engel <joern@logfs.org>");
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_LICENSE("GPL");
diff --git a/lib/decompress.c b/lib/decompress.c
index d2842f571674..a7606815541f 100644
--- a/lib/decompress.c
+++ b/lib/decompress.c
@@ -9,6 +9,7 @@
#include <linux/decompress/bunzip2.h>
#include <linux/decompress/unlzma.h>
#include <linux/decompress/inflate.h>
+#include <linux/decompress/unlzo.h>
#include <linux/types.h>
#include <linux/string.h>
@@ -22,6 +23,9 @@
#ifndef CONFIG_DECOMPRESS_LZMA
# define unlzma NULL
#endif
+#ifndef CONFIG_DECOMPRESS_LZO
+# define unlzo NULL
+#endif
static const struct compress_format {
unsigned char magic[2];
@@ -32,6 +36,7 @@ static const struct compress_format {
{ {037, 0236}, "gzip", gunzip },
{ {0x42, 0x5a}, "bzip2", bunzip2 },
{ {0x5d, 0x00}, "lzma", unlzma },
+ { {0x89, 0x4c}, "lzo", unlzo },
{ {0, 0}, NULL, NULL }
};
diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c
new file mode 100644
index 000000000000..db521f45626e
--- /dev/null
+++ b/lib/decompress_unlzo.c
@@ -0,0 +1,209 @@
+/*
+ * LZO decompressor for the Linux kernel. Code borrowed from the lzo
+ * implementation by Markus Franz Xaver Johannes Oberhumer.
+ *
+ * Linux kernel adaptation:
+ * Copyright (C) 2009
+ * Albin Tonnerre, Free Electrons <albin.tonnerre@free-electrons.com>
+ *
+ * Original code:
+ * Copyright (C) 1996-2005 Markus Franz Xaver Johannes Oberhumer
+ * All Rights Reserved.
+ *
+ * lzop and the LZO library are free software; you can redistribute them
+ * and/or modify them under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.
+ * If not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Markus F.X.J. Oberhumer
+ * <markus@oberhumer.com>
+ * http://www.oberhumer.com/opensource/lzop/
+ */
+
+#ifdef STATIC
+#include "lzo/lzo1x_decompress.c"
+#else
+#include <linux/slab.h>
+#include <linux/decompress/unlzo.h>
+#endif
+
+#include <linux/types.h>
+#include <linux/lzo.h>
+#include <linux/decompress/mm.h>
+
+#include <linux/compiler.h>
+#include <asm/unaligned.h>
+
+static const unsigned char lzop_magic[] = {
+ 0x89, 0x4c, 0x5a, 0x4f, 0x00, 0x0d, 0x0a, 0x1a, 0x0a };
+
+#define LZO_BLOCK_SIZE (256*1024l)
+#define HEADER_HAS_FILTER 0x00000800L
+
+STATIC inline int INIT parse_header(u8 *input, u8 *skip)
+{
+ int l;
+ u8 *parse = input;
+ u8 level = 0;
+ u16 version;
+
+ /* read magic: 9 first bits */
+ for (l = 0; l < 9; l++) {
+ if (*parse++ != lzop_magic[l])
+ return 0;
+ }
+ /* get version (2bytes), skip library version (2),
+ * 'need to be extracted' version (2) and
+ * method (1) */
+ version = get_unaligned_be16(parse);
+ parse += 7;
+ if (version >= 0x0940)
+ level = *parse++;
+ if (get_unaligned_be32(parse) & HEADER_HAS_FILTER)
+ parse += 8; /* flags + filter info */
+ else
+ parse += 4; /* flags */
+
+ /* skip mode and mtime_low */
+ parse += 8;
+ if (version >= 0x0940)
+ parse += 4; /* skip mtime_high */
+
+ l = *parse++;
+ /* don't care about the file name, and skip checksum */
+ parse += l + 4;
+
+ *skip = parse - input;
+ return 1;
+}
+
+STATIC inline int INIT unlzo(u8 *input, int in_len,
+ int (*fill) (void *, unsigned int),
+ int (*flush) (void *, unsigned int),
+ u8 *output, int *posp,
+ void (*error_fn) (char *x))
+{
+ u8 skip = 0, r = 0;
+ u32 src_len, dst_len;
+ size_t tmp;
+ u8 *in_buf, *in_buf_save, *out_buf;
+ int obytes_processed = 0;
+
+ set_error_fn(error_fn);
+
+ if (output) {
+ out_buf = output;
+ } else if (!flush) {
+ error("NULL output pointer and no flush function provided");
+ goto exit;
+ } else {
+ out_buf = malloc(LZO_BLOCK_SIZE);
+ if (!out_buf) {
+ error("Could not allocate output buffer");
+ goto exit;
+ }
+ }
+
+ if (input && fill) {
+ error("Both input pointer and fill function provided, don't know what to do");
+ goto exit_1;
+ } else if (input) {
+ in_buf = input;
+ } else if (!fill || !posp) {
+ error("NULL input pointer and missing position pointer or fill function");
+ goto exit_1;
+ } else {
+ in_buf = malloc(lzo1x_worst_compress(LZO_BLOCK_SIZE));
+ if (!in_buf) {
+ error("Could not allocate input buffer");
+ goto exit_1;
+ }
+ }
+ in_buf_save = in_buf;
+
+ if (posp)
+ *posp = 0;
+
+ if (fill)
+ fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
+
+ if (!parse_header(input, &skip)) {
+ error("invalid header");
+ goto exit_2;
+ }
+ in_buf += skip;
+
+ if (posp)
+ *posp = skip;
+
+ for (;;) {
+ /* read uncompressed block size */
+ dst_len = get_unaligned_be32(in_buf);
+ in_buf += 4;
+
+ /* exit if last block */
+ if (dst_len == 0) {
+ if (posp)
+ *posp += 4;
+ break;
+ }
+
+ if (dst_len > LZO_BLOCK_SIZE) {
+ error("dest len longer than block size");
+ goto exit_2;
+ }
+
+ /* read compressed block size, and skip block checksum info */
+ src_len = get_unaligned_be32(in_buf);
+ in_buf += 8;
+
+ if (src_len <= 0 || src_len > dst_len) {
+ error("file corrupted");
+ goto exit_2;
+ }
+
+ /* decompress */
+ tmp = dst_len;
+ r = lzo1x_decompress_safe((u8 *) in_buf, src_len,
+ out_buf, &tmp);
+
+ if (r != LZO_E_OK || dst_len != tmp) {
+ error("Compressed data violation");
+ goto exit_2;
+ }
+
+ obytes_processed += dst_len;
+ if (flush)
+ flush(out_buf, dst_len);
+ if (output)
+ out_buf += dst_len;
+ if (posp)
+ *posp += src_len + 12;
+ if (fill) {
+ in_buf = in_buf_save;
+ fill(in_buf, lzo1x_worst_compress(LZO_BLOCK_SIZE));
+ } else
+ in_buf += src_len;
+ }
+
+exit_2:
+ if (!input)
+ free(in_buf);
+exit_1:
+ if (!output)
+ free(out_buf);
+exit:
+ return obytes_processed;
+}
+
+#define decompress unlzo
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index cf906201aecf..7d2f0b33e5a8 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -913,6 +913,9 @@ static void check_sync(struct device *dev,
ref->size);
}
+ if (entry->direction == DMA_BIDIRECTIONAL)
+ goto out;
+
if (ref->direction != entry->direction) {
err_printk(dev, entry, "DMA-API: device driver syncs "
"DMA memory with different direction "
@@ -923,9 +926,6 @@ static void check_sync(struct device *dev,
dir2name[ref->direction]);
}
- if (entry->direction == DMA_BIDIRECTIONAL)
- goto out;
-
if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
!(ref->direction == DMA_TO_DEVICE))
err_printk(dev, entry, "DMA-API: device driver syncs "
@@ -948,7 +948,6 @@ static void check_sync(struct device *dev,
out:
put_hash_bucket(bucket, &flags);
-
}
void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
diff --git a/lib/hweight.c b/lib/hweight.c
index 389424ecb129..63ee4eb1228d 100644
--- a/lib/hweight.c
+++ b/lib/hweight.c
@@ -11,11 +11,18 @@
unsigned int hweight32(unsigned int w)
{
+#ifdef ARCH_HAS_FAST_MULTIPLIER
+ w -= (w >> 1) & 0x55555555;
+ w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
+ w = (w + (w >> 4)) & 0x0f0f0f0f;
+ return (w * 0x01010101) >> 24;
+#else
unsigned int res = w - ((w >> 1) & 0x55555555);
res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
res = (res + (res >> 4)) & 0x0F0F0F0F;
res = res + (res >> 8);
return (res + (res >> 16)) & 0x000000FF;
+#endif
}
EXPORT_SYMBOL(hweight32);
diff --git a/lib/ioq.c b/lib/ioq.c
new file mode 100644
index 000000000000..4027848d7436
--- /dev/null
+++ b/lib/ioq.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * See include/linux/ioq.h for documentation
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ioq.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+static int ioq_iter_setpos(struct ioq_iterator *iter, u32 pos)
+{
+ struct ioq *ioq = iter->ioq;
+
+ BUG_ON(pos >= ioq->count);
+
+ iter->pos = pos;
+ iter->desc = &ioq->ring[pos];
+
+ return 0;
+}
+
+static inline u32 modulo_inc(u32 val, u32 mod)
+{
+ BUG_ON(val >= mod);
+
+ if (val == (mod - 1))
+ return 0;
+
+ return val + 1;
+}
+
+static inline int idx_full(struct ioq_ring_idx *idx)
+{
+ return idx->full && (idx->head == idx->tail);
+}
+
+int ioq_iter_seek(struct ioq_iterator *iter, enum ioq_seek_type type,
+ long offset, int flags)
+{
+ struct ioq_ring_idx *idx = iter->idx;
+ u32 pos;
+
+ switch (type) {
+ case ioq_seek_next:
+ pos = modulo_inc(iter->pos, iter->ioq->count);
+ break;
+ case ioq_seek_tail:
+ pos = le32_to_cpu(idx->tail);
+ break;
+ case ioq_seek_head:
+ pos = le32_to_cpu(idx->head);
+ break;
+ case ioq_seek_set:
+ if (offset >= iter->ioq->count)
+ return -1;
+ pos = offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ioq_iter_setpos(iter, pos);
+}
+EXPORT_SYMBOL_GPL(ioq_iter_seek);
+
+static int ioq_ring_count(struct ioq_ring_idx *idx, int count)
+{
+ u32 head = le32_to_cpu(idx->head);
+ u32 tail = le32_to_cpu(idx->tail);
+
+ if (idx->full && (head == tail))
+ return count;
+ else if (tail >= head)
+ return tail - head;
+ else
+ return (tail + count) - head;
+}
+
+static void idx_tail_push(struct ioq_ring_idx *idx, int count)
+{
+ u32 tail = modulo_inc(le32_to_cpu(idx->tail), count);
+ u32 head = le32_to_cpu(idx->head);
+
+ if (head == tail) {
+ rmb();
+
+ /*
+ * Setting full here may look racy, but note that we havent
+ * flipped the owner bit yet. So it is impossible for the
+ * remote locale to move head in such a way that this operation
+ * becomes invalid
+ */
+ idx->full = 1;
+ wmb();
+ }
+
+ idx->tail = cpu_to_le32(tail);
+}
+
+int ioq_iter_push(struct ioq_iterator *iter, int flags)
+{
+ struct ioq_ring_head *head_desc = iter->ioq->head_desc;
+ struct ioq_ring_idx *idx = iter->idx;
+ int ret;
+
+ /*
+ * Its only valid to push if we are currently pointed at the tail
+ */
+ if (iter->pos != le32_to_cpu(idx->tail) || iter->desc->sown != iter->ioq->locale)
+ return -EINVAL;
+
+ idx_tail_push(idx, iter->ioq->count);
+ if (iter->dualidx) {
+ idx_tail_push(&head_desc->idx[ioq_idxtype_inuse],
+ iter->ioq->count);
+ if (head_desc->idx[ioq_idxtype_inuse].tail !=
+ head_desc->idx[ioq_idxtype_valid].tail) {
+ SHM_SIGNAL_FAULT(iter->ioq->signal,
+ "Tails not synchronized");
+ return -EINVAL;
+ }
+ }
+
+ wmb(); /* the index must be visible before the sown, or signal */
+
+ if (iter->flipowner) {
+ iter->desc->sown = !iter->ioq->locale;
+ wmb(); /* sown must be visible before we signal */
+ }
+
+ ret = ioq_iter_seek(iter, ioq_seek_next, 0, flags);
+
+ if (iter->update)
+ ioq_signal(iter->ioq, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_push);
+
+int ioq_iter_pop(struct ioq_iterator *iter, int flags)
+{
+ struct ioq_ring_idx *idx = iter->idx;
+ int ret;
+
+ /*
+ * Its only valid to pop if we are currently pointed at the head
+ */
+ if (iter->pos != le32_to_cpu(idx->head) || iter->desc->sown != iter->ioq->locale)
+ return -EINVAL;
+
+ idx->head = cpu_to_le32(modulo_inc(le32_to_cpu(idx->head), iter->ioq->count));
+ wmb(); /* head must be visible before full */
+
+ if (idx->full) {
+ idx->full = 0;
+ wmb(); /* full must be visible before sown */
+ }
+
+ if (iter->flipowner) {
+ iter->desc->sown = !iter->ioq->locale;
+ wmb(); /* sown must be visible before we signal */
+ }
+
+ ret = ioq_iter_seek(iter, ioq_seek_next, 0, flags);
+
+ if (iter->update)
+ ioq_signal(iter->ioq, 0);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_pop);
+
+static struct ioq_ring_idx *idxtype_to_idx(struct ioq *ioq,
+ enum ioq_idx_type type)
+{
+ struct ioq_ring_idx *idx;
+
+ switch (type) {
+ case ioq_idxtype_valid:
+ case ioq_idxtype_inuse:
+ idx = &ioq->head_desc->idx[type];
+ break;
+ default:
+ panic("IOQ: illegal index type: %d", type);
+ break;
+ }
+
+ return idx;
+}
+
+int ioq_iter_init(struct ioq *ioq, struct ioq_iterator *iter,
+ enum ioq_idx_type type, int flags)
+{
+ iter->ioq = ioq;
+ iter->update = (flags & IOQ_ITER_AUTOUPDATE);
+ iter->flipowner = !(flags & IOQ_ITER_NOFLIPOWNER);
+ iter->pos = -1;
+ iter->desc = NULL;
+ iter->dualidx = 0;
+
+ if (type == ioq_idxtype_both) {
+ /*
+ * "both" is a special case, so we set the dualidx flag.
+ *
+ * However, we also just want to use the valid-index
+ * for normal processing, so override that here
+ */
+ type = ioq_idxtype_valid;
+ iter->dualidx = 1;
+ }
+
+ iter->idx = idxtype_to_idx(ioq, type);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ioq_iter_init);
+
+int ioq_count(struct ioq *ioq, enum ioq_idx_type type)
+{
+ return ioq_ring_count(idxtype_to_idx(ioq, type), ioq->count);
+}
+EXPORT_SYMBOL_GPL(ioq_count);
+
+int ioq_remain(struct ioq *ioq, enum ioq_idx_type type)
+{
+ int count = ioq_ring_count(idxtype_to_idx(ioq, type), ioq->count);
+
+ return ioq->count - count;
+}
+EXPORT_SYMBOL_GPL(ioq_remain);
+
+int ioq_size(struct ioq *ioq)
+{
+ return ioq->count;
+}
+EXPORT_SYMBOL_GPL(ioq_size);
+
+int ioq_full(struct ioq *ioq, enum ioq_idx_type type)
+{
+ struct ioq_ring_idx *idx = idxtype_to_idx(ioq, type);
+
+ return idx_full(idx);
+}
+EXPORT_SYMBOL_GPL(ioq_full);
+
+static void ioq_shm_signal(struct shm_signal_notifier *notifier)
+{
+ struct ioq *ioq = container_of(notifier, struct ioq, shm_notifier);
+
+ if (waitqueue_active(&ioq->wq))
+ wake_up(&ioq->wq);
+
+ if (ioq->notifier)
+ ioq->notifier->signal(ioq->notifier);
+}
+
+void ioq_init(struct ioq *ioq,
+ struct ioq_ops *ops,
+ enum ioq_locality locale,
+ struct ioq_ring_head *head,
+ struct shm_signal *signal,
+ size_t count)
+{
+ memset(ioq, 0, sizeof(*ioq));
+ kref_init(&ioq->kref);
+ init_waitqueue_head(&ioq->wq);
+
+ ioq->ops = ops;
+ ioq->locale = locale;
+ ioq->head_desc = head;
+ ioq->ring = &head->ring[0];
+ ioq->count = count;
+ ioq->signal = signal;
+
+ ioq->shm_notifier.signal = &ioq_shm_signal;
+ signal->notifier = &ioq->shm_notifier;
+}
+EXPORT_SYMBOL_GPL(ioq_init);
diff --git a/lib/lzo/lzo1x_decompress.c b/lib/lzo/lzo1x_decompress.c
index 5dc6b29c1575..f2fd09850223 100644
--- a/lib/lzo/lzo1x_decompress.c
+++ b/lib/lzo/lzo1x_decompress.c
@@ -11,11 +11,13 @@
* Richard Purdie <rpurdie@openedhand.com>
*/
+#ifndef STATIC
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lzo.h>
-#include <asm/byteorder.h>
+#endif
+
#include <asm/unaligned.h>
+#include <linux/lzo.h>
#include "lzodefs.h"
#define HAVE_IP(x, ip_end, ip) ((size_t)(ip_end - ip) < (x))
@@ -244,9 +246,10 @@ lookbehind_overrun:
*out_len = op - out;
return LZO_E_LOOKBEHIND_OVERRUN;
}
-
+#ifndef STATIC
EXPORT_SYMBOL_GPL(lzo1x_decompress_safe);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("LZO1X Decompressor");
+#endif
diff --git a/lib/rational.c b/lib/rational.c
index b3c099b5478e..3ed247b80662 100644
--- a/lib/rational.c
+++ b/lib/rational.c
@@ -7,6 +7,7 @@
*/
#include <linux/rational.h>
+#include <linux/module.h>
/*
* calculate best rational approximation for a given fraction
diff --git a/lib/shm_signal.c b/lib/shm_signal.c
new file mode 100644
index 000000000000..8d3e9b418a27
--- /dev/null
+++ b/lib/shm_signal.c
@@ -0,0 +1,196 @@
+/*
+ * Copyright 2009 Novell. All Rights Reserved.
+ *
+ * See include/linux/shm_signal.h for documentation
+ *
+ * Author:
+ * Gregory Haskins <ghaskins@novell.com>
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/shm_signal.h>
+
+MODULE_AUTHOR("Gregory Haskins");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+int shm_signal_enable(struct shm_signal *s, int flags)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+ unsigned long iflags;
+
+ spin_lock_irqsave(&s->lock, iflags);
+
+ irq->enabled = 1;
+ wmb();
+
+ if ((irq->dirty || irq->pending)
+ && !test_bit(shm_signal_in_wakeup, &s->flags)) {
+ rmb();
+ tasklet_schedule(&s->deferred_notify);
+ }
+
+ spin_unlock_irqrestore(&s->lock, iflags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_enable);
+
+int shm_signal_disable(struct shm_signal *s, int flags)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+
+ irq->enabled = 0;
+ wmb();
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_disable);
+
+/*
+ * signaling protocol:
+ *
+ * each side of the shm_signal has an "irq" structure with the following
+ * fields:
+ *
+ * - enabled: controlled by shm_signal_enable/disable() to mask/unmask
+ * the notification locally
+ * - dirty: indicates if the shared-memory is dirty or clean. This
+ * is updated regardless of the enabled/pending state so that
+ * the state is always accurately tracked.
+ * - pending: indicates if a signal is pending to the remote locale.
+ * This allows us to determine if a remote-notification is
+ * already in flight to optimize spurious notifications away.
+ */
+int shm_signal_inject(struct shm_signal *s, int flags)
+{
+ /* Load the irq structure from the other locale */
+ struct shm_signal_irq *irq = &s->desc->irq[!s->locale];
+
+ /*
+ * We always mark the remote side as dirty regardless of whether
+ * they need to be notified.
+ */
+ irq->dirty = 1;
+ wmb(); /* dirty must be visible before we test the pending state */
+
+ if (irq->enabled && !irq->pending) {
+ rmb();
+
+ /*
+ * If the remote side has enabled notifications, and we do
+ * not see a notification pending, we must inject a new one.
+ */
+ irq->pending = 1;
+ wmb(); /* make it visible before we do the injection */
+
+ s->ops->inject(s);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(shm_signal_inject);
+
+void _shm_signal_wakeup(struct shm_signal *s)
+{
+ struct shm_signal_irq *irq = &s->desc->irq[s->locale];
+ int dirty;
+ unsigned long flags;
+
+ spin_lock_irqsave(&s->lock, flags);
+
+ __set_bit(shm_signal_in_wakeup, &s->flags);
+
+ /*
+ * The outer loop protects against race conditions between
+ * irq->dirty and irq->pending updates
+ */
+ while (irq->enabled && (irq->dirty || irq->pending)) {
+
+ /*
+ * Run until we completely exhaust irq->dirty (it may
+ * be re-dirtied by the remote side while we are in the
+ * callback). We let "pending" remain untouched until we have
+ * processed them all so that the remote side knows we do not
+ * need a new notification (yet).
+ */
+ do {
+ irq->dirty = 0;
+ /* the unlock is an implicit wmb() for dirty = 0 */
+ spin_unlock_irqrestore(&s->lock, flags);
+
+ if (s->notifier)
+ s->notifier->signal(s->notifier);
+
+ spin_lock_irqsave(&s->lock, flags);
+ dirty = irq->dirty;
+ rmb();
+
+ } while (irq->enabled && dirty);
+
+ barrier();
+
+ /*
+ * We can finally acknowledge the notification by clearing
+ * "pending" after all of the dirty memory has been processed
+ * Races against this clearing are handled by the outer loop.
+ * Subsequent iterations of this loop will execute with
+ * pending=0 potentially leading to future spurious
+ * notifications, but this is an acceptable tradeoff as this
+ * will be rare and harmless.
+ */
+ irq->pending = 0;
+ wmb();
+
+ }
+
+ __clear_bit(shm_signal_in_wakeup, &s->flags);
+ spin_unlock_irqrestore(&s->lock, flags);
+
+}
+EXPORT_SYMBOL_GPL(_shm_signal_wakeup);
+
+void _shm_signal_release(struct kref *kref)
+{
+ struct shm_signal *s = container_of(kref, struct shm_signal, kref);
+
+ s->ops->release(s);
+}
+EXPORT_SYMBOL_GPL(_shm_signal_release);
+
+static void
+deferred_notify(unsigned long data)
+{
+ struct shm_signal *s = (struct shm_signal *)data;
+
+ _shm_signal_wakeup(s);
+}
+
+void shm_signal_init(struct shm_signal *s, enum shm_signal_locality locale,
+ struct shm_signal_ops *ops, struct shm_signal_desc *desc)
+{
+ memset(s, 0, sizeof(*s));
+ kref_init(&s->kref);
+ spin_lock_init(&s->lock);
+ tasklet_init(&s->deferred_notify,
+ deferred_notify,
+ (unsigned long)s);
+ s->locale = locale;
+ s->ops = ops;
+ s->desc = desc;
+}
+EXPORT_SYMBOL_GPL(shm_signal_init);
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index d4996cf46eb6..3a04976b6cb2 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -681,11 +681,18 @@ static char *mac_address_string(char *buf, char *end, u8 *addr,
char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")];
char *p = mac_addr;
int i;
+ char separator;
+
+ if (fmt[1] == 'F') { /* FDDI canonical format */
+ separator = '-';
+ } else {
+ separator = ':';
+ }
for (i = 0; i < 6; i++) {
p = pack_hex_byte(p, addr[i]);
if (fmt[0] == 'M' && i != 5)
- *p++ = ':';
+ *p++ = separator;
}
*p = '\0';
@@ -896,6 +903,8 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
* - 'M' For a 6-byte MAC address, it prints the address in the
* usual colon-separated hex notation
* - 'm' For a 6-byte MAC address, it prints the hex address without colons
+ * - 'MF' For a 6-byte MAC FDDI address, it prints the address
+ * with a dash-separated hex notation
* - 'I' [46] for IPv4/IPv6 addresses printed in the usual way
* IPv4 uses dot-separated decimal without leading 0's (1.2.3.4)
* IPv6 uses colon separated network-order 16 bit hex with leading 0's
@@ -903,7 +912,7 @@ static char *uuid_string(char *buf, char *end, const u8 *addr,
* IPv6 omits the colons (01020304...0f)
* IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
* - 'I6c' for IPv6 addresses printed as specified by
- * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
+ * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
* - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
* Options for %pU are:
@@ -939,6 +948,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return resource_string(buf, end, ptr, spec, fmt);
case 'M': /* Colon separated: 00:01:02:03:04:05 */
case 'm': /* Contiguous: 000102030405 */
+ /* [mM]F (FDDI, bit reversed) */
return mac_address_string(buf, end, ptr, spec, fmt);
case 'I': /* Formatted IP supported
* 4: 1.2.3.4
@@ -1188,7 +1198,7 @@ qualifier:
* %pI6 print an IPv6 address with colons
* %pi6 print an IPv6 address without colons
* %pI6c print an IPv6 address as specified by
- * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt
+ * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00
* %pU[bBlL] print a UUID/GUID in big or little endian using lower or upper
* case.
* %n is ignored
diff --git a/mm/Kconfig b/mm/Kconfig
index 17b8947aa7da..d34c2b971032 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -195,7 +195,7 @@ config BOUNCE
config NR_QUICK
int
depends on QUICKLIST
- default "2" if SUPERH || AVR32
+ default "2" if AVR32
default "1"
config VIRT_TO_BUS
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 0e8ca0347707..88f3655a9792 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -88,7 +88,8 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
#define K(x) ((x) << (PAGE_SHIFT - 10))
seq_printf(m,
"BdiWriteback: %8lu kB\n"
- "BdiReclaimable: %8lu kB\n"
+ "BdiDirty: %8lu kB\n"
+ "BdiUnstable: %8lu kB\n"
"BdiDirtyThresh: %8lu kB\n"
"DirtyThresh: %8lu kB\n"
"BackgroundThresh: %8lu kB\n"
@@ -102,7 +103,8 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
"wb_list: %8u\n"
"wb_cnt: %8u\n",
(unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
- (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
+ (unsigned long) K(bdi_stat(bdi, BDI_DIRTY)),
+ (unsigned long) K(bdi_stat(bdi, BDI_UNSTABLE)),
K(bdi_thresh), K(dirty_thresh),
K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
!list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
diff --git a/mm/filemap.c b/mm/filemap.c
index 96ac6b0eb6cb..458387d21d2d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -136,7 +136,7 @@ void __remove_from_page_cache(struct page *page)
*/
if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
- dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+ dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTY);
}
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 65f38c218207..e91b81b63670 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -402,7 +402,7 @@ static void clear_huge_page(struct page *page,
{
int i;
- if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+ if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
clear_gigantic_page(page, addr, sz);
return;
}
diff --git a/mm/maccess.c b/mm/maccess.c
index 9073695ff25f..4e348dbaecd7 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -14,7 +14,11 @@
* Safely read from address @src to the buffer at @dst. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-long probe_kernel_read(void *dst, void *src, size_t size)
+
+long __weak probe_kernel_read(void *dst, void *src, size_t size)
+ __attribute__((alias("__probe_kernel_read")));
+
+long __probe_kernel_read(void *dst, void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
@@ -39,7 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
* Safely write to address @dst from the buffer at @src. If a kernel fault
* happens, handle that and return -EFAULT.
*/
-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
+long __weak probe_kernel_write(void *dst, void *src, size_t size)
+ __attribute__((alias("__probe_kernel_write")));
+
+long __probe_kernel_write(void *dst, void *src, size_t size)
{
long ret;
mm_segment_t old_fs = get_fs();
diff --git a/mm/nommu.c b/mm/nommu.c
index 6f9248f89bde..17773862619b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -432,6 +432,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
/*
* Ok, looks good - let it rip.
*/
+ flush_icache_range(mm->brk, brk);
return mm->brk = brk;
}
@@ -1353,10 +1354,14 @@ unsigned long do_mmap_pgoff(struct file *file,
share:
add_vma_to_mm(current->mm, vma);
- up_write(&nommu_region_sem);
+ /* we flush the region from the icache only when the first executable
+ * mapping of it is made */
+ if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
+ flush_icache_range(region->vm_start, region->vm_end);
+ region->vm_icache_flushed = true;
+ }
- if (prot & PROT_EXEC)
- flush_icache_range(result, result + len);
+ up_write(&nommu_region_sem);
kleave(" = %lx", result);
return result;
@@ -1916,9 +1921,11 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
/* only read or write mappings where it is permitted */
if (write && vma->vm_flags & VM_MAYWRITE)
- len -= copy_to_user((void *) addr, buf, len);
+ copy_to_user_page(vma, NULL, addr,
+ (void *) addr, buf, len);
else if (!write && vma->vm_flags & VM_MAYREAD)
- len -= copy_from_user(buf, (void *) addr, len);
+ copy_from_user_page(vma, NULL, addr,
+ buf, (void *) addr, len);
else
len = 0;
} else {
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0b19943ecf8b..6a0aec7182c0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -272,8 +272,10 @@ static void clip_bdi_dirty_limit(struct backing_dev_info *bdi,
else
avail_dirty = 0;
- avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
+ avail_dirty += bdi_stat(bdi, BDI_DIRTY) +
bdi_stat(bdi, BDI_WRITEBACK);
+ if (bdi_cap_account_unstable(bdi))
+ avail_dirty += bdi_stat(bdi, BDI_UNSTABLE);
*pbdi_dirty = min(*pbdi_dirty, avail_dirty);
}
@@ -501,6 +503,7 @@ static void balance_dirty_pages(struct address_space *mapping,
.nr_to_write = write_chunk,
.range_cyclic = 1,
};
+ long bdi_nr_unstable = 0;
get_dirty_limits(&background_thresh, &dirty_thresh,
&bdi_thresh, bdi);
@@ -509,7 +512,11 @@ static void balance_dirty_pages(struct address_space *mapping,
global_page_state(NR_UNSTABLE_NFS);
nr_writeback = global_page_state(NR_WRITEBACK);
- bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+ bdi_nr_reclaimable = bdi_stat(bdi, BDI_DIRTY);
+ if (bdi_cap_account_unstable(bdi)) {
+ bdi_nr_unstable = bdi_stat(bdi, BDI_UNSTABLE);
+ bdi_nr_reclaimable += bdi_nr_unstable;
+ }
bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
@@ -537,6 +544,11 @@ static void balance_dirty_pages(struct address_space *mapping,
* up.
*/
if (bdi_nr_reclaimable > bdi_thresh) {
+ wbc.force_commit_unstable = 0;
+ /* Force NFS to also free up unstable writes. */
+ if (bdi_nr_unstable > bdi_nr_reclaimable / 2)
+ wbc.force_commit_unstable = 1;
+
writeback_inodes_wbc(&wbc);
pages_written += write_chunk - wbc.nr_to_write;
get_dirty_limits(&background_thresh, &dirty_thresh,
@@ -554,10 +566,16 @@ static void balance_dirty_pages(struct address_space *mapping,
* deltas.
*/
if (bdi_thresh < 2*bdi_stat_error(bdi)) {
- bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+ bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_DIRTY);
+ if (bdi_cap_account_unstable(bdi))
+ bdi_nr_reclaimable +=
+ bdi_stat_sum(bdi, BDI_UNSTABLE);
bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
} else if (bdi_nr_reclaimable) {
- bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+ bdi_nr_reclaimable = bdi_stat(bdi, BDI_DIRTY);
+ if (bdi_cap_account_unstable(bdi))
+ bdi_nr_reclaimable +=
+ bdi_stat(bdi, BDI_UNSTABLE);
bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
}
@@ -1079,7 +1097,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
{
if (mapping_cap_account_dirty(mapping)) {
__inc_zone_page_state(page, NR_FILE_DIRTY);
- __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+ __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTY);
task_dirty_inc(current);
task_io_account_write(PAGE_CACHE_SIZE);
}
@@ -1255,7 +1273,7 @@ int clear_page_dirty_for_io(struct page *page)
if (TestClearPageDirty(page)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
dec_bdi_stat(mapping->backing_dev_info,
- BDI_RECLAIMABLE);
+ BDI_DIRTY);
return 1;
}
return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4e9f5cc5fb59..6849e870de54 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1008,10 +1008,10 @@ static void drain_pages(unsigned int cpu)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- pset = zone_pcp(zone, cpu);
+ local_irq_save(flags);
+ pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
- local_irq_save(flags);
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
local_irq_restore(flags);
@@ -1095,7 +1095,6 @@ static void free_hot_cold_page(struct page *page, int cold)
arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
- pcp = &zone_pcp(zone, get_cpu())->pcp;
migratetype = get_pageblock_migratetype(page);
set_page_private(page, migratetype);
local_irq_save(flags);
@@ -1118,6 +1117,7 @@ static void free_hot_cold_page(struct page *page, int cold)
migratetype = MIGRATE_MOVABLE;
}
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
if (cold)
list_add_tail(&page->lru, &pcp->lists[migratetype]);
else
@@ -1130,7 +1130,6 @@ static void free_hot_cold_page(struct page *page, int cold)
out:
local_irq_restore(flags);
- put_cpu();
}
void free_hot_page(struct page *page)
@@ -1180,17 +1179,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
unsigned long flags;
struct page *page;
int cold = !!(gfp_flags & __GFP_COLD);
- int cpu;
again:
- cpu = get_cpu();
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
struct list_head *list;
- pcp = &zone_pcp(zone, cpu)->pcp;
- list = &pcp->lists[migratetype];
local_irq_save(flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
@@ -1231,7 +1228,6 @@ again:
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone);
local_irq_restore(flags);
- put_cpu();
VM_BUG_ON(bad_range(zone, page));
if (prep_new_page(page, order, gfp_flags))
@@ -1240,7 +1236,6 @@ again:
failed:
local_irq_restore(flags);
- put_cpu();
return NULL;
}
@@ -2179,7 +2174,7 @@ void show_free_areas(void)
for_each_online_cpu(cpu) {
struct per_cpu_pageset *pageset;
- pageset = zone_pcp(zone, cpu);
+ pageset = per_cpu_ptr(zone->pageset, cpu);
printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
cpu, pageset->pcp.high,
@@ -2744,10 +2739,29 @@ static void build_zonelist_cache(pg_data_t *pgdat)
#endif /* CONFIG_NUMA */
+/*
+ * Boot pageset table. One per cpu which is going to be used for all
+ * zones and all nodes. The parameters will be set in such a way
+ * that an item put on a list will immediately be handed over to
+ * the buddy list. This is safe since pageset manipulation is done
+ * with interrupts disabled.
+ *
+ * The boot_pagesets must be kept even after bootup is complete for
+ * unused processors and/or zones. They do play a role for bootstrapping
+ * hotplugged processors.
+ *
+ * zoneinfo_show() and maybe other functions do
+ * not check if the processor is online before following the pageset pointer.
+ * Other parts of the kernel may not check if the zone is available.
+ */
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
+static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+
/* return values int ....just for stop_machine() */
static int __build_all_zonelists(void *dummy)
{
int nid;
+ int cpu;
#ifdef CONFIG_NUMA
memset(node_load, 0, sizeof(node_load));
@@ -2758,6 +2772,23 @@ static int __build_all_zonelists(void *dummy)
build_zonelists(pgdat);
build_zonelist_cache(pgdat);
}
+
+ /*
+ * Initialize the boot_pagesets that are going to be used
+ * for bootstrapping processors. The real pagesets for
+ * each zone will be allocated later when the per cpu
+ * allocator is available.
+ *
+ * boot_pagesets are used also for bootstrapping offline
+ * cpus if the system is already booted because the pagesets
+ * are needed to initialize allocators on a specific cpu too.
+ * F.e. the percpu allocator needs the page allocator which
+ * needs the percpu allocator in order to allocate its pagesets
+ * (a chicken-egg dilemma).
+ */
+ for_each_possible_cpu(cpu)
+ setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+
return 0;
}
@@ -3095,121 +3126,33 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
pcp->batch = PAGE_SHIFT * 8;
}
-
-#ifdef CONFIG_NUMA
-/*
- * Boot pageset table. One per cpu which is going to be used for all
- * zones and all nodes. The parameters will be set in such a way
- * that an item put on a list will immediately be handed over to
- * the buddy list. This is safe since pageset manipulation is done
- * with interrupts disabled.
- *
- * Some NUMA counter updates may also be caught by the boot pagesets.
- *
- * The boot_pagesets must be kept even after bootup is complete for
- * unused processors and/or zones. They do play a role for bootstrapping
- * hotplugged processors.
- *
- * zoneinfo_show() and maybe other functions do
- * not check if the processor is online before following the pageset pointer.
- * Other parts of the kernel may not check if the zone is available.
- */
-static struct per_cpu_pageset boot_pageset[NR_CPUS];
-
/*
- * Dynamically allocate memory for the
- * per cpu pageset array in struct zone.
+ * Allocate per cpu pagesets and initialize them.
+ * Before this call only boot pagesets were available.
+ * Boot pagesets will no longer be used by this processorr
+ * after setup_per_cpu_pageset().
*/
-static int __cpuinit process_zones(int cpu)
+void __init setup_per_cpu_pageset(void)
{
- struct zone *zone, *dzone;
- int node = cpu_to_node(cpu);
-
- node_set_state(node, N_CPU); /* this node has a cpu */
+ struct zone *zone;
+ int cpu;
for_each_populated_zone(zone) {
- zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
- GFP_KERNEL, node);
- if (!zone_pcp(zone, cpu))
- goto bad;
-
- setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
-
- if (percpu_pagelist_fraction)
- setup_pagelist_highmark(zone_pcp(zone, cpu),
- (zone->present_pages / percpu_pagelist_fraction));
- }
-
- return 0;
-bad:
- for_each_zone(dzone) {
- if (!populated_zone(dzone))
- continue;
- if (dzone == zone)
- break;
- kfree(zone_pcp(dzone, cpu));
- zone_pcp(dzone, cpu) = &boot_pageset[cpu];
- }
- return -ENOMEM;
-}
+ zone->pageset = alloc_percpu(struct per_cpu_pageset);
-static inline void free_zone_pagesets(int cpu)
-{
- struct zone *zone;
-
- for_each_zone(zone) {
- struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
+ for_each_possible_cpu(cpu) {
+ struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
- /* Free per_cpu_pageset if it is slab allocated */
- if (pset != &boot_pageset[cpu])
- kfree(pset);
- zone_pcp(zone, cpu) = &boot_pageset[cpu];
- }
-}
+ setup_pageset(pcp, zone_batchsize(zone));
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- int cpu = (long)hcpu;
- int ret = NOTIFY_OK;
-
- switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- if (process_zones(cpu))
- ret = NOTIFY_BAD;
- break;
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- free_zone_pagesets(cpu);
- break;
- default:
- break;
+ if (percpu_pagelist_fraction)
+ setup_pagelist_highmark(pcp,
+ (zone->present_pages /
+ percpu_pagelist_fraction));
+ }
}
- return ret;
}
-static struct notifier_block __cpuinitdata pageset_notifier =
- { &pageset_cpuup_callback, NULL, 0 };
-
-void __init setup_per_cpu_pageset(void)
-{
- int err;
-
- /* Initialize per_cpu_pageset for cpu 0.
- * A cpuup callback will do this for every cpu
- * as it comes online
- */
- err = process_zones(smp_processor_id());
- BUG_ON(err);
- register_cpu_notifier(&pageset_notifier);
-}
-
-#endif
-
static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
@@ -3263,7 +3206,7 @@ static int __zone_pcp_update(void *data)
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
- pset = zone_pcp(zone, cpu);
+ pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
local_irq_save(flags);
@@ -3281,21 +3224,17 @@ void zone_pcp_update(struct zone *zone)
static __meminit void zone_pcp_init(struct zone *zone)
{
- int cpu;
- unsigned long batch = zone_batchsize(zone);
+ /*
+ * per cpu subsystem is not up at this point. The following code
+ * relies on the ability of the linker to provide the
+ * offset of a (static) per cpu variable into the per cpu area.
+ */
+ zone->pageset = &boot_pageset;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_NUMA
- /* Early boot. Slab allocator not functional yet */
- zone_pcp(zone, cpu) = &boot_pageset[cpu];
- setup_pageset(&boot_pageset[cpu],0);
-#else
- setup_pageset(zone_pcp(zone,cpu), batch);
-#endif
- }
if (zone->present_pages)
- printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%lu\n",
- zone->name, zone->present_pages, batch);
+ printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
+ zone->name, zone->present_pages,
+ zone_batchsize(zone));
}
__meminit int init_currently_empty_zone(struct zone *zone,
@@ -4809,10 +4748,11 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
if (!write || (ret == -EINVAL))
return ret;
for_each_populated_zone(zone) {
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
unsigned long high;
high = zone->present_pages / percpu_pagelist_fraction;
- setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+ setup_pagelist_highmark(
+ per_cpu_ptr(zone->pageset, cpu), high);
}
}
return 0;
diff --git a/mm/percpu.c b/mm/percpu.c
index 442010cc91c6..b336638d20e7 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -913,11 +913,10 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
int rs, re;
/* quick path, check whether it's empty already */
- pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
- if (rs == page_start && re == page_end)
- return;
- break;
- }
+ rs = page_start;
+ pcpu_next_unpop(chunk, &rs, &re, page_end);
+ if (rs == page_start && re == page_end)
+ return;
/* immutable chunks can't be depopulated */
WARN_ON(chunk->immutable);
@@ -968,11 +967,10 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
int rs, re, rc;
/* quick path, check whether all pages are already there */
- pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end) {
- if (rs == page_start && re == page_end)
- goto clear;
- break;
- }
+ rs = page_start;
+ pcpu_next_pop(chunk, &rs, &re, page_end);
+ if (rs == page_start && re == page_end)
+ goto clear;
/* need to allocate and map pages, this chunk can't be immutable */
WARN_ON(chunk->immutable);
@@ -1271,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
*/
void free_percpu(void *ptr)
{
- void *addr = __pcpu_ptr_to_addr(ptr);
+ void *addr;
struct pcpu_chunk *chunk;
unsigned long flags;
int off;
@@ -1279,6 +1277,8 @@ void free_percpu(void *ptr)
if (!ptr)
return;
+ addr = __pcpu_ptr_to_addr(ptr);
+
spin_lock_irqsave(&pcpu_lock, flags);
chunk = pcpu_chunk_addr_search(addr);
diff --git a/mm/slab.c b/mm/slab.c
index 7451bdacaf18..ff44eb202165 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -983,13 +983,11 @@ static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
if (limit > 1)
limit = 12;
- ac_ptr = kmalloc_node(memsize, gfp, node);
+ ac_ptr = kzalloc_node(memsize, gfp, node);
if (ac_ptr) {
for_each_node(i) {
- if (i == node || !node_online(i)) {
- ac_ptr[i] = NULL;
+ if (i == node || !node_online(i))
continue;
- }
ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
if (!ac_ptr[i]) {
for (i--; i >= 0; i--)
diff --git a/mm/slub.c b/mm/slub.c
index 8d71aaf888d7..9e86e6bd6161 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -217,10 +217,10 @@ static inline void sysfs_slab_remove(struct kmem_cache *s)
#endif
-static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
+static inline void stat(struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
- c->stat[si]++;
+ __this_cpu_inc(s->cpu_slab->stat[si]);
#endif
}
@@ -242,15 +242,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
#endif
}
-static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
-{
-#ifdef CONFIG_SMP
- return s->cpu_slab[cpu];
-#else
- return &s->cpu_slab;
-#endif
-}
-
/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
struct page *page, const void *object)
@@ -269,13 +260,6 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
}
-/*
- * Slow version of get and set free pointer.
- *
- * This version requires touching the cache lines of kmem_cache which
- * we avoid to do in the fast alloc free paths. There we obtain the offset
- * from the page struct.
- */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
return *(void **)(object + s->offset);
@@ -1124,7 +1108,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (!page)
return NULL;
- stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+ stat(s, ORDER_FALLBACK);
}
if (kmemcheck_enabled
@@ -1422,23 +1406,22 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
- struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
__ClearPageSlubFrozen(page);
if (page->inuse) {
if (page->freelist) {
add_partial(n, page, tail);
- stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
+ stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
} else {
- stat(c, DEACTIVATE_FULL);
+ stat(s, DEACTIVATE_FULL);
if (SLABDEBUG && PageSlubDebug(page) &&
(s->flags & SLAB_STORE_USER))
add_full(n, page);
}
slab_unlock(page);
} else {
- stat(c, DEACTIVATE_EMPTY);
+ stat(s, DEACTIVATE_EMPTY);
if (n->nr_partial < s->min_partial) {
/*
* Adding an empty slab to the partial slabs in order
@@ -1454,7 +1437,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
slab_unlock(page);
} else {
slab_unlock(page);
- stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
+ stat(s, FREE_SLAB);
discard_slab(s, page);
}
}
@@ -1469,7 +1452,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
int tail = 1;
if (page->freelist)
- stat(c, DEACTIVATE_REMOTE_FREES);
+ stat(s, DEACTIVATE_REMOTE_FREES);
/*
* Merge cpu freelist into slab freelist. Typically we get here
* because both freelists are empty. So this is unlikely
@@ -1482,10 +1465,10 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
/* Retrieve object from cpu_freelist */
object = c->freelist;
- c->freelist = c->freelist[c->offset];
+ c->freelist = get_freepointer(s, c->freelist);
/* And put onto the regular freelist */
- object[c->offset] = page->freelist;
+ set_freepointer(s, object, page->freelist);
page->freelist = object;
page->inuse--;
}
@@ -1495,7 +1478,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
- stat(c, CPUSLAB_FLUSH);
+ stat(s, CPUSLAB_FLUSH);
slab_lock(c->page);
deactivate_slab(s, c);
}
@@ -1507,7 +1490,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
*/
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
- struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
if (likely(c && c->page))
flush_slab(s, c);
@@ -1635,7 +1618,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
if (unlikely(!node_match(c, node)))
goto another_slab;
- stat(c, ALLOC_REFILL);
+ stat(s, ALLOC_REFILL);
load_freelist:
object = c->page->freelist;
@@ -1644,13 +1627,13 @@ load_freelist:
if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
goto debug;
- c->freelist = object[c->offset];
+ c->freelist = get_freepointer(s, object);
c->page->inuse = c->page->objects;
c->page->freelist = NULL;
c->node = page_to_nid(c->page);
unlock_out:
slab_unlock(c->page);
- stat(c, ALLOC_SLOWPATH);
+ stat(s, ALLOC_SLOWPATH);
return object;
another_slab:
@@ -1660,7 +1643,7 @@ new_slab:
new = get_partial(s, gfpflags, node);
if (new) {
c->page = new;
- stat(c, ALLOC_FROM_PARTIAL);
+ stat(s, ALLOC_FROM_PARTIAL);
goto load_freelist;
}
@@ -1673,8 +1656,8 @@ new_slab:
local_irq_disable();
if (new) {
- c = get_cpu_slab(s, smp_processor_id());
- stat(c, ALLOC_SLAB);
+ c = __this_cpu_ptr(s->cpu_slab);
+ stat(s, ALLOC_SLAB);
if (c->page)
flush_slab(s, c);
slab_lock(new);
@@ -1690,7 +1673,7 @@ debug:
goto another_slab;
c->page->inuse++;
- c->page->freelist = object[c->offset];
+ c->page->freelist = get_freepointer(s, object);
c->node = -1;
goto unlock_out;
}
@@ -1711,7 +1694,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
void **object;
struct kmem_cache_cpu *c;
unsigned long flags;
- unsigned int objsize;
gfpflags &= gfp_allowed_mask;
@@ -1722,24 +1704,23 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
return NULL;
local_irq_save(flags);
- c = get_cpu_slab(s, smp_processor_id());
- objsize = c->objsize;
- if (unlikely(!c->freelist || !node_match(c, node)))
+ c = __this_cpu_ptr(s->cpu_slab);
+ object = c->freelist;
+ if (unlikely(!object || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
- object = c->freelist;
- c->freelist = object[c->offset];
- stat(c, ALLOC_FASTPATH);
+ c->freelist = get_freepointer(s, object);
+ stat(s, ALLOC_FASTPATH);
}
local_irq_restore(flags);
if (unlikely(gfpflags & __GFP_ZERO) && object)
- memset(object, 0, objsize);
+ memset(object, 0, s->objsize);
- kmemcheck_slab_alloc(s, gfpflags, object, c->objsize);
- kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags);
+ kmemcheck_slab_alloc(s, gfpflags, object, s->objsize);
+ kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, gfpflags);
return object;
}
@@ -1794,26 +1775,25 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
* handling required then we can return immediately.
*/
static void __slab_free(struct kmem_cache *s, struct page *page,
- void *x, unsigned long addr, unsigned int offset)
+ void *x, unsigned long addr)
{
void *prior;
void **object = (void *)x;
- struct kmem_cache_cpu *c;
- c = get_cpu_slab(s, raw_smp_processor_id());
- stat(c, FREE_SLOWPATH);
+ stat(s, FREE_SLOWPATH);
slab_lock(page);
if (unlikely(SLABDEBUG && PageSlubDebug(page)))
goto debug;
checks_ok:
- prior = object[offset] = page->freelist;
+ prior = page->freelist;
+ set_freepointer(s, object, prior);
page->freelist = object;
page->inuse--;
if (unlikely(PageSlubFrozen(page))) {
- stat(c, FREE_FROZEN);
+ stat(s, FREE_FROZEN);
goto out_unlock;
}
@@ -1826,7 +1806,7 @@ checks_ok:
*/
if (unlikely(!prior)) {
add_partial(get_node(s, page_to_nid(page)), page, 1);
- stat(c, FREE_ADD_PARTIAL);
+ stat(s, FREE_ADD_PARTIAL);
}
out_unlock:
@@ -1839,10 +1819,10 @@ slab_empty:
* Slab still on the partial list.
*/
remove_partial(s, page);
- stat(c, FREE_REMOVE_PARTIAL);
+ stat(s, FREE_REMOVE_PARTIAL);
}
slab_unlock(page);
- stat(c, FREE_SLAB);
+ stat(s, FREE_SLAB);
discard_slab(s, page);
return;
@@ -1872,17 +1852,17 @@ static __always_inline void slab_free(struct kmem_cache *s,
kmemleak_free_recursive(x, s->flags);
local_irq_save(flags);
- c = get_cpu_slab(s, smp_processor_id());
- kmemcheck_slab_free(s, object, c->objsize);
- debug_check_no_locks_freed(object, c->objsize);
+ c = __this_cpu_ptr(s->cpu_slab);
+ kmemcheck_slab_free(s, object, s->objsize);
+ debug_check_no_locks_freed(object, s->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(object, c->objsize);
+ debug_check_no_obj_freed(object, s->objsize);
if (likely(page == c->page && c->node >= 0)) {
- object[c->offset] = c->freelist;
+ set_freepointer(s, object, c->freelist);
c->freelist = object;
- stat(c, FREE_FASTPATH);
+ stat(s, FREE_FASTPATH);
} else
- __slab_free(s, page, x, addr, c->offset);
+ __slab_free(s, page, x, addr);
local_irq_restore(flags);
}
@@ -2069,19 +2049,6 @@ static unsigned long calculate_alignment(unsigned long flags,
return ALIGN(align, sizeof(void *));
}
-static void init_kmem_cache_cpu(struct kmem_cache *s,
- struct kmem_cache_cpu *c)
-{
- c->page = NULL;
- c->freelist = NULL;
- c->node = 0;
- c->offset = s->offset / sizeof(void *);
- c->objsize = s->objsize;
-#ifdef CONFIG_SLUB_STATS
- memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
-#endif
-}
-
static void
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
@@ -2095,130 +2062,24 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
#endif
}
-#ifdef CONFIG_SMP
-/*
- * Per cpu array for per cpu structures.
- *
- * The per cpu array places all kmem_cache_cpu structures from one processor
- * close together meaning that it becomes possible that multiple per cpu
- * structures are contained in one cacheline. This may be particularly
- * beneficial for the kmalloc caches.
- *
- * A desktop system typically has around 60-80 slabs. With 100 here we are
- * likely able to get per cpu structures for all caches from the array defined
- * here. We must be able to cover all kmalloc caches during bootstrap.
- *
- * If the per cpu array is exhausted then fall back to kmalloc
- * of individual cachelines. No sharing is possible then.
- */
-#define NR_KMEM_CACHE_CPU 100
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu [NR_KMEM_CACHE_CPU],
- kmem_cache_cpu);
-
-static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
-
-static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
- int cpu, gfp_t flags)
-{
- struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
-
- if (c)
- per_cpu(kmem_cache_cpu_free, cpu) =
- (void *)c->freelist;
- else {
- /* Table overflow: So allocate ourselves */
- c = kmalloc_node(
- ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
- flags, cpu_to_node(cpu));
- if (!c)
- return NULL;
- }
-
- init_kmem_cache_cpu(s, c);
- return c;
-}
-
-static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
-{
- if (c < per_cpu(kmem_cache_cpu, cpu) ||
- c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
- kfree(c);
- return;
- }
- c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
- per_cpu(kmem_cache_cpu_free, cpu) = c;
-}
-
-static void free_kmem_cache_cpus(struct kmem_cache *s)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
- if (c) {
- s->cpu_slab[cpu] = NULL;
- free_kmem_cache_cpu(c, cpu);
- }
- }
-}
-
-static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
- int cpu;
-
- for_each_online_cpu(cpu) {
- struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
- if (c)
- continue;
-
- c = alloc_kmem_cache_cpu(s, cpu, flags);
- if (!c) {
- free_kmem_cache_cpus(s);
- return 0;
- }
- s->cpu_slab[cpu] = c;
- }
- return 1;
-}
-
-/*
- * Initialize the per cpu array.
- */
-static void init_alloc_cpu_cpu(int cpu)
-{
- int i;
-
- if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
- return;
-
- for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
- free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
-
- cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
-}
-
-static void __init init_alloc_cpu(void)
+static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
{
- int cpu;
-
- for_each_online_cpu(cpu)
- init_alloc_cpu_cpu(cpu);
- }
+ if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
+ /*
+ * Boot time creation of the kmalloc array. Use static per cpu data
+ * since the per cpu allocator is not available yet.
+ */
+ s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
+ else
+ s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#else
-static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
-static inline void init_alloc_cpu(void) {}
+ if (!s->cpu_slab)
+ return 0;
-static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
-{
- init_kmem_cache_cpu(s, &s->cpu_slab);
return 1;
}
-#endif
#ifdef CONFIG_NUMA
/*
@@ -2502,6 +2363,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
return 1;
+
free_kmem_cache_nodes(s);
error:
if (flags & SLAB_PANIC)
@@ -2609,9 +2471,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
int node;
flush_all(s);
-
+ free_percpu(s->cpu_slab);
/* Attempt to free all objects */
- free_kmem_cache_cpus(s);
for_each_node_state(node, N_NORMAL_MEMORY) {
struct kmem_cache_node *n = get_node(s, node);
@@ -2651,7 +2512,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[KMALLOC_CACHES] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
@@ -2741,6 +2602,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
char *text;
size_t realsize;
unsigned long slabflags;
+ int i;
s = kmalloc_caches_dma[index];
if (s)
@@ -2760,7 +2622,14 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
realsize = kmalloc_caches[index].objsize;
text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
(unsigned int)realsize);
- s = kmalloc(kmem_size, flags & ~SLUB_DMA);
+
+ s = NULL;
+ for (i = 0; i < KMALLOC_CACHES; i++)
+ if (!kmalloc_caches[i].size)
+ break;
+
+ BUG_ON(i >= KMALLOC_CACHES);
+ s = kmalloc_caches + i;
/*
* Must defer sysfs creation to a workqueue because we don't know
@@ -2774,7 +2643,7 @@ static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
if (!s || !text || !kmem_cache_open(s, flags, text,
realsize, ARCH_KMALLOC_MINALIGN, slabflags, NULL)) {
- kfree(s);
+ s->size = 0;
kfree(text);
goto unlock_out;
}
@@ -3086,7 +2955,7 @@ static void slab_mem_offline_callback(void *arg)
/*
* if n->nr_slabs > 0, slabs still exist on the node
* that is going down. We were unable to free them,
- * and offline_pages() function shoudn't call this
+ * and offline_pages() function shouldn't call this
* callback. So, we must fail.
*/
BUG_ON(slabs_node(s, offline_node));
@@ -3176,8 +3045,6 @@ void __init kmem_cache_init(void)
int i;
int caches = 0;
- init_alloc_cpu();
-
#ifdef CONFIG_NUMA
/*
* Must first have the slab cache available for the allocations of the
@@ -3261,8 +3128,10 @@ void __init kmem_cache_init(void)
#ifdef CONFIG_SMP
register_cpu_notifier(&slab_notifier);
- kmem_size = offsetof(struct kmem_cache, cpu_slab) +
- nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
+#endif
+#ifdef CONFIG_NUMA
+ kmem_size = offsetof(struct kmem_cache, node) +
+ nr_node_ids * sizeof(struct kmem_cache_node *);
#else
kmem_size = sizeof(struct kmem_cache);
#endif
@@ -3351,22 +3220,12 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
down_write(&slub_lock);
s = find_mergeable(size, align, flags, name, ctor);
if (s) {
- int cpu;
-
s->refcount++;
/*
* Adjust the object sizes so that we clear
* the complete object on kzalloc.
*/
s->objsize = max(s->objsize, (int)size);
-
- /*
- * And then we need to update the object size in the
- * per cpu structures
- */
- for_each_online_cpu(cpu)
- get_cpu_slab(s, cpu)->objsize = s->objsize;
-
s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
up_write(&slub_lock);
@@ -3420,29 +3279,15 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
unsigned long flags;
switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- init_alloc_cpu_cpu(cpu);
- down_read(&slub_lock);
- list_for_each_entry(s, &slab_caches, list)
- s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
- GFP_KERNEL);
- up_read(&slub_lock);
- break;
-
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
case CPU_DEAD:
case CPU_DEAD_FROZEN:
down_read(&slub_lock);
list_for_each_entry(s, &slab_caches, list) {
- struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
local_irq_save(flags);
__flush_cpu_slab(s, cpu);
local_irq_restore(flags);
- free_kmem_cache_cpu(c, cpu);
- s->cpu_slab[cpu] = NULL;
}
up_read(&slub_lock);
break;
@@ -3928,7 +3773,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
int cpu;
for_each_possible_cpu(cpu) {
- struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
if (!c || c->node < 0)
continue;
@@ -4353,7 +4198,7 @@ static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
return -ENOMEM;
for_each_online_cpu(cpu) {
- unsigned x = get_cpu_slab(s, cpu)->stat[si];
+ unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
data[cpu] = x;
sum += x;
@@ -4376,7 +4221,7 @@ static void clear_stat(struct kmem_cache *s, enum stat_item si)
int cpu;
for_each_online_cpu(cpu)
- get_cpu_slab(s, cpu)->stat[si] = 0;
+ per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
}
#define STAT_ATTR(si, text) \
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6c0585b16418..dc2039edef00 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -13,6 +13,7 @@
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
+#include <linux/kdb.h>
#include <linux/namei.h>
#include <linux/shm.h>
#include <linux/blkdev.h>
@@ -2056,12 +2057,11 @@ out:
return error;
}
-void si_swapinfo(struct sysinfo *val)
+void __si_swapinfo(struct sysinfo *val)
{
unsigned int type;
unsigned long nr_to_be_unused = 0;
- spin_lock(&swap_lock);
for (type = 0; type < nr_swapfiles; type++) {
struct swap_info_struct *si = swap_info[type];
@@ -2070,6 +2070,12 @@ void si_swapinfo(struct sysinfo *val)
}
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
+}
+
+void si_swapinfo(struct sysinfo *val)
+{
+ spin_lock(&swap_lock);
+ __si_swapinfo(val);
spin_unlock(&swap_lock);
}
diff --git a/mm/truncate.c b/mm/truncate.c
index 342deee22684..b0ce8fbc554b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -75,7 +75,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
if (mapping && mapping_cap_account_dirty(mapping)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
dec_bdi_stat(mapping->backing_dev_info,
- BDI_RECLAIMABLE);
+ BDI_DIRTY);
if (account_size)
task_io_account_cancelled_write(account_size);
}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6051fbab67ba..fc5aa183bc45 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -139,7 +139,8 @@ static void refresh_zone_stat_thresholds(void)
threshold = calculate_threshold(zone);
for_each_online_cpu(cpu)
- zone_pcp(zone, cpu)->stat_threshold = threshold;
+ per_cpu_ptr(zone->pageset, cpu)->stat_threshold
+ = threshold;
}
}
@@ -149,7 +150,8 @@ static void refresh_zone_stat_thresholds(void)
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
int delta)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
+
s8 *p = pcp->vm_stat_diff + item;
long x;
@@ -202,7 +204,7 @@ EXPORT_SYMBOL(mod_zone_page_state);
*/
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
s8 *p = pcp->vm_stat_diff + item;
(*p)++;
@@ -223,7 +225,7 @@ EXPORT_SYMBOL(__inc_zone_page_state);
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
- struct per_cpu_pageset *pcp = zone_pcp(zone, smp_processor_id());
+ struct per_cpu_pageset *pcp = this_cpu_ptr(zone->pageset);
s8 *p = pcp->vm_stat_diff + item;
(*p)--;
@@ -300,7 +302,7 @@ void refresh_cpu_vm_stats(int cpu)
for_each_populated_zone(zone) {
struct per_cpu_pageset *p;
- p = zone_pcp(zone, cpu);
+ p = per_cpu_ptr(zone->pageset, cpu);
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (p->vm_stat_diff[i]) {
@@ -741,7 +743,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
- pageset = zone_pcp(zone, i);
+ pageset = per_cpu_ptr(zone->pageset, i);
seq_printf(m,
"\n cpu: %i"
"\n count: %i"
@@ -906,6 +908,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
start_cpu_timer(cpu);
+ node_set_state(cpu_to_node(cpu), N_CPU);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index e75a2f3b10af..c0316e0ca6e8 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -14,6 +14,7 @@ int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
if (skb_bond_should_drop(skb))
goto drop;
+ skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
@@ -85,6 +86,7 @@ vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
if (skb_bond_should_drop(skb))
goto drop;
+ skb->skb_iif = skb->dev->ifindex;
__vlan_hwaccel_put_tag(skb, vlan_tci);
skb->dev = vlan_group_get_device(grp, vlan_tci & VLAN_VID_MASK);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index b7889782047e..77a49ffdd0ef 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -263,11 +263,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
vhdr->h_vlan_TCI = htons(vlan_tci);
/*
- * Set the protocol type. For a packet of type ETH_P_802_3 we
- * put the length in here instead. It is up to the 802.2
- * layer to carry protocol information.
+ * Set the protocol type. For a packet of type ETH_P_802_3/2 we
+ * put the length in here instead.
*/
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
vhdr->h_vlan_encapsulated_proto = htons(type);
else
vhdr->h_vlan_encapsulated_proto = htons(len);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index bd1c65425d4f..0b7f262cd148 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1406,6 +1406,9 @@ static int do_ebt_set_ctl(struct sock *sk,
{
int ret;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
switch(cmd) {
case EBT_SO_SET_ENTRIES:
ret = do_replace(sock_net(sk), user, len);
@@ -1425,6 +1428,9 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
struct ebt_replace tmp;
struct ebt_table *t;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
if (copy_from_user(&tmp, user, sizeof(tmp)))
return -EFAULT;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 51adc4c2b860..bc18b084ffdb 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -77,8 +77,8 @@ static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");
-HLIST_HEAD(can_rx_dev_list);
-static struct dev_rcv_lists can_rx_alldev_list;
+/* receive filters subscribed for 'all' CAN devices */
+struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);
static struct kmem_cache *rcv_cache __read_mostly;
@@ -292,28 +292,10 @@ EXPORT_SYMBOL(can_send);
static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
- struct dev_rcv_lists *d = NULL;
- struct hlist_node *n;
-
- /*
- * find receive list for this device
- *
- * The hlist_for_each_entry*() macros curse through the list
- * using the pointer variable n and set d to the containing
- * struct in each list iteration. Therefore, after list
- * iteration, d is unmodified when the list is empty, and it
- * points to last list element, when the list is non-empty
- * but no match in the loop body is found. I.e. d is *not*
- * NULL when no match is found. We can, however, use the
- * cursor variable n to decide if a match was found.
- */
-
- hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
- if (d->dev == dev)
- break;
- }
-
- return n ? d : NULL;
+ if (!dev)
+ return &can_rx_alldev_list;
+ else
+ return (struct dev_rcv_lists *)dev->ml_priv;
}
/**
@@ -468,16 +450,6 @@ int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
EXPORT_SYMBOL(can_rx_register);
/*
- * can_rx_delete_device - rcu callback for dev_rcv_lists structure removal
- */
-static void can_rx_delete_device(struct rcu_head *rp)
-{
- struct dev_rcv_lists *d = container_of(rp, struct dev_rcv_lists, rcu);
-
- kfree(d);
-}
-
-/*
* can_rx_delete_receiver - rcu callback for single receiver entry removal
*/
static void can_rx_delete_receiver(struct rcu_head *rp)
@@ -541,7 +513,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
"dev %s, id %03X, mask %03X\n",
DNAME(dev), can_id, mask);
r = NULL;
- d = NULL;
goto out;
}
@@ -552,10 +523,10 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
can_pstats.rcv_entries--;
/* remove device structure requested by NETDEV_UNREGISTER */
- if (d->remove_on_zero_entries && !d->entries)
- hlist_del_rcu(&d->list);
- else
- d = NULL;
+ if (d->remove_on_zero_entries && !d->entries) {
+ kfree(d);
+ dev->ml_priv = NULL;
+ }
out:
spin_unlock(&can_rcvlists_lock);
@@ -563,10 +534,6 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
/* schedule the receiver item for deletion */
if (r)
call_rcu(&r->rcu, can_rx_delete_receiver);
-
- /* schedule the device structure for deletion */
- if (d)
- call_rcu(&d->rcu, can_rx_delete_device);
}
EXPORT_SYMBOL(can_rx_unregister);
@@ -780,48 +747,35 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
case NETDEV_REGISTER:
- /*
- * create new dev_rcv_lists for this device
- *
- * N.B. zeroing the struct is the correct initialization
- * for the embedded hlist_head structs.
- * Another list type, e.g. list_head, would require
- * explicit initialization.
- */
-
+ /* create new dev_rcv_lists for this device */
d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d) {
printk(KERN_ERR
"can: allocation of receive list failed\n");
return NOTIFY_DONE;
}
- d->dev = dev;
-
- spin_lock(&can_rcvlists_lock);
- hlist_add_head_rcu(&d->list, &can_rx_dev_list);
- spin_unlock(&can_rcvlists_lock);
+ BUG_ON(dev->ml_priv);
+ dev->ml_priv = d;
break;
case NETDEV_UNREGISTER:
spin_lock(&can_rcvlists_lock);
- d = find_dev_rcv_lists(dev);
+ d = dev->ml_priv;
if (d) {
- if (d->entries) {
+ if (d->entries)
d->remove_on_zero_entries = 1;
- d = NULL;
- } else
- hlist_del_rcu(&d->list);
+ else {
+ kfree(d);
+ dev->ml_priv = NULL;
+ }
} else
printk(KERN_ERR "can: notifier: receive list not "
"found for dev %s\n", dev->name);
spin_unlock(&can_rcvlists_lock);
- if (d)
- call_rcu(&d->rcu, can_rx_delete_device);
-
break;
}
@@ -853,21 +807,13 @@ static __init int can_init(void)
{
printk(banner);
+ memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
+
rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
0, 0, NULL);
if (!rcv_cache)
return -ENOMEM;
- /*
- * Insert can_rx_alldev_list for reception on all devices.
- * This struct is zero initialized which is correct for the
- * embedded hlist heads, the dev pointer, and the entries counter.
- */
-
- spin_lock(&can_rcvlists_lock);
- hlist_add_head_rcu(&can_rx_alldev_list.list, &can_rx_dev_list);
- spin_unlock(&can_rcvlists_lock);
-
if (stats_timer) {
/* the statistics are updated every second (timer triggered) */
setup_timer(&can_stattimer, can_stat_update, 0);
@@ -887,8 +833,7 @@ static __init int can_init(void)
static __exit void can_exit(void)
{
- struct dev_rcv_lists *d;
- struct hlist_node *n, *next;
+ struct net_device *dev;
if (stats_timer)
del_timer(&can_stattimer);
@@ -900,14 +845,19 @@ static __exit void can_exit(void)
unregister_netdevice_notifier(&can_netdev_notifier);
sock_unregister(PF_CAN);
- /* remove can_rx_dev_list */
- spin_lock(&can_rcvlists_lock);
- hlist_del(&can_rx_alldev_list.list);
- hlist_for_each_entry_safe(d, n, next, &can_rx_dev_list, list) {
- hlist_del(&d->list);
- kfree(d);
+ /* remove created dev_rcv_lists from still registered CAN devices */
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ if (dev->type == ARPHRD_CAN && dev->ml_priv){
+
+ struct dev_rcv_lists *d = dev->ml_priv;
+
+ BUG_ON(d->entries);
+ kfree(d);
+ dev->ml_priv = NULL;
+ }
}
- spin_unlock(&can_rcvlists_lock);
+ rcu_read_unlock();
rcu_barrier(); /* Wait for completion of call_rcu()'s */
diff --git a/net/can/af_can.h b/net/can/af_can.h
index 18f91e37cc30..34253b84e30f 100644
--- a/net/can/af_can.h
+++ b/net/can/af_can.h
@@ -63,10 +63,8 @@ struct receiver {
enum { RX_ERR, RX_ALL, RX_FIL, RX_INV, RX_EFF, RX_MAX };
+/* per device receive filters linked at dev->ml_priv */
struct dev_rcv_lists {
- struct hlist_node list;
- struct rcu_head rcu;
- struct net_device *dev;
struct hlist_head rx[RX_MAX];
struct hlist_head rx_sff[0x800];
int remove_on_zero_entries;
diff --git a/net/can/proc.c b/net/can/proc.c
index 9b9ad29be567..f4265cc9c3fb 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -45,6 +45,7 @@
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
+#include <linux/if_arp.h>
#include <linux/can/core.h>
#include "af_can.h"
@@ -84,6 +85,9 @@ static const char rx_list_name[][8] = {
[RX_EFF] = "rx_eff",
};
+/* receive filters subscribed for 'all' CAN devices */
+extern struct dev_rcv_lists can_rx_alldev_list;
+
/*
* af_can statistics stuff
*/
@@ -190,10 +194,6 @@ void can_stat_update(unsigned long data)
/*
* proc read functions
- *
- * From known use-cases we expect about 10 entries in a receive list to be
- * printed in the proc_fs. So PAGE_SIZE is definitely enough space here.
- *
*/
static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
@@ -202,7 +202,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
struct receiver *r;
struct hlist_node *n;
- rcu_read_lock();
hlist_for_each_entry_rcu(r, n, rx_list, list) {
char *fmt = (r->can_id & CAN_EFF_FLAG)?
" %-5s %08X %08x %08x %08x %8ld %s\n" :
@@ -212,7 +211,6 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
(unsigned long)r->func, (unsigned long)r->data,
r->matches, r->ident);
}
- rcu_read_unlock();
}
static void can_print_recv_banner(struct seq_file *m)
@@ -346,24 +344,39 @@ static const struct file_operations can_version_proc_fops = {
.release = single_release,
};
+static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
+ struct net_device *dev,
+ struct dev_rcv_lists *d)
+{
+ if (!hlist_empty(&d->rx[idx])) {
+ can_print_recv_banner(m);
+ can_print_rcvlist(m, &d->rx[idx], dev);
+ } else
+ seq_printf(m, " (%s: no entry)\n", DNAME(dev));
+
+}
+
static int can_rcvlist_proc_show(struct seq_file *m, void *v)
{
/* double cast to prevent GCC warning */
int idx = (int)(long)m->private;
+ struct net_device *dev;
struct dev_rcv_lists *d;
- struct hlist_node *n;
seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
rcu_read_lock();
- hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
- if (!hlist_empty(&d->rx[idx])) {
- can_print_recv_banner(m);
- can_print_rcvlist(m, &d->rx[idx], d->dev);
- } else
- seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
+ /* receive list for 'all' CAN devices (dev == NULL) */
+ d = &can_rx_alldev_list;
+ can_rcvlist_proc_show_one(m, idx, NULL, d);
+
+ /* receive list for registered CAN devices */
+ for_each_netdev_rcu(&init_net, dev) {
+ if (dev->type == ARPHRD_CAN && dev->ml_priv)
+ can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
}
+
rcu_read_unlock();
seq_putc(m, '\n');
@@ -383,34 +396,50 @@ static const struct file_operations can_rcvlist_proc_fops = {
.release = single_release,
};
+static inline void can_rcvlist_sff_proc_show_one(struct seq_file *m,
+ struct net_device *dev,
+ struct dev_rcv_lists *d)
+{
+ int i;
+ int all_empty = 1;
+
+ /* check wether at least one list is non-empty */
+ for (i = 0; i < 0x800; i++)
+ if (!hlist_empty(&d->rx_sff[i])) {
+ all_empty = 0;
+ break;
+ }
+
+ if (!all_empty) {
+ can_print_recv_banner(m);
+ for (i = 0; i < 0x800; i++) {
+ if (!hlist_empty(&d->rx_sff[i]))
+ can_print_rcvlist(m, &d->rx_sff[i], dev);
+ }
+ } else
+ seq_printf(m, " (%s: no entry)\n", DNAME(dev));
+}
+
static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
{
+ struct net_device *dev;
struct dev_rcv_lists *d;
- struct hlist_node *n;
/* RX_SFF */
seq_puts(m, "\nreceive list 'rx_sff':\n");
rcu_read_lock();
- hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
- int i, all_empty = 1;
- /* check wether at least one list is non-empty */
- for (i = 0; i < 0x800; i++)
- if (!hlist_empty(&d->rx_sff[i])) {
- all_empty = 0;
- break;
- }
-
- if (!all_empty) {
- can_print_recv_banner(m);
- for (i = 0; i < 0x800; i++) {
- if (!hlist_empty(&d->rx_sff[i]))
- can_print_rcvlist(m, &d->rx_sff[i],
- d->dev);
- }
- } else
- seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
+
+ /* sff receive list for 'all' CAN devices (dev == NULL) */
+ d = &can_rx_alldev_list;
+ can_rcvlist_sff_proc_show_one(m, NULL, d);
+
+ /* sff receive list for registered CAN devices */
+ for_each_netdev_rcu(&init_net, dev) {
+ if (dev->type == ARPHRD_CAN && dev->ml_priv)
+ can_rcvlist_sff_proc_show_one(m, dev, dev->ml_priv);
}
+
rcu_read_unlock();
seq_putc(m, '\n');
diff --git a/net/core/dev.c b/net/core/dev.c
index be9924f60ec3..a008f6987a95 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1853,6 +1853,14 @@ gso:
skb->next = nskb->next;
nskb->next = NULL;
+
+ /*
+ * If device doesnt need nskb->dst, release it right now while
+ * its hot in this cpu cache
+ */
+ if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
+ skb_dst_drop(nskb);
+
rc = ops->ndo_start_xmit(nskb, dev);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
@@ -2422,6 +2430,7 @@ int netif_receive_skb(struct sk_buff *skb)
struct packet_type *ptype, *pt_prev;
struct net_device *orig_dev;
struct net_device *null_or_orig;
+ struct net_device *null_or_bond;
int ret = NET_RX_DROP;
__be16 type;
@@ -2487,12 +2496,24 @@ ncls:
if (!skb)
goto out;
+ /*
+ * Make sure frames received on VLAN interfaces stacked on
+ * bonding interfaces still make their way to any base bonding
+ * device that may have registered for a specific ptype. The
+ * handler may have to adjust skb->dev and orig_dev.
+ */
+ null_or_bond = NULL;
+ if ((skb->dev->priv_flags & IFF_802_1Q_VLAN) &&
+ (vlan_dev_real_dev(skb->dev)->priv_flags & IFF_BONDING)) {
+ null_or_bond = vlan_dev_real_dev(skb->dev);
+ }
+
type = skb->protocol;
list_for_each_entry_rcu(ptype,
&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
- if (ptype->type == type &&
- (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
- ptype->dev == orig_dev)) {
+ if (ptype->type == type && (ptype->dev == null_or_orig ||
+ ptype->dev == skb->dev || ptype->dev == orig_dev ||
+ ptype->dev == null_or_bond)) {
if (pt_prev)
ret = deliver_skb(skb, pt_prev, orig_dev);
pt_prev = ptype;
@@ -3185,7 +3206,7 @@ static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
const struct net_device_stats *stats = dev_get_stats(dev);
- seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+ seq_printf(seq, "%6s: %7lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
stats->rx_errors,
diff --git a/net/core/sock.c b/net/core/sock.c
index 76ff58d43e26..e1f6f225f012 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1205,6 +1205,10 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
if (newsk->sk_prot->sockets_allocated)
percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+
+ if (sock_flag(newsk, SOCK_TIMESTAMP) ||
+ sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE))
+ net_enable_timestamp();
}
out:
return newsk;
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index dd3db88f8f0a..205a1c12f3c0 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -73,8 +73,8 @@ __setup("ether=", netdev_boot_setup);
* @len: packet length (<= skb->len)
*
*
- * Set the protocol type. For a packet of type ETH_P_802_3 we put the length
- * in here instead. It is up to the 802.2 layer to carry protocol information.
+ * Set the protocol type. For a packet of type ETH_P_802_3/2 we put the length
+ * in here instead.
*/
int eth_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
@@ -82,7 +82,7 @@ int eth_header(struct sk_buff *skb, struct net_device *dev,
{
struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
else
eth->h_proto = htons(len);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index c95cd93acf29..078709233bc4 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -70,6 +70,7 @@
* bonding can change the skb before
* sending (e.g. insert 8021q tag).
* Harald Welte : convert to make use of jenkins hash
+ * Jesper D. Brouer: Proxy ARP PVLAN RFC 3069 support.
*/
#include <linux/module.h>
@@ -524,12 +525,15 @@ int arp_bind_neighbour(struct dst_entry *dst)
/*
* Check if we can use proxy ARP for this path
*/
-
-static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
+static inline int arp_fwd_proxy(struct in_device *in_dev,
+ struct net_device *dev, struct rtable *rt)
{
struct in_device *out_dev;
int imi, omi = -1;
+ if (rt->u.dst.dev == dev)
+ return 0;
+
if (!IN_DEV_PROXY_ARP(in_dev))
return 0;
@@ -548,6 +552,43 @@ static inline int arp_fwd_proxy(struct in_device *in_dev, struct rtable *rt)
}
/*
+ * Check for RFC3069 proxy arp private VLAN (allow to send back to same dev)
+ *
+ * RFC3069 supports proxy arp replies back to the same interface. This
+ * is done to support (ethernet) switch features, like RFC 3069, where
+ * the individual ports are not allowed to communicate with each
+ * other, BUT they are allowed to talk to the upstream router. As
+ * described in RFC 3069, it is possible to allow these hosts to
+ * communicate through the upstream router, by proxy_arp'ing.
+ *
+ * RFC 3069: "VLAN Aggregation for Efficient IP Address Allocation"
+ *
+ * This technology is known by different names:
+ * In RFC 3069 it is called VLAN Aggregation.
+ * Cisco and Allied Telesyn call it Private VLAN.
+ * Hewlett-Packard call it Source-Port filtering or port-isolation.
+ * Ericsson call it MAC-Forced Forwarding (RFC Draft).
+ *
+ */
+static inline int arp_fwd_pvlan(struct in_device *in_dev,
+ struct net_device *dev, struct rtable *rt,
+ __be32 sip, __be32 tip)
+{
+ /* Private VLAN is only concerned about the same ethernet segment */
+ if (rt->u.dst.dev != dev)
+ return 0;
+
+ /* Don't reply on self probes (often done by windowz boxes)*/
+ if (sip == tip)
+ return 0;
+
+ if (IN_DEV_PROXY_ARP_PVLAN(in_dev))
+ return 1;
+ else
+ return 0;
+}
+
+/*
* Interface to link layer: send routine and receive handler.
*/
@@ -833,8 +874,11 @@ static int arp_process(struct sk_buff *skb)
}
goto out;
} else if (IN_DEV_FORWARD(in_dev)) {
- if (addr_type == RTN_UNICAST && rt->u.dst.dev != dev &&
- (arp_fwd_proxy(in_dev, rt) || pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) {
+ if (addr_type == RTN_UNICAST &&
+ (arp_fwd_proxy(in_dev, dev, rt) ||
+ arp_fwd_pvlan(in_dev, dev, rt, sip, tip) ||
+ pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))
+ {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n)
neigh_release(n);
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 040c4f05b653..cd71a3908391 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1408,6 +1408,7 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
+ DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e34013a78ef4..3451799e3dbf 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -254,7 +254,7 @@ int ip_mc_output(struct sk_buff *skb)
*/
if (rt->rt_flags&RTCF_MULTICAST) {
- if ((!sk || inet_sk(sk)->mc_loop)
+ if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
/* Small optimization: do not loopback not local frames,
which returned after forwarding; they will be dropped
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index cafad9baff03..644dc43a55de 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -451,7 +451,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
(1<<IP_TTL) | (1<<IP_HDRINCL) |
(1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) |
(1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) |
- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) ||
+ (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) |
+ (1<<IP_MINTTL))) ||
optname == IP_MULTICAST_TTL ||
optname == IP_MULTICAST_ALL ||
optname == IP_MULTICAST_LOOP ||
@@ -936,6 +937,14 @@ mc_msf_out:
inet->transparent = !!val;
break;
+ case IP_MINTTL:
+ if (optlen < 1)
+ goto e_inval;
+ if (val < 0 || val > 255)
+ goto e_inval;
+ inet->min_ttl = val;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -1198,6 +1207,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
case IP_TRANSPARENT:
val = inet->transparent;
break;
+ case IP_MINTTL:
+ val = inet->min_ttl;
+ break;
default:
release_sock(sk);
return -ENOPROTOOPT;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index e446496f564f..1cc339441e7d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1988,8 +1988,13 @@ static int __mkroute_input(struct sk_buff *skb,
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
* invalid for proxy arp. DNAT routes are always valid.
+ *
+ * Proxy arp feature have been extended to allow, ARP
+ * replies back to the same interface, to support
+ * Private VLAN switch technologies. See arp.c.
*/
- if (out_dev == in_dev) {
+ if (out_dev == in_dev &&
+ IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
err = -EINVAL;
goto cleanup;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 66fd80ef2473..5c24db4a3c91 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -358,7 +358,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
- ireq->wscale_ok, &rcv_wscale);
+ ireq->wscale_ok, &rcv_wscale,
+ dst_metric(&rt->u.dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b0a26bb25e2e..d5d69ea8f249 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -536,8 +536,7 @@ static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
tp->nonagle &= ~TCP_NAGLE_PUSH;
}
-static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
- struct sk_buff *skb)
+static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
if (flags & MSG_OOB)
tp->snd_up = tp->write_seq;
@@ -546,13 +545,13 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
static inline void tcp_push(struct sock *sk, int flags, int mss_now,
int nonagle)
{
- struct tcp_sock *tp = tcp_sk(sk);
-
if (tcp_send_head(sk)) {
- struct sk_buff *skb = tcp_write_queue_tail(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+
if (!(flags & MSG_MORE) || forced_push(tp))
- tcp_mark_push(tp, skb);
- tcp_mark_urg(tp, flags, skb);
+ tcp_mark_push(tp, tcp_write_queue_tail(sk));
+
+ tcp_mark_urg(tp, flags);
__tcp_push_pending_frames(sk, mss_now,
(flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
}
@@ -877,12 +876,12 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off)
-static inline int select_size(struct sock *sk)
+static inline int select_size(struct sock *sk, int sg)
{
struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache;
- if (sk->sk_route_caps & NETIF_F_SG) {
+ if (sg) {
if (sk_can_gso(sk))
tmp = 0;
else {
@@ -906,7 +905,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
struct sk_buff *skb;
int iovlen, flags;
int mss_now, size_goal;
- int err, copied;
+ int sg, err, copied;
long timeo;
lock_sock(sk);
@@ -934,6 +933,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
goto out_err;
+ sg = sk->sk_route_caps & NETIF_F_SG;
+
while (--iovlen >= 0) {
int seglen = iov->iov_len;
unsigned char __user *from = iov->iov_base;
@@ -959,8 +960,9 @@ new_segment:
if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf;
- skb = sk_stream_alloc_skb(sk, select_size(sk),
- sk->sk_allocation);
+ skb = sk_stream_alloc_skb(sk,
+ select_size(sk, sg),
+ sk->sk_allocation);
if (!skb)
goto wait_for_memory;
@@ -997,9 +999,7 @@ new_segment:
/* We can extend the last page
* fragment. */
merge = 1;
- } else if (i == MAX_SKB_FRAGS ||
- (!i &&
- !(sk->sk_route_caps & NETIF_F_SG))) {
+ } else if (i == MAX_SKB_FRAGS || !sg) {
/* Need to add new fragment and cannot
* do this because interface is non-SG,
* or because all the page slots are
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 65b8ebfd078a..382f667238ec 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1649,6 +1649,9 @@ int tcp_v4_rcv(struct sk_buff *skb)
if (!sk)
goto no_tcp_socket;
+ if (iph->ttl < inet_sk(sk)->min_ttl)
+ goto discard_and_relse;
+
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 383ce237640f..4a1605d3f909 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -183,7 +183,8 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
*/
void tcp_select_initial_window(int __space, __u32 mss,
__u32 *rcv_wnd, __u32 *window_clamp,
- int wscale_ok, __u8 *rcv_wscale)
+ int wscale_ok, __u8 *rcv_wscale,
+ __u32 init_rcv_wnd)
{
unsigned int space = (__space < 0 ? 0 : __space);
@@ -232,7 +233,13 @@ void tcp_select_initial_window(int __space, __u32 mss,
init_cwnd = 2;
else if (mss > 1460)
init_cwnd = 3;
- if (*rcv_wnd > init_cwnd * mss)
+ /* when initializing use the value from init_rcv_wnd
+ * rather than the default from above
+ */
+ if (init_rcv_wnd &&
+ (*rcv_wnd > init_rcv_wnd * mss))
+ *rcv_wnd = init_rcv_wnd * mss;
+ else if (*rcv_wnd > init_cwnd * mss)
*rcv_wnd = init_cwnd * mss;
}
@@ -1794,11 +1801,6 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle)
{
- struct sk_buff *skb = tcp_send_head(sk);
-
- if (!skb)
- return;
-
/* If we are closed, the bytes will have to remain here.
* In time closedown will finish, we empty the write queue and
* all will be happy.
@@ -2422,7 +2424,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
&req->rcv_wnd,
&req->window_clamp,
ireq->wscale_ok,
- &rcv_wscale);
+ &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
}
@@ -2549,7 +2552,8 @@ static void tcp_connect_init(struct sock *sk)
&tp->rcv_wnd,
&tp->window_clamp,
sysctl_tcp_window_scaling,
- &rcv_wscale);
+ &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
tp->rx_opt.rcv_wscale = rcv_wscale;
tp->rcv_ssthresh = tp->rcv_wnd;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cd48801a8d6f..eb6d09728633 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -121,10 +121,9 @@ static int ip6_output2(struct sk_buff *skb)
skb->dev = dev;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
- struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
- if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) &&
+ if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
((mroute6_socket(dev_net(dev)) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index c9605c3ad91f..7b197b7132e0 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -259,7 +259,7 @@ int snmp6_unregister_dev(struct inet6_dev *idev)
struct net *net = dev_net(idev->dev);
if (!net->mib.proc_net_devsnmp6)
return -ENOENT;
- if (!idev || !idev->stats.proc_dir_entry)
+ if (!idev->stats.proc_dir_entry)
return -EINVAL;
remove_proc_entry(idev->stats.proc_dir_entry->name,
net->mib.proc_net_devsnmp6);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 7208a06576c6..34d1f0690d7e 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -269,7 +269,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
tcp_select_initial_window(tcp_full_space(sk), req->mss,
&req->rcv_wnd, &req->window_clamp,
- ireq->wscale_ok, &rcv_wscale);
+ ireq->wscale_ok, &rcv_wscale,
+ dst_metric(dst, RTAX_INITRWND));
ireq->rcv_wscale = rcv_wscale;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index febfd595a40d..1c832bf198b3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -876,7 +876,7 @@ static int tcp_v6_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
if (net_ratelimit()) {
- printk(KERN_INFO "MD5 Hash %s for (%pI6, %u)->(%pI6, %u)\n",
+ printk(KERN_INFO "MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
genhash ? "failed" : "mismatch",
&ip6h->saddr, ntohs(th->source),
&ip6h->daddr, ntohs(th->dest));
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index 811984d9324b..8b85d774e47f 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -496,9 +496,6 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
IRDA_DEBUG(0, "%s()\n", __func__ );
- if (!tty)
- return;
-
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
@@ -1007,9 +1004,6 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
- if (!tty)
- return;
-
/* ircomm_tty_flush_buffer(tty); */
ircomm_tty_shutdown(self);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3a66546cad06..e35d907fba2c 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -47,6 +47,10 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout);
#define dprintk(args...)
#endif
+/* Maybe we'll add some more in the future. */
+#define LLC_CMSG_PKTINFO 1
+
+
/**
* llc_ui_next_link_no - return the next unused link number for a sap
* @sap: Address of sap to get link number from.
@@ -136,6 +140,7 @@ static struct proto llc_proto = {
.name = "LLC",
.owner = THIS_MODULE,
.obj_size = sizeof(struct llc_sock),
+ .slab_flags = SLAB_DESTROY_BY_RCU,
};
/**
@@ -192,10 +197,8 @@ static int llc_ui_release(struct socket *sock)
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
- if (!sock_flag(sk, SOCK_ZAPPED)) {
- llc_sap_put(llc->sap);
+ if (!sock_flag(sk, SOCK_ZAPPED))
llc_sap_remove_socket(llc->sap, sk);
- }
release_sock(sk);
if (llc->dev)
dev_put(llc->dev);
@@ -255,7 +258,14 @@ static int llc_ui_autobind(struct socket *sock, struct sockaddr_llc *addr)
if (!sock_flag(sk, SOCK_ZAPPED))
goto out;
rc = -ENODEV;
- llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
+ if (sk->sk_bound_dev_if) {
+ llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+ if (llc->dev && addr->sllc_arphrd != llc->dev->type) {
+ dev_put(llc->dev);
+ llc->dev = NULL;
+ }
+ } else
+ llc->dev = dev_getfirstbyhwtype(&init_net, addr->sllc_arphrd);
if (!llc->dev)
goto out;
rc = -EUSERS;
@@ -306,7 +316,25 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
goto out;
rc = -ENODEV;
rtnl_lock();
- llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac);
+ if (sk->sk_bound_dev_if) {
+ llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+ if (llc->dev) {
+ if (!addr->sllc_arphrd)
+ addr->sllc_arphrd = llc->dev->type;
+ if (llc_mac_null(addr->sllc_mac))
+ memcpy(addr->sllc_mac, llc->dev->dev_addr,
+ IFHWADDRLEN);
+ if (addr->sllc_arphrd != llc->dev->type ||
+ !llc_mac_match(addr->sllc_mac,
+ llc->dev->dev_addr)) {
+ rc = -EINVAL;
+ dev_put(llc->dev);
+ llc->dev = NULL;
+ }
+ }
+ } else
+ llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
+ addr->sllc_mac);
rtnl_unlock();
if (!llc->dev)
goto out;
@@ -322,7 +350,6 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
rc = -EBUSY; /* some other network layer is using the sap */
if (!sap)
goto out;
- llc_sap_hold(sap);
} else {
struct llc_addr laddr, daddr;
struct sock *ask;
@@ -591,6 +618,20 @@ static int llc_wait_data(struct sock *sk, long timeo)
return rc;
}
+static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
+{
+ struct llc_sock *llc = llc_sk(skb->sk);
+
+ if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
+ struct llc_pktinfo info;
+
+ info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
+ llc_pdu_decode_dsap(skb, &info.lpi_sap);
+ llc_pdu_decode_da(skb, info.lpi_mac);
+ put_cmsg(msg, SOL_LLC, LLC_OPT_PKTINFO, sizeof(info), &info);
+ }
+}
+
/**
* llc_ui_accept - accept a new incoming connection.
* @sock: Socket which connections arrive on.
@@ -812,6 +853,8 @@ copy_uaddr:
memcpy(uaddr, llc_ui_skb_cb(skb), sizeof(*uaddr));
msg->msg_namelen = sizeof(*uaddr);
}
+ if (llc_sk(sk)->cmsg_flags)
+ llc_cmsg_rcv(msg, skb);
goto out;
}
@@ -1030,6 +1073,12 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
goto out;
llc->rw = opt;
break;
+ case LLC_OPT_PKTINFO:
+ if (opt)
+ llc->cmsg_flags |= LLC_CMSG_PKTINFO;
+ else
+ llc->cmsg_flags &= ~LLC_CMSG_PKTINFO;
+ break;
default:
rc = -ENOPROTOOPT;
goto out;
@@ -1083,6 +1132,9 @@ static int llc_ui_getsockopt(struct socket *sock, int level, int optname,
val = llc->k; break;
case LLC_OPT_RX_WIN:
val = llc->rw; break;
+ case LLC_OPT_PKTINFO:
+ val = (llc->cmsg_flags & LLC_CMSG_PKTINFO) != 0;
+ break;
default:
rc = -ENOPROTOOPT;
goto out;
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index c6bab39b018e..a8dde9b010da 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -468,6 +468,19 @@ static int llc_exec_conn_trans_actions(struct sock *sk,
return rc;
}
+static inline bool llc_estab_match(const struct llc_sap *sap,
+ const struct llc_addr *daddr,
+ const struct llc_addr *laddr,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return llc->laddr.lsap == laddr->lsap &&
+ llc->daddr.lsap == daddr->lsap &&
+ llc_mac_match(llc->laddr.mac, laddr->mac) &&
+ llc_mac_match(llc->daddr.mac, daddr->mac);
+}
+
/**
* __llc_lookup_established - Finds connection for the remote/local sap/mac
* @sap: SAP
@@ -484,23 +497,35 @@ static struct sock *__llc_lookup_established(struct llc_sap *sap,
struct llc_addr *laddr)
{
struct sock *rc;
- struct hlist_node *node;
-
- read_lock(&sap->sk_list.lock);
- sk_for_each(rc, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(rc);
-
- if (llc->laddr.lsap == laddr->lsap &&
- llc->daddr.lsap == daddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac) &&
- llc_mac_match(llc->daddr.mac, daddr->mac)) {
- sock_hold(rc);
+ struct hlist_nulls_node *node;
+ int slot = llc_sk_laddr_hashfn(sap, laddr);
+ struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
+
+ rcu_read_lock();
+again:
+ sk_nulls_for_each_rcu(rc, node, laddr_hb) {
+ if (llc_estab_match(sap, daddr, laddr, rc)) {
+ /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+ goto again;
+ if (unlikely(llc_sk(rc)->sap != sap ||
+ !llc_estab_match(sap, daddr, laddr, rc))) {
+ sock_put(rc);
+ continue;
+ }
goto found;
}
}
rc = NULL;
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (unlikely(get_nulls_value(node) != slot))
+ goto again;
found:
- read_unlock(&sap->sk_list.lock);
+ rcu_read_unlock();
return rc;
}
@@ -516,6 +541,53 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
return sk;
}
+static inline bool llc_listener_match(const struct llc_sap *sap,
+ const struct llc_addr *laddr,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
+ llc->laddr.lsap == laddr->lsap &&
+ llc_mac_match(llc->laddr.mac, laddr->mac);
+}
+
+static struct sock *__llc_lookup_listener(struct llc_sap *sap,
+ struct llc_addr *laddr)
+{
+ struct sock *rc;
+ struct hlist_nulls_node *node;
+ int slot = llc_sk_laddr_hashfn(sap, laddr);
+ struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
+
+ rcu_read_lock();
+again:
+ sk_nulls_for_each_rcu(rc, node, laddr_hb) {
+ if (llc_listener_match(sap, laddr, rc)) {
+ /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+ goto again;
+ if (unlikely(llc_sk(rc)->sap != sap ||
+ !llc_listener_match(sap, laddr, rc))) {
+ sock_put(rc);
+ continue;
+ }
+ goto found;
+ }
+ }
+ rc = NULL;
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (unlikely(get_nulls_value(node) != slot))
+ goto again;
+found:
+ rcu_read_unlock();
+ return rc;
+}
+
/**
* llc_lookup_listener - Finds listener for local MAC + SAP
* @sap: SAP
@@ -529,24 +601,12 @@ struct sock *llc_lookup_established(struct llc_sap *sap,
static struct sock *llc_lookup_listener(struct llc_sap *sap,
struct llc_addr *laddr)
{
- struct sock *rc;
- struct hlist_node *node;
+ static struct llc_addr null_addr;
+ struct sock *rc = __llc_lookup_listener(sap, laddr);
- read_lock(&sap->sk_list.lock);
- sk_for_each(rc, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(rc);
+ if (!rc)
+ rc = __llc_lookup_listener(sap, &null_addr);
- if (rc->sk_type == SOCK_STREAM && rc->sk_state == TCP_LISTEN &&
- llc->laddr.lsap == laddr->lsap &&
- (llc_mac_match(llc->laddr.mac, laddr->mac) ||
- llc_mac_null(llc->laddr.mac))) {
- sock_hold(rc);
- goto found;
- }
- }
- rc = NULL;
-found:
- read_unlock(&sap->sk_list.lock);
return rc;
}
@@ -647,15 +707,22 @@ static int llc_find_offset(int state, int ev_type)
* @sap: SAP
* @sk: socket
*
- * This function adds a socket to sk_list of a SAP.
+ * This function adds a socket to the hash tables of a SAP.
*/
void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
{
+ struct llc_sock *llc = llc_sk(sk);
+ struct hlist_head *dev_hb = llc_sk_dev_hash(sap, llc->dev->ifindex);
+ struct hlist_nulls_head *laddr_hb = llc_sk_laddr_hash(sap, &llc->laddr);
+
llc_sap_hold(sap);
- write_lock_bh(&sap->sk_list.lock);
llc_sk(sk)->sap = sap;
- sk_add_node(sk, &sap->sk_list.list);
- write_unlock_bh(&sap->sk_list.lock);
+
+ spin_lock_bh(&sap->sk_lock);
+ sap->sk_count++;
+ sk_nulls_add_node_rcu(sk, laddr_hb);
+ hlist_add_head(&llc->dev_hash_node, dev_hb);
+ spin_unlock_bh(&sap->sk_lock);
}
/**
@@ -663,14 +730,18 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk)
* @sap: SAP
* @sk: socket
*
- * This function removes a connection from sk_list.list of a SAP if
+ * This function removes a connection from the hash tables of a SAP if
* the connection was in this list.
*/
void llc_sap_remove_socket(struct llc_sap *sap, struct sock *sk)
{
- write_lock_bh(&sap->sk_list.lock);
- sk_del_node_init(sk);
- write_unlock_bh(&sap->sk_list.lock);
+ struct llc_sock *llc = llc_sk(sk);
+
+ spin_lock_bh(&sap->sk_lock);
+ sk_nulls_del_node_init_rcu(sk);
+ hlist_del(&llc->dev_hash_node);
+ sap->sk_count--;
+ spin_unlock_bh(&sap->sk_lock);
llc_sap_put(sap);
}
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index ff4c0ab96a69..78167e81dfeb 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -23,7 +23,7 @@
#include <net/llc.h>
LIST_HEAD(llc_sap_list);
-DEFINE_RWLOCK(llc_sap_list_lock);
+DEFINE_SPINLOCK(llc_sap_list_lock);
/**
* llc_sap_alloc - allocates and initializes sap.
@@ -33,40 +33,19 @@ DEFINE_RWLOCK(llc_sap_list_lock);
static struct llc_sap *llc_sap_alloc(void)
{
struct llc_sap *sap = kzalloc(sizeof(*sap), GFP_ATOMIC);
+ int i;
if (sap) {
/* sap->laddr.mac - leave as a null, it's filled by bind */
sap->state = LLC_SAP_STATE_ACTIVE;
- rwlock_init(&sap->sk_list.lock);
+ spin_lock_init(&sap->sk_lock);
+ for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++)
+ INIT_HLIST_NULLS_HEAD(&sap->sk_laddr_hash[i], i);
atomic_set(&sap->refcnt, 1);
}
return sap;
}
-/**
- * llc_add_sap - add sap to station list
- * @sap: Address of the sap
- *
- * Adds a sap to the LLC's station sap list.
- */
-static void llc_add_sap(struct llc_sap *sap)
-{
- list_add_tail(&sap->node, &llc_sap_list);
-}
-
-/**
- * llc_del_sap - del sap from station list
- * @sap: Address of the sap
- *
- * Removes a sap to the LLC's station sap list.
- */
-static void llc_del_sap(struct llc_sap *sap)
-{
- write_lock_bh(&llc_sap_list_lock);
- list_del(&sap->node);
- write_unlock_bh(&llc_sap_list_lock);
-}
-
static struct llc_sap *__llc_sap_find(unsigned char sap_value)
{
struct llc_sap* sap;
@@ -90,13 +69,13 @@ out:
*/
struct llc_sap *llc_sap_find(unsigned char sap_value)
{
- struct llc_sap* sap;
+ struct llc_sap *sap;
- read_lock_bh(&llc_sap_list_lock);
+ rcu_read_lock_bh();
sap = __llc_sap_find(sap_value);
if (sap)
llc_sap_hold(sap);
- read_unlock_bh(&llc_sap_list_lock);
+ rcu_read_unlock_bh();
return sap;
}
@@ -117,7 +96,7 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
{
struct llc_sap *sap = NULL;
- write_lock_bh(&llc_sap_list_lock);
+ spin_lock_bh(&llc_sap_list_lock);
if (__llc_sap_find(lsap)) /* SAP already exists */
goto out;
sap = llc_sap_alloc();
@@ -125,9 +104,9 @@ struct llc_sap *llc_sap_open(unsigned char lsap,
goto out;
sap->laddr.lsap = lsap;
sap->rcv_func = func;
- llc_add_sap(sap);
+ list_add_tail_rcu(&sap->node, &llc_sap_list);
out:
- write_unlock_bh(&llc_sap_list_lock);
+ spin_unlock_bh(&llc_sap_list_lock);
return sap;
}
@@ -142,8 +121,14 @@ out:
*/
void llc_sap_close(struct llc_sap *sap)
{
- WARN_ON(!hlist_empty(&sap->sk_list.list));
- llc_del_sap(sap);
+ WARN_ON(sap->sk_count);
+
+ spin_lock_bh(&llc_sap_list_lock);
+ list_del_rcu(&sap->node);
+ spin_unlock_bh(&llc_sap_list_lock);
+
+ synchronize_rcu();
+
kfree(sap);
}
diff --git a/net/llc/llc_output.c b/net/llc/llc_output.c
index 754f4fedc852..b38a1079a98e 100644
--- a/net/llc/llc_output.c
+++ b/net/llc/llc_output.c
@@ -33,48 +33,19 @@
int llc_mac_hdr_init(struct sk_buff *skb,
const unsigned char *sa, const unsigned char *da)
{
- int rc = 0;
+ int rc = -EINVAL;
switch (skb->dev->type) {
-#ifdef CONFIG_TR
- case ARPHRD_IEEE802_TR: {
- struct net_device *dev = skb->dev;
- struct trh_hdr *trh;
-
- skb_push(skb, sizeof(*trh));
- skb_reset_mac_header(skb);
- trh = tr_hdr(skb);
- trh->ac = AC;
- trh->fc = LLC_FRAME;
- if (sa)
- memcpy(trh->saddr, sa, dev->addr_len);
- else
- memset(trh->saddr, 0, dev->addr_len);
- if (da) {
- memcpy(trh->daddr, da, dev->addr_len);
- tr_source_route(skb, trh, dev);
- skb_reset_mac_header(skb);
- }
- break;
- }
-#endif
+ case ARPHRD_IEEE802_TR:
case ARPHRD_ETHER:
- case ARPHRD_LOOPBACK: {
- unsigned short len = skb->len;
- struct ethhdr *eth;
-
- skb_push(skb, sizeof(*eth));
- skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
- eth->h_proto = htons(len);
- memcpy(eth->h_dest, da, ETH_ALEN);
- memcpy(eth->h_source, sa, ETH_ALEN);
+ case ARPHRD_LOOPBACK:
+ rc = dev_hard_header(skb, skb->dev, ETH_P_802_2, da, sa,
+ skb->len);
+ if (rc > 0)
+ rc = 0;
break;
- }
default:
- printk(KERN_WARNING "device type not supported: %d\n",
- skb->dev->type);
- rc = -EINVAL;
+ WARN(1, "device type not supported: %d\n", skb->dev->type);
}
return rc;
}
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index be47ac427f6b..7af1ff2d1f19 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -32,21 +32,23 @@ static void llc_ui_format_mac(struct seq_file *seq, u8 *addr)
static struct sock *llc_get_sk_idx(loff_t pos)
{
- struct list_head *sap_entry;
struct llc_sap *sap;
- struct hlist_node *node;
struct sock *sk = NULL;
-
- list_for_each(sap_entry, &llc_sap_list) {
- sap = list_entry(sap_entry, struct llc_sap, node);
-
- read_lock_bh(&sap->sk_list.lock);
- sk_for_each(sk, node, &sap->sk_list.list) {
- if (!pos)
- goto found;
- --pos;
+ int i;
+
+ list_for_each_entry_rcu(sap, &llc_sap_list, node) {
+ spin_lock_bh(&sap->sk_lock);
+ for (i = 0; i < LLC_SK_LADDR_HASH_ENTRIES; i++) {
+ struct hlist_nulls_head *head = &sap->sk_laddr_hash[i];
+ struct hlist_nulls_node *node;
+
+ sk_nulls_for_each(sk, node, head) {
+ if (!pos)
+ goto found; /* keep the lock */
+ --pos;
+ }
}
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
}
sk = NULL;
found:
@@ -57,10 +59,23 @@ static void *llc_seq_start(struct seq_file *seq, loff_t *pos)
{
loff_t l = *pos;
- read_lock_bh(&llc_sap_list_lock);
+ rcu_read_lock_bh();
return l ? llc_get_sk_idx(--l) : SEQ_START_TOKEN;
}
+static struct sock *laddr_hash_next(struct llc_sap *sap, int bucket)
+{
+ struct hlist_nulls_node *node;
+ struct sock *sk = NULL;
+
+ while (++bucket < LLC_SK_LADDR_HASH_ENTRIES)
+ sk_nulls_for_each(sk, node, &sap->sk_laddr_hash[bucket])
+ goto out;
+
+out:
+ return sk;
+}
+
static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sock* sk, *next;
@@ -73,25 +88,23 @@ static void *llc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
goto out;
}
sk = v;
- next = sk_next(sk);
+ next = sk_nulls_next(sk);
if (next) {
sk = next;
goto out;
}
llc = llc_sk(sk);
sap = llc->sap;
- read_unlock_bh(&sap->sk_list.lock);
- sk = NULL;
- for (;;) {
- if (sap->node.next == &llc_sap_list)
- break;
- sap = list_entry(sap->node.next, struct llc_sap, node);
- read_lock_bh(&sap->sk_list.lock);
- if (!hlist_empty(&sap->sk_list.list)) {
- sk = sk_head(&sap->sk_list.list);
- break;
- }
- read_unlock_bh(&sap->sk_list.lock);
+ sk = laddr_hash_next(sap, llc_sk_laddr_hashfn(sap, &llc->laddr));
+ if (sk)
+ goto out;
+ spin_unlock_bh(&sap->sk_lock);
+ list_for_each_entry_continue_rcu(sap, &llc_sap_list, node) {
+ spin_lock_bh(&sap->sk_lock);
+ sk = laddr_hash_next(sap, -1);
+ if (sk)
+ break; /* keep the lock */
+ spin_unlock_bh(&sap->sk_lock);
}
out:
return sk;
@@ -104,9 +117,9 @@ static void llc_seq_stop(struct seq_file *seq, void *v)
struct llc_sock *llc = llc_sk(sk);
struct llc_sap *sap = llc->sap;
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
}
- read_unlock_bh(&llc_sap_list_lock);
+ rcu_read_unlock_bh();
}
static int llc_seq_socket_show(struct seq_file *seq, void *v)
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 008de1fc42ca..ad6e6e1cf22f 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -297,6 +297,17 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
llc_sap_state_process(sap, skb);
}
+static inline bool llc_dgram_match(const struct llc_sap *sap,
+ const struct llc_addr *laddr,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return sk->sk_type == SOCK_DGRAM &&
+ llc->laddr.lsap == laddr->lsap &&
+ llc_mac_match(llc->laddr.mac, laddr->mac);
+}
+
/**
* llc_lookup_dgram - Finds dgram socket for the local sap/mac
* @sap: SAP
@@ -309,25 +320,68 @@ static struct sock *llc_lookup_dgram(struct llc_sap *sap,
const struct llc_addr *laddr)
{
struct sock *rc;
- struct hlist_node *node;
-
- read_lock_bh(&sap->sk_list.lock);
- sk_for_each(rc, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(rc);
-
- if (rc->sk_type == SOCK_DGRAM &&
- llc->laddr.lsap == laddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac)) {
- sock_hold(rc);
+ struct hlist_nulls_node *node;
+ int slot = llc_sk_laddr_hashfn(sap, laddr);
+ struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];
+
+ rcu_read_lock_bh();
+again:
+ sk_nulls_for_each_rcu(rc, node, laddr_hb) {
+ if (llc_dgram_match(sap, laddr, rc)) {
+ /* Extra checks required by SLAB_DESTROY_BY_RCU */
+ if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
+ goto again;
+ if (unlikely(llc_sk(rc)->sap != sap ||
+ !llc_dgram_match(sap, laddr, rc))) {
+ sock_put(rc);
+ continue;
+ }
goto found;
}
}
rc = NULL;
+ /*
+ * if the nulls value we got at the end of this lookup is
+ * not the expected one, we must restart lookup.
+ * We probably met an item that was moved to another chain.
+ */
+ if (unlikely(get_nulls_value(node) != slot))
+ goto again;
found:
- read_unlock_bh(&sap->sk_list.lock);
+ rcu_read_unlock_bh();
return rc;
}
+static inline bool llc_mcast_match(const struct llc_sap *sap,
+ const struct llc_addr *laddr,
+ const struct sk_buff *skb,
+ const struct sock *sk)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ return sk->sk_type == SOCK_DGRAM &&
+ llc->laddr.lsap == laddr->lsap &&
+ llc->dev == skb->dev;
+}
+
+static void llc_do_mcast(struct llc_sap *sap, struct sk_buff *skb,
+ struct sock **stack, int count)
+{
+ struct sk_buff *skb1;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ skb1 = skb_clone(skb, GFP_ATOMIC);
+ if (!skb1) {
+ sock_put(stack[i]);
+ continue;
+ }
+
+ llc_sap_rcv(sap, skb1, stack[i]);
+ sock_put(stack[i]);
+ }
+}
+
/**
* llc_sap_mcast - Deliver multicast PDU's to all matching datagram sockets.
* @sap: SAP
@@ -340,32 +394,31 @@ static void llc_sap_mcast(struct llc_sap *sap,
const struct llc_addr *laddr,
struct sk_buff *skb)
{
- struct sock *sk;
+ int i = 0, count = 256 / sizeof(struct sock *);
+ struct sock *sk, *stack[count];
struct hlist_node *node;
+ struct llc_sock *llc;
+ struct hlist_head *dev_hb = llc_sk_dev_hash(sap, skb->dev->ifindex);
- read_lock_bh(&sap->sk_list.lock);
- sk_for_each(sk, node, &sap->sk_list.list) {
- struct llc_sock *llc = llc_sk(sk);
- struct sk_buff *skb1;
+ spin_lock_bh(&sap->sk_lock);
+ hlist_for_each_entry(llc, node, dev_hb, dev_hash_node) {
- if (sk->sk_type != SOCK_DGRAM)
- continue;
+ sk = &llc->sk;
- if (llc->laddr.lsap != laddr->lsap)
+ if (!llc_mcast_match(sap, laddr, skb, sk))
continue;
- if (llc->dev != skb->dev)
- continue;
-
- skb1 = skb_clone(skb, GFP_ATOMIC);
- if (!skb1)
- break;
-
sock_hold(sk);
- llc_sap_rcv(sap, skb1, sk);
- sock_put(sk);
+ if (i < count)
+ stack[i++] = sk;
+ else {
+ llc_do_mcast(sap, skb, stack, i);
+ i = 0;
+ }
}
- read_unlock_bh(&sap->sk_list.lock);
+ spin_unlock_bh(&sap->sk_lock);
+
+ llc_do_mcast(sap, skb, stack, i);
}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a10d508b07e1..a952b7f8c648 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -96,18 +96,6 @@ menuconfig MAC80211_DEBUG_MENU
---help---
This option collects various mac80211 debug settings.
-config MAC80211_DEBUG_PACKET_ALIGNMENT
- bool "Enable packet alignment debugging"
- depends on MAC80211_DEBUG_MENU
- ---help---
- This option is recommended for driver authors and strongly
- discouraged for everybody else, it will trigger a warning
- when a driver hands mac80211 a buffer that is aligned in
- a way that will cause problems with the IP stack on some
- architectures.
-
- Say N unless you're writing a mac80211 based driver.
-
config MAC80211_NOINLINE
bool "Do not inline TX/RX handlers"
depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 298cfcc1bf8d..04420291e7ad 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -6,10 +6,10 @@ mac80211-y := \
sta_info.o \
wep.o \
wpa.o \
- scan.o \
+ scan.o offchannel.o \
ht.o agg-tx.o agg-rx.o \
ibss.o \
- mlme.o \
+ mlme.o work.o \
iface.o \
rate.o \
michael.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 51c7dc3c4c3b..a978e666ed6f 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -41,8 +41,7 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- if (drv_ampdu_action(local, &sta->sdata->vif,
- IEEE80211_AMPDU_RX_STOP,
+ if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
&sta->sta, tid, NULL))
printk(KERN_DEBUG "HW problem - can not stop rx "
"aggregation for tid %d\n", tid);
@@ -83,12 +82,11 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
u16 initiator, u16 reason)
{
- struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
rcu_read_lock();
- sta = sta_info_get(local, ra);
+ sta = sta_info_get(sdata, ra);
if (!sta) {
rcu_read_unlock();
return;
@@ -136,7 +134,7 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer "
- "for addba resp frame\n", sdata->dev->name);
+ "for addba resp frame\n", sdata->name);
return;
}
@@ -144,10 +142,10 @@ static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *d
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -281,8 +279,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
goto end;
}
- ret = drv_ampdu_action(local, &sta->sdata->vif,
- IEEE80211_AMPDU_RX_START,
+ ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
&sta->sta, tid, &start_seq_num);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Rx A-MPDU request on tid %d result %d\n", tid, ret);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 5e3a7eccef5a..718fbcff84d2 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -58,17 +58,17 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer "
- "for addba request frame\n", sdata->dev->name);
+ "for addba request frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -104,7 +104,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
skb = dev_alloc_skb(sizeof(*bar) + local->hw.extra_tx_headroom);
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer for "
- "bar frame\n", sdata->dev->name);
+ "bar frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -113,7 +113,7 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
bar->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
IEEE80211_STYPE_BACK_REQ);
memcpy(bar->ra, ra, ETH_ALEN);
- memcpy(bar->ta, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(bar->ta, sdata->vif.addr, ETH_ALEN);
bar_control |= (u16)IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL;
bar_control |= (u16)IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA;
bar_control |= (u16)(tid << 12);
@@ -144,7 +144,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
- ret = drv_ampdu_action(local, &sta->sdata->vif,
+ ret = drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_STOP,
&sta->sta, tid, NULL);
@@ -179,7 +179,8 @@ static void sta_addba_resp_timer_expired(unsigned long data)
/* check if the TID waits for addBA response */
spin_lock_bh(&sta->lock);
- if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
+ if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
+ HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
HT_ADDBA_REQUESTED_MSK) {
spin_unlock_bh(&sta->lock);
*state = HT_AGG_STATE_IDLE;
@@ -301,10 +302,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
* call back right away, it must see that the flow has begun */
*state |= HT_ADDBA_REQUESTED_MSK;
- start_seq_num = sta->tid_seq[tid];
+ start_seq_num = sta->tid_seq[tid] >> 4;
- ret = drv_ampdu_action(local, &sdata->vif,
- IEEE80211_AMPDU_TX_START,
+ ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
pubsta, tid, &start_seq_num);
if (ret) {
@@ -420,7 +420,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
ieee80211_agg_splice_finish(local, sta, tid);
spin_unlock(&local->ampdu_lock);
- drv_ampdu_action(local, &sta->sdata->vif,
+ drv_ampdu_action(local, sta->sdata,
IEEE80211_AMPDU_TX_OPERATIONAL,
&sta->sta, tid, NULL);
}
@@ -441,7 +441,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
}
rcu_read_lock();
- sta = sta_info_get(local, ra);
+ sta = sta_info_get(sdata, ra);
if (!sta) {
rcu_read_unlock();
#ifdef CONFIG_MAC80211_HT_DEBUG
@@ -489,7 +489,7 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_WARNING "%s: Not enough memory, "
- "dropping start BA session", skb->dev->name);
+ "dropping start BA session", sdata->name);
#endif
return;
}
@@ -564,7 +564,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
#endif /* CONFIG_MAC80211_HT_DEBUG */
rcu_read_lock();
- sta = sta_info_get(local, ra);
+ sta = sta_info_get(sdata, ra);
if (!sta) {
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "Could not find station: %pM\n", ra);
@@ -621,7 +621,7 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
#ifdef CONFIG_MAC80211_HT_DEBUG
if (net_ratelimit())
printk(KERN_WARNING "%s: Not enough memory, "
- "dropping stop BA session", skb->dev->name);
+ "dropping stop BA session", sdata->name);
#endif
return;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 6dc3579c0ac5..8286df5822d5 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -78,17 +78,15 @@ static int ieee80211_change_iface(struct wiphy *wiphy,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int ret;
- if (netif_running(dev))
+ if (ieee80211_sdata_running(sdata))
return -EBUSY;
if (!nl80211_params_check(type, params))
return -EINVAL;
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
ret = ieee80211_if_change_type(sdata, type);
if (ret)
return ret;
@@ -150,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
if (mac_addr) {
- sta = sta_info_get(sdata->local, mac_addr);
+ sta = sta_info_get_bss(sdata, mac_addr);
if (!sta) {
ieee80211_key_free(key);
err = -ENOENT;
@@ -181,7 +179,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
if (mac_addr) {
ret = -ENOENT;
- sta = sta_info_get(sdata->local, mac_addr);
+ sta = sta_info_get_bss(sdata, mac_addr);
if (!sta)
goto out_unlock;
@@ -228,7 +226,7 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
if (mac_addr) {
- sta = sta_info_get(sdata->local, mac_addr);
+ sta = sta_info_get_bss(sdata, mac_addr);
if (!sta)
goto out;
@@ -415,15 +413,13 @@ static int ieee80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
static int ieee80211_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct sta_info *sta;
int ret = -ENOENT;
rcu_read_lock();
- /* XXX: verify sta->dev == dev */
-
- sta = sta_info_get(local, mac);
+ sta = sta_info_get_bss(sdata, mac);
if (sta) {
ret = 0;
sta_set_sinfo(sta, sinfo);
@@ -732,7 +728,7 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
} else
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (compare_ether_addr(mac, dev->dev_addr) == 0)
+ if (compare_ether_addr(mac, sdata->vif.addr) == 0)
return -EINVAL;
if (is_multicast_ether_addr(mac))
@@ -779,8 +775,7 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
if (mac) {
rcu_read_lock();
- /* XXX: get sta belonging to dev */
- sta = sta_info_get(local, mac);
+ sta = sta_info_get_bss(sdata, mac);
if (!sta) {
rcu_read_unlock();
return -ENOENT;
@@ -801,14 +796,14 @@ static int ieee80211_change_station(struct wiphy *wiphy,
u8 *mac,
struct station_parameters *params)
{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wiphy_priv(wiphy);
struct sta_info *sta;
struct ieee80211_sub_if_data *vlansdata;
rcu_read_lock();
- /* XXX: get sta belonging to dev */
- sta = sta_info_get(local, mac);
+ sta = sta_info_get_bss(sdata, mac);
if (!sta) {
rcu_read_unlock();
return -ENOENT;
@@ -847,7 +842,6 @@ static int ieee80211_change_station(struct wiphy *wiphy,
static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
u8 *dst, u8 *next_hop)
{
- struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
struct mesh_path *mpath;
struct sta_info *sta;
@@ -856,7 +850,7 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
rcu_read_lock();
- sta = sta_info_get(local, next_hop);
+ sta = sta_info_get(sdata, next_hop);
if (!sta) {
rcu_read_unlock();
return -ENOENT;
@@ -895,7 +889,6 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
struct net_device *dev,
u8 *dst, u8 *next_hop)
{
- struct ieee80211_local *local = wiphy_priv(wiphy);
struct ieee80211_sub_if_data *sdata;
struct mesh_path *mpath;
struct sta_info *sta;
@@ -904,7 +897,7 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
rcu_read_lock();
- sta = sta_info_get(local, next_hop);
+ sta = sta_info_get(sdata, next_hop);
if (!sta) {
rcu_read_unlock();
return -ENOENT;
@@ -1135,6 +1128,13 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
p.cw_max = params->cwmax;
p.cw_min = params->cwmin;
p.txop = params->txop;
+
+ /*
+ * Setting tx queue params disables u-apsd because it's only
+ * called in master mode.
+ */
+ p.uapsd = false;
+
if (drv_conf_tx(local, params->queue, &p)) {
printk(KERN_DEBUG "%s: failed to set TX queue "
"parameters for queue %d\n",
@@ -1237,6 +1237,13 @@ static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
struct ieee80211_local *local = wiphy_priv(wiphy);
int err;
+ if (changed & WIPHY_PARAM_COVERAGE_CLASS) {
+ err = drv_set_coverage_class(local, wiphy->coverage_class);
+
+ if (err)
+ return err;
+ }
+
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
err = drv_set_rts_threshold(local, wiphy->rts_threshold);
@@ -1324,6 +1331,50 @@ static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
}
#endif
+int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
+{
+ const u8 *ap;
+ enum ieee80211_smps_mode old_req;
+ int err;
+
+ old_req = sdata->u.mgd.req_smps;
+ sdata->u.mgd.req_smps = smps_mode;
+
+ if (old_req == smps_mode &&
+ smps_mode != IEEE80211_SMPS_AUTOMATIC)
+ return 0;
+
+ /*
+ * If not associated, or current association is not an HT
+ * association, there's no need to send an action frame.
+ */
+ if (!sdata->u.mgd.associated ||
+ sdata->local->oper_channel_type == NL80211_CHAN_NO_HT) {
+ mutex_lock(&sdata->local->iflist_mtx);
+ ieee80211_recalc_smps(sdata->local, sdata);
+ mutex_unlock(&sdata->local->iflist_mtx);
+ return 0;
+ }
+
+ ap = sdata->u.mgd.associated->bssid;
+
+ if (smps_mode == IEEE80211_SMPS_AUTOMATIC) {
+ if (sdata->u.mgd.powersave)
+ smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ smps_mode = IEEE80211_SMPS_OFF;
+ }
+
+ /* send SM PS frame to AP */
+ err = ieee80211_send_smps_action(sdata, smps_mode,
+ ap, ap);
+ if (err)
+ sdata->u.mgd.req_smps = old_req;
+
+ return err;
+}
+
static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
bool enabled, int timeout)
{
@@ -1341,6 +1392,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
sdata->u.mgd.powersave = enabled;
conf->dynamic_ps_timeout = timeout;
+ /* no change, but if automatic follow powersave */
+ mutex_lock(&sdata->u.mgd.mtx);
+ __ieee80211_request_smps(sdata, sdata->u.mgd.req_smps);
+ mutex_unlock(&sdata->u.mgd.mtx);
+
if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
@@ -1356,39 +1412,43 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- int i, err = -EINVAL;
- u32 target_rate;
- struct ieee80211_supported_band *sband;
+ int i;
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+ /*
+ * This _could_ be supported by providing a hook for
+ * drivers for this function, but at this point it
+ * doesn't seem worth bothering.
+ */
+ if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
+ return -EOPNOTSUPP;
- /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
- * target_rate = X, rate->fixed = 1 means only rate X
- * target_rate = X, rate->fixed = 0 means all rates <= X */
- sdata->max_ratectrl_rateidx = -1;
- sdata->force_unicast_rateidx = -1;
- if (mask->fixed)
- target_rate = mask->fixed / 100;
- else if (mask->maxrate)
- target_rate = mask->maxrate / 100;
- else
- return 0;
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ sdata->rc_rateidx_mask[i] = mask->control[i].legacy;
- for (i=0; i< sband->n_bitrates; i++) {
- struct ieee80211_rate *brate = &sband->bitrates[i];
- int this_rate = brate->bitrate;
+ return 0;
+}
- if (target_rate == this_rate) {
- sdata->max_ratectrl_rateidx = i;
- if (mask->fixed)
- sdata->force_unicast_rateidx = i;
- err = 0;
- break;
- }
- }
+static int ieee80211_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration,
+ u64 *cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- return err;
+ return ieee80211_wk_remain_on_channel(sdata, chan, channel_type,
+ duration, cookie);
+}
+
+static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy,
+ struct net_device *dev,
+ u64 cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ return ieee80211_wk_cancel_remain_on_channel(sdata, cookie);
}
struct cfg80211_ops mac80211_config_ops = {
@@ -1437,4 +1497,6 @@ struct cfg80211_ops mac80211_config_ops = {
CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
.set_power_mgmt = ieee80211_set_power_mgmt,
.set_bitrate_mask = ieee80211_set_bitrate_mask,
+ .remain_on_channel = ieee80211_remain_on_channel,
+ .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e4b54093d41b..b3bc32b62a5a 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -158,6 +158,98 @@ static const struct file_operations noack_ops = {
.open = mac80211_open_file_generic
};
+static ssize_t uapsd_queues_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_queues);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t uapsd_queues_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ unsigned long val;
+ char buf[10];
+ size_t len;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &val);
+
+ if (ret)
+ return -EINVAL;
+
+ if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
+ return -ERANGE;
+
+ local->uapsd_queues = val;
+
+ return count;
+}
+
+static const struct file_operations uapsd_queues_ops = {
+ .read = uapsd_queues_read,
+ .write = uapsd_queues_write,
+ .open = mac80211_open_file_generic
+};
+
+static ssize_t uapsd_max_sp_len_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "0x%x\n", local->uapsd_max_sp_len);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t uapsd_max_sp_len_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ unsigned long val;
+ char buf[10];
+ size_t len;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &val);
+
+ if (ret)
+ return -EINVAL;
+
+ if (val & ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
+ return -ERANGE;
+
+ local->uapsd_max_sp_len = val;
+
+ return count;
+}
+
+static const struct file_operations uapsd_max_sp_len_ops = {
+ .read = uapsd_max_sp_len_read,
+ .write = uapsd_max_sp_len_write,
+ .open = mac80211_open_file_generic
+};
+
static ssize_t queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -314,6 +406,8 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(queues);
DEBUGFS_ADD_MODE(reset, 0200);
DEBUGFS_ADD(noack);
+ DEBUGFS_ADD(uapsd_queues);
+ DEBUGFS_ADD(uapsd_max_sp_len);
statsd = debugfs_create_dir("statistics", phyd);
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index e0f5224630da..d12e743cb4e1 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -56,7 +56,7 @@ KEY_CONF_FILE(keyidx, D);
KEY_CONF_FILE(hw_key_idx, D);
KEY_FILE(flags, X);
KEY_FILE(tx_rx_count, D);
-KEY_READ(ifindex, sdata->dev->ifindex, 20, "%d\n");
+KEY_READ(ifindex, sdata->name, IFNAMSIZ + 2, "%s\n");
KEY_OPS(ifindex);
static ssize_t key_algorithm_read(struct file *file,
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 472b2039906c..9affe2cd185f 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -41,6 +41,30 @@ static ssize_t ieee80211_if_read(
return ret;
}
+static ssize_t ieee80211_if_write(
+ struct ieee80211_sub_if_data *sdata,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos,
+ ssize_t (*write)(struct ieee80211_sub_if_data *, const char *, int))
+{
+ u8 *buf;
+ ssize_t ret = -ENODEV;
+
+ buf = kzalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ rtnl_lock();
+ if (sdata->dev->reg_state == NETREG_REGISTERED)
+ ret = (*write)(sdata, buf, count);
+ rtnl_unlock();
+
+ return ret;
+}
+
#define IEEE80211_IF_FMT(name, field, format_string) \
static ssize_t ieee80211_if_fmt_##name( \
const struct ieee80211_sub_if_data *sdata, char *buf, \
@@ -71,7 +95,7 @@ static ssize_t ieee80211_if_fmt_##name( \
return scnprintf(buf, buflen, "%pM\n", sdata->field); \
}
-#define __IEEE80211_IF_FILE(name) \
+#define __IEEE80211_IF_FILE(name, _write) \
static ssize_t ieee80211_if_read_##name(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
@@ -82,22 +106,99 @@ static ssize_t ieee80211_if_read_##name(struct file *file, \
} \
static const struct file_operations name##_ops = { \
.read = ieee80211_if_read_##name, \
+ .write = (_write), \
.open = mac80211_open_file_generic, \
}
+#define __IEEE80211_IF_FILE_W(name) \
+static ssize_t ieee80211_if_write_##name(struct file *file, \
+ const char __user *userbuf, \
+ size_t count, loff_t *ppos) \
+{ \
+ return ieee80211_if_write(file->private_data, userbuf, count, \
+ ppos, ieee80211_if_parse_##name); \
+} \
+__IEEE80211_IF_FILE(name, ieee80211_if_write_##name)
+
+
#define IEEE80211_IF_FILE(name, field, format) \
IEEE80211_IF_FMT_##format(name, field) \
- __IEEE80211_IF_FILE(name)
+ __IEEE80211_IF_FILE(name, NULL)
/* common attributes */
IEEE80211_IF_FILE(drop_unencrypted, drop_unencrypted, DEC);
-IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC);
-IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC);
+IEEE80211_IF_FILE(rc_rateidx_mask_2ghz, rc_rateidx_mask[IEEE80211_BAND_2GHZ],
+ HEX);
+IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
+ HEX);
/* STA attributes */
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
-IEEE80211_IF_FILE(capab, u.mgd.capab, HEX);
+
+static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode)
+{
+ struct ieee80211_local *local = sdata->local;
+ int err;
+
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_STATIC_SMPS) &&
+ smps_mode == IEEE80211_SMPS_STATIC)
+ return -EINVAL;
+
+ /* auto should be dynamic if in PS mode */
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS) &&
+ (smps_mode == IEEE80211_SMPS_DYNAMIC ||
+ smps_mode == IEEE80211_SMPS_AUTOMATIC))
+ return -EINVAL;
+
+ /* supported only on managed interfaces for now */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&local->iflist_mtx);
+ err = __ieee80211_request_smps(sdata, smps_mode);
+ mutex_unlock(&local->iflist_mtx);
+
+ return err;
+}
+
+static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
+ [IEEE80211_SMPS_AUTOMATIC] = "auto",
+ [IEEE80211_SMPS_OFF] = "off",
+ [IEEE80211_SMPS_STATIC] = "static",
+ [IEEE80211_SMPS_DYNAMIC] = "dynamic",
+};
+
+static ssize_t ieee80211_if_fmt_smps(const struct ieee80211_sub_if_data *sdata,
+ char *buf, int buflen)
+{
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ return snprintf(buf, buflen, "request: %s\nused: %s\n",
+ smps_modes[sdata->u.mgd.req_smps],
+ smps_modes[sdata->u.mgd.ap_smps]);
+}
+
+static ssize_t ieee80211_if_parse_smps(struct ieee80211_sub_if_data *sdata,
+ const char *buf, int buflen)
+{
+ enum ieee80211_smps_mode mode;
+
+ for (mode = 0; mode < IEEE80211_SMPS_NUM_MODES; mode++) {
+ if (strncmp(buf, smps_modes[mode], buflen) == 0) {
+ int err = ieee80211_set_smps(sdata, mode);
+ if (!err)
+ return buflen;
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+
+__IEEE80211_IF_FILE_W(smps);
/* AP attributes */
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -109,7 +210,7 @@ static ssize_t ieee80211_if_fmt_num_buffered_multicast(
return scnprintf(buf, buflen, "%u\n",
skb_queue_len(&sdata->u.ap.ps_bc_buf));
}
-__IEEE80211_IF_FILE(num_buffered_multicast);
+__IEEE80211_IF_FILE(num_buffered_multicast, NULL);
/* WDS attributes */
IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
@@ -154,46 +255,50 @@ IEEE80211_IF_FILE(dot11MeshHWMPRootMode,
#endif
-#define DEBUGFS_ADD(name, type) \
+#define DEBUGFS_ADD(name) \
debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
sdata, &name##_ops);
+#define DEBUGFS_ADD_MODE(name, mode) \
+ debugfs_create_file(#name, mode, sdata->debugfs.dir, \
+ sdata, &name##_ops);
+
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, sta);
- DEBUGFS_ADD(force_unicast_rateidx, sta);
- DEBUGFS_ADD(max_ratectrl_rateidx, sta);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(bssid, sta);
- DEBUGFS_ADD(aid, sta);
- DEBUGFS_ADD(capab, sta);
+ DEBUGFS_ADD(bssid);
+ DEBUGFS_ADD(aid);
+ DEBUGFS_ADD_MODE(smps, 0600);
}
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, ap);
- DEBUGFS_ADD(force_unicast_rateidx, ap);
- DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(num_sta_ps, ap);
- DEBUGFS_ADD(dtim_count, ap);
- DEBUGFS_ADD(num_buffered_multicast, ap);
+ DEBUGFS_ADD(num_sta_ps);
+ DEBUGFS_ADD(dtim_count);
+ DEBUGFS_ADD(num_buffered_multicast);
}
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, wds);
- DEBUGFS_ADD(force_unicast_rateidx, wds);
- DEBUGFS_ADD(max_ratectrl_rateidx, wds);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
- DEBUGFS_ADD(peer, wds);
+ DEBUGFS_ADD(peer);
}
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
- DEBUGFS_ADD(drop_unencrypted, vlan);
- DEBUGFS_ADD(force_unicast_rateidx, vlan);
- DEBUGFS_ADD(max_ratectrl_rateidx, vlan);
+ DEBUGFS_ADD(drop_unencrypted);
+ DEBUGFS_ADD(rc_rateidx_mask_2ghz);
+ DEBUGFS_ADD(rc_rateidx_mask_5ghz);
}
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -280,16 +385,11 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
}
}
-static int notif_registered;
-
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata)
{
char buf[10+IFNAMSIZ];
- if (!notif_registered)
- return;
-
- sprintf(buf, "netdev:%s", sdata->dev->name);
+ sprintf(buf, "netdev:%s", sdata->name);
sdata->debugfs.dir = debugfs_create_dir(buf,
sdata->local->hw.wiphy->debugfsdir);
add_files(sdata);
@@ -304,58 +404,18 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
sdata->debugfs.dir = NULL;
}
-static int netdev_notify(struct notifier_block *nb,
- unsigned long state,
- void *ndev)
+void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
{
- struct net_device *dev = ndev;
struct dentry *dir;
- struct ieee80211_sub_if_data *sdata;
- char buf[10+IFNAMSIZ];
-
- if (state != NETDEV_CHANGENAME)
- return 0;
-
- if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
- return 0;
-
- if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
- return 0;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ char buf[10 + IFNAMSIZ];
dir = sdata->debugfs.dir;
if (!dir)
- return 0;
+ return;
- sprintf(buf, "netdev:%s", dev->name);
+ sprintf(buf, "netdev:%s", sdata->name);
if (!debugfs_rename(dir->d_parent, dir, dir->d_parent, buf))
printk(KERN_ERR "mac80211: debugfs: failed to rename debugfs "
"dir to %s\n", buf);
-
- return 0;
-}
-
-static struct notifier_block mac80211_debugfs_netdev_notifier = {
- .notifier_call = netdev_notify,
-};
-
-void ieee80211_debugfs_netdev_init(void)
-{
- int err;
-
- err = register_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
- if (err) {
- printk(KERN_ERR
- "mac80211: failed to install netdev notifier,"
- " disabling per-netdev debugfs!\n");
- } else
- notif_registered = 1;
-}
-
-void ieee80211_debugfs_netdev_exit(void)
-{
- unregister_netdevice_notifier(&mac80211_debugfs_netdev_notifier);
- notif_registered = 0;
}
diff --git a/net/mac80211/debugfs_netdev.h b/net/mac80211/debugfs_netdev.h
index 7af731f0b731..79025e79f4d6 100644
--- a/net/mac80211/debugfs_netdev.h
+++ b/net/mac80211/debugfs_netdev.h
@@ -6,8 +6,7 @@
#ifdef CONFIG_MAC80211_DEBUGFS
void ieee80211_debugfs_add_netdev(struct ieee80211_sub_if_data *sdata);
void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_netdev_init(void);
-void ieee80211_debugfs_netdev_exit(void);
+void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata);
#else
static inline void ieee80211_debugfs_add_netdev(
struct ieee80211_sub_if_data *sdata)
@@ -15,10 +14,8 @@ static inline void ieee80211_debugfs_add_netdev(
static inline void ieee80211_debugfs_remove_netdev(
struct ieee80211_sub_if_data *sdata)
{}
-static inline void ieee80211_debugfs_netdev_init(void)
-{}
-
-static inline void ieee80211_debugfs_netdev_exit(void)
+static inline void ieee80211_debugfs_rename_netdev(
+ struct ieee80211_sub_if_data *sdata)
{}
#endif
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 3f41608c8081..0d4a759ba72c 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -44,7 +44,7 @@ static const struct file_operations sta_ ##name## _ops = { \
STA_OPS(name)
STA_FILE(aid, sta.aid, D);
-STA_FILE(dev, sdata->dev->name, S);
+STA_FILE(dev, sdata->name, S);
STA_FILE(rx_packets, rx_packets, LU);
STA_FILE(tx_packets, tx_packets, LU);
STA_FILE(rx_bytes, rx_bytes, LU);
@@ -160,7 +160,12 @@ STA_OPS(agg_status);
static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[200], *p = buf;
+#define PRINT_HT_CAP(_cond, _str) \
+ do { \
+ if (_cond) \
+ p += scnprintf(p, sizeof(buf)+buf-p, "\t" _str "\n"); \
+ } while (0)
+ char buf[1024], *p = buf;
int i;
struct sta_info *sta = file->private_data;
struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
@@ -168,15 +173,64 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
htc->ht_supported ? "" : "not ");
if (htc->ht_supported) {
- p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.2x\n", htc->cap);
+ p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
+
+ PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP");
+ PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
+ PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
+
+ PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 0, "Static SM Power Save");
+ PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 1, "Dynamic SM Power Save");
+ PRINT_HT_CAP(((htc->cap >> 2) & 0x3) == 3, "SM Power Save disabled");
+
+ PRINT_HT_CAP((htc->cap & BIT(4)), "RX Greenfield");
+ PRINT_HT_CAP((htc->cap & BIT(5)), "RX HT20 SGI");
+ PRINT_HT_CAP((htc->cap & BIT(6)), "RX HT40 SGI");
+ PRINT_HT_CAP((htc->cap & BIT(7)), "TX STBC");
+
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 0, "No RX STBC");
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 1, "RX STBC 1-stream");
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 2, "RX STBC 2-streams");
+ PRINT_HT_CAP(((htc->cap >> 8) & 0x3) == 3, "RX STBC 3-streams");
+
+ PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
+
+ PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
+ "3839 bytes");
+ PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+ "7935 bytes");
+
+ /*
+ * For beacons and probe response this would mean the BSS
+ * does or does not allow the usage of DSSS/CCK HT40.
+ * Otherwise it means the STA does or does not use
+ * DSSS/CCK HT40.
+ */
+ PRINT_HT_CAP((htc->cap & BIT(12)), "DSSS/CCK HT40");
+ PRINT_HT_CAP(!(htc->cap & BIT(12)), "No DSSS/CCK HT40");
+
+ /* BIT(13) is reserved */
+
+ PRINT_HT_CAP((htc->cap & BIT(14)), "40 MHz Intolerant");
+
+ PRINT_HT_CAP((htc->cap & BIT(15)), "L-SIG TXOP protection");
+
p += scnprintf(p, sizeof(buf)+buf-p, "ampdu factor/density: %d/%d\n",
htc->ampdu_factor, htc->ampdu_density);
p += scnprintf(p, sizeof(buf)+buf-p, "MCS mask:");
+
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
p += scnprintf(p, sizeof(buf)+buf-p, " %.2x",
htc->mcs.rx_mask[i]);
- p += scnprintf(p, sizeof(buf)+buf-p, "\nMCS rx highest: %d\n",
- le16_to_cpu(htc->mcs.rx_highest));
+ p += scnprintf(p, sizeof(buf)+buf-p, "\n");
+
+ /* If not set this is meaningless */
+ if (le16_to_cpu(htc->mcs.rx_highest)) {
+ p += scnprintf(p, sizeof(buf)+buf-p,
+ "MCS rx highest: %d Mbps\n",
+ le16_to_cpu(htc->mcs.rx_highest));
+ }
+
p += scnprintf(p, sizeof(buf)+buf-p, "MCS tx params: %x\n",
htc->mcs.tx_params);
}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 921dd9c9ff62..de91d39e0276 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -14,6 +14,8 @@ static inline int drv_start(struct ieee80211_local *local)
{
int ret;
+ might_sleep();
+
local->started = true;
smp_mb();
ret = local->ops->start(&local->hw);
@@ -23,6 +25,8 @@ static inline int drv_start(struct ieee80211_local *local)
static inline void drv_stop(struct ieee80211_local *local)
{
+ might_sleep();
+
local->ops->stop(&local->hw);
trace_drv_stop(local);
@@ -36,35 +40,47 @@ static inline void drv_stop(struct ieee80211_local *local)
}
static inline int drv_add_interface(struct ieee80211_local *local,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
- int ret = local->ops->add_interface(&local->hw, conf);
- trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->add_interface(&local->hw, vif);
+ trace_drv_add_interface(local, vif_to_sdata(vif), ret);
return ret;
}
static inline void drv_remove_interface(struct ieee80211_local *local,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
- local->ops->remove_interface(&local->hw, conf);
- trace_drv_remove_interface(local, conf->mac_addr, conf->vif);
+ might_sleep();
+
+ local->ops->remove_interface(&local->hw, vif);
+ trace_drv_remove_interface(local, vif_to_sdata(vif));
}
static inline int drv_config(struct ieee80211_local *local, u32 changed)
{
- int ret = local->ops->config(&local->hw, changed);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->config(&local->hw, changed);
trace_drv_config(local, changed, ret);
return ret;
}
static inline void drv_bss_info_changed(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *info,
u32 changed)
{
+ might_sleep();
+
if (local->ops->bss_info_changed)
- local->ops->bss_info_changed(&local->hw, vif, info, changed);
- trace_drv_bss_info_changed(local, vif, info, changed);
+ local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed);
+ trace_drv_bss_info_changed(local, sdata, info, changed);
}
static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
@@ -106,12 +122,17 @@ static inline int drv_set_tim(struct ieee80211_local *local,
}
static inline int drv_set_key(struct ieee80211_local *local,
- enum set_key_cmd cmd, struct ieee80211_vif *vif,
+ enum set_key_cmd cmd,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key);
- trace_drv_set_key(local, cmd, vif, sta, key, ret);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key);
+ trace_drv_set_key(local, cmd, sdata, sta, key, ret);
return ret;
}
@@ -120,6 +141,8 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
const u8 *address, u32 iv32,
u16 *phase1key)
{
+ might_sleep();
+
if (local->ops->update_tkip_key)
local->ops->update_tkip_key(&local->hw, conf, address,
iv32, phase1key);
@@ -129,13 +152,19 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
static inline int drv_hw_scan(struct ieee80211_local *local,
struct cfg80211_scan_request *req)
{
- int ret = local->ops->hw_scan(&local->hw, req);
+ int ret;
+
+ might_sleep();
+
+ ret = local->ops->hw_scan(&local->hw, req);
trace_drv_hw_scan(local, req, ret);
return ret;
}
static inline void drv_sw_scan_start(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->sw_scan_start)
local->ops->sw_scan_start(&local->hw);
trace_drv_sw_scan_start(local);
@@ -143,6 +172,8 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local)
static inline void drv_sw_scan_complete(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->sw_scan_complete)
local->ops->sw_scan_complete(&local->hw);
trace_drv_sw_scan_complete(local);
@@ -153,6 +184,8 @@ static inline int drv_get_stats(struct ieee80211_local *local,
{
int ret = -EOPNOTSUPP;
+ might_sleep();
+
if (local->ops->get_stats)
ret = local->ops->get_stats(&local->hw, stats);
trace_drv_get_stats(local, stats, ret);
@@ -172,26 +205,47 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local,
u32 value)
{
int ret = 0;
+
+ might_sleep();
+
if (local->ops->set_rts_threshold)
ret = local->ops->set_rts_threshold(&local->hw, value);
trace_drv_set_rts_threshold(local, value, ret);
return ret;
}
+static inline int drv_set_coverage_class(struct ieee80211_local *local,
+ u8 value)
+{
+ int ret = 0;
+ might_sleep();
+
+ if (local->ops->set_coverage_class)
+ local->ops->set_coverage_class(&local->hw, value);
+ else
+ ret = -EOPNOTSUPP;
+
+ trace_drv_set_coverage_class(local, value, ret);
+ return ret;
+}
+
static inline void drv_sta_notify(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
if (local->ops->sta_notify)
- local->ops->sta_notify(&local->hw, vif, cmd, sta);
- trace_drv_sta_notify(local, vif, cmd, sta);
+ local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta);
+ trace_drv_sta_notify(local, sdata, cmd, sta);
}
static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
if (local->ops->conf_tx)
ret = local->ops->conf_tx(&local->hw, queue, params);
trace_drv_conf_tx(local, queue, params, ret);
@@ -209,6 +263,9 @@ static inline int drv_get_tx_stats(struct ieee80211_local *local,
static inline u64 drv_get_tsf(struct ieee80211_local *local)
{
u64 ret = -1ULL;
+
+ might_sleep();
+
if (local->ops->get_tsf)
ret = local->ops->get_tsf(&local->hw);
trace_drv_get_tsf(local, ret);
@@ -217,6 +274,8 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local)
static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
{
+ might_sleep();
+
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, tsf);
trace_drv_set_tsf(local, tsf);
@@ -224,6 +283,8 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
static inline void drv_reset_tsf(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw);
trace_drv_reset_tsf(local);
@@ -232,6 +293,9 @@ static inline void drv_reset_tsf(struct ieee80211_local *local)
static inline int drv_tx_last_beacon(struct ieee80211_local *local)
{
int ret = 1;
+
+ might_sleep();
+
if (local->ops->tx_last_beacon)
ret = local->ops->tx_last_beacon(&local->hw);
trace_drv_tx_last_beacon(local, ret);
@@ -239,23 +303,34 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
}
static inline int drv_ampdu_action(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn)
{
int ret = -EOPNOTSUPP;
if (local->ops->ampdu_action)
- ret = local->ops->ampdu_action(&local->hw, vif, action,
+ ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
sta, tid, ssn);
- trace_drv_ampdu_action(local, vif, action, sta, tid, ssn, ret);
+ trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret);
return ret;
}
static inline void drv_rfkill_poll(struct ieee80211_local *local)
{
+ might_sleep();
+
if (local->ops->rfkill_poll)
local->ops->rfkill_poll(&local->hw);
}
+
+static inline void drv_flush(struct ieee80211_local *local, bool drop)
+{
+ might_sleep();
+
+ trace_drv_flush(local, drop);
+ if (local->ops->flush)
+ local->ops->flush(&local->hw, drop);
+}
#endif /* __MAC80211_DRIVER_OPS */
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index ee94ea0c67e9..0ea258123b8e 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -25,10 +25,12 @@ static inline void trace_ ## name(proto) {}
#define STA_PR_FMT " sta:%pM"
#define STA_PR_ARG __entry->sta_addr
-#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif)
-#define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif
-#define VIF_PR_FMT " vif:%p(%d)"
-#define VIF_PR_ARG __entry->vif, __entry->vif_type
+#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, sdata) \
+ __string(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+#define VIF_ASSIGN __entry->vif_type = sdata->vif.type; __entry->sdata = sdata; \
+ __assign_str(vif_name, sdata->dev ? sdata->dev->name : "<nodev>")
+#define VIF_PR_FMT " vif:%s(%d)"
+#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
TRACE_EVENT(drv_start,
TP_PROTO(struct ieee80211_local *local, int ret),
@@ -70,11 +72,10 @@ TRACE_EVENT(drv_stop,
TRACE_EVENT(drv_add_interface,
TP_PROTO(struct ieee80211_local *local,
- const u8 *addr,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
int ret),
- TP_ARGS(local, addr, vif, ret),
+ TP_ARGS(local, sdata, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -86,7 +87,7 @@ TRACE_EVENT(drv_add_interface,
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->addr, addr, 6);
+ memcpy(__entry->addr, sdata->vif.addr, 6);
__entry->ret = ret;
),
@@ -97,10 +98,9 @@ TRACE_EVENT(drv_add_interface,
);
TRACE_EVENT(drv_remove_interface,
- TP_PROTO(struct ieee80211_local *local,
- const u8 *addr, struct ieee80211_vif *vif),
+ TP_PROTO(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata),
- TP_ARGS(local, addr, vif),
+ TP_ARGS(local, sdata),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -111,7 +111,7 @@ TRACE_EVENT(drv_remove_interface,
TP_fast_assign(
LOCAL_ASSIGN;
VIF_ASSIGN;
- memcpy(__entry->addr, addr, 6);
+ memcpy(__entry->addr, sdata->vif.addr, 6);
),
TP_printk(
@@ -140,6 +140,7 @@ TRACE_EVENT(drv_config,
__field(u8, short_frame_max_tx_count)
__field(int, center_freq)
__field(int, channel_type)
+ __field(int, smps)
),
TP_fast_assign(
@@ -155,6 +156,7 @@ TRACE_EVENT(drv_config,
__entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
__entry->center_freq = local->hw.conf.channel->center_freq;
__entry->channel_type = local->hw.conf.channel_type;
+ __entry->smps = local->hw.conf.smps_mode;
),
TP_printk(
@@ -165,11 +167,11 @@ TRACE_EVENT(drv_config,
TRACE_EVENT(drv_bss_info_changed,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss_conf *info,
u32 changed),
- TP_ARGS(local, vif, info, changed),
+ TP_ARGS(local, sdata, info, changed),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -293,11 +295,11 @@ TRACE_EVENT(drv_set_tim,
TRACE_EVENT(drv_set_key,
TP_PROTO(struct ieee80211_local *local,
- enum set_key_cmd cmd, struct ieee80211_vif *vif,
+ enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key, int ret),
- TP_ARGS(local, cmd, vif, sta, key, ret),
+ TP_ARGS(local, cmd, sdata, sta, key, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -489,13 +491,36 @@ TRACE_EVENT(drv_set_rts_threshold,
)
);
+TRACE_EVENT(drv_set_coverage_class,
+ TP_PROTO(struct ieee80211_local *local, u8 value, int ret),
+
+ TP_ARGS(local, value, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u8, value)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " value:%d ret:%d",
+ LOCAL_PR_ARG, __entry->value, __entry->ret
+ )
+);
+
TRACE_EVENT(drv_sta_notify,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta),
- TP_ARGS(local, vif, cmd, sta),
+ TP_ARGS(local, sdata, cmd, sta),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -656,12 +681,12 @@ TRACE_EVENT(drv_tx_last_beacon,
TRACE_EVENT(drv_ampdu_action,
TP_PROTO(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
+ struct ieee80211_sub_if_data *sdata,
enum ieee80211_ampdu_mlme_action action,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn, int ret),
- TP_ARGS(local, vif, action, sta, tid, ssn, ret),
+ TP_ARGS(local, sdata, action, sta, tid, ssn, ret),
TP_STRUCT__entry(
LOCAL_ENTRY
@@ -688,6 +713,27 @@ TRACE_EVENT(drv_ampdu_action,
LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
)
);
+
+TRACE_EVENT(drv_flush,
+ TP_PROTO(struct ieee80211_local *local, bool drop),
+
+ TP_ARGS(local, drop),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(bool, drop)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->drop = drop;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " drop:%d",
+ LOCAL_PR_ARG, __entry->drop
+ )
+);
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index d7dcee680728..bb677a73b7c9 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -125,7 +125,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer "
- "for delba frame\n", sdata->dev->name);
+ "for delba frame\n", sdata->name);
return;
}
@@ -133,10 +133,10 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
if (sdata->vif.type == NL80211_IFTYPE_AP ||
sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
else if (sdata->vif.type == NL80211_IFTYPE_STATION)
memcpy(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN);
@@ -185,3 +185,50 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
spin_unlock_bh(&sta->lock);
}
}
+
+int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps, const u8 *da,
+ const u8 *bssid)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *action_frame;
+
+ /* 27 = header + category + action + smps mode */
+ skb = dev_alloc_skb(27 + local->hw.extra_tx_headroom);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+ action_frame = (void *)skb_put(skb, 27);
+ memcpy(action_frame->da, da, ETH_ALEN);
+ memcpy(action_frame->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(action_frame->bssid, bssid, ETH_ALEN);
+ action_frame->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+ action_frame->u.action.category = WLAN_CATEGORY_HT;
+ action_frame->u.action.u.ht_smps.action = WLAN_HT_ACTION_SMPS;
+ switch (smps) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ case IEEE80211_SMPS_OFF:
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DISABLED;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ action_frame->u.action.u.ht_smps.smps_control =
+ WLAN_HT_SMPS_CONTROL_DYNAMIC;
+ break;
+ }
+
+ /* we'll do more on status of this frame */
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ ieee80211_tx_skb(sdata, skb);
+
+ return 0;
+}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 1f2db647bb5c..5bcde4c3fba1 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -117,7 +117,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
memset(mgmt->da, 0xff, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, ifibss->bssid, ETH_ALEN);
mgmt->u.beacon.beacon_int = cpu_to_le16(beacon_int);
mgmt->u.beacon.timestamp = cpu_to_le64(tsf);
@@ -187,15 +187,17 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct ieee80211_bss *bss)
{
+ struct cfg80211_bss *cbss =
+ container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_supported_band *sband;
u32 basic_rates;
int i, j;
- u16 beacon_int = bss->cbss.beacon_interval;
+ u16 beacon_int = cbss->beacon_interval;
if (beacon_int < 10)
beacon_int = 10;
- sband = sdata->local->hw.wiphy->bands[bss->cbss.channel->band];
+ sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
basic_rates = 0;
@@ -212,12 +214,12 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
}
- __ieee80211_sta_join_ibss(sdata, bss->cbss.bssid,
+ __ieee80211_sta_join_ibss(sdata, cbss->bssid,
beacon_int,
- bss->cbss.channel,
+ cbss->channel,
basic_rates,
- bss->cbss.capability,
- bss->cbss.tsf);
+ cbss->capability,
+ cbss->tsf);
}
static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
@@ -229,6 +231,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
int freq;
+ struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
struct sta_info *sta;
struct ieee80211_channel *channel;
@@ -252,7 +255,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
- sta = sta_info_get(local, mgmt->sa);
+ sta = sta_info_get(sdata, mgmt->sa);
if (sta) {
u32 prev_rates;
@@ -266,7 +269,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: updated supp_rates set "
"for %pM based on beacon info (0x%llx | "
"0x%llx -> 0x%llx)\n",
- sdata->dev->name,
+ sdata->name,
sta->sta.addr,
(unsigned long long) prev_rates,
(unsigned long long) supp_rates,
@@ -283,8 +286,10 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
if (!bss)
return;
+ cbss = container_of((void *)bss, struct cfg80211_bss, priv);
+
/* was just updated in ieee80211_bss_info_update */
- beacon_timestamp = bss->cbss.tsf;
+ beacon_timestamp = cbss->tsf;
/* check if we need to merge IBSS */
@@ -297,11 +302,11 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
goto put_bss;
/* not an IBSS */
- if (!(bss->cbss.capability & WLAN_CAPABILITY_IBSS))
+ if (!(cbss->capability & WLAN_CAPABILITY_IBSS))
goto put_bss;
/* different channel */
- if (bss->cbss.channel != local->oper_channel)
+ if (cbss->channel != local->oper_channel)
goto put_bss;
/* different SSID */
@@ -311,7 +316,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
goto put_bss;
/* same BSSID */
- if (memcmp(bss->cbss.bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
+ if (memcmp(cbss->bssid, sdata->u.ibss.bssid, ETH_ALEN) == 0)
goto put_bss;
if (rx_status->flag & RX_FLAG_TSFT) {
@@ -364,7 +369,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: beacon TSF higher than "
"local TSF - IBSS merge with BSSID %pM\n",
- sdata->dev->name, mgmt->bssid);
+ sdata->name, mgmt->bssid);
#endif
ieee80211_sta_join_ibss(sdata, bss);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, supp_rates);
@@ -394,7 +399,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
if (local->num_sta >= IEEE80211_IBSS_MAX_STA_ENTRIES) {
if (net_ratelimit())
printk(KERN_DEBUG "%s: No room for a new IBSS STA entry %pM\n",
- sdata->dev->name, addr);
+ sdata->name, addr);
return NULL;
}
@@ -406,7 +411,7 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: Adding new IBSS station %pM (dev=%s)\n",
- wiphy_name(local->hw.wiphy), addr, sdata->dev->name);
+ wiphy_name(local->hw.wiphy), addr, sdata->name);
#endif
sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -470,7 +475,7 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
return;
printk(KERN_DEBUG "%s: No active IBSS STAs - trying to scan for other "
- "IBSS networks with same SSID (merge)\n", sdata->dev->name);
+ "IBSS networks with same SSID (merge)\n", sdata->name);
ieee80211_request_internal_scan(sdata, ifibss->ssid, ifibss->ssid_len);
}
@@ -492,13 +497,13 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
* random number generator get different BSSID. */
get_random_bytes(bssid, ETH_ALEN);
for (i = 0; i < ETH_ALEN; i++)
- bssid[i] ^= sdata->dev->dev_addr[i];
+ bssid[i] ^= sdata->vif.addr[i];
bssid[0] &= ~0x01;
bssid[0] |= 0x02;
}
printk(KERN_DEBUG "%s: Creating new IBSS network, BSSID %pM\n",
- sdata->dev->name, bssid);
+ sdata->name, bssid);
sband = local->hw.wiphy->bands[ifibss->channel->band];
@@ -518,7 +523,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_bss *bss;
+ struct cfg80211_bss *cbss;
struct ieee80211_channel *chan = NULL;
const u8 *bssid = NULL;
int active_ibss;
@@ -527,7 +532,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
active_ibss = ieee80211_sta_active_ibss(sdata);
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: sta_find_ibss (active_ibss=%d)\n",
- sdata->dev->name, active_ibss);
+ sdata->name, active_ibss);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
if (active_ibss)
@@ -542,21 +547,23 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
chan = ifibss->channel;
if (!is_zero_ether_addr(ifibss->bssid))
bssid = ifibss->bssid;
- bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan, bssid,
- ifibss->ssid, ifibss->ssid_len,
- WLAN_CAPABILITY_IBSS |
- WLAN_CAPABILITY_PRIVACY,
- capability);
+ cbss = cfg80211_get_bss(local->hw.wiphy, chan, bssid,
+ ifibss->ssid, ifibss->ssid_len,
+ WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_PRIVACY,
+ capability);
+
+ if (cbss) {
+ struct ieee80211_bss *bss;
- if (bss) {
+ bss = (void *)cbss->priv;
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG " sta_find_ibss: selected %pM current "
- "%pM\n", bss->cbss.bssid, ifibss->bssid);
+ "%pM\n", cbss->bssid, ifibss->bssid);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
printk(KERN_DEBUG "%s: Selected IBSS BSSID %pM"
" based on configured SSID\n",
- sdata->dev->name, bss->cbss.bssid);
+ sdata->name, cbss->bssid);
ieee80211_sta_join_ibss(sdata, bss);
ieee80211_rx_bss_put(local, bss);
@@ -575,7 +582,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
} else if (time_after(jiffies, ifibss->last_scan_completed +
IEEE80211_SCAN_INTERVAL)) {
printk(KERN_DEBUG "%s: Trigger new scan to find an IBSS to "
- "join\n", sdata->dev->name);
+ "join\n", sdata->name);
ieee80211_request_internal_scan(sdata, ifibss->ssid,
ifibss->ssid_len);
@@ -589,7 +596,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
return;
}
printk(KERN_DEBUG "%s: IBSS not allowed on"
- " %d MHz\n", sdata->dev->name,
+ " %d MHz\n", sdata->name,
local->hw.conf.channel->center_freq);
/* No IBSS found - decrease scan interval and continue
@@ -623,7 +630,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: RX ProbeReq SA=%pM DA=%pM BSSID=%pM"
" (tx_last_beacon=%d)\n",
- sdata->dev->name, mgmt->sa, mgmt->da,
+ sdata->name, mgmt->sa, mgmt->da,
mgmt->bssid, tx_last_beacon);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
@@ -641,7 +648,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: Invalid SSID IE in ProbeReq "
"from %pM\n",
- sdata->dev->name, mgmt->sa);
+ sdata->name, mgmt->sa);
#endif
return;
}
@@ -661,7 +668,7 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
memcpy(resp->da, mgmt->sa, ETH_ALEN);
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: Sending ProbeResp to %pM\n",
- sdata->dev->name, resp->da);
+ sdata->name, resp->da);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
@@ -675,7 +682,7 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
size_t baselen;
struct ieee802_11_elems elems;
- if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+ if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -748,7 +755,7 @@ static void ieee80211_ibss_work(struct work_struct *work)
if (WARN_ON(local->suspended))
return;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (local->scanning)
@@ -831,7 +838,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
continue;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 91dc8636d644..c18f576f1848 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -58,6 +58,15 @@ struct ieee80211_local;
#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024))
+#define IEEE80211_DEFAULT_UAPSD_QUEUES \
+ (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE | \
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI | \
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+
+#define IEEE80211_DEFAULT_MAX_SP_LEN \
+ IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
+
struct ieee80211_fragment_entry {
unsigned long first_frag_time;
unsigned int seq;
@@ -71,9 +80,6 @@ struct ieee80211_fragment_entry {
struct ieee80211_bss {
- /* Yes, this is a hack */
- struct cfg80211_bss cbss;
-
/* don't want to look up all the time */
size_t ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -81,6 +87,7 @@ struct ieee80211_bss {
u8 dtim_period;
bool wmm_used;
+ bool uapsd_supported;
unsigned long last_probe_resp;
@@ -140,7 +147,6 @@ typedef unsigned __bitwise__ ieee80211_tx_result;
struct ieee80211_tx_data {
struct sk_buff *skb;
- struct net_device *dev;
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
@@ -228,31 +234,78 @@ struct mesh_preq_queue {
u8 flags;
};
-enum ieee80211_mgd_state {
- IEEE80211_MGD_STATE_IDLE,
- IEEE80211_MGD_STATE_PROBE,
- IEEE80211_MGD_STATE_AUTH,
- IEEE80211_MGD_STATE_ASSOC,
+enum ieee80211_work_type {
+ IEEE80211_WORK_ABORT,
+ IEEE80211_WORK_DIRECT_PROBE,
+ IEEE80211_WORK_AUTH,
+ IEEE80211_WORK_ASSOC,
+ IEEE80211_WORK_REMAIN_ON_CHANNEL,
+};
+
+/**
+ * enum work_done_result - indicates what to do after work was done
+ *
+ * @WORK_DONE_DESTROY: This work item is no longer needed, destroy.
+ * @WORK_DONE_REQUEUE: This work item was reset to be reused, and
+ * should be requeued.
+ */
+enum work_done_result {
+ WORK_DONE_DESTROY,
+ WORK_DONE_REQUEUE,
};
-struct ieee80211_mgd_work {
+struct ieee80211_work {
struct list_head list;
- struct ieee80211_bss *bss;
- int ie_len;
- u8 prev_bssid[ETH_ALEN];
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 ssid_len;
+
+ struct rcu_head rcu_head;
+
+ struct ieee80211_sub_if_data *sdata;
+
+ enum work_done_result (*done)(struct ieee80211_work *wk,
+ struct sk_buff *skb);
+
+ struct ieee80211_channel *chan;
+ enum nl80211_channel_type chan_type;
+
unsigned long timeout;
- enum ieee80211_mgd_state state;
- u16 auth_alg, auth_transaction;
+ enum ieee80211_work_type type;
- int tries;
+ u8 filter_ta[ETH_ALEN];
- u8 key[WLAN_KEY_LEN_WEP104];
- u8 key_len, key_idx;
+ bool started;
+
+ union {
+ struct {
+ int tries;
+ u16 algorithm, transaction;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 key[WLAN_KEY_LEN_WEP104];
+ u8 key_len, key_idx;
+ bool privacy;
+ } probe_auth;
+ struct {
+ struct cfg80211_bss *bss;
+ const u8 *supp_rates;
+ const u8 *ht_information_ie;
+ enum ieee80211_smps_mode smps;
+ int tries;
+ u16 capability;
+ u8 prev_bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ u8 supp_rates_len;
+ bool wmm_used, use_11n, uapsd_used;
+ } assoc;
+ struct {
+ u32 duration;
+ bool started;
+ } remain;
+ };
+ int ie_len;
/* must be last */
- u8 ie[0]; /* for auth or assoc frame, not probe */
+ u8 ie[0];
};
/* flags used in struct ieee80211_if_managed.flags */
@@ -260,15 +313,10 @@ enum ieee80211_sta_flags {
IEEE80211_STA_BEACON_POLL = BIT(0),
IEEE80211_STA_CONNECTION_POLL = BIT(1),
IEEE80211_STA_CONTROL_PORT = BIT(2),
- IEEE80211_STA_WMM_ENABLED = BIT(3),
IEEE80211_STA_DISABLE_11N = BIT(4),
IEEE80211_STA_CSA_RECEIVED = BIT(5),
IEEE80211_STA_MFP_ENABLED = BIT(6),
-};
-
-/* flags for MLME request */
-enum ieee80211_sta_request {
- IEEE80211_STA_REQ_SCAN,
+ IEEE80211_STA_UAPSD_ENABLED = BIT(7),
};
struct ieee80211_if_managed {
@@ -285,21 +333,18 @@ struct ieee80211_if_managed {
int probe_send_count;
struct mutex mtx;
- struct ieee80211_bss *associated;
- struct ieee80211_mgd_work *old_associate_work;
- struct list_head work_list;
+ struct cfg80211_bss *associated;
u8 bssid[ETH_ALEN];
u16 aid;
- u16 capab;
struct sk_buff_head skb_queue;
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
-
- unsigned long request;
+ enum ieee80211_smps_mode req_smps, /* requested smps mode */
+ ap_smps; /* smps mode AP thinks we're in */
unsigned int flags;
@@ -433,6 +478,8 @@ struct ieee80211_sub_if_data {
int drop_unencrypted;
+ char name[IFNAMSIZ];
+
/*
* keep track of whether the HT opmode (stored in
* vif.bss_info.ht_operation_mode) is valid.
@@ -458,8 +505,8 @@ struct ieee80211_sub_if_data {
*/
struct ieee80211_if_ap *bss;
- int force_unicast_rateidx; /* forced TX rateidx for unicast frames */
- int max_ratectrl_rateidx; /* max TX rateidx for rate control */
+ /* bitmap of allowed (non-MCS) rate indexes for rate control */
+ u32 rc_rateidx_mask[IEEE80211_NUM_BANDS];
union {
struct ieee80211_if_ap ap;
@@ -565,6 +612,15 @@ struct ieee80211_local {
const struct ieee80211_ops *ops;
/*
+ * work stuff, potentially off-channel (in the future)
+ */
+ struct mutex work_mtx;
+ struct list_head work_list;
+ struct timer_list work_timer;
+ struct work_struct work_work;
+ struct sk_buff_head work_skb_queue;
+
+ /*
* private workqueue to mac80211. mac80211 makes this accessible
* via ieee80211_queue_work()
*/
@@ -586,6 +642,9 @@ struct ieee80211_local {
/* used for uploading changed mc list */
struct work_struct reconfig_filter;
+ /* used to reconfigure hardware SM PS */
+ struct work_struct recalc_smps;
+
/* aggregated multicast list */
struct dev_addr_list *mc_list;
int mc_count;
@@ -689,6 +748,10 @@ struct ieee80211_local {
enum nl80211_channel_type oper_channel_type;
struct ieee80211_channel *oper_channel, *csa_channel;
+ /* Temporary remain-on-channel for off-channel operations */
+ struct ieee80211_channel *tmp_channel;
+ enum nl80211_channel_type tmp_channel_type;
+
/* SNMP counters */
/* dot11CountersTable */
u32 dot11TransmittedFragmentCount;
@@ -745,8 +808,22 @@ struct ieee80211_local {
int wifi_wme_noack_test;
unsigned int wmm_acm; /* bit field of ACM bits (BIT(802.1D tag)) */
+ /*
+ * Bitmask of enabled u-apsd queues,
+ * IEEE80211_WMM_IE_STA_QOSINFO_AC_BE & co. Needs a new association
+ * to take effect.
+ */
+ unsigned int uapsd_queues;
+
+ /*
+ * Maximum number of buffered frames AP can deliver during a
+ * service period, IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL or similar.
+ * Needs a new association to take effect.
+ */
+ unsigned int uapsd_max_sp_len;
+
bool pspolling;
- bool scan_ps_enabled;
+ bool offchannel_ps_enabled;
/*
* PS can only be enabled when we have exactly one managed
* interface (and monitors) in PS, this then points there.
@@ -760,6 +837,8 @@ struct ieee80211_local {
int user_power_level; /* in dBm */
int power_constr_level; /* in dBm */
+ enum ieee80211_smps_mode smps_mode;
+
struct work_struct restart_work;
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -874,6 +953,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
+extern bool ieee80211_disable_40mhz_24ghz;
+
/* STA code */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
@@ -937,7 +1018,15 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
+/* off-channel helpers */
+void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local);
+void ieee80211_offchannel_stop_station(struct ieee80211_local *local);
+void ieee80211_offchannel_return(struct ieee80211_local *local,
+ bool enable_beaconing);
+
/* interface handling */
+int ieee80211_iface_init(void);
+void ieee80211_iface_exit(void);
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct net_device **new_dev, enum nl80211_iftype type,
struct vif_params *params);
@@ -948,6 +1037,11 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local);
u32 __ieee80211_recalc_idle(struct ieee80211_local *local);
void ieee80211_recalc_idle(struct ieee80211_local *local);
+static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
+{
+ return netif_running(sdata->dev);
+}
+
/* tx handling */
void ieee80211_clear_tx_pending(struct ieee80211_local *local);
void ieee80211_tx_pending(unsigned long data);
@@ -976,6 +1070,9 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
const u8 *da, u16 tid,
u16 initiator, u16 reason_code);
+int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps, const u8 *da,
+ const u8 *bssid);
void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
u16 tid, u16 initiator, u16 reason);
@@ -1086,6 +1183,28 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
struct ieee802_11_elems *elems,
enum ieee80211_band band);
+int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
+ enum ieee80211_smps_mode smps_mode);
+void ieee80211_recalc_smps(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *forsdata);
+
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset);
+size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
+
+/* internal work items */
+void ieee80211_work_init(struct ieee80211_local *local);
+void ieee80211_add_work(struct ieee80211_work *wk);
+void free_work(struct ieee80211_work *wk);
+void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata);
+ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
+int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie);
+int ieee80211_wk_cancel_remain_on_channel(
+ struct ieee80211_sub_if_data *sdata, u64 cookie);
#ifdef CONFIG_MAC80211_NOINLINE
#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 80c16f6e2af6..edf21cebeee8 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -15,12 +15,14 @@
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
#include "ieee80211_i.h"
#include "sta_info.h"
#include "debugfs_netdev.h"
#include "mesh.h"
#include "led.h"
#include "driver-ops.h"
+#include "wme.h"
/**
* DOC: Interface list locking
@@ -60,6 +62,23 @@ static int ieee80211_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int ieee80211_change_mac(struct net_device *dev, void *addr)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct sockaddr *sa = addr;
+ int ret;
+
+ if (ieee80211_sdata_running(sdata))
+ return -EBUSY;
+
+ ret = eth_mac_addr(dev, sa);
+
+ if (ret == 0)
+ memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN);
+
+ return ret;
+}
+
static inline int identical_mac_addr_allowed(int type1, int type2)
{
return type1 == NL80211_IFTYPE_MONITOR ||
@@ -80,7 +99,6 @@ static int ieee80211_open(struct net_device *dev)
struct ieee80211_sub_if_data *nsdata;
struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
- struct ieee80211_if_init_conf conf;
u32 changed = 0;
int res;
u32 hw_reconf_flags = 0;
@@ -95,7 +113,7 @@ static int ieee80211_open(struct net_device *dev)
list_for_each_entry(nsdata, &local->interfaces, list) {
struct net_device *ndev = nsdata->dev;
- if (ndev != dev && netif_running(ndev)) {
+ if (ndev != dev && ieee80211_sdata_running(nsdata)) {
/*
* Allow only a single IBSS interface to be up at any
* time. This is restricted because beacon distribution
@@ -181,7 +199,7 @@ static int ieee80211_open(struct net_device *dev)
struct net_device *ndev = nsdata->dev;
/*
- * No need to check netif_running since we do not allow
+ * No need to check running since we do not allow
* it to start up with this invalid address.
*/
if (compare_ether_addr(null_addr, ndev->dev_addr) == 0) {
@@ -232,10 +250,7 @@ static int ieee80211_open(struct net_device *dev)
ieee80211_configure_filter(local);
break;
default:
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = dev->dev_addr;
- res = drv_add_interface(local, &conf);
+ res = drv_add_interface(local, &sdata->vif);
if (res)
goto err_stop;
@@ -314,11 +329,11 @@ static int ieee80211_open(struct net_device *dev)
if (sdata->vif.type == NL80211_IFTYPE_STATION)
ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
err_del_interface:
- drv_remove_interface(local, &conf);
+ drv_remove_interface(local, &sdata->vif);
err_stop:
if (!local->open_count)
drv_stop(local);
@@ -333,7 +348,6 @@ static int ieee80211_stop(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_init_conf conf;
struct sta_info *sta;
unsigned long flags;
struct sk_buff *skb, *tmp;
@@ -343,7 +357,12 @@ static int ieee80211_stop(struct net_device *dev)
/*
* Stop TX on this interface first.
*/
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
+
+ /*
+ * Purge work for this interface.
+ */
+ ieee80211_work_purge(sdata);
/*
* Now delete all active aggregation sessions.
@@ -512,12 +531,9 @@ static int ieee80211_stop(struct net_device *dev)
BSS_CHANGED_BEACON_ENABLED);
}
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = dev->dev_addr;
/* disable all keys for as long as this netdev is down */
ieee80211_disable_keys(sdata);
- drv_remove_interface(local, &conf);
+ drv_remove_interface(local, &sdata->vif);
}
sdata->bss = NULL;
@@ -644,6 +660,12 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
WARN_ON(flushed);
}
+static u16 ieee80211_netdev_select_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
+}
+
static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
@@ -651,9 +673,39 @@ static const struct net_device_ops ieee80211_dataif_ops = {
.ndo_start_xmit = ieee80211_subif_start_xmit,
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ieee80211_change_mac,
+ .ndo_select_queue = ieee80211_netdev_select_queue,
};
+static u16 ieee80211_monitor_select_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_radiotap_header *rtap = (void *)skb->data;
+ u8 *p;
+
+ if (local->hw.queues < 4)
+ return 0;
+
+ if (skb->len < 4 ||
+ skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */)
+ return 0; /* doesn't matter, frame will be dropped */
+
+ hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len));
+
+ if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ skb->priority = 7;
+ return ieee802_1d_to_ac[skb->priority];
+ }
+
+ p = ieee80211_get_qos_ctl(hdr);
+ skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK;
+
+ return ieee80211_downgrade_queue(local, skb);
+}
+
static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_open = ieee80211_open,
.ndo_stop = ieee80211_stop,
@@ -662,6 +714,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = {
.ndo_set_multicast_list = ieee80211_set_multicast_list,
.ndo_change_mtu = ieee80211_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_select_queue = ieee80211_monitor_select_queue,
};
static void ieee80211_if_setup(struct net_device *dev)
@@ -740,7 +793,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
* and goes into the requested mode.
*/
- if (netif_running(sdata->dev))
+ if (ieee80211_sdata_running(sdata))
return -EBUSY;
/* Purge and reset type-dependent state. */
@@ -768,8 +821,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
ASSERT_RTNL();
- ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size,
- name, ieee80211_if_setup);
+ ndev = alloc_netdev_mq(sizeof(*sdata) + local->hw.vif_data_size,
+ name, ieee80211_if_setup, local->hw.queues);
if (!ndev)
return -ENOMEM;
dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -794,6 +847,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
sdata = netdev_priv(ndev);
ndev->ieee80211_ptr = &sdata->wdev;
+ memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN);
+ memcpy(sdata->name, ndev->name, IFNAMSIZ);
/* initialise type-independent data */
sdata->wdev.wiphy = local->hw.wiphy;
@@ -805,8 +860,12 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
INIT_LIST_HEAD(&sdata->key_list);
- sdata->force_unicast_rateidx = -1;
- sdata->max_ratectrl_rateidx = -1;
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ struct ieee80211_supported_band *sband;
+ sband = local->hw.wiphy->bands[i];
+ sdata->rc_rateidx_mask[i] =
+ sband ? (1 << sband->n_bitrates) - 1 : 0;
+ }
/* setup type-dependent data */
ieee80211_setup_sdata(sdata, type);
@@ -899,6 +958,8 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)
wiphy_name(local->hw.wiphy));
#endif
+ drv_flush(local, false);
+
local->hw.conf.flags |= IEEE80211_CONF_IDLE;
return IEEE80211_CONF_CHANGE_IDLE;
}
@@ -908,16 +969,18 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int count = 0;
+ if (!list_empty(&local->work_list))
+ return ieee80211_idle_off(local, "working");
+
if (local->scanning)
return ieee80211_idle_off(local, "scanning");
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
/* do not count disabled managed interfaces */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- !sdata->u.mgd.associated &&
- list_empty(&sdata->u.mgd.work_list))
+ !sdata->u.mgd.associated)
continue;
/* do not count unused IBSS interfaces */
if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
@@ -945,3 +1008,41 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)
if (chg)
ieee80211_hw_config(local, chg);
}
+
+static int netdev_notify(struct notifier_block *nb,
+ unsigned long state,
+ void *ndev)
+{
+ struct net_device *dev = ndev;
+ struct ieee80211_sub_if_data *sdata;
+
+ if (state != NETDEV_CHANGENAME)
+ return 0;
+
+ if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy)
+ return 0;
+
+ if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid)
+ return 0;
+
+ sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ memcpy(sdata->name, sdata->name, IFNAMSIZ);
+
+ ieee80211_debugfs_rename_netdev(sdata);
+ return 0;
+}
+
+static struct notifier_block mac80211_netdev_notifier = {
+ .notifier_call = netdev_notify,
+};
+
+int ieee80211_iface_init(void)
+{
+ return register_netdevice_notifier(&mac80211_netdev_notifier);
+}
+
+void ieee80211_iface_exit(void)
+{
+ unregister_netdevice_notifier(&mac80211_netdev_notifier);
+}
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 659a42d529e3..8160d9c5372e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -139,7 +139,7 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sub_if_data,
u.ap);
- ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf);
+ ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf);
if (!ret) {
spin_lock_bh(&todo_lock);
@@ -181,7 +181,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
struct ieee80211_sub_if_data,
u.ap);
- ret = drv_set_key(key->local, DISABLE_KEY, &sdata->vif,
+ ret = drv_set_key(key->local, DISABLE_KEY, sdata,
sta, &key->conf);
if (ret)
@@ -421,7 +421,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
*/
/* same here, the AP could be using QoS */
- ap = sta_info_get(key->local, key->sdata->u.mgd.bssid);
+ ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
if (ap) {
if (test_sta_flags(ap, WLAN_STA_WME))
key->conf.flags |=
@@ -443,7 +443,7 @@ void ieee80211_key_link(struct ieee80211_key *key,
add_todo(old_key, KEY_FLAG_TODO_DELETE);
add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
- if (netif_running(sdata->dev))
+ if (ieee80211_sdata_running(sdata))
add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
spin_unlock_irqrestore(&sdata->local->key_lock, flags);
@@ -509,7 +509,7 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
{
ASSERT_RTNL();
- if (WARN_ON(!netif_running(sdata->dev)))
+ if (WARN_ON(!ieee80211_sdata_running(sdata)))
return;
ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD);
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index a49f93b79e92..bdc2968c2bbe 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -59,11 +59,17 @@ enum ieee80211_internal_key_flags {
KEY_FLAG_TODO_DEFMGMTKEY = BIT(6),
};
+enum ieee80211_internal_tkip_state {
+ TKIP_STATE_NOT_INIT,
+ TKIP_STATE_PHASE1_DONE,
+ TKIP_STATE_PHASE1_HW_UPLOADED,
+};
+
struct tkip_ctx {
u32 iv32;
u16 iv16;
u16 p1k[5];
- int initialized;
+ enum ieee80211_internal_tkip_state state;
};
struct ieee80211_key {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 0d2d94881f1f..ec8f767ba95b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -17,7 +17,6 @@
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
-#include <linux/wireless.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <linux/pm_qos_params.h>
@@ -32,7 +31,12 @@
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
-#include "debugfs_netdev.h"
+
+
+bool ieee80211_disable_40mhz_24ghz;
+module_param(ieee80211_disable_40mhz_24ghz, bool, 0644);
+MODULE_PARM_DESC(ieee80211_disable_40mhz_24ghz,
+ "Disable 40MHz support in the 2.4GHz band");
void ieee80211_configure_filter(struct ieee80211_local *local)
{
@@ -102,6 +106,9 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
if (scan_chan) {
chan = scan_chan;
channel_type = NL80211_CHAN_NO_HT;
+ } else if (local->tmp_channel) {
+ chan = scan_chan = local->tmp_channel;
+ channel_type = local->tmp_channel_type;
} else {
chan = local->oper_channel;
channel_type = local->oper_channel_type;
@@ -114,6 +121,18 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
changed |= IEEE80211_CONF_CHANGE_CHANNEL;
}
+ if (!conf_is_ht(&local->hw.conf)) {
+ /*
+ * mac80211.h documents that this is only valid
+ * when the channel is set to an HT type, and
+ * that otherwise STATIC is used.
+ */
+ local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
+ } else if (local->hw.conf.smps_mode != local->smps_mode) {
+ local->hw.conf.smps_mode = local->smps_mode;
+ changed |= IEEE80211_CONF_CHANGE_SMPS;
+ }
+
if (scan_chan)
power = chan->max_power;
else
@@ -173,7 +192,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
else if (sdata->vif.type == NL80211_IFTYPE_AP)
- sdata->vif.bss_conf.bssid = sdata->dev->dev_addr;
+ sdata->vif.bss_conf.bssid = sdata->vif.addr;
else if (ieee80211_vif_is_mesh(&sdata->vif)) {
sdata->vif.bss_conf.bssid = zero;
} else {
@@ -195,7 +214,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (local->quiescing || !netif_running(sdata->dev) ||
+ if (local->quiescing || !ieee80211_sdata_running(sdata) ||
test_bit(SCAN_SW_SCANNING, &local->scanning)) {
sdata->vif.bss_conf.enable_beacon = false;
} else {
@@ -223,8 +242,7 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
}
}
- drv_bss_info_changed(local, &sdata->vif,
- &sdata->vif.bss_conf, changed);
+ drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
}
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -299,6 +317,16 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL(ieee80211_restart_hw);
+static void ieee80211_recalc_smps_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, recalc_smps);
+
+ mutex_lock(&local->iflist_mtx);
+ ieee80211_recalc_smps(local, NULL);
+ mutex_unlock(&local->iflist_mtx);
+}
+
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
const struct ieee80211_ops *ops)
{
@@ -333,9 +361,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
WIPHY_FLAG_4ADDR_STATION;
wiphy->privid = mac80211_wiphy_privid;
- /* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
- wiphy->bss_priv_size = sizeof(struct ieee80211_bss) -
- sizeof(struct cfg80211_bss);
+ wiphy->bss_priv_size = sizeof(struct ieee80211_bss);
local = wiphy_priv(wiphy);
@@ -358,6 +384,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
local->user_power_level = -1;
+ local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
+ local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
INIT_LIST_HEAD(&local->interfaces);
mutex_init(&local->iflist_mtx);
@@ -369,9 +397,13 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
+ ieee80211_work_init(local);
+
INIT_WORK(&local->restart_work, ieee80211_restart_work);
INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+ INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
+ local->smps_mode = IEEE80211_SMPS_OFF;
INIT_WORK(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
@@ -461,6 +493,10 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+ WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
+ && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
+ "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");
+
/*
* Calculate scan IE length -- we need this to alloc
* memory and to subtract from the driver limit. It
@@ -674,11 +710,19 @@ static int __init ieee80211_init(void)
ret = rc80211_pid_init();
if (ret)
- return ret;
+ goto err_pid;
- ieee80211_debugfs_netdev_init();
+ ret = ieee80211_iface_init();
+ if (ret)
+ goto err_netdev;
return 0;
+ err_netdev:
+ rc80211_pid_exit();
+ err_pid:
+ rc80211_minstrel_exit();
+
+ return ret;
}
static void __exit ieee80211_exit(void)
@@ -695,7 +739,7 @@ static void __exit ieee80211_exit(void)
if (mesh_allocated)
ieee80211s_stop();
- ieee80211_debugfs_netdev_exit();
+ ieee80211_iface_exit();
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 6a4331429598..61080c5fad50 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -457,7 +457,7 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: running mesh housekeeping\n",
- sdata->dev->name);
+ sdata->name);
#endif
ieee80211_sta_expire(sdata, IEEE80211_MESH_PEER_INACTIVITY_LIMIT);
@@ -565,7 +565,7 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
/* ignore ProbeResp to foreign address */
if (stype == IEEE80211_STYPE_PROBE_RESP &&
- compare_ether_addr(mgmt->da, sdata->dev->dev_addr))
+ compare_ether_addr(mgmt->da, sdata->vif.addr))
return;
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -645,7 +645,7 @@ static void ieee80211_mesh_work(struct work_struct *work)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct sk_buff *skb;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (local->scanning)
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index d28acb6b1f81..ce84237ebad3 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -128,9 +128,9 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID == SA */
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -222,7 +222,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, ra, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
@@ -335,7 +335,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
bool process = true;
rcu_read_lock();
- sta = sta_info_get(local, mgmt->sa);
+ sta = sta_info_get(sdata, mgmt->sa);
if (!sta) {
rcu_read_unlock();
return 0;
@@ -374,7 +374,7 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
new_metric = MAX_METRIC;
exp_time = TU_TO_EXP_TIME(orig_lifetime);
- if (memcmp(orig_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
+ if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
/* This MP is the originator, we are not interested in this
* frame, except for updating transmitter's path info.
*/
@@ -486,7 +486,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
mhwmp_dbg("received PREQ from %pM\n", orig_addr);
- if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0) {
+ if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0) {
mhwmp_dbg("PREQ is for us\n");
forward = false;
reply = true;
@@ -579,7 +579,7 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
* replies
*/
target_addr = PREP_IE_TARGET_ADDR(prep_elem);
- if (memcmp(target_addr, sdata->dev->dev_addr, ETH_ALEN) == 0)
+ if (memcmp(target_addr, sdata->vif.addr, ETH_ALEN) == 0)
/* destination, no forwarding required */
return;
@@ -890,7 +890,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
target_flags = MP_F_RF;
spin_unlock_bh(&mpath->state_lock);
- mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->dev->dev_addr,
+ mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr,
cpu_to_le32(ifmsh->sn), target_flags, mpath->dst,
cpu_to_le32(mpath->sn), broadcast_addr, 0,
ttl, cpu_to_le32(lifetime), 0,
@@ -939,7 +939,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
if (time_after(jiffies,
mpath->exp_time -
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time)) &&
- !memcmp(sdata->dev->dev_addr, hdr->addr4, ETH_ALEN) &&
+ !memcmp(sdata->vif.addr, hdr->addr4, ETH_ALEN) &&
!(mpath->flags & MESH_PATH_RESOLVING) &&
!(mpath->flags & MESH_PATH_FIXED)) {
mesh_queue_preq(mpath,
@@ -1010,7 +1010,7 @@ mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->dev->dev_addr,
+ mesh_path_sel_frame_tx(MPATH_RANN, 0, sdata->vif.addr,
cpu_to_le32(++ifmsh->sn),
0, NULL, 0, broadcast_addr,
0, MESH_TTL, 0, 0, 0, sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 0192cfdacae4..2312efe04c62 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -260,7 +260,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+ if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -377,7 +377,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
+ if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -605,7 +605,7 @@ void mesh_path_discard_frame(struct sk_buff *skb,
struct mesh_path *mpath;
u32 sn = 0;
- if (memcmp(hdr->addr4, sdata->dev->dev_addr, ETH_ALEN) != 0) {
+ if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
u8 *ra, *da;
da = hdr->addr3;
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 0f7c6e6a4248..7985e5150898 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -169,7 +169,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
memcpy(mgmt->da, da, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
/* BSSID is left zeroed, wildcard value */
mgmt->u.action.category = MESH_PLINK_CATEGORY;
mgmt->u.action.u.plink_action.action_code = action;
@@ -234,7 +234,7 @@ void mesh_neighbour_update(u8 *hw_addr, u32 rates, struct ieee80211_sub_if_data
rcu_read_lock();
- sta = sta_info_get(local, hw_addr);
+ sta = sta_info_get(sdata, hw_addr);
if (!sta) {
sta = mesh_plink_alloc(sdata, hw_addr, rates);
if (!sta) {
@@ -455,7 +455,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
rcu_read_lock();
- sta = sta_info_get(local, mgmt->sa);
+ sta = sta_info_get(sdata, mgmt->sa);
if (!sta && ftype != PLINK_OPEN) {
mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
rcu_read_unlock();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index c79e59f82fd9..251055b38d67 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -75,11 +75,8 @@ enum rx_mgmt_action {
/* caller must call cfg80211_send_disassoc() */
RX_MGMT_CFG80211_DISASSOC,
- /* caller must call cfg80211_auth_timeout() & free work */
- RX_MGMT_CFG80211_AUTH_TO,
-
- /* caller must call cfg80211_assoc_timeout() & free work */
- RX_MGMT_CFG80211_ASSOC_TO,
+ /* caller must tell cfg80211 about internal error */
+ RX_MGMT_CFG80211_ASSOC_ERROR,
};
/* utils */
@@ -122,27 +119,6 @@ static int ecw2cw(int ecw)
return (1 << ecw) - 1;
}
-static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
- struct ieee80211_supported_band *sband,
- u32 *rates)
-{
- int i, j, count;
- *rates = 0;
- count = 0;
- for (i = 0; i < bss->supp_rates_len; i++) {
- int rate = (bss->supp_rates[i] & 0x7F) * 5;
-
- for (j = 0; j < sband->n_bitrates; j++)
- if (sband->bitrates[j].bitrate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
-
- return count;
-}
-
/*
* ieee80211_enable_ht should be called only after the operating band
* has been determined as ht configuration depends on the hw's
@@ -202,7 +178,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, 0);
rcu_read_lock();
- sta = sta_info_get(local, bssid);
+ sta = sta_info_get(sdata, bssid);
if (sta)
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_HT_CHANGED);
@@ -228,209 +204,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
/* frame sending functions */
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
- struct sk_buff *skb;
- struct ieee80211_mgmt *mgmt;
- u8 *pos;
- const u8 *ies, *ht_ie;
- int i, len, count, rates_len, supp_rates_len;
- u16 capab;
- int wmm = 0;
- struct ieee80211_supported_band *sband;
- u32 rates = 0;
-
- skb = dev_alloc_skb(local->hw.extra_tx_headroom +
- sizeof(*mgmt) + 200 + wk->ie_len +
- wk->ssid_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
- "frame\n", sdata->dev->name);
- return;
- }
- skb_reserve(skb, local->hw.extra_tx_headroom);
-
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
- capab = ifmgd->capab;
-
- if (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) {
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
- if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
- capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
- }
-
- if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
- capab |= WLAN_CAPABILITY_PRIVACY;
- if (wk->bss->wmm_used)
- wmm = 1;
-
- /* get all rates supported by the device and the AP as
- * some APs don't like getting a superset of their rates
- * in the association request (e.g. D-Link DAP 1353 in
- * b-only mode) */
- rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
-
- if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
- (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
- capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
-
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
- memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN);
-
- if (!is_zero_ether_addr(wk->prev_bssid)) {
- skb_put(skb, 10);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_REASSOC_REQ);
- mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.reassoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
- memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid,
- ETH_ALEN);
- } else {
- skb_put(skb, 4);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_ASSOC_REQ);
- mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.assoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
- }
-
- /* SSID */
- ies = pos = skb_put(skb, 2 + wk->ssid_len);
- *pos++ = WLAN_EID_SSID;
- *pos++ = wk->ssid_len;
- memcpy(pos, wk->ssid, wk->ssid_len);
-
- /* add all rates which were marked to be used above */
- supp_rates_len = rates_len;
- if (supp_rates_len > 8)
- supp_rates_len = 8;
-
- len = sband->n_bitrates;
- pos = skb_put(skb, supp_rates_len + 2);
- *pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = supp_rates_len;
-
- count = 0;
- for (i = 0; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- if (++count == 8)
- break;
- }
- }
-
- if (rates_len > count) {
- pos = skb_put(skb, rates_len - count + 2);
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = rates_len - count;
-
- for (i++; i < sband->n_bitrates; i++) {
- if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
- }
- }
- }
-
- if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
- /* 1. power capabilities */
- pos = skb_put(skb, 4);
- *pos++ = WLAN_EID_PWR_CAPABILITY;
- *pos++ = 2;
- *pos++ = 0; /* min tx power */
- *pos++ = local->hw.conf.channel->max_power; /* max tx power */
-
- /* 2. supported channels */
- /* TODO: get this in reg domain format */
- pos = skb_put(skb, 2 * sband->n_channels + 2);
- *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
- *pos++ = 2 * sband->n_channels;
- for (i = 0; i < sband->n_channels; i++) {
- *pos++ = ieee80211_frequency_to_channel(
- sband->channels[i].center_freq);
- *pos++ = 1; /* one channel in the subband*/
- }
- }
-
- if (wk->ie_len && wk->ie) {
- pos = skb_put(skb, wk->ie_len);
- memcpy(pos, wk->ie, wk->ie_len);
- }
-
- if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) {
- pos = skb_put(skb, 9);
- *pos++ = WLAN_EID_VENDOR_SPECIFIC;
- *pos++ = 7; /* len */
- *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
- *pos++ = 0x50;
- *pos++ = 0xf2;
- *pos++ = 2; /* WME */
- *pos++ = 0; /* WME info */
- *pos++ = 1; /* WME ver */
- *pos++ = 0;
- }
-
- /* wmm support is a must to HT */
- /*
- * IEEE802.11n does not allow TKIP/WEP as pairwise
- * ciphers in HT mode. We still associate in non-ht
- * mode (11a/b/g) if any one of these ciphers is
- * configured as pairwise.
- */
- if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
- sband->ht_cap.ht_supported &&
- (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) &&
- ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
- (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) {
- struct ieee80211_ht_info *ht_info =
- (struct ieee80211_ht_info *)(ht_ie + 2);
- u16 cap = sband->ht_cap.cap;
- __le16 tmp;
- u32 flags = local->hw.conf.channel->flags;
-
- switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
- cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- cap &= ~IEEE80211_HT_CAP_SGI_40;
- }
- break;
- }
-
- tmp = cpu_to_le16(cap);
- pos = skb_put(skb, sizeof(struct ieee80211_ht_cap)+2);
- *pos++ = WLAN_EID_HT_CAPABILITY;
- *pos++ = sizeof(struct ieee80211_ht_cap);
- memset(pos, 0, sizeof(struct ieee80211_ht_cap));
- memcpy(pos, &tmp, sizeof(u16));
- pos += sizeof(u16);
- /* TODO: needs a define here for << 2 */
- *pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density << 2);
- memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
- }
-
- IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- ieee80211_tx_skb(sdata, skb);
-}
-
-
static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
const u8 *bssid, u16 stype, u16 reason,
void *cookie)
@@ -443,7 +216,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "deauth/disassoc frame\n", sdata->dev->name);
+ "deauth/disassoc frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -451,7 +224,7 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
memcpy(mgmt->da, bssid, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
skb_put(skb, 2);
@@ -476,30 +249,15 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_pspoll *pspoll;
struct sk_buff *skb;
- u16 fc;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for "
- "pspoll frame\n", sdata->dev->name);
+ skb = ieee80211_pspoll_get(&local->hw, &sdata->vif);
+ if (!skb)
return;
- }
- skb_reserve(skb, local->hw.extra_tx_headroom);
- pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
- memset(pspoll, 0, sizeof(*pspoll));
- fc = IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM;
- pspoll->frame_control = cpu_to_le16(fc);
- pspoll->aid = cpu_to_le16(ifmgd->aid);
-
- /* aid in PS-Poll has its two MSBs each set to 1 */
- pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
-
- memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
- memcpy(pspoll->ta, sdata->dev->dev_addr, ETH_ALEN);
+ pspoll = (struct ieee80211_pspoll *) skb->data;
+ pspoll->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
@@ -510,30 +268,47 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
int powersave)
{
struct sk_buff *skb;
+ struct ieee80211_hdr_3addr *nullfunc;
+
+ skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+ if (!skb)
+ return;
+
+ nullfunc = (struct ieee80211_hdr_3addr *) skb->data;
+ if (powersave)
+ nullfunc->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ struct sk_buff *skb;
struct ieee80211_hdr *nullfunc;
__le16 fc;
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
return;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24);
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + 30);
if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
- "frame\n", sdata->dev->name);
+ printk(KERN_DEBUG "%s: failed to allocate buffer for 4addr "
+ "nullfunc frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
- nullfunc = (struct ieee80211_hdr *) skb_put(skb, 24);
- memset(nullfunc, 0, 24);
+ nullfunc = (struct ieee80211_hdr *) skb_put(skb, 30);
+ memset(nullfunc, 0, 30);
fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
- if (powersave)
- fc |= cpu_to_le16(IEEE80211_FCTL_PM);
+ IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
nullfunc->frame_control = fc;
memcpy(nullfunc->addr1, sdata->u.mgd.bssid, ETH_ALEN);
- memcpy(nullfunc->addr2, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(nullfunc->addr3, sdata->u.mgd.bssid, ETH_ALEN);
+ memcpy(nullfunc->addr4, sdata->vif.addr, ETH_ALEN);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
@@ -546,7 +321,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
mutex_lock(&ifmgd->mtx);
@@ -557,7 +332,7 @@ static void ieee80211_chswitch_work(struct work_struct *work)
ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
/* XXX: shouldn't really modify cfg80211-owned data! */
- ifmgd->associated->cbss.channel = sdata->local->oper_channel;
+ ifmgd->associated->channel = sdata->local->oper_channel;
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -584,6 +359,8 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel_sw_ie *sw_elem,
struct ieee80211_bss *bss)
{
+ struct cfg80211_bss *cbss =
+ container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_channel *new_ch;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
@@ -617,7 +394,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mod_timer(&ifmgd->chswitch_timer,
jiffies +
msecs_to_jiffies(sw_elem->count *
- bss->cbss.beacon_interval));
+ cbss->beacon_interval));
}
}
@@ -691,8 +468,13 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
return;
}
+ if (!list_empty(&local->work_list)) {
+ local->ps_sdata = NULL;
+ goto change;
+ }
+
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_STATION)
continue;
@@ -701,7 +483,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
}
if (count == 1 && found->u.mgd.powersave &&
- found->u.mgd.associated && list_empty(&found->u.mgd.work_list) &&
+ found->u.mgd.associated &&
!(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL))) {
s32 beaconint_us;
@@ -729,6 +511,7 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
local->ps_sdata = NULL;
}
+ change:
ieee80211_change_ps(local);
}
@@ -786,9 +569,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
struct ieee80211_tx_queue_params params;
size_t left;
int count;
- u8 *pos;
+ u8 *pos, uapsd_queues = 0;
- if (!(ifmgd->flags & IEEE80211_STA_WMM_ENABLED))
+ if (local->hw.queues < 4)
return;
if (!wmm_param)
@@ -796,6 +579,10 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
if (wmm_param_len < 8 || wmm_param[5] /* version */ != 1)
return;
+
+ if (ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
+ uapsd_queues = local->uapsd_queues;
+
count = wmm_param[6] & 0x0f;
if (count == ifmgd->wmm_last_param_set)
return;
@@ -810,6 +597,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
for (; left >= 4; left -= 4, pos += 4) {
int aci = (pos[0] >> 5) & 0x03;
int acm = (pos[0] >> 4) & 0x01;
+ bool uapsd = false;
int queue;
switch (aci) {
@@ -817,22 +605,30 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
queue = 3;
if (acm)
local->wmm_acm |= BIT(1) | BIT(2); /* BK/- */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd = true;
break;
case 2: /* AC_VI */
queue = 1;
if (acm)
local->wmm_acm |= BIT(4) | BIT(5); /* CL/VI */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd = true;
break;
case 3: /* AC_VO */
queue = 0;
if (acm)
local->wmm_acm |= BIT(6) | BIT(7); /* VO/NC */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd = true;
break;
case 0: /* AC_BE */
default:
queue = 2;
if (acm)
local->wmm_acm |= BIT(0) | BIT(3); /* BE/EE */
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd = true;
break;
}
@@ -840,11 +636,14 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
params.cw_max = ecw2cw((pos[1] & 0xf0) >> 4);
params.cw_min = ecw2cw(pos[1] & 0x0f);
params.txop = get_unaligned_le16(pos + 2);
+ params.uapsd = uapsd;
+
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: WMM queue=%d aci=%d acm=%d aifs=%d "
- "cWmin=%d cWmax=%d txop=%d\n",
+ "cWmin=%d cWmax=%d txop=%d uapsd=%d\n",
wiphy_name(local->hw.wiphy), queue, aci, acm,
- params.aifs, params.cw_min, params.cw_max, params.txop);
+ params.aifs, params.cw_min, params.cw_max, params.txop,
+ params.uapsd);
#endif
if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
printk(KERN_DEBUG "%s: failed to set TX queue "
@@ -891,25 +690,24 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
}
static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
+ struct cfg80211_bss *cbss,
u32 bss_info_changed)
{
+ struct ieee80211_bss *bss = (void *)cbss->priv;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_bss *bss = wk->bss;
bss_info_changed |= BSS_CHANGED_ASSOC;
/* set timing information */
- sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval;
- sdata->vif.bss_conf.timestamp = bss->cbss.tsf;
+ sdata->vif.bss_conf.beacon_int = cbss->beacon_interval;
+ sdata->vif.bss_conf.timestamp = cbss->tsf;
sdata->vif.bss_conf.dtim_period = bss->dtim_period;
bss_info_changed |= BSS_CHANGED_BEACON_INT;
bss_info_changed |= ieee80211_handle_bss_capability(sdata,
- bss->cbss.capability, bss->has_erp_value, bss->erp_value);
+ cbss->capability, bss->has_erp_value, bss->erp_value);
- sdata->u.mgd.associated = bss;
- sdata->u.mgd.old_associate_work = wk;
- memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
+ sdata->u.mgd.associated = cbss;
+ memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
/* just to be sure */
sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -940,99 +738,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local, -1);
+ ieee80211_recalc_smps(local, sdata);
mutex_unlock(&local->iflist_mtx);
- netif_start_queue(sdata->dev);
+ netif_tx_start_all_queues(sdata->dev);
netif_carrier_on(sdata->dev);
}
-static enum rx_mgmt_action __must_check
-ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
-
- wk->tries++;
- if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
- sdata->dev->name, wk->bss->cbss.bssid);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
-
- /*
- * We might have a pending scan which had no chance to run yet
- * due to work needing to be done. Hence, queue the STAs work
- * again for that.
- */
- ieee80211_queue_work(&local->hw, &ifmgd->work);
- return RX_MGMT_CFG80211_AUTH_TO;
- }
-
- printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n",
- sdata->dev->name, wk->bss->cbss.bssid,
- wk->tries);
-
- /*
- * Direct probe is sent to broadcast address as some APs
- * will not answer to direct packet in unassociated state.
- */
- ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0);
-
- wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- run_again(ifmgd, wk->timeout);
-
- return RX_MGMT_NONE;
-}
-
-
-static enum rx_mgmt_action __must_check
-ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
-
- wk->tries++;
- if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
- printk(KERN_DEBUG "%s: authentication with AP %pM"
- " timed out\n",
- sdata->dev->name, wk->bss->cbss.bssid);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
-
- /*
- * We might have a pending scan which had no chance to run yet
- * due to work needing to be done. Hence, queue the STAs work
- * again for that.
- */
- ieee80211_queue_work(&local->hw, &ifmgd->work);
- return RX_MGMT_CFG80211_AUTH_TO;
- }
-
- printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n",
- sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
-
- ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len,
- wk->bss->cbss.bssid, NULL, 0, 0);
- wk->auth_transaction = 2;
-
- wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- run_again(ifmgd, wk->timeout);
-
- return RX_MGMT_NONE;
-}
-
-static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
- bool deauth)
+static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
@@ -1045,21 +758,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->associated))
return;
- memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN);
+ memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
ifmgd->associated = NULL;
memset(ifmgd->bssid, 0, ETH_ALEN);
- if (deauth) {
- kfree(ifmgd->old_associate_work);
- ifmgd->old_associate_work = NULL;
- } else {
- struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
-
- wk->state = IEEE80211_MGD_STATE_IDLE;
- list_add(&wk->list, &ifmgd->work_list);
- }
-
/*
* we need to commit the associated = NULL change because the
* scan code uses that to determine whether this iface should
@@ -1074,11 +777,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
* time -- we don't want the scan code to enable queues.
*/
- netif_stop_queue(sdata->dev);
+ netif_tx_stop_all_queues(sdata->dev);
netif_carrier_off(sdata->dev);
rcu_read_lock();
- sta = sta_info_get(local, bssid);
+ sta = sta_info_get(sdata, bssid);
if (sta)
ieee80211_sta_tear_down_BA_sessions(sta);
rcu_read_unlock();
@@ -1115,7 +818,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
- sta = sta_info_get(local, bssid);
+ sta = sta_info_get(sdata, bssid);
if (!sta) {
rcu_read_unlock();
return;
@@ -1128,44 +831,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sta_info_destroy(sta);
}
-static enum rx_mgmt_action __must_check
-ieee80211_associate(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
-
- wk->tries++;
- if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) {
- printk(KERN_DEBUG "%s: association with AP %pM"
- " timed out\n",
- sdata->dev->name, wk->bss->cbss.bssid);
-
- /*
- * Most likely AP is not in the range so remove the
- * bss struct for that AP.
- */
- cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
-
- /*
- * We might have a pending scan which had no chance to run yet
- * due to work needing to be done. Hence, queue the STAs work
- * again for that.
- */
- ieee80211_queue_work(&local->hw, &ifmgd->work);
- return RX_MGMT_CFG80211_ASSOC_TO;
- }
-
- printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n",
- sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
- ieee80211_send_assoc(sdata, wk);
-
- wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
- run_again(ifmgd, wk->timeout);
-
- return RX_MGMT_NONE;
-}
-
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr)
{
@@ -1189,8 +854,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
- ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID);
- ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid,
+ ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
+ ieee80211_send_probe_req(sdata, ifmgd->associated->bssid,
ssid + 2, ssid[1], NULL, 0);
ifmgd->probe_send_count++;
@@ -1204,12 +869,15 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
bool already = false;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (sdata->local->scanning)
return;
+ if (sdata->local->tmp_channel)
+ return;
+
mutex_lock(&ifmgd->mtx);
if (!ifmgd->associated)
@@ -1218,7 +886,7 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (beacon && net_ratelimit())
printk(KERN_DEBUG "%s: detected beacon loss from AP "
- "- sending probe request\n", sdata->dev->name);
+ "- sending probe request\n", sdata->name);
#endif
/*
@@ -1271,88 +939,8 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif)
}
EXPORT_SYMBOL(ieee80211_beacon_loss);
-static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk)
-{
- wk->state = IEEE80211_MGD_STATE_IDLE;
- printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
-}
-
-
-static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt,
- size_t len)
-{
- u8 *pos;
- struct ieee802_11_elems elems;
-
- pos = mgmt->u.auth.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
- if (!elems.challenge)
- return;
- ieee80211_send_auth(sdata, 3, wk->auth_alg,
- elems.challenge - 2, elems.challenge_len + 2,
- wk->bss->cbss.bssid,
- wk->key, wk->key_len, wk->key_idx);
- wk->auth_transaction = 4;
-}
-
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len)
-{
- u16 auth_alg, auth_transaction, status_code;
-
- if (wk->state != IEEE80211_MGD_STATE_AUTH)
- return RX_MGMT_NONE;
-
- if (len < 24 + 6)
- return RX_MGMT_NONE;
-
- if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
- return RX_MGMT_NONE;
-
- if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
- return RX_MGMT_NONE;
-
- auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
- auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
- status_code = le16_to_cpu(mgmt->u.auth.status_code);
-
- if (auth_alg != wk->auth_alg ||
- auth_transaction != wk->auth_transaction)
- return RX_MGMT_NONE;
-
- if (status_code != WLAN_STATUS_SUCCESS) {
- list_del(&wk->list);
- kfree(wk);
- return RX_MGMT_CFG80211_AUTH;
- }
-
- switch (wk->auth_alg) {
- case WLAN_AUTH_OPEN:
- case WLAN_AUTH_LEAP:
- case WLAN_AUTH_FT:
- ieee80211_auth_completed(sdata, wk);
- return RX_MGMT_CFG80211_AUTH;
- case WLAN_AUTH_SHARED_KEY:
- if (wk->auth_transaction == 4) {
- ieee80211_auth_completed(sdata, wk);
- return RX_MGMT_CFG80211_AUTH;
- } else
- ieee80211_auth_challenge(sdata, wk, mgmt, len);
- break;
- }
-
- return RX_MGMT_NONE;
-}
-
-
static enum rx_mgmt_action __must_check
ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1364,23 +952,15 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
ASSERT_MGD_MTX(ifmgd);
- if (wk)
- bssid = wk->bss->cbss.bssid;
- else
- bssid = ifmgd->associated->cbss.bssid;
+ bssid = ifmgd->associated->bssid;
reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
- sdata->dev->name, bssid, reason_code);
+ sdata->name, bssid, reason_code);
- if (!wk) {
- ieee80211_set_disassoc(sdata, true);
- ieee80211_recalc_idle(sdata->local);
- } else {
- list_del(&wk->list);
- kfree(wk);
- }
+ ieee80211_set_disassoc(sdata);
+ ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DEAUTH;
}
@@ -1401,123 +981,72 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->associated))
return RX_MGMT_NONE;
- if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN)))
+ if (WARN_ON(memcmp(ifmgd->associated->bssid, mgmt->sa, ETH_ALEN)))
return RX_MGMT_NONE;
reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
- sdata->dev->name, mgmt->sa, reason_code);
+ sdata->name, mgmt->sa, reason_code);
- ieee80211_set_disassoc(sdata, false);
+ ieee80211_set_disassoc(sdata);
ieee80211_recalc_idle(sdata->local);
return RX_MGMT_CFG80211_DISASSOC;
}
-static enum rx_mgmt_action __must_check
-ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len,
- bool reassoc)
+static bool ieee80211_assoc_success(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len)
{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
struct sta_info *sta;
+ struct cfg80211_bss *cbss = wk->assoc.bss;
+ u8 *pos;
u32 rates, basic_rates;
- u16 capab_info, status_code, aid;
+ u16 capab_info, aid;
struct ieee802_11_elems elems;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
- u8 *pos;
u32 changed = 0;
- int i, j;
- bool have_higher_than_11mbit = false, newsta = false;
+ int i, j, err;
+ bool have_higher_than_11mbit = false;
u16 ap_ht_cap_flags;
- /*
- * AssocResp and ReassocResp have identical structure, so process both
- * of them in this function.
- */
+ /* AssocResp and ReassocResp have identical structure */
- if (len < 24 + 6)
- return RX_MGMT_NONE;
-
- if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
- return RX_MGMT_NONE;
-
- capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
- status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
-
- printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
- "status=%d aid=%d)\n",
- sdata->dev->name, reassoc ? "Rea" : "A", mgmt->sa,
- capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
-
- pos = mgmt->u.assoc_resp.variable;
- ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
-
- if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
- elems.timeout_int && elems.timeout_int_len == 5 &&
- elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
- u32 tu, ms;
- tu = get_unaligned_le32(elems.timeout_int + 1);
- ms = tu * 1024 / 1000;
- printk(KERN_DEBUG "%s: AP rejected association temporarily; "
- "comeback duration %u TU (%u ms)\n",
- sdata->dev->name, tu, ms);
- wk->timeout = jiffies + msecs_to_jiffies(ms);
- if (ms > IEEE80211_ASSOC_TIMEOUT)
- run_again(ifmgd, jiffies + msecs_to_jiffies(ms));
- return RX_MGMT_NONE;
- }
-
- if (status_code != WLAN_STATUS_SUCCESS) {
- printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
- sdata->dev->name, status_code);
- wk->state = IEEE80211_MGD_STATE_IDLE;
- return RX_MGMT_CFG80211_ASSOC;
- }
+ capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
printk(KERN_DEBUG "%s: invalid aid value %d; bits 15:14 not "
- "set\n", sdata->dev->name, aid);
+ "set\n", sdata->name, aid);
aid &= ~(BIT(15) | BIT(14));
+ pos = mgmt->u.assoc_resp.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+
if (!elems.supp_rates) {
printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
- sdata->dev->name);
- return RX_MGMT_NONE;
+ sdata->name);
+ return false;
}
- printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
ifmgd->aid = aid;
- rcu_read_lock();
-
- /* Add STA entry for the AP */
- sta = sta_info_get(local, wk->bss->cbss.bssid);
+ sta = sta_info_alloc(sdata, cbss->bssid, GFP_KERNEL);
if (!sta) {
- newsta = true;
-
- rcu_read_unlock();
-
- sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL);
- if (!sta) {
- printk(KERN_DEBUG "%s: failed to alloc STA entry for"
- " the AP\n", sdata->dev->name);
- return RX_MGMT_NONE;
- }
-
- set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
- WLAN_STA_ASSOC_AP);
- if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
-
- rcu_read_lock();
+ printk(KERN_DEBUG "%s: failed to alloc STA entry for"
+ " the AP\n", sdata->name);
+ return false;
}
+ set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
+ WLAN_STA_ASSOC_AP);
+ if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
+ set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+
rates = 0;
basic_rates = 0;
sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
@@ -1580,40 +1109,40 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (elems.wmm_param)
set_sta_flags(sta, WLAN_STA_WME);
- if (newsta) {
- int err = sta_info_insert(sta);
- if (err) {
- printk(KERN_DEBUG "%s: failed to insert STA entry for"
- " the AP (error %d)\n", sdata->dev->name, err);
- rcu_read_unlock();
- return RX_MGMT_NONE;
- }
+ err = sta_info_insert(sta);
+ sta = NULL;
+ if (err) {
+ printk(KERN_DEBUG "%s: failed to insert STA entry for"
+ " the AP (error %d)\n", sdata->name, err);
+ return false;
}
- rcu_read_unlock();
-
if (elems.wmm_param)
ieee80211_sta_wmm_params(local, ifmgd, elems.wmm_param,
elems.wmm_param_len);
else
ieee80211_set_wmm_default(sdata);
+ local->oper_channel = wk->chan;
+
if (elems.ht_info_elem && elems.wmm_param &&
- (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
+ (sdata->local->hw.queues >= 4) &&
!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- wk->bss->cbss.bssid,
- ap_ht_cap_flags);
-
- /* delete work item -- must be before set_associated for PS */
- list_del(&wk->list);
+ cbss->bssid, ap_ht_cap_flags);
/* set AID and assoc capability,
* ieee80211_set_associated() will tell the driver */
bss_conf->aid = aid;
bss_conf->assoc_capability = capab_info;
- /* this will take ownership of wk */
- ieee80211_set_associated(sdata, wk, changed);
+ ieee80211_set_associated(sdata, cbss, changed);
+
+ /*
+ * If we're using 4-addr mode, let the AP know that we're
+ * doing so, so that it can create the STA VLAN on its side
+ */
+ if (ifmgd->use_4addr)
+ ieee80211_send_4addr_nullfunc(local, sdata);
/*
* Start timer to probe the connection to the AP now.
@@ -1622,7 +1151,7 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
mod_beacon_timer(sdata);
- return RX_MGMT_CFG80211_ASSOC;
+ return true;
}
@@ -1657,7 +1186,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
return;
if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
- (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid,
+ (memcmp(mgmt->bssid, sdata->u.mgd.associated->bssid,
ETH_ALEN) == 0)) {
struct ieee80211_channel_sw_ie *sw_elem =
(struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
@@ -1667,19 +1196,19 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgd_work *wk,
- struct ieee80211_mgmt *mgmt, size_t len,
- struct ieee80211_rx_status *rx_status)
+ struct sk_buff *skb)
{
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
struct ieee80211_if_managed *ifmgd;
- size_t baselen;
+ struct ieee80211_rx_status *rx_status = (void *) skb->cb;
+ size_t baselen, len = skb->len;
struct ieee802_11_elems elems;
ifmgd = &sdata->u.mgd;
ASSERT_MGD_MTX(ifmgd);
- if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+ if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
return; /* ignore ProbeResp to foreign address */
baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
@@ -1691,17 +1220,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
- /* direct probe may be part of the association flow */
- if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
- printk(KERN_DEBUG "%s: direct probe responded\n",
- sdata->dev->name);
- wk->tries = 0;
- wk->state = IEEE80211_MGD_STATE_AUTH;
- WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE);
- }
-
if (ifmgd->associated &&
- memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 &&
+ memcmp(mgmt->bssid, ifmgd->associated->bssid, ETH_ALEN) == 0 &&
ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
IEEE80211_STA_CONNECTION_POLL)) {
ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
@@ -1774,7 +1294,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (!ifmgd->associated)
return;
- bssid = ifmgd->associated->cbss.bssid;
+ bssid = ifmgd->associated->bssid;
/*
* And in theory even frames from a different AP we were just
@@ -1787,7 +1307,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: cancelling probereq poll due "
- "to a received beacon\n", sdata->dev->name);
+ "to a received beacon\n", sdata->name);
}
#endif
ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
@@ -1865,7 +1385,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
- sta = sta_info_get(local, bssid);
+ sta = sta_info_get(sdata, bssid);
if (WARN_ON(!sta)) {
rcu_read_unlock();
return;
@@ -1913,9 +1433,6 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_AUTH:
- case IEEE80211_STYPE_ASSOC_RESP:
- case IEEE80211_STYPE_REASSOC_RESP:
case IEEE80211_STYPE_DEAUTH:
case IEEE80211_STYPE_DISASSOC:
case IEEE80211_STYPE_ACTION:
@@ -1933,7 +1450,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
- struct ieee80211_mgd_work *wk;
enum rx_mgmt_action rma = RX_MGMT_NONE;
u16 fc;
@@ -1944,29 +1460,28 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
mutex_lock(&ifmgd->mtx);
if (ifmgd->associated &&
- memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid,
- ETH_ALEN) == 0) {
+ memcmp(ifmgd->associated->bssid, mgmt->bssid, ETH_ALEN) == 0) {
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_BEACON:
ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
rx_status);
break;
case IEEE80211_STYPE_PROBE_RESP:
- ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt,
- skb->len, rx_status);
+ ieee80211_rx_mgmt_probe_resp(sdata, skb);
break;
case IEEE80211_STYPE_DEAUTH:
- rma = ieee80211_rx_mgmt_deauth(sdata, NULL,
- mgmt, skb->len);
+ rma = ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_DISASSOC:
rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
break;
case IEEE80211_STYPE_ACTION:
- /* XXX: differentiate, can only happen for CSA now! */
+ if (mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
+ break;
+
ieee80211_sta_process_chanswitch(sdata,
&mgmt->u.action.u.chan_switch.sw_elem,
- ifmgd->associated);
+ (void *)ifmgd->associated->priv);
break;
}
mutex_unlock(&ifmgd->mtx);
@@ -1987,58 +1502,11 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
goto out;
}
- list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
- continue;
-
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_RESP:
- ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len,
- rx_status);
- break;
- case IEEE80211_STYPE_AUTH:
- rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len);
- break;
- case IEEE80211_STYPE_ASSOC_RESP:
- rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
- skb->len, false);
- break;
- case IEEE80211_STYPE_REASSOC_RESP:
- rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
- skb->len, true);
- break;
- case IEEE80211_STYPE_DEAUTH:
- rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt,
- skb->len);
- break;
- }
- /*
- * We've processed this frame for that work, so it can't
- * belong to another work struct.
- * NB: this is also required for correctness because the
- * called functions can free 'wk', and for 'rma'!
- */
- break;
- }
-
mutex_unlock(&ifmgd->mtx);
- switch (rma) {
- case RX_MGMT_NONE:
- /* no action */
- break;
- case RX_MGMT_CFG80211_AUTH:
- cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len);
- break;
- case RX_MGMT_CFG80211_ASSOC:
- cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
- break;
- case RX_MGMT_CFG80211_DEAUTH:
+ if (skb->len >= 24 + 2 /* mgmt + deauth reason */ &&
+ (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH)
cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len);
- break;
- default:
- WARN(1, "unexpected: %d", rma);
- }
out:
kfree_skb(skb);
@@ -2066,12 +1534,8 @@ static void ieee80211_sta_work(struct work_struct *work)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd;
struct sk_buff *skb;
- struct ieee80211_mgd_work *wk, *tmp;
- LIST_HEAD(free_work);
- enum rx_mgmt_action rma;
- bool anybusy = false;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
return;
if (local->scanning)
@@ -2102,7 +1566,7 @@ static void ieee80211_sta_work(struct work_struct *work)
ifmgd->associated) {
u8 bssid[ETH_ALEN];
- memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN);
+ memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
if (time_is_after_jiffies(ifmgd->probe_timeout))
run_again(ifmgd, ifmgd->probe_timeout);
@@ -2124,7 +1588,7 @@ static void ieee80211_sta_work(struct work_struct *work)
printk(KERN_DEBUG "No probe response from AP %pM"
" after %dms, disconnecting.\n",
bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
- ieee80211_set_disassoc(sdata, true);
+ ieee80211_set_disassoc(sdata);
ieee80211_recalc_idle(local);
mutex_unlock(&ifmgd->mtx);
/*
@@ -2139,87 +1603,7 @@ static void ieee80211_sta_work(struct work_struct *work)
}
}
-
- ieee80211_recalc_idle(local);
-
- list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
- if (time_is_after_jiffies(wk->timeout)) {
- /*
- * This work item isn't supposed to be worked on
- * right now, but take care to adjust the timer
- * properly.
- */
- run_again(ifmgd, wk->timeout);
- continue;
- }
-
- switch (wk->state) {
- default:
- WARN_ON(1);
- /* fall through */
- case IEEE80211_MGD_STATE_IDLE:
- /* nothing */
- rma = RX_MGMT_NONE;
- break;
- case IEEE80211_MGD_STATE_PROBE:
- rma = ieee80211_direct_probe(sdata, wk);
- break;
- case IEEE80211_MGD_STATE_AUTH:
- rma = ieee80211_authenticate(sdata, wk);
- break;
- case IEEE80211_MGD_STATE_ASSOC:
- rma = ieee80211_associate(sdata, wk);
- break;
- }
-
- switch (rma) {
- case RX_MGMT_NONE:
- /* no action required */
- break;
- case RX_MGMT_CFG80211_AUTH_TO:
- case RX_MGMT_CFG80211_ASSOC_TO:
- list_del(&wk->list);
- list_add(&wk->list, &free_work);
- wk->tries = rma; /* small abuse but only local */
- break;
- default:
- WARN(1, "unexpected: %d", rma);
- }
- }
-
- list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (wk->state != IEEE80211_MGD_STATE_IDLE) {
- anybusy = true;
- break;
- }
- }
- if (!anybusy &&
- test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
- ieee80211_queue_delayed_work(&local->hw,
- &local->scan_work,
- round_jiffies_relative(0));
-
mutex_unlock(&ifmgd->mtx);
-
- list_for_each_entry_safe(wk, tmp, &free_work, list) {
- switch (wk->tries) {
- case RX_MGMT_CFG80211_AUTH_TO:
- cfg80211_send_auth_timeout(sdata->dev,
- wk->bss->cbss.bssid);
- break;
- case RX_MGMT_CFG80211_ASSOC_TO:
- cfg80211_send_assoc_timeout(sdata->dev,
- wk->bss->cbss.bssid);
- break;
- default:
- WARN(1, "unexpected: %d", wk->tries);
- }
-
- list_del(&wk->list);
- kfree(wk);
- }
-
- ieee80211_recalc_idle(local);
}
static void ieee80211_sta_bcn_mon_timer(unsigned long data)
@@ -2328,14 +1712,14 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
(unsigned long) sdata);
skb_queue_head_init(&ifmgd->skb_queue);
- INIT_LIST_HEAD(&ifmgd->work_list);
-
- ifmgd->capab = WLAN_CAPABILITY_ESS;
ifmgd->flags = 0;
- if (sdata->local->hw.queues >= 4)
- ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
mutex_init(&ifmgd->mtx);
+
+ if (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS)
+ ifmgd->req_smps = IEEE80211_SMPS_AUTOMATIC;
+ else
+ ifmgd->req_smps = IEEE80211_SMPS_OFF;
}
/* scan finished notification */
@@ -2366,12 +1750,34 @@ int ieee80211_max_network_latency(struct notifier_block *nb,
}
/* config hooks */
+static enum work_done_result
+ieee80211_probe_auth_done(struct ieee80211_work *wk,
+ struct sk_buff *skb)
+{
+ if (!skb) {
+ cfg80211_send_auth_timeout(wk->sdata->dev, wk->filter_ta);
+ return WORK_DONE_DESTROY;
+ }
+
+ if (wk->type == IEEE80211_WORK_AUTH) {
+ cfg80211_send_rx_auth(wk->sdata->dev, skb->data, skb->len);
+ return WORK_DONE_DESTROY;
+ }
+
+ mutex_lock(&wk->sdata->u.mgd.mtx);
+ ieee80211_rx_mgmt_probe_resp(wk->sdata, skb);
+ mutex_unlock(&wk->sdata->u.mgd.mtx);
+
+ wk->type = IEEE80211_WORK_AUTH;
+ wk->probe_auth.tries = 0;
+ return WORK_DONE_REQUEUE;
+}
+
int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_auth_request *req)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
const u8 *ssid;
- struct ieee80211_mgd_work *wk;
+ struct ieee80211_work *wk;
u16 auth_alg;
switch (req->auth_type) {
@@ -2395,7 +1801,7 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
if (!wk)
return -ENOMEM;
- wk->bss = (void *)req->bss;
+ memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
if (req->ie && req->ie_len) {
memcpy(wk->ie, req->ie, req->ie_len);
@@ -2403,66 +1809,76 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
}
if (req->key && req->key_len) {
- wk->key_len = req->key_len;
- wk->key_idx = req->key_idx;
- memcpy(wk->key, req->key, req->key_len);
+ wk->probe_auth.key_len = req->key_len;
+ wk->probe_auth.key_idx = req->key_idx;
+ memcpy(wk->probe_auth.key, req->key, req->key_len);
}
ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
- memcpy(wk->ssid, ssid + 2, ssid[1]);
- wk->ssid_len = ssid[1];
+ memcpy(wk->probe_auth.ssid, ssid + 2, ssid[1]);
+ wk->probe_auth.ssid_len = ssid[1];
- wk->state = IEEE80211_MGD_STATE_PROBE;
- wk->auth_alg = auth_alg;
- wk->timeout = jiffies; /* run right away */
+ wk->probe_auth.algorithm = auth_alg;
+ wk->probe_auth.privacy = req->bss->capability & WLAN_CAPABILITY_PRIVACY;
- /*
- * XXX: if still associated need to tell AP that we're going
- * to sleep and then change channel etc.
- */
- sdata->local->oper_channel = req->bss->channel;
- ieee80211_hw_config(sdata->local, 0);
-
- mutex_lock(&ifmgd->mtx);
- list_add(&wk->list, &sdata->u.mgd.work_list);
- mutex_unlock(&ifmgd->mtx);
+ wk->type = IEEE80211_WORK_DIRECT_PROBE;
+ wk->chan = req->bss->channel;
+ wk->sdata = sdata;
+ wk->done = ieee80211_probe_auth_done;
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work);
+ ieee80211_add_work(wk);
return 0;
}
-int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_assoc_request *req)
+static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk,
+ struct sk_buff *skb)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_mgd_work *wk, *found = NULL;
- int i, err;
+ struct ieee80211_mgmt *mgmt;
+ u16 status;
- mutex_lock(&ifmgd->mtx);
+ if (!skb) {
+ cfg80211_send_assoc_timeout(wk->sdata->dev, wk->filter_ta);
+ return WORK_DONE_DESTROY;
+ }
- list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (&wk->bss->cbss == req->bss &&
- wk->state == IEEE80211_MGD_STATE_IDLE) {
- found = wk;
- break;
+ mgmt = (void *)skb->data;
+ status = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+
+ if (status == WLAN_STATUS_SUCCESS) {
+ mutex_lock(&wk->sdata->u.mgd.mtx);
+ if (!ieee80211_assoc_success(wk, mgmt, skb->len)) {
+ mutex_unlock(&wk->sdata->u.mgd.mtx);
+ /* oops -- internal error -- send timeout for now */
+ cfg80211_send_assoc_timeout(wk->sdata->dev,
+ wk->filter_ta);
+ return WORK_DONE_DESTROY;
}
+ mutex_unlock(&wk->sdata->u.mgd.mtx);
}
- if (!found) {
- err = -ENOLINK;
- goto out;
- }
+ cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len);
+ return WORK_DONE_DESTROY;
+}
- list_del(&found->list);
+int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_assoc_request *req)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_bss *bss = (void *)req->bss->priv;
+ struct ieee80211_work *wk;
+ const u8 *ssid;
+ int i;
- wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL);
- if (!wk) {
- list_add(&found->list, &ifmgd->work_list);
- err = -ENOMEM;
- goto out;
+ mutex_lock(&ifmgd->mtx);
+ if (ifmgd->associated) {
+ mutex_unlock(&ifmgd->mtx);
+ return -EALREADY;
}
+ mutex_unlock(&ifmgd->mtx);
- list_add(&wk->list, &ifmgd->work_list);
+ wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
+ if (!wk)
+ return -ENOMEM;
ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
@@ -2472,8 +1888,6 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
- sdata->local->oper_channel = req->bss->channel;
- ieee80211_hw_config(sdata->local, 0);
if (req->ie && req->ie_len) {
memcpy(wk->ie, req->ie, req->ie_len);
@@ -2481,12 +1895,55 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
} else
wk->ie_len = 0;
+ wk->assoc.bss = req->bss;
+
+ memcpy(wk->filter_ta, req->bss->bssid, ETH_ALEN);
+
+ /* new association always uses requested smps mode */
+ if (ifmgd->req_smps == IEEE80211_SMPS_AUTOMATIC) {
+ if (ifmgd->powersave)
+ ifmgd->ap_smps = IEEE80211_SMPS_DYNAMIC;
+ else
+ ifmgd->ap_smps = IEEE80211_SMPS_OFF;
+ } else
+ ifmgd->ap_smps = ifmgd->req_smps;
+
+ wk->assoc.smps = ifmgd->ap_smps;
+ /*
+ * IEEE802.11n does not allow TKIP/WEP as pairwise ciphers in HT mode.
+ * We still associate in non-HT mode (11a/b/g) if any one of these
+ * ciphers is configured as pairwise.
+ * We can set this to true for non-11n hardware, that'll be checked
+ * separately along with the peer capabilities.
+ */
+ wk->assoc.use_11n = !(ifmgd->flags & IEEE80211_STA_DISABLE_11N);
+ wk->assoc.capability = req->bss->capability;
+ wk->assoc.wmm_used = bss->wmm_used;
+ wk->assoc.supp_rates = bss->supp_rates;
+ wk->assoc.supp_rates_len = bss->supp_rates_len;
+ wk->assoc.ht_information_ie =
+ ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION);
+
+ if (bss->wmm_used && bss->uapsd_supported &&
+ (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
+ wk->assoc.uapsd_used = true;
+ ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED;
+ } else {
+ wk->assoc.uapsd_used = false;
+ ifmgd->flags &= ~IEEE80211_STA_UAPSD_ENABLED;
+ }
+
+ ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+ memcpy(wk->assoc.ssid, ssid + 2, ssid[1]);
+ wk->assoc.ssid_len = ssid[1];
+
if (req->prev_bssid)
- memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN);
+ memcpy(wk->assoc.prev_bssid, req->prev_bssid, ETH_ALEN);
- wk->state = IEEE80211_MGD_STATE_ASSOC;
- wk->tries = 0;
- wk->timeout = jiffies; /* run right away */
+ wk->type = IEEE80211_WORK_ASSOC;
+ wk->chan = req->bss->channel;
+ wk->sdata = sdata;
+ wk->done = ieee80211_assoc_done;
if (req->use_mfp) {
ifmgd->mfp = IEEE80211_MFP_REQUIRED;
@@ -2501,69 +1958,59 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
else
ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
- ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work);
-
- err = 0;
-
- out:
- mutex_unlock(&ifmgd->mtx);
- return err;
+ ieee80211_add_work(wk);
+ return 0;
}
int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
struct cfg80211_deauth_request *req,
void *cookie)
{
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_mgd_work *wk;
- const u8 *bssid = NULL;
- bool not_auth_yet = false;
+ struct ieee80211_work *wk;
+ const u8 *bssid = req->bss->bssid;
mutex_lock(&ifmgd->mtx);
- if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) {
+ if (ifmgd->associated == req->bss) {
bssid = req->bss->bssid;
- ieee80211_set_disassoc(sdata, true);
- } else list_for_each_entry(wk, &ifmgd->work_list, list) {
- if (&wk->bss->cbss == req->bss) {
- bssid = req->bss->bssid;
- if (wk->state == IEEE80211_MGD_STATE_PROBE)
- not_auth_yet = true;
+ ieee80211_set_disassoc(sdata);
+ mutex_unlock(&ifmgd->mtx);
+ } else {
+ bool not_auth_yet = false;
+
+ mutex_unlock(&ifmgd->mtx);
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->type != IEEE80211_WORK_DIRECT_PROBE)
+ continue;
+ if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
+ continue;
+ not_auth_yet = true;
list_del(&wk->list);
- kfree(wk);
+ free_work(wk);
break;
}
- }
-
- /*
- * If somebody requests authentication and we haven't
- * sent out an auth frame yet there's no need to send
- * out a deauth frame either. If the state was PROBE,
- * then this is the case. If it's AUTH we have sent a
- * frame, and if it's IDLE we have completed the auth
- * process already.
- */
- if (not_auth_yet) {
- mutex_unlock(&ifmgd->mtx);
- __cfg80211_auth_canceled(sdata->dev, bssid);
- return 0;
- }
+ mutex_unlock(&local->work_mtx);
- /*
- * cfg80211 should catch this ... but it's racy since
- * we can receive a deauth frame, process it, hand it
- * to cfg80211 while that's in a locked section already
- * trying to tell us that the user wants to disconnect.
- */
- if (!bssid) {
- mutex_unlock(&ifmgd->mtx);
- return -ENOLINK;
+ /*
+ * If somebody requests authentication and we haven't
+ * sent out an auth frame yet there's no need to send
+ * out a deauth frame either. If the state was PROBE,
+ * then this is the case. If it's AUTH we have sent a
+ * frame, and if it's IDLE we have completed the auth
+ * process already.
+ */
+ if (not_auth_yet) {
+ __cfg80211_auth_canceled(sdata->dev, bssid);
+ return 0;
+ }
}
- mutex_unlock(&ifmgd->mtx);
-
printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
- sdata->dev->name, bssid, req->reason_code);
+ sdata->name, bssid, req->reason_code);
ieee80211_send_deauth_disassoc(sdata, bssid,
IEEE80211_STYPE_DEAUTH, req->reason_code,
@@ -2588,15 +2035,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
* to cfg80211 while that's in a locked section already
* trying to tell us that the user wants to disconnect.
*/
- if (&ifmgd->associated->cbss != req->bss) {
+ if (ifmgd->associated != req->bss) {
mutex_unlock(&ifmgd->mtx);
return -ENOLINK;
}
printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
- sdata->dev->name, req->bss->bssid, req->reason_code);
+ sdata->name, req->bss->bssid, req->reason_code);
- ieee80211_set_disassoc(sdata, false);
+ ieee80211_set_disassoc(sdata);
mutex_unlock(&ifmgd->mtx);
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
new file mode 100644
index 000000000000..1facfeb1f79b
--- /dev/null
+++ b/net/mac80211/offchannel.c
@@ -0,0 +1,168 @@
+/*
+ * Off-channel operation helpers
+ *
+ * Copyright 2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright 2004, Instant802 Networks, Inc.
+ * Copyright 2005, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+
+/*
+ * inform AP that we will go to sleep so that it will buffer the frames
+ * while we scan
+ */
+static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ local->offchannel_ps_enabled = false;
+
+ /* FIXME: what to do when local->pspolling is true? */
+
+ del_timer_sync(&local->dynamic_ps_timer);
+ cancel_work_sync(&local->dynamic_ps_enable_work);
+
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ local->offchannel_ps_enabled = true;
+ local->hw.conf.flags &= ~IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ }
+
+ if (!(local->offchannel_ps_enabled) ||
+ !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
+ /*
+ * If power save was enabled, no need to send a nullfunc
+ * frame because AP knows that we are sleeping. But if the
+ * hardware is creating the nullfunc frame for power save
+ * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
+ * enabled) and power save was enabled, the firmware just
+ * sent a null frame with power save disabled. So we need
+ * to send a new nullfunc frame to inform the AP that we
+ * are again sleeping.
+ */
+ ieee80211_send_nullfunc(local, sdata, 1);
+}
+
+/* inform AP that we are awake again, unless power save is enabled */
+static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (!local->ps_sdata)
+ ieee80211_send_nullfunc(local, sdata, 0);
+ else if (local->offchannel_ps_enabled) {
+ /*
+ * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
+ * will send a nullfunc frame with the powersave bit set
+ * even though the AP already knows that we are sleeping.
+ * This could be avoided by sending a null frame with power
+ * save bit disabled before enabling the power save, but
+ * this doesn't gain anything.
+ *
+ * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
+ * to send a nullfunc frame because AP already knows that
+ * we are sleeping, let's just enable power save mode in
+ * hardware.
+ */
+ local->hw.conf.flags |= IEEE80211_CONF_PS;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+ } else if (local->hw.conf.dynamic_ps_timeout > 0) {
+ /*
+ * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
+ * had been running before leaving the operating channel,
+ * restart the timer now and send a nullfunc frame to inform
+ * the AP that we are awake.
+ */
+ ieee80211_send_nullfunc(local, sdata, 0);
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
+ }
+}
+
+void ieee80211_offchannel_stop_beaconing(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ /* disable beaconing */
+ if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
+ ieee80211_bss_info_change_notify(
+ sdata, BSS_CHANGED_BEACON_ENABLED);
+
+ /*
+ * only handle non-STA interfaces here, STA interfaces
+ * are handled in ieee80211_offchannel_stop_station(),
+ * e.g., from the background scan state machine.
+ *
+ * In addition, do not stop monitor interface to allow it to be
+ * used from user space controlled off-channel operations.
+ */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+ sdata->vif.type != NL80211_IFTYPE_MONITOR)
+ netif_tx_stop_all_queues(sdata->dev);
+ }
+ mutex_unlock(&local->iflist_mtx);
+}
+
+void ieee80211_offchannel_stop_station(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ /*
+ * notify the AP about us leaving the channel and stop all STA interfaces
+ */
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ netif_tx_stop_all_queues(sdata->dev);
+ if (sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_enable(sdata);
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+}
+
+void ieee80211_offchannel_return(struct ieee80211_local *local,
+ bool enable_beaconing)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ /* Tell AP we're back */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (sdata->u.mgd.associated)
+ ieee80211_offchannel_ps_disable(sdata);
+ netif_tx_wake_all_queues(sdata->dev);
+ }
+
+ /* re-enable beaconing */
+ if (enable_beaconing &&
+ (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT))
+ ieee80211_bss_info_change_notify(
+ sdata, BSS_CHANGED_BEACON_ENABLED);
+ }
+ mutex_unlock(&local->iflist_mtx);
+}
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index e535f1c988fe..47f818959ad7 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,7 +10,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_if_init_conf conf;
struct sta_info *sta;
unsigned long flags;
@@ -65,7 +64,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
+ drv_sta_notify(local, sdata, STA_NOTIFY_REMOVE,
&sta->sta);
}
@@ -93,17 +92,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
break;
}
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
/* disable beaconing */
ieee80211_bss_info_change_notify(sdata,
BSS_CHANGED_BEACON_ENABLED);
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = sdata->dev->dev_addr;
- drv_remove_interface(local, &conf);
+ drv_remove_interface(local, &sdata->vif);
}
/* stop hardware - this must stop RX */
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b9007f80cb92..c74b7c85403c 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -207,6 +207,27 @@ static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
}
+static void rc_send_low_broadcast(s8 *idx, u32 basic_rates, u8 max_rate_idx)
+{
+ u8 i;
+
+ if (basic_rates == 0)
+ return; /* assume basic rates unknown and accept rate */
+ if (*idx < 0)
+ return;
+ if (basic_rates & (1 << *idx))
+ return; /* selected rate is a basic rate */
+
+ for (i = *idx + 1; i <= max_rate_idx; i++) {
+ if (basic_rates & (1 << i)) {
+ *idx = i;
+ return;
+ }
+ }
+
+ /* could not find a basic rate; use original selection */
+}
+
bool rate_control_send_low(struct ieee80211_sta *sta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
@@ -218,12 +239,48 @@ bool rate_control_send_low(struct ieee80211_sta *sta,
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
1 : txrc->hw->max_rate_tries;
+ if (!sta && txrc->ap)
+ rc_send_low_broadcast(&info->control.rates[0].idx,
+ txrc->bss_conf->basic_rates,
+ txrc->sband->n_bitrates);
return true;
}
return false;
}
EXPORT_SYMBOL(rate_control_send_low);
+static void rate_idx_match_mask(struct ieee80211_tx_rate *rate,
+ int n_bitrates, u32 mask)
+{
+ int j;
+
+ /* See whether the selected rate or anything below it is allowed. */
+ for (j = rate->idx; j >= 0; j--) {
+ if (mask & (1 << j)) {
+ /* Okay, found a suitable rate. Use it. */
+ rate->idx = j;
+ return;
+ }
+ }
+
+ /* Try to find a higher rate that would be allowed */
+ for (j = rate->idx + 1; j < n_bitrates; j++) {
+ if (mask & (1 << j)) {
+ /* Okay, found a suitable rate. Use it. */
+ rate->idx = j;
+ return;
+ }
+ }
+
+ /*
+ * Uh.. No suitable rate exists. This should not really happen with
+ * sane TX rate mask configurations. However, should someone manage to
+ * configure supported rates and TX rate mask in incompatible way,
+ * allow the frame to be transmitted with whatever the rate control
+ * selected.
+ */
+}
+
void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_tx_rate_control *txrc)
@@ -233,6 +290,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *ista = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
int i;
+ u32 mask;
if (sta) {
ista = &sta->sta;
@@ -245,23 +303,31 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
info->control.rates[i].count = 1;
}
- if (sta && sdata->force_unicast_rateidx > -1) {
- info->control.rates[0].idx = sdata->force_unicast_rateidx;
- } else {
- ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
- info->flags |= IEEE80211_TX_INTFL_RCALGO;
- }
+ ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
/*
- * try to enforce the maximum rate the user wanted
+ * Try to enforce the rateidx mask the user wanted. skip this if the
+ * default mask (allow all rates) is used to save some processing for
+ * the common case.
*/
- if (sdata->max_ratectrl_rateidx > -1)
+ mask = sdata->rc_rateidx_mask[info->band];
+ if (mask != (1 << txrc->sband->n_bitrates) - 1) {
+ if (sta) {
+ /* Filter out rates that the STA does not support */
+ mask &= sta->sta.supp_rates[info->band];
+ }
+ /*
+ * Make sure the rate index selected for each TX rate is
+ * included in the configured mask and change the rate indexes
+ * if needed.
+ */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+ /* Rate masking supports only legacy rates for now */
if (info->control.rates[i].flags & IEEE80211_TX_RC_MCS)
continue;
- info->control.rates[i].idx =
- min_t(s8, info->control.rates[i].idx,
- sdata->max_ratectrl_rateidx);
+ rate_idx_match_mask(&info->control.rates[i],
+ txrc->sband->n_bitrates, mask);
+ }
}
BUG_ON(info->control.rates[0].idx < 0);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index cb9bd1f65e27..669dddd40521 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -44,10 +44,7 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO))
- ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
+ ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 9f2807aeaf52..a8e15b84c05b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -283,15 +283,15 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
skb->protocol = htons(ETH_P_802_2);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
continue;
if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
continue;
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
if (prev_dev) {
skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2) {
@@ -361,7 +361,9 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
* boundary. In the case of regular frames, this simply means aligning the
* payload to a four-byte boundary (because either the IP header is directly
* contained, or IV/RFC1042 headers that have a length divisible by four are
- * in front of it).
+ * in front of it). If the payload data is not properly aligned and the
+ * architecture doesn't support efficient unaligned operations, mac80211
+ * will align the data.
*
* With A-MSDU frames, however, the payload data address must yield two modulo
* four because there are 14-byte 802.3 headers within the A-MSDU frames that
@@ -375,25 +377,10 @@ static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
*/
static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
- int hdrlen;
-
-#ifndef CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT
- return;
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ WARN_ONCE((unsigned long)rx->skb->data & 1,
+ "unaligned packet at 0x%p\n", rx->skb->data);
#endif
-
- if (WARN_ONCE((unsigned long)rx->skb->data & 1,
- "unaligned packet at 0x%p\n", rx->skb->data))
- return;
-
- if (!ieee80211_is_data_present(hdr->frame_control))
- return;
-
- hdrlen = ieee80211_hdrlen(hdr->frame_control);
- if (rx->flags & IEEE80211_RX_AMSDU)
- hdrlen += ETH_HLEN;
- WARN_ONCE(((unsigned long)(rx->skb->data + hdrlen)) & 3,
- "unaligned IP payload at 0x%p\n", rx->skb->data + hdrlen);
}
@@ -476,7 +463,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
- char *dev_addr = rx->sdata->dev->dev_addr;
+ char *dev_addr = rx->sdata->vif.addr;
if (ieee80211_is_data(hdr->frame_control)) {
if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1021,10 +1008,10 @@ static void ap_sta_ps_start(struct sta_info *sta)
atomic_inc(&sdata->bss->num_sta_ps);
set_sta_flags(sta, WLAN_STA_PS_STA);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
+ drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
- sdata->dev->name, sta->sta.addr, sta->sta.aid);
+ sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
@@ -1038,13 +1025,13 @@ static void ap_sta_ps_end(struct sta_info *sta)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d exits power save mode\n",
- sdata->dev->name, sta->sta.addr, sta->sta.aid);
+ sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
if (test_sta_flags(sta, WLAN_STA_PS_DRIVER)) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d driver-ps-blocked\n",
- sdata->dev->name, sta->sta.addr, sta->sta.aid);
+ sdata->name, sta->sta.addr, sta->sta.aid);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
return;
}
@@ -1124,6 +1111,18 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (ieee80211_is_nullfunc(hdr->frame_control) ||
ieee80211_is_qos_nullfunc(hdr->frame_control)) {
I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
+
+ /*
+ * If we receive a 4-addr nullfunc frame from a STA
+ * that was not moved to a 4-addr STA vlan yet, drop
+ * the frame to the monitor interface, to make sure
+ * that hostapd sees it
+ */
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
+ (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ !rx->sdata->u.vlan.sta)))
+ return RX_DROP_MONITOR;
/*
* Update counter and free packet here to avoid
* counting this as a dropped packed.
@@ -1156,7 +1155,7 @@ ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: RX reassembly removed oldest "
"fragment entry (idx=%d age=%lu seq=%d last_frag=%d "
"addr1=%pM addr2=%pM\n",
- sdata->dev->name, idx,
+ sdata->name, idx,
jiffies - entry->first_frag_time, entry->seq,
entry->last_frag, hdr->addr1, hdr->addr2);
#endif
@@ -1424,7 +1423,6 @@ static int
__ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
- struct net_device *dev = sdata->dev;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1436,7 +1434,7 @@ __ieee80211_data_to_8023(struct ieee80211_rx_data *rx)
(sdata->vif.type == NL80211_IFTYPE_STATION && sdata->u.mgd.use_4addr)))
return -1;
- return ieee80211_data_to_8023(rx->skb, dev->dev_addr, sdata->vif.type);
+ return ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
}
/*
@@ -1453,7 +1451,7 @@ static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
* of whether the frame was encrypted or not.
*/
if (ehdr->h_proto == htons(ETH_P_PAE) &&
- (compare_ether_addr(ehdr->h_dest, rx->sdata->dev->dev_addr) == 0 ||
+ (compare_ether_addr(ehdr->h_dest, rx->sdata->vif.addr) == 0 ||
compare_ether_addr(ehdr->h_dest, pae_group_addr) == 0))
return true;
@@ -1472,7 +1470,6 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct net_device *dev = sdata->dev;
- struct ieee80211_local *local = rx->local;
struct sk_buff *skb, *xmit_skb;
struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
struct sta_info *dsta;
@@ -1495,8 +1492,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
printk(KERN_DEBUG "%s: failed to clone "
"multicast frame\n", dev->name);
} else {
- dsta = sta_info_get(local, skb->data);
- if (dsta && dsta->sdata->dev == dev) {
+ dsta = sta_info_get(sdata, skb->data);
+ if (dsta) {
/*
* The destination station is associated to
* this AP (in this VLAN), so send the frame
@@ -1512,7 +1509,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
if (skb) {
int align __maybe_unused;
-#if defined(CONFIG_MAC80211_DEBUG_PACKET_ALIGNMENT) || !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
/*
* 'align' will only take the values 0 or 2 here
* since all frames are required to be aligned
@@ -1556,16 +1553,10 @@ static ieee80211_rx_result debug_noinline
ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
{
struct net_device *dev = rx->sdata->dev;
- struct ieee80211_local *local = rx->local;
- u16 ethertype;
- u8 *payload;
- struct sk_buff *skb = rx->skb, *frame = NULL;
+ struct sk_buff *skb = rx->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- const struct ethhdr *eth;
- int remaining, err;
- u8 dst[ETH_ALEN];
- u8 src[ETH_ALEN];
+ struct sk_buff_head frame_list;
if (unlikely(!ieee80211_is_data(fc)))
return RX_CONTINUE;
@@ -1576,94 +1567,34 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
if (!(rx->flags & IEEE80211_RX_AMSDU))
return RX_CONTINUE;
- err = __ieee80211_data_to_8023(rx);
- if (unlikely(err))
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ !rx->sdata->u.vlan.sta)
return RX_DROP_UNUSABLE;
- skb->dev = dev;
-
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += skb->len;
-
- /* skip the wrapping header */
- eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
- if (!eth)
+ if (is_multicast_ether_addr(hdr->addr1) &&
+ ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
+ rx->sdata->u.vlan.sta) ||
+ (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
+ rx->sdata->u.mgd.use_4addr)))
return RX_DROP_UNUSABLE;
- while (skb != frame) {
- u8 padding;
- __be16 len = eth->h_proto;
- unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
-
- remaining = skb->len;
- memcpy(dst, eth->h_dest, ETH_ALEN);
- memcpy(src, eth->h_source, ETH_ALEN);
-
- padding = ((4 - subframe_len) & 0x3);
- /* the last MSDU has no padding */
- if (subframe_len > remaining)
- return RX_DROP_UNUSABLE;
+ skb->dev = dev;
+ __skb_queue_head_init(&frame_list);
- skb_pull(skb, sizeof(struct ethhdr));
- /* if last subframe reuse skb */
- if (remaining <= subframe_len + padding)
- frame = skb;
- else {
- /*
- * Allocate and reserve two bytes more for payload
- * alignment since sizeof(struct ethhdr) is 14.
- */
- frame = dev_alloc_skb(
- ALIGN(local->hw.extra_tx_headroom, 4) +
- subframe_len + 2);
-
- if (frame == NULL)
- return RX_DROP_UNUSABLE;
-
- skb_reserve(frame,
- ALIGN(local->hw.extra_tx_headroom, 4) +
- sizeof(struct ethhdr) + 2);
- memcpy(skb_put(frame, ntohs(len)), skb->data,
- ntohs(len));
-
- eth = (struct ethhdr *) skb_pull(skb, ntohs(len) +
- padding);
- if (!eth) {
- dev_kfree_skb(frame);
- return RX_DROP_UNUSABLE;
- }
- }
+ ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
+ rx->sdata->vif.type,
+ rx->local->hw.extra_tx_headroom);
- skb_reset_network_header(frame);
- frame->dev = dev;
- frame->priority = skb->priority;
- rx->skb = frame;
-
- payload = frame->data;
- ethertype = (payload[6] << 8) | payload[7];
-
- if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
- ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
- compare_ether_addr(payload,
- bridge_tunnel_header) == 0)) {
- /* remove RFC1042 or Bridge-Tunnel
- * encapsulation and replace EtherType */
- skb_pull(frame, 6);
- memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
- } else {
- memcpy(skb_push(frame, sizeof(__be16)),
- &len, sizeof(__be16));
- memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
- memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
- }
+ while (!skb_queue_empty(&frame_list)) {
+ rx->skb = __skb_dequeue(&frame_list);
if (!ieee80211_frame_allowed(rx, fc)) {
- if (skb == frame) /* last frame */
- return RX_DROP_UNUSABLE;
- dev_kfree_skb(frame);
+ dev_kfree_skb(rx->skb);
continue;
}
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += rx->skb->len;
ieee80211_deliver_skb(rx);
}
@@ -1721,7 +1652,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* Frame has reached destination. Don't forward */
if (!is_multicast_ether_addr(hdr->addr1) &&
- compare_ether_addr(sdata->dev->dev_addr, hdr->addr3) == 0)
+ compare_ether_addr(sdata->vif.addr, hdr->addr3) == 0)
return RX_CONTINUE;
mesh_hdr->ttl--;
@@ -1738,15 +1669,17 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!fwd_skb && net_ratelimit())
printk(KERN_DEBUG "%s: failed to clone mesh frame\n",
- sdata->dev->name);
+ sdata->name);
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
- memcpy(fwd_hdr->addr2, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
info = IEEE80211_SKB_CB(fwd_skb);
memset(info, 0, sizeof(*info));
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
- ieee80211_select_queue(local, fwd_skb);
+ skb_set_queue_mapping(skb,
+ ieee80211_select_queue(rx->sdata, fwd_skb));
+ ieee80211_set_qos_hdr(local, skb);
if (is_multicast_ether_addr(fwd_hdr->addr1))
IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
fwded_mcast);
@@ -1870,7 +1803,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb;
struct ieee80211_mgmt *resp;
- if (compare_ether_addr(mgmt->da, sdata->dev->dev_addr) != 0) {
+ if (compare_ether_addr(mgmt->da, sdata->vif.addr) != 0) {
/* Not to own unicast address */
return;
}
@@ -1894,7 +1827,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(resp, 0, 24);
memcpy(resp->da, mgmt->sa, ETH_ALEN);
- memcpy(resp->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
@@ -2013,6 +1946,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
}
break;
default:
+ /* do not process rejected action frames */
+ if (mgmt->u.action.category & 0x80)
+ return RX_DROP_MONITOR;
+
return RX_CONTINUE;
}
@@ -2026,6 +1963,7 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
+ ieee80211_rx_result rxs;
if (!(rx->flags & IEEE80211_RX_RA_MATCH))
return RX_DROP_MONITOR;
@@ -2033,6 +1971,10 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
if (ieee80211_drop_unencrypted(rx, mgmt->frame_control))
return RX_DROP_MONITOR;
+ rxs = ieee80211_work_rx_mgmt(rx->sdata, rx->skb);
+ if (rxs != RX_CONTINUE)
+ return rxs;
+
if (ieee80211_vif_is_mesh(&sdata->vif))
return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
@@ -2137,7 +2079,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
skb->protocol = htons(ETH_P_802_2);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
@@ -2274,7 +2216,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
if (!bssid && !sdata->u.mgd.use_4addr)
return 0;
if (!multicast &&
- compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
+ compare_ether_addr(sdata->vif.addr, hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2291,7 +2233,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
return 0;
rx->flags &= ~IEEE80211_RX_RA_MATCH;
} else if (!multicast &&
- compare_ether_addr(sdata->dev->dev_addr,
+ compare_ether_addr(sdata->vif.addr,
hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
@@ -2308,7 +2250,7 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
break;
case NL80211_IFTYPE_MESH_POINT:
if (!multicast &&
- compare_ether_addr(sdata->dev->dev_addr,
+ compare_ether_addr(sdata->vif.addr,
hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
@@ -2319,11 +2261,11 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_AP:
if (!bssid) {
- if (compare_ether_addr(sdata->dev->dev_addr,
+ if (compare_ether_addr(sdata->vif.addr,
hdr->addr1))
return 0;
} else if (!ieee80211_bssid_match(bssid,
- sdata->dev->dev_addr)) {
+ sdata->vif.addr)) {
if (!(rx->flags & IEEE80211_RX_IN_SCAN))
return 0;
rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2362,6 +2304,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
int prepares;
struct ieee80211_sub_if_data *prev = NULL;
struct sk_buff *skb_new;
+ struct sta_info *sta, *tmp;
+ bool found_sta = false;
hdr = (struct ieee80211_hdr *)skb->data;
memset(&rx, 0, sizeof(rx));
@@ -2378,68 +2322,76 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_parse_qos(&rx);
ieee80211_verify_alignment(&rx);
- rx.sta = sta_info_get(local, hdr->addr2);
- if (rx.sta)
- rx.sdata = rx.sta->sdata;
-
- if (rx.sdata && ieee80211_is_data(hdr->frame_control)) {
- rx.flags |= IEEE80211_RX_RA_MATCH;
- prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
- if (prepares) {
- if (status->flag & RX_FLAG_MMIC_ERROR) {
- if (rx.flags & IEEE80211_RX_RA_MATCH)
- ieee80211_rx_michael_mic_report(hdr, &rx);
- } else
- prev = rx.sdata;
+ if (ieee80211_is_data(hdr->frame_control)) {
+ for_each_sta_info(local, hdr->addr2, sta, tmp) {
+ rx.sta = sta;
+ found_sta = true;
+ rx.sdata = sta->sdata;
+
+ rx.flags |= IEEE80211_RX_RA_MATCH;
+ prepares = prepare_for_handlers(rx.sdata, &rx, hdr);
+ if (prepares) {
+ if (status->flag & RX_FLAG_MMIC_ERROR) {
+ if (rx.flags & IEEE80211_RX_RA_MATCH)
+ ieee80211_rx_michael_mic_report(hdr, &rx);
+ } else
+ prev = rx.sdata;
+ }
}
- } else list_for_each_entry_rcu(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
+ }
+ if (!found_sta) {
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- continue;
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ continue;
- rx.flags |= IEEE80211_RX_RA_MATCH;
- prepares = prepare_for_handlers(sdata, &rx, hdr);
+ rx.sta = sta_info_get(sdata, hdr->addr2);
- if (!prepares)
- continue;
+ rx.flags |= IEEE80211_RX_RA_MATCH;
+ prepares = prepare_for_handlers(sdata, &rx, hdr);
- if (status->flag & RX_FLAG_MMIC_ERROR) {
- rx.sdata = sdata;
- if (rx.flags & IEEE80211_RX_RA_MATCH)
- ieee80211_rx_michael_mic_report(hdr, &rx);
- continue;
- }
+ if (!prepares)
+ continue;
- /*
- * frame is destined for this interface, but if it's not
- * also for the previous one we handle that after the
- * loop to avoid copying the SKB once too much
- */
+ if (status->flag & RX_FLAG_MMIC_ERROR) {
+ rx.sdata = sdata;
+ if (rx.flags & IEEE80211_RX_RA_MATCH)
+ ieee80211_rx_michael_mic_report(hdr,
+ &rx);
+ continue;
+ }
- if (!prev) {
- prev = sdata;
- continue;
- }
+ /*
+ * frame is destined for this interface, but if it's
+ * not also for the previous one we handle that after
+ * the loop to avoid copying the SKB once too much
+ */
- /*
- * frame was destined for the previous interface
- * so invoke RX handlers for it
- */
+ if (!prev) {
+ prev = sdata;
+ continue;
+ }
- skb_new = skb_copy(skb, GFP_ATOMIC);
- if (!skb_new) {
- if (net_ratelimit())
- printk(KERN_DEBUG "%s: failed to copy "
- "multicast frame for %s\n",
- wiphy_name(local->hw.wiphy),
- prev->dev->name);
- continue;
+ /*
+ * frame was destined for the previous interface
+ * so invoke RX handlers for it
+ */
+
+ skb_new = skb_copy(skb, GFP_ATOMIC);
+ if (!skb_new) {
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: failed to copy "
+ "multicast frame for %s\n",
+ wiphy_name(local->hw.wiphy),
+ prev->name);
+ continue;
+ }
+ ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
+ prev = sdata;
}
- ieee80211_invoke_rx_handlers(prev, &rx, skb_new, rate);
- prev = sdata;
}
if (prev)
ieee80211_invoke_rx_handlers(prev, &rx, skb, rate);
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index f1a4c7160300..9afe2f9885dc 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -12,7 +12,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/wireless.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
@@ -29,16 +28,19 @@ struct ieee80211_bss *
ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
u8 *ssid, u8 ssid_len)
{
- return (void *)cfg80211_get_bss(local->hw.wiphy,
- ieee80211_get_channel(local->hw.wiphy,
- freq),
- bssid, ssid, ssid_len,
- 0, 0);
+ struct cfg80211_bss *cbss;
+
+ cbss = cfg80211_get_bss(local->hw.wiphy,
+ ieee80211_get_channel(local->hw.wiphy, freq),
+ bssid, ssid, ssid_len, 0, 0);
+ if (!cbss)
+ return NULL;
+ return (void *)cbss->priv;
}
static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
{
- struct ieee80211_bss *bss = (void *)cbss;
+ struct ieee80211_bss *bss = (void *)cbss->priv;
kfree(bss_mesh_id(bss));
kfree(bss_mesh_cfg(bss));
@@ -47,7 +49,26 @@ static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss)
{
- cfg80211_put_bss((struct cfg80211_bss *)bss);
+ if (!bss)
+ return;
+ cfg80211_put_bss(container_of((void *)bss, struct cfg80211_bss, priv));
+}
+
+static bool is_uapsd_supported(struct ieee802_11_elems *elems)
+{
+ u8 qos_info;
+
+ if (elems->wmm_info && elems->wmm_info_len == 7
+ && elems->wmm_info[5] == 1)
+ qos_info = elems->wmm_info[6];
+ else if (elems->wmm_param && elems->wmm_param_len == 24
+ && elems->wmm_param[5] == 1)
+ qos_info = elems->wmm_param[6];
+ else
+ /* no valid wmm information or parameter element found */
+ return false;
+
+ return qos_info & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD;
}
struct ieee80211_bss *
@@ -59,6 +80,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct ieee80211_channel *channel,
bool beacon)
{
+ struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen;
s32 signal = 0;
@@ -68,13 +90,14 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
signal = (rx_status->signal * 100) / local->hw.max_signal;
- bss = (void *)cfg80211_inform_bss_frame(local->hw.wiphy, channel,
- mgmt, len, signal, GFP_ATOMIC);
+ cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
+ mgmt, len, signal, GFP_ATOMIC);
- if (!bss)
+ if (!cbss)
return NULL;
- bss->cbss.free_priv = ieee80211_rx_bss_free;
+ cbss->free_priv = ieee80211_rx_bss_free;
+ bss = (void *)cbss->priv;
/* save the ERP value so that it is available at association time */
if (elems->erp_info && elems->erp_info_len >= 1) {
@@ -111,6 +134,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
}
bss->wmm_used = elems->wmm_param || elems->wmm_info;
+ bss->uapsd_supported = is_uapsd_supported(elems);
if (!beacon)
bss->last_probe_resp = jiffies;
@@ -147,7 +171,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
presp = ieee80211_is_probe_resp(fc);
if (presp) {
/* ignore ProbeResp to foreign address */
- if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
+ if (memcmp(mgmt->da, sdata->vif.addr, ETH_ALEN))
return RX_DROP_MONITOR;
presp = true;
@@ -220,82 +244,9 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
return true;
}
-/*
- * inform AP that we will go to sleep so that it will buffer the frames
- * while we scan
- */
-static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
-
- local->scan_ps_enabled = false;
-
- /* FIXME: what to do when local->pspolling is true? */
-
- del_timer_sync(&local->dynamic_ps_timer);
- cancel_work_sync(&local->dynamic_ps_enable_work);
-
- if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- local->scan_ps_enabled = true;
- local->hw.conf.flags &= ~IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- }
-
- if (!(local->scan_ps_enabled) ||
- !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
- /*
- * If power save was enabled, no need to send a nullfunc
- * frame because AP knows that we are sleeping. But if the
- * hardware is creating the nullfunc frame for power save
- * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
- * enabled) and power save was enabled, the firmware just
- * sent a null frame with power save disabled. So we need
- * to send a new nullfunc frame to inform the AP that we
- * are again sleeping.
- */
- ieee80211_send_nullfunc(local, sdata, 1);
-}
-
-/* inform AP that we are awake again, unless power save is enabled */
-static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
-
- if (!local->ps_sdata)
- ieee80211_send_nullfunc(local, sdata, 0);
- else if (local->scan_ps_enabled) {
- /*
- * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
- * will send a nullfunc frame with the powersave bit set
- * even though the AP already knows that we are sleeping.
- * This could be avoided by sending a null frame with power
- * save bit disabled before enabling the power save, but
- * this doesn't gain anything.
- *
- * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
- * to send a nullfunc frame because AP already knows that
- * we are sleeping, let's just enable power save mode in
- * hardware.
- */
- local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- } else if (local->hw.conf.dynamic_ps_timeout > 0) {
- /*
- * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
- * had been running before leaving the operating channel,
- * restart the timer now and send a nullfunc frame to inform
- * the AP that we are awake.
- */
- ieee80211_send_nullfunc(local, sdata, 0);
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
- }
-}
-
void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_sub_if_data *sdata;
bool was_hw_scan;
mutex_lock(&local->scan_mtx);
@@ -344,41 +295,19 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
drv_sw_scan_complete(local);
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- /* Tell AP we're back */
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.associated) {
- ieee80211_scan_ps_disable(sdata);
- netif_wake_queue(sdata->dev);
- }
- } else
- netif_wake_queue(sdata->dev);
-
- /* re-enable beaconing */
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- ieee80211_bss_info_change_notify(
- sdata, BSS_CHANGED_BEACON_ENABLED);
- }
- mutex_unlock(&local->iflist_mtx);
+ ieee80211_offchannel_return(local, true);
done:
ieee80211_recalc_idle(local);
ieee80211_mlme_notify_scan_completed(local);
ieee80211_ibss_notify_scan_completed(local);
ieee80211_mesh_notify_scan_completed(local);
+ ieee80211_queue_work(&local->hw, &local->work_work);
}
EXPORT_SYMBOL(ieee80211_scan_completed);
static int ieee80211_start_sw_scan(struct ieee80211_local *local)
{
- struct ieee80211_sub_if_data *sdata;
-
/*
* Hardware/driver doesn't support hw_scan, so use software
* scanning instead. First send a nullfunc frame with power save
@@ -394,33 +323,15 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
*/
drv_sw_scan_start(local);
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- /* disable beaconing */
- if (sdata->vif.type == NL80211_IFTYPE_AP ||
- sdata->vif.type == NL80211_IFTYPE_ADHOC ||
- sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- ieee80211_bss_info_change_notify(
- sdata, BSS_CHANGED_BEACON_ENABLED);
-
- /*
- * only handle non-STA interfaces here, STA interfaces
- * are handled in the scan state machine
- */
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- netif_stop_queue(sdata->dev);
- }
- mutex_unlock(&local->iflist_mtx);
+ ieee80211_offchannel_stop_beaconing(local);
local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
+ drv_flush(local, false);
+
ieee80211_configure_filter(local);
- /* TODO: start scan as soon as all nullfunc frames are ACKed */
ieee80211_queue_delayed_work(&local->hw,
&local->scan_work,
IEEE80211_CHANNEL_TIME);
@@ -433,7 +344,6 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req)
{
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int rc;
if (local->scan_req)
@@ -463,11 +373,8 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->scan_req = req;
local->scan_sdata = sdata;
- if (req != local->int_scan_req &&
- sdata->vif.type == NL80211_IFTYPE_STATION &&
- !list_empty(&ifmgd->work_list)) {
- /* actually wait for the work it's doing to finish/time out */
- set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
+ if (!list_empty(&local->work_list)) {
+ /* wait for the work to finish/time out */
return 0;
}
@@ -526,7 +433,7 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
/* check if at least one STA interface is associated */
mutex_lock(&local->iflist_mtx);
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
@@ -564,56 +471,35 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
- struct ieee80211_sub_if_data *sdata;
+ ieee80211_offchannel_stop_station(local);
+
+ __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
/*
- * notify the AP about us leaving the channel and stop all STA interfaces
+ * What if the nullfunc frames didn't arrive?
*/
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- netif_stop_queue(sdata->dev);
- if (sdata->u.mgd.associated)
- ieee80211_scan_ps_enable(sdata);
- }
- }
- mutex_unlock(&local->iflist_mtx);
-
- __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+ drv_flush(local, false);
+ if (local->ops->flush)
+ *next_delay = 0;
+ else
+ *next_delay = HZ / 10;
/* advance to the next channel to be scanned */
- *next_delay = HZ / 10;
local->next_scan_state = SCAN_SET_CHANNEL;
}
static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
unsigned long *next_delay)
{
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
-
/* switch back to the operating channel */
local->scan_channel = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/*
- * notify the AP about us being back and restart all STA interfaces
+ * Only re-enable station mode interface now; beaconing will be
+ * re-enabled once the full scan has been completed.
*/
- mutex_lock(&local->iflist_mtx);
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (!netif_running(sdata->dev))
- continue;
-
- /* Tell AP we're back */
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.associated)
- ieee80211_scan_ps_disable(sdata);
- netif_wake_queue(sdata->dev);
- }
- }
- mutex_unlock(&local->iflist_mtx);
+ ieee80211_offchannel_return(local, false);
__clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
@@ -727,7 +613,7 @@ void ieee80211_scan_work(struct work_struct *work)
/*
* Avoid re-scheduling when the sdata is going away.
*/
- if (!netif_running(sdata->dev)) {
+ if (!ieee80211_sdata_running(sdata)) {
ieee80211_scan_completed(&local->hw, true);
return;
}
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index aa743a895cf9..7733f66ee2c4 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -35,7 +35,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
if (!skb) {
printk(KERN_ERR "%s: failed to allocate buffer for "
- "measurement report frame\n", sdata->dev->name);
+ "measurement report frame\n", sdata->name);
return;
}
@@ -43,7 +43,7 @@ static void ieee80211_send_refuse_measurement_request(struct ieee80211_sub_if_da
msr_report = (struct ieee80211_mgmt *)skb_put(skb, 24);
memset(msr_report, 0, 24);
memcpy(msr_report->da, da, ETH_ALEN);
- memcpy(msr_report->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(msr_report->sa, sdata->vif.addr, ETH_ALEN);
memcpy(msr_report->bssid, bssid, ETH_ALEN);
msr_report->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ACTION);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 71f370dd24bc..f735826f055c 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -103,13 +103,37 @@ static int sta_info_hash_del(struct ieee80211_local *local,
}
/* protected by RCU */
-struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr)
+struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
{
+ struct ieee80211_local *local = sdata->local;
struct sta_info *sta;
sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
while (sta) {
- if (memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ if (sta->sdata == sdata &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
+ break;
+ sta = rcu_dereference(sta->hnext);
+ }
+ return sta;
+}
+
+/*
+ * Get sta info either from the specified interface
+ * or from one of its vlans
+ */
+struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+
+ sta = rcu_dereference(local->sta_hash[STA_HASH(addr)]);
+ while (sta) {
+ if ((sta->sdata == sdata ||
+ sta->sdata->bss == sdata->bss) &&
+ memcmp(sta->sta.addr, addr, ETH_ALEN) == 0)
break;
sta = rcu_dereference(sta->hnext);
}
@@ -356,6 +380,7 @@ int sta_info_insert(struct sta_info *sta)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct station_info sinfo;
unsigned long flags;
int err = 0;
@@ -364,12 +389,12 @@ int sta_info_insert(struct sta_info *sta)
* something inserts a STA (on one CPU) without holding the RTNL
* and another CPU turns off the net device.
*/
- if (unlikely(!netif_running(sdata->dev))) {
+ if (unlikely(!ieee80211_sdata_running(sdata))) {
err = -ENETDOWN;
goto out_free;
}
- if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->dev->dev_addr) == 0 ||
+ if (WARN_ON(compare_ether_addr(sta->sta.addr, sdata->vif.addr) == 0 ||
is_multicast_ether_addr(sta->sta.addr))) {
err = -EINVAL;
goto out_free;
@@ -377,7 +402,7 @@ int sta_info_insert(struct sta_info *sta)
spin_lock_irqsave(&local->sta_lock, flags);
/* check if STA exists already */
- if (sta_info_get(local, sta->sta.addr)) {
+ if (sta_info_get(sdata, sta->sta.addr)) {
spin_unlock_irqrestore(&local->sta_lock, flags);
err = -EEXIST;
goto out_free;
@@ -394,7 +419,7 @@ int sta_info_insert(struct sta_info *sta)
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD, &sta->sta);
+ drv_sta_notify(local, sdata, STA_NOTIFY_ADD, &sta->sta);
sdata = sta->sdata;
}
@@ -405,6 +430,10 @@ int sta_info_insert(struct sta_info *sta)
spin_unlock_irqrestore(&local->sta_lock, flags);
+ sinfo.filled = 0;
+ sinfo.generation = local->sta_generation;
+ cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_ATOMIC);
+
#ifdef CONFIG_MAC80211_DEBUGFS
/*
* Debugfs entry adding might sleep, so schedule process
@@ -534,7 +563,7 @@ static void __sta_info_unlink(struct sta_info **sta)
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_REMOVE,
+ drv_sta_notify(local, sdata, STA_NOTIFY_REMOVE,
&(*sta)->sta);
sdata = (*sta)->sdata;
}
@@ -828,7 +857,7 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
if (time_after(jiffies, sta->last_rx + exp_time)) {
#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: expiring inactive STA %pM\n",
- sdata->dev->name, sta->sta.addr);
+ sdata->name, sta->sta.addr);
#endif
__sta_info_unlink(&sta);
if (sta)
@@ -843,11 +872,12 @@ void ieee80211_sta_expire(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw,
const u8 *addr)
{
- struct sta_info *sta = sta_info_get(hw_to_local(hw), addr);
+ struct sta_info *sta, *nxt;
- if (!sta)
- return NULL;
- return &sta->sta;
+ /* Just return a random station ... first in list ... */
+ for_each_sta_info(hw_to_local(hw), addr, sta, nxt)
+ return &sta->sta;
+ return NULL;
}
EXPORT_SYMBOL_GPL(ieee80211_find_sta_by_hw);
@@ -872,7 +902,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
int sent, buffered;
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
+ drv_sta_notify(local, sdata, STA_NOTIFY_AWAKE, &sta->sta);
if (!skb_queue_empty(&sta->ps_tx_buf))
sta_info_clear_tim_bit(sta);
@@ -885,7 +915,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d sending %d filtered/%d PS frames "
- "since STA not sleeping anymore\n", sdata->dev->name,
+ "since STA not sleeping anymore\n", sdata->name,
sta->sta.addr, sta->sta.aid, sent - buffered, buffered);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
@@ -944,7 +974,7 @@ void ieee80211_sta_ps_deliver_poll_response(struct sta_info *sta)
*/
printk(KERN_DEBUG "%s: STA %pM sent PS Poll even "
"though there are no buffered frames for it\n",
- sdata->dev->name, sta->sta.addr);
+ sdata->name, sta->sta.addr);
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
}
}
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index b4810f6aa94f..6f79bba5706e 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -403,9 +403,37 @@ static inline u32 get_sta_flags(struct sta_info *sta)
#define STA_INFO_CLEANUP_INTERVAL (10 * HZ)
/*
- * Get a STA info, must have be under RCU read lock.
+ * Get a STA info, must be under RCU read lock.
*/
-struct sta_info *sta_info_get(struct ieee80211_local *local, const u8 *addr);
+struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
+struct sta_info *sta_info_get_bss(struct ieee80211_sub_if_data *sdata,
+ const u8 *addr);
+
+static inline
+void for_each_sta_info_type_check(struct ieee80211_local *local,
+ const u8 *addr,
+ struct sta_info *sta,
+ struct sta_info *nxt)
+{
+}
+
+#define for_each_sta_info(local, _addr, sta, nxt) \
+ for ( /* initialise loop */ \
+ sta = rcu_dereference(local->sta_hash[STA_HASH(_addr)]),\
+ nxt = sta ? rcu_dereference(sta->hnext) : NULL; \
+ /* typecheck */ \
+ for_each_sta_info_type_check(local, (_addr), sta, nxt), \
+ /* continue condition */ \
+ sta; \
+ /* advance loop */ \
+ sta = nxt, \
+ nxt = sta ? rcu_dereference(sta->hnext) : NULL \
+ ) \
+ /* compare address and run code only if it matches */ \
+ if (memcmp(sta->sta.addr, (_addr), ETH_ALEN) == 0)
+
/*
* Get STA info by index, BROKEN!
*/
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index d78f36c64c7b..0ebcdda24200 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -134,6 +134,40 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
dev_kfree_skb(skb);
}
+static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (void *) skb->data;
+ struct ieee80211_local *local = sta->local;
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
+ if (ieee80211_is_action(mgmt->frame_control) &&
+ sdata->vif.type == NL80211_IFTYPE_STATION &&
+ mgmt->u.action.category == WLAN_CATEGORY_HT &&
+ mgmt->u.action.u.ht_smps.action == WLAN_HT_ACTION_SMPS) {
+ /*
+ * This update looks racy, but isn't -- if we come
+ * here we've definitely got a station that we're
+ * talking to, and on a managed interface that can
+ * only be the AP. And the only other place updating
+ * this variable is before we're associated.
+ */
+ switch (mgmt->u.action.u.ht_smps.smps_control) {
+ case WLAN_HT_SMPS_CONTROL_DYNAMIC:
+ sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_DYNAMIC;
+ break;
+ case WLAN_HT_SMPS_CONTROL_STATIC:
+ sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_STATIC;
+ break;
+ case WLAN_HT_SMPS_CONTROL_DISABLED:
+ default: /* shouldn't happen since we don't send that */
+ sta->sdata->u.mgd.ap_smps = IEEE80211_SMPS_OFF;
+ break;
+ }
+
+ ieee80211_queue_work(&local->hw, &local->recalc_smps);
+ }
+}
+
void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct sk_buff *skb2;
@@ -146,7 +180,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
struct ieee80211_tx_status_rtap_hdr *rthdr;
struct ieee80211_sub_if_data *sdata;
struct net_device *prev_dev = NULL;
- struct sta_info *sta;
+ struct sta_info *sta, *tmp;
int retry_count = -1, i;
bool injected;
@@ -166,9 +200,11 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
sband = local->hw.wiphy->bands[info->band];
- sta = sta_info_get(local, hdr->addr1);
+ for_each_sta_info(local, hdr->addr1, sta, tmp) {
+ /* skip wrong virtual interface */
+ if (memcmp(hdr->addr2, sta->sdata->vif.addr, ETH_ALEN))
+ continue;
- if (sta) {
if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
test_sta_flags(sta, WLAN_STA_PS_STA)) {
/*
@@ -208,6 +244,10 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rate_control_tx_status(local, sband, sta, skb);
if (ieee80211_vif_is_mesh(&sta->sdata->vif))
ieee80211s_update_metric(local, sta, skb);
+
+ if (!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+ (info->flags & IEEE80211_TX_STAT_ACK))
+ ieee80211_frame_acked(sta, skb);
}
rcu_read_unlock();
@@ -311,7 +351,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (sdata->vif.type == NL80211_IFTYPE_MONITOR) {
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 4921d724b6c7..b73454a507f9 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -100,7 +100,7 @@ static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
}
- ctx->initialized = 1;
+ ctx->state = TKIP_STATE_PHASE1_DONE;
}
static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
@@ -183,7 +183,7 @@ void ieee80211_get_tkip_key(struct ieee80211_key_conf *keyconf,
/* Update the p1k only when the iv16 in the packet wraps around, this
* might occur after the wrap around of iv16 in the key in case of
* fragmented packets. */
- if (iv16 == 0 || !ctx->initialized)
+ if (iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
tkip_mixing_phase1(tk, ctx, hdr->addr2, iv32);
if (type == IEEE80211_TKIP_P1_KEY) {
@@ -209,7 +209,7 @@ void ieee80211_tkip_encrypt_data(struct crypto_blkcipher *tfm,
const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
/* Calculate per-packet key */
- if (ctx->iv16 == 0 || !ctx->initialized)
+ if (ctx->iv16 == 0 || ctx->state == TKIP_STATE_NOT_INIT)
tkip_mixing_phase1(tk, ctx, ta, ctx->iv32);
tkip_mixing_phase2(tk, ctx, ctx->iv16, rc4key);
@@ -259,7 +259,7 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
if ((keyid >> 6) != key->conf.keyidx)
return TKIP_DECRYPT_INVALID_KEYIDX;
- if (key->u.tkip.rx[queue].initialized &&
+ if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT &&
(iv32 < key->u.tkip.rx[queue].iv32 ||
(iv32 == key->u.tkip.rx[queue].iv32 &&
iv16 <= key->u.tkip.rx[queue].iv16))) {
@@ -275,11 +275,11 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
if (only_iv) {
res = TKIP_DECRYPT_OK;
- key->u.tkip.rx[queue].initialized = 1;
+ key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
goto done;
}
- if (!key->u.tkip.rx[queue].initialized ||
+ if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT ||
key->u.tkip.rx[queue].iv32 != iv32) {
/* IV16 wrapped around - perform TKIP phase 1 */
tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32);
@@ -299,18 +299,20 @@ int ieee80211_tkip_decrypt_data(struct crypto_blkcipher *tfm,
printk("\n");
}
#endif
- if (key->local->ops->update_tkip_key &&
- key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
- static const u8 bcast[ETH_ALEN] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- const u8 *sta_addr = key->sta->sta.addr;
-
- if (is_multicast_ether_addr(ra))
- sta_addr = bcast;
-
- drv_update_tkip_key(key->local, &key->conf, sta_addr,
- iv32, key->u.tkip.rx[queue].p1k);
- }
+ }
+ if (key->local->ops->update_tkip_key &&
+ key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE &&
+ key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) {
+ static const u8 bcast[ETH_ALEN] =
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ const u8 *sta_addr = key->sta->sta.addr;
+
+ if (is_multicast_ether_addr(ra))
+ sta_addr = bcast;
+
+ drv_update_tkip_key(key->local, &key->conf, sta_addr,
+ iv32, key->u.tkip.rx[queue].p1k);
+ key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED;
}
tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key);
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 27ceaefd7bc8..daf81048c1f7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -180,6 +180,71 @@ static int inline is_ieee80211_device(struct ieee80211_local *local,
}
/* tx handlers */
+static ieee80211_tx_result debug_noinline
+ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
+{
+ struct ieee80211_local *local = tx->local;
+ struct ieee80211_if_managed *ifmgd;
+
+ /* driver doesn't support power save */
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+ return TX_CONTINUE;
+
+ /* hardware does dynamic power save */
+ if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+ return TX_CONTINUE;
+
+ /* dynamic power save disabled */
+ if (local->hw.conf.dynamic_ps_timeout <= 0)
+ return TX_CONTINUE;
+
+ /* we are scanning, don't enable power save */
+ if (local->scanning)
+ return TX_CONTINUE;
+
+ if (!local->ps_sdata)
+ return TX_CONTINUE;
+
+ /* No point if we're going to suspend */
+ if (local->quiescing)
+ return TX_CONTINUE;
+
+ /* dynamic ps is supported only in managed mode */
+ if (tx->sdata->vif.type != NL80211_IFTYPE_STATION)
+ return TX_CONTINUE;
+
+ ifmgd = &tx->sdata->u.mgd;
+
+ /*
+ * Don't wakeup from power save if u-apsd is enabled, voip ac has
+ * u-apsd enabled and the frame is in voip class. This effectively
+ * means that even if all access categories have u-apsd enabled, in
+ * practise u-apsd is only used with the voip ac. This is a
+ * workaround for the case when received voip class packets do not
+ * have correct qos tag for some reason, due the network or the
+ * peer application.
+ *
+ * Note: local->uapsd_queues access is racy here. If the value is
+ * changed via debugfs, user needs to reassociate manually to have
+ * everything in sync.
+ */
+ if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED)
+ && (local->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ && skb_get_queue_mapping(tx->skb) == 0)
+ return TX_CONTINUE;
+
+ if (local->hw.conf.flags & IEEE80211_CONF_PS) {
+ ieee80211_stop_queues_by_reason(&local->hw,
+ IEEE80211_QUEUE_STOP_REASON_PS);
+ ieee80211_queue_work(&local->hw,
+ &local->dynamic_ps_disable_work);
+ }
+
+ mod_timer(&local->dynamic_ps_timer, jiffies +
+ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
+
+ return TX_CONTINUE;
+}
static ieee80211_tx_result debug_noinline
ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
@@ -223,7 +288,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
printk(KERN_DEBUG "%s: dropped data frame to not "
"associated station %pM\n",
- tx->dev->name, hdr->addr1);
+ tx->sdata->name, hdr->addr1);
#endif /* CONFIG_MAC80211_VERBOSE_DEBUG */
I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc);
return TX_DROP;
@@ -331,7 +396,7 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
- tx->dev->name);
+ tx->sdata->name);
#endif
dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
} else
@@ -391,7 +456,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: STA %pM TX "
"buffer full - dropping oldest frame\n",
- tx->dev->name, sta->sta.addr);
+ tx->sdata->name, sta->sta.addr);
}
#endif
dev_kfree_skb(old);
@@ -416,7 +481,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
else if (unlikely(staflags & WLAN_STA_PS_STA)) {
printk(KERN_DEBUG "%s: STA %pM in PS mode, but pspoll "
- "set -> send frame\n", tx->dev->name,
+ "set -> send frame\n", tx->sdata->name,
sta->sta.addr);
}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
@@ -519,7 +584,12 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
txrc.bss_conf = &tx->sdata->vif.bss_conf;
txrc.skb = tx->skb;
txrc.reported_rate.idx = -1;
- txrc.max_rate_idx = tx->sdata->max_ratectrl_rateidx;
+ txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[tx->channel->band];
+ if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+ txrc.max_rate_idx = -1;
+ else
+ txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
+ txrc.ap = tx->sdata->vif.type == NL80211_IFTYPE_AP;
/* set up RTS protection if desired */
if (len > tx->local->hw.wiphy->rts_threshold) {
@@ -549,7 +619,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
"%s: Dropped data frame as no usable bitrate found while "
"scanning and associated. Target station: "
"%pM on %d GHz band\n",
- tx->dev->name, hdr->addr1,
+ tx->sdata->name, hdr->addr1,
tx->channel->band ? 5 : 2))
return TX_DROP;
@@ -1021,7 +1091,6 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
- tx->dev = sdata->dev; /* use original interface */
tx->local = local;
tx->sdata = sdata;
tx->channel = local->hw.conf.channel;
@@ -1052,10 +1121,13 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
hdr = (struct ieee80211_hdr *) skb->data;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
tx->sta = rcu_dereference(sdata->u.vlan.sta);
+ if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr)
+ return TX_DROP;
+ }
if (!tx->sta)
- tx->sta = sta_info_get(local, hdr->addr1);
+ tx->sta = sta_info_get(sdata, hdr->addr1);
if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
(local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
@@ -1216,6 +1288,7 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
goto txh_done; \
} while (0)
+ CALL_TXH(ieee80211_tx_h_dynamic_ps);
CALL_TXH(ieee80211_tx_h_check_assoc);
CALL_TXH(ieee80211_tx_h_ps_buf);
CALL_TXH(ieee80211_tx_h_select_key);
@@ -1398,34 +1471,6 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
return 0;
}
-static bool need_dynamic_ps(struct ieee80211_local *local)
-{
- /* driver doesn't support power save */
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
- return false;
-
- /* hardware does dynamic power save */
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
- return false;
-
- /* dynamic power save disabled */
- if (local->hw.conf.dynamic_ps_timeout <= 0)
- return false;
-
- /* we are scanning, don't enable power save */
- if (local->scanning)
- return false;
-
- if (!local->ps_sdata)
- return false;
-
- /* No point if we're going to suspend */
- if (local->quiescing)
- return false;
-
- return true;
-}
-
static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -1436,18 +1481,6 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
int headroom;
bool may_encrypt;
- if (need_dynamic_ps(local)) {
- if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- ieee80211_stop_queues_by_reason(&local->hw,
- IEEE80211_QUEUE_STOP_REASON_PS);
- ieee80211_queue_work(&local->hw,
- &local->dynamic_ps_disable_work);
- }
-
- mod_timer(&local->dynamic_ps_timer, jiffies +
- msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
- }
-
rcu_read_lock();
if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
@@ -1474,11 +1507,11 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
list) {
- if (!netif_running(tmp_sdata->dev))
+ if (!ieee80211_sdata_running(tmp_sdata))
continue;
if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
continue;
- if (compare_ether_addr(tmp_sdata->dev->dev_addr,
+ if (compare_ether_addr(tmp_sdata->vif.addr,
hdr->addr2) == 0) {
sdata = tmp_sdata;
break;
@@ -1512,7 +1545,7 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
return;
}
- ieee80211_select_queue(local, skb);
+ ieee80211_set_qos_hdr(local, skb);
ieee80211_tx(sdata, skb, false);
rcu_read_unlock();
}
@@ -1642,7 +1675,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr1, sta->sta.addr, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
@@ -1656,7 +1689,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
/* DA BSSID SA */
memcpy(hdr.addr1, skb->data, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 24;
break;
@@ -1664,7 +1697,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
memcpy(hdr.addr1, sdata->u.wds.remote_addr, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
@@ -1678,8 +1711,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
goto fail;
}
- if (compare_ether_addr(dev->dev_addr,
- skb->data + ETH_ALEN) == 0) {
+ if (compare_ether_addr(sdata->vif.addr,
+ skb->data + ETH_ALEN) == 0) {
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
skb->data, skb->data + ETH_ALEN);
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
@@ -1709,7 +1742,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
}
}
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
- mesh_da, dev->dev_addr);
+ mesh_da, sdata->vif.addr);
rcu_read_unlock();
if (is_mesh_mcast)
meshhdrlen =
@@ -1734,7 +1767,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
if (sdata->u.mgd.use_4addr && ethertype != ETH_P_PAE) {
fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
/* RA TA DA SA */
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
+ memcpy(hdr.addr2, sdata->vif.addr, ETH_ALEN);
memcpy(hdr.addr3, skb->data, ETH_ALEN);
memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
hdrlen = 30;
@@ -1765,9 +1798,8 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
*/
if (!is_multicast_ether_addr(hdr.addr1)) {
rcu_read_lock();
- sta = sta_info_get(local, hdr.addr1);
- /* XXX: in the future, use sdata to look up the sta */
- if (sta && sta->sdata == sdata)
+ sta = sta_info_get(sdata, hdr.addr1);
+ if (sta)
sta_flags = get_sta_flags(sta);
rcu_read_unlock();
}
@@ -1786,7 +1818,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
unlikely(!is_multicast_ether_addr(hdr.addr1) &&
!(sta_flags & WLAN_STA_AUTHORIZED) &&
!(ethertype == ETH_P_PAE &&
- compare_ether_addr(dev->dev_addr,
+ compare_ether_addr(sdata->vif.addr,
skb->data + ETH_ALEN) == 0))) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
@@ -1926,7 +1958,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
ieee80211_tx(sdata, skb, true);
} else {
hdr = (struct ieee80211_hdr *)skb->data;
- sta = sta_info_get(local, hdr->addr1);
+ sta = sta_info_get(sdata, hdr->addr1);
ret = __ieee80211_tx(local, &skb, sta, true);
if (ret != IEEE80211_TX_OK)
@@ -2062,6 +2094,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct beacon_data *beacon;
struct ieee80211_supported_band *sband;
enum ieee80211_band band = local->hw.conf.channel->band;
+ struct ieee80211_tx_rate_control txrc;
sband = local->hw.wiphy->bands[band];
@@ -2150,8 +2183,8 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
mgmt->frame_control =
cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- memcpy(mgmt->bssid, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
mgmt->u.beacon.beacon_int =
cpu_to_le16(sdata->vif.bss_conf.beacon_int);
mgmt->u.beacon.capab_info = 0x0; /* 0x0 for MPs */
@@ -2169,21 +2202,25 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ info->flags |= IEEE80211_TX_CTL_NO_ACK;
info->band = band;
- /*
- * XXX: For now, always use the lowest rate
- */
- info->control.rates[0].idx = 0;
- info->control.rates[0].count = 1;
- info->control.rates[1].idx = -1;
- info->control.rates[2].idx = -1;
- info->control.rates[3].idx = -1;
- info->control.rates[4].idx = -1;
- BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
+
+ memset(&txrc, 0, sizeof(txrc));
+ txrc.hw = hw;
+ txrc.sband = sband;
+ txrc.bss_conf = &sdata->vif.bss_conf;
+ txrc.skb = skb;
+ txrc.reported_rate.idx = -1;
+ txrc.rate_idx_mask = sdata->rc_rateidx_mask[band];
+ if (txrc.rate_idx_mask == (1 << sband->n_bitrates) - 1)
+ txrc.max_rate_idx = -1;
+ else
+ txrc.max_rate_idx = fls(txrc.rate_idx_mask) - 1;
+ txrc.ap = true;
+ rate_control_get_rate(sdata, NULL, &txrc);
info->control.vif = vif;
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
out:
@@ -2192,6 +2229,134 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL(ieee80211_beacon_get_tim);
+struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_pspoll *pspoll;
+ struct ieee80211_local *local;
+ struct sk_buff *skb;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return NULL;
+
+ sdata = vif_to_sdata(vif);
+ ifmgd = &sdata->u.mgd;
+ local = sdata->local;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*pspoll));
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for "
+ "pspoll template\n", sdata->name);
+ return NULL;
+ }
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ pspoll = (struct ieee80211_pspoll *) skb_put(skb, sizeof(*pspoll));
+ memset(pspoll, 0, sizeof(*pspoll));
+ pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
+ IEEE80211_STYPE_PSPOLL);
+ pspoll->aid = cpu_to_le16(ifmgd->aid);
+
+ /* aid in PS-Poll has its two MSBs each set to 1 */
+ pspoll->aid |= cpu_to_le16(1 << 15 | 1 << 14);
+
+ memcpy(pspoll->bssid, ifmgd->bssid, ETH_ALEN);
+ memcpy(pspoll->ta, vif->addr, ETH_ALEN);
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_pspoll_get);
+
+struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_hdr_3addr *nullfunc;
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_local *local;
+ struct sk_buff *skb;
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return NULL;
+
+ sdata = vif_to_sdata(vif);
+ ifmgd = &sdata->u.mgd;
+ local = sdata->local;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for nullfunc "
+ "template\n", sdata->name);
+ return NULL;
+ }
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ nullfunc = (struct ieee80211_hdr_3addr *) skb_put(skb,
+ sizeof(*nullfunc));
+ memset(nullfunc, 0, sizeof(*nullfunc));
+ nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_NULLFUNC |
+ IEEE80211_FCTL_TODS);
+ memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_nullfunc_get);
+
+struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
+ const u8 *ie, size_t ie_len)
+{
+ struct ieee80211_sub_if_data *sdata;
+ struct ieee80211_local *local;
+ struct ieee80211_hdr_3addr *hdr;
+ struct sk_buff *skb;
+ size_t ie_ssid_len;
+ u8 *pos;
+
+ sdata = vif_to_sdata(vif);
+ local = sdata->local;
+ ie_ssid_len = 2 + ssid_len;
+
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*hdr) +
+ ie_ssid_len + ie_len);
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
+ "request template\n", sdata->name);
+ return NULL;
+ }
+
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_REQ);
+ memset(hdr->addr1, 0xff, ETH_ALEN);
+ memcpy(hdr->addr2, vif->addr, ETH_ALEN);
+ memset(hdr->addr3, 0xff, ETH_ALEN);
+
+ pos = skb_put(skb, ie_ssid_len);
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = ssid_len;
+ if (ssid)
+ memcpy(pos, ssid, ssid_len);
+ pos += ssid_len;
+
+ if (ie) {
+ pos = skb_put(skb, ie_len);
+ memcpy(pos, ie, ie_len);
+ }
+
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_probereq_get);
+
void ieee80211_rts_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const void *frame, size_t frame_len,
const struct ieee80211_tx_info *frame_txctl,
@@ -2291,6 +2456,9 @@ void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
skb_set_network_header(skb, 0);
skb_set_transport_header(skb, 0);
+ /* send all internal mgmt frames on VO */
+ skb_set_queue_mapping(skb, 0);
+
/*
* The other path calling ieee80211_xmit is from the tasklet,
* and while we can handle concurrent transmissions locking
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index dc76267e436e..e278f97c8305 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -18,7 +18,6 @@
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
-#include <linux/wireless.h>
#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <net/net_namespace.h>
@@ -269,6 +268,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
@@ -281,6 +281,11 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
if (!skb_queue_empty(&local->pending[queue]))
tasklet_schedule(&local->tx_pending_tasklet);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_wake_queue(netdev_get_tx_queue(sdata->dev, queue));
+ rcu_read_unlock();
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -305,11 +310,17 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
enum queue_stop_reason reason)
{
struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata;
if (WARN_ON(queue >= hw->queues))
return;
__set_bit(reason, &local->queue_stop_reasons[queue]);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ netif_tx_stop_queue(netdev_get_tx_queue(sdata->dev, queue));
+ rcu_read_unlock();
}
void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -468,8 +479,8 @@ void ieee80211_iterate_active_interfaces(
case NL80211_IFTYPE_MESH_POINT:
break;
}
- if (netif_running(sdata->dev))
- iterator(data, sdata->dev->dev_addr,
+ if (ieee80211_sdata_running(sdata))
+ iterator(data, sdata->vif.addr,
&sdata->vif);
}
@@ -502,8 +513,8 @@ void ieee80211_iterate_active_interfaces_atomic(
case NL80211_IFTYPE_MESH_POINT:
break;
}
- if (netif_running(sdata->dev))
- iterator(data, sdata->dev->dev_addr,
+ if (ieee80211_sdata_running(sdata))
+ iterator(data, sdata->vif.addr,
&sdata->vif);
}
@@ -781,6 +792,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
break;
}
+ qparam.uapsd = false;
+
drv_conf_tx(local, queue, &qparam);
}
}
@@ -848,7 +861,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
sizeof(*mgmt) + 6 + extra_len);
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
- "frame\n", sdata->dev->name);
+ "frame\n", sdata->name);
return;
}
skb_reserve(skb, local->hw.extra_tx_headroom);
@@ -858,7 +871,7 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_AUTH);
memcpy(mgmt->da, bssid, ETH_ALEN);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->u.auth.auth_alg = cpu_to_le16(auth_alg);
mgmt->u.auth.auth_transaction = cpu_to_le16(transaction);
@@ -881,43 +894,87 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
enum ieee80211_band band)
{
struct ieee80211_supported_band *sband;
- u8 *pos, *supp_rates_len, *esupp_rates_len = NULL;
- int i;
+ u8 *pos;
+ size_t offset = 0, noffset;
+ int supp_rates_len, i;
sband = local->hw.wiphy->bands[band];
pos = buffer;
+ supp_rates_len = min_t(int, sband->n_bitrates, 8);
+
*pos++ = WLAN_EID_SUPP_RATES;
- supp_rates_len = pos;
- *pos++ = 0;
-
- for (i = 0; i < sband->n_bitrates; i++) {
- struct ieee80211_rate *rate = &sband->bitrates[i];
-
- if (esupp_rates_len) {
- *esupp_rates_len += 1;
- } else if (*supp_rates_len == 8) {
- *pos++ = WLAN_EID_EXT_SUPP_RATES;
- esupp_rates_len = pos;
- *pos++ = 1;
- } else
- *supp_rates_len += 1;
+ *pos++ = supp_rates_len;
+
+ for (i = 0; i < supp_rates_len; i++) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
- *pos++ = rate->bitrate / 5;
+ /* insert "request information" if in custom IEs */
+ if (ie && ie_len) {
+ static const u8 before_extrates[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ };
+ noffset = ieee80211_ie_split(ie, ie_len,
+ before_extrates,
+ ARRAY_SIZE(before_extrates),
+ offset);
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
+ offset = noffset;
+ }
+
+ if (sband->n_bitrates > i) {
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = sband->n_bitrates - i;
+
+ for (; i < sband->n_bitrates; i++) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
+
+ /* insert custom IEs that go before HT */
+ if (ie && ie_len) {
+ static const u8 before_ht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_DS_PARAMS,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ };
+ noffset = ieee80211_ie_split(ie, ie_len,
+ before_ht, ARRAY_SIZE(before_ht),
+ offset);
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
+ offset = noffset;
}
if (sband->ht_cap.ht_supported) {
- __le16 tmp = cpu_to_le16(sband->ht_cap.cap);
+ u16 cap = sband->ht_cap.cap;
+ __le16 tmp;
+
+ if (ieee80211_disable_40mhz_24ghz &&
+ sband->band == IEEE80211_BAND_2GHZ) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
*pos++ = WLAN_EID_HT_CAPABILITY;
*pos++ = sizeof(struct ieee80211_ht_cap);
memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+ tmp = cpu_to_le16(cap);
memcpy(pos, &tmp, sizeof(u16));
pos += sizeof(u16);
- /* TODO: needs a define here for << 2 */
*pos++ = sband->ht_cap.ampdu_factor |
- (sband->ht_cap.ampdu_density << 2);
+ (sband->ht_cap.ampdu_density <<
+ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
pos += sizeof(sband->ht_cap.mcs);
pos += 2 + 4 + 1; /* ext info, BF cap, antsel */
@@ -928,9 +985,11 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
* that calculates local->scan_ies_len.
*/
- if (ie) {
- memcpy(pos, ie, ie_len);
- pos += ie_len;
+ /* add any remaining custom IEs */
+ if (ie && ie_len) {
+ noffset = ie_len;
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
}
return pos - buffer;
@@ -943,37 +1002,29 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos;
-
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
- ie_len);
- if (!skb) {
- printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
- "request\n", sdata->dev->name);
+ size_t buf_len;
+ u8 *buf;
+
+ /* FIXME: come up with a proper value */
+ buf = kmalloc(200 + ie_len, GFP_KERNEL);
+ if (!buf) {
+ printk(KERN_DEBUG "%s: failed to allocate temporary IE "
+ "buffer\n", sdata->name);
return;
}
- skb_reserve(skb, local->hw.extra_tx_headroom);
- mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
- memset(mgmt, 0, 24);
- mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
- IEEE80211_STYPE_PROBE_REQ);
- memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
+ buf_len = ieee80211_build_preq_ies(local, buf, ie, ie_len,
+ local->hw.conf.channel->band);
+
+ skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
+ ssid, ssid_len,
+ buf, buf_len);
+
if (dst) {
+ mgmt = (struct ieee80211_mgmt *) skb->data;
memcpy(mgmt->da, dst, ETH_ALEN);
memcpy(mgmt->bssid, dst, ETH_ALEN);
- } else {
- memset(mgmt->da, 0xff, ETH_ALEN);
- memset(mgmt->bssid, 0xff, ETH_ALEN);
}
- pos = skb_put(skb, 2 + ssid_len);
- *pos++ = WLAN_EID_SSID;
- *pos++ = ssid_len;
- memcpy(pos, ssid, ssid_len);
- pos += ssid_len;
-
- skb_put(skb, ieee80211_build_preq_ies(local, pos, ie, ie_len,
- local->hw.conf.channel->band));
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
ieee80211_tx_skb(sdata, skb);
@@ -1020,16 +1071,15 @@ void ieee80211_stop_device(struct ieee80211_local *local)
ieee80211_led_radio(local, false);
cancel_work_sync(&local->reconfig_filter);
- drv_stop(local);
flush_workqueue(local->workqueue);
+ drv_stop(local);
}
int ieee80211_reconfig(struct ieee80211_local *local)
{
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_if_init_conf conf;
struct sta_info *sta;
unsigned long flags;
int res;
@@ -1049,7 +1099,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (res) {
WARN(local->suspended, "Harware became unavailable "
"upon resume. This is could be a software issue"
- "prior to suspend or a harware issue\n");
+ "prior to suspend or a hardware issue\n");
return res;
}
@@ -1060,12 +1110,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
list_for_each_entry(sdata, &local->interfaces, list) {
if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
sdata->vif.type != NL80211_IFTYPE_MONITOR &&
- netif_running(sdata->dev)) {
- conf.vif = &sdata->vif;
- conf.type = sdata->vif.type;
- conf.mac_addr = sdata->dev->dev_addr;
- res = drv_add_interface(local, &conf);
- }
+ ieee80211_sdata_running(sdata))
+ res = drv_add_interface(local, &sdata->vif);
}
/* add STAs back */
@@ -1078,7 +1124,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
struct ieee80211_sub_if_data,
u.ap);
- drv_sta_notify(local, &sdata->vif, STA_NOTIFY_ADD,
+ drv_sta_notify(local, sdata, STA_NOTIFY_ADD,
&sta->sta);
}
spin_unlock_irqrestore(&local->sta_lock, flags);
@@ -1107,7 +1153,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* Finally also reconfigure all the BSS information */
list_for_each_entry(sdata, &local->interfaces, list) {
u32 changed = ~0;
- if (!netif_running(sdata->dev))
+ if (!ieee80211_sdata_running(sdata))
continue;
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
@@ -1135,7 +1181,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* add back keys */
list_for_each_entry(sdata, &local->interfaces, list)
- if (netif_running(sdata->dev))
+ if (ieee80211_sdata_running(sdata))
ieee80211_enable_keys(sdata);
ieee80211_wake_queues_by_reason(hw,
@@ -1182,3 +1228,133 @@ int ieee80211_reconfig(struct ieee80211_local *local)
return 0;
}
+static int check_mgd_smps(struct ieee80211_if_managed *ifmgd,
+ enum ieee80211_smps_mode *smps_mode)
+{
+ if (ifmgd->associated) {
+ *smps_mode = ifmgd->ap_smps;
+
+ if (*smps_mode == IEEE80211_SMPS_AUTOMATIC) {
+ if (ifmgd->powersave)
+ *smps_mode = IEEE80211_SMPS_DYNAMIC;
+ else
+ *smps_mode = IEEE80211_SMPS_OFF;
+ }
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* must hold iflist_mtx */
+void ieee80211_recalc_smps(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *forsdata)
+{
+ struct ieee80211_sub_if_data *sdata;
+ enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_OFF;
+ int count = 0;
+
+ if (forsdata)
+ WARN_ON(!mutex_is_locked(&forsdata->u.mgd.mtx));
+
+ WARN_ON(!mutex_is_locked(&local->iflist_mtx));
+
+ /*
+ * This function could be improved to handle multiple
+ * interfaces better, but right now it makes any
+ * non-station interfaces force SM PS to be turned
+ * off. If there are multiple station interfaces it
+ * could also use the best possible mode, e.g. if
+ * one is in static and the other in dynamic then
+ * dynamic is ok.
+ */
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!netif_running(sdata->dev))
+ continue;
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ goto set;
+ if (sdata != forsdata) {
+ /*
+ * This nested is ok -- we are holding the iflist_mtx
+ * so can't get here twice or so. But it's required
+ * since normally we acquire it first and then the
+ * iflist_mtx.
+ */
+ mutex_lock_nested(&sdata->u.mgd.mtx, SINGLE_DEPTH_NESTING);
+ count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
+ mutex_unlock(&sdata->u.mgd.mtx);
+ } else
+ count += check_mgd_smps(&sdata->u.mgd, &smps_mode);
+
+ if (count > 1) {
+ smps_mode = IEEE80211_SMPS_OFF;
+ break;
+ }
+ }
+
+ if (smps_mode == local->smps_mode)
+ return;
+
+ set:
+ local->smps_mode = smps_mode;
+ /* changed flag is auto-detected for this */
+ ieee80211_hw_config(local, 0);
+}
+
+static bool ieee80211_id_in_list(const u8 *ids, int n_ids, u8 id)
+{
+ int i;
+
+ for (i = 0; i < n_ids; i++)
+ if (ids[i] == id)
+ return true;
+ return false;
+}
+
+/**
+ * ieee80211_ie_split - split an IE buffer according to ordering
+ *
+ * @ies: the IE buffer
+ * @ielen: the length of the IE buffer
+ * @ids: an array with element IDs that are allowed before
+ * the split
+ * @n_ids: the size of the element ID array
+ * @offset: offset where to start splitting in the buffer
+ *
+ * This function splits an IE buffer by updating the @offset
+ * variable to point to the location where the buffer should be
+ * split.
+ *
+ * It assumes that the given IE buffer is well-formed, this
+ * has to be guaranteed by the caller!
+ *
+ * It also assumes that the IEs in the buffer are ordered
+ * correctly, if not the result of using this function will not
+ * be ordered correctly either, i.e. it does no reordering.
+ *
+ * The function returns the offset where the next part of the
+ * buffer starts, which may be @ielen if the entire (remainder)
+ * of the buffer should be used.
+ */
+size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
+ const u8 *ids, int n_ids, size_t offset)
+{
+ size_t pos = offset;
+
+ while (pos < ielen && ieee80211_id_in_list(ids, n_ids, ies[pos]))
+ pos += 2 + ies[pos + 1];
+
+ return pos;
+}
+
+size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset)
+{
+ size_t pos = offset;
+
+ while (pos < ielen && ies[pos] != WLAN_EID_VENDOR_SPECIFIC)
+ pos += 2 + ies[pos + 1];
+
+ return pos;
+}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index b19b7696f3a2..34e6d02da779 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -44,22 +44,69 @@ static int wme_downgrade_ac(struct sk_buff *skb)
}
-/* Indicate which queue to use. */
-static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
+/* Indicate which queue to use. */
+u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta = NULL;
+ u32 sta_flags = 0;
+ const u8 *ra = NULL;
+ bool qos = false;
- if (!ieee80211_is_data(hdr->frame_control)) {
- /* management frames go on AC_VO queue, but are sent
- * without QoS control fields */
- return 0;
+ if (local->hw.queues < 4 || skb->len < 6) {
+ skb->priority = 0; /* required for correct WPA/11i MIC */
+ return min_t(u16, local->hw.queues - 1,
+ ieee802_1d_to_ac[skb->priority]);
+ }
+
+ rcu_read_lock();
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ rcu_read_lock();
+ sta = rcu_dereference(sdata->u.vlan.sta);
+ if (sta)
+ sta_flags = get_sta_flags(sta);
+ rcu_read_unlock();
+ if (sta)
+ break;
+ case NL80211_IFTYPE_AP:
+ ra = skb->data;
+ break;
+ case NL80211_IFTYPE_WDS:
+ ra = sdata->u.wds.remote_addr;
+ break;
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+ /*
+ * XXX: This is clearly broken ... but already was before,
+ * because ieee80211_fill_mesh_addresses() would clear A1
+ * except for multicast addresses.
+ */
+ break;
+#endif
+ case NL80211_IFTYPE_STATION:
+ ra = sdata->u.mgd.bssid;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ra = skb->data;
+ break;
+ default:
+ break;
}
- if (0 /* injected */) {
- /* use AC from radiotap */
+ if (!sta && ra && !is_multicast_ether_addr(ra)) {
+ sta = sta_info_get(sdata, ra);
+ if (sta)
+ sta_flags = get_sta_flags(sta);
}
- if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ if (sta_flags & WLAN_STA_WME)
+ qos = true;
+
+ rcu_read_unlock();
+
+ if (!qos) {
skb->priority = 0; /* required for correct WPA/11i MIC */
return ieee802_1d_to_ac[skb->priority];
}
@@ -68,6 +115,12 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
* data frame has */
skb->priority = cfg80211_classify8021d(skb);
+ return ieee80211_downgrade_queue(local, skb);
+}
+
+u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+ struct sk_buff *skb)
+{
/* in case we are a client verify acm is not set for this ac */
while (unlikely(local->wmm_acm & BIT(skb->priority))) {
if (wme_downgrade_ac(skb)) {
@@ -85,24 +138,17 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
return ieee802_1d_to_ac[skb->priority];
}
-void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
+void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- u16 queue;
- u8 tid;
-
- queue = classify80211(local, skb);
- if (unlikely(queue >= local->hw.queues))
- queue = local->hw.queues - 1;
-
- /*
- * Now we know the 1d priority, fill in the QoS header if
- * there is one (and we haven't done this before).
- */
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+
+ /* Fill in the QoS header if there is one. */
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *p = ieee80211_get_qos_ctl(hdr);
- u8 ack_policy = 0;
+ u8 ack_policy = 0, tid;
+
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+
if (unlikely(local->wifi_wme_noack_test))
ack_policy |= QOS_CONTROL_ACK_POLICY_NOACK <<
QOS_CONTROL_ACK_POLICY_SHIFT;
@@ -110,6 +156,4 @@ void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
*p++ = ack_policy | tid;
*p = 0;
}
-
- skb_set_queue_mapping(skb, queue);
}
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index d4fd87ca5118..6053b1c9feee 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -20,7 +20,11 @@
extern const int ieee802_1d_to_ac[8];
-void ieee80211_select_queue(struct ieee80211_local *local,
- struct sk_buff *skb);
+u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
+void ieee80211_set_qos_hdr(struct ieee80211_local *local, struct sk_buff *skb);
+u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
+ struct sk_buff *skb);
+
#endif /* _WME_H */
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
new file mode 100644
index 000000000000..81bd5d592bb4
--- /dev/null
+++ b/net/mac80211/work.c
@@ -0,0 +1,1098 @@
+/*
+ * mac80211 work implementation
+ *
+ * Copyright 2003-2008, Jouni Malinen <j@w1.fi>
+ * Copyright 2004, Instant802 Networks, Inc.
+ * Copyright 2005, Devicescape Software, Inc.
+ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
+ * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
+ * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+
+#include "ieee80211_i.h"
+#include "rate.h"
+
+#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
+#define IEEE80211_AUTH_MAX_TRIES 3
+#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
+#define IEEE80211_ASSOC_MAX_TRIES 3
+#define IEEE80211_MAX_PROBE_TRIES 5
+
+enum work_action {
+ WORK_ACT_NONE,
+ WORK_ACT_TIMEOUT,
+ WORK_ACT_DONE,
+};
+
+
+/* utils */
+static inline void ASSERT_WORK_MTX(struct ieee80211_local *local)
+{
+ WARN_ON(!mutex_is_locked(&local->work_mtx));
+}
+
+/*
+ * We can have multiple work items (and connection probing)
+ * scheduling this timer, but we need to take care to only
+ * reschedule it when it should fire _earlier_ than it was
+ * asked for before, or if it's not pending right now. This
+ * function ensures that. Note that it then is required to
+ * run this function for all timeouts after the first one
+ * has happened -- the work that runs from this timer will
+ * do that.
+ */
+static void run_again(struct ieee80211_local *local,
+ unsigned long timeout)
+{
+ ASSERT_WORK_MTX(local);
+
+ if (!timer_pending(&local->work_timer) ||
+ time_before(timeout, local->work_timer.expires))
+ mod_timer(&local->work_timer, timeout);
+}
+
+static void work_free_rcu(struct rcu_head *head)
+{
+ struct ieee80211_work *wk =
+ container_of(head, struct ieee80211_work, rcu_head);
+
+ kfree(wk);
+}
+
+void free_work(struct ieee80211_work *wk)
+{
+ call_rcu(&wk->rcu_head, work_free_rcu);
+}
+
+static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
+ struct ieee80211_supported_band *sband,
+ u32 *rates)
+{
+ int i, j, count;
+ *rates = 0;
+ count = 0;
+ for (i = 0; i < supp_rates_len; i++) {
+ int rate = (supp_rates[i] & 0x7F) * 5;
+
+ for (j = 0; j < sband->n_bitrates; j++)
+ if (sband->bitrates[j].bitrate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+
+ return count;
+}
+
+/* frame sending functions */
+
+static void ieee80211_add_ht_ie(struct sk_buff *skb, const u8 *ht_info_ie,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_channel *channel,
+ enum ieee80211_smps_mode smps)
+{
+ struct ieee80211_ht_info *ht_info;
+ u8 *pos;
+ u32 flags = channel->flags;
+ u16 cap = sband->ht_cap.cap;
+ __le16 tmp;
+
+ if (!sband->ht_cap.ht_supported)
+ return;
+
+ if (!ht_info_ie)
+ return;
+
+ if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info))
+ return;
+
+ ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2);
+
+ /* determine capability flags */
+
+ if (ieee80211_disable_40mhz_24ghz &&
+ sband->band == IEEE80211_BAND_2GHZ) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+
+ switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ if (flags & IEEE80211_CHAN_NO_HT40MINUS) {
+ cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ cap &= ~IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ }
+
+ /* set SM PS mode properly */
+ cap &= ~IEEE80211_HT_CAP_SM_PS;
+ switch (smps) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_NUM_MODES:
+ WARN_ON(1);
+ case IEEE80211_SMPS_OFF:
+ cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ cap |= WLAN_HT_CAP_SM_PS_STATIC <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ cap |= WLAN_HT_CAP_SM_PS_DYNAMIC <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
+ break;
+ }
+
+ /* reserve and fill IE */
+
+ pos = skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ memset(pos, 0, sizeof(struct ieee80211_ht_cap));
+
+ /* capability flags */
+ tmp = cpu_to_le16(cap);
+ memcpy(pos, &tmp, sizeof(u16));
+ pos += sizeof(u16);
+
+ /* AMPDU parameters */
+ *pos++ = sband->ht_cap.ampdu_factor |
+ (sband->ht_cap.ampdu_density <<
+ IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT);
+
+ /* MCS set */
+ memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
+ pos += sizeof(sband->ht_cap.mcs);
+
+ /* extended capabilities */
+ pos += sizeof(__le16);
+
+ /* BF capabilities */
+ pos += sizeof(__le32);
+
+ /* antenna selection */
+ pos += sizeof(u8);
+}
+
+static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_work *wk)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff *skb;
+ struct ieee80211_mgmt *mgmt;
+ u8 *pos, qos_info;
+ const u8 *ies;
+ size_t offset = 0, noffset;
+ int i, len, count, rates_len, supp_rates_len;
+ u16 capab;
+ struct ieee80211_supported_band *sband;
+ u32 rates = 0;
+
+ sband = local->hw.wiphy->bands[wk->chan->band];
+
+ /*
+ * Get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode)...
+ */
+ rates_len = ieee80211_compatible_rates(wk->assoc.supp_rates,
+ wk->assoc.supp_rates_len,
+ sband, &rates);
+
+ skb = alloc_skb(local->hw.extra_tx_headroom +
+ sizeof(*mgmt) + /* bit too much but doesn't matter */
+ 2 + wk->assoc.ssid_len + /* SSID */
+ 4 + rates_len + /* (extended) rates */
+ 4 + /* power capability */
+ 2 + 2 * sband->n_channels + /* supported channels */
+ 2 + sizeof(struct ieee80211_ht_cap) + /* HT */
+ wk->ie_len + /* extra IEs */
+ 9, /* WMM */
+ GFP_KERNEL);
+ if (!skb) {
+ printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
+ "frame\n", sdata->name);
+ return;
+ }
+ skb_reserve(skb, local->hw.extra_tx_headroom);
+
+ capab = WLAN_CAPABILITY_ESS;
+
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_SLOT_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ if (!(local->hw.flags & IEEE80211_HW_2GHZ_SHORT_PREAMBLE_INCAPABLE))
+ capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+ }
+
+ if (wk->assoc.capability & WLAN_CAPABILITY_PRIVACY)
+ capab |= WLAN_CAPABILITY_PRIVACY;
+
+ if ((wk->assoc.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
+ (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
+ capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
+
+ mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, wk->filter_ta, ETH_ALEN);
+ memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+ memcpy(mgmt->bssid, wk->filter_ta, ETH_ALEN);
+
+ if (!is_zero_ether_addr(wk->assoc.prev_bssid)) {
+ skb_put(skb, 10);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_REASSOC_REQ);
+ mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
+ mgmt->u.reassoc_req.listen_interval =
+ cpu_to_le16(local->hw.conf.listen_interval);
+ memcpy(mgmt->u.reassoc_req.current_ap, wk->assoc.prev_bssid,
+ ETH_ALEN);
+ } else {
+ skb_put(skb, 4);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ASSOC_REQ);
+ mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
+ mgmt->u.assoc_req.listen_interval =
+ cpu_to_le16(local->hw.conf.listen_interval);
+ }
+
+ /* SSID */
+ ies = pos = skb_put(skb, 2 + wk->assoc.ssid_len);
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = wk->assoc.ssid_len;
+ memcpy(pos, wk->assoc.ssid, wk->assoc.ssid_len);
+
+ /* add all rates which were marked to be used above */
+ supp_rates_len = rates_len;
+ if (supp_rates_len > 8)
+ supp_rates_len = 8;
+
+ len = sband->n_bitrates;
+ pos = skb_put(skb, supp_rates_len + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = supp_rates_len;
+
+ count = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (BIT(i) & rates) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ if (++count == 8)
+ break;
+ }
+ }
+
+ if (rates_len > count) {
+ pos = skb_put(skb, rates_len - count + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = rates_len - count;
+
+ for (i++; i < sband->n_bitrates; i++) {
+ if (BIT(i) & rates) {
+ int rate = sband->bitrates[i].bitrate;
+ *pos++ = (u8) (rate / 5);
+ }
+ }
+ }
+
+ if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT) {
+ /* 1. power capabilities */
+ pos = skb_put(skb, 4);
+ *pos++ = WLAN_EID_PWR_CAPABILITY;
+ *pos++ = 2;
+ *pos++ = 0; /* min tx power */
+ *pos++ = wk->chan->max_power; /* max tx power */
+
+ /* 2. supported channels */
+ /* TODO: get this in reg domain format */
+ pos = skb_put(skb, 2 * sband->n_channels + 2);
+ *pos++ = WLAN_EID_SUPPORTED_CHANNELS;
+ *pos++ = 2 * sband->n_channels;
+ for (i = 0; i < sband->n_channels; i++) {
+ *pos++ = ieee80211_frequency_to_channel(
+ sband->channels[i].center_freq);
+ *pos++ = 1; /* one channel in the subband*/
+ }
+ }
+
+ /* if present, add any custom IEs that go before HT */
+ if (wk->ie_len && wk->ie) {
+ static const u8 before_ht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_PWR_CAPABILITY,
+ WLAN_EID_SUPPORTED_CHANNELS,
+ WLAN_EID_RSN,
+ WLAN_EID_QOS_CAPA,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES,
+ WLAN_EID_MOBILITY_DOMAIN,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ };
+ noffset = ieee80211_ie_split(wk->ie, wk->ie_len,
+ before_ht, ARRAY_SIZE(before_ht),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, wk->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ if (wk->assoc.use_11n && wk->assoc.wmm_used &&
+ local->hw.queues >= 4)
+ ieee80211_add_ht_ie(skb, wk->assoc.ht_information_ie,
+ sband, wk->chan, wk->assoc.smps);
+
+ /* if present, add any custom non-vendor IEs that go after HT */
+ if (wk->ie_len && wk->ie) {
+ noffset = ieee80211_ie_split_vendor(wk->ie, wk->ie_len,
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, wk->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
+ if (wk->assoc.wmm_used && local->hw.queues >= 4) {
+ if (wk->assoc.uapsd_used) {
+ qos_info = local->uapsd_queues;
+ qos_info |= (local->uapsd_max_sp_len <<
+ IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
+ } else {
+ qos_info = 0;
+ }
+
+ pos = skb_put(skb, 9);
+ *pos++ = WLAN_EID_VENDOR_SPECIFIC;
+ *pos++ = 7; /* len */
+ *pos++ = 0x00; /* Microsoft OUI 00:50:F2 */
+ *pos++ = 0x50;
+ *pos++ = 0xf2;
+ *pos++ = 2; /* WME */
+ *pos++ = 0; /* WME info */
+ *pos++ = 1; /* WME ver */
+ *pos++ = qos_info;
+ }
+
+ /* add any remaining custom (i.e. vendor specific here) IEs */
+ if (wk->ie_len && wk->ie) {
+ noffset = wk->ie_len;
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, wk->ie + offset, noffset - offset);
+ }
+
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ ieee80211_tx_skb(sdata, skb);
+}
+
+static void ieee80211_remove_auth_bss(struct ieee80211_local *local,
+ struct ieee80211_work *wk)
+{
+ struct cfg80211_bss *cbss;
+ u16 capa_val = WLAN_CAPABILITY_ESS;
+
+ if (wk->probe_auth.privacy)
+ capa_val |= WLAN_CAPABILITY_PRIVACY;
+
+ cbss = cfg80211_get_bss(local->hw.wiphy, wk->chan, wk->filter_ta,
+ wk->probe_auth.ssid, wk->probe_auth.ssid_len,
+ WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
+ capa_val);
+ if (!cbss)
+ return;
+
+ cfg80211_unlink_bss(local->hw.wiphy, cbss);
+ cfg80211_put_bss(cbss);
+}
+
+static enum work_action __must_check
+ieee80211_direct_probe(struct ieee80211_work *wk)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ wk->probe_auth.tries++;
+ if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: direct probe to %pM timed out\n",
+ sdata->name, wk->filter_ta);
+
+ /*
+ * Most likely AP is not in the range so remove the
+ * bss struct for that AP.
+ */
+ ieee80211_remove_auth_bss(local, wk);
+
+ return WORK_ACT_TIMEOUT;
+ }
+
+ printk(KERN_DEBUG "%s: direct probe to %pM (try %d)\n",
+ sdata->name, wk->filter_ta, wk->probe_auth.tries);
+
+ /*
+ * Direct probe is sent to broadcast address as some APs
+ * will not answer to direct packet in unassociated state.
+ */
+ ieee80211_send_probe_req(sdata, NULL, wk->probe_auth.ssid,
+ wk->probe_auth.ssid_len, NULL, 0);
+
+ wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ run_again(local, wk->timeout);
+
+ return WORK_ACT_NONE;
+}
+
+
+static enum work_action __must_check
+ieee80211_authenticate(struct ieee80211_work *wk)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ wk->probe_auth.tries++;
+ if (wk->probe_auth.tries > IEEE80211_AUTH_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: authentication with %pM"
+ " timed out\n", sdata->name, wk->filter_ta);
+
+ /*
+ * Most likely AP is not in the range so remove the
+ * bss struct for that AP.
+ */
+ ieee80211_remove_auth_bss(local, wk);
+
+ return WORK_ACT_TIMEOUT;
+ }
+
+ printk(KERN_DEBUG "%s: authenticate with %pM (try %d)\n",
+ sdata->name, wk->filter_ta, wk->probe_auth.tries);
+
+ ieee80211_send_auth(sdata, 1, wk->probe_auth.algorithm, wk->ie,
+ wk->ie_len, wk->filter_ta, NULL, 0, 0);
+ wk->probe_auth.transaction = 2;
+
+ wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ run_again(local, wk->timeout);
+
+ return WORK_ACT_NONE;
+}
+
+static enum work_action __must_check
+ieee80211_associate(struct ieee80211_work *wk)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ wk->assoc.tries++;
+ if (wk->assoc.tries > IEEE80211_ASSOC_MAX_TRIES) {
+ printk(KERN_DEBUG "%s: association with %pM"
+ " timed out\n",
+ sdata->name, wk->filter_ta);
+
+ /*
+ * Most likely AP is not in the range so remove the
+ * bss struct for that AP.
+ */
+ if (wk->assoc.bss)
+ cfg80211_unlink_bss(local->hw.wiphy, wk->assoc.bss);
+
+ return WORK_ACT_TIMEOUT;
+ }
+
+ printk(KERN_DEBUG "%s: associate with %pM (try %d)\n",
+ sdata->name, wk->filter_ta, wk->assoc.tries);
+ ieee80211_send_assoc(sdata, wk);
+
+ wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
+ run_again(local, wk->timeout);
+
+ return WORK_ACT_NONE;
+}
+
+static enum work_action __must_check
+ieee80211_remain_on_channel_timeout(struct ieee80211_work *wk)
+{
+ /*
+ * First time we run, do nothing -- the generic code will
+ * have switched to the right channel etc.
+ */
+ if (!wk->remain.started) {
+ wk->remain.started = true;
+ wk->timeout = jiffies + msecs_to_jiffies(wk->remain.duration);
+
+ cfg80211_ready_on_channel(wk->sdata->dev, (unsigned long) wk,
+ wk->chan, wk->chan_type,
+ wk->remain.duration, GFP_KERNEL);
+
+ return WORK_ACT_NONE;
+ }
+
+ return WORK_ACT_TIMEOUT;
+}
+
+static void ieee80211_auth_challenge(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt,
+ size_t len)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ u8 *pos;
+ struct ieee802_11_elems elems;
+
+ pos = mgmt->u.auth.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+ if (!elems.challenge)
+ return;
+ ieee80211_send_auth(sdata, 3, wk->probe_auth.algorithm,
+ elems.challenge - 2, elems.challenge_len + 2,
+ wk->filter_ta, wk->probe_auth.key,
+ wk->probe_auth.key_len, wk->probe_auth.key_idx);
+ wk->probe_auth.transaction = 4;
+}
+
+static enum work_action __must_check
+ieee80211_rx_mgmt_auth(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len)
+{
+ u16 auth_alg, auth_transaction, status_code;
+
+ if (wk->type != IEEE80211_WORK_AUTH)
+ return WORK_ACT_NONE;
+
+ if (len < 24 + 6)
+ return WORK_ACT_NONE;
+
+ auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
+ auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
+ status_code = le16_to_cpu(mgmt->u.auth.status_code);
+
+ if (auth_alg != wk->probe_auth.algorithm ||
+ auth_transaction != wk->probe_auth.transaction)
+ return WORK_ACT_NONE;
+
+ if (status_code != WLAN_STATUS_SUCCESS) {
+ printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
+ wk->sdata->name, mgmt->sa, status_code);
+ return WORK_ACT_DONE;
+ }
+
+ switch (wk->probe_auth.algorithm) {
+ case WLAN_AUTH_OPEN:
+ case WLAN_AUTH_LEAP:
+ case WLAN_AUTH_FT:
+ break;
+ case WLAN_AUTH_SHARED_KEY:
+ if (wk->probe_auth.transaction != 4) {
+ ieee80211_auth_challenge(wk, mgmt, len);
+ /* need another frame */
+ return WORK_ACT_NONE;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ return WORK_ACT_NONE;
+ }
+
+ printk(KERN_DEBUG "%s: authenticated\n", wk->sdata->name);
+ return WORK_ACT_DONE;
+}
+
+static enum work_action __must_check
+ieee80211_rx_mgmt_assoc_resp(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ bool reassoc)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+ u16 capab_info, status_code, aid;
+ struct ieee802_11_elems elems;
+ u8 *pos;
+
+ /*
+ * AssocResp and ReassocResp have identical structure, so process both
+ * of them in this function.
+ */
+
+ if (len < 24 + 6)
+ return WORK_ACT_NONE;
+
+ capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
+ status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
+
+ printk(KERN_DEBUG "%s: RX %sssocResp from %pM (capab=0x%x "
+ "status=%d aid=%d)\n",
+ sdata->name, reassoc ? "Rea" : "A", mgmt->sa,
+ capab_info, status_code, (u16)(aid & ~(BIT(15) | BIT(14))));
+
+ pos = mgmt->u.assoc_resp.variable;
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
+
+ if (status_code == WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY &&
+ elems.timeout_int && elems.timeout_int_len == 5 &&
+ elems.timeout_int[0] == WLAN_TIMEOUT_ASSOC_COMEBACK) {
+ u32 tu, ms;
+ tu = get_unaligned_le32(elems.timeout_int + 1);
+ ms = tu * 1024 / 1000;
+ printk(KERN_DEBUG "%s: %pM rejected association temporarily; "
+ "comeback duration %u TU (%u ms)\n",
+ sdata->name, mgmt->sa, tu, ms);
+ wk->timeout = jiffies + msecs_to_jiffies(ms);
+ if (ms > IEEE80211_ASSOC_TIMEOUT)
+ run_again(local, wk->timeout);
+ return WORK_ACT_NONE;
+ }
+
+ if (status_code != WLAN_STATUS_SUCCESS)
+ printk(KERN_DEBUG "%s: %pM denied association (code=%d)\n",
+ sdata->name, mgmt->sa, status_code);
+ else
+ printk(KERN_DEBUG "%s: associated\n", sdata->name);
+
+ return WORK_ACT_DONE;
+}
+
+static enum work_action __must_check
+ieee80211_rx_mgmt_probe_resp(struct ieee80211_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_sub_if_data *sdata = wk->sdata;
+ struct ieee80211_local *local = sdata->local;
+ size_t baselen;
+
+ ASSERT_WORK_MTX(local);
+
+ baselen = (u8 *) mgmt->u.probe_resp.variable - (u8 *) mgmt;
+ if (baselen > len)
+ return WORK_ACT_NONE;
+
+ printk(KERN_DEBUG "%s: direct probe responded\n", sdata->name);
+ return WORK_ACT_DONE;
+}
+
+static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_work *wk;
+ enum work_action rma = WORK_ACT_NONE;
+ u16 fc;
+
+ rx_status = (struct ieee80211_rx_status *) skb->cb;
+ mgmt = (struct ieee80211_mgmt *) skb->data;
+ fc = le16_to_cpu(mgmt->frame_control);
+
+ mutex_lock(&local->work_mtx);
+
+ list_for_each_entry(wk, &local->work_list, list) {
+ const u8 *bssid = NULL;
+
+ switch (wk->type) {
+ case IEEE80211_WORK_DIRECT_PROBE:
+ case IEEE80211_WORK_AUTH:
+ case IEEE80211_WORK_ASSOC:
+ bssid = wk->filter_ta;
+ break;
+ default:
+ continue;
+ }
+
+ /*
+ * Before queuing, we already verified mgmt->sa,
+ * so this is needed just for matching.
+ */
+ if (compare_ether_addr(bssid, mgmt->bssid))
+ continue;
+
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_PROBE_RESP:
+ rma = ieee80211_rx_mgmt_probe_resp(wk, mgmt, skb->len,
+ rx_status);
+ break;
+ case IEEE80211_STYPE_AUTH:
+ rma = ieee80211_rx_mgmt_auth(wk, mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_ASSOC_RESP:
+ rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
+ skb->len, false);
+ break;
+ case IEEE80211_STYPE_REASSOC_RESP:
+ rma = ieee80211_rx_mgmt_assoc_resp(wk, mgmt,
+ skb->len, true);
+ break;
+ default:
+ WARN_ON(1);
+ }
+ /*
+ * We've processed this frame for that work, so it can't
+ * belong to another work struct.
+ * NB: this is also required for correctness for 'rma'!
+ */
+ break;
+ }
+
+ switch (rma) {
+ case WORK_ACT_NONE:
+ break;
+ case WORK_ACT_DONE:
+ list_del_rcu(&wk->list);
+ break;
+ default:
+ WARN(1, "unexpected: %d", rma);
+ }
+
+ mutex_unlock(&local->work_mtx);
+
+ if (rma != WORK_ACT_DONE)
+ goto out;
+
+ switch (wk->done(wk, skb)) {
+ case WORK_DONE_DESTROY:
+ free_work(wk);
+ break;
+ case WORK_DONE_REQUEUE:
+ synchronize_rcu();
+ wk->started = false; /* restart */
+ mutex_lock(&local->work_mtx);
+ list_add_tail(&wk->list, &local->work_list);
+ mutex_unlock(&local->work_mtx);
+ }
+
+ out:
+ kfree_skb(skb);
+}
+
+static void ieee80211_work_timer(unsigned long data)
+{
+ struct ieee80211_local *local = (void *) data;
+
+ if (local->quiescing)
+ return;
+
+ ieee80211_queue_work(&local->hw, &local->work_work);
+}
+
+static void ieee80211_work_work(struct work_struct *work)
+{
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, work_work);
+ struct sk_buff *skb;
+ struct ieee80211_work *wk, *tmp;
+ LIST_HEAD(free_work);
+ enum work_action rma;
+ bool remain_off_channel = false;
+
+ if (local->scanning)
+ return;
+
+ /*
+ * ieee80211_queue_work() should have picked up most cases,
+ * here we'll pick the the rest.
+ */
+ if (WARN(local->suspended, "work scheduled while going to suspend\n"))
+ return;
+
+ /* first process frames to avoid timing out while a frame is pending */
+ while ((skb = skb_dequeue(&local->work_skb_queue)))
+ ieee80211_work_rx_queued_mgmt(local, skb);
+
+ ieee80211_recalc_idle(local);
+
+ mutex_lock(&local->work_mtx);
+
+ list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
+ /* mark work as started if it's on the current off-channel */
+ if (!wk->started && local->tmp_channel &&
+ wk->chan == local->tmp_channel &&
+ wk->chan_type == local->tmp_channel_type) {
+ wk->started = true;
+ wk->timeout = jiffies;
+ }
+
+ if (!wk->started && !local->tmp_channel) {
+ /*
+ * TODO: could optimize this by leaving the
+ * station vifs in awake mode if they
+ * happen to be on the same channel as
+ * the requested channel
+ */
+ ieee80211_offchannel_stop_beaconing(local);
+ ieee80211_offchannel_stop_station(local);
+
+ local->tmp_channel = wk->chan;
+ local->tmp_channel_type = wk->chan_type;
+ ieee80211_hw_config(local, 0);
+ wk->started = true;
+ wk->timeout = jiffies;
+ }
+
+ /* don't try to work with items that aren't started */
+ if (!wk->started)
+ continue;
+
+ if (time_is_after_jiffies(wk->timeout)) {
+ /*
+ * This work item isn't supposed to be worked on
+ * right now, but take care to adjust the timer
+ * properly.
+ */
+ run_again(local, wk->timeout);
+ continue;
+ }
+
+ switch (wk->type) {
+ default:
+ WARN_ON(1);
+ /* nothing */
+ rma = WORK_ACT_NONE;
+ break;
+ case IEEE80211_WORK_ABORT:
+ rma = WORK_ACT_TIMEOUT;
+ case IEEE80211_WORK_DIRECT_PROBE:
+ rma = ieee80211_direct_probe(wk);
+ break;
+ case IEEE80211_WORK_AUTH:
+ rma = ieee80211_authenticate(wk);
+ break;
+ case IEEE80211_WORK_ASSOC:
+ rma = ieee80211_associate(wk);
+ break;
+ case IEEE80211_WORK_REMAIN_ON_CHANNEL:
+ rma = ieee80211_remain_on_channel_timeout(wk);
+ break;
+ }
+
+ switch (rma) {
+ case WORK_ACT_NONE:
+ /* might have changed the timeout */
+ run_again(local, wk->timeout);
+ break;
+ case WORK_ACT_TIMEOUT:
+ list_del_rcu(&wk->list);
+ synchronize_rcu();
+ list_add(&wk->list, &free_work);
+ break;
+ default:
+ WARN(1, "unexpected: %d", rma);
+ }
+ }
+
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (!wk->started)
+ continue;
+ if (wk->chan != local->tmp_channel)
+ continue;
+ if (wk->chan_type != local->tmp_channel_type)
+ continue;
+ remain_off_channel = true;
+ }
+
+ if (!remain_off_channel && local->tmp_channel) {
+ local->tmp_channel = NULL;
+ ieee80211_hw_config(local, 0);
+ ieee80211_offchannel_return(local, true);
+ /* give connection some time to breathe */
+ run_again(local, jiffies + HZ/2);
+ }
+
+ if (list_empty(&local->work_list) && local->scan_req)
+ ieee80211_queue_delayed_work(&local->hw,
+ &local->scan_work,
+ round_jiffies_relative(0));
+
+ mutex_unlock(&local->work_mtx);
+
+ ieee80211_recalc_idle(local);
+
+ list_for_each_entry_safe(wk, tmp, &free_work, list) {
+ wk->done(wk, NULL);
+ list_del(&wk->list);
+ kfree(wk);
+ }
+}
+
+void ieee80211_add_work(struct ieee80211_work *wk)
+{
+ struct ieee80211_local *local;
+
+ if (WARN_ON(!wk->chan))
+ return;
+
+ if (WARN_ON(!wk->sdata))
+ return;
+
+ if (WARN_ON(!wk->done))
+ return;
+
+ if (WARN_ON(!ieee80211_sdata_running(wk->sdata)))
+ return;
+
+ wk->started = false;
+
+ local = wk->sdata->local;
+ mutex_lock(&local->work_mtx);
+ list_add_tail(&wk->list, &local->work_list);
+ mutex_unlock(&local->work_mtx);
+
+ ieee80211_queue_work(&local->hw, &local->work_work);
+}
+
+void ieee80211_work_init(struct ieee80211_local *local)
+{
+ mutex_init(&local->work_mtx);
+ INIT_LIST_HEAD(&local->work_list);
+ setup_timer(&local->work_timer, ieee80211_work_timer,
+ (unsigned long)local);
+ INIT_WORK(&local->work_work, ieee80211_work_work);
+ skb_queue_head_init(&local->work_skb_queue);
+}
+
+void ieee80211_work_purge(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_work *wk;
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+ wk->type = IEEE80211_WORK_ABORT;
+ wk->started = true;
+ wk->timeout = jiffies;
+ }
+ mutex_unlock(&local->work_mtx);
+
+ /* run cleanups etc. */
+ ieee80211_work_work(&local->work_work);
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry(wk, &local->work_list, list) {
+ if (wk->sdata != sdata)
+ continue;
+ WARN_ON(1);
+ break;
+ }
+ mutex_unlock(&local->work_mtx);
+}
+
+ieee80211_rx_result ieee80211_work_rx_mgmt(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_mgmt *mgmt;
+ struct ieee80211_work *wk;
+ u16 fc;
+
+ if (skb->len < 24)
+ return RX_DROP_MONITOR;
+
+ mgmt = (struct ieee80211_mgmt *) skb->data;
+ fc = le16_to_cpu(mgmt->frame_control);
+
+ list_for_each_entry_rcu(wk, &local->work_list, list) {
+ if (sdata != wk->sdata)
+ continue;
+ if (compare_ether_addr(wk->filter_ta, mgmt->sa))
+ continue;
+ if (compare_ether_addr(wk->filter_ta, mgmt->bssid))
+ continue;
+
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_AUTH:
+ case IEEE80211_STYPE_PROBE_RESP:
+ case IEEE80211_STYPE_ASSOC_RESP:
+ case IEEE80211_STYPE_REASSOC_RESP:
+ case IEEE80211_STYPE_DEAUTH:
+ case IEEE80211_STYPE_DISASSOC:
+ skb_queue_tail(&local->work_skb_queue, skb);
+ ieee80211_queue_work(&local->hw, &local->work_work);
+ return RX_QUEUED;
+ }
+ }
+
+ return RX_CONTINUE;
+}
+
+static enum work_done_result ieee80211_remain_done(struct ieee80211_work *wk,
+ struct sk_buff *skb)
+{
+ /*
+ * We are done serving the remain-on-channel command.
+ */
+ cfg80211_remain_on_channel_expired(wk->sdata->dev, (unsigned long) wk,
+ wk->chan, wk->chan_type,
+ GFP_KERNEL);
+
+ return WORK_DONE_DESTROY;
+}
+
+int ieee80211_wk_remain_on_channel(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, u64 *cookie)
+{
+ struct ieee80211_work *wk;
+
+ wk = kzalloc(sizeof(*wk), GFP_KERNEL);
+ if (!wk)
+ return -ENOMEM;
+
+ wk->type = IEEE80211_WORK_REMAIN_ON_CHANNEL;
+ wk->chan = chan;
+ wk->chan_type = channel_type;
+ wk->sdata = sdata;
+ wk->done = ieee80211_remain_done;
+
+ wk->remain.duration = duration;
+
+ *cookie = (unsigned long) wk;
+
+ ieee80211_add_work(wk);
+
+ return 0;
+}
+
+int ieee80211_wk_cancel_remain_on_channel(struct ieee80211_sub_if_data *sdata,
+ u64 cookie)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_work *wk, *tmp;
+ bool found = false;
+
+ mutex_lock(&local->work_mtx);
+ list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
+ if ((unsigned long) wk == cookie) {
+ wk->timeout = jiffies;
+ found = true;
+ break;
+ }
+ }
+ mutex_unlock(&local->work_mtx);
+
+ if (!found)
+ return -ENOENT;
+
+ ieee80211_queue_work(&local->hw, &local->work_work);
+
+ return 0;
+}
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 79a698052218..f2d76238b9b5 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -112,7 +112,8 @@ config IP_VS_RR
module, choose M here. If unsure, say N.
config IP_VS_WRR
- tristate "weighted round-robin scheduling"
+ tristate "weighted round-robin scheduling"
+ select GCD
---help---
The weighted robin-robin scheduling algorithm directs network
connections to different real servers based on server weights
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 6bde12da2fe0..c37ac2d7bec4 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2077,6 +2077,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX)
+ return -EINVAL;
+ if (len < 0 || len > MAX_ARG_LEN)
+ return -EINVAL;
if (len != set_arglen[SET_CMDID(cmd)]) {
pr_err("set_ctl: len %u != %u\n",
len, set_arglen[SET_CMDID(cmd)]);
@@ -2352,17 +2356,25 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
{
unsigned char arg[128];
int ret = 0;
+ unsigned int copylen;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
+ if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX)
+ return -EINVAL;
+
if (*len < get_arglen[GET_CMDID(cmd)]) {
pr_err("get_ctl: len %u < %u\n",
*len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
- if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0)
+ copylen = get_arglen[GET_CMDID(cmd)];
+ if (copylen > 128)
+ return -EINVAL;
+
+ if (copy_from_user(arg, user, copylen) != 0)
return -EFAULT;
if (mutex_lock_interruptible(&__ip_vs_mutex))
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 6182e8ea0be7..3c115fc19784 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
+#include <linux/gcd.h>
#include <net/ip_vs.h>
@@ -38,20 +39,6 @@ struct ip_vs_wrr_mark {
};
-/*
- * Get the gcd of server weights
- */
-static int gcd(int a, int b)
-{
- int c;
-
- while ((c = a % b)) {
- a = b;
- b = c;
- }
- return b;
-}
-
static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index 38ea7ef3ccd2..f0732aa18e4f 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -323,24 +323,24 @@ static void update_nl_seq(struct nf_conn *ct, u32 nl_seq,
struct nf_ct_ftp_master *info, int dir,
struct sk_buff *skb)
{
- unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
+ unsigned int i, oldest;
/* Look for oldest: if we find exact match, we're done. */
for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
if (info->seq_aft_nl[dir][i] == nl_seq)
return;
-
- if (oldest == info->seq_aft_nl_num[dir] ||
- before(info->seq_aft_nl[dir][i],
- info->seq_aft_nl[dir][oldest]))
- oldest = i;
}
if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
- } else if (oldest != NUM_SEQ_TO_REMEMBER &&
- after(nl_seq, info->seq_aft_nl[dir][oldest])) {
- info->seq_aft_nl[dir][oldest] = nl_seq;
+ } else {
+ if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1]))
+ oldest = 0;
+ else
+ oldest = 1;
+
+ if (after(nl_seq, info->seq_aft_nl[dir][oldest]))
+ info->seq_aft_nl[dir][oldest] = nl_seq;
}
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e0516a22be2e..f126d18dbdc4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1021,8 +1021,20 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
status = TP_STATUS_SEND_REQUEST;
err = dev_queue_xmit(skb);
- if (unlikely(err > 0 && (err = net_xmit_errno(err)) != 0))
- goto out_xmit;
+ if (unlikely(err > 0)) {
+ err = net_xmit_errno(err);
+ if (err && __packet_get_status(po, ph) ==
+ TP_STATUS_AVAILABLE) {
+ /* skb was destructed already */
+ skb = NULL;
+ goto out_status;
+ }
+ /*
+ * skb was dropped but not destructed yet;
+ * let's treat it like congestion or err < 0
+ */
+ err = 0;
+ }
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
} while (likely((ph != NULL) ||
@@ -1033,9 +1045,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
err = len_sum;
goto out_put;
-out_xmit:
- skb->destructor = sock_wfree;
- atomic_dec(&po->tx_ring.pending);
out_status:
__packet_set_status(po, ph, status);
kfree_skb(skb);
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index 67f072e94d00..387197b579b1 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -75,7 +75,8 @@ static int pn_sendmsg(struct kiocb *iocb, struct sock *sk,
struct sk_buff *skb;
int err;
- if (msg->msg_flags & MSG_OOB)
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT))
return -EOPNOTSUPP;
if (msg->msg_name == NULL)
@@ -119,7 +120,8 @@ static int pn_recvmsg(struct kiocb *iocb, struct sock *sk,
int rval = -EOPNOTSUPP;
int copylen;
- if (flags & MSG_OOB)
+ if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT))
goto out_nofree;
if (addr_len)
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index d183509d3fa6..d01208968c83 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -96,11 +96,11 @@ static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
goto drop;
}
- if (likely(skb_headroom(skb) & 3)) {
+ if (skb_headroom(skb) & 3) {
struct sk_buff *rskb, *fs;
int flen = 0;
- /* Phonet Pipe data header is misaligned (3 bytes),
+ /* Phonet Pipe data header may be misaligned (3 bytes),
* so wrap the IP packet as a single fragment of an head-less
* socket buffer. The network stack will pull what it needs,
* but at least, the whole IP payload is not memcpy'd. */
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index b6356f3832f6..360cf377693e 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -354,6 +354,9 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
queue = &pn->ctrlreq_queue;
goto queue;
+ case PNS_PIPE_ALIGNED_DATA:
+ __skb_pull(skb, 1);
+ /* fall through */
case PNS_PIPE_DATA:
__skb_pull(skb, 3); /* Pipe data header */
if (!pn_flow_safe(pn->rx_fc)) {
@@ -441,6 +444,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
struct sockaddr_pn dst;
u16 peer_type;
u8 pipe_handle, enabled, n_sb;
+ u8 aligned = 0;
if (!pskb_pull(skb, sizeof(*hdr) + 4))
return -EINVAL;
@@ -479,6 +483,9 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
return -EINVAL;
peer_type = (peer_type & 0xff00) | data[0];
break;
+ case PN_PIPE_SB_ALIGNED_DATA:
+ aligned = data[0] != 0;
+ break;
}
n_sb--;
}
@@ -510,6 +517,7 @@ static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
newpn->rx_credits = 0;
newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
newpn->init_enable = enabled;
+ newpn->aligned = aligned;
BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
skb_queue_head(&newsk->sk_receive_queue, skb);
@@ -829,11 +837,15 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
return -ENOBUFS;
}
- skb_push(skb, 3);
+ skb_push(skb, 3 + pn->aligned);
skb_reset_transport_header(skb);
ph = pnp_hdr(skb);
ph->utid = 0;
- ph->message_id = PNS_PIPE_DATA;
+ if (pn->aligned) {
+ ph->message_id = PNS_PIPE_ALIGNED_DATA;
+ ph->data[0] = 0; /* padding */
+ } else
+ ph->message_id = PNS_PIPE_DATA;
ph->pipe_handle = pn->pipe_handle;
return pn_skb_send(sk, skb, &pipe_srv);
@@ -848,7 +860,9 @@ static int pep_sendmsg(struct kiocb *iocb, struct sock *sk,
int flags = msg->msg_flags;
int err, done;
- if (msg->msg_flags & MSG_OOB || !(msg->msg_flags & MSG_EOR))
+ if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT)) ||
+ !(msg->msg_flags & MSG_EOR))
return -EOPNOTSUPP;
skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
@@ -927,6 +941,9 @@ int pep_write(struct sock *sk, struct sk_buff *skb)
struct sk_buff *rskb, *fs;
int flen = 0;
+ if (pep_sk(sk)->aligned)
+ return pipe_skb_send(sk, skb);
+
rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
if (!rskb) {
kfree_skb(skb);
@@ -966,6 +983,10 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
struct sk_buff *skb;
int err;
+ if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
+ MSG_NOSIGNAL|MSG_CMSG_COMPAT))
+ return -EOPNOTSUPP;
+
if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
return -ENOTCONN;
@@ -973,6 +994,8 @@ static int pep_recvmsg(struct kiocb *iocb, struct sock *sk,
/* Dequeue and acknowledge control request */
struct pep_sock *pn = pep_sk(sk);
+ if (flags & MSG_PEEK)
+ return -EOPNOTSUPP;
skb = skb_dequeue(&pn->ctrlreq_queue);
if (skb) {
pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
index 114df6eec8c3..968e8bac1b5d 100644
--- a/net/rose/rose_loopback.c
+++ b/net/rose/rose_loopback.c
@@ -75,7 +75,7 @@ static void rose_loopback_timer(unsigned long param)
lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);
frametype = skb->data[2];
dest = (rose_address *)(skb->data + 4);
- lci_o = 0xFFF - lci_i;
+ lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i;
skb_reset_transport_header(skb);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 89ab66e54740..67fdac9d2d33 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2087,8 +2087,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
/* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */
- if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) )
- sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ;
+ sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ);
return 0;
}
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3c3c50f38a1c..f7a7f8380e38 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -644,7 +644,22 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
if (IS_ERR(p)) {
err = PTR_ERR(p);
- gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
+ switch (err) {
+ case -EACCES:
+ gss_msg->msg.errno = err;
+ err = mlen;
+ break;
+ case -EFAULT:
+ case -ENOMEM:
+ case -EINVAL:
+ case -ENOSYS:
+ gss_msg->msg.errno = -EAGAIN;
+ break;
+ default:
+ printk(KERN_CRIT "%s: bad return from "
+ "gss_fill_context: %zd\n", __func__, err);
+ BUG();
+ }
goto err_release_msg;
}
gss_msg->ctx = gss_get_ctx(ctx);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index ef45eba22485..2deb0ed72ff4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -131,8 +131,10 @@ gss_import_sec_context_kerberos(const void *p,
struct krb5_ctx *ctx;
int tmp;
- if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS)))
+ if (!(ctx = kzalloc(sizeof(*ctx), GFP_NOFS))) {
+ p = ERR_PTR(-ENOMEM);
goto out_err;
+ }
p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
if (IS_ERR(p))
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 6efbb0cd3c7c..76e4c6f4ac3c 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -252,7 +252,7 @@ gss_import_sec_context(const void *input_token, size_t bufsize,
struct gss_ctx **ctx_id)
{
if (!(*ctx_id = kzalloc(sizeof(**ctx_id), GFP_KERNEL)))
- return GSS_S_FAILURE;
+ return -ENOMEM;
(*ctx_id)->mech_type = gss_mech_get(mech);
return mech->gm_ops
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 1c924ee0a1ef..7d1f9e928f69 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -699,7 +699,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
spin_unlock_bh(&pool->sp_lock);
len = 0;
- if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) {
+ if (test_bit(XPT_LISTENER, &xprt->xpt_flags) &&
+ !test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
struct svc_xprt *newxpt;
newxpt = xprt->xpt_ops->xpo_accept(xprt);
if (newxpt) {
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index 3b30d1130b61..dafbd533067c 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -33,6 +33,7 @@ config TIPC_ADVANCED
config TIPC_ZONES
int "Maximum number of zones in network"
depends on TIPC_ADVANCED
+ range 1 255
default "3"
help
Max number of zones inside TIPC network. Max supported value
@@ -44,10 +45,10 @@ config TIPC_ZONES
config TIPC_CLUSTERS
int "Maximum number of clusters in a zone"
depends on TIPC_ADVANCED
+ range 1 1
default "1"
help
- ***Only 1 (one cluster in a zone) is supported by current code.
- Any value set here will be overridden.***
+ ***Only 1 (one cluster in a zone) is supported by current code.***
(Max number of clusters inside TIPC zone. Max supported
value is 4095 clusters, minimum is 1.
@@ -59,6 +60,7 @@ config TIPC_CLUSTERS
config TIPC_NODES
int "Maximum number of nodes in cluster"
depends on TIPC_ADVANCED
+ range 8 2047
default "255"
help
Maximum number of nodes inside a TIPC cluster. Maximum
@@ -70,6 +72,7 @@ config TIPC_NODES
config TIPC_SLAVE_NODES
int "Maximum number of slave nodes in cluster"
depends on TIPC_ADVANCED
+ range 0 2047
default "0"
help
***This capability is not supported by current code.***
@@ -83,6 +86,7 @@ config TIPC_SLAVE_NODES
config TIPC_PORTS
int "Maximum number of ports in a node"
depends on TIPC_ADVANCED
+ range 217 65536
default "8191"
help
Maximum number of ports within a node. Maximum
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 3256bd7d398f..52c571fedbe0 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -189,11 +189,11 @@ static int __init tipc_init(void)
tipc_remote_management = 1;
tipc_max_publications = 10000;
tipc_max_subscriptions = 2000;
- tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
- tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 255);
- tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
- tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
- tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
+ tipc_max_ports = CONFIG_TIPC_PORTS;
+ tipc_max_zones = CONFIG_TIPC_ZONES;
+ tipc_max_clusters = CONFIG_TIPC_CLUSTERS;
+ tipc_max_nodes = CONFIG_TIPC_NODES;
+ tipc_max_slaves = CONFIG_TIPC_SLAVE_NODES;
tipc_net_id = 4711;
if ((res = tipc_core_start()))
diff --git a/net/wireless/.gitignore b/net/wireless/.gitignore
new file mode 100644
index 000000000000..c33451b896d9
--- /dev/null
+++ b/net/wireless/.gitignore
@@ -0,0 +1 @@
+regdb.c
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 90e93a5701aa..d0ee29063e5d 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -94,20 +94,21 @@ config CFG80211_DEBUGFS
If unsure, say N.
-config WIRELESS_OLD_REGULATORY
- bool "Old wireless static regulatory definitions"
+config CFG80211_INTERNAL_REGDB
+ bool "use statically compiled regulatory rules database" if EMBEDDED
default n
depends on CFG80211
---help---
- This option enables the old static regulatory information
- and uses it within the new framework. This option is available
- for historical reasons and it is advised to leave it off.
+ This option generates an internal data structure representing
+ the wireless regulatory rules described in net/wireless/db.txt
+ and includes code to query that database. This is an alternative
+ to using CRDA for defining regulatory rules for the kernel.
For details see:
http://wireless.kernel.org/en/developers/Regulatory
- Say N and if you say Y, please tell us why. The default is N.
+ Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_WEXT
bool "cfg80211 wireless extensions compatibility"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index f07c8dc7aab2..e77e508126fa 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -13,5 +13,11 @@ cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
+cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
ccflags-y += -D__CHECK_ENDIAN__
+
+$(obj)/regdb.c: $(src)/db.txt $(src)/genregdb.awk
+ @$(AWK) -f $(srctree)/$(src)/genregdb.awk < $< > $@
+
+clean-files := regdb.c
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index a46ac6c9b365..bf1737fc9a7e 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -41,44 +41,57 @@ rdev_fixed_channel(struct cfg80211_registered_device *rdev,
return result;
}
-int rdev_set_freq(struct cfg80211_registered_device *rdev,
- struct wireless_dev *for_wdev,
+struct ieee80211_channel *
+rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
int freq, enum nl80211_channel_type channel_type)
{
struct ieee80211_channel *chan;
struct ieee80211_sta_ht_cap *ht_cap;
- int result;
-
- if (rdev_fixed_channel(rdev, for_wdev))
- return -EBUSY;
-
- if (!rdev->ops->set_channel)
- return -EOPNOTSUPP;
chan = ieee80211_get_channel(&rdev->wiphy, freq);
/* Primary channel not allowed */
if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
- return -EINVAL;
+ return NULL;
if (channel_type == NL80211_CHAN_HT40MINUS &&
chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
- return -EINVAL;
+ return NULL;
else if (channel_type == NL80211_CHAN_HT40PLUS &&
chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
- return -EINVAL;
+ return NULL;
ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
if (channel_type != NL80211_CHAN_NO_HT) {
if (!ht_cap->ht_supported)
- return -EINVAL;
+ return NULL;
if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
- return -EINVAL;
+ return NULL;
}
+ return chan;
+}
+
+int rdev_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *for_wdev,
+ int freq, enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_channel *chan;
+ int result;
+
+ if (rdev_fixed_channel(rdev, for_wdev))
+ return -EBUSY;
+
+ if (!rdev->ops->set_channel)
+ return -EOPNOTSUPP;
+
+ chan = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (!chan)
+ return -EINVAL;
+
result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
if (result)
return result;
diff --git a/net/wireless/core.c b/net/wireless/core.c
index c2a2c563d21a..20db90246de5 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -402,6 +402,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
rdev->wiphy.retry_long = 4;
rdev->wiphy.frag_threshold = (u32) -1;
rdev->wiphy.rts_threshold = (u32) -1;
+ rdev->wiphy.coverage_class = 0;
return &rdev->wiphy;
}
@@ -745,9 +746,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
mutex_unlock(&rdev->devlist_mtx);
dev_put(dev);
}
-#ifdef CONFIG_CFG80211_WEXT
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
+#ifdef CONFIG_CFG80211_WEXT
wdev_lock(wdev);
switch (wdev->iftype) {
case NL80211_IFTYPE_ADHOC:
@@ -760,10 +761,10 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
break;
}
wdev_unlock(wdev);
+#endif
rdev->opencount++;
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
-#endif
break;
case NETDEV_UNREGISTER:
/*
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 4ef3efc94106..2d6a6b9c0c43 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -111,7 +111,8 @@ struct cfg80211_internal_bss {
unsigned long ts;
struct kref ref;
atomic_t hold;
- bool ies_allocated;
+ bool beacon_ies_allocated;
+ bool proberesp_ies_allocated;
/* must be last because of priv member */
struct cfg80211_bss pub;
@@ -374,10 +375,15 @@ void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
struct ieee80211_channel *
rdev_fixed_channel(struct cfg80211_registered_device *rdev,
struct wireless_dev *for_wdev);
+struct ieee80211_channel *
+rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
+ int freq, enum nl80211_channel_type channel_type);
int rdev_set_freq(struct cfg80211_registered_device *rdev,
struct wireless_dev *for_wdev,
int freq, enum nl80211_channel_type channel_type);
+u16 cfg80211_calculate_bitrate(struct rate_info *rate);
+
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
#else
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
new file mode 100644
index 000000000000..a2fc3a09ccdc
--- /dev/null
+++ b/net/wireless/db.txt
@@ -0,0 +1,17 @@
+#
+# This file is a placeholder to prevent accidental build breakage if someone
+# enables CONFIG_CFG80211_INTERNAL_REGDB. Almost no one actually needs to
+# enable that build option.
+#
+# You should be using CRDA instead. It is even better if you use the CRDA
+# package provided by your distribution, since they will probably keep it
+# up-to-date on your behalf.
+#
+# If you _really_ intend to use CONFIG_CFG80211_INTERNAL_REGDB then you will
+# need to replace this file with one containing appropriately formatted
+# regulatory rules that cover the regulatory domains you will be using. Your
+# best option is to extract the db.txt file from the wireless-regdb git
+# repository:
+#
+# git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
+#
diff --git a/net/wireless/genregdb.awk b/net/wireless/genregdb.awk
new file mode 100644
index 000000000000..3cc9e69880a8
--- /dev/null
+++ b/net/wireless/genregdb.awk
@@ -0,0 +1,118 @@
+#!/usr/bin/awk -f
+#
+# genregdb.awk -- generate regdb.c from db.txt
+#
+# Actually, it reads from stdin (presumed to be db.txt) and writes
+# to stdout (presumed to be regdb.c), but close enough...
+#
+# Copyright 2009 John W. Linville <linville@tuxdriver.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+BEGIN {
+ active = 0
+ rules = 0;
+ print "/*"
+ print " * DO NOT EDIT -- file generated from data in db.txt"
+ print " */"
+ print ""
+ print "#include <linux/nl80211.h>"
+ print "#include <net/cfg80211.h>"
+ print ""
+ regdb = "const struct ieee80211_regdomain *reg_regdb[] = {\n"
+}
+
+/^[ \t]*#/ {
+ # Ignore
+}
+
+!active && /^[ \t]*$/ {
+ # Ignore
+}
+
+!active && /country/ {
+ country=$2
+ sub(/:/, "", country)
+ printf "static const struct ieee80211_regdomain regdom_%s = {\n", country
+ printf "\t.alpha2 = \"%s\",\n", country
+ printf "\t.reg_rules = {\n"
+ active = 1
+ regdb = regdb "\t&regdom_" country ",\n"
+}
+
+active && /^[ \t]*\(/ {
+ start = $1
+ sub(/\(/, "", start)
+ end = $3
+ bw = $5
+ sub(/\),/, "", bw)
+ gain = $6
+ sub(/\(/, "", gain)
+ sub(/,/, "", gain)
+ power = $7
+ sub(/\)/, "", power)
+ sub(/,/, "", power)
+ # power might be in mW...
+ units = $8
+ sub(/\)/, "", units)
+ sub(/,/, "", units)
+ if (units == "mW") {
+ if (power == 100) {
+ power = 20
+ } else if (power == 200) {
+ power = 23
+ } else if (power == 500) {
+ power = 27
+ } else if (power == 1000) {
+ power = 30
+ } else {
+ print "Unknown power value in database!"
+ }
+ }
+ flagstr = ""
+ for (i=8; i<=NF; i++)
+ flagstr = flagstr $i
+ split(flagstr, flagarray, ",")
+ flags = ""
+ for (arg in flagarray) {
+ if (flagarray[arg] == "NO-OFDM") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_OFDM | "
+ } else if (flagarray[arg] == "NO-CCK") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_CCK | "
+ } else if (flagarray[arg] == "NO-INDOOR") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_INDOOR | "
+ } else if (flagarray[arg] == "NO-OUTDOOR") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_OUTDOOR | "
+ } else if (flagarray[arg] == "DFS") {
+ flags = flags "\n\t\t\tNL80211_RRF_DFS | "
+ } else if (flagarray[arg] == "PTP-ONLY") {
+ flags = flags "\n\t\t\tNL80211_RRF_PTP_ONLY | "
+ } else if (flagarray[arg] == "PTMP-ONLY") {
+ flags = flags "\n\t\t\tNL80211_RRF_PTMP_ONLY | "
+ } else if (flagarray[arg] == "PASSIVE-SCAN") {
+ flags = flags "\n\t\t\tNL80211_RRF_PASSIVE_SCAN | "
+ } else if (flagarray[arg] == "NO-IBSS") {
+ flags = flags "\n\t\t\tNL80211_RRF_NO_IBSS | "
+ }
+ }
+ flags = flags "0"
+ printf "\t\tREG_RULE(%d, %d, %d, %d, %d, %s),\n", start, end, bw, gain, power, flags
+ rules++
+}
+
+active && /^[ \t]*$/ {
+ active = 0
+ printf "\t},\n"
+ printf "\t.n_reg_rules = %d\n", rules
+ printf "};\n\n"
+ rules = 0;
+}
+
+END {
+ print regdb "};"
+ print ""
+ print "int reg_regdb_size = ARRAY_SIZE(reg_regdb);"
+}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 82e6002c8d67..94d151f6f73e 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -148,22 +148,23 @@ void __cfg80211_send_deauth(struct net_device *dev,
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
const u8 *bssid = mgmt->bssid;
int i;
+ bool found = false;
ASSERT_WDEV_LOCK(wdev);
- nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
-
if (wdev->current_bss &&
memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(&wdev->current_bss->pub);
wdev->current_bss = NULL;
+ found = true;
} else for (i = 0; i < MAX_AUTH_BSSES; i++) {
if (wdev->auth_bsses[i] &&
memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
cfg80211_unhold_bss(wdev->auth_bsses[i]);
cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
wdev->auth_bsses[i] = NULL;
+ found = true;
break;
}
if (wdev->authtry_bsses[i] &&
@@ -171,10 +172,16 @@ void __cfg80211_send_deauth(struct net_device *dev,
cfg80211_unhold_bss(wdev->authtry_bsses[i]);
cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
wdev->authtry_bsses[i] = NULL;
+ found = true;
break;
}
}
+ if (!found)
+ return;
+
+ nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
+
if (wdev->sme_state == CFG80211_SME_CONNECTED) {
u16 reason_code;
bool from_ap;
@@ -684,3 +691,40 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
}
}
}
+
+void cfg80211_ready_on_channel(struct net_device *dev, u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_remain_on_channel(rdev, dev, cookie, chan, channel_type,
+ duration, gfp);
+}
+EXPORT_SYMBOL(cfg80211_ready_on_channel);
+
+void cfg80211_remain_on_channel_expired(struct net_device *dev,
+ u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ gfp_t gfp)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_remain_on_channel_cancel(rdev, dev, cookie, chan,
+ channel_type, gfp);
+}
+EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
+
+void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp)
+{
+ struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
+}
+EXPORT_SYMBOL(cfg80211_new_sta);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a6028433e3a0..4af7991a9ec8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -69,6 +69,7 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
[NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 },
[NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 },
+ [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 },
[NL80211_ATTR_IFTYPE] = { .type = NLA_U32 },
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
@@ -141,6 +142,9 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
[NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
.len = WLAN_PMKID_LEN },
+ [NL80211_ATTR_DURATION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COOKIE] = { .type = NLA_U64 },
+ [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED },
};
/* policy for the attributes */
@@ -442,6 +446,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
dev->wiphy.frag_threshold);
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
dev->wiphy.rts_threshold);
+ NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
+ dev->wiphy.coverage_class);
NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
dev->wiphy.max_scan_ssids);
@@ -569,6 +575,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(set_pmksa, SET_PMKSA);
CMD(del_pmksa, DEL_PMKSA);
CMD(flush_pmksa, FLUSH_PMKSA);
+ CMD(remain_on_channel, REMAIN_ON_CHANNEL);
+ CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
i++;
NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -681,6 +689,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
u32 changed;
u8 retry_short = 0, retry_long = 0;
u32 frag_threshold = 0, rts_threshold = 0;
+ u8 coverage_class = 0;
rtnl_lock();
@@ -803,9 +812,16 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
changed |= WIPHY_PARAM_RTS_THRESHOLD;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
+ coverage_class = nla_get_u8(
+ info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
+ changed |= WIPHY_PARAM_COVERAGE_CLASS;
+ }
+
if (changed) {
u8 old_retry_short, old_retry_long;
u32 old_frag_threshold, old_rts_threshold;
+ u8 old_coverage_class;
if (!rdev->ops->set_wiphy_params) {
result = -EOPNOTSUPP;
@@ -816,6 +832,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
old_retry_long = rdev->wiphy.retry_long;
old_frag_threshold = rdev->wiphy.frag_threshold;
old_rts_threshold = rdev->wiphy.rts_threshold;
+ old_coverage_class = rdev->wiphy.coverage_class;
if (changed & WIPHY_PARAM_RETRY_SHORT)
rdev->wiphy.retry_short = retry_short;
@@ -825,6 +842,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.frag_threshold = frag_threshold;
if (changed & WIPHY_PARAM_RTS_THRESHOLD)
rdev->wiphy.rts_threshold = rts_threshold;
+ if (changed & WIPHY_PARAM_COVERAGE_CLASS)
+ rdev->wiphy.coverage_class = coverage_class;
result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
if (result) {
@@ -832,6 +851,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev->wiphy.retry_long = old_retry_long;
rdev->wiphy.frag_threshold = old_frag_threshold;
rdev->wiphy.rts_threshold = old_rts_threshold;
+ rdev->wiphy.coverage_class = old_coverage_class;
}
}
@@ -1637,42 +1657,9 @@ static int parse_station_flags(struct genl_info *info,
return 0;
}
-static u16 nl80211_calculate_bitrate(struct rate_info *rate)
-{
- int modulation, streams, bitrate;
-
- if (!(rate->flags & RATE_INFO_FLAGS_MCS))
- return rate->legacy;
-
- /* the formula below does only work for MCS values smaller than 32 */
- if (rate->mcs >= 32)
- return 0;
-
- modulation = rate->mcs & 7;
- streams = (rate->mcs >> 3) + 1;
-
- bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
- 13500000 : 6500000;
-
- if (modulation < 4)
- bitrate *= (modulation + 1);
- else if (modulation == 4)
- bitrate *= (modulation + 2);
- else
- bitrate *= (modulation + 3);
-
- bitrate *= streams;
-
- if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
- bitrate = (bitrate / 9) * 10;
-
- /* do NOT round down here */
- return (bitrate + 50000) / 100000;
-}
-
static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
int flags, struct net_device *dev,
- u8 *mac_addr, struct station_info *sinfo)
+ const u8 *mac_addr, struct station_info *sinfo)
{
void *hdr;
struct nlattr *sinfoattr, *txrate;
@@ -1716,8 +1703,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
if (!txrate)
goto nla_put_failure;
- /* nl80211_calculate_bitrate will return 0 for mcs >= 32 */
- bitrate = nl80211_calculate_bitrate(&sinfo->txrate);
+ /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
+ bitrate = cfg80211_calculate_bitrate(&sinfo->txrate);
if (bitrate > 0)
NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);
@@ -2583,12 +2570,6 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);
-#ifdef CONFIG_WIRELESS_OLD_REGULATORY
- /* We ignore world regdom requests with the old regdom setup */
- if (is_world_regdom(data))
- return -EINVAL;
-#endif
-
r = regulatory_hint_user(data);
return r;
@@ -3182,6 +3163,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
res->len_information_elements,
res->information_elements);
+ if (res->beacon_ies && res->len_beacon_ies &&
+ res->beacon_ies != res->information_elements)
+ NLA_PUT(msg, NL80211_BSS_BEACON_IES,
+ res->len_beacon_ies, res->beacon_ies);
if (res->tsf)
NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
if (res->beacon_interval)
@@ -4322,6 +4307,246 @@ static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
}
+static int nl80211_remain_on_channel(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ struct ieee80211_channel *chan;
+ struct sk_buff *msg;
+ void *hdr;
+ u64 cookie;
+ enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
+ u32 freq, duration;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
+ !info->attrs[NL80211_ATTR_DURATION])
+ return -EINVAL;
+
+ duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]);
+
+ /*
+ * We should be on that channel for at least one jiffie,
+ * and more than 5 seconds seems excessive.
+ */
+ if (!duration || !msecs_to_jiffies(duration) || duration > 5000)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!rdev->ops->remain_on_channel) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
+ channel_type = nla_get_u32(
+ info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]);
+ if (channel_type != NL80211_CHAN_NO_HT &&
+ channel_type != NL80211_CHAN_HT20 &&
+ channel_type != NL80211_CHAN_HT40PLUS &&
+ channel_type != NL80211_CHAN_HT40MINUS)
+ err = -EINVAL;
+ goto out;
+ }
+
+ freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ chan = rdev_freq_to_chan(rdev, freq, channel_type);
+ if (chan == NULL) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
+ NL80211_CMD_REMAIN_ON_CHANNEL);
+
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto free_msg;
+ }
+
+ err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan,
+ channel_type, duration, &cookie);
+
+ if (err)
+ goto free_msg;
+
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+ genlmsg_end(msg, hdr);
+ err = genlmsg_reply(msg, info);
+ goto out;
+
+ nla_put_failure:
+ err = -ENOBUFS;
+ free_msg:
+ nlmsg_free(msg);
+ out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_cancel_remain_on_channel(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ u64 cookie;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_COOKIE])
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!rdev->ops->cancel_remain_on_channel) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);
+
+ err = rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
+
+ out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
+ u8 *rates, u8 rates_len)
+{
+ u8 i;
+ u32 mask = 0;
+
+ for (i = 0; i < rates_len; i++) {
+ int rate = (rates[i] & 0x7f) * 5;
+ int ridx;
+ for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+ struct ieee80211_rate *srate =
+ &sband->bitrates[ridx];
+ if (rate == srate->bitrate) {
+ mask |= 1 << ridx;
+ break;
+ }
+ }
+ if (ridx == sband->n_bitrates)
+ return 0; /* rate not found */
+ }
+
+ return mask;
+}
+
+static struct nla_policy
+nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] __read_mostly = {
+ [NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
+ .len = NL80211_MAX_SUPP_RATES },
+};
+
+static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct nlattr *tb[NL80211_TXRATE_MAX + 1];
+ struct cfg80211_registered_device *rdev;
+ struct cfg80211_bitrate_mask mask;
+ int err, rem, i;
+ struct net_device *dev;
+ struct nlattr *tx_rates;
+ struct ieee80211_supported_band *sband;
+
+ if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (!rdev->ops->set_bitrate_mask) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ memset(&mask, 0, sizeof(mask));
+ /* Default to all rates enabled */
+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ sband = rdev->wiphy.bands[i];
+ mask.control[i].legacy =
+ sband ? (1 << sband->n_bitrates) - 1 : 0;
+ }
+
+ /*
+ * The nested attribute uses enum nl80211_band as the index. This maps
+ * directly to the enum ieee80211_band values used in cfg80211.
+ */
+ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
+ {
+ enum ieee80211_band band = nla_type(tx_rates);
+ if (band < 0 || band >= IEEE80211_NUM_BANDS) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ sband = rdev->wiphy.bands[band];
+ if (sband == NULL) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+ nla_len(tx_rates), nl80211_txattr_policy);
+ if (tb[NL80211_TXRATE_LEGACY]) {
+ mask.control[band].legacy = rateset_to_mask(
+ sband,
+ nla_data(tb[NL80211_TXRATE_LEGACY]),
+ nla_len(tb[NL80211_TXRATE_LEGACY]));
+ if (mask.control[band].legacy == 0) {
+ err = -EINVAL;
+ goto unlock;
+ }
+ }
+ }
+
+ err = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask);
+
+ unlock:
+ dev_put(dev);
+ cfg80211_unlock_rdev(rdev);
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
static struct genl_ops nl80211_ops[] = {
{
.cmd = NL80211_CMD_GET_WIPHY,
@@ -4584,8 +4809,26 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
},
-
+ {
+ .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
+ .doit = nl80211_remain_on_channel,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
+ .doit = nl80211_cancel_remain_on_channel,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
+ .doit = nl80211_set_tx_bitrate_mask,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
};
+
static struct genl_multicast_group nl80211_mlme_mcgrp = {
.name = "mlme",
};
@@ -5173,6 +5416,89 @@ nla_put_failure:
nlmsg_free(msg);
}
+static void nl80211_send_remain_on_chan_event(
+ int cmd, struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
+ NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
+
+ if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
+ NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+}
+
+void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp)
+{
+ nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
+ rdev, netdev, cookie, chan,
+ channel_type, duration, gfp);
+}
+
+void nl80211_send_remain_on_channel_cancel(
+ struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ u64 cookie, struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type, gfp_t gfp)
+{
+ nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
+ rdev, netdev, cookie, chan,
+ channel_type, 0, gfp);
+}
+
+void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp)
+{
+ struct sk_buff *msg;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+}
+
/* initialisation/exit functions */
int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 44cc2a76a1b0..14855b8fb430 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -59,4 +59,19 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
gfp_t gfp);
+void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ u64 cookie,
+ struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type,
+ unsigned int duration, gfp_t gfp);
+void nl80211_send_remain_on_channel_cancel(
+ struct cfg80211_registered_device *rdev, struct net_device *netdev,
+ u64 cookie, struct ieee80211_channel *chan,
+ enum nl80211_channel_type channel_type, gfp_t gfp);
+
+void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *mac_addr,
+ struct station_info *sinfo, gfp_t gfp);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index baa898add287..a5c2d3a6cbb2 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -40,8 +40,18 @@
#include <net/cfg80211.h>
#include "core.h"
#include "reg.h"
+#include "regdb.h"
#include "nl80211.h"
+#ifdef CONFIG_CFG80211_REG_DEBUG
+#define REG_DBG_PRINT(format, args...) \
+ do { \
+ printk(KERN_DEBUG format , ## args); \
+ } while (0)
+#else
+#define REG_DBG_PRINT(args...)
+#endif
+
/* Receipt of information from last regulatory request */
static struct regulatory_request *last_request;
@@ -128,78 +138,6 @@ static char *ieee80211_regdom = "00";
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
-#ifdef CONFIG_WIRELESS_OLD_REGULATORY
-/*
- * We assume 40 MHz bandwidth for the old regulatory work.
- * We make emphasis we are using the exact same frequencies
- * as before
- */
-
-static const struct ieee80211_regdomain us_regdom = {
- .n_reg_rules = 6,
- .alpha2 = "US",
- .reg_rules = {
- /* IEEE 802.11b/g, channels 1..11 */
- REG_RULE(2412-10, 2462+10, 40, 6, 27, 0),
- /* IEEE 802.11a, channel 36..48 */
- REG_RULE(5180-10, 5240+10, 40, 6, 17, 0),
- /* IEEE 802.11a, channels 48..64 */
- REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 100..124 */
- REG_RULE(5500-10, 5590+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 132..144 */
- REG_RULE(5660-10, 5700+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 149..165, outdoor */
- REG_RULE(5745-10, 5825+10, 40, 6, 30, 0),
- }
-};
-
-static const struct ieee80211_regdomain jp_regdom = {
- .n_reg_rules = 6,
- .alpha2 = "JP",
- .reg_rules = {
- /* IEEE 802.11b/g, channels 1..11 */
- REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
- /* IEEE 802.11b/g, channels 12..13 */
- REG_RULE(2467-10, 2472+10, 20, 6, 20, 0),
- /* IEEE 802.11b/g, channel 14 */
- REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_OFDM),
- /* IEEE 802.11a, channels 36..48 */
- REG_RULE(5180-10, 5240+10, 40, 6, 20, 0),
- /* IEEE 802.11a, channels 52..64 */
- REG_RULE(5260-10, 5320+10, 40, 6, 20, NL80211_RRF_DFS),
- /* IEEE 802.11a, channels 100..144 */
- REG_RULE(5500-10, 5700+10, 40, 6, 23, NL80211_RRF_DFS),
- }
-};
-
-static const struct ieee80211_regdomain *static_regdom(char *alpha2)
-{
- if (alpha2[0] == 'U' && alpha2[1] == 'S')
- return &us_regdom;
- if (alpha2[0] == 'J' && alpha2[1] == 'P')
- return &jp_regdom;
- /* Use world roaming rules for "EU", since it was a pseudo
- domain anyway... */
- if (alpha2[0] == 'E' && alpha2[1] == 'U')
- return &world_regdom;
- /* Default, world roaming rules */
- return &world_regdom;
-}
-
-static bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
-{
- if (rd == &us_regdom || rd == &jp_regdom || rd == &world_regdom)
- return true;
- return false;
-}
-#else
-static inline bool is_old_static_regdom(const struct ieee80211_regdomain *rd)
-{
- return false;
-}
-#endif
-
static void reset_regdomains(void)
{
/* avoid freeing static information or freeing something twice */
@@ -209,8 +147,6 @@ static void reset_regdomains(void)
cfg80211_world_regdom = NULL;
if (cfg80211_regdomain == &world_regdom)
cfg80211_regdomain = NULL;
- if (is_old_static_regdom(cfg80211_regdomain))
- cfg80211_regdomain = NULL;
kfree(cfg80211_regdomain);
kfree(cfg80211_world_regdom);
@@ -335,6 +271,98 @@ static bool country_ie_integrity_changes(u32 checksum)
return false;
}
+static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
+ const struct ieee80211_regdomain *src_regd)
+{
+ struct ieee80211_regdomain *regd;
+ int size_of_regd = 0;
+ unsigned int i;
+
+ size_of_regd = sizeof(struct ieee80211_regdomain) +
+ ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
+
+ regd = kzalloc(size_of_regd, GFP_KERNEL);
+ if (!regd)
+ return -ENOMEM;
+
+ memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
+
+ for (i = 0; i < src_regd->n_reg_rules; i++)
+ memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+
+ *dst_regd = regd;
+ return 0;
+}
+
+#ifdef CONFIG_CFG80211_INTERNAL_REGDB
+struct reg_regdb_search_request {
+ char alpha2[2];
+ struct list_head list;
+};
+
+static LIST_HEAD(reg_regdb_search_list);
+static DEFINE_SPINLOCK(reg_regdb_search_lock);
+
+static void reg_regdb_search(struct work_struct *work)
+{
+ struct reg_regdb_search_request *request;
+ const struct ieee80211_regdomain *curdom, *regdom;
+ int i, r;
+
+ spin_lock(&reg_regdb_search_lock);
+ while (!list_empty(&reg_regdb_search_list)) {
+ request = list_first_entry(&reg_regdb_search_list,
+ struct reg_regdb_search_request,
+ list);
+ list_del(&request->list);
+
+ for (i=0; i<reg_regdb_size; i++) {
+ curdom = reg_regdb[i];
+
+ if (!memcmp(request->alpha2, curdom->alpha2, 2)) {
+ r = reg_copy_regd(&regdom, curdom);
+ if (r)
+ break;
+ spin_unlock(&reg_regdb_search_lock);
+ mutex_lock(&cfg80211_mutex);
+ set_regdom(regdom);
+ mutex_unlock(&cfg80211_mutex);
+ spin_lock(&reg_regdb_search_lock);
+ break;
+ }
+ }
+
+ kfree(request);
+ }
+ spin_unlock(&reg_regdb_search_lock);
+}
+
+static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
+
+static void reg_regdb_query(const char *alpha2)
+{
+ struct reg_regdb_search_request *request;
+
+ if (!alpha2)
+ return;
+
+ request = kzalloc(sizeof(struct reg_regdb_search_request), GFP_KERNEL);
+ if (!request)
+ return;
+
+ memcpy(request->alpha2, alpha2, 2);
+
+ spin_lock(&reg_regdb_search_lock);
+ list_add_tail(&request->list, &reg_regdb_search_list);
+ spin_unlock(&reg_regdb_search_lock);
+
+ schedule_work(&reg_regdb_work);
+}
+#else
+static inline void reg_regdb_query(const char *alpha2) {}
+#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
+
/*
* This lets us keep regulatory code which is updated on a regulatory
* basis in userspace.
@@ -354,6 +382,9 @@ static int call_crda(const char *alpha2)
printk(KERN_INFO "cfg80211: Calling CRDA to update world "
"regulatory domain\n");
+ /* query internal regulatory database (if it exists) */
+ reg_regdb_query(alpha2);
+
country_env[8] = alpha2[0];
country_env[9] = alpha2[1];
@@ -454,6 +485,178 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
}
/*
+ * Some APs may send a country IE triplet for each channel they
+ * support and while this is completely overkill and silly we still
+ * need to support it. We avoid making a single rule for each channel
+ * though and to help us with this we use this helper to find the
+ * actual subband end channel. These type of country IE triplet
+ * scenerios are handled then, all yielding two regulaotry rules from
+ * parsing a country IE:
+ *
+ * [1]
+ * [2]
+ * [36]
+ * [40]
+ *
+ * [1]
+ * [2-4]
+ * [5-12]
+ * [36]
+ * [40-44]
+ *
+ * [1-4]
+ * [5-7]
+ * [36-44]
+ * [48-64]
+ *
+ * [36-36]
+ * [40-40]
+ * [44-44]
+ * [48-48]
+ * [52-52]
+ * [56-56]
+ * [60-60]
+ * [64-64]
+ * [100-100]
+ * [104-104]
+ * [108-108]
+ * [112-112]
+ * [116-116]
+ * [120-120]
+ * [124-124]
+ * [128-128]
+ * [132-132]
+ * [136-136]
+ * [140-140]
+ *
+ * Returns 0 if the IE has been found to be invalid in the middle
+ * somewhere.
+ */
+static int max_subband_chan(int orig_cur_chan,
+ int orig_end_channel,
+ s8 orig_max_power,
+ u8 **country_ie,
+ u8 *country_ie_len)
+{
+ u8 *triplets_start = *country_ie;
+ u8 len_at_triplet = *country_ie_len;
+ int end_subband_chan = orig_end_channel;
+ enum ieee80211_band band;
+
+ /*
+ * We'll deal with padding for the caller unless
+ * its not immediate and we don't process any channels
+ */
+ if (*country_ie_len == 1) {
+ *country_ie += 1;
+ *country_ie_len -= 1;
+ return orig_end_channel;
+ }
+
+ /* Move to the next triplet and then start search */
+ *country_ie += 3;
+ *country_ie_len -= 3;
+
+ if (orig_cur_chan <= 14)
+ band = IEEE80211_BAND_2GHZ;
+ else
+ band = IEEE80211_BAND_5GHZ;
+
+ while (*country_ie_len >= 3) {
+ int end_channel = 0;
+ struct ieee80211_country_ie_triplet *triplet =
+ (struct ieee80211_country_ie_triplet *) *country_ie;
+ int cur_channel = 0, next_expected_chan;
+ enum ieee80211_band next_band = IEEE80211_BAND_2GHZ;
+
+ /* means last triplet is completely unrelated to this one */
+ if (triplet->ext.reg_extension_id >=
+ IEEE80211_COUNTRY_EXTENSION_ID) {
+ *country_ie -= 3;
+ *country_ie_len += 3;
+ break;
+ }
+
+ if (triplet->chans.first_channel == 0) {
+ *country_ie += 1;
+ *country_ie_len -= 1;
+ if (*country_ie_len != 0)
+ return 0;
+ break;
+ }
+
+ /* Monitonically increasing channel order */
+ if (triplet->chans.first_channel <= end_subband_chan)
+ return 0;
+
+ /* 2 GHz */
+ if (triplet->chans.first_channel <= 14) {
+ end_channel = triplet->chans.first_channel +
+ triplet->chans.num_channels - 1;
+ }
+ else {
+ end_channel = triplet->chans.first_channel +
+ (4 * (triplet->chans.num_channels - 1));
+ next_band = IEEE80211_BAND_5GHZ;
+ }
+
+ if (band != next_band) {
+ *country_ie -= 3;
+ *country_ie_len += 3;
+ break;
+ }
+
+ if (orig_max_power != triplet->chans.max_power) {
+ *country_ie -= 3;
+ *country_ie_len += 3;
+ break;
+ }
+
+ cur_channel = triplet->chans.first_channel;
+
+ /* The key is finding the right next expected channel */
+ if (band == IEEE80211_BAND_2GHZ)
+ next_expected_chan = end_subband_chan + 1;
+ else
+ next_expected_chan = end_subband_chan + 4;
+
+ if (cur_channel != next_expected_chan) {
+ *country_ie -= 3;
+ *country_ie_len += 3;
+ break;
+ }
+
+ end_subband_chan = end_channel;
+
+ /* Move to the next one */
+ *country_ie += 3;
+ *country_ie_len -= 3;
+
+ /*
+ * Padding needs to be dealt with if we processed
+ * some channels.
+ */
+ if (*country_ie_len == 1) {
+ *country_ie += 1;
+ *country_ie_len -= 1;
+ break;
+ }
+
+ /* If seen, the IE is invalid */
+ if (*country_ie_len == 2)
+ return 0;
+ }
+
+ if (end_subband_chan == orig_end_channel) {
+ *country_ie = triplets_start;
+ *country_ie_len = len_at_triplet;
+ return orig_end_channel;
+ }
+
+ return end_subband_chan;
+}
+
+/*
* Converts a country IE to a regulatory domain. A regulatory domain
* structure has a lot of information which the IE doesn't yet have,
* so for the other values we use upper max values as we will intersect
@@ -521,6 +724,19 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}
+ /*
+ * APs can add padding to make length divisible
+ * by two, required by the spec.
+ */
+ if (triplet->chans.first_channel == 0) {
+ country_ie++;
+ country_ie_len--;
+ /* This is expected to be at the very end only */
+ if (country_ie_len != 0)
+ return NULL;
+ break;
+ }
+
/* 2 GHz */
if (triplet->chans.first_channel <= 14)
end_channel = triplet->chans.first_channel +
@@ -539,6 +755,20 @@ static struct ieee80211_regdomain *country_ie_2_rd(
(4 * (triplet->chans.num_channels - 1));
cur_channel = triplet->chans.first_channel;
+
+ /*
+ * Enhancement for APs that send a triplet for every channel
+ * or for whatever reason sends triplets with multiple channels
+ * separated when in fact they should be together.
+ */
+ end_channel = max_subband_chan(cur_channel,
+ end_channel,
+ triplet->chans.max_power,
+ &country_ie,
+ &country_ie_len);
+ if (!end_channel)
+ return NULL;
+
cur_sub_max_channel = end_channel;
/* Basic sanity check */
@@ -569,10 +799,13 @@ static struct ieee80211_regdomain *country_ie_2_rd(
last_sub_max_channel = cur_sub_max_channel;
- country_ie += 3;
- country_ie_len -= 3;
num_rules++;
+ if (country_ie_len >= 3) {
+ country_ie += 3;
+ country_ie_len -= 3;
+ }
+
/*
* Note: this is not a IEEE requirement but
* simply a memory requirement
@@ -615,6 +848,12 @@ static struct ieee80211_regdomain *country_ie_2_rd(
continue;
}
+ if (triplet->chans.first_channel == 0) {
+ country_ie++;
+ country_ie_len--;
+ break;
+ }
+
reg_rule = &rd->reg_rules[i];
freq_range = &reg_rule->freq_range;
power_rule = &reg_rule->power_rule;
@@ -629,6 +868,12 @@ static struct ieee80211_regdomain *country_ie_2_rd(
end_channel = triplet->chans.first_channel +
(4 * (triplet->chans.num_channels - 1));
+ end_channel = max_subband_chan(triplet->chans.first_channel,
+ end_channel,
+ triplet->chans.max_power,
+ &country_ie,
+ &country_ie_len);
+
/*
* The +10 is since the regulatory domain expects
* the actual band edge, not the center of freq for
@@ -649,12 +894,15 @@ static struct ieee80211_regdomain *country_ie_2_rd(
*/
freq_range->max_bandwidth_khz = MHZ_TO_KHZ(40);
power_rule->max_antenna_gain = DBI_TO_MBI(100);
- power_rule->max_eirp = DBM_TO_MBM(100);
+ power_rule->max_eirp = DBM_TO_MBM(triplet->chans.max_power);
- country_ie += 3;
- country_ie_len -= 3;
i++;
+ if (country_ie_len >= 3) {
+ country_ie += 3;
+ country_ie_len -= 3;
+ }
+
BUG_ON(i > NL80211_MAX_SUPP_REG_RULES);
}
@@ -950,25 +1198,21 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
if (r == -ERANGE &&
last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE) {
-#ifdef CONFIG_CFG80211_REG_DEBUG
- printk(KERN_DEBUG "cfg80211: Leaving channel %d MHz "
+ REG_DBG_PRINT("cfg80211: Leaving channel %d MHz "
"intact on %s - no rule found in band on "
"Country IE\n",
- chan->center_freq, wiphy_name(wiphy));
-#endif
+ chan->center_freq, wiphy_name(wiphy));
} else {
/*
* In this case we know the country IE has at least one reg rule
* for the band so we respect its band definitions
*/
-#ifdef CONFIG_CFG80211_REG_DEBUG
if (last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE)
- printk(KERN_DEBUG "cfg80211: Disabling "
+ REG_DBG_PRINT("cfg80211: Disabling "
"channel %d MHz on %s due to "
"Country IE\n",
chan->center_freq, wiphy_name(wiphy));
-#endif
flags |= IEEE80211_CHAN_DISABLED;
chan->flags = flags;
}
@@ -1342,30 +1586,6 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
}
EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
-static int reg_copy_regd(const struct ieee80211_regdomain **dst_regd,
- const struct ieee80211_regdomain *src_regd)
-{
- struct ieee80211_regdomain *regd;
- int size_of_regd = 0;
- unsigned int i;
-
- size_of_regd = sizeof(struct ieee80211_regdomain) +
- ((src_regd->n_reg_rules + 1) * sizeof(struct ieee80211_reg_rule));
-
- regd = kzalloc(size_of_regd, GFP_KERNEL);
- if (!regd)
- return -ENOMEM;
-
- memcpy(regd, src_regd, sizeof(struct ieee80211_regdomain));
-
- for (i = 0; i < src_regd->n_reg_rules; i++)
- memcpy(&regd->reg_rules[i], &src_regd->reg_rules[i],
- sizeof(struct ieee80211_reg_rule));
-
- *dst_regd = regd;
- return 0;
-}
-
/*
* Return value which can be used by ignore_request() to indicate
* it has been determined we should intersect two regulatory domains
@@ -1418,8 +1638,6 @@ static int ignore_request(struct wiphy *wiphy,
return REG_INTERSECT;
case NL80211_REGDOM_SET_BY_DRIVER:
if (last_request->initiator == NL80211_REGDOM_SET_BY_CORE) {
- if (is_old_static_regdom(cfg80211_regdomain))
- return 0;
if (regdom_changes(pending_request->alpha2))
return 0;
return -EALREADY;
@@ -1456,8 +1674,7 @@ static int ignore_request(struct wiphy *wiphy,
return -EAGAIN;
}
- if (!is_old_static_regdom(cfg80211_regdomain) &&
- !regdom_changes(pending_request->alpha2))
+ if (!regdom_changes(pending_request->alpha2))
return -EALREADY;
return 0;
@@ -1690,7 +1907,7 @@ int regulatory_hint_user(const char *alpha2)
request->wiphy_idx = WIPHY_IDX_STALE;
request->alpha2[0] = alpha2[0];
request->alpha2[1] = alpha2[1];
- request->initiator = NL80211_REGDOM_SET_BY_USER,
+ request->initiator = NL80211_REGDOM_SET_BY_USER;
queue_regulatory_request(request);
@@ -1806,8 +2023,10 @@ void regulatory_hint_11d(struct wiphy *wiphy,
goto out;
rd = country_ie_2_rd(country_ie, country_ie_len, &checksum);
- if (!rd)
+ if (!rd) {
+ REG_DBG_PRINT("cfg80211: Ignoring bogus country IE\n");
goto out;
+ }
/*
* This will not happen right now but we leave it here for the
@@ -1875,13 +2094,12 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
if (!reg_beacon)
return -ENOMEM;
-#ifdef CONFIG_CFG80211_REG_DEBUG
- printk(KERN_DEBUG "cfg80211: Found new beacon on "
- "frequency: %d MHz (Ch %d) on %s\n",
- beacon_chan->center_freq,
- ieee80211_frequency_to_channel(beacon_chan->center_freq),
- wiphy_name(wiphy));
-#endif
+ REG_DBG_PRINT("cfg80211: Found new beacon on "
+ "frequency: %d MHz (Ch %d) on %s\n",
+ beacon_chan->center_freq,
+ ieee80211_frequency_to_channel(beacon_chan->center_freq),
+ wiphy_name(wiphy));
+
memcpy(&reg_beacon->chan, beacon_chan,
sizeof(struct ieee80211_channel));
@@ -2039,8 +2257,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
* If someone else asked us to change the rd lets only bother
* checking if the alpha2 changes if CRDA was already called
*/
- if (!is_old_static_regdom(cfg80211_regdomain) &&
- !regdom_changes(rd->alpha2))
+ if (!regdom_changes(rd->alpha2))
return -EINVAL;
}
@@ -2239,15 +2456,8 @@ int regulatory_init(void)
spin_lock_init(&reg_requests_lock);
spin_lock_init(&reg_pending_beacons_lock);
-#ifdef CONFIG_WIRELESS_OLD_REGULATORY
- cfg80211_regdomain = static_regdom(ieee80211_regdom);
-
- printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
- print_regdomain_info(cfg80211_regdomain);
-#else
cfg80211_regdomain = cfg80211_world_regdom;
-#endif
/* We always try to get an update for the static regdomain */
err = regulatory_hint_core(cfg80211_regdomain->alpha2);
if (err) {
diff --git a/net/wireless/regdb.h b/net/wireless/regdb.h
new file mode 100644
index 000000000000..818222c92513
--- /dev/null
+++ b/net/wireless/regdb.h
@@ -0,0 +1,7 @@
+#ifndef __REGDB_H__
+#define __REGDB_H__
+
+extern const struct ieee80211_regdomain *reg_regdb[];
+extern int reg_regdb_size;
+
+#endif /* __REGDB_H__ */
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 0c2cbbebca95..06b0231ee5e3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -100,8 +100,10 @@ static void bss_release(struct kref *ref)
if (bss->pub.free_priv)
bss->pub.free_priv(&bss->pub);
- if (bss->ies_allocated)
- kfree(bss->pub.information_elements);
+ if (bss->beacon_ies_allocated)
+ kfree(bss->pub.beacon_ies);
+ if (bss->proberesp_ies_allocated)
+ kfree(bss->pub.proberesp_ies);
BUG_ON(atomic_read(&bss->hold));
@@ -375,8 +377,7 @@ rb_find_bss(struct cfg80211_registered_device *dev,
static struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *dev,
- struct cfg80211_internal_bss *res,
- bool overwrite)
+ struct cfg80211_internal_bss *res)
{
struct cfg80211_internal_bss *found = NULL;
const u8 *meshid, *meshcfg;
@@ -418,28 +419,64 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found->pub.capability = res->pub.capability;
found->ts = res->ts;
- /* overwrite IEs */
- if (overwrite) {
+ /* Update IEs */
+ if (res->pub.proberesp_ies) {
size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
- size_t ielen = res->pub.len_information_elements;
+ size_t ielen = res->pub.len_proberesp_ies;
+
+ if (found->pub.proberesp_ies &&
+ !found->proberesp_ies_allocated &&
+ ksize(found) >= used + ielen) {
+ memcpy(found->pub.proberesp_ies,
+ res->pub.proberesp_ies, ielen);
+ found->pub.len_proberesp_ies = ielen;
+ } else {
+ u8 *ies = found->pub.proberesp_ies;
+
+ if (found->proberesp_ies_allocated)
+ ies = krealloc(ies, ielen, GFP_ATOMIC);
+ else
+ ies = kmalloc(ielen, GFP_ATOMIC);
+
+ if (ies) {
+ memcpy(ies, res->pub.proberesp_ies,
+ ielen);
+ found->proberesp_ies_allocated = true;
+ found->pub.proberesp_ies = ies;
+ found->pub.len_proberesp_ies = ielen;
+ }
+ }
- if (!found->ies_allocated && ksize(found) >= used + ielen) {
- memcpy(found->pub.information_elements,
- res->pub.information_elements, ielen);
- found->pub.len_information_elements = ielen;
+ /* Override possible earlier Beacon frame IEs */
+ found->pub.information_elements =
+ found->pub.proberesp_ies;
+ found->pub.len_information_elements =
+ found->pub.len_proberesp_ies;
+ }
+ if (res->pub.beacon_ies) {
+ size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
+ size_t ielen = res->pub.len_beacon_ies;
+
+ if (found->pub.beacon_ies &&
+ !found->beacon_ies_allocated &&
+ ksize(found) >= used + ielen) {
+ memcpy(found->pub.beacon_ies,
+ res->pub.beacon_ies, ielen);
+ found->pub.len_beacon_ies = ielen;
} else {
- u8 *ies = found->pub.information_elements;
+ u8 *ies = found->pub.beacon_ies;
- if (found->ies_allocated)
+ if (found->beacon_ies_allocated)
ies = krealloc(ies, ielen, GFP_ATOMIC);
else
ies = kmalloc(ielen, GFP_ATOMIC);
if (ies) {
- memcpy(ies, res->pub.information_elements, ielen);
- found->ies_allocated = true;
- found->pub.information_elements = ies;
- found->pub.len_information_elements = ielen;
+ memcpy(ies, res->pub.beacon_ies,
+ ielen);
+ found->beacon_ies_allocated = true;
+ found->pub.beacon_ies = ies;
+ found->pub.len_beacon_ies = ielen;
}
}
}
@@ -489,14 +526,26 @@ cfg80211_inform_bss(struct wiphy *wiphy,
res->pub.tsf = timestamp;
res->pub.beacon_interval = beacon_interval;
res->pub.capability = capability;
- /* point to after the private area */
- res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz;
- memcpy(res->pub.information_elements, ie, ielen);
- res->pub.len_information_elements = ielen;
+ /*
+ * Since we do not know here whether the IEs are from a Beacon or Probe
+ * Response frame, we need to pick one of the options and only use it
+ * with the driver that does not provide the full Beacon/Probe Response
+ * frame. Use Beacon frame pointer to avoid indicating that this should
+ * override the information_elements pointer should we have received an
+ * earlier indication of Probe Response data.
+ *
+ * The initial buffer for the IEs is allocated with the BSS entry and
+ * is located after the private area.
+ */
+ res->pub.beacon_ies = (u8 *)res + sizeof(*res) + privsz;
+ memcpy(res->pub.beacon_ies, ie, ielen);
+ res->pub.len_beacon_ies = ielen;
+ res->pub.information_elements = res->pub.beacon_ies;
+ res->pub.len_information_elements = res->pub.len_beacon_ies;
kref_init(&res->ref);
- res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, 0);
+ res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
if (!res)
return NULL;
@@ -517,7 +566,6 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct cfg80211_internal_bss *res;
size_t ielen = len - offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
- bool overwrite;
size_t privsz = wiphy->bss_priv_size;
if (WARN_ON(wiphy->signal_type == NL80211_BSS_SIGNAL_UNSPEC &&
@@ -538,16 +586,28 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
res->pub.tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
res->pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
res->pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
- /* point to after the private area */
- res->pub.information_elements = (u8 *)res + sizeof(*res) + privsz;
- memcpy(res->pub.information_elements, mgmt->u.probe_resp.variable, ielen);
- res->pub.len_information_elements = ielen;
+ /*
+ * The initial buffer for the IEs is allocated with the BSS entry and
+ * is located after the private area.
+ */
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ res->pub.proberesp_ies = (u8 *) res + sizeof(*res) + privsz;
+ memcpy(res->pub.proberesp_ies, mgmt->u.probe_resp.variable,
+ ielen);
+ res->pub.len_proberesp_ies = ielen;
+ res->pub.information_elements = res->pub.proberesp_ies;
+ res->pub.len_information_elements = res->pub.len_proberesp_ies;
+ } else {
+ res->pub.beacon_ies = (u8 *) res + sizeof(*res) + privsz;
+ memcpy(res->pub.beacon_ies, mgmt->u.beacon.variable, ielen);
+ res->pub.len_beacon_ies = ielen;
+ res->pub.information_elements = res->pub.beacon_ies;
+ res->pub.len_information_elements = res->pub.len_beacon_ies;
+ }
kref_init(&res->ref);
- overwrite = ieee80211_is_probe_resp(mgmt->frame_control);
-
- res = cfg80211_bss_update(wiphy_to_dev(wiphy), res, overwrite);
+ res = cfg80211_bss_update(wiphy_to_dev(wiphy), res);
if (!res)
return NULL;
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 59361fdcb5d0..23557c1d0a9c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -285,7 +285,7 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
}
}
-int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
+int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -383,7 +383,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
}
EXPORT_SYMBOL(ieee80211_data_to_8023);
-int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
+int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
enum nl80211_iftype iftype, u8 *bssid, bool qos)
{
struct ieee80211_hdr hdr;
@@ -497,6 +497,101 @@ int ieee80211_data_from_8023(struct sk_buff *skb, u8 *addr,
}
EXPORT_SYMBOL(ieee80211_data_from_8023);
+
+void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
+ const u8 *addr, enum nl80211_iftype iftype,
+ const unsigned int extra_headroom)
+{
+ struct sk_buff *frame = NULL;
+ u16 ethertype;
+ u8 *payload;
+ const struct ethhdr *eth;
+ int remaining, err;
+ u8 dst[ETH_ALEN], src[ETH_ALEN];
+
+ err = ieee80211_data_to_8023(skb, addr, iftype);
+ if (err)
+ goto out;
+
+ /* skip the wrapping header */
+ eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
+ if (!eth)
+ goto out;
+
+ while (skb != frame) {
+ u8 padding;
+ __be16 len = eth->h_proto;
+ unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
+
+ remaining = skb->len;
+ memcpy(dst, eth->h_dest, ETH_ALEN);
+ memcpy(src, eth->h_source, ETH_ALEN);
+
+ padding = (4 - subframe_len) & 0x3;
+ /* the last MSDU has no padding */
+ if (subframe_len > remaining)
+ goto purge;
+
+ skb_pull(skb, sizeof(struct ethhdr));
+ /* reuse skb for the last subframe */
+ if (remaining <= subframe_len + padding)
+ frame = skb;
+ else {
+ unsigned int hlen = ALIGN(extra_headroom, 4);
+ /*
+ * Allocate and reserve two bytes more for payload
+ * alignment since sizeof(struct ethhdr) is 14.
+ */
+ frame = dev_alloc_skb(hlen + subframe_len + 2);
+ if (!frame)
+ goto purge;
+
+ skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
+ memcpy(skb_put(frame, ntohs(len)), skb->data,
+ ntohs(len));
+
+ eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
+ padding);
+ if (!eth) {
+ dev_kfree_skb(frame);
+ goto purge;
+ }
+ }
+
+ skb_reset_network_header(frame);
+ frame->dev = skb->dev;
+ frame->priority = skb->priority;
+
+ payload = frame->data;
+ ethertype = (payload[6] << 8) | payload[7];
+
+ if (likely((compare_ether_addr(payload, rfc1042_header) == 0 &&
+ ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+ compare_ether_addr(payload,
+ bridge_tunnel_header) == 0)) {
+ /* remove RFC1042 or Bridge-Tunnel
+ * encapsulation and replace EtherType */
+ skb_pull(frame, 6);
+ memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
+ memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
+ } else {
+ memcpy(skb_push(frame, sizeof(__be16)), &len,
+ sizeof(__be16));
+ memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
+ memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
+ }
+ __skb_queue_tail(list, frame);
+ }
+
+ return;
+
+ purge:
+ __skb_queue_purge(list);
+ out:
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(ieee80211_amsdu_to_8023s);
+
/* Given a data frame determine the 802.1p/1d tag to use. */
unsigned int cfg80211_classify8021d(struct sk_buff *skb)
{
@@ -720,3 +815,36 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
return err;
}
+
+u16 cfg80211_calculate_bitrate(struct rate_info *rate)
+{
+ int modulation, streams, bitrate;
+
+ if (!(rate->flags & RATE_INFO_FLAGS_MCS))
+ return rate->legacy;
+
+ /* the formula below does only work for MCS values smaller than 32 */
+ if (rate->mcs >= 32)
+ return 0;
+
+ modulation = rate->mcs & 7;
+ streams = (rate->mcs >> 3) + 1;
+
+ bitrate = (rate->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) ?
+ 13500000 : 6500000;
+
+ if (modulation < 4)
+ bitrate *= (modulation + 1);
+ else if (modulation == 4)
+ bitrate *= (modulation + 2);
+ else
+ bitrate *= (modulation + 3);
+
+ bitrate *= streams;
+
+ if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
+ bitrate = (bitrate / 9) * 10;
+
+ /* do NOT round down here */
+ return (bitrate + 50000) / 100000;
+}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 54face3d4424..966d2f01beac 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1204,21 +1204,47 @@ int cfg80211_wext_siwrate(struct net_device *dev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_bitrate_mask mask;
+ u32 fixed, maxrate;
+ struct ieee80211_supported_band *sband;
+ int band, ridx;
+ bool match = false;
if (!rdev->ops->set_bitrate_mask)
return -EOPNOTSUPP;
- mask.fixed = 0;
- mask.maxrate = 0;
+ memset(&mask, 0, sizeof(mask));
+ fixed = 0;
+ maxrate = 0;
if (rate->value < 0) {
/* nothing */
} else if (rate->fixed) {
- mask.fixed = rate->value / 1000; /* kbps */
+ fixed = rate->value / 100000;
} else {
- mask.maxrate = rate->value / 1000; /* kbps */
+ maxrate = rate->value / 100000;
}
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ sband = wdev->wiphy->bands[band];
+ if (sband == NULL)
+ continue;
+ for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
+ struct ieee80211_rate *srate = &sband->bitrates[ridx];
+ if (fixed == srate->bitrate) {
+ mask.control[band].legacy = 1 << ridx;
+ match = true;
+ break;
+ }
+ if (srate->bitrate <= maxrate) {
+ mask.control[band].legacy |= 1 << ridx;
+ match = true;
+ }
+ }
+ }
+
+ if (!match)
+ return -EINVAL;
+
return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
@@ -1257,10 +1283,7 @@ int cfg80211_wext_giwrate(struct net_device *dev,
if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
return -EOPNOTSUPP;
- rate->value = 0;
-
- if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
- rate->value = 100000 * sinfo.txrate.legacy;
+ rate->value = 100000 * cfg80211_calculate_bitrate(&sinfo.txrate);
return 0;
}
diff --git a/scripts/.gitignore b/scripts/.gitignore
index 52cab46ae35a..c5d5db54c009 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -6,5 +6,4 @@ kallsyms
pnmtologo
bin2c
unifdef
-binoffset
ihex2fw
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 0b94d2fa3a88..e4deb73e9a84 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -82,7 +82,7 @@ ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),)
lib-target := $(obj)/lib.a
endif
-ifneq ($(strip $(obj-y) $(obj-m) $(obj-n) $(obj-) $(lib-target)),)
+ifneq ($(strip $(obj-y) $(obj-m) $(obj-n) $(obj-) $(subdir-m) $(lib-target)),)
builtin-target := $(obj)/built-in.o
endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index cd815ac2a50b..f9bdf264473d 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -219,8 +219,13 @@ for F in $1; do \
fsize=$$(stat -c "%s" $$F); \
dec_size=$$(expr $$dec_size + $$fsize); \
done; \
-printf "%08x" $$dec_size | \
- sed 's/\(..\)\(..\)\(..\)\(..\)/\\\\x\4\\\\x\3\\\\x\2\\\\x\1/g' \
+printf "%08x\n" $$dec_size | \
+ sed 's/\(..\)/\1 /g' | { \
+ read ch0 ch1 ch2 ch3; \
+ for ch in $$ch3 $$ch2 $$ch1 $$ch0; do \
+ printf '%s%03o' '\\' $$((0x$$ch)); \
+ done; \
+ } \
)
quiet_cmd_bzip2 = BZIP2 $@
@@ -235,3 +240,8 @@ quiet_cmd_lzma = LZMA $@
cmd_lzma = (cat $(filter-out FORCE,$^) | \
lzma -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
(rm -f $@ ; false)
+
+quiet_cmd_lzo = LZO $@
+cmd_lzo = (cat $(filter-out FORCE,$^) | \
+ lzop -9 && $(call size_append, $(filter-out FORCE,$^))) > $@ || \
+ (rm -f $@ ; false)
diff --git a/scripts/binoffset.c b/scripts/binoffset.c
deleted file mode 100644
index 1a2e39b8e3e5..000000000000
--- a/scripts/binoffset.c
+++ /dev/null
@@ -1,163 +0,0 @@
-/***************************************************************************
- * binoffset.c
- * (C) 2002 Randy Dunlap <rdunlap@xenotime.net>
-
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-
-# binoffset.c:
-# - searches a (binary) file for a specified (binary) pattern
-# - returns the offset of the located pattern or ~0 if not found
-# - exits with exit status 0 normally or non-0 if pattern is not found
-# or any other error occurs.
-
-****************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <sys/mman.h>
-
-#define VERSION "0.1"
-#define BUF_SIZE (16 * 1024)
-#define PAT_SIZE 100
-
-char *progname;
-char *inputname;
-int inputfd;
-unsigned int bix; /* buf index */
-unsigned char patterns [PAT_SIZE] = {0}; /* byte-sized pattern array */
-int pat_len; /* actual number of pattern bytes */
-unsigned char *madr; /* mmap address */
-size_t filesize;
-int num_matches = 0;
-off_t firstloc = 0;
-
-void usage (void)
-{
- fprintf (stderr, "%s ver. %s\n", progname, VERSION);
- fprintf (stderr, "usage: %s filename pattern_bytes\n",
- progname);
- fprintf (stderr, " [prints location of pattern_bytes in file]\n");
- exit (1);
-}
-
-void get_pattern (int pat_count, char *pats [])
-{
- int ix, err, tmp;
-
-#ifdef DEBUG
- fprintf (stderr,"get_pattern: count = %d\n", pat_count);
- for (ix = 0; ix < pat_count; ix++)
- fprintf (stderr, " pat # %d: [%s]\n", ix, pats[ix]);
-#endif
-
- for (ix = 0; ix < pat_count; ix++) {
- tmp = 0;
- err = sscanf (pats[ix], "%5i", &tmp);
- if (err != 1 || tmp > 0xff) {
- fprintf (stderr, "pattern or value error in pattern # %d [%s]\n",
- ix, pats[ix]);
- usage ();
- }
- patterns [ix] = tmp;
- }
- pat_len = pat_count;
-}
-
-void search_pattern (void)
-{
- for (bix = 0; bix < filesize; bix++) {
- if (madr[bix] == patterns[0]) {
- if (memcmp (&madr[bix], patterns, pat_len) == 0) {
- if (num_matches == 0)
- firstloc = bix;
- num_matches++;
- }
- }
- }
-}
-
-#ifdef NOTDEF
-size_t get_filesize (int fd)
-{
- off_t end_off = lseek (fd, 0, SEEK_END);
- lseek (fd, 0, SEEK_SET);
- return (size_t) end_off;
-}
-#endif
-
-size_t get_filesize (int fd)
-{
- int err;
- struct stat stat;
-
- err = fstat (fd, &stat);
- fprintf (stderr, "filesize: %ld\n", err < 0 ? (long)err : stat.st_size);
- if (err < 0)
- return err;
- return (size_t) stat.st_size;
-}
-
-int main (int argc, char *argv [])
-{
- progname = argv[0];
-
- if (argc < 3)
- usage ();
-
- get_pattern (argc - 2, argv + 2);
-
- inputname = argv[1];
-
- inputfd = open (inputname, O_RDONLY);
- if (inputfd == -1) {
- fprintf (stderr, "%s: cannot open '%s'\n",
- progname, inputname);
- exit (3);
- }
-
- filesize = get_filesize (inputfd);
-
- madr = mmap (0, filesize, PROT_READ, MAP_PRIVATE, inputfd, 0);
- if (madr == MAP_FAILED) {
- fprintf (stderr, "mmap error = %d\n", errno);
- close (inputfd);
- exit (4);
- }
-
- search_pattern ();
-
- if (munmap (madr, filesize))
- fprintf (stderr, "munmap error = %d\n", errno);
-
- if (close (inputfd))
- fprintf (stderr, "%s: error %d closing '%s'\n",
- progname, errno, inputname);
-
- fprintf (stderr, "number of pattern matches = %d\n", num_matches);
- if (num_matches == 0)
- firstloc = ~0;
- printf ("%ld\n", firstloc);
- fprintf (stderr, "%ld\n", firstloc);
-
- exit (num_matches ? 0 : 2);
-}
-
-/* end binoffset.c */
diff --git a/scripts/decodecode b/scripts/decodecode
index 4b00647814bc..8b30cc36744f 100755
--- a/scripts/decodecode
+++ b/scripts/decodecode
@@ -7,7 +7,7 @@
# AFLAGS=--32 decodecode < 386.oops
cleanup() {
- rm -f $T $T.s $T.o $T.oo $T.aa $T.aaa
+ rm -f $T $T.s $T.o $T.oo $T.aa $T.dis
exit 1
}
@@ -39,6 +39,29 @@ fi
echo $code
code=`echo $code | sed -e 's/.*Code: //'`
+width=`expr index "$code" ' '`
+width=$[($width-1)/2]
+case $width in
+1) type=byte ;;
+2) type=2byte ;;
+4) type=4byte ;;
+esac
+
+disas() {
+ ${CROSS_COMPILE}as $AFLAGS -o $1.o $1.s &> /dev/null
+
+ if [ "$ARCH" == "arm" ]; then
+ if [ $width == 2 ]; then
+ OBJDUMPFLAGS="-M force-thumb"
+ fi
+
+ ${CROSS_COMPILE}strip $1.o
+ fi
+
+ ${CROSS_COMPILE}objdump $OBJDUMPFLAGS -S $1.o | \
+ grep -v "/tmp\|Disassembly\|\.text\|^$" &> $1.dis
+}
+
marker=`expr index "$code" "\<"`
if [ $marker -eq 0 ]; then
marker=`expr index "$code" "\("`
@@ -49,26 +72,25 @@ if [ $marker -ne 0 ]; then
echo All code >> $T.oo
echo ======== >> $T.oo
beforemark=`echo "$code"`
- echo -n " .byte 0x" > $T.s
- echo $beforemark | sed -e 's/ /,0x/g' | sed -e 's/<//g' | sed -e 's/>//g' >> $T.s
- as $AFLAGS -o $T.o $T.s &> /dev/null
- objdump -S $T.o | grep -v "/tmp" | grep -v "Disassembly" | grep -v "\.text" | grep -v "^$" &> $T.ooo
- cat $T.ooo >> $T.oo
- rm -f $T.o $T.s $T.ooo
+ echo -n " .$type 0x" > $T.s
+ echo $beforemark | sed -e 's/ /,0x/g; s/[<>()]//g' >> $T.s
+ disas $T
+ cat $T.dis >> $T.oo
+ rm -f $T.o $T.s $T.dis
# and fix code at-and-after marker
code=`echo "$code" | cut -c$((${marker} + 1))-`
fi
echo Code starting with the faulting instruction > $T.aa
echo =========================================== >> $T.aa
-code=`echo $code | sed -e 's/ [<(]/ /;s/[>)] / /;s/ /,0x/g'`
-echo -n " .byte 0x" > $T.s
+code=`echo $code | sed -e 's/ [<(]/ /;s/[>)] / /;s/ /,0x/g; s/[>)]$//'`
+echo -n " .$type 0x" > $T.s
echo $code >> $T.s
-as $AFLAGS -o $T.o $T.s &> /dev/null
-objdump -S $T.o | grep -v "Disassembly" | grep -v "/tmp" | grep -v "\.text" | grep -v "^$" &> $T.aaa
-cat $T.aaa >> $T.aa
+disas $T
+cat $T.dis >> $T.aa
-faultline=`cat $T.aaa | head -1 | cut -d":" -f2`
+faultline=`cat $T.dis | head -1 | cut -d":" -f2`
+faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'`
cat $T.oo | sed -e "s/\($faultline\)/\*\1 <-- trapping instruction/g"
echo
diff --git a/scripts/extract-ikconfig b/scripts/extract-ikconfig
index de233ff43c1c..37f30d36c944 100755
--- a/scripts/extract-ikconfig
+++ b/scripts/extract-ikconfig
@@ -1,92 +1,53 @@
#!/bin/sh
-# extracts .config info from a [b]zImage file
-# uses: binoffset (new), dd, zcat, strings, grep
-# $arg1 is [b]zImage filename
-
-binoffset="./scripts/binoffset"
-test -e $binoffset || cc -o $binoffset ./scripts/binoffset.c || exit 1
-
-IKCFG_ST="0x49 0x4b 0x43 0x46 0x47 0x5f 0x53 0x54"
-IKCFG_ED="0x49 0x4b 0x43 0x46 0x47 0x5f 0x45 0x44"
-dump_config() {
- file="$1"
-
- start=`$binoffset $file $IKCFG_ST 2>/dev/null`
- [ "$?" != "0" ] && start="-1"
- if [ "$start" -eq "-1" ]; then
- return
- fi
- end=`$binoffset $file $IKCFG_ED 2>/dev/null`
- [ "$?" != "0" ] && end="-1"
- if [ "$end" -eq "-1" ]; then
- return
- fi
-
- start=`expr $start + 8`
- size=`expr $end - $start`
-
- dd if="$file" ibs=1 skip="$start" count="$size" 2>/dev/null | zcat
-
- clean_up
- exit 0
-}
-
-
-usage()
-{
- echo " usage: extract-ikconfig [b]zImage_filename"
-}
-
-clean_up()
+# ----------------------------------------------------------------------
+# extract-ikconfig - Extract the .config file from a kernel image
+#
+# This will only work when the kernel was compiled with CONFIG_IKCONFIG.
+#
+# The obscure use of the "tr" filter is to work around older versions of
+# "grep" that report the byte offset of the line instead of the pattern.
+#
+# (c) 2009, Dick Streefland <dick@streefland.net>
+# Licensed under the terms of the GNU General Public License.
+# ----------------------------------------------------------------------
+
+gz1='\037\213\010'
+gz2='01'
+cf1='IKCFG_ST\037\213\010'
+cf2='0123456789'
+
+dump_config()
{
- if [ "$TMPFILE" != "" ]; then
- rm -f $TMPFILE
+ if pos=`tr "$cf1\n$cf2" "\n$cf2=" < "$1" | grep -abo "^$cf2"`
+ then
+ pos=${pos%%:*}
+ tail -c+$(($pos+8)) "$1" | zcat -q
+ exit 0
fi
}
-if [ $# -lt 1 ]
+# Check invocation:
+me=${0##*/}
+img=$1
+if [ $# -ne 1 -o ! -s "$img" ]
then
- usage
- exit 1
+ echo "Usage: $me <kernel-image>" >&2
+ exit 2
fi
-TMPFILE=`mktemp -t ikconfig-XXXXXX` || exit 1
-image="$1"
-
-# vmlinux: Attempt to dump the configuration from the file directly
-dump_config "$image"
-
-GZHDR1="0x1f 0x8b 0x08 0x00"
-GZHDR2="0x1f 0x8b 0x08 0x08"
-
-ELFHDR="0x7f 0x45 0x4c 0x46"
-
-# vmlinux.gz: Check for a compressed images
-off=`$binoffset "$image" $GZHDR1 2>/dev/null`
-[ "$?" != "0" ] && off="-1"
-if [ "$off" -eq "-1" ]; then
- off=`$binoffset "$image" $GZHDR2 2>/dev/null`
- [ "$?" != "0" ] && off="-1"
-fi
-if [ "$off" -eq "0" ]; then
- zcat <"$image" >"$TMPFILE"
- dump_config "$TMPFILE"
-elif [ "$off" -ne "-1" ]; then
- (dd ibs="$off" skip=1 count=0 && dd bs=512k) <"$image" 2>/dev/null | \
- zcat >"$TMPFILE"
- dump_config "$TMPFILE"
-
-# check if this is simply an ELF file
-else
- off=`$binoffset "$image" $ELFHDR 2>/dev/null`
- [ "$?" != "0" ] && off="-1"
- if [ "$off" -eq "0" ]; then
- dump_config "$image"
- fi
-fi
-
-echo "ERROR: Unable to extract kernel configuration information."
-echo " This kernel image may not have the config info."
-
-clean_up
+# Initial attempt for uncompressed images or objects:
+dump_config "$img"
+
+# That didn't work, so decompress and try again:
+tmp=/tmp/ikconfig$$
+trap "rm -f $tmp" 0
+for pos in `tr "$gz1\n$gz2" "\n$gz2=" < "$img" | grep -abo "^$gz2"`
+do
+ pos=${pos%%:*}
+ tail -c+$pos "$img" | zcat 2> /dev/null > $tmp
+ dump_config $tmp
+done
+
+# Bail out:
+echo "$me: Cannot find kernel config." >&2
exit 1
diff --git a/scripts/genksyms/genksyms.c b/scripts/genksyms/genksyms.c
index af6b8363a2d5..f99115ebe925 100644
--- a/scripts/genksyms/genksyms.c
+++ b/scripts/genksyms/genksyms.c
@@ -758,8 +758,10 @@ int main(int argc, char **argv)
/* setlinebuf(debugfile); */
}
- if (flag_reference)
+ if (flag_reference) {
read_reference(ref_file);
+ fclose(ref_file);
+ }
yyparse();
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 445e8845f0a4..090f24839700 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -296,46 +296,56 @@ my @status = ();
foreach my $file (@files) {
-#Do not match excluded file patterns
-
- my $exclude = 0;
- foreach my $line (@typevalue) {
- if ($line =~ m/^(\C):\s*(.*)/) {
- my $type = $1;
- my $value = $2;
- if ($type eq 'X') {
- if (file_match_pattern($file, $value)) {
- $exclude = 1;
- last;
- }
- }
- }
- }
+ my %hash;
+ my $tvi = find_first_section();
+ while ($tvi < @typevalue) {
+ my $start = find_starting_index($tvi);
+ my $end = find_ending_index($tvi);
+ my $exclude = 0;
+ my $i;
+
+ #Do not match excluded file patterns
- if (!$exclude) {
- my $tvi = 0;
- my %hash;
- foreach my $line (@typevalue) {
+ for ($i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
if ($line =~ m/^(\C):\s*(.*)/) {
my $type = $1;
my $value = $2;
- if ($type eq 'F') {
+ if ($type eq 'X') {
if (file_match_pattern($file, $value)) {
- my $value_pd = ($value =~ tr@/@@);
- my $file_pd = ($file =~ tr@/@@);
- $value_pd++ if (substr($value,-1,1) ne "/");
- if ($pattern_depth == 0 ||
- (($file_pd - $value_pd) < $pattern_depth)) {
- $hash{$tvi} = $value_pd;
- }
+ $exclude = 1;
}
}
}
- $tvi++;
}
- foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
- add_categories($line);
+
+ if (!$exclude) {
+ for ($i = $start; $i < $end; $i++) {
+ my $line = $typevalue[$i];
+ if ($line =~ m/^(\C):\s*(.*)/) {
+ my $type = $1;
+ my $value = $2;
+ if ($type eq 'F') {
+ if (file_match_pattern($file, $value)) {
+ my $value_pd = ($value =~ tr@/@@);
+ my $file_pd = ($file =~ tr@/@@);
+ $value_pd++ if (substr($value,-1,1) ne "/");
+ if ($pattern_depth == 0 ||
+ (($file_pd - $value_pd) < $pattern_depth)) {
+ $hash{$tvi} = $value_pd;
+ }
+ }
+ }
+ }
+ }
}
+
+ $tvi += ($end - $start);
+
+ }
+
+ foreach my $line (sort {$hash{$b} <=> $hash{$a}} keys %hash) {
+ add_categories($line);
}
if ($email && $email_git) {
@@ -570,6 +580,20 @@ sub format_email {
return $formatted_email;
}
+sub find_first_section {
+ my $index = 0;
+
+ while ($index < @typevalue) {
+ my $tv = $typevalue[$index];
+ if (($tv =~ m/^(\C):\s*(.*)/)) {
+ last;
+ }
+ $index++;
+ }
+
+ return $index;
+}
+
sub find_starting_index {
my ($index) = @_;
diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile
index 999e8a7d5bf7..75bdf5ae202c 100644
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -23,6 +23,9 @@ menuconfig: $(obj)/mconf
config: $(obj)/conf
$< $(Kconfig)
+nconfig: $(obj)/nconf
+ $< $(Kconfig)
+
oldconfig: $(obj)/conf
$< -o $(Kconfig)
@@ -110,6 +113,7 @@ endif
# Help text used by make help
help:
@echo ' config - Update current config utilising a line-oriented program'
+ @echo ' nconfig - Update current config utilising a ncurses menu based program'
@echo ' menuconfig - Update current config utilising a menu based program'
@echo ' xconfig - Update current config utilising a QT based front-end'
@echo ' gconfig - Update current config utilising a GTK based front-end'
@@ -137,6 +141,8 @@ HOST_EXTRACFLAGS += -DLOCALE
# ===========================================================================
# Shared Makefile for the various kconfig executables:
# conf: Used for defconfig, oldconfig and related targets
+# nconf: Used for the nconfig target.
+# Utilizes ncurses
# mconf: Used for the menuconfig target
# Utilizes the lxdialog package
# qconf: Used for the xconfig target
@@ -149,11 +155,16 @@ lxdialog := lxdialog/checklist.o lxdialog/util.o lxdialog/inputbox.o
lxdialog += lxdialog/textbox.o lxdialog/yesno.o lxdialog/menubox.o
conf-objs := conf.o zconf.tab.o
-mconf-objs := mconf.o zconf.tab.o $(lxdialog)
+mconf-objs := mconf.o zconf.tab.o $(lxdialog)
+nconf-objs := nconf.o zconf.tab.o nconf.gui.o
kxgettext-objs := kxgettext.o zconf.tab.o
hostprogs-y := conf qconf gconf kxgettext
+ifeq ($(MAKECMDGOALS),nconfig)
+ hostprogs-y += nconf
+endif
+
ifeq ($(MAKECMDGOALS),menuconfig)
hostprogs-y += mconf
endif
@@ -177,7 +188,7 @@ endif
clean-files := lkc_defs.h qconf.moc .tmp_qtcheck \
.tmp_gtkcheck zconf.tab.c lex.zconf.c zconf.hash.c gconf.glade.h
-clean-files += mconf qconf gconf
+clean-files += mconf qconf gconf nconf
clean-files += config.pot linux.pot
# Check that we have the required ncurses stuff installed for lxdialog (menuconfig)
@@ -202,6 +213,7 @@ HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0`
HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \
-D LKC_DIRECT_LINK
+HOSTLOADLIBES_nconf = -lmenu -lpanel -lncurses
$(obj)/qconf.o: $(obj)/.tmp_qtcheck
ifeq ($(qconf-target),1)
diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
index edd3f39a080a..d83f2322893a 100644
--- a/scripts/kconfig/expr.c
+++ b/scripts/kconfig/expr.c
@@ -1097,9 +1097,32 @@ void expr_fprint(struct expr *e, FILE *out)
static void expr_print_gstr_helper(void *data, struct symbol *sym, const char *str)
{
- str_append((struct gstr*)data, str);
+ struct gstr *gs = (struct gstr*)data;
+ const char *sym_str = NULL;
+
+ if (sym)
+ sym_str = sym_get_string_value(sym);
+
+ if (gs->max_width) {
+ unsigned extra_length = strlen(str);
+ const char *last_cr = strrchr(gs->s, '\n');
+ unsigned last_line_length;
+
+ if (sym_str)
+ extra_length += 4 + strlen(sym_str);
+
+ if (!last_cr)
+ last_cr = gs->s;
+
+ last_line_length = strlen(gs->s) - (last_cr - gs->s);
+
+ if ((last_line_length + extra_length) > gs->max_width)
+ str_append(gs, "\\\n");
+ }
+
+ str_append(gs, str);
if (sym)
- str_printf((struct gstr*)data, " [=%s]", sym_get_string_value(sym));
+ str_printf(gs, " [=%s]", sym_str);
}
void expr_gstr_print(struct expr *e, struct gstr *gs)
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index f379b0bf8c9e..ce6549cdaccf 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -84,7 +84,7 @@ void conf_set_all_new_symbols(enum conf_def_mode mode);
void kconfig_load(void);
/* menu.c */
-void menu_init(void);
+void _menu_init(void);
void menu_warn(struct menu *menu, const char *fmt, ...);
struct menu *menu_add_menu(void);
void menu_end_menu(void);
@@ -106,6 +106,11 @@ int file_write_dep(const char *name);
struct gstr {
size_t len;
char *s;
+ /*
+ * when max_width is not zero long lines in string s (if any) get
+ * wrapped not to exceed the max_width value
+ */
+ int max_width;
};
struct gstr str_new(void);
struct gstr str_assign(const char *s);
diff --git a/scripts/kconfig/lkc_proto.h b/scripts/kconfig/lkc_proto.h
index ffeb532b2cff..41e652a8d1f6 100644
--- a/scripts/kconfig/lkc_proto.h
+++ b/scripts/kconfig/lkc_proto.h
@@ -17,7 +17,8 @@ P(menu_get_root_menu,struct menu *,(struct menu *menu));
P(menu_get_parent_menu,struct menu *,(struct menu *menu));
P(menu_has_help,bool,(struct menu *menu));
P(menu_get_help,const char *,(struct menu *menu));
-P(get_symbol_str,void,(struct gstr *r, struct symbol *sym));
+P(get_symbol_str, void, (struct gstr *r, struct symbol *sym));
+P(get_relations_str, struct gstr, (struct symbol **sym_arr));
P(menu_get_ext_help,void,(struct menu *menu, struct gstr *help));
/* symbol.c */
diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c
index 8413cf38ed27..a4a75190457c 100644
--- a/scripts/kconfig/mconf.c
+++ b/scripts/kconfig/mconf.c
@@ -282,19 +282,6 @@ static void show_textbox(const char *title, const char *text, int r, int c);
static void show_helptext(const char *title, const char *text);
static void show_help(struct menu *menu);
-static struct gstr get_relations_str(struct symbol **sym_arr)
-{
- struct symbol *sym;
- struct gstr res = str_new();
- int i;
-
- for (i = 0; sym_arr && (sym = sym_arr[i]); i++)
- get_symbol_str(&res, sym);
- if (!i)
- str_append(&res, _("No matches found.\n"));
- return res;
-}
-
static char filename[PATH_MAX+1];
static void set_config_filename(const char *config_filename)
{
@@ -638,6 +625,7 @@ static void show_help(struct menu *menu)
{
struct gstr help = str_new();
+ help.max_width = getmaxx(stdscr) - 10;
menu_get_ext_help(menu, &help);
show_helptext(_(menu_get_prompt(menu)), str_get(&help));
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 059a2465c574..21bfb3dbc87a 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -38,7 +38,7 @@ static void prop_warn(struct property *prop, const char *fmt, ...)
va_end(ap);
}
-void menu_init(void)
+void _menu_init(void)
{
current_entry = current_menu = &rootmenu;
last_entry_ptr = &rootmenu.list;
@@ -515,6 +515,20 @@ void get_symbol_str(struct gstr *r, struct symbol *sym)
str_append(r, "\n\n");
}
+struct gstr get_relations_str(struct symbol **sym_arr)
+{
+ struct symbol *sym;
+ struct gstr res = str_new();
+ int i;
+
+ for (i = 0; sym_arr && (sym = sym_arr[i]); i++)
+ get_symbol_str(&res, sym);
+ if (!i)
+ str_append(&res, _("No matches found.\n"));
+ return res;
+}
+
+
void menu_get_ext_help(struct menu *menu, struct gstr *help)
{
struct symbol *sym = menu->sym;
diff --git a/scripts/kconfig/nconf.c b/scripts/kconfig/nconf.c
new file mode 100644
index 000000000000..fb54c98874bc
--- /dev/null
+++ b/scripts/kconfig/nconf.c
@@ -0,0 +1,1568 @@
+/*
+ * Copyright (C) 2008 Nir Tzachar <nir.tzachar@gmail.com?
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ * Derived from menuconfig.
+ *
+ */
+#define LKC_DIRECT_LINK
+#include "lkc.h"
+#include "nconf.h"
+
+static const char nconf_readme[] = N_(
+"Overview\n"
+"--------\n"
+"Some kernel features may be built directly into the kernel.\n"
+"Some may be made into loadable runtime modules. Some features\n"
+"may be completely removed altogether. There are also certain\n"
+"kernel parameters which are not really features, but must be\n"
+"entered in as decimal or hexadecimal numbers or possibly text.\n"
+"\n"
+"Menu items beginning with following braces represent features that\n"
+" [ ] can be built in or removed\n"
+" < > can be built in, modularized or removed\n"
+" { } can be built in or modularized (selected by other feature)\n"
+" - - are selected by other feature,\n"
+" XXX cannot be selected. use Symbol Info to find out why,\n"
+"while *, M or whitespace inside braces means to build in, build as\n"
+"a module or to exclude the feature respectively.\n"
+"\n"
+"To change any of these features, highlight it with the cursor\n"
+"keys and press <Y> to build it in, <M> to make it a module or\n"
+"<N> to removed it. You may also press the <Space Bar> to cycle\n"
+"through the available options (ie. Y->N->M->Y).\n"
+"\n"
+"Some additional keyboard hints:\n"
+"\n"
+"Menus\n"
+"----------\n"
+"o Use the Up/Down arrow keys (cursor keys) to highlight the item\n"
+" you wish to change use <Enter> or <Space>. Goto submenu by \n"
+" pressing <Enter> of <right-arrow>. Use <Esc> or <left-arrow> to go back.\n"
+" Submenus are designated by \"--->\".\n"
+"\n"
+" Shortcut: Press the option's highlighted letter (hotkey).\n"
+" Pressing a hotkey more than once will sequence\n"
+" through all visible items which use that hotkey.\n"
+"\n"
+" You may also use the <PAGE UP> and <PAGE DOWN> keys to scroll\n"
+" unseen options into view.\n"
+"\n"
+"o To exit a menu use the just press <ESC> <F5> <F8> or <left-arrow>.\n"
+"\n"
+"o To get help with an item, press <F1>\n"
+" Shortcut: Press <h> or <?>.\n"
+"\n"
+"\n"
+"Radiolists (Choice lists)\n"
+"-----------\n"
+"o Use the cursor keys to select the option you wish to set and press\n"
+" <S> or the <SPACE BAR>.\n"
+"\n"
+" Shortcut: Press the first letter of the option you wish to set then\n"
+" press <S> or <SPACE BAR>.\n"
+"\n"
+"o To see available help for the item, press <F1>\n"
+" Shortcut: Press <H> or <?>.\n"
+"\n"
+"\n"
+"Data Entry\n"
+"-----------\n"
+"o Enter the requested information and press <ENTER>\n"
+" If you are entering hexadecimal values, it is not necessary to\n"
+" add the '0x' prefix to the entry.\n"
+"\n"
+"o For help, press <F1>.\n"
+"\n"
+"\n"
+"Text Box (Help Window)\n"
+"--------\n"
+"o Use the cursor keys to scroll up/down/left/right. The VI editor\n"
+" keys h,j,k,l function here as do <SPACE BAR> for those\n"
+" who are familiar with less and lynx.\n"
+"\n"
+"o Press <Enter>, <F1>, <F5>, <F7> or <Esc> to exit.\n"
+"\n"
+"\n"
+"Alternate Configuration Files\n"
+"-----------------------------\n"
+"nconfig supports the use of alternate configuration files for\n"
+"those who, for various reasons, find it necessary to switch\n"
+"between different kernel configurations.\n"
+"\n"
+"At the end of the main menu you will find two options. One is\n"
+"for saving the current configuration to a file of your choosing.\n"
+"The other option is for loading a previously saved alternate\n"
+"configuration.\n"
+"\n"
+"Even if you don't use alternate configuration files, but you\n"
+"find during a nconfig session that you have completely messed\n"
+"up your settings, you may use the \"Load Alternate...\" option to\n"
+"restore your previously saved settings from \".config\" without\n"
+"restarting nconfig.\n"
+"\n"
+"Other information\n"
+"-----------------\n"
+"If you use nconfig in an XTERM window make sure you have your\n"
+"$TERM variable set to point to a xterm definition which supports color.\n"
+"Otherwise, nconfig will look rather bad. nconfig will not\n"
+"display correctly in a RXVT window because rxvt displays only one\n"
+"intensity of color, bright.\n"
+"\n"
+"nconfig will display larger menus on screens or xterms which are\n"
+"set to display more than the standard 25 row by 80 column geometry.\n"
+"In order for this to work, the \"stty size\" command must be able to\n"
+"display the screen's current row and column geometry. I STRONGLY\n"
+"RECOMMEND that you make sure you do NOT have the shell variables\n"
+"LINES and COLUMNS exported into your environment. Some distributions\n"
+"export those variables via /etc/profile. Some ncurses programs can\n"
+"become confused when those variables (LINES & COLUMNS) don't reflect\n"
+"the true screen size.\n"
+"\n"
+"Optional personality available\n"
+"------------------------------\n"
+"If you prefer to have all of the kernel options listed in a single\n"
+"menu, rather than the default multimenu hierarchy, run the nconfig\n"
+"with NCONFIG_MODE environment variable set to single_menu. Example:\n"
+"\n"
+"make NCONFIG_MODE=single_menu nconfig\n"
+"\n"
+"<Enter> will then unroll the appropriate category, or enfold it if it\n"
+"is already unrolled.\n"
+"\n"
+"Note that this mode can eventually be a little more CPU expensive\n"
+"(especially with a larger number of unrolled categories) than the\n"
+"default mode.\n"
+"\n"),
+menu_no_f_instructions[] = N_(
+" You do not have function keys support. Please follow the\n"
+" following instructions:\n"
+" Arrow keys navigate the menu.\n"
+" <Enter> or <right-arrow> selects submenus --->.\n"
+" Capital Letters are hotkeys.\n"
+" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
+" Pressing SpaceBar toggles between the above options\n"
+" Press <Esc> or <left-arrow> to go back one menu, \n"
+" <?> or <h> for Help, </> for Search.\n"
+" <1> is interchangable with <F1>, <2> with <F2>, etc.\n"
+" Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
+" <Esc> always leaves the current window\n"),
+menu_instructions[] = N_(
+" Arrow keys navigate the menu.\n"
+" <Enter> or <right-arrow> selects submenus --->.\n"
+" Capital Letters are hotkeys.\n"
+" Pressing <Y> includes, <N> excludes, <M> modularizes features.\n"
+" Pressing SpaceBar toggles between the above options\n"
+" Press <Esc>, <F3> or <left-arrow> to go back one menu, \n"
+" <?>, <F1> or <h> for Help, </> for Search.\n"
+" <1> is interchangable with <F1>, <2> with <F2>, etc.\n"
+" Legend: [*] built-in [ ] excluded <M> module < > module capable.\n"
+" <Esc> always leaves the current window\n"),
+radiolist_instructions[] = N_(
+" Use the arrow keys to navigate this window or\n"
+" press the hotkey of the item you wish to select\n"
+" followed by the <SPACE BAR>.\n"
+" Press <?>, <F1> or <h> for additional information about this option.\n"),
+inputbox_instructions_int[] = N_(
+"Please enter a decimal value.\n"
+"Fractions will not be accepted.\n"
+"Press <RETURN> to accept, <ESC> to cancel."),
+inputbox_instructions_hex[] = N_(
+"Please enter a hexadecimal value.\n"
+"Press <RETURN> to accept, <ESC> to cancel."),
+inputbox_instructions_string[] = N_(
+"Please enter a string value.\n"
+"Press <RETURN> to accept, <ESC> to cancel."),
+setmod_text[] = N_(
+"This feature depends on another which\n"
+"has been configured as a module.\n"
+"As a result, this feature will be built as a module."),
+nohelp_text[] = N_(
+"There is no help available for this kernel option.\n"),
+load_config_text[] = N_(
+"Enter the name of the configuration file you wish to load.\n"
+"Accept the name shown to restore the configuration you\n"
+"last retrieved. Leave blank to abort."),
+load_config_help[] = N_(
+"\n"
+"For various reasons, one may wish to keep several different kernel\n"
+"configurations available on a single machine.\n"
+"\n"
+"If you have saved a previous configuration in a file other than the\n"
+"kernel's default, entering the name of the file here will allow you\n"
+"to modify that configuration.\n"
+"\n"
+"If you are uncertain, then you have probably never used alternate\n"
+"configuration files. You should therefor leave this blank to abort.\n"),
+save_config_text[] = N_(
+"Enter a filename to which this configuration should be saved\n"
+"as an alternate. Leave blank to abort."),
+save_config_help[] = N_(
+"\n"
+"For various reasons, one may wish to keep different kernel\n"
+"configurations available on a single machine.\n"
+"\n"
+"Entering a file name here will allow you to later retrieve, modify\n"
+"and use the current configuration as an alternate to whatever\n"
+"configuration options you have selected at that time.\n"
+"\n"
+"If you are uncertain what all this means then you should probably\n"
+"leave this blank.\n"),
+search_help[] = N_(
+"\n"
+"Search for CONFIG_ symbols and display their relations.\n"
+"Regular expressions are allowed.\n"
+"Example: search for \"^FOO\"\n"
+"Result:\n"
+"-----------------------------------------------------------------\n"
+"Symbol: FOO [ = m]\n"
+"Prompt: Foo bus is used to drive the bar HW\n"
+"Defined at drivers/pci/Kconfig:47\n"
+"Depends on: X86_LOCAL_APIC && X86_IO_APIC || IA64\n"
+"Location:\n"
+" -> Bus options (PCI, PCMCIA, EISA, MCA, ISA)\n"
+" -> PCI support (PCI [ = y])\n"
+" -> PCI access mode (<choice> [ = y])\n"
+"Selects: LIBCRC32\n"
+"Selected by: BAR\n"
+"-----------------------------------------------------------------\n"
+"o The line 'Prompt:' shows the text used in the menu structure for\n"
+" this CONFIG_ symbol\n"
+"o The 'Defined at' line tell at what file / line number the symbol\n"
+" is defined\n"
+"o The 'Depends on:' line tell what symbols needs to be defined for\n"
+" this symbol to be visible in the menu (selectable)\n"
+"o The 'Location:' lines tell where in the menu structure this symbol\n"
+" is located\n"
+" A location followed by a [ = y] indicate that this is a selectable\n"
+" menu item - and current value is displayed inside brackets.\n"
+"o The 'Selects:' line tell what symbol will be automatically\n"
+" selected if this symbol is selected (y or m)\n"
+"o The 'Selected by' line tell what symbol has selected this symbol\n"
+"\n"
+"Only relevant lines are shown.\n"
+"\n\n"
+"Search examples:\n"
+"Examples: USB = > find all CONFIG_ symbols containing USB\n"
+" ^USB => find all CONFIG_ symbols starting with USB\n"
+" USB$ => find all CONFIG_ symbols ending with USB\n"
+"\n");
+
+struct mitem {
+ char str[256];
+ char tag;
+ void *usrptr;
+ int is_hot;
+ int is_visible;
+};
+
+#define MAX_MENU_ITEMS 4096
+static int show_all_items;
+static int indent;
+static struct menu *current_menu;
+static int child_count;
+static int single_menu_mode;
+/* the window in which all information appears */
+static WINDOW *main_window;
+/* the largest size of the menu window */
+static int mwin_max_lines;
+static int mwin_max_cols;
+/* the window in which we show option buttons */
+static MENU *curses_menu;
+static ITEM *curses_menu_items[MAX_MENU_ITEMS];
+static struct mitem k_menu_items[MAX_MENU_ITEMS];
+static int items_num;
+static int global_exit;
+/* the currently selected button */
+const char *current_instructions = menu_instructions;
+/* this array is used to implement hot keys. it is updated in item_make and
+ * resetted in clean_items. It would be better to use a hash, but lets keep it
+ * simple... */
+#define MAX_SAME_KEY MAX_MENU_ITEMS
+struct {
+ int count;
+ int ptrs[MAX_MENU_ITEMS];
+} hotkeys[1<<(sizeof(char)*8)];
+
+static void conf(struct menu *menu);
+static void conf_choice(struct menu *menu);
+static void conf_string(struct menu *menu);
+static void conf_load(void);
+static void conf_save(void);
+static void show_help(struct menu *menu);
+static int do_exit(void);
+static void setup_windows(void);
+
+typedef void (*function_key_handler_t)(int *key, struct menu *menu);
+static void handle_f1(int *key, struct menu *current_item);
+static void handle_f2(int *key, struct menu *current_item);
+static void handle_f3(int *key, struct menu *current_item);
+static void handle_f4(int *key, struct menu *current_item);
+static void handle_f5(int *key, struct menu *current_item);
+static void handle_f6(int *key, struct menu *current_item);
+static void handle_f7(int *key, struct menu *current_item);
+static void handle_f8(int *key, struct menu *current_item);
+
+struct function_keys {
+ const char *key_str;
+ const char *func;
+ function_key key;
+ function_key_handler_t handler;
+};
+
+static const int function_keys_num = 8;
+struct function_keys function_keys[] = {
+ {
+ .key_str = "F1",
+ .func = "Help",
+ .key = F_HELP,
+ .handler = handle_f1,
+ },
+ {
+ .key_str = "F2",
+ .func = "Symbol Info",
+ .key = F_SYMBOL,
+ .handler = handle_f2,
+ },
+ {
+ .key_str = "F3",
+ .func = "Instructions",
+ .key = F_INSTS,
+ .handler = handle_f3,
+ },
+ {
+ .key_str = "F4",
+ .func = "Config",
+ .key = F_CONF,
+ .handler = handle_f4,
+ },
+ {
+ .key_str = "F5",
+ .func = "Back",
+ .key = F_BACK,
+ .handler = handle_f5,
+ },
+ {
+ .key_str = "F6",
+ .func = "Save",
+ .key = F_SAVE,
+ .handler = handle_f6,
+ },
+ {
+ .key_str = "F7",
+ .func = "Load",
+ .key = F_LOAD,
+ .handler = handle_f7,
+ },
+ {
+ .key_str = "F8",
+ .func = "Exit",
+ .key = F_EXIT,
+ .handler = handle_f8,
+ },
+};
+
+static void print_function_line(void)
+{
+ int i;
+ int offset = 1;
+ const int skip = 1;
+
+ for (i = 0; i < function_keys_num; i++) {
+ wattrset(main_window, attributes[FUNCTION_HIGHLIGHT]);
+ mvwprintw(main_window, LINES-3, offset,
+ "%s",
+ function_keys[i].key_str);
+ wattrset(main_window, attributes[FUNCTION_TEXT]);
+ offset += strlen(function_keys[i].key_str);
+ mvwprintw(main_window, LINES-3,
+ offset, "%s",
+ function_keys[i].func);
+ offset += strlen(function_keys[i].func) + skip;
+ }
+ wattrset(main_window, attributes[NORMAL]);
+}
+
+/* help */
+static void handle_f1(int *key, struct menu *current_item)
+{
+ show_scroll_win(main_window,
+ _("README"), _(nconf_readme));
+ return;
+}
+
+/* symbole help */
+static void handle_f2(int *key, struct menu *current_item)
+{
+ show_help(current_item);
+ return;
+}
+
+/* instructions */
+static void handle_f3(int *key, struct menu *current_item)
+{
+ show_scroll_win(main_window,
+ _("Instructions"),
+ _(current_instructions));
+ return;
+}
+
+/* config */
+static void handle_f4(int *key, struct menu *current_item)
+{
+ int res = btn_dialog(main_window,
+ _("Show all symbols?"),
+ 2,
+ " <Show All> ",
+ "<Don't show all>");
+ if (res == 0)
+ show_all_items = 1;
+ else if (res == 1)
+ show_all_items = 0;
+
+ return;
+}
+
+/* back */
+static void handle_f5(int *key, struct menu *current_item)
+{
+ *key = KEY_LEFT;
+ return;
+}
+
+/* save */
+static void handle_f6(int *key, struct menu *current_item)
+{
+ conf_save();
+ return;
+}
+
+/* load */
+static void handle_f7(int *key, struct menu *current_item)
+{
+ conf_load();
+ return;
+}
+
+/* exit */
+static void handle_f8(int *key, struct menu *current_item)
+{
+ do_exit();
+ return;
+}
+
+/* return != 0 to indicate the key was handles */
+static int process_special_keys(int *key, struct menu *menu)
+{
+ int i;
+
+ if (*key == KEY_RESIZE) {
+ setup_windows();
+ return 1;
+ }
+
+ for (i = 0; i < function_keys_num; i++) {
+ if (*key == KEY_F(function_keys[i].key) ||
+ *key == '0' + function_keys[i].key){
+ function_keys[i].handler(key, menu);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+static void clean_items(void)
+{
+ int i;
+ for (i = 0; curses_menu_items[i]; i++)
+ free_item(curses_menu_items[i]);
+ bzero(curses_menu_items, sizeof(curses_menu_items));
+ bzero(k_menu_items, sizeof(k_menu_items));
+ bzero(hotkeys, sizeof(hotkeys));
+ items_num = 0;
+}
+
+/* return the index of the next hot item, or -1 if no such item exists */
+static int get_next_hot(int c)
+{
+ static int hot_index;
+ static int hot_char;
+
+ if (c < 0 || c > 255 || hotkeys[c].count <= 0)
+ return -1;
+
+ if (hot_char == c) {
+ hot_index = (hot_index+1)%hotkeys[c].count;
+ return hotkeys[c].ptrs[hot_index];
+ } else {
+ hot_char = c;
+ hot_index = 0;
+ return hotkeys[c].ptrs[0];
+ }
+}
+
+/* can the char c be a hot key? no, if c is a common shortcut used elsewhere */
+static int canbhot(char c)
+{
+ c = tolower(c);
+ return isalnum(c) && c != 'y' && c != 'm' && c != 'h' &&
+ c != 'n' && c != '?';
+}
+
+/* check if str already contains a hot key. */
+static int is_hot(int index)
+{
+ return k_menu_items[index].is_hot;
+}
+
+/* find the first possible hot key, and mark it.
+ * index is the index of the item in the menu
+ * return 0 on success*/
+static int make_hot(char *dest, int len, const char *org, int index)
+{
+ int position = -1;
+ int i;
+ int tmp;
+ int c;
+ int org_len = strlen(org);
+
+ if (org == NULL || is_hot(index))
+ return 1;
+
+ /* make sure not to make hot keys out of markers.
+ * find where to start looking for a hot key
+ */
+ i = 0;
+ /* skip white space */
+ while (i < org_len && org[i] == ' ')
+ i++;
+ if (i == org_len)
+ return -1;
+ /* if encountering '(' or '<' or '[', find the match and look from there
+ **/
+ if (org[i] == '[' || org[i] == '<' || org[i] == '(') {
+ i++;
+ for (; i < org_len; i++)
+ if (org[i] == ']' || org[i] == '>' || org[i] == ')')
+ break;
+ }
+ if (i == org_len)
+ return -1;
+ for (; i < org_len; i++) {
+ if (canbhot(org[i]) && org[i-1] != '<' && org[i-1] != '(') {
+ position = i;
+ break;
+ }
+ }
+ if (position == -1)
+ return 1;
+
+ /* ok, char at org[position] should be a hot key to this item */
+ c = tolower(org[position]);
+ tmp = hotkeys[c].count;
+ hotkeys[c].ptrs[tmp] = index;
+ hotkeys[c].count++;
+ /*
+ snprintf(dest, len, "%.*s(%c)%s", position, org, org[position],
+ &org[position+1]);
+ */
+ /* make org[position] uppercase, and all leading letter small case */
+ strncpy(dest, org, len);
+ for (i = 0; i < position; i++)
+ dest[i] = tolower(dest[i]);
+ dest[position] = toupper(dest[position]);
+ k_menu_items[index].is_hot = 1;
+ return 0;
+}
+
+/* Make a new item. Add a hotkey mark in the first possible letter.
+ * As ncurses does not allow any attributes inside menue item, we mark the
+ * hot key as the first capitalized letter in the string */
+static void item_make(struct menu *menu, char tag, const char *fmt, ...)
+{
+ va_list ap;
+ char tmp_str[256];
+
+ if (items_num > MAX_MENU_ITEMS-1)
+ return;
+
+ bzero(&k_menu_items[items_num], sizeof(k_menu_items[0]));
+ k_menu_items[items_num].tag = tag;
+ k_menu_items[items_num].usrptr = menu;
+ if (menu != NULL)
+ k_menu_items[items_num].is_visible =
+ menu_is_visible(menu);
+ else
+ k_menu_items[items_num].is_visible = 1;
+
+ va_start(ap, fmt);
+ vsnprintf(tmp_str, sizeof(tmp_str), fmt, ap);
+ if (!k_menu_items[items_num].is_visible)
+ memcpy(tmp_str, "XXX", 3);
+ va_end(ap);
+ if (make_hot(
+ k_menu_items[items_num].str,
+ sizeof(k_menu_items[items_num].str), tmp_str, items_num) != 0)
+ strncpy(k_menu_items[items_num].str,
+ tmp_str,
+ sizeof(k_menu_items[items_num].str));
+
+ curses_menu_items[items_num] = new_item(
+ k_menu_items[items_num].str,
+ k_menu_items[items_num].str);
+ set_item_userptr(curses_menu_items[items_num],
+ &k_menu_items[items_num]);
+ /*
+ if (!k_menu_items[items_num].is_visible)
+ item_opts_off(curses_menu_items[items_num], O_SELECTABLE);
+ */
+
+ items_num++;
+ curses_menu_items[items_num] = NULL;
+}
+
+/* very hackish. adds a string to the last item added */
+static void item_add_str(const char *fmt, ...)
+{
+ va_list ap;
+ int index = items_num-1;
+ char new_str[256];
+ char tmp_str[256];
+
+ if (index < 0)
+ return;
+
+ va_start(ap, fmt);
+ vsnprintf(new_str, sizeof(new_str), fmt, ap);
+ va_end(ap);
+ snprintf(tmp_str, sizeof(tmp_str), "%s%s",
+ k_menu_items[index].str, new_str);
+ if (make_hot(k_menu_items[index].str,
+ sizeof(k_menu_items[index].str), tmp_str, index) != 0)
+ strncpy(k_menu_items[index].str,
+ tmp_str,
+ sizeof(k_menu_items[index].str));
+
+ free_item(curses_menu_items[index]);
+ curses_menu_items[index] = new_item(
+ k_menu_items[index].str,
+ k_menu_items[index].str);
+ set_item_userptr(curses_menu_items[index],
+ &k_menu_items[index]);
+}
+
+/* get the tag of the currently selected item */
+static char item_tag(void)
+{
+ ITEM *cur;
+ struct mitem *mcur;
+
+ cur = current_item(curses_menu);
+ if (cur == NULL)
+ return 0;
+ mcur = (struct mitem *) item_userptr(cur);
+ return mcur->tag;
+}
+
+static int curses_item_index(void)
+{
+ return item_index(current_item(curses_menu));
+}
+
+static void *item_data(void)
+{
+ ITEM *cur;
+ struct mitem *mcur;
+
+ cur = current_item(curses_menu);
+ mcur = (struct mitem *) item_userptr(cur);
+ return mcur->usrptr;
+
+}
+
+static int item_is_tag(char tag)
+{
+ return item_tag() == tag;
+}
+
+static char filename[PATH_MAX+1];
+static char menu_backtitle[PATH_MAX+128];
+static const char *set_config_filename(const char *config_filename)
+{
+ int size;
+ struct symbol *sym;
+
+ sym = sym_lookup("KERNELVERSION", 0);
+ sym_calc_value(sym);
+ size = snprintf(menu_backtitle, sizeof(menu_backtitle),
+ _("%s - Linux Kernel v%s Configuration"),
+ config_filename, sym_get_string_value(sym));
+ if (size >= sizeof(menu_backtitle))
+ menu_backtitle[sizeof(menu_backtitle)-1] = '\0';
+
+ size = snprintf(filename, sizeof(filename), "%s", config_filename);
+ if (size >= sizeof(filename))
+ filename[sizeof(filename)-1] = '\0';
+ return menu_backtitle;
+}
+
+/* command = 0 is supress, 1 is restore */
+static void supress_stdout(int command)
+{
+ static FILE *org_stdout;
+ static FILE *org_stderr;
+
+ if (command == 0) {
+ org_stdout = stdout;
+ org_stderr = stderr;
+ stdout = fopen("/dev/null", "a");
+ stderr = fopen("/dev/null", "a");
+ } else {
+ fclose(stdout);
+ fclose(stderr);
+ stdout = org_stdout;
+ stderr = org_stderr;
+ }
+}
+
+/* return = 0 means we are successful.
+ * -1 means go on doing what you were doing
+ */
+static int do_exit(void)
+{
+ int res;
+ if (!conf_get_changed()) {
+ global_exit = 1;
+ return 0;
+ }
+ res = btn_dialog(main_window,
+ _("Do you wish to save your "
+ "new kernel configuration?\n"
+ "<ESC> to cancel and resume nconfig."),
+ 2,
+ " <save> ",
+ "<don't save>");
+ if (res == KEY_EXIT) {
+ global_exit = 0;
+ return -1;
+ }
+
+ /* if we got here, the user really wants to exit */
+ switch (res) {
+ case 0:
+ supress_stdout(0);
+ res = conf_write(filename);
+ supress_stdout(1);
+ if (res)
+ btn_dialog(
+ main_window,
+ _("Error during writing of the kernel "
+ "configuration.\n"
+ "Your kernel configuration "
+ "changes were NOT saved."),
+ 1,
+ "<OK>");
+ else {
+ char buf[1024];
+ snprintf(buf, 1024,
+ _("Configuration written to %s\n"
+ "End of Linux kernel configuration.\n"
+ "Execute 'make' to build the kernel or try"
+ " 'make help'."), filename);
+ btn_dialog(
+ main_window,
+ buf,
+ 1,
+ "<OK>");
+ }
+ break;
+ default:
+ btn_dialog(
+ main_window,
+ _("Your kernel configuration changes were NOT saved."),
+ 1,
+ "<OK>");
+ break;
+ }
+ global_exit = 1;
+ return 0;
+}
+
+
+static void search_conf(void)
+{
+ struct symbol **sym_arr;
+ struct gstr res;
+ char dialog_input_result[100];
+ char *dialog_input;
+ int dres;
+again:
+ dres = dialog_inputbox(main_window,
+ _("Search Configuration Parameter"),
+ _("Enter CONFIG_ (sub)string to search for "
+ "(with or without \"CONFIG\")"),
+ "", dialog_input_result, 99);
+ switch (dres) {
+ case 0:
+ break;
+ case 1:
+ show_scroll_win(main_window,
+ _("Search Configuration"), search_help);
+ goto again;
+ default:
+ return;
+ }
+
+ /* strip CONFIG_ if necessary */
+ dialog_input = dialog_input_result;
+ if (strncasecmp(dialog_input_result, "CONFIG_", 7) == 0)
+ dialog_input += 7;
+
+ sym_arr = sym_re_search(dialog_input);
+ res = get_relations_str(sym_arr);
+ free(sym_arr);
+ show_scroll_win(main_window,
+ _("Search Results"), str_get(&res));
+ str_free(&res);
+}
+
+
+static void build_conf(struct menu *menu)
+{
+ struct symbol *sym;
+ struct property *prop;
+ struct menu *child;
+ int type, tmp, doint = 2;
+ tristate val;
+ char ch;
+
+ if (!menu || (!show_all_items && !menu_is_visible(menu)))
+ return;
+
+ sym = menu->sym;
+ prop = menu->prompt;
+ if (!sym) {
+ if (prop && menu != current_menu) {
+ const char *prompt = menu_get_prompt(menu);
+ enum prop_type ptype;
+ ptype = menu->prompt ? menu->prompt->type : P_UNKNOWN;
+ switch (ptype) {
+ case P_MENU:
+ child_count++;
+ prompt = _(prompt);
+ if (single_menu_mode) {
+ item_make(menu, 'm',
+ "%s%*c%s",
+ menu->data ? "-->" : "++>",
+ indent + 1, ' ', prompt);
+ } else
+ item_make(menu, 'm',
+ " %*c%s --->",
+ indent + 1,
+ ' ', prompt);
+
+ if (single_menu_mode && menu->data)
+ goto conf_childs;
+ return;
+ case P_COMMENT:
+ if (prompt) {
+ child_count++;
+ item_make(menu, ':',
+ " %*c*** %s ***",
+ indent + 1, ' ',
+ _(prompt));
+ }
+ break;
+ default:
+ if (prompt) {
+ child_count++;
+ item_make(menu, ':', "---%*c%s",
+ indent + 1, ' ',
+ _(prompt));
+ }
+ }
+ } else
+ doint = 0;
+ goto conf_childs;
+ }
+
+ type = sym_get_type(sym);
+ if (sym_is_choice(sym)) {
+ struct symbol *def_sym = sym_get_choice_value(sym);
+ struct menu *def_menu = NULL;
+
+ child_count++;
+ for (child = menu->list; child; child = child->next) {
+ if (menu_is_visible(child) && child->sym == def_sym)
+ def_menu = child;
+ }
+
+ val = sym_get_tristate_value(sym);
+ if (sym_is_changable(sym)) {
+ switch (type) {
+ case S_BOOLEAN:
+ item_make(menu, 't', "[%c]",
+ val == no ? ' ' : '*');
+ break;
+ case S_TRISTATE:
+ switch (val) {
+ case yes:
+ ch = '*';
+ break;
+ case mod:
+ ch = 'M';
+ break;
+ default:
+ ch = ' ';
+ break;
+ }
+ item_make(menu, 't', "<%c>", ch);
+ break;
+ }
+ } else {
+ item_make(menu, def_menu ? 't' : ':', " ");
+ }
+
+ item_add_str("%*c%s", indent + 1,
+ ' ', _(menu_get_prompt(menu)));
+ if (val == yes) {
+ if (def_menu) {
+ item_add_str(" (%s)",
+ _(menu_get_prompt(def_menu)));
+ item_add_str(" --->");
+ if (def_menu->list) {
+ indent += 2;
+ build_conf(def_menu);
+ indent -= 2;
+ }
+ }
+ return;
+ }
+ } else {
+ if (menu == current_menu) {
+ item_make(menu, ':',
+ "---%*c%s", indent + 1,
+ ' ', _(menu_get_prompt(menu)));
+ goto conf_childs;
+ }
+ child_count++;
+ val = sym_get_tristate_value(sym);
+ if (sym_is_choice_value(sym) && val == yes) {
+ item_make(menu, ':', " ");
+ } else {
+ switch (type) {
+ case S_BOOLEAN:
+ if (sym_is_changable(sym))
+ item_make(menu, 't', "[%c]",
+ val == no ? ' ' : '*');
+ else
+ item_make(menu, 't', "-%c-",
+ val == no ? ' ' : '*');
+ break;
+ case S_TRISTATE:
+ switch (val) {
+ case yes:
+ ch = '*';
+ break;
+ case mod:
+ ch = 'M';
+ break;
+ default:
+ ch = ' ';
+ break;
+ }
+ if (sym_is_changable(sym)) {
+ if (sym->rev_dep.tri == mod)
+ item_make(menu,
+ 't', "{%c}", ch);
+ else
+ item_make(menu,
+ 't', "<%c>", ch);
+ } else
+ item_make(menu, 't', "-%c-", ch);
+ break;
+ default:
+ tmp = 2 + strlen(sym_get_string_value(sym));
+ item_make(menu, 's', "(%s)",
+ sym_get_string_value(sym));
+ tmp = indent - tmp + 4;
+ if (tmp < 0)
+ tmp = 0;
+ item_add_str("%*c%s%s", tmp, ' ',
+ _(menu_get_prompt(menu)),
+ (sym_has_value(sym) ||
+ !sym_is_changable(sym)) ? "" :
+ _(" (NEW)"));
+ goto conf_childs;
+ }
+ }
+ item_add_str("%*c%s%s", indent + 1, ' ',
+ _(menu_get_prompt(menu)),
+ (sym_has_value(sym) || !sym_is_changable(sym)) ?
+ "" : _(" (NEW)"));
+ if (menu->prompt && menu->prompt->type == P_MENU) {
+ item_add_str(" --->");
+ return;
+ }
+ }
+
+conf_childs:
+ indent += doint;
+ for (child = menu->list; child; child = child->next)
+ build_conf(child);
+ indent -= doint;
+}
+
+static void reset_menu(void)
+{
+ unpost_menu(curses_menu);
+ clean_items();
+}
+
+/* adjust the menu to show this item.
+ * prefer not to scroll the menu if possible*/
+static void center_item(int selected_index, int *last_top_row)
+{
+ int toprow;
+ int maxy, maxx;
+
+ scale_menu(curses_menu, &maxy, &maxx);
+ set_top_row(curses_menu, *last_top_row);
+ toprow = top_row(curses_menu);
+ if (selected_index >= toprow && selected_index < toprow+maxy) {
+ /* we can only move the selected item. no need to scroll */
+ set_current_item(curses_menu,
+ curses_menu_items[selected_index]);
+ } else {
+ toprow = max(selected_index-maxy/2, 0);
+ if (toprow >= item_count(curses_menu)-maxy)
+ toprow = item_count(curses_menu)-mwin_max_lines;
+ set_top_row(curses_menu, toprow);
+ set_current_item(curses_menu,
+ curses_menu_items[selected_index]);
+ }
+ *last_top_row = toprow;
+ post_menu(curses_menu);
+ refresh_all_windows(main_window);
+}
+
+/* this function assumes reset_menu has been called before */
+static void show_menu(const char *prompt, const char *instructions,
+ int selected_index, int *last_top_row)
+{
+ int maxx, maxy;
+ WINDOW *menu_window;
+
+ current_instructions = instructions;
+
+ clear();
+ wattrset(main_window, attributes[NORMAL]);
+ print_in_middle(stdscr, 1, 0, COLS,
+ menu_backtitle,
+ attributes[MAIN_HEADING]);
+
+ wattrset(main_window, attributes[MAIN_MENU_BOX]);
+ box(main_window, 0, 0);
+ wattrset(main_window, attributes[MAIN_MENU_HEADING]);
+ mvwprintw(main_window, 0, 3, " %s ", prompt);
+ wattrset(main_window, attributes[NORMAL]);
+
+ set_menu_items(curses_menu, curses_menu_items);
+
+ /* position the menu at the middle of the screen */
+ scale_menu(curses_menu, &maxy, &maxx);
+ maxx = min(maxx, mwin_max_cols);
+ maxy = mwin_max_lines-1;
+ menu_window = derwin(main_window,
+ maxy,
+ maxx,
+ 2,
+ (mwin_max_cols-maxx)/2);
+ keypad(menu_window, TRUE);
+ set_menu_win(curses_menu, menu_window);
+ set_menu_sub(curses_menu, menu_window);
+
+ /* must reassert this after changing items, otherwise returns to a
+ * default of 16
+ */
+ set_menu_format(curses_menu, maxy, 1);
+ center_item(selected_index, last_top_row);
+ set_menu_format(curses_menu, maxy, 1);
+
+ print_function_line();
+
+ /* Post the menu */
+ post_menu(curses_menu);
+ refresh_all_windows(main_window);
+}
+
+
+static void conf(struct menu *menu)
+{
+ char pattern[256];
+ struct menu *submenu = 0;
+ const char *prompt = menu_get_prompt(menu);
+ struct symbol *sym;
+ struct menu *active_menu = NULL;
+ int res;
+ int current_index = 0;
+ int last_top_row = 0;
+
+ bzero(pattern, sizeof(pattern));
+
+ while (!global_exit) {
+ reset_menu();
+ current_menu = menu;
+ build_conf(menu);
+ if (!child_count)
+ break;
+
+ show_menu(prompt ? _(prompt) : _("Main Menu"),
+ _(menu_instructions),
+ current_index, &last_top_row);
+ keypad((menu_win(curses_menu)), TRUE);
+ while (!global_exit && (res = wgetch(menu_win(curses_menu)))) {
+ if (process_special_keys(&res,
+ (struct menu *) item_data()))
+ break;
+ switch (res) {
+ case KEY_DOWN:
+ menu_driver(curses_menu, REQ_DOWN_ITEM);
+ break;
+ case KEY_UP:
+ menu_driver(curses_menu, REQ_UP_ITEM);
+ break;
+ case KEY_NPAGE:
+ menu_driver(curses_menu, REQ_SCR_DPAGE);
+ break;
+ case KEY_PPAGE:
+ menu_driver(curses_menu, REQ_SCR_UPAGE);
+ break;
+ case KEY_HOME:
+ menu_driver(curses_menu, REQ_FIRST_ITEM);
+ break;
+ case KEY_END:
+ menu_driver(curses_menu, REQ_LAST_ITEM);
+ break;
+ case 'h':
+ case '?':
+ show_help((struct menu *) item_data());
+ break;
+ }
+ if (res == 10 || res == 27 ||
+ res == 32 || res == 'n' || res == 'y' ||
+ res == KEY_LEFT || res == KEY_RIGHT ||
+ res == 'm' || res == '/')
+ break;
+ else if (canbhot(res)) {
+ /* check for hot keys: */
+ int tmp = get_next_hot(res);
+ if (tmp != -1)
+ center_item(tmp, &last_top_row);
+ }
+ refresh_all_windows(main_window);
+ }
+
+ refresh_all_windows(main_window);
+ /* if ESC or left*/
+ if (res == 27 || (menu != &rootmenu && res == KEY_LEFT))
+ break;
+
+ /* remember location in the menu */
+ last_top_row = top_row(curses_menu);
+ current_index = curses_item_index();
+
+ if (!item_tag())
+ continue;
+
+ submenu = (struct menu *) item_data();
+ active_menu = (struct menu *)item_data();
+ if (!submenu || !menu_is_visible(submenu))
+ continue;
+ if (submenu)
+ sym = submenu->sym;
+ else
+ sym = NULL;
+
+ switch (res) {
+ case ' ':
+ if (item_is_tag('t'))
+ sym_toggle_tristate_value(sym);
+ else if (item_is_tag('m'))
+ conf(submenu);
+ break;
+ case KEY_RIGHT:
+ case 10: /* ENTER WAS PRESSED */
+ switch (item_tag()) {
+ case 'm':
+ if (single_menu_mode)
+ submenu->data =
+ (void *) (long) !submenu->data;
+ else
+ conf(submenu);
+ break;
+ case 't':
+ if (sym_is_choice(sym) &&
+ sym_get_tristate_value(sym) == yes)
+ conf_choice(submenu);
+ else if (submenu->prompt &&
+ submenu->prompt->type == P_MENU)
+ conf(submenu);
+ else if (res == 10)
+ sym_toggle_tristate_value(sym);
+ break;
+ case 's':
+ conf_string(submenu);
+ break;
+ }
+ break;
+ case 'y':
+ if (item_is_tag('t')) {
+ if (sym_set_tristate_value(sym, yes))
+ break;
+ if (sym_set_tristate_value(sym, mod))
+ btn_dialog(main_window, setmod_text, 0);
+ }
+ break;
+ case 'n':
+ if (item_is_tag('t'))
+ sym_set_tristate_value(sym, no);
+ break;
+ case 'm':
+ if (item_is_tag('t'))
+ sym_set_tristate_value(sym, mod);
+ break;
+ case '/':
+ search_conf();
+ break;
+ }
+ }
+}
+
+static void show_help(struct menu *menu)
+{
+ struct gstr help = str_new();
+
+ if (menu && menu->sym && menu_has_help(menu)) {
+ if (menu->sym->name) {
+ str_printf(&help, "CONFIG_%s:\n\n", menu->sym->name);
+ str_append(&help, _(menu_get_help(menu)));
+ str_append(&help, "\n");
+ get_symbol_str(&help, menu->sym);
+ }
+ } else {
+ str_append(&help, nohelp_text);
+ }
+ show_scroll_win(main_window, _(menu_get_prompt(menu)), str_get(&help));
+ str_free(&help);
+}
+
+static void conf_choice(struct menu *menu)
+{
+ const char *prompt = _(menu_get_prompt(menu));
+ struct menu *child = 0;
+ struct symbol *active;
+ int selected_index = 0;
+ int last_top_row = 0;
+ int res, i = 0;
+
+ active = sym_get_choice_value(menu->sym);
+ /* this is mostly duplicated from the conf() function. */
+ while (!global_exit) {
+ reset_menu();
+
+ for (i = 0, child = menu->list; child; child = child->next) {
+ if (!show_all_items && !menu_is_visible(child))
+ continue;
+
+ if (child->sym == sym_get_choice_value(menu->sym))
+ item_make(child, ':', "<X> %s",
+ _(menu_get_prompt(child)));
+ else
+ item_make(child, ':', " %s",
+ _(menu_get_prompt(child)));
+ if (child->sym == active){
+ last_top_row = top_row(curses_menu);
+ selected_index = i;
+ }
+ i++;
+ }
+ show_menu(prompt ? _(prompt) : _("Choice Menu"),
+ _(radiolist_instructions),
+ selected_index,
+ &last_top_row);
+ while (!global_exit && (res = wgetch(menu_win(curses_menu)))) {
+ if (process_special_keys(
+ &res,
+ (struct menu *) item_data()))
+ break;
+ switch (res) {
+ case KEY_DOWN:
+ menu_driver(curses_menu, REQ_DOWN_ITEM);
+ break;
+ case KEY_UP:
+ menu_driver(curses_menu, REQ_UP_ITEM);
+ break;
+ case KEY_NPAGE:
+ menu_driver(curses_menu, REQ_SCR_DPAGE);
+ break;
+ case KEY_PPAGE:
+ menu_driver(curses_menu, REQ_SCR_UPAGE);
+ break;
+ case KEY_HOME:
+ menu_driver(curses_menu, REQ_FIRST_ITEM);
+ break;
+ case KEY_END:
+ menu_driver(curses_menu, REQ_LAST_ITEM);
+ break;
+ case 'h':
+ case '?':
+ show_help((struct menu *) item_data());
+ break;
+ }
+ if (res == 10 || res == 27 || res == ' ' ||
+ res == KEY_LEFT)
+ break;
+ else if (canbhot(res)) {
+ /* check for hot keys: */
+ int tmp = get_next_hot(res);
+ if (tmp != -1)
+ center_item(tmp, &last_top_row);
+ }
+ refresh_all_windows(main_window);
+ }
+ /* if ESC or left */
+ if (res == 27 || res == KEY_LEFT)
+ break;
+
+ child = item_data();
+ if (!child || !menu_is_visible(child))
+ continue;
+ switch (res) {
+ case ' ':
+ case 10:
+ case KEY_RIGHT:
+ sym_set_tristate_value(child->sym, yes);
+ return;
+ case 'h':
+ case '?':
+ show_help(child);
+ active = child->sym;
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+static void conf_string(struct menu *menu)
+{
+ const char *prompt = menu_get_prompt(menu);
+ char dialog_input_result[256];
+
+ while (1) {
+ int res;
+ const char *heading;
+
+ switch (sym_get_type(menu->sym)) {
+ case S_INT:
+ heading = _(inputbox_instructions_int);
+ break;
+ case S_HEX:
+ heading = _(inputbox_instructions_hex);
+ break;
+ case S_STRING:
+ heading = _(inputbox_instructions_string);
+ break;
+ default:
+ heading = _("Internal nconf error!");
+ }
+ res = dialog_inputbox(main_window,
+ prompt ? _(prompt) : _("Main Menu"),
+ heading,
+ sym_get_string_value(menu->sym),
+ dialog_input_result,
+ sizeof(dialog_input_result));
+ switch (res) {
+ case 0:
+ if (sym_set_string_value(menu->sym,
+ dialog_input_result))
+ return;
+ btn_dialog(main_window,
+ _("You have made an invalid entry."), 0);
+ break;
+ case 1:
+ show_help(menu);
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+static void conf_load(void)
+{
+ char dialog_input_result[256];
+ while (1) {
+ int res;
+ res = dialog_inputbox(main_window,
+ NULL, load_config_text,
+ filename,
+ dialog_input_result,
+ sizeof(dialog_input_result));
+ switch (res) {
+ case 0:
+ if (!dialog_input_result[0])
+ return;
+ if (!conf_read(dialog_input_result)) {
+ set_config_filename(dialog_input_result);
+ sym_set_change_count(1);
+ return;
+ }
+ btn_dialog(main_window, _("File does not exist!"), 0);
+ break;
+ case 1:
+ show_scroll_win(main_window,
+ _("Load Alternate Configuration"),
+ load_config_help);
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+static void conf_save(void)
+{
+ char dialog_input_result[256];
+ while (1) {
+ int res;
+ res = dialog_inputbox(main_window,
+ NULL, save_config_text,
+ filename,
+ dialog_input_result,
+ sizeof(dialog_input_result));
+ switch (res) {
+ case 0:
+ if (!dialog_input_result[0])
+ return;
+ supress_stdout(0);
+ res = conf_write(dialog_input_result);
+ supress_stdout(1);
+ if (!res) {
+ char buf[1024];
+ sprintf(buf, "%s %s",
+ _("configuration file saved to: "),
+ dialog_input_result);
+ btn_dialog(main_window,
+ buf, 1, "<OK>");
+ set_config_filename(dialog_input_result);
+ return;
+ }
+ btn_dialog(main_window, _("Can't create file! "
+ "Probably a nonexistent directory."),
+ 1, "<OK>");
+ break;
+ case 1:
+ show_scroll_win(main_window,
+ _("Save Alternate Configuration"),
+ save_config_help);
+ break;
+ case KEY_EXIT:
+ return;
+ }
+ }
+}
+
+void setup_windows(void)
+{
+ if (main_window != NULL)
+ delwin(main_window);
+
+ /* set up the menu and menu window */
+ main_window = newwin(LINES-2, COLS-2, 2, 1);
+ keypad(main_window, TRUE);
+ mwin_max_lines = LINES-6;
+ mwin_max_cols = COLS-6;
+
+ /* panels order is from bottom to top */
+ new_panel(main_window);
+}
+
+int main(int ac, char **av)
+{
+ char *mode;
+
+ setlocale(LC_ALL, "");
+ bindtextdomain(PACKAGE, LOCALEDIR);
+ textdomain(PACKAGE);
+
+ conf_parse(av[1]);
+ conf_read(NULL);
+
+ mode = getenv("NCONFIG_MODE");
+ if (mode) {
+ if (!strcasecmp(mode, "single_menu"))
+ single_menu_mode = 1;
+ }
+
+ /* Initialize curses */
+ initscr();
+ /* set color theme */
+ set_colors();
+
+ cbreak();
+ noecho();
+ keypad(stdscr, TRUE);
+ curs_set(0);
+
+ if (COLS < 75 || LINES < 20) {
+ endwin();
+ printf("Your terminal should have at "
+ "least 20 lines and 75 columns\n");
+ return 1;
+ }
+
+ notimeout(stdscr, FALSE);
+ ESCDELAY = 1;
+
+ /* set btns menu */
+ curses_menu = new_menu(curses_menu_items);
+ menu_opts_off(curses_menu, O_SHOWDESC);
+ menu_opts_off(curses_menu, O_SHOWMATCH);
+ menu_opts_on(curses_menu, O_ONEVALUE);
+ menu_opts_on(curses_menu, O_NONCYCLIC);
+ set_menu_mark(curses_menu, " ");
+ set_menu_fore(curses_menu, attributes[MAIN_MENU_FORE]);
+ set_menu_back(curses_menu, attributes[MAIN_MENU_BACK]);
+ set_menu_grey(curses_menu, attributes[MAIN_MENU_GREY]);
+
+ set_config_filename(conf_get_configname());
+ setup_windows();
+
+ /* check for KEY_FUNC(1) */
+ if (has_key(KEY_F(1)) == FALSE) {
+ show_scroll_win(main_window,
+ _("Instructions"),
+ _(menu_no_f_instructions));
+ }
+
+
+
+ /* do the work */
+ while (!global_exit) {
+ conf(&rootmenu);
+ if (!global_exit && do_exit() == 0)
+ break;
+ }
+ /* ok, we are done */
+ unpost_menu(curses_menu);
+ free_menu(curses_menu);
+ delwin(main_window);
+ clear();
+ refresh();
+ endwin();
+ return 0;
+}
+
diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c
new file mode 100644
index 000000000000..115edb437fb1
--- /dev/null
+++ b/scripts/kconfig/nconf.gui.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (C) 2008 Nir Tzachar <nir.tzachar@gmail.com?
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ * Derived from menuconfig.
+ *
+ */
+#include "nconf.h"
+
+/* a list of all the different widgets we use */
+attributes_t attributes[ATTR_MAX+1] = {0};
+
+/* available colors:
+ COLOR_BLACK 0
+ COLOR_RED 1
+ COLOR_GREEN 2
+ COLOR_YELLOW 3
+ COLOR_BLUE 4
+ COLOR_MAGENTA 5
+ COLOR_CYAN 6
+ COLOR_WHITE 7
+ */
+static void set_normal_colors(void)
+{
+ init_pair(NORMAL, -1, -1);
+ init_pair(MAIN_HEADING, COLOR_MAGENTA, -1);
+
+ /* FORE is for the selected item */
+ init_pair(MAIN_MENU_FORE, -1, -1);
+ /* BACK for all the rest */
+ init_pair(MAIN_MENU_BACK, -1, -1);
+ init_pair(MAIN_MENU_GREY, -1, -1);
+ init_pair(MAIN_MENU_HEADING, COLOR_GREEN, -1);
+ init_pair(MAIN_MENU_BOX, COLOR_YELLOW, -1);
+
+ init_pair(SCROLLWIN_TEXT, -1, -1);
+ init_pair(SCROLLWIN_HEADING, COLOR_GREEN, -1);
+ init_pair(SCROLLWIN_BOX, COLOR_YELLOW, -1);
+
+ init_pair(DIALOG_TEXT, -1, -1);
+ init_pair(DIALOG_BOX, COLOR_YELLOW, -1);
+ init_pair(DIALOG_MENU_BACK, COLOR_YELLOW, -1);
+ init_pair(DIALOG_MENU_FORE, COLOR_RED, -1);
+
+ init_pair(INPUT_BOX, COLOR_YELLOW, -1);
+ init_pair(INPUT_HEADING, COLOR_GREEN, -1);
+ init_pair(INPUT_TEXT, -1, -1);
+ init_pair(INPUT_FIELD, -1, -1);
+
+ init_pair(FUNCTION_HIGHLIGHT, -1, -1);
+ init_pair(FUNCTION_TEXT, COLOR_BLUE, -1);
+}
+
+/* available attributes:
+ A_NORMAL Normal display (no highlight)
+ A_STANDOUT Best highlighting mode of the terminal.
+ A_UNDERLINE Underlining
+ A_REVERSE Reverse video
+ A_BLINK Blinking
+ A_DIM Half bright
+ A_BOLD Extra bright or bold
+ A_PROTECT Protected mode
+ A_INVIS Invisible or blank mode
+ A_ALTCHARSET Alternate character set
+ A_CHARTEXT Bit-mask to extract a character
+ COLOR_PAIR(n) Color-pair number n
+ */
+static void normal_color_theme(void)
+{
+ /* automatically add color... */
+#define mkattr(name, attr) do { \
+attributes[name] = attr | COLOR_PAIR(name); } while (0)
+ mkattr(NORMAL, NORMAL);
+ mkattr(MAIN_HEADING, A_BOLD | A_UNDERLINE);
+
+ mkattr(MAIN_MENU_FORE, A_REVERSE);
+ mkattr(MAIN_MENU_BACK, A_NORMAL);
+ mkattr(MAIN_MENU_GREY, A_NORMAL);
+ mkattr(MAIN_MENU_HEADING, A_BOLD);
+ mkattr(MAIN_MENU_BOX, A_NORMAL);
+
+ mkattr(SCROLLWIN_TEXT, A_NORMAL);
+ mkattr(SCROLLWIN_HEADING, A_BOLD);
+ mkattr(SCROLLWIN_BOX, A_BOLD);
+
+ mkattr(DIALOG_TEXT, A_BOLD);
+ mkattr(DIALOG_BOX, A_BOLD);
+ mkattr(DIALOG_MENU_FORE, A_STANDOUT);
+ mkattr(DIALOG_MENU_BACK, A_NORMAL);
+
+ mkattr(INPUT_BOX, A_NORMAL);
+ mkattr(INPUT_HEADING, A_BOLD);
+ mkattr(INPUT_TEXT, A_NORMAL);
+ mkattr(INPUT_FIELD, A_UNDERLINE);
+
+ mkattr(FUNCTION_HIGHLIGHT, A_BOLD);
+ mkattr(FUNCTION_TEXT, A_REVERSE);
+}
+
+static void no_colors_theme(void)
+{
+ /* automatically add highlight, no color */
+#define mkattrn(name, attr) { attributes[name] = attr; }
+
+ mkattrn(NORMAL, NORMAL);
+ mkattrn(MAIN_HEADING, A_BOLD | A_UNDERLINE);
+
+ mkattrn(MAIN_MENU_FORE, A_STANDOUT);
+ mkattrn(MAIN_MENU_BACK, A_NORMAL);
+ mkattrn(MAIN_MENU_GREY, A_NORMAL);
+ mkattrn(MAIN_MENU_HEADING, A_BOLD);
+ mkattrn(MAIN_MENU_BOX, A_NORMAL);
+
+ mkattrn(SCROLLWIN_TEXT, A_NORMAL);
+ mkattrn(SCROLLWIN_HEADING, A_BOLD);
+ mkattrn(SCROLLWIN_BOX, A_BOLD);
+
+ mkattrn(DIALOG_TEXT, A_NORMAL);
+ mkattrn(DIALOG_BOX, A_BOLD);
+ mkattrn(DIALOG_MENU_FORE, A_STANDOUT);
+ mkattrn(DIALOG_MENU_BACK, A_NORMAL);
+
+ mkattrn(INPUT_BOX, A_BOLD);
+ mkattrn(INPUT_HEADING, A_BOLD);
+ mkattrn(INPUT_TEXT, A_NORMAL);
+ mkattrn(INPUT_FIELD, A_UNDERLINE);
+
+ mkattrn(FUNCTION_HIGHLIGHT, A_BOLD);
+ mkattrn(FUNCTION_TEXT, A_REVERSE);
+}
+
+void set_colors()
+{
+ start_color();
+ use_default_colors();
+ set_normal_colors();
+ if (has_colors()) {
+ normal_color_theme();
+ } else {
+ /* give deafults */
+ no_colors_theme();
+ }
+}
+
+
+/* this changes the windows attributes !!! */
+void print_in_middle(WINDOW *win,
+ int starty,
+ int startx,
+ int width,
+ const char *string,
+ chtype color)
+{ int length, x, y;
+ float temp;
+
+
+ if (win == NULL)
+ win = stdscr;
+ getyx(win, y, x);
+ if (startx != 0)
+ x = startx;
+ if (starty != 0)
+ y = starty;
+ if (width == 0)
+ width = 80;
+
+ length = strlen(string);
+ temp = (width - length) / 2;
+ x = startx + (int)temp;
+ wattrset(win, color);
+ mvwprintw(win, y, x, "%s", string);
+ refresh();
+}
+
+int get_line_no(const char *text)
+{
+ int i;
+ int total = 1;
+
+ if (!text)
+ return 0;
+
+ for (i = 0; text[i] != '\0'; i++)
+ if (text[i] == '\n')
+ total++;
+ return total;
+}
+
+const char *get_line(const char *text, int line_no)
+{
+ int i;
+ int lines = 0;
+
+ if (!text)
+ return 0;
+
+ for (i = 0; text[i] != '\0' && lines < line_no; i++)
+ if (text[i] == '\n')
+ lines++;
+ return text+i;
+}
+
+int get_line_length(const char *line)
+{
+ int res = 0;
+ while (*line != '\0' && *line != '\n') {
+ line++;
+ res++;
+ }
+ return res;
+}
+
+/* print all lines to the window. */
+void fill_window(WINDOW *win, const char *text)
+{
+ int x, y;
+ int total_lines = get_line_no(text);
+ int i;
+
+ getmaxyx(win, y, x);
+ /* do not go over end of line */
+ total_lines = min(total_lines, y);
+ for (i = 0; i < total_lines; i++) {
+ char tmp[x+10];
+ const char *line = get_line(text, i);
+ int len = get_line_length(line);
+ strncpy(tmp, line, min(len, x));
+ tmp[len] = '\0';
+ mvwprintw(win, i, 0, tmp);
+ }
+}
+
+/* get the message, and buttons.
+ * each button must be a char*
+ * return the selected button
+ *
+ * this dialog is used for 2 different things:
+ * 1) show a text box, no buttons.
+ * 2) show a dialog, with horizontal buttons
+ */
+int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...)
+{
+ va_list ap;
+ char *btn;
+ int btns_width = 0;
+ int msg_lines = 0;
+ int msg_width = 0;
+ int total_width;
+ int win_rows = 0;
+ WINDOW *win;
+ WINDOW *msg_win;
+ WINDOW *menu_win;
+ MENU *menu;
+ ITEM *btns[btn_num+1];
+ int i, x, y;
+ int res = -1;
+
+
+ va_start(ap, btn_num);
+ for (i = 0; i < btn_num; i++) {
+ btn = va_arg(ap, char *);
+ btns[i] = new_item(btn, "");
+ btns_width += strlen(btn)+1;
+ }
+ va_end(ap);
+ btns[btn_num] = NULL;
+
+ /* find the widest line of msg: */
+ msg_lines = get_line_no(msg);
+ for (i = 0; i < msg_lines; i++) {
+ const char *line = get_line(msg, i);
+ int len = get_line_length(line);
+ if (msg_width < len)
+ msg_width = len;
+ }
+
+ total_width = max(msg_width, btns_width);
+ /* place dialog in middle of screen */
+ y = (LINES-(msg_lines+4))/2;
+ x = (COLS-(total_width+4))/2;
+
+
+ /* create the windows */
+ if (btn_num > 0)
+ win_rows = msg_lines+4;
+ else
+ win_rows = msg_lines+2;
+
+ win = newwin(win_rows, total_width+4, y, x);
+ keypad(win, TRUE);
+ menu_win = derwin(win, 1, btns_width, win_rows-2,
+ 1+(total_width+2-btns_width)/2);
+ menu = new_menu(btns);
+ msg_win = derwin(win, win_rows-2, msg_width, 1,
+ 1+(total_width+2-msg_width)/2);
+
+ set_menu_fore(menu, attributes[DIALOG_MENU_FORE]);
+ set_menu_back(menu, attributes[DIALOG_MENU_BACK]);
+
+ wattrset(win, attributes[DIALOG_BOX]);
+ box(win, 0, 0);
+
+ /* print message */
+ wattrset(msg_win, attributes[DIALOG_TEXT]);
+ fill_window(msg_win, msg);
+
+ set_menu_win(menu, win);
+ set_menu_sub(menu, menu_win);
+ set_menu_format(menu, 1, btn_num);
+ menu_opts_off(menu, O_SHOWDESC);
+ menu_opts_off(menu, O_SHOWMATCH);
+ menu_opts_on(menu, O_ONEVALUE);
+ menu_opts_on(menu, O_NONCYCLIC);
+ set_menu_mark(menu, "");
+ post_menu(menu);
+
+
+ touchwin(win);
+ refresh_all_windows(main_window);
+ while ((res = wgetch(win))) {
+ switch (res) {
+ case KEY_LEFT:
+ menu_driver(menu, REQ_LEFT_ITEM);
+ break;
+ case KEY_RIGHT:
+ menu_driver(menu, REQ_RIGHT_ITEM);
+ break;
+ case 10: /* ENTER */
+ case 27: /* ESCAPE */
+ case ' ':
+ case KEY_F(F_BACK):
+ case KEY_F(F_EXIT):
+ break;
+ }
+ touchwin(win);
+ refresh_all_windows(main_window);
+
+ if (res == 10 || res == ' ') {
+ res = item_index(current_item(menu));
+ break;
+ } else if (res == 27 || res == KEY_F(F_BACK) ||
+ res == KEY_F(F_EXIT)) {
+ res = KEY_EXIT;
+ break;
+ }
+ }
+
+ unpost_menu(menu);
+ free_menu(menu);
+ for (i = 0; i < btn_num; i++)
+ free_item(btns[i]);
+
+ delwin(win);
+ return res;
+}
+
+int dialog_inputbox(WINDOW *main_window,
+ const char *title, const char *prompt,
+ const char *init, char *result, int result_len)
+{
+ int prompt_lines = 0;
+ int prompt_width = 0;
+ WINDOW *win;
+ WINDOW *prompt_win;
+ WINDOW *form_win;
+ PANEL *panel;
+ int i, x, y;
+ int res = -1;
+ int cursor_position = strlen(init);
+
+
+ /* find the widest line of msg: */
+ prompt_lines = get_line_no(prompt);
+ for (i = 0; i < prompt_lines; i++) {
+ const char *line = get_line(prompt, i);
+ int len = get_line_length(line);
+ prompt_width = max(prompt_width, len);
+ }
+
+ if (title)
+ prompt_width = max(prompt_width, strlen(title));
+
+ /* place dialog in middle of screen */
+ y = (LINES-(prompt_lines+4))/2;
+ x = (COLS-(prompt_width+4))/2;
+
+ strncpy(result, init, result_len);
+
+ /* create the windows */
+ win = newwin(prompt_lines+6, prompt_width+7, y, x);
+ prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2);
+ form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2);
+ keypad(form_win, TRUE);
+
+ wattrset(form_win, attributes[INPUT_FIELD]);
+
+ wattrset(win, attributes[INPUT_BOX]);
+ box(win, 0, 0);
+ wattrset(win, attributes[INPUT_HEADING]);
+ if (title)
+ mvwprintw(win, 0, 3, "%s", title);
+
+ /* print message */
+ wattrset(prompt_win, attributes[INPUT_TEXT]);
+ fill_window(prompt_win, prompt);
+
+ mvwprintw(form_win, 0, 0, "%*s", prompt_width, " ");
+ mvwprintw(form_win, 0, 0, "%s", result);
+
+ /* create panels */
+ panel = new_panel(win);
+
+ /* show the cursor */
+ curs_set(1);
+
+ touchwin(win);
+ refresh_all_windows(main_window);
+ while ((res = wgetch(form_win))) {
+ int len = strlen(result);
+ switch (res) {
+ case 10: /* ENTER */
+ case 27: /* ESCAPE */
+ case KEY_F(F_HELP):
+ case KEY_F(F_EXIT):
+ case KEY_F(F_BACK):
+ break;
+ case 127:
+ case KEY_BACKSPACE:
+ if (cursor_position > 0) {
+ memmove(&result[cursor_position-1],
+ &result[cursor_position],
+ len-cursor_position+1);
+ cursor_position--;
+ }
+ break;
+ case KEY_DC:
+ if (cursor_position >= 0 && cursor_position < len) {
+ memmove(&result[cursor_position],
+ &result[cursor_position+1],
+ len-cursor_position+1);
+ }
+ break;
+ case KEY_UP:
+ case KEY_RIGHT:
+ if (cursor_position < len &&
+ cursor_position < min(result_len, prompt_width))
+ cursor_position++;
+ break;
+ case KEY_DOWN:
+ case KEY_LEFT:
+ if (cursor_position > 0)
+ cursor_position--;
+ break;
+ default:
+ if ((isgraph(res) || isspace(res)) &&
+ len-2 < result_len) {
+ /* insert the char at the proper position */
+ memmove(&result[cursor_position+1],
+ &result[cursor_position],
+ len+1);
+ result[cursor_position] = res;
+ cursor_position++;
+ } else {
+ mvprintw(0, 0, "unknow key: %d\n", res);
+ }
+ break;
+ }
+ wmove(form_win, 0, 0);
+ wclrtoeol(form_win);
+ mvwprintw(form_win, 0, 0, "%*s", prompt_width, " ");
+ mvwprintw(form_win, 0, 0, "%s", result);
+ wmove(form_win, 0, cursor_position);
+ touchwin(win);
+ refresh_all_windows(main_window);
+
+ if (res == 10) {
+ res = 0;
+ break;
+ } else if (res == 27 || res == KEY_F(F_BACK) ||
+ res == KEY_F(F_EXIT)) {
+ res = KEY_EXIT;
+ break;
+ } else if (res == KEY_F(F_HELP)) {
+ res = 1;
+ break;
+ }
+ }
+
+ /* hide the cursor */
+ curs_set(0);
+ del_panel(panel);
+ delwin(prompt_win);
+ delwin(form_win);
+ delwin(win);
+ return res;
+}
+
+/* refresh all windows in the correct order */
+void refresh_all_windows(WINDOW *main_window)
+{
+ update_panels();
+ touchwin(main_window);
+ refresh();
+}
+
+/* layman's scrollable window... */
+void show_scroll_win(WINDOW *main_window,
+ const char *title,
+ const char *text)
+{
+ int res;
+ int total_lines = get_line_no(text);
+ int x, y;
+ int start_x = 0, start_y = 0;
+ int text_lines = 0, text_cols = 0;
+ int total_cols = 0;
+ int win_cols = 0;
+ int win_lines = 0;
+ int i = 0;
+ WINDOW *win;
+ WINDOW *pad;
+ PANEL *panel;
+
+ /* find the widest line of msg: */
+ total_lines = get_line_no(text);
+ for (i = 0; i < total_lines; i++) {
+ const char *line = get_line(text, i);
+ int len = get_line_length(line);
+ total_cols = max(total_cols, len+2);
+ }
+
+ /* create the pad */
+ pad = newpad(total_lines+10, total_cols+10);
+ wattrset(pad, attributes[SCROLLWIN_TEXT]);
+ fill_window(pad, text);
+
+ win_lines = min(total_lines+4, LINES-2);
+ win_cols = min(total_cols+2, COLS-2);
+ text_lines = max(win_lines-4, 0);
+ text_cols = max(win_cols-2, 0);
+
+ /* place window in middle of screen */
+ y = (LINES-win_lines)/2;
+ x = (COLS-win_cols)/2;
+
+ win = newwin(win_lines, win_cols, y, x);
+ keypad(win, TRUE);
+ /* show the help in the help window, and show the help panel */
+ wattrset(win, attributes[SCROLLWIN_BOX]);
+ box(win, 0, 0);
+ wattrset(win, attributes[SCROLLWIN_HEADING]);
+ mvwprintw(win, 0, 3, " %s ", title);
+ panel = new_panel(win);
+
+ /* handle scrolling */
+ do {
+
+ copywin(pad, win, start_y, start_x, 2, 2, text_lines,
+ text_cols, 0);
+ print_in_middle(win,
+ text_lines+2,
+ 0,
+ text_cols,
+ "<OK>",
+ attributes[DIALOG_MENU_FORE]);
+ wrefresh(win);
+
+ res = wgetch(win);
+ switch (res) {
+ case KEY_NPAGE:
+ case ' ':
+ start_y += text_lines-2;
+ break;
+ case KEY_PPAGE:
+ start_y -= text_lines+2;
+ break;
+ case KEY_HOME:
+ start_y = 0;
+ break;
+ case KEY_END:
+ start_y = total_lines-text_lines;
+ break;
+ case KEY_DOWN:
+ case 'j':
+ start_y++;
+ break;
+ case KEY_UP:
+ case 'k':
+ start_y--;
+ break;
+ case KEY_LEFT:
+ case 'h':
+ start_x--;
+ break;
+ case KEY_RIGHT:
+ case 'l':
+ start_x++;
+ break;
+ }
+ if (res == 10 || res == 27 || res == 'q'
+ || res == KEY_F(F_BACK) || res == KEY_F(F_EXIT)) {
+ break;
+ }
+ if (start_y < 0)
+ start_y = 0;
+ if (start_y >= total_lines-text_lines)
+ start_y = total_lines-text_lines;
+ if (start_x < 0)
+ start_x = 0;
+ if (start_x >= total_cols-text_cols)
+ start_x = total_cols-text_cols;
+ } while (res);
+
+ del_panel(panel);
+ delwin(win);
+ refresh_all_windows(main_window);
+}
diff --git a/scripts/kconfig/nconf.h b/scripts/kconfig/nconf.h
new file mode 100644
index 000000000000..fb4296666004
--- /dev/null
+++ b/scripts/kconfig/nconf.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2008 Nir Tzachar <nir.tzachar@gmail.com?
+ * Released under the terms of the GNU GPL v2.0.
+ *
+ * Derived from menuconfig.
+ *
+ */
+
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <locale.h>
+#include <curses.h>
+#include <menu.h>
+#include <panel.h>
+#include <form.h>
+
+#include <stdio.h>
+#include <time.h>
+#include <sys/time.h>
+
+#include "ncurses.h"
+
+#define max(a, b) ({\
+ typeof(a) _a = a;\
+ typeof(b) _b = b;\
+ _a > _b ? _a : _b; })
+
+#define min(a, b) ({\
+ typeof(a) _a = a;\
+ typeof(b) _b = b;\
+ _a < _b ? _a : _b; })
+
+typedef enum {
+ NORMAL = 1,
+ MAIN_HEADING,
+ MAIN_MENU_BOX,
+ MAIN_MENU_FORE,
+ MAIN_MENU_BACK,
+ MAIN_MENU_GREY,
+ MAIN_MENU_HEADING,
+ SCROLLWIN_TEXT,
+ SCROLLWIN_HEADING,
+ SCROLLWIN_BOX,
+ DIALOG_TEXT,
+ DIALOG_MENU_FORE,
+ DIALOG_MENU_BACK,
+ DIALOG_BOX,
+ INPUT_BOX,
+ INPUT_HEADING,
+ INPUT_TEXT,
+ INPUT_FIELD,
+ FUNCTION_TEXT,
+ FUNCTION_HIGHLIGHT,
+ ATTR_MAX
+} attributes_t;
+extern attributes_t attributes[];
+
+typedef enum {
+ F_HELP = 1,
+ F_SYMBOL = 2,
+ F_INSTS = 3,
+ F_CONF = 4,
+ F_BACK = 5,
+ F_SAVE = 6,
+ F_LOAD = 7,
+ F_EXIT = 8
+} function_key;
+
+void set_colors(void);
+
+/* this changes the windows attributes !!! */
+void print_in_middle(WINDOW *win,
+ int starty,
+ int startx,
+ int width,
+ const char *string,
+ chtype color);
+int get_line_length(const char *line);
+int get_line_no(const char *text);
+const char *get_line(const char *text, int line_no);
+void fill_window(WINDOW *win, const char *text);
+int btn_dialog(WINDOW *main_window, const char *msg, int btn_num, ...);
+int dialog_inputbox(WINDOW *main_window,
+ const char *title, const char *prompt,
+ const char *init, char *result, int result_len);
+void refresh_all_windows(WINDOW *main_window);
+void show_scroll_win(WINDOW *main_window,
+ const char *title,
+ const char *text);
diff --git a/scripts/kconfig/util.c b/scripts/kconfig/util.c
index b6b2a46af14c..81c100d953ef 100644
--- a/scripts/kconfig/util.c
+++ b/scripts/kconfig/util.c
@@ -78,6 +78,7 @@ struct gstr str_new(void)
struct gstr gs;
gs.s = malloc(sizeof(char) * 64);
gs.len = 64;
+ gs.max_width = 0;
strcpy(gs.s, "\0");
return gs;
}
@@ -88,6 +89,7 @@ struct gstr str_assign(const char *s)
struct gstr gs;
gs.s = strdup(s);
gs.len = strlen(s) + 1;
+ gs.max_width = 0;
return gs;
}
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 6e9dcd59aa87..8a0867a32b0f 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -2220,7 +2220,7 @@ void conf_parse(const char *name)
zconf_initscan(name);
sym_init();
- menu_init();
+ _menu_init();
modules_sym = sym_lookup(NULL, 0);
modules_sym->type = S_BOOLEAN;
modules_sym->flags |= SYMBOL_AUTO;
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 8c43491f8cc9..361b54318c8d 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -475,7 +475,7 @@ void conf_parse(const char *name)
zconf_initscan(name);
sym_init();
- menu_init();
+ _menu_init();
modules_sym = sym_lookup(NULL, 0);
modules_sym->type = S_BOOLEAN;
modules_sym->flags |= SYMBOL_AUTO;
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 241310e59cd6..208ad3b0ca51 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -13,8 +13,6 @@ use strict;
## This software falls under the GNU General Public License. ##
## Please read the COPYING file for more information ##
-# w.o. 03-11-2000: added the '-filelist' option.
-
# 18/01/2001 - Cleanups
# Functions prototyped as foo(void) same as foo()
# Stop eval'ing where we don't need to.
@@ -245,7 +243,7 @@ my $man_date = ('January', 'February', 'March', 'April', 'May', 'June',
# could cause "use of undefined value" or other bugs.
my ($function, %function_table, %parametertypes, $declaration_purpose);
my ($type, $declaration_name, $return_type);
-my ($newsection, $newcontents, $prototype, $filelist, $brcount, %source_map);
+my ($newsection, $newcontents, $prototype, $brcount, %source_map);
if (defined($ENV{'KBUILD_VERBOSE'})) {
$verbose = "$ENV{'KBUILD_VERBOSE'}";
@@ -338,8 +336,6 @@ while ($ARGV[0] =~ m/^-(.*)/) {
$verbose = 1;
} elsif (($cmd eq "-h") || ($cmd eq "--help")) {
usage();
- } elsif ($cmd eq '-filelist') {
- $filelist = shift @ARGV;
} elsif ($cmd eq '-no-doc-sections') {
$no_doc_sections = 1;
}
@@ -1811,14 +1807,6 @@ if (open(SOURCE_MAP, "<.tmp_filelist.txt")) {
close(SOURCE_MAP);
}
-if ($filelist) {
- open(FLIST,"<$filelist") or die "Can't open file list $filelist";
- while(<FLIST>) {
- chop;
- process_file($_);
- }
-}
-
foreach (@ARGV) {
chomp;
process_file($_);
@@ -2023,6 +2011,8 @@ sub process_file($) {
return;
}
+ $. = 1;
+
$section_counter = 0;
while (<IN>) {
if ($state == 0) {
diff --git a/security/capability.c b/security/capability.c
index 5c700e1a4fd3..10f23a4c84e5 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -466,7 +466,8 @@ static int cap_task_getioprio(struct task_struct *p)
return 0;
}
-static int cap_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+static int cap_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
{
return 0;
}
diff --git a/security/security.c b/security/security.c
index 24e060be9fa5..1d06fa77642f 100644
--- a/security/security.c
+++ b/security/security.c
@@ -389,42 +389,42 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
EXPORT_SYMBOL(security_inode_init_security);
#ifdef CONFIG_SECURITY_PATH
-int security_path_mknod(struct path *path, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, int mode,
unsigned int dev)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mknod(path, dentry, mode, dev);
+ return security_ops->path_mknod(dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
-int security_path_mkdir(struct path *path, struct dentry *dentry, int mode)
+int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mkdir(path, dentry, mode);
+ return security_ops->path_mkdir(dir, dentry, mode);
}
-int security_path_rmdir(struct path *path, struct dentry *dentry)
+int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_rmdir(path, dentry);
+ return security_ops->path_rmdir(dir, dentry);
}
-int security_path_unlink(struct path *path, struct dentry *dentry)
+int security_path_unlink(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_unlink(path, dentry);
+ return security_ops->path_unlink(dir, dentry);
}
-int security_path_symlink(struct path *path, struct dentry *dentry,
+int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_symlink(path, dentry, old_name);
+ return security_ops->path_symlink(dir, dentry, old_name);
}
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -655,7 +655,13 @@ void security_inode_getsecid(const struct inode *inode, u32 *secid)
int security_file_permission(struct file *file, int mask)
{
- return security_ops->file_permission(file, mask);
+ int ret;
+
+ ret = security_ops->file_permission(file, mask);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, mask);
}
int security_file_alloc(struct file *file)
@@ -721,7 +727,13 @@ int security_file_receive(struct file *file)
int security_dentry_open(struct file *file, const struct cred *cred)
{
- return security_ops->dentry_open(file, cred);
+ int ret;
+
+ ret = security_ops->dentry_open(file, cred);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, MAY_OPEN);
}
int security_task_create(unsigned long clone_flags)
@@ -826,9 +838,10 @@ int security_task_getioprio(struct task_struct *p)
return security_ops->task_getioprio(p);
}
-int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
{
- return security_ops->task_setrlimit(resource, new_rlim);
+ return security_ops->task_setrlimit(p, resource, new_rlim);
}
int security_task_setscheduler(struct task_struct *p,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 9a2ee845e9d4..9688ccc73c00 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2365,7 +2365,8 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
initrlim = init_task.signal->rlim + i;
rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
}
- update_rlimit_cpu(current->signal->rlim[RLIMIT_CPU].rlim_cur);
+ update_rlimit_cpu(current,
+ current->signal->rlim[RLIMIT_CPU].rlim_cur);
}
}
@@ -3398,16 +3399,17 @@ static int selinux_task_getioprio(struct task_struct *p)
return current_has_perm(p, PROCESS__GETSCHED);
}
-static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
{
- struct rlimit *old_rlim = current->signal->rlim + resource;
+ struct rlimit *old_rlim = p->signal->rlim + resource;
/* Control the ability to change the hard limit (whether
lowering or raising it), so that the hard limit can
later be used as a safe reset point for the soft limit
upon context transitions. See selinux_bprm_committing_creds. */
if (old_rlim->rlim_max != new_rlim->rlim_max)
- return current_has_perm(current, PROCESS__SETRLIMIT);
+ return current_has_perm(p, PROCESS__SETRLIMIT);
return 0;
}
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 3f2b2706b5bb..e6654b543aed 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -541,8 +541,8 @@ int mls_compute_sid(struct context *scontext,
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
- default:
- return -EINVAL;
+
+ /* fall through */
}
return -EINVAL;
}
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b3efae204ac7..c1054a12eb43 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2555,7 +2555,7 @@ int security_get_classes(char ***classes, int *nclasses)
read_lock(&policy_rwlock);
*nclasses = policydb.p_classes.nprim;
- *classes = kcalloc(*nclasses, sizeof(*classes), GFP_ATOMIC);
+ *classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
if (!*classes)
goto out;
@@ -2602,7 +2602,7 @@ int security_get_permissions(char *class, char ***perms, int *nperms)
}
*nperms = match->permissions.nprim;
- *perms = kcalloc(*nperms, sizeof(*perms), GFP_ATOMIC);
+ *perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
if (!*perms)
goto out;
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index e0d0354008b7..e331e699cf54 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -16,6 +16,9 @@
#include "common.h"
#include "tomoyo.h"
+/* Lock for protecting policy. */
+DEFINE_MUTEX(tomoyo_policy_lock);
+
/* Has loading policy done? */
bool tomoyo_policy_loaded;
@@ -365,10 +368,9 @@ bool tomoyo_is_domain_def(const unsigned char *buffer)
*
* @domainname: The domainname to find.
*
- * Caller must call down_read(&tomoyo_domain_list_lock); or
- * down_write(&tomoyo_domain_list_lock); .
- *
* Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
{
@@ -377,7 +379,7 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
if (!domain->is_deleted &&
!tomoyo_pathcmp(&name, domain->domainname))
return domain;
@@ -829,6 +831,8 @@ bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain)
* @domain: Pointer to "struct tomoyo_domain_info".
*
* Returns true if the domain is not exceeded quota, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
{
@@ -837,61 +841,34 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
if (!domain)
return true;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
if (ptr->type & TOMOYO_ACL_DELETED)
continue;
switch (tomoyo_acl_type2(ptr)) {
- struct tomoyo_single_path_acl_record *acl1;
- struct tomoyo_double_path_acl_record *acl2;
- u16 perm;
+ struct tomoyo_single_path_acl_record *acl;
+ u32 perm;
+ u8 i;
case TOMOYO_TYPE_SINGLE_PATH_ACL:
- acl1 = container_of(ptr,
- struct tomoyo_single_path_acl_record,
- head);
- perm = acl1->perm;
- if (perm & (1 << TOMOYO_TYPE_EXECUTE_ACL))
- count++;
- if (perm &
- ((1 << TOMOYO_TYPE_READ_ACL) |
- (1 << TOMOYO_TYPE_WRITE_ACL)))
- count++;
- if (perm & (1 << TOMOYO_TYPE_CREATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_UNLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RMDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKFIFO_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKSOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKBLOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKCHAR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_TRUNCATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_SYMLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_REWRITE_ACL))
- count++;
+ acl = container_of(ptr,
+ struct tomoyo_single_path_acl_record,
+ head);
+ perm = acl->perm | (((u32) acl->perm_high) << 16);
+ for (i = 0; i < TOMOYO_MAX_SINGLE_PATH_OPERATION; i++)
+ if (perm & (1 << i))
+ count++;
+ if (perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL))
+ count -= 2;
break;
case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- acl2 = container_of(ptr,
+ perm = container_of(ptr,
struct tomoyo_double_path_acl_record,
- head);
- perm = acl2->perm;
- if (perm & (1 << TOMOYO_TYPE_LINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RENAME_ACL))
- count++;
+ head)->perm;
+ for (i = 0; i < TOMOYO_MAX_DOUBLE_PATH_OPERATION; i++)
+ if (perm & (1 << i))
+ count++;
break;
}
}
- up_read(&tomoyo_domain_acl_info_list_lock);
if (count < tomoyo_check_flags(domain, TOMOYO_MAX_ACCEPT_ENTRY))
return true;
if (!domain->quota_warned) {
@@ -923,9 +900,11 @@ static struct tomoyo_profile *tomoyo_find_or_assign_new_profile(const unsigned
ptr = tomoyo_profile_ptr[profile];
if (ptr)
goto ok;
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
+ ptr = kmalloc(sizeof(*ptr), GFP_KERNEL);
+ if (!tomoyo_memory_ok(ptr)) {
+ kfree(ptr);
goto ok;
+ }
for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++)
ptr->value[i] = tomoyo_control_array[i].current_value;
mb(); /* Avoid out-of-order execution. */
@@ -1112,7 +1091,6 @@ struct tomoyo_policy_manager_entry {
* # cat /sys/kernel/security/tomoyo/manager
*/
static LIST_HEAD(tomoyo_policy_manager_list);
-static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
/**
* tomoyo_update_manager_entry - Add a manager entry.
@@ -1121,6 +1099,8 @@ static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_manager_entry(const char *manager,
const bool is_delete)
@@ -1142,8 +1122,9 @@ static int tomoyo_update_manager_entry(const char *manager,
saved_manager = tomoyo_save_name(manager);
if (!saved_manager)
return -ENOMEM;
- down_write(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (ptr->manager != saved_manager)
continue;
ptr->is_deleted = is_delete;
@@ -1154,15 +1135,16 @@ static int tomoyo_update_manager_entry(const char *manager,
error = -ENOENT;
goto out;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
+ if (!tomoyo_memory_ok(new_entry))
goto out;
new_entry->manager = saved_manager;
new_entry->is_domain = is_domain;
- list_add_tail(&new_entry->list, &tomoyo_policy_manager_list);
+ list_add_tail_rcu(&new_entry->list, &tomoyo_policy_manager_list);
+ new_entry = NULL;
error = 0;
out:
- up_write(&tomoyo_policy_manager_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(new_entry);
return error;
}
@@ -1172,6 +1154,8 @@ static int tomoyo_update_manager_entry(const char *manager,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
{
@@ -1191,6 +1175,8 @@ static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
{
@@ -1199,7 +1185,6 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
if (head->read_eof)
return 0;
- down_read(&tomoyo_policy_manager_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_policy_manager_list) {
struct tomoyo_policy_manager_entry *ptr;
@@ -1211,7 +1196,6 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_policy_manager_list_lock);
head->read_eof = done;
return 0;
}
@@ -1221,6 +1205,8 @@ static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
*
* Returns true if the current process is permitted to modify policy
* via /sys/kernel/security/tomoyo/ interface.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_policy_manager(void)
{
@@ -1234,29 +1220,25 @@ static bool tomoyo_is_policy_manager(void)
return true;
if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid))
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (!ptr->is_deleted && ptr->is_domain
&& !tomoyo_pathcmp(domainname, ptr->manager)) {
found = true;
break;
}
}
- up_read(&tomoyo_policy_manager_list_lock);
if (found)
return true;
exe = tomoyo_get_exe();
if (!exe)
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_policy_manager_list, list) {
if (!ptr->is_deleted && !ptr->is_domain
&& !strcmp(exe, ptr->manager->name)) {
found = true;
break;
}
}
- up_read(&tomoyo_policy_manager_list_lock);
if (!found) { /* Reduce error messages. */
static pid_t last_pid;
const pid_t pid = current->pid;
@@ -1277,6 +1259,8 @@ static bool tomoyo_is_policy_manager(void)
* @data: String to parse.
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
const char *data)
@@ -1292,11 +1276,8 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
domain = tomoyo_real_domain(p);
read_unlock(&tasklist_lock);
} else if (!strncmp(data, "domain=", 7)) {
- if (tomoyo_is_domain_def(data + 7)) {
- down_read(&tomoyo_domain_list_lock);
+ if (tomoyo_is_domain_def(data + 7))
domain = tomoyo_find_domain(data + 7);
- up_read(&tomoyo_domain_list_lock);
- }
} else
return false;
head->write_var1 = domain;
@@ -1310,13 +1291,11 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
if (domain) {
struct tomoyo_domain_info *d;
head->read_var1 = NULL;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(d, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(d, &tomoyo_domain_list, list) {
if (d == domain)
break;
head->read_var1 = &d->list;
}
- up_read(&tomoyo_domain_list_lock);
head->read_var2 = NULL;
head->read_bit = 0;
head->read_step = 0;
@@ -1332,6 +1311,8 @@ static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
* @domainname: The name of domain.
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_delete_domain(char *domainname)
{
@@ -1340,9 +1321,9 @@ static int tomoyo_delete_domain(char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- down_write(&tomoyo_domain_list_lock);
+ mutex_lock(&tomoyo_policy_lock);
/* Is there an active domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
@@ -1352,7 +1333,7 @@ static int tomoyo_delete_domain(char *domainname)
domain->is_deleted = true;
break;
}
- up_write(&tomoyo_domain_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return 0;
}
@@ -1362,6 +1343,8 @@ static int tomoyo_delete_domain(char *domainname)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
{
@@ -1384,11 +1367,9 @@ static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
domain = NULL;
if (is_delete)
tomoyo_delete_domain(data);
- else if (is_select) {
- down_read(&tomoyo_domain_list_lock);
+ else if (is_select)
domain = tomoyo_find_domain(data);
- up_read(&tomoyo_domain_list_lock);
- } else
+ else
domain = tomoyo_find_or_assign_new_domain(data, 0);
head->write_var1 = domain;
return 0;
@@ -1426,7 +1407,7 @@ static bool tomoyo_print_single_path_acl(struct tomoyo_io_buffer *head,
u8 bit;
const char *atmark = "";
const char *filename;
- const u16 perm = ptr->perm;
+ const u32 perm = ptr->perm | (((u32) ptr->perm_high) << 16);
filename = ptr->filename->name;
for (bit = head->read_bit; bit < TOMOYO_MAX_SINGLE_PATH_OPERATION;
@@ -1533,6 +1514,8 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
{
@@ -1544,7 +1527,6 @@ static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
return 0;
if (head->read_step == 0)
head->read_step = 1;
- down_read(&tomoyo_domain_list_lock);
list_for_each_cookie(dpos, head->read_var1, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain;
const char *quota_exceeded = "";
@@ -1577,7 +1559,6 @@ acl_loop:
if (head->read_step == 3)
goto tail_mark;
/* Print ACL entries in the domain. */
- down_read(&tomoyo_domain_acl_info_list_lock);
list_for_each_cookie(apos, head->read_var2,
&domain->acl_info_list) {
struct tomoyo_acl_info *ptr
@@ -1587,7 +1568,6 @@ acl_loop:
if (!done)
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
if (!done)
break;
head->read_step = 3;
@@ -1599,7 +1579,6 @@ tail_mark:
if (head->read_single_domain)
break;
}
- up_read(&tomoyo_domain_list_lock);
head->read_eof = done;
return 0;
}
@@ -1615,6 +1594,8 @@ tail_mark:
*
* ( echo "select " $domainname; echo "use_profile " $profile ) |
* /usr/lib/ccs/loadpolicy -d
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
{
@@ -1626,9 +1607,7 @@ static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
if (!cp)
return -EINVAL;
*cp = '\0';
- down_read(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(cp + 1);
- up_read(&tomoyo_domain_list_lock);
if (strict_strtoul(data, 10, &profile))
return -EINVAL;
if (domain && profile < TOMOYO_MAX_PROFILES
@@ -1650,6 +1629,8 @@ static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
* awk ' { if ( domainname == "" ) { if ( $1 == "<kernel>" )
* domainname = $0; } else if ( $1 == "use_profile" ) {
* print $2 " " domainname; domainname = ""; } } ; '
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
{
@@ -1658,7 +1639,6 @@ static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
if (head->read_eof)
return 0;
- down_read(&tomoyo_domain_list_lock);
list_for_each_cookie(pos, head->read_var1, &tomoyo_domain_list) {
struct tomoyo_domain_info *domain;
domain = list_entry(pos, struct tomoyo_domain_info, list);
@@ -1669,7 +1649,6 @@ static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_domain_list_lock);
head->read_eof = done;
return 0;
}
@@ -1726,6 +1705,8 @@ static int tomoyo_read_pid(struct tomoyo_io_buffer *head)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
{
@@ -1760,6 +1741,8 @@ static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, -EINVAL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_exception_policy(struct tomoyo_io_buffer *head)
{
@@ -1889,15 +1872,13 @@ void tomoyo_load_policy(const char *filename)
tomoyo_policy_loaded = true;
{ /* Check all profiles currently assigned to domains are defined. */
struct tomoyo_domain_info *domain;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
const u8 profile = domain->profile;
if (tomoyo_profile_ptr[profile])
continue;
panic("Profile %u (used by '%s') not defined.\n",
profile, domain->domainname->name);
}
- up_read(&tomoyo_domain_list_lock);
}
}
@@ -1945,6 +1926,8 @@ static int tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
* @file: Pointer to "struct file".
*
* Associates policy handler and returns 0 on success, -ENOMEM otherwise.
+ *
+ * Caller acquires tomoyo_read_lock().
*/
static int tomoyo_open_control(const u8 type, struct file *file)
{
@@ -2030,6 +2013,7 @@ static int tomoyo_open_control(const u8 type, struct file *file)
return -ENOMEM;
}
}
+ head->reader_idx = tomoyo_read_lock();
file->private_data = head;
/*
* Call the handler now if the file is
@@ -2051,6 +2035,8 @@ static int tomoyo_open_control(const u8 type, struct file *file)
* @buffer_len: Size of @buffer.
*
* Returns bytes read on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_read_control(struct file *file, char __user *buffer,
const int buffer_len)
@@ -2094,6 +2080,8 @@ static int tomoyo_read_control(struct file *file, char __user *buffer,
* @buffer_len: Size of @buffer.
*
* Returns @buffer_len on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_write_control(struct file *file, const char __user *buffer,
const int buffer_len)
@@ -2144,11 +2132,14 @@ static int tomoyo_write_control(struct file *file, const char __user *buffer,
* @file: Pointer to "struct file".
*
* Releases memory and returns 0.
+ *
+ * Caller looses tomoyo_read_lock().
*/
static int tomoyo_close_control(struct file *file)
{
struct tomoyo_io_buffer *head = file->private_data;
+ tomoyo_read_unlock(head->reader_idx);
/* Release memory used for policy I/O. */
tomoyo_free(head->read_buf);
head->read_buf = NULL;
@@ -2161,35 +2152,6 @@ static int tomoyo_close_control(struct file *file)
}
/**
- * tomoyo_alloc_acl_element - Allocate permanent memory for ACL entry.
- *
- * @acl_type: Type of ACL entry.
- *
- * Returns pointer to the ACL entry on success, NULL otherwise.
- */
-void *tomoyo_alloc_acl_element(const u8 acl_type)
-{
- int len;
- struct tomoyo_acl_info *ptr;
-
- switch (acl_type) {
- case TOMOYO_TYPE_SINGLE_PATH_ACL:
- len = sizeof(struct tomoyo_single_path_acl_record);
- break;
- case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- len = sizeof(struct tomoyo_double_path_acl_record);
- break;
- default:
- return NULL;
- }
- ptr = tomoyo_alloc_element(len);
- if (!ptr)
- return NULL;
- ptr->type = acl_type;
- return ptr;
-}
-
-/**
* tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
*
* @inode: Pointer to "struct inode".
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 92169d29b2db..610a6a056828 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -108,7 +108,7 @@ struct tomoyo_path_info_with_data {
* (b) type & 0x80 : whether the entry is marked as "deleted".
*
* Packing "struct tomoyo_acl_info" allows
- * "struct tomoyo_single_path_acl_record" to embed "u16" and
+ * "struct tomoyo_single_path_acl_record" to embed "u8" + "u16" and
* "struct tomoyo_double_path_acl_record" to embed "u8"
* without enlarging their structure size.
*/
@@ -184,10 +184,13 @@ struct tomoyo_domain_info {
* Directives held by this structure are "allow_read/write", "allow_execute",
* "allow_read", "allow_write", "allow_create", "allow_unlink", "allow_mkdir",
* "allow_rmdir", "allow_mkfifo", "allow_mksock", "allow_mkblock",
- * "allow_mkchar", "allow_truncate", "allow_symlink" and "allow_rewrite".
+ * "allow_mkchar", "allow_truncate", "allow_symlink", "allow_rewrite",
+ * "allow_chmod", "allow_chown", "allow_chgrp", "allow_chroot", "allow_mount"
+ * and "allow_unmount".
*/
struct tomoyo_single_path_acl_record {
struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_SINGLE_PATH_ACL */
+ u8 perm_high;
u16 perm;
/* Pointer to single pathname. */
const struct tomoyo_path_info *filename;
@@ -195,7 +198,7 @@ struct tomoyo_single_path_acl_record {
/*
* tomoyo_double_path_acl_record is a structure which is used for holding an
- * entry with two pathnames operation (i.e. link() and rename()).
+ * entry with two pathnames operation (i.e. link(), rename() and pivot_root()).
* It has following fields.
*
* (1) "head" which is a "struct tomoyo_acl_info".
@@ -203,7 +206,8 @@ struct tomoyo_single_path_acl_record {
* (3) "filename1" is the source/old pathname.
* (4) "filename2" is the destination/new pathname.
*
- * Directives held by this structure are "allow_rename" and "allow_link".
+ * Directives held by this structure are "allow_rename", "allow_link" and
+ * "allow_pivot_root".
*/
struct tomoyo_double_path_acl_record {
struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_DOUBLE_PATH_ACL */
@@ -265,6 +269,8 @@ struct tomoyo_io_buffer {
int (*write) (struct tomoyo_io_buffer *);
/* Exclusive lock for this structure. */
struct mutex io_sem;
+ /* Index returned by tomoyo_read_lock(). */
+ int reader_idx;
/* The position currently reading from. */
struct list_head *read_var1;
/* Extra variables for reading. */
@@ -370,8 +376,6 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
/* Check mode for specified functionality. */
unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
const u8 index);
-/* Allocate memory for structures. */
-void *tomoyo_alloc_acl_element(const u8 acl_type);
/* Fill in "struct tomoyo_path_info" members. */
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
/* Run policy loader when /sbin/init starts. */
@@ -425,10 +429,9 @@ static inline bool tomoyo_is_invalid(const unsigned char c)
/* The list for "struct tomoyo_domain_info". */
extern struct list_head tomoyo_domain_list;
-extern struct rw_semaphore tomoyo_domain_list_lock;
-/* Lock for domain->acl_info_list. */
-extern struct rw_semaphore tomoyo_domain_acl_info_list_lock;
+/* Lock for protecting policy. */
+extern struct mutex tomoyo_policy_lock;
/* Has /sbin/init started? */
extern bool tomoyo_policy_loaded;
@@ -442,16 +445,28 @@ extern struct tomoyo_domain_info tomoyo_kernel_domain;
* @cookie: the &struct list_head to use as a cookie.
* @head: the head for your list.
*
- * Same with list_for_each() except that this primitive uses @cookie
+ * Same with list_for_each_rcu() except that this primitive uses @cookie
* so that we can continue iteration.
* @cookie must be NULL when iteration starts, and @cookie will become
* NULL when iteration finishes.
*/
-#define list_for_each_cookie(pos, cookie, head) \
- for (({ if (!cookie) \
- cookie = head; }), \
- pos = (cookie)->next; \
- prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
- (cookie) = pos, pos = pos->next)
+#define list_for_each_cookie(pos, cookie, head) \
+ for (({ if (!cookie) \
+ cookie = head; }), \
+ pos = rcu_dereference((cookie)->next); \
+ prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
+ (cookie) = pos, pos = rcu_dereference(pos->next))
+
+extern struct srcu_struct tomoyo_ss;
+
+static inline int tomoyo_read_lock(void)
+{
+ return srcu_read_lock(&tomoyo_ss);
+}
+
+static inline void tomoyo_read_unlock(int idx)
+{
+ srcu_read_unlock(&tomoyo_ss, idx);
+}
#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index fcf52accce2b..a55a1cced58e 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -58,7 +58,6 @@ struct tomoyo_domain_info tomoyo_kernel_domain;
* exceptions.
*/
LIST_HEAD(tomoyo_domain_list);
-DECLARE_RWSEM(tomoyo_domain_list_lock);
/*
* tomoyo_domain_initializer_entry is a structure which is used for holding
@@ -206,7 +205,6 @@ const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain)
* unless executed from "<kernel> /etc/rc.d/init.d/httpd" domain.
*/
static LIST_HEAD(tomoyo_domain_initializer_list);
-static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
/**
* tomoyo_update_domain_initializer_entry - Update "struct tomoyo_domain_initializer_entry" list.
@@ -217,6 +215,8 @@ static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_domain_initializer_entry(const char *domainname,
const char *program,
@@ -245,8 +245,9 @@ static int tomoyo_update_domain_initializer_entry(const char *domainname,
saved_program = tomoyo_save_name(program);
if (!saved_program)
return -ENOMEM;
- down_write(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
@@ -259,17 +260,18 @@ static int tomoyo_update_domain_initializer_entry(const char *domainname,
error = -ENOENT;
goto out;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
+ if (!tomoyo_memory_ok(new_entry))
goto out;
new_entry->domainname = saved_domainname;
new_entry->program = saved_program;
new_entry->is_not = is_not;
new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_initializer_list);
+ list_add_tail_rcu(&new_entry->list, &tomoyo_domain_initializer_list);
+ new_entry = NULL;
error = 0;
out:
- up_write(&tomoyo_domain_initializer_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(new_entry);
return error;
}
@@ -279,13 +281,14 @@ static int tomoyo_update_domain_initializer_entry(const char *domainname,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_domain_initializer_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_initializer_list) {
const char *no;
@@ -308,7 +311,6 @@ bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_domain_initializer_list_lock);
return done;
}
@@ -320,6 +322,8 @@ bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
const bool is_delete)
@@ -345,6 +349,8 @@ int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
*
* Returns true if executing @program reinitializes domain transition,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
domainname,
@@ -355,8 +361,7 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
struct tomoyo_domain_initializer_entry *ptr;
bool flag = false;
- down_read(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_initializer_list, list) {
if (ptr->is_deleted)
continue;
if (ptr->domainname) {
@@ -376,7 +381,6 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
}
flag = true;
}
- up_read(&tomoyo_domain_initializer_list_lock);
return flag;
}
@@ -419,7 +423,6 @@ static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
* explicitly specified by "initialize_domain".
*/
static LIST_HEAD(tomoyo_domain_keeper_list);
-static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
/**
* tomoyo_update_domain_keeper_entry - Update "struct tomoyo_domain_keeper_entry" list.
@@ -430,6 +433,8 @@ static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_domain_keeper_entry(const char *domainname,
const char *program,
@@ -458,8 +463,9 @@ static int tomoyo_update_domain_keeper_entry(const char *domainname,
saved_domainname = tomoyo_save_name(domainname);
if (!saved_domainname)
return -ENOMEM;
- down_write(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_not != is_not ||
ptr->domainname != saved_domainname ||
ptr->program != saved_program)
@@ -472,17 +478,18 @@ static int tomoyo_update_domain_keeper_entry(const char *domainname,
error = -ENOENT;
goto out;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
+ if (!tomoyo_memory_ok(new_entry))
goto out;
new_entry->domainname = saved_domainname;
new_entry->program = saved_program;
new_entry->is_not = is_not;
new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_keeper_list);
+ list_add_tail_rcu(&new_entry->list, &tomoyo_domain_keeper_list);
+ new_entry = NULL;
error = 0;
out:
- up_write(&tomoyo_domain_keeper_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(new_entry);
return error;
}
@@ -493,6 +500,7 @@ static int tomoyo_update_domain_keeper_entry(const char *domainname,
* @is_not: True if it is "no_keep_domain" entry.
* @is_delete: True if it is a delete request.
*
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
const bool is_delete)
@@ -513,13 +521,14 @@ int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_domain_keeper_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_domain_keeper_list) {
struct tomoyo_domain_keeper_entry *ptr;
@@ -542,7 +551,6 @@ bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_domain_keeper_list_lock);
return done;
}
@@ -555,6 +563,8 @@ bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
*
* Returns true if executing @program supresses domain transition,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
const struct tomoyo_path_info *program,
@@ -563,8 +573,7 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
struct tomoyo_domain_keeper_entry *ptr;
bool flag = false;
- down_read(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_domain_keeper_list, list) {
if (ptr->is_deleted)
continue;
if (!ptr->is_last_name) {
@@ -582,7 +591,6 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
}
flag = true;
}
- up_read(&tomoyo_domain_keeper_list_lock);
return flag;
}
@@ -617,7 +625,6 @@ static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
* execve() succeeds is calculated using /bin/cat rather than /bin/busybox .
*/
static LIST_HEAD(tomoyo_alias_list);
-static DECLARE_RWSEM(tomoyo_alias_list_lock);
/**
* tomoyo_update_alias_entry - Update "struct tomoyo_alias_entry" list.
@@ -627,6 +634,8 @@ static DECLARE_RWSEM(tomoyo_alias_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_alias_entry(const char *original_name,
const char *aliased_name,
@@ -645,8 +654,9 @@ static int tomoyo_update_alias_entry(const char *original_name,
saved_aliased_name = tomoyo_save_name(aliased_name);
if (!saved_original_name || !saved_aliased_name)
return -ENOMEM;
- down_write(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
if (ptr->original_name != saved_original_name ||
ptr->aliased_name != saved_aliased_name)
continue;
@@ -658,15 +668,16 @@ static int tomoyo_update_alias_entry(const char *original_name,
error = -ENOENT;
goto out;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
+ if (!tomoyo_memory_ok(new_entry))
goto out;
new_entry->original_name = saved_original_name;
new_entry->aliased_name = saved_aliased_name;
- list_add_tail(&new_entry->list, &tomoyo_alias_list);
+ list_add_tail_rcu(&new_entry->list, &tomoyo_alias_list);
+ new_entry = NULL;
error = 0;
out:
- up_write(&tomoyo_alias_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(new_entry);
return error;
}
@@ -676,13 +687,14 @@ static int tomoyo_update_alias_entry(const char *original_name,
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_alias_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_alias_list) {
struct tomoyo_alias_entry *ptr;
@@ -695,7 +707,6 @@ bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_alias_list_lock);
return done;
}
@@ -706,6 +717,8 @@ bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_alias_policy(char *data, const bool is_delete)
{
@@ -724,15 +737,17 @@ int tomoyo_write_alias_policy(char *data, const bool is_delete)
* @profile: Profile number to assign if the domain was newly created.
*
* Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
domainname,
const u8 profile)
{
- struct tomoyo_domain_info *domain = NULL;
+ struct tomoyo_domain_info *domain;
const struct tomoyo_path_info *saved_domainname;
- down_write(&tomoyo_domain_list_lock);
+ mutex_lock(&tomoyo_policy_lock);
domain = tomoyo_find_domain(domainname);
if (domain)
goto out;
@@ -741,45 +756,19 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
saved_domainname = tomoyo_save_name(domainname);
if (!saved_domainname)
goto out;
- /* Can I reuse memory of deleted domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- struct task_struct *p;
- struct tomoyo_acl_info *ptr;
- bool flag;
- if (!domain->is_deleted ||
- domain->domainname != saved_domainname)
- continue;
- flag = false;
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (tomoyo_real_domain(p) != domain)
- continue;
- flag = true;
- break;
- }
- read_unlock(&tasklist_lock);
- if (flag)
- continue;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- ptr->type |= TOMOYO_ACL_DELETED;
- }
- tomoyo_set_domain_flag(domain, true, domain->flags);
- domain->profile = profile;
- domain->quota_warned = false;
- mb(); /* Avoid out-of-order execution. */
- domain->is_deleted = false;
- goto out;
- }
- /* No memory reusable. Create using new memory. */
- domain = tomoyo_alloc_element(sizeof(*domain));
- if (domain) {
+ domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+ if (tomoyo_memory_ok(domain)) {
INIT_LIST_HEAD(&domain->acl_info_list);
domain->domainname = saved_domainname;
domain->profile = profile;
- list_add_tail(&domain->list, &tomoyo_domain_list);
+ list_add_tail_rcu(&domain->list, &tomoyo_domain_list);
+ } else {
+ kfree(domain);
+ domain = NULL;
}
+
out:
- up_write(&tomoyo_domain_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return domain;
}
@@ -789,6 +778,8 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
* @bprm: Pointer to "struct linux_binprm".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_find_next_domain(struct linux_binprm *bprm)
{
@@ -849,8 +840,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
if (tomoyo_pathcmp(&r, &s)) {
struct tomoyo_alias_entry *ptr;
/* Is this program allowed to be called via symbolic links? */
- down_read(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_alias_list, list) {
if (ptr->is_deleted ||
tomoyo_pathcmp(&r, ptr->original_name) ||
tomoyo_pathcmp(&s, ptr->aliased_name))
@@ -861,7 +851,6 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
tomoyo_fill_path_info(&r);
break;
}
- up_read(&tomoyo_alias_list_lock);
}
/* Check execute permission. */
@@ -892,9 +881,7 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm)
}
if (domain || strlen(new_domain_name) >= TOMOYO_MAX_PATHNAME_LEN)
goto done;
- down_read(&tomoyo_domain_list_lock);
domain = tomoyo_find_domain(new_domain_name);
- up_read(&tomoyo_domain_list_lock);
if (domain)
goto done;
if (is_enforce)
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 9a6c58881c0a..cfcb096ee97a 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -80,12 +80,20 @@ static const char *tomoyo_sp_keyword[TOMOYO_MAX_SINGLE_PATH_OPERATION] = {
[TOMOYO_TYPE_TRUNCATE_ACL] = "truncate",
[TOMOYO_TYPE_SYMLINK_ACL] = "symlink",
[TOMOYO_TYPE_REWRITE_ACL] = "rewrite",
+ [TOMOYO_TYPE_IOCTL_ACL] = "ioctl",
+ [TOMOYO_TYPE_CHMOD_ACL] = "chmod",
+ [TOMOYO_TYPE_CHOWN_ACL] = "chown",
+ [TOMOYO_TYPE_CHGRP_ACL] = "chgrp",
+ [TOMOYO_TYPE_CHROOT_ACL] = "chroot",
+ [TOMOYO_TYPE_MOUNT_ACL] = "mount",
+ [TOMOYO_TYPE_UMOUNT_ACL] = "unmount",
};
/* Keyword array for double path operations. */
static const char *tomoyo_dp_keyword[TOMOYO_MAX_DOUBLE_PATH_OPERATION] = {
[TOMOYO_TYPE_LINK_ACL] = "link",
[TOMOYO_TYPE_RENAME_ACL] = "rename",
+ [TOMOYO_TYPE_PIVOT_ROOT_ACL] = "pivot_root",
};
/**
@@ -158,9 +166,6 @@ static struct tomoyo_path_info *tomoyo_get_path(struct path *path)
return NULL;
}
-/* Lock for domain->acl_info_list. */
-DECLARE_RWSEM(tomoyo_domain_acl_info_list_lock);
-
static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
const char *filename2,
struct tomoyo_domain_info *
@@ -195,7 +200,6 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
* belongs to.
*/
static LIST_HEAD(tomoyo_globally_readable_list);
-static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
/**
* tomoyo_update_globally_readable_entry - Update "struct tomoyo_globally_readable_file_entry" list.
@@ -204,6 +208,8 @@ static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_globally_readable_entry(const char *filename,
const bool is_delete)
@@ -218,8 +224,9 @@ static int tomoyo_update_globally_readable_entry(const char *filename,
saved_filename = tomoyo_save_name(filename);
if (!saved_filename)
return -ENOMEM;
- down_write(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
if (ptr->filename != saved_filename)
continue;
ptr->is_deleted = is_delete;
@@ -230,14 +237,15 @@ static int tomoyo_update_globally_readable_entry(const char *filename,
error = -ENOENT;
goto out;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
+ if (!tomoyo_memory_ok(new_entry))
goto out;
new_entry->filename = saved_filename;
- list_add_tail(&new_entry->list, &tomoyo_globally_readable_list);
+ list_add_tail_rcu(&new_entry->list, &tomoyo_globally_readable_list);
+ new_entry = NULL;
error = 0;
out:
- up_write(&tomoyo_globally_readable_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(new_entry);
return error;
}
@@ -247,21 +255,22 @@ static int tomoyo_update_globally_readable_entry(const char *filename,
* @filename: The filename to check.
*
* Returns true if any domain can open @filename for reading, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
filename)
{
struct tomoyo_globally_readable_file_entry *ptr;
bool found = false;
- down_read(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
+
+ list_for_each_entry_rcu(ptr, &tomoyo_globally_readable_list, list) {
if (!ptr->is_deleted &&
tomoyo_path_matches_pattern(filename, ptr->filename)) {
found = true;
break;
}
}
- up_read(&tomoyo_globally_readable_list_lock);
return found;
}
@@ -272,6 +281,8 @@ static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
{
@@ -284,13 +295,14 @@ int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_globally_readable_list_lock);
list_for_each_cookie(pos, head->read_var2,
&tomoyo_globally_readable_list) {
struct tomoyo_globally_readable_file_entry *ptr;
@@ -304,7 +316,6 @@ bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_globally_readable_list_lock);
return done;
}
@@ -338,7 +349,6 @@ bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
* current process from accessing other process's information.
*/
static LIST_HEAD(tomoyo_pattern_list);
-static DECLARE_RWSEM(tomoyo_pattern_list_lock);
/**
* tomoyo_update_file_pattern_entry - Update "struct tomoyo_pattern_entry" list.
@@ -347,6 +357,8 @@ static DECLARE_RWSEM(tomoyo_pattern_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_file_pattern_entry(const char *pattern,
const bool is_delete)
@@ -361,8 +373,9 @@ static int tomoyo_update_file_pattern_entry(const char *pattern,
saved_pattern = tomoyo_save_name(pattern);
if (!saved_pattern)
return -ENOMEM;
- down_write(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
if (saved_pattern != ptr->pattern)
continue;
ptr->is_deleted = is_delete;
@@ -373,14 +386,15 @@ static int tomoyo_update_file_pattern_entry(const char *pattern,
error = -ENOENT;
goto out;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
+ if (!tomoyo_memory_ok(new_entry))
goto out;
new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_pattern_list);
+ list_add_tail_rcu(&new_entry->list, &tomoyo_pattern_list);
+ new_entry = NULL;
error = 0;
out:
- up_write(&tomoyo_pattern_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(new_entry);
return error;
}
@@ -390,6 +404,8 @@ static int tomoyo_update_file_pattern_entry(const char *pattern,
* @filename: The filename to find patterned pathname.
*
* Returns pointer to pathname pattern if matched, @filename otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static const struct tomoyo_path_info *
tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
@@ -397,8 +413,7 @@ tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
struct tomoyo_pattern_entry *ptr;
const struct tomoyo_path_info *pattern = NULL;
- down_read(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_pattern_list, list) {
if (ptr->is_deleted)
continue;
if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -411,7 +426,6 @@ tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
break;
}
}
- up_read(&tomoyo_pattern_list_lock);
if (pattern)
filename = pattern;
return filename;
@@ -424,6 +438,8 @@ tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_pattern_policy(char *data, const bool is_delete)
{
@@ -436,13 +452,14 @@ int tomoyo_write_pattern_policy(char *data, const bool is_delete)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_pattern_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_pattern_list) {
struct tomoyo_pattern_entry *ptr;
ptr = list_entry(pos, struct tomoyo_pattern_entry, list);
@@ -453,7 +470,6 @@ bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_pattern_list_lock);
return done;
}
@@ -487,7 +503,6 @@ bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
* need to worry whether the file is already unlink()ed or not.
*/
static LIST_HEAD(tomoyo_no_rewrite_list);
-static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
/**
* tomoyo_update_no_rewrite_entry - Update "struct tomoyo_no_rewrite_entry" list.
@@ -496,6 +511,8 @@ static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_no_rewrite_entry(const char *pattern,
const bool is_delete)
@@ -509,8 +526,9 @@ static int tomoyo_update_no_rewrite_entry(const char *pattern,
saved_pattern = tomoyo_save_name(pattern);
if (!saved_pattern)
return -ENOMEM;
- down_write(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+ new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ mutex_lock(&tomoyo_policy_lock);
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
if (ptr->pattern != saved_pattern)
continue;
ptr->is_deleted = is_delete;
@@ -521,14 +539,15 @@ static int tomoyo_update_no_rewrite_entry(const char *pattern,
error = -ENOENT;
goto out;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
+ if (!tomoyo_memory_ok(new_entry))
goto out;
new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_no_rewrite_list);
+ list_add_tail_rcu(&new_entry->list, &tomoyo_no_rewrite_list);
+ new_entry = NULL;
error = 0;
out:
- up_write(&tomoyo_no_rewrite_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
+ kfree(new_entry);
return error;
}
@@ -539,14 +558,15 @@ static int tomoyo_update_no_rewrite_entry(const char *pattern,
*
* Returns true if @filename is specified by "deny_rewrite" directive,
* false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
{
struct tomoyo_no_rewrite_entry *ptr;
bool found = false;
- down_read(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
+ list_for_each_entry_rcu(ptr, &tomoyo_no_rewrite_list, list) {
if (ptr->is_deleted)
continue;
if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
@@ -554,7 +574,6 @@ static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
found = true;
break;
}
- up_read(&tomoyo_no_rewrite_list_lock);
return found;
}
@@ -565,6 +584,8 @@ static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
{
@@ -577,13 +598,14 @@ int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
{
struct list_head *pos;
bool done = true;
- down_read(&tomoyo_no_rewrite_list_lock);
list_for_each_cookie(pos, head->read_var2, &tomoyo_no_rewrite_list) {
struct tomoyo_no_rewrite_entry *ptr;
ptr = list_entry(pos, struct tomoyo_no_rewrite_entry, list);
@@ -594,7 +616,6 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
if (!done)
break;
}
- up_read(&tomoyo_no_rewrite_list_lock);
return done;
}
@@ -612,6 +633,8 @@ bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
* Current policy syntax uses "allow_read/write" instead of "6",
* "allow_read" instead of "4", "allow_write" instead of "2",
* "allow_execute" instead of "1".
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_file_acl(const char *filename, u8 perm,
struct tomoyo_domain_info * const domain,
@@ -649,26 +672,32 @@ static int tomoyo_update_file_acl(const char *filename, u8 perm,
* @may_use_pattern: True if patterned ACL is permitted.
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
domain,
const struct tomoyo_path_info *
filename,
- const u16 perm,
+ const u32 perm,
const bool may_use_pattern)
{
struct tomoyo_acl_info *ptr;
int error = -EPERM;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
struct tomoyo_single_path_acl_record *acl;
if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
continue;
acl = container_of(ptr, struct tomoyo_single_path_acl_record,
head);
- if (!(acl->perm & perm))
- continue;
+ if (perm <= 0xFFFF) {
+ if (!(acl->perm & perm))
+ continue;
+ } else {
+ if (!(acl->perm_high & (perm >> 16)))
+ continue;
+ }
if (may_use_pattern || !acl->filename->is_patterned) {
if (!tomoyo_path_matches_pattern(filename,
acl->filename))
@@ -679,7 +708,6 @@ static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
error = 0;
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
return error;
}
@@ -691,12 +719,14 @@ static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
* @operation: Mode ("read" or "write" or "read/write" or "execute").
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename,
const u8 operation)
{
- u16 perm = 0;
+ u32 perm = 0;
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
@@ -724,6 +754,8 @@ static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
* @mode: Access control mode.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
const struct tomoyo_path_info *filename,
@@ -777,6 +809,8 @@ static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
const bool is_delete)
@@ -824,18 +858,20 @@ int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
struct tomoyo_domain_info *
const domain, const bool is_delete)
{
- static const u16 rw_mask =
+ static const u32 rw_mask =
(1 << TOMOYO_TYPE_READ_ACL) | (1 << TOMOYO_TYPE_WRITE_ACL);
const struct tomoyo_path_info *saved_filename;
struct tomoyo_acl_info *ptr;
struct tomoyo_single_path_acl_record *acl;
int error = -ENOMEM;
- const u16 perm = 1 << type;
+ const u32 perm = 1 << type;
if (!domain)
return -EINVAL;
@@ -844,10 +880,10 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
saved_filename = tomoyo_save_name(filename);
if (!saved_filename)
return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
+ mutex_lock(&tomoyo_policy_lock);
if (is_delete)
goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
continue;
acl = container_of(ptr, struct tomoyo_single_path_acl_record,
@@ -857,7 +893,10 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
/* Special case. Clear all bits if marked as deleted. */
if (ptr->type & TOMOYO_ACL_DELETED)
acl->perm = 0;
- acl->perm |= perm;
+ if (perm <= 0xFFFF)
+ acl->perm |= perm;
+ else
+ acl->perm_high |= (perm >> 16);
if ((acl->perm & rw_mask) == rw_mask)
acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE_ACL;
else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL))
@@ -867,37 +906,47 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
goto out;
}
/* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_SINGLE_PATH_ACL);
- if (!acl)
+ acl = kmalloc(sizeof(*acl), GFP_KERNEL);
+ if (!tomoyo_memory_ok(acl)) {
+ kfree(acl);
+ acl = NULL;
goto out;
- acl->perm = perm;
+ }
+ acl->head.type = TOMOYO_TYPE_SINGLE_PATH_ACL;
+ if (perm <= 0xFFFF)
+ acl->perm = perm;
+ else
+ acl->perm_high = (perm >> 16);
if (perm == (1 << TOMOYO_TYPE_READ_WRITE_ACL))
acl->perm |= rw_mask;
acl->filename = saved_filename;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
+ list_add_tail_rcu(&acl->head.list, &domain->acl_info_list);
error = 0;
goto out;
delete:
error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
continue;
acl = container_of(ptr, struct tomoyo_single_path_acl_record,
head);
if (acl->filename != saved_filename)
continue;
- acl->perm &= ~perm;
+ if (perm <= 0xFFFF)
+ acl->perm &= ~perm;
+ else
+ acl->perm_high &= ~(perm >> 16);
if ((acl->perm & rw_mask) != rw_mask)
acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE_ACL);
else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
acl->perm &= ~rw_mask;
- if (!acl->perm)
+ if (!acl->perm && !acl->perm_high)
ptr->type |= TOMOYO_ACL_DELETED;
error = 0;
break;
}
out:
- up_write(&tomoyo_domain_acl_info_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return error;
}
@@ -911,6 +960,8 @@ static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
const char *filename2,
@@ -933,10 +984,10 @@ static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
saved_filename2 = tomoyo_save_name(filename2);
if (!saved_filename1 || !saved_filename2)
return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
+ mutex_lock(&tomoyo_policy_lock);
if (is_delete)
goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
continue;
acl = container_of(ptr, struct tomoyo_double_path_acl_record,
@@ -953,18 +1004,22 @@ static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
goto out;
}
/* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_DOUBLE_PATH_ACL);
- if (!acl)
+ acl = kmalloc(sizeof(*acl), GFP_KERNEL);
+ if (!tomoyo_memory_ok(acl)) {
+ kfree(acl);
+ acl = NULL;
goto out;
+ }
+ acl->head.type = TOMOYO_TYPE_DOUBLE_PATH_ACL;
acl->perm = perm;
acl->filename1 = saved_filename1;
acl->filename2 = saved_filename2;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
+ list_add_tail_rcu(&acl->head.list, &domain->acl_info_list);
error = 0;
goto out;
delete:
error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
continue;
acl = container_of(ptr, struct tomoyo_double_path_acl_record,
@@ -979,7 +1034,7 @@ static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
break;
}
out:
- up_write(&tomoyo_domain_acl_info_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return error;
}
@@ -991,6 +1046,8 @@ static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
* @filename: Filename to check.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain,
const u8 type,
@@ -1010,6 +1067,8 @@ static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain,
* @filename2: Second filename to check.
*
* Returns 0 on success, -EPERM otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
const u8 type,
@@ -1024,8 +1083,7 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
return 0;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
struct tomoyo_double_path_acl_record *acl;
if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
continue;
@@ -1040,7 +1098,6 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
error = 0;
break;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
return error;
}
@@ -1053,6 +1110,8 @@ static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
* @mode: Access control mode.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
const domain, u8 operation,
@@ -1101,6 +1160,8 @@ static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
* @filename: Check permission for "execute".
*
* Returns 0 on success, negativevalue otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
const struct tomoyo_path_info *filename)
@@ -1129,6 +1190,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct tomoyo_path_info *buf;
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
+ int idx;
if (!mode || !path->mnt)
return 0;
@@ -1140,6 +1202,7 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
* don't call me.
*/
return 0;
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(path);
if (!buf)
goto out;
@@ -1165,13 +1228,14 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
buf, mode);
out:
tomoyo_free(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
- * tomoyo_check_1path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate" and "symlink".
+ * tomoyo_check_1path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate", "symlink", "ioctl", "chmod", "chown", "chgrp", "chroot", "mount" and "unmount".
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
@@ -1186,15 +1250,18 @@ int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
struct tomoyo_path_info *buf;
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
+ int idx;
if (!mode || !path->mnt)
return 0;
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(path);
if (!buf)
goto out;
switch (operation) {
case TOMOYO_TYPE_MKDIR_ACL:
case TOMOYO_TYPE_RMDIR_ACL:
+ case TOMOYO_TYPE_CHROOT_ACL:
if (!buf->is_dir) {
/*
* tomoyo_get_path() reserves space for appending "/."
@@ -1207,6 +1274,7 @@ int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
mode);
out:
tomoyo_free(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
@@ -1227,9 +1295,12 @@ int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
struct tomoyo_path_info *buf;
+ int idx;
if (!mode || !filp->f_path.mnt)
return 0;
+
+ idx = tomoyo_read_lock();
buf = tomoyo_get_path(&filp->f_path);
if (!buf)
goto out;
@@ -1242,13 +1313,14 @@ int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
buf, mode);
out:
tomoyo_free(buf);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
- * tomoyo_check_2path_perm - Check permission for "rename" and "link".
+ * tomoyo_check_2path_perm - Check permission for "rename", "link" and "pivot_root".
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
@@ -1266,9 +1338,11 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
const bool is_enforce = (mode == 3);
const char *msg;
+ int idx;
if (!mode || !path1->mnt || !path2->mnt)
return 0;
+ idx = tomoyo_read_lock();
buf1 = tomoyo_get_path(path1);
buf2 = tomoyo_get_path(path2);
if (!buf1 || !buf2)
@@ -1307,6 +1381,7 @@ int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
out:
tomoyo_free(buf1);
tomoyo_free(buf2);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 18369d497eb8..54226d5be493 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/fs_struct.h>
#include <linux/hash.h>
+#include <linux/magic.h>
#include "common.h"
#include "realpath.h"
@@ -112,7 +113,7 @@ int tomoyo_realpath_from_path2(struct path *path, char *newname,
path_put(&ns_root);
/* Prepend "/proc" prefix if using internal proc vfs mount. */
if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
- (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
+ (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) {
sp -= 5;
if (sp >= newname)
memcpy(sp, "/proc", 5);
@@ -211,57 +212,32 @@ static unsigned int tomoyo_allocated_memory_for_elements;
static unsigned int tomoyo_quota_for_elements;
/**
- * tomoyo_alloc_element - Allocate permanent memory for structures.
+ * tomoyo_memory_ok - Check memory quota.
*
- * @size: Size in bytes.
+ * @ptr: Pointer to allocated memory.
*
- * Returns pointer to allocated memory on success, NULL otherwise.
+ * Returns true on success, false otherwise.
*
- * Memory has to be zeroed.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
+ * Caller holds tomoyo_policy_lock.
+ * Memory pointed by @ptr will be zeroed on success.
*/
-void *tomoyo_alloc_element(const unsigned int size)
+bool tomoyo_memory_ok(void *ptr)
{
- static char *buf;
- static DEFINE_MUTEX(lock);
- static unsigned int buf_used_len = PATH_MAX;
- char *ptr = NULL;
- /*Assumes sizeof(void *) >= sizeof(long) is true. */
- const unsigned int word_aligned_size
- = roundup(size, max(sizeof(void *), sizeof(long)));
- if (word_aligned_size > PATH_MAX)
- return NULL;
- mutex_lock(&lock);
- if (buf_used_len + word_aligned_size > PATH_MAX) {
- if (!tomoyo_quota_for_elements ||
- tomoyo_allocated_memory_for_elements
- + PATH_MAX <= tomoyo_quota_for_elements)
- ptr = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!ptr) {
- printk(KERN_WARNING "ERROR: Out of memory "
- "for tomoyo_alloc_element().\n");
- if (!tomoyo_policy_loaded)
- panic("MAC Initialization failed.\n");
- } else {
- buf = ptr;
- tomoyo_allocated_memory_for_elements += PATH_MAX;
- buf_used_len = word_aligned_size;
- ptr = buf;
- }
- } else if (word_aligned_size) {
- int i;
- ptr = buf + buf_used_len;
- buf_used_len += word_aligned_size;
- for (i = 0; i < word_aligned_size; i++) {
- if (!ptr[i])
- continue;
- printk(KERN_ERR "WARNING: Reserved memory was tainted! "
- "The system might go wrong.\n");
- ptr[i] = '\0';
- }
+ int allocated_len = ptr ? ksize(ptr) : 0;
+ bool result = false;
+ if (!ptr || (tomoyo_quota_for_elements &&
+ tomoyo_allocated_memory_for_elements
+ + allocated_len > tomoyo_quota_for_elements)) {
+ printk(KERN_WARNING "ERROR: Out of memory "
+ "for tomoyo_alloc_element().\n");
+ if (!tomoyo_policy_loaded)
+ panic("MAC Initialization failed.\n");
+ } else {
+ result = true;
+ tomoyo_allocated_memory_for_elements += allocated_len;
+ memset(ptr, 0, allocated_len);
}
- mutex_unlock(&lock);
- return ptr;
+ return result;
}
/* Memory allocated for string data in bytes. */
@@ -292,13 +268,6 @@ struct tomoyo_name_entry {
struct tomoyo_path_info entry;
};
-/* Structure for available memory region. */
-struct tomoyo_free_memory_block_list {
- struct list_head list;
- char *ptr; /* Pointer to a free area. */
- int len; /* Length of the area. */
-};
-
/*
* tomoyo_name_list is used for holding string data used by TOMOYO.
* Since same string data is likely used for multiple times (e.g.
@@ -313,52 +282,32 @@ static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
* @name: The string to store into the permernent memory.
*
* Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
- *
- * The RAM is shared, so NEVER try to modify or kfree() the returned name.
*/
const struct tomoyo_path_info *tomoyo_save_name(const char *name)
{
- static LIST_HEAD(fmb_list);
static DEFINE_MUTEX(lock);
struct tomoyo_name_entry *ptr;
unsigned int hash;
- /* fmb contains available size in bytes.
- fmb is removed from the fmb_list when fmb->len becomes 0. */
- struct tomoyo_free_memory_block_list *fmb;
int len;
- char *cp;
+ int allocated_len;
struct list_head *head;
if (!name)
return NULL;
len = strlen(name) + 1;
- if (len > TOMOYO_MAX_PATHNAME_LEN) {
- printk(KERN_WARNING "ERROR: Name too long "
- "for tomoyo_save_name().\n");
- return NULL;
- }
hash = full_name_hash((const unsigned char *) name, len - 1);
head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
-
mutex_lock(&lock);
list_for_each_entry(ptr, head, list) {
if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
goto out;
}
- list_for_each_entry(fmb, &fmb_list, list) {
- if (len <= fmb->len)
- goto ready;
- }
- if (!tomoyo_quota_for_savename ||
- tomoyo_allocated_memory_for_savename + PATH_MAX
- <= tomoyo_quota_for_savename)
- cp = kzalloc(PATH_MAX, GFP_KERNEL);
- else
- cp = NULL;
- fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
- if (!cp || !fmb) {
- kfree(cp);
- kfree(fmb);
+ ptr = kzalloc(sizeof(*ptr) + len, GFP_KERNEL);
+ allocated_len = ptr ? ksize(ptr) : 0;
+ if (!ptr || (tomoyo_quota_for_savename &&
+ tomoyo_allocated_memory_for_savename + allocated_len
+ > tomoyo_quota_for_savename)) {
+ kfree(ptr);
printk(KERN_WARNING "ERROR: Out of memory "
"for tomoyo_save_name().\n");
if (!tomoyo_policy_loaded)
@@ -366,24 +315,11 @@ const struct tomoyo_path_info *tomoyo_save_name(const char *name)
ptr = NULL;
goto out;
}
- tomoyo_allocated_memory_for_savename += PATH_MAX;
- list_add(&fmb->list, &fmb_list);
- fmb->ptr = cp;
- fmb->len = PATH_MAX;
- ready:
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
- goto out;
- ptr->entry.name = fmb->ptr;
- memmove(fmb->ptr, name, len);
+ tomoyo_allocated_memory_for_savename += allocated_len;
+ ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
+ memmove((char *) ptr->entry.name, name, len);
tomoyo_fill_path_info(&ptr->entry);
- fmb->ptr += len;
- fmb->len -= len;
list_add_tail(&ptr->list, head);
- if (fmb->len == 0) {
- list_del(&fmb->list);
- kfree(fmb);
- }
out:
mutex_unlock(&lock);
return ptr ? &ptr->entry : NULL;
@@ -401,11 +337,13 @@ void __init tomoyo_realpath_init(void)
INIT_LIST_HEAD(&tomoyo_name_list[i]);
INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME);
- list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
- down_read(&tomoyo_domain_list_lock);
+ /*
+ * tomoyo_read_lock() is not needed because this function is
+ * called before the first "delete" request.
+ */
+ list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
panic("Can't register tomoyo_kernel_domain");
- up_read(&tomoyo_domain_list_lock);
}
/* Memory allocated for temporary purpose. */
diff --git a/security/tomoyo/realpath.h b/security/tomoyo/realpath.h
index 78217a37960b..47b4f59dad6f 100644
--- a/security/tomoyo/realpath.h
+++ b/security/tomoyo/realpath.h
@@ -36,11 +36,8 @@ char *tomoyo_realpath_nofollow(const char *pathname);
/* Same with tomoyo_realpath() except that the pathname is already solved. */
char *tomoyo_realpath_from_path(struct path *path);
-/*
- * Allocate memory for ACL entry.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
- */
-void *tomoyo_alloc_element(const unsigned int size);
+/* Check memory quota. */
+bool tomoyo_memory_ok(void *ptr);
/*
* Keep the given name on the RAM.
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 8a00ade85166..714daa34d493 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -76,8 +76,18 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
- if (!domain)
- return tomoyo_find_next_domain(bprm);
+ if (!domain) {
+ /*
+ * We will need to protect whole execve() operation when GC
+ * starts kfree()ing "struct tomoyo_domain_info" because
+ * bprm->cred->security points to "struct tomoyo_domain_info"
+ * but "struct tomoyo_domain_info" does not have a refcounter.
+ */
+ const int idx = tomoyo_read_lock();
+ const int err = tomoyo_find_next_domain(bprm);
+ tomoyo_read_unlock(idx);
+ return err;
+ }
/*
* Read permission is checked against interpreters using next domain.
* '1' is the result of open_to_namei_flags(O_RDONLY).
@@ -194,6 +204,60 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
}
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_IOCTL_ACL,
+ &file->f_path);
+}
+
+static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
+ mode_t mode)
+{
+ struct path path = { mnt, dentry };
+ return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_CHMOD_ACL,
+ &path);
+}
+
+static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid)
+{
+ int error = 0;
+ if (uid != (uid_t) -1)
+ error = tomoyo_check_1path_perm(tomoyo_domain(),
+ TOMOYO_TYPE_CHOWN_ACL, path);
+ if (!error && gid != (gid_t) -1)
+ error = tomoyo_check_1path_perm(tomoyo_domain(),
+ TOMOYO_TYPE_CHGRP_ACL, path);
+ return error;
+}
+
+static int tomoyo_path_chroot(struct path *path)
+{
+ return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_CHROOT_ACL,
+ path);
+}
+
+static int tomoyo_sb_mount(char *dev_name, struct path *path,
+ char *type, unsigned long flags, void *data)
+{
+ return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_MOUNT_ACL,
+ path);
+}
+
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+ struct path path = { mnt, mnt->mnt_root };
+ return tomoyo_check_1path_perm(tomoyo_domain(), TOMOYO_TYPE_UMOUNT_ACL,
+ &path);
+}
+
+static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
+{
+ return tomoyo_check_2path_perm(tomoyo_domain(),
+ TOMOYO_TYPE_PIVOT_ROOT_ACL,
+ new_path, old_path);
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -215,8 +279,18 @@ static struct security_operations tomoyo_security_ops = {
.path_mknod = tomoyo_path_mknod,
.path_link = tomoyo_path_link,
.path_rename = tomoyo_path_rename,
+ .file_ioctl = tomoyo_file_ioctl,
+ .path_chmod = tomoyo_path_chmod,
+ .path_chown = tomoyo_path_chown,
+ .path_chroot = tomoyo_path_chroot,
+ .sb_mount = tomoyo_sb_mount,
+ .sb_umount = tomoyo_sb_umount,
+ .sb_pivotroot = tomoyo_sb_pivotroot,
};
+/* Lock for GC. */
+struct srcu_struct tomoyo_ss;
+
static int __init tomoyo_init(void)
{
struct cred *cred = (struct cred *) current_cred();
@@ -224,7 +298,8 @@ static int __init tomoyo_init(void)
if (!security_module_enable(&tomoyo_security_ops))
return 0;
/* register ourselves with the security framework */
- if (register_security(&tomoyo_security_ops))
+ if (register_security(&tomoyo_security_ops) ||
+ init_srcu_struct(&tomoyo_ss))
panic("Failure registering TOMOYO Linux");
printk(KERN_INFO "TOMOYO Linux initialized\n");
cred->security = &tomoyo_kernel_domain;
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
index ed758325b1ae..bf3986addc1a 100644
--- a/security/tomoyo/tomoyo.h
+++ b/security/tomoyo/tomoyo.h
@@ -62,11 +62,19 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm);
#define TOMOYO_TYPE_TRUNCATE_ACL 12
#define TOMOYO_TYPE_SYMLINK_ACL 13
#define TOMOYO_TYPE_REWRITE_ACL 14
-#define TOMOYO_MAX_SINGLE_PATH_OPERATION 15
+#define TOMOYO_TYPE_IOCTL_ACL 15
+#define TOMOYO_TYPE_CHMOD_ACL 16
+#define TOMOYO_TYPE_CHOWN_ACL 17
+#define TOMOYO_TYPE_CHGRP_ACL 18
+#define TOMOYO_TYPE_CHROOT_ACL 19
+#define TOMOYO_TYPE_MOUNT_ACL 20
+#define TOMOYO_TYPE_UMOUNT_ACL 21
+#define TOMOYO_MAX_SINGLE_PATH_OPERATION 22
#define TOMOYO_TYPE_LINK_ACL 0
#define TOMOYO_TYPE_RENAME_ACL 1
-#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 2
+#define TOMOYO_TYPE_PIVOT_ROOT_ACL 2
+#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 3
#define TOMOYO_DOMAINPOLICY 0
#define TOMOYO_EXCEPTIONPOLICY 1
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index d9c96353121a..255ad910077a 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -632,6 +632,13 @@ static long snd_pcm_alsa_frames(struct snd_pcm_substream *substream, long bytes)
return bytes_to_frames(runtime, (buffer_size * bytes) / runtime->oss.buffer_bytes);
}
+static inline
+snd_pcm_uframes_t get_hw_ptr_period(struct snd_pcm_runtime *runtime)
+{
+ snd_pcm_uframes_t ptr = runtime->status->hw_ptr;
+ return ptr - (ptr % runtime->period_size);
+}
+
/* define extended formats in the recent OSS versions (if any) */
/* linear formats */
#define AFMT_S32_LE 0x00001000
@@ -1102,7 +1109,7 @@ static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
return err;
}
runtime->oss.prepare = 0;
- runtime->oss.prev_hw_ptr_interrupt = 0;
+ runtime->oss.prev_hw_ptr_period = 0;
runtime->oss.period_ptr = 0;
runtime->oss.buffer_used = 0;
@@ -1950,7 +1957,8 @@ static int snd_pcm_oss_get_caps(struct snd_pcm_oss_file *pcm_oss_file)
return result;
}
-static void snd_pcm_oss_simulate_fill(struct snd_pcm_substream *substream, snd_pcm_uframes_t hw_ptr)
+static void snd_pcm_oss_simulate_fill(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t hw_ptr)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t appl_ptr;
@@ -1986,7 +1994,8 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
if (runtime->oss.trigger)
goto _skip1;
if (atomic_read(&psubstream->mmap_count))
- snd_pcm_oss_simulate_fill(psubstream, runtime->hw_ptr_interrupt);
+ snd_pcm_oss_simulate_fill(psubstream,
+ get_hw_ptr_period(runtime));
runtime->oss.trigger = 1;
runtime->start_threshold = 1;
cmd = SNDRV_PCM_IOCTL_START;
@@ -2105,11 +2114,12 @@ static int snd_pcm_oss_get_ptr(struct snd_pcm_oss_file *pcm_oss_file, int stream
info.ptr = snd_pcm_oss_bytes(substream, runtime->status->hw_ptr % runtime->buffer_size);
if (atomic_read(&substream->mmap_count)) {
snd_pcm_sframes_t n;
- n = (delay = runtime->hw_ptr_interrupt) - runtime->oss.prev_hw_ptr_interrupt;
+ delay = get_hw_ptr_period(runtime);
+ n = delay - runtime->oss.prev_hw_ptr_period;
if (n < 0)
n += runtime->boundary;
info.blocks = n / runtime->period_size;
- runtime->oss.prev_hw_ptr_interrupt = delay;
+ runtime->oss.prev_hw_ptr_period = delay;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
snd_pcm_oss_simulate_fill(substream, delay);
info.bytes = snd_pcm_oss_bytes(substream, runtime->status->hw_ptr) & INT_MAX;
@@ -2673,18 +2683,22 @@ static int snd_pcm_oss_playback_ready(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
- return runtime->oss.prev_hw_ptr_interrupt != runtime->hw_ptr_interrupt;
+ return runtime->oss.prev_hw_ptr_period !=
+ get_hw_ptr_period(runtime);
else
- return snd_pcm_playback_avail(runtime) >= runtime->oss.period_frames;
+ return snd_pcm_playback_avail(runtime) >=
+ runtime->oss.period_frames;
}
static int snd_pcm_oss_capture_ready(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
- return runtime->oss.prev_hw_ptr_interrupt != runtime->hw_ptr_interrupt;
+ return runtime->oss.prev_hw_ptr_period !=
+ get_hw_ptr_period(runtime);
else
- return snd_pcm_capture_avail(runtime) >= runtime->oss.period_frames;
+ return snd_pcm_capture_avail(runtime) >=
+ runtime->oss.period_frames;
}
static unsigned int snd_pcm_oss_poll(struct file *file, poll_table * wait)
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 6884ae031f6f..df57a0e30bf2 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -921,6 +921,10 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
snd_free_pages((void*)runtime->control,
PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
kfree(runtime->hw_constraints.rules);
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+ if (runtime->hwptr_log)
+ kfree(runtime->hwptr_log);
+#endif
kfree(runtime);
substream->runtime = NULL;
put_pid(substream->pid);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a27545b23ee9..0403a7d55f0c 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -126,17 +126,6 @@ void snd_pcm_playback_silence(struct snd_pcm_substream *substream, snd_pcm_ufram
}
}
-#ifdef CONFIG_SND_PCM_XRUN_DEBUG
-#define xrun_debug(substream, mask) ((substream)->pstr->xrun_debug & (mask))
-#else
-#define xrun_debug(substream, mask) 0
-#endif
-
-#define dump_stack_on_xrun(substream) do { \
- if (xrun_debug(substream, 2)) \
- dump_stack(); \
- } while (0)
-
static void pcm_debug_name(struct snd_pcm_substream *substream,
char *name, size_t len)
{
@@ -147,6 +136,24 @@ static void pcm_debug_name(struct snd_pcm_substream *substream,
substream->number);
}
+#define XRUN_DEBUG_BASIC (1<<0)
+#define XRUN_DEBUG_STACK (1<<1) /* dump also stack */
+#define XRUN_DEBUG_JIFFIESCHECK (1<<2) /* do jiffies check */
+#define XRUN_DEBUG_PERIODUPDATE (1<<3) /* full period update info */
+#define XRUN_DEBUG_HWPTRUPDATE (1<<4) /* full hwptr update info */
+#define XRUN_DEBUG_LOG (1<<5) /* show last 10 positions on err */
+#define XRUN_DEBUG_LOGONCE (1<<6) /* do above only once */
+
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
+
+#define xrun_debug(substream, mask) \
+ ((substream)->pstr->xrun_debug & (mask))
+
+#define dump_stack_on_xrun(substream) do { \
+ if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
+ dump_stack(); \
+ } while (0)
+
static void xrun(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
@@ -154,7 +161,7 @@ static void xrun(struct snd_pcm_substream *substream)
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
- if (xrun_debug(substream, 1)) {
+ if (xrun_debug(substream, XRUN_DEBUG_BASIC)) {
char name[16];
pcm_debug_name(substream, name, sizeof(name));
snd_printd(KERN_DEBUG "XRUN: %s\n", name);
@@ -162,32 +169,102 @@ static void xrun(struct snd_pcm_substream *substream)
}
}
-static snd_pcm_uframes_t
-snd_pcm_update_hw_ptr_pos(struct snd_pcm_substream *substream,
- struct snd_pcm_runtime *runtime)
-{
+#define hw_ptr_error(substream, fmt, args...) \
+ do { \
+ if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
+ xrun_log_show(substream); \
+ if (printk_ratelimit()) { \
+ snd_printd("PCM: " fmt, ##args); \
+ } \
+ dump_stack_on_xrun(substream); \
+ } \
+ } while (0)
+
+#define XRUN_LOG_CNT 10
+
+struct hwptr_log_entry {
+ unsigned long jiffies;
snd_pcm_uframes_t pos;
+ snd_pcm_uframes_t period_size;
+ snd_pcm_uframes_t buffer_size;
+ snd_pcm_uframes_t old_hw_ptr;
+ snd_pcm_uframes_t hw_ptr_base;
+};
- pos = substream->ops->pointer(substream);
- if (pos == SNDRV_PCM_POS_XRUN)
- return pos; /* XRUN */
- if (pos >= runtime->buffer_size) {
- if (printk_ratelimit()) {
- char name[16];
- pcm_debug_name(substream, name, sizeof(name));
- snd_printd(KERN_ERR "BUG: %s, pos = 0x%lx, "
- "buffer size = 0x%lx, period size = 0x%lx\n",
- name, pos, runtime->buffer_size,
- runtime->period_size);
- }
- pos = 0;
+struct snd_pcm_hwptr_log {
+ unsigned int idx;
+ unsigned int hit: 1;
+ struct hwptr_log_entry entries[XRUN_LOG_CNT];
+};
+
+static void xrun_log(struct snd_pcm_substream *substream,
+ snd_pcm_uframes_t pos)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_pcm_hwptr_log *log = runtime->hwptr_log;
+ struct hwptr_log_entry *entry;
+
+ if (log == NULL) {
+ log = kzalloc(sizeof(*log), GFP_ATOMIC);
+ if (log == NULL)
+ return;
+ runtime->hwptr_log = log;
+ } else {
+ if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit)
+ return;
}
- pos -= pos % runtime->min_align;
- return pos;
+ entry = &log->entries[log->idx];
+ entry->jiffies = jiffies;
+ entry->pos = pos;
+ entry->period_size = runtime->period_size;
+ entry->buffer_size = runtime->buffer_size;;
+ entry->old_hw_ptr = runtime->status->hw_ptr;
+ entry->hw_ptr_base = runtime->hw_ptr_base;
+ log->idx = (log->idx + 1) % XRUN_LOG_CNT;
+}
+
+static void xrun_log_show(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_hwptr_log *log = substream->runtime->hwptr_log;
+ struct hwptr_log_entry *entry;
+ char name[16];
+ unsigned int idx;
+ int cnt;
+
+ if (log == NULL)
+ return;
+ if (xrun_debug(substream, XRUN_DEBUG_LOGONCE) && log->hit)
+ return;
+ pcm_debug_name(substream, name, sizeof(name));
+ for (cnt = 0, idx = log->idx; cnt < XRUN_LOG_CNT; cnt++) {
+ entry = &log->entries[idx];
+ if (entry->period_size == 0)
+ break;
+ snd_printd("hwptr log: %s: j=%lu, pos=%ld/%ld/%ld, "
+ "hwptr=%ld/%ld\n",
+ name, entry->jiffies, (unsigned long)entry->pos,
+ (unsigned long)entry->period_size,
+ (unsigned long)entry->buffer_size,
+ (unsigned long)entry->old_hw_ptr,
+ (unsigned long)entry->hw_ptr_base);
+ idx++;
+ idx %= XRUN_LOG_CNT;
+ }
+ log->hit = 1;
}
-static int snd_pcm_update_hw_ptr_post(struct snd_pcm_substream *substream,
- struct snd_pcm_runtime *runtime)
+#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
+
+#define xrun_debug(substream, mask) 0
+#define xrun(substream) do { } while (0)
+#define hw_ptr_error(substream, fmt, args...) do { } while (0)
+#define xrun_log(substream, pos) do { } while (0)
+#define xrun_log_show(substream) do { } while (0)
+
+#endif
+
+int snd_pcm_update_state(struct snd_pcm_substream *substream,
+ struct snd_pcm_runtime *runtime)
{
snd_pcm_uframes_t avail;
@@ -208,89 +285,96 @@ static int snd_pcm_update_hw_ptr_post(struct snd_pcm_substream *substream,
return -EPIPE;
}
}
- if (avail >= runtime->control->avail_min)
+ if (!runtime->nowake && avail >= runtime->control->avail_min)
wake_up(&runtime->sleep);
return 0;
}
-#define hw_ptr_error(substream, fmt, args...) \
- do { \
- if (xrun_debug(substream, 1)) { \
- if (printk_ratelimit()) { \
- snd_printd("PCM: " fmt, ##args); \
- } \
- dump_stack_on_xrun(substream); \
- } \
- } while (0)
-
-static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
+static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
+ unsigned int in_interrupt)
{
struct snd_pcm_runtime *runtime = substream->runtime;
snd_pcm_uframes_t pos;
- snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_ptr_interrupt, hw_base;
+ snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
snd_pcm_sframes_t hdelta, delta;
unsigned long jdelta;
old_hw_ptr = runtime->status->hw_ptr;
- pos = snd_pcm_update_hw_ptr_pos(substream, runtime);
+ pos = substream->ops->pointer(substream);
if (pos == SNDRV_PCM_POS_XRUN) {
xrun(substream);
return -EPIPE;
}
- if (xrun_debug(substream, 8)) {
- char name[16];
- pcm_debug_name(substream, name, sizeof(name));
- snd_printd("period_update: %s: pos=0x%x/0x%x/0x%x, "
- "hwptr=0x%lx, hw_base=0x%lx, hw_intr=0x%lx\n",
- name, (unsigned int)pos,
- (unsigned int)runtime->period_size,
- (unsigned int)runtime->buffer_size,
- (unsigned long)old_hw_ptr,
- (unsigned long)runtime->hw_ptr_base,
- (unsigned long)runtime->hw_ptr_interrupt);
+ if (pos >= runtime->buffer_size) {
+ if (printk_ratelimit()) {
+ char name[16];
+ pcm_debug_name(substream, name, sizeof(name));
+ xrun_log_show(substream);
+ snd_printd(KERN_ERR "BUG: %s, pos = %ld, "
+ "buffer size = %ld, period size = %ld\n",
+ name, pos, runtime->buffer_size,
+ runtime->period_size);
+ }
+ pos = 0;
}
+ pos -= pos % runtime->min_align;
+ if (xrun_debug(substream, XRUN_DEBUG_LOG))
+ xrun_log(substream, pos);
hw_base = runtime->hw_ptr_base;
new_hw_ptr = hw_base + pos;
- hw_ptr_interrupt = runtime->hw_ptr_interrupt + runtime->period_size;
- delta = new_hw_ptr - hw_ptr_interrupt;
- if (hw_ptr_interrupt >= runtime->boundary) {
- hw_ptr_interrupt -= runtime->boundary;
- if (hw_base < runtime->boundary / 2)
- /* hw_base was already lapped; recalc delta */
- delta = new_hw_ptr - hw_ptr_interrupt;
- }
- if (delta < 0) {
- if (runtime->periods == 1 || new_hw_ptr < old_hw_ptr)
- delta += runtime->buffer_size;
- if (delta < 0) {
- hw_ptr_error(substream,
- "Unexpected hw_pointer value "
- "(stream=%i, pos=%ld, intr_ptr=%ld)\n",
- substream->stream, (long)pos,
- (long)hw_ptr_interrupt);
-#if 1
- /* simply skipping the hwptr update seems more
- * robust in some cases, e.g. on VMware with
- * inaccurate timer source
- */
- return 0; /* skip this update */
-#else
- /* rebase to interrupt position */
- hw_base = new_hw_ptr = hw_ptr_interrupt;
- /* align hw_base to buffer_size */
- hw_base -= hw_base % runtime->buffer_size;
- delta = 0;
-#endif
- } else {
+ if (in_interrupt) {
+ /* we know that one period was processed */
+ /* delta = "expected next hw_ptr" for in_interrupt != 0 */
+ delta = old_hw_ptr - (old_hw_ptr % runtime->period_size)
+ + runtime->period_size;
+ if (delta > new_hw_ptr) {
hw_base += runtime->buffer_size;
if (hw_base >= runtime->boundary)
hw_base = 0;
new_hw_ptr = hw_base + pos;
+ goto __delta;
}
}
+ /* new_hw_ptr might be lower than old_hw_ptr in case when */
+ /* pointer crosses the end of the ring buffer */
+ if (new_hw_ptr < old_hw_ptr) {
+ hw_base += runtime->buffer_size;
+ if (hw_base >= runtime->boundary)
+ hw_base = 0;
+ new_hw_ptr = hw_base + pos;
+ }
+ __delta:
+ delta = (new_hw_ptr - old_hw_ptr) % runtime->boundary;
+ if (xrun_debug(substream, in_interrupt ?
+ XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) {
+ char name[16];
+ pcm_debug_name(substream, name, sizeof(name));
+ snd_printd("%s_update: %s: pos=%u/%u/%u, "
+ "hwptr=%ld/%ld/%ld/%ld\n",
+ in_interrupt ? "period" : "hwptr",
+ name,
+ (unsigned int)pos,
+ (unsigned int)runtime->period_size,
+ (unsigned int)runtime->buffer_size,
+ (unsigned long)delta,
+ (unsigned long)old_hw_ptr,
+ (unsigned long)new_hw_ptr,
+ (unsigned long)runtime->hw_ptr_base);
+ }
+ /* something must be really wrong */
+ if (delta >= runtime->buffer_size + runtime->period_size) {
+ hw_ptr_error(substream,
+ "Unexpected hw_pointer value %s"
+ "(stream=%i, pos=%ld, new_hw_ptr=%ld, "
+ "old_hw_ptr=%ld)\n",
+ in_interrupt ? "[Q] " : "[P]",
+ substream->stream, (long)pos,
+ (long)new_hw_ptr, (long)old_hw_ptr);
+ return 0;
+ }
/* Do jiffies check only in xrun_debug mode */
- if (!xrun_debug(substream, 4))
+ if (!xrun_debug(substream, XRUN_DEBUG_JIFFIESCHECK))
goto no_jiffies_check;
/* Skip the jiffies check for hardwares with BATCH flag.
@@ -299,7 +383,7 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
*/
if (runtime->hw.info & SNDRV_PCM_INFO_BATCH)
goto no_jiffies_check;
- hdelta = new_hw_ptr - old_hw_ptr;
+ hdelta = delta;
if (hdelta < runtime->delay)
goto no_jiffies_check;
hdelta -= runtime->delay;
@@ -308,130 +392,62 @@ static int snd_pcm_update_hw_ptr_interrupt(struct snd_pcm_substream *substream)
delta = jdelta /
(((runtime->period_size * HZ) / runtime->rate)
+ HZ/100);
+ /* move new_hw_ptr according jiffies not pos variable */
+ new_hw_ptr = old_hw_ptr;
+ /* use loop to avoid checks for delta overflows */
+ /* the delta value is small or zero in most cases */
+ while (delta > 0) {
+ new_hw_ptr += runtime->period_size;
+ if (new_hw_ptr >= runtime->boundary)
+ new_hw_ptr -= runtime->boundary;
+ delta--;
+ }
+ /* align hw_base to buffer_size */
+ hw_base = new_hw_ptr - (new_hw_ptr % runtime->buffer_size);
+ delta = 0;
hw_ptr_error(substream,
- "hw_ptr skipping! [Q] "
+ "hw_ptr skipping! %s"
"(pos=%ld, delta=%ld, period=%ld, "
- "jdelta=%lu/%lu/%lu)\n",
+ "jdelta=%lu/%lu/%lu, hw_ptr=%ld/%ld)\n",
+ in_interrupt ? "[Q] " : "",
(long)pos, (long)hdelta,
(long)runtime->period_size, jdelta,
- ((hdelta * HZ) / runtime->rate), delta);
- hw_ptr_interrupt = runtime->hw_ptr_interrupt +
- runtime->period_size * delta;
- if (hw_ptr_interrupt >= runtime->boundary)
- hw_ptr_interrupt -= runtime->boundary;
- /* rebase to interrupt position */
- hw_base = new_hw_ptr = hw_ptr_interrupt;
- /* align hw_base to buffer_size */
- hw_base -= hw_base % runtime->buffer_size;
- delta = 0;
+ ((hdelta * HZ) / runtime->rate), delta,
+ (unsigned long)old_hw_ptr,
+ (unsigned long)new_hw_ptr);
}
no_jiffies_check:
if (delta > runtime->period_size + runtime->period_size / 2) {
hw_ptr_error(substream,
- "Lost interrupts? "
- "(stream=%i, delta=%ld, intr_ptr=%ld)\n",
+ "Lost interrupts? %s"
+ "(stream=%i, delta=%ld, new_hw_ptr=%ld, "
+ "old_hw_ptr=%ld)\n",
+ in_interrupt ? "[Q] " : "",
substream->stream, (long)delta,
- (long)hw_ptr_interrupt);
- /* rebase hw_ptr_interrupt */
- hw_ptr_interrupt =
- new_hw_ptr - new_hw_ptr % runtime->period_size;
+ (long)new_hw_ptr,
+ (long)old_hw_ptr);
}
- runtime->hw_ptr_interrupt = hw_ptr_interrupt;
+
+ if (runtime->status->hw_ptr == new_hw_ptr)
+ return 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
runtime->silence_size > 0)
snd_pcm_playback_silence(substream, new_hw_ptr);
- if (runtime->status->hw_ptr == new_hw_ptr)
- return 0;
-
runtime->hw_ptr_base = hw_base;
runtime->status->hw_ptr = new_hw_ptr;
runtime->hw_ptr_jiffies = jiffies;
if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
- return snd_pcm_update_hw_ptr_post(substream, runtime);
+ return snd_pcm_update_state(substream, runtime);
}
/* CAUTION: call it with irq disabled */
int snd_pcm_update_hw_ptr(struct snd_pcm_substream *substream)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- snd_pcm_uframes_t pos;
- snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
- snd_pcm_sframes_t delta;
- unsigned long jdelta;
-
- old_hw_ptr = runtime->status->hw_ptr;
- pos = snd_pcm_update_hw_ptr_pos(substream, runtime);
- if (pos == SNDRV_PCM_POS_XRUN) {
- xrun(substream);
- return -EPIPE;
- }
- if (xrun_debug(substream, 16)) {
- char name[16];
- pcm_debug_name(substream, name, sizeof(name));
- snd_printd("hw_update: %s: pos=0x%x/0x%x/0x%x, "
- "hwptr=0x%lx, hw_base=0x%lx, hw_intr=0x%lx\n",
- name, (unsigned int)pos,
- (unsigned int)runtime->period_size,
- (unsigned int)runtime->buffer_size,
- (unsigned long)old_hw_ptr,
- (unsigned long)runtime->hw_ptr_base,
- (unsigned long)runtime->hw_ptr_interrupt);
- }
-
- hw_base = runtime->hw_ptr_base;
- new_hw_ptr = hw_base + pos;
-
- delta = new_hw_ptr - old_hw_ptr;
- jdelta = jiffies - runtime->hw_ptr_jiffies;
- if (delta < 0) {
- delta += runtime->buffer_size;
- if (delta < 0) {
- hw_ptr_error(substream,
- "Unexpected hw_pointer value [2] "
- "(stream=%i, pos=%ld, old_ptr=%ld, jdelta=%li)\n",
- substream->stream, (long)pos,
- (long)old_hw_ptr, jdelta);
- return 0;
- }
- hw_base += runtime->buffer_size;
- if (hw_base >= runtime->boundary)
- hw_base = 0;
- new_hw_ptr = hw_base + pos;
- }
- /* Do jiffies check only in xrun_debug mode */
- if (!xrun_debug(substream, 4))
- goto no_jiffies_check;
- if (delta < runtime->delay)
- goto no_jiffies_check;
- delta -= runtime->delay;
- if (((delta * HZ) / runtime->rate) > jdelta + HZ/100) {
- hw_ptr_error(substream,
- "hw_ptr skipping! "
- "(pos=%ld, delta=%ld, period=%ld, jdelta=%lu/%lu)\n",
- (long)pos, (long)delta,
- (long)runtime->period_size, jdelta,
- ((delta * HZ) / runtime->rate));
- return 0;
- }
- no_jiffies_check:
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
- runtime->silence_size > 0)
- snd_pcm_playback_silence(substream, new_hw_ptr);
-
- if (runtime->status->hw_ptr == new_hw_ptr)
- return 0;
-
- runtime->hw_ptr_base = hw_base;
- runtime->status->hw_ptr = new_hw_ptr;
- runtime->hw_ptr_jiffies = jiffies;
- if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
- snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp);
-
- return snd_pcm_update_hw_ptr_post(substream, runtime);
+ return snd_pcm_update_hw_ptr0(substream, 0);
}
/**
@@ -745,10 +761,13 @@ int snd_interval_ratnum(struct snd_interval *i,
unsigned int rats_count, struct snd_ratnum *rats,
unsigned int *nump, unsigned int *denp)
{
- unsigned int best_num, best_diff, best_den;
+ unsigned int best_num, best_den;
+ int best_diff;
unsigned int k;
struct snd_interval t;
int err;
+ unsigned int result_num, result_den;
+ int result_diff;
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
@@ -770,6 +789,8 @@ int snd_interval_ratnum(struct snd_interval *i,
den -= r;
}
diff = num - q * den;
+ if (diff < 0)
+ diff = -diff;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
@@ -784,6 +805,9 @@ int snd_interval_ratnum(struct snd_interval *i,
t.min = div_down(best_num, best_den);
t.openmin = !!(best_num % best_den);
+ result_num = best_num;
+ result_diff = best_diff;
+ result_den = best_den;
best_num = best_den = best_diff = 0;
for (k = 0; k < rats_count; ++k) {
unsigned int num = rats[k].num;
@@ -806,6 +830,8 @@ int snd_interval_ratnum(struct snd_interval *i,
den += rats[k].den_step - r;
}
diff = q * den - num;
+ if (diff < 0)
+ diff = -diff;
if (best_num == 0 ||
diff * best_den < best_diff * den) {
best_diff = diff;
@@ -825,10 +851,14 @@ int snd_interval_ratnum(struct snd_interval *i,
return err;
if (snd_interval_single(i)) {
+ if (best_diff * result_den < result_diff * best_den) {
+ result_num = best_num;
+ result_den = best_den;
+ }
if (nump)
- *nump = best_num;
+ *nump = result_num;
if (denp)
- *denp = best_den;
+ *denp = result_den;
}
return err;
}
@@ -1643,7 +1673,7 @@ void snd_pcm_period_elapsed(struct snd_pcm_substream *substream)
snd_pcm_stream_lock_irqsave(substream, flags);
if (!snd_pcm_running(substream) ||
- snd_pcm_update_hw_ptr_interrupt(substream) < 0)
+ snd_pcm_update_hw_ptr0(substream, 1) < 0)
goto _end;
if (substream->timer_running)
@@ -1776,6 +1806,7 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(struct snd_pcm_substream *substream,
goto _end_unlock;
}
+ runtime->nowake = 1;
while (size > 0) {
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t avail;
@@ -1797,15 +1828,17 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(struct snd_pcm_substream *substream,
if (frames > cont)
frames = cont;
if (snd_BUG_ON(!frames)) {
+ runtime->nowake = 0;
snd_pcm_stream_unlock_irq(substream);
return -EINVAL;
}
appl_ptr = runtime->control->appl_ptr;
appl_ofs = appl_ptr % runtime->buffer_size;
snd_pcm_stream_unlock_irq(substream);
- if ((err = transfer(substream, appl_ofs, data, offset, frames)) < 0)
- goto _end;
+ err = transfer(substream, appl_ofs, data, offset, frames);
snd_pcm_stream_lock_irq(substream);
+ if (err < 0)
+ goto _end_unlock;
switch (runtime->status->state) {
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
@@ -1834,8 +1867,10 @@ static snd_pcm_sframes_t snd_pcm_lib_write1(struct snd_pcm_substream *substream,
}
}
_end_unlock:
+ runtime->nowake = 0;
+ if (xfer > 0 && err >= 0)
+ snd_pcm_update_state(substream, runtime);
snd_pcm_stream_unlock_irq(substream);
- _end:
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
@@ -1993,6 +2028,7 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
goto _end_unlock;
}
+ runtime->nowake = 1;
while (size > 0) {
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t avail;
@@ -2021,15 +2057,17 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
if (frames > cont)
frames = cont;
if (snd_BUG_ON(!frames)) {
+ runtime->nowake = 0;
snd_pcm_stream_unlock_irq(substream);
return -EINVAL;
}
appl_ptr = runtime->control->appl_ptr;
appl_ofs = appl_ptr % runtime->buffer_size;
snd_pcm_stream_unlock_irq(substream);
- if ((err = transfer(substream, appl_ofs, data, offset, frames)) < 0)
- goto _end;
+ err = transfer(substream, appl_ofs, data, offset, frames);
snd_pcm_stream_lock_irq(substream);
+ if (err < 0)
+ goto _end_unlock;
switch (runtime->status->state) {
case SNDRV_PCM_STATE_XRUN:
err = -EPIPE;
@@ -2052,8 +2090,10 @@ static snd_pcm_sframes_t snd_pcm_lib_read1(struct snd_pcm_substream *substream,
xfer += frames;
}
_end_unlock:
+ runtime->nowake = 0;
+ if (xfer > 0 && err >= 0)
+ snd_pcm_update_state(substream, runtime);
snd_pcm_stream_unlock_irq(substream);
- _end:
return xfer > 0 ? (snd_pcm_sframes_t)xfer : err;
}
diff --git a/sound/core/pcm_memory.c b/sound/core/pcm_memory.c
index caa7796bc2f5..d6d49d6651f9 100644
--- a/sound/core/pcm_memory.c
+++ b/sound/core/pcm_memory.c
@@ -23,6 +23,7 @@
#include <linux/time.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/info.h>
@@ -434,3 +435,57 @@ int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream)
}
EXPORT_SYMBOL(snd_pcm_lib_free_pages);
+
+int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
+ size_t size, gfp_t gfp_flags)
+{
+ struct snd_pcm_runtime *runtime;
+
+ if (PCM_RUNTIME_CHECK(substream))
+ return -EINVAL;
+ runtime = substream->runtime;
+ if (runtime->dma_area) {
+ if (runtime->dma_bytes >= size)
+ return 0; /* already large enough */
+ vfree(runtime->dma_area);
+ }
+ runtime->dma_area = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+ if (!runtime->dma_area)
+ return -ENOMEM;
+ runtime->dma_bytes = size;
+ return 1;
+}
+EXPORT_SYMBOL(_snd_pcm_lib_alloc_vmalloc_buffer);
+
+/**
+ * snd_pcm_lib_free_vmalloc_buffer - free vmalloc buffer
+ * @substream: the substream with a buffer allocated by
+ * snd_pcm_lib_alloc_vmalloc_buffer()
+ */
+int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime;
+
+ if (PCM_RUNTIME_CHECK(substream))
+ return -EINVAL;
+ runtime = substream->runtime;
+ vfree(runtime->dma_area);
+ runtime->dma_area = NULL;
+ return 0;
+}
+EXPORT_SYMBOL(snd_pcm_lib_free_vmalloc_buffer);
+
+/**
+ * snd_pcm_lib_get_vmalloc_page - map vmalloc buffer offset to page struct
+ * @substream: the substream with a buffer allocated by
+ * snd_pcm_lib_alloc_vmalloc_buffer()
+ * @offset: offset in the buffer
+ *
+ * This function is to be used as the page callback in the PCM ops.
+ */
+struct page *snd_pcm_lib_get_vmalloc_page(struct snd_pcm_substream *substream,
+ unsigned long offset)
+{
+ return vmalloc_to_page(substream->runtime->dma_area + offset);
+}
+EXPORT_SYMBOL(snd_pcm_lib_get_vmalloc_page);
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 25b0641e6b8c..a870fe696578 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -516,6 +516,7 @@ static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
struct snd_pcm_sw_params *params)
{
struct snd_pcm_runtime *runtime;
+ int err;
if (PCM_RUNTIME_CHECK(substream))
return -ENXIO;
@@ -540,6 +541,7 @@ static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
if (params->silence_threshold > runtime->buffer_size)
return -EINVAL;
}
+ err = 0;
snd_pcm_stream_lock_irq(substream);
runtime->tstamp_mode = params->tstamp_mode;
runtime->period_step = params->period_step;
@@ -553,10 +555,10 @@ static int snd_pcm_sw_params(struct snd_pcm_substream *substream,
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
runtime->silence_size > 0)
snd_pcm_playback_silence(substream, ULONG_MAX);
- wake_up(&runtime->sleep);
+ err = snd_pcm_update_state(substream, runtime);
}
snd_pcm_stream_unlock_irq(substream);
- return 0;
+ return err;
}
static int snd_pcm_sw_params_user(struct snd_pcm_substream *substream,
@@ -1247,8 +1249,6 @@ static int snd_pcm_do_reset(struct snd_pcm_substream *substream, int state)
if (err < 0)
return err;
runtime->hw_ptr_base = 0;
- runtime->hw_ptr_interrupt = runtime->status->hw_ptr -
- runtime->status->hw_ptr % runtime->period_size;
runtime->silence_start = runtime->status->hw_ptr;
runtime->silence_filled = 0;
return 0;
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index 6644d0034fba..c8385d26a16f 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -46,7 +46,6 @@
*/
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/asoundef.h>
@@ -56,55 +55,6 @@
/*
- * we use a vmalloc'ed (sg-)buffer
- */
-
-/* get the physical page pointer on the given offset */
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
- return vmalloc_to_page(pageptr);
-}
-
-/*
- * allocate a buffer via vmalloc_32().
- * called from hw_params
- * NOTE: this may be called not only once per pcm open!
- */
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t size)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
- if (runtime->dma_area) {
- /* already allocated */
- if (runtime->dma_bytes >= size)
- return 0; /* already enough large */
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc_32(size);
- if (! runtime->dma_area)
- return -ENOMEM;
- memset(runtime->dma_area, 0, size);
- runtime->dma_bytes = size;
- return 1; /* changed */
-}
-
-/*
- * free the buffer.
- * called from hw_free callback
- * NOTE: this may be called not only once per pcm open!
- */
-static int snd_pcm_free_vmalloc_buffer(struct snd_pcm_substream *subs)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
-
- vfree(runtime->dma_area);
- runtime->dma_area = NULL;
- return 0;
-}
-
-
-/*
* read three pending pcm bytes via inb()
*/
static void vx_pcm_read_per_bytes(struct vx_core *chip, struct snd_pcm_runtime *runtime,
@@ -865,7 +815,8 @@ static snd_pcm_uframes_t vx_pcm_playback_pointer(struct snd_pcm_substream *subs)
static int vx_pcm_hw_params(struct snd_pcm_substream *subs,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_alloc_vmalloc_buffer(subs, params_buffer_bytes(hw_params));
+ return snd_pcm_lib_alloc_vmalloc_32_buffer
+ (subs, params_buffer_bytes(hw_params));
}
/*
@@ -873,7 +824,7 @@ static int vx_pcm_hw_params(struct snd_pcm_substream *subs,
*/
static int vx_pcm_hw_free(struct snd_pcm_substream *subs)
{
- return snd_pcm_free_vmalloc_buffer(subs);
+ return snd_pcm_lib_free_vmalloc_buffer(subs);
}
/*
@@ -953,7 +904,7 @@ static struct snd_pcm_ops vx_pcm_playback_ops = {
.prepare = vx_pcm_prepare,
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_playback_pointer,
- .page = snd_pcm_get_vmalloc_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -1173,7 +1124,7 @@ static struct snd_pcm_ops vx_pcm_capture_ops = {
.prepare = vx_pcm_prepare,
.trigger = vx_pcm_trigger,
.pointer = vx_pcm_capture_pointer,
- .page = snd_pcm_get_vmalloc_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
diff --git a/sound/isa/Kconfig b/sound/isa/Kconfig
index 02fe81ca88fd..755a0a5f0e3f 100644
--- a/sound/isa/Kconfig
+++ b/sound/isa/Kconfig
@@ -63,15 +63,16 @@ config SND_AD1848
will be called snd-ad1848.
config SND_ALS100
- tristate "Avance Logic ALS100/ALS120"
+ tristate "Diamond Tech. DT-019x and Avance Logic ALSxxx"
depends on PNP
select ISAPNP
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_SB16_DSP
help
- Say Y here to include support for soundcards based on Avance
- Logic ALS100, ALS110, ALS120 and ALS200 chips.
+ Say Y here to include support for soundcards based on the
+ Diamond Technologies DT-019X or Avance Logic chips: ALS007,
+ ALS100, ALS110, ALS120 and ALS200 chips.
To compile this driver as a module, choose M here: the module
will be called snd-als100.
@@ -127,20 +128,6 @@ config SND_CS4236
To compile this driver as a module, choose M here: the module
will be called snd-cs4236.
-config SND_DT019X
- tristate "Diamond Technologies DT-019X, Avance Logic ALS-007"
- depends on PNP
- select ISAPNP
- select SND_OPL3_LIB
- select SND_MPU401_UART
- select SND_SB16_DSP
- help
- Say Y here to include support for soundcards based on the
- Diamond Technologies DT-019X or Avance Logic ALS-007 chips.
-
- To compile this driver as a module, choose M here: the module
- will be called snd-dt019x.
-
config SND_ES968
tristate "Generic ESS ES968 driver"
depends on PNP
@@ -252,6 +239,22 @@ config SND_INTERWAVE_STB
To compile this driver as a module, choose M here: the module
will be called snd-interwave-stb.
+config SND_JAZZ16
+ tristate "Media Vision Jazz16 card and compatibles"
+ select SND_OPL3_LIB
+ select SND_MPU401_UART
+ select SND_SB8_DSP
+ help
+ Say Y here to include support for soundcards based on the
+ Media Vision Jazz16 chipset: digital chip MVD1216 (Jazz16),
+ codec MVA416 (CS4216) and mixer MVA514 (ICS2514).
+ Media Vision's Jazz16 cards were sold under names Pro Sonic 16,
+ Premium 3-D and Pro 3-D. There were also OEMs cards with the
+ Jazz16 chipset.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-jazz16.
+
config SND_OPL3SA2
tristate "Yamaha OPL3-SA2/SA3"
select SND_OPL3_LIB
diff --git a/sound/isa/Makefile b/sound/isa/Makefile
index b906b9a1a81e..c73d30c4f462 100644
--- a/sound/isa/Makefile
+++ b/sound/isa/Makefile
@@ -7,7 +7,6 @@ snd-adlib-objs := adlib.o
snd-als100-objs := als100.o
snd-azt2320-objs := azt2320.o
snd-cmi8330-objs := cmi8330.o
-snd-dt019x-objs := dt019x.o
snd-es18xx-objs := es18xx.o
snd-opl3sa2-objs := opl3sa2.o
snd-sc6000-objs := sc6000.o
@@ -19,7 +18,6 @@ obj-$(CONFIG_SND_ADLIB) += snd-adlib.o
obj-$(CONFIG_SND_ALS100) += snd-als100.o
obj-$(CONFIG_SND_AZT2320) += snd-azt2320.o
obj-$(CONFIG_SND_CMI8330) += snd-cmi8330.o
-obj-$(CONFIG_SND_DT019X) += snd-dt019x.o
obj-$(CONFIG_SND_ES18XX) += snd-es18xx.o
obj-$(CONFIG_SND_OPL3SA2) += snd-opl3sa2.o
obj-$(CONFIG_SND_SC6000) += snd-sc6000.o
diff --git a/sound/isa/als100.c b/sound/isa/als100.c
index 5fd52e4d7079..20becc89f6f6 100644
--- a/sound/isa/als100.c
+++ b/sound/isa/als100.c
@@ -2,9 +2,13 @@
/*
card-als100.c - driver for Avance Logic ALS100 based soundcards.
Copyright (C) 1999-2000 by Massimo Piccioni <dafastidio@libero.it>
+ Copyright (C) 1999-2002 by Massimo Piccioni <dafastidio@libero.it>
Thanks to Pierfrancesco 'qM2' Passerini.
+ Generalised for soundcards based on DT-0196 and ALS-007 chips
+ by Jonathan Woithe <jwoithe@physics.adelaide.edu.au>: June 2002.
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
@@ -33,10 +37,10 @@
#define PFX "als100: "
-MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
-MODULE_DESCRIPTION("Avance Logic ALS1X0");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS100 - PRO16PNP},"
+MODULE_DESCRIPTION("Avance Logic ALS007/ALS1X0");
+MODULE_SUPPORTED_DEVICE("{{Diamond Technologies DT-019X},"
+ "{Avance Logic ALS-007}}"
+ "{{Avance Logic,ALS100 - PRO16PNP},"
"{Avance Logic,ALS110},"
"{Avance Logic,ALS120},"
"{Avance Logic,ALS200},"
@@ -45,9 +49,12 @@ MODULE_SUPPORTED_DEVICE("{{Avance Logic,ALS100 - PRO16PNP},"
"{Avance Logic,ALS120},"
"{RTL,RTL3000}}");
+MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
+MODULE_LICENSE("GPL");
+
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP; /* Enable this card */
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
@@ -57,14 +64,15 @@ static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */
static int dma16[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */
module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "Index value for als100 based soundcard.");
+MODULE_PARM_DESC(index, "Index value for Avance Logic based soundcard.");
module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string for als100 based soundcard.");
+MODULE_PARM_DESC(id, "ID string for Avance Logic based soundcard.");
module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "Enable als100 based soundcard.");
+MODULE_PARM_DESC(enable, "Enable Avance Logic based soundcard.");
+
+MODULE_ALIAS("snd-dt019x");
struct snd_card_als100 {
- int dev_no;
struct pnp_dev *dev;
struct pnp_dev *devmpu;
struct pnp_dev *devopl;
@@ -72,25 +80,43 @@ struct snd_card_als100 {
};
static struct pnp_card_device_id snd_als100_pnpids[] = {
+ /* DT197A30 */
+ { .id = "RWB1688",
+ .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } },
+ .driver_data = SB_HW_DT019X },
+ /* DT0196 / ALS-007 */
+ { .id = "ALS0007",
+ .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } },
+ .driver_data = SB_HW_DT019X },
/* ALS100 - PRO16PNP */
- { .id = "ALS0001", .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } } },
+ { .id = "ALS0001",
+ .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" } },
+ .driver_data = SB_HW_ALS100 },
/* ALS110 - MF1000 - Digimate 3D Sound */
- { .id = "ALS0110", .devs = { { "@@@1001" }, { "@X@1001" }, { "@H@1001" } } },
+ { .id = "ALS0110",
+ .devs = { { "@@@1001" }, { "@X@1001" }, { "@H@1001" } },
+ .driver_data = SB_HW_ALS100 },
/* ALS120 */
- { .id = "ALS0120", .devs = { { "@@@2001" }, { "@X@2001" }, { "@H@2001" } } },
+ { .id = "ALS0120",
+ .devs = { { "@@@2001" }, { "@X@2001" }, { "@H@2001" } },
+ .driver_data = SB_HW_ALS100 },
/* ALS200 */
- { .id = "ALS0200", .devs = { { "@@@0020" }, { "@X@0020" }, { "@H@0001" } } },
+ { .id = "ALS0200",
+ .devs = { { "@@@0020" }, { "@X@0020" }, { "@H@0001" } },
+ .driver_data = SB_HW_ALS100 },
/* ALS200 OEM */
- { .id = "ALS0200", .devs = { { "@@@0020" }, { "@X@0020" }, { "@H@0020" } } },
+ { .id = "ALS0200",
+ .devs = { { "@@@0020" }, { "@X@0020" }, { "@H@0020" } },
+ .driver_data = SB_HW_ALS100 },
/* RTL3000 */
- { .id = "RTL3000", .devs = { { "@@@2001" }, { "@X@2001" }, { "@H@2001" } } },
- { .id = "", } /* end */
+ { .id = "RTL3000",
+ .devs = { { "@@@2001" }, { "@X@2001" }, { "@H@2001" } },
+ .driver_data = SB_HW_ALS100 },
+ { .id = "" } /* end */
};
MODULE_DEVICE_TABLE(pnp_card, snd_als100_pnpids);
-#define DRIVER_NAME "snd-card-als100"
-
static int __devinit snd_card_als100_pnp(int dev, struct snd_card_als100 *acard,
struct pnp_card_link *card,
const struct pnp_card_device_id *id)
@@ -113,8 +139,12 @@ static int __devinit snd_card_als100_pnp(int dev, struct snd_card_als100 *acard,
return err;
}
port[dev] = pnp_port_start(pdev, 0);
- dma8[dev] = pnp_dma(pdev, 1);
- dma16[dev] = pnp_dma(pdev, 0);
+ if (id->driver_data == SB_HW_DT019X)
+ dma8[dev] = pnp_dma(pdev, 0);
+ else {
+ dma8[dev] = pnp_dma(pdev, 1);
+ dma16[dev] = pnp_dma(pdev, 0);
+ }
irq[dev] = pnp_irq(pdev, 0);
pdev = acard->devmpu;
@@ -175,22 +205,33 @@ static int __devinit snd_card_als100_probe(int dev,
}
snd_card_set_dev(card, &pcard->card->dev);
- if ((error = snd_sbdsp_create(card, port[dev],
- irq[dev],
- snd_sb16dsp_interrupt,
- dma8[dev],
- dma16[dev],
- SB_HW_ALS100, &chip)) < 0) {
+ if (pid->driver_data == SB_HW_DT019X)
+ dma16[dev] = -1;
+
+ error = snd_sbdsp_create(card, port[dev], irq[dev],
+ snd_sb16dsp_interrupt,
+ dma8[dev], dma16[dev],
+ pid->driver_data,
+ &chip);
+ if (error < 0) {
snd_card_free(card);
return error;
}
acard->chip = chip;
- strcpy(card->driver, "ALS100");
- strcpy(card->shortname, "Avance Logic ALS100");
- sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d&%d",
- card->shortname, chip->name, chip->port,
- irq[dev], dma8[dev], dma16[dev]);
+ if (pid->driver_data == SB_HW_DT019X) {
+ strcpy(card->driver, "DT-019X");
+ strcpy(card->shortname, "Diamond Tech. DT-019X");
+ sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d",
+ card->shortname, chip->name, chip->port,
+ irq[dev], dma8[dev]);
+ } else {
+ strcpy(card->driver, "ALS100");
+ strcpy(card->shortname, "Avance Logic ALS100");
+ sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d&%d",
+ card->shortname, chip->name, chip->port,
+ irq[dev], dma8[dev], dma16[dev]);
+ }
if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) {
snd_card_free(card);
@@ -203,9 +244,19 @@ static int __devinit snd_card_als100_probe(int dev,
}
if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) {
- if (snd_mpu401_uart_new(card, 0, MPU401_HW_ALS100,
+ int mpu_type = MPU401_HW_ALS100;
+
+ if (mpu_irq[dev] == SNDRV_AUTO_IRQ)
+ mpu_irq[dev] = -1;
+
+ if (pid->driver_data == SB_HW_DT019X)
+ mpu_type = MPU401_HW_MPU401;
+
+ if (snd_mpu401_uart_new(card, 0,
+ mpu_type,
mpu_port[dev], 0,
- mpu_irq[dev], IRQF_DISABLED,
+ mpu_irq[dev],
+ mpu_irq[dev] >= 0 ? IRQF_DISABLED : 0,
NULL) < 0)
snd_printk(KERN_ERR PFX "no MPU-401 device at 0x%lx\n", mpu_port[dev]);
}
@@ -291,7 +342,7 @@ static int snd_als100_pnp_resume(struct pnp_card_link *pcard)
static struct pnp_card_driver als100_pnpc_driver = {
.flags = PNP_DRIVER_RES_DISABLE,
- .name = "als100",
+ .name = "als100",
.id_table = snd_als100_pnpids,
.probe = snd_als100_pnp_detect,
.remove = __devexit_p(snd_als100_pnp_remove),
@@ -312,7 +363,7 @@ static int __init alsa_card_als100_init(void)
if (!als100_devices) {
pnp_unregister_card_driver(&als100_pnpc_driver);
#ifdef MODULE
- snd_printk(KERN_ERR "no ALS100 based soundcards found\n");
+ snd_printk(KERN_ERR "no Avance Logic based soundcards found\n");
#endif
return -ENODEV;
}
diff --git a/sound/isa/dt019x.c b/sound/isa/dt019x.c
deleted file mode 100644
index 80f5b1af9be8..000000000000
--- a/sound/isa/dt019x.c
+++ /dev/null
@@ -1,321 +0,0 @@
-
-/*
- dt019x.c - driver for Diamond Technologies DT-0197H based soundcards.
- Copyright (C) 1999, 2002 by Massimo Piccioni <dafastidio@libero.it>
-
- Generalised for soundcards based on DT-0196 and ALS-007 chips
- by Jonathan Woithe <jwoithe@physics.adelaide.edu.au>: June 2002.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
-
-#include <linux/init.h>
-#include <linux/wait.h>
-#include <linux/pnp.h>
-#include <linux/moduleparam.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/mpu401.h>
-#include <sound/opl3.h>
-#include <sound/sb.h>
-
-#define PFX "dt019x: "
-
-MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
-MODULE_DESCRIPTION("Diamond Technologies DT-019X / Avance Logic ALS-007");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{Diamond Technologies DT-019X},"
- "{Avance Logic ALS-007}}");
-
-static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */
-static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
-static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
-static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; /* PnP setup */
-static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* PnP setup */
-static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; /* PnP setup */
-static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; /* PnP setup */
-
-module_param_array(index, int, NULL, 0444);
-MODULE_PARM_DESC(index, "Index value for DT-019X based soundcard.");
-module_param_array(id, charp, NULL, 0444);
-MODULE_PARM_DESC(id, "ID string for DT-019X based soundcard.");
-module_param_array(enable, bool, NULL, 0444);
-MODULE_PARM_DESC(enable, "Enable DT-019X based soundcard.");
-
-struct snd_card_dt019x {
- struct pnp_dev *dev;
- struct pnp_dev *devmpu;
- struct pnp_dev *devopl;
- struct snd_sb *chip;
-};
-
-static struct pnp_card_device_id snd_dt019x_pnpids[] = {
- /* DT197A30 */
- { .id = "RWB1688", .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" }, } },
- /* DT0196 / ALS-007 */
- { .id = "ALS0007", .devs = { { "@@@0001" }, { "@X@0001" }, { "@H@0001" }, } },
- { .id = "", }
-};
-
-MODULE_DEVICE_TABLE(pnp_card, snd_dt019x_pnpids);
-
-
-#define DRIVER_NAME "snd-card-dt019x"
-
-
-static int __devinit snd_card_dt019x_pnp(int dev, struct snd_card_dt019x *acard,
- struct pnp_card_link *card,
- const struct pnp_card_device_id *pid)
-{
- struct pnp_dev *pdev;
- int err;
-
- acard->dev = pnp_request_card_device(card, pid->devs[0].id, NULL);
- if (acard->dev == NULL)
- return -ENODEV;
-
- acard->devmpu = pnp_request_card_device(card, pid->devs[1].id, NULL);
- acard->devopl = pnp_request_card_device(card, pid->devs[2].id, NULL);
-
- pdev = acard->dev;
-
- err = pnp_activate_dev(pdev);
- if (err < 0) {
- snd_printk(KERN_ERR PFX "DT-019X AUDIO pnp configure failure\n");
- return err;
- }
-
- port[dev] = pnp_port_start(pdev, 0);
- dma8[dev] = pnp_dma(pdev, 0);
- irq[dev] = pnp_irq(pdev, 0);
- snd_printdd("dt019x: found audio interface: port=0x%lx, irq=0x%x, dma=0x%x\n",
- port[dev],irq[dev],dma8[dev]);
-
- pdev = acard->devmpu;
- if (pdev != NULL) {
- err = pnp_activate_dev(pdev);
- if (err < 0) {
- pnp_release_card_device(pdev);
- snd_printk(KERN_ERR PFX "DT-019X MPU401 pnp configure failure, skipping\n");
- goto __mpu_error;
- }
- mpu_port[dev] = pnp_port_start(pdev, 0);
- mpu_irq[dev] = pnp_irq(pdev, 0);
- snd_printdd("dt019x: found MPU-401: port=0x%lx, irq=0x%x\n",
- mpu_port[dev],mpu_irq[dev]);
- } else {
- __mpu_error:
- acard->devmpu = NULL;
- mpu_port[dev] = -1;
- }
-
- pdev = acard->devopl;
- if (pdev != NULL) {
- err = pnp_activate_dev(pdev);
- if (err < 0) {
- pnp_release_card_device(pdev);
- snd_printk(KERN_ERR PFX "DT-019X OPL3 pnp configure failure, skipping\n");
- goto __fm_error;
- }
- fm_port[dev] = pnp_port_start(pdev, 0);
- snd_printdd("dt019x: found OPL3 synth: port=0x%lx\n",fm_port[dev]);
- } else {
- __fm_error:
- acard->devopl = NULL;
- fm_port[dev] = -1;
- }
-
- return 0;
-}
-
-static int __devinit snd_card_dt019x_probe(int dev, struct pnp_card_link *pcard, const struct pnp_card_device_id *pid)
-{
- int error;
- struct snd_sb *chip;
- struct snd_card *card;
- struct snd_card_dt019x *acard;
- struct snd_opl3 *opl3;
-
- error = snd_card_create(index[dev], id[dev], THIS_MODULE,
- sizeof(struct snd_card_dt019x), &card);
- if (error < 0)
- return error;
- acard = card->private_data;
-
- snd_card_set_dev(card, &pcard->card->dev);
- if ((error = snd_card_dt019x_pnp(dev, acard, pcard, pid))) {
- snd_card_free(card);
- return error;
- }
-
- if ((error = snd_sbdsp_create(card, port[dev],
- irq[dev],
- snd_sb16dsp_interrupt,
- dma8[dev],
- -1,
- SB_HW_DT019X,
- &chip)) < 0) {
- snd_card_free(card);
- return error;
- }
- acard->chip = chip;
-
- strcpy(card->driver, "DT-019X");
- strcpy(card->shortname, "Diamond Tech. DT-019X");
- sprintf(card->longname, "%s, %s at 0x%lx, irq %d, dma %d",
- card->shortname, chip->name, chip->port,
- irq[dev], dma8[dev]);
-
- if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) {
- snd_card_free(card);
- return error;
- }
- if ((error = snd_sbmixer_new(chip)) < 0) {
- snd_card_free(card);
- return error;
- }
-
- if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) {
- if (mpu_irq[dev] == SNDRV_AUTO_IRQ)
- mpu_irq[dev] = -1;
- if (snd_mpu401_uart_new(card, 0,
-/* MPU401_HW_SB,*/
- MPU401_HW_MPU401,
- mpu_port[dev], 0,
- mpu_irq[dev],
- mpu_irq[dev] >= 0 ? IRQF_DISABLED : 0,
- NULL) < 0)
- snd_printk(KERN_ERR PFX "no MPU-401 device at 0x%lx ?\n", mpu_port[dev]);
- }
-
- if (fm_port[dev] > 0 && fm_port[dev] != SNDRV_AUTO_PORT) {
- if (snd_opl3_create(card,
- fm_port[dev],
- fm_port[dev] + 2,
- OPL3_HW_AUTO, 0, &opl3) < 0) {
- snd_printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx ?\n",
- fm_port[dev], fm_port[dev] + 2);
- } else {
- if ((error = snd_opl3_timer_new(opl3, 0, 1)) < 0) {
- snd_card_free(card);
- return error;
- }
- if ((error = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) {
- snd_card_free(card);
- return error;
- }
- }
- }
-
- if ((error = snd_card_register(card)) < 0) {
- snd_card_free(card);
- return error;
- }
- pnp_set_card_drvdata(pcard, card);
- return 0;
-}
-
-static unsigned int __devinitdata dt019x_devices;
-
-static int __devinit snd_dt019x_pnp_probe(struct pnp_card_link *card,
- const struct pnp_card_device_id *pid)
-{
- static int dev;
- int res;
-
- for ( ; dev < SNDRV_CARDS; dev++) {
- if (!enable[dev])
- continue;
- res = snd_card_dt019x_probe(dev, card, pid);
- if (res < 0)
- return res;
- dev++;
- dt019x_devices++;
- return 0;
- }
- return -ENODEV;
-}
-
-static void __devexit snd_dt019x_pnp_remove(struct pnp_card_link * pcard)
-{
- snd_card_free(pnp_get_card_drvdata(pcard));
- pnp_set_card_drvdata(pcard, NULL);
-}
-
-#ifdef CONFIG_PM
-static int snd_dt019x_pnp_suspend(struct pnp_card_link *pcard, pm_message_t state)
-{
- struct snd_card *card = pnp_get_card_drvdata(pcard);
- struct snd_card_dt019x *acard = card->private_data;
- struct snd_sb *chip = acard->chip;
-
- snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
- snd_pcm_suspend_all(chip->pcm);
- snd_sbmixer_suspend(chip);
- return 0;
-}
-
-static int snd_dt019x_pnp_resume(struct pnp_card_link *pcard)
-{
- struct snd_card *card = pnp_get_card_drvdata(pcard);
- struct snd_card_dt019x *acard = card->private_data;
- struct snd_sb *chip = acard->chip;
-
- snd_sbdsp_reset(chip);
- snd_sbmixer_resume(chip);
- snd_power_change_state(card, SNDRV_CTL_POWER_D0);
- return 0;
-}
-#endif
-
-static struct pnp_card_driver dt019x_pnpc_driver = {
- .flags = PNP_DRIVER_RES_DISABLE,
- .name = "dt019x",
- .id_table = snd_dt019x_pnpids,
- .probe = snd_dt019x_pnp_probe,
- .remove = __devexit_p(snd_dt019x_pnp_remove),
-#ifdef CONFIG_PM
- .suspend = snd_dt019x_pnp_suspend,
- .resume = snd_dt019x_pnp_resume,
-#endif
-};
-
-static int __init alsa_card_dt019x_init(void)
-{
- int err;
-
- err = pnp_register_card_driver(&dt019x_pnpc_driver);
- if (err)
- return err;
-
- if (!dt019x_devices) {
- pnp_unregister_card_driver(&dt019x_pnpc_driver);
-#ifdef MODULE
- snd_printk(KERN_ERR "no DT-019X / ALS-007 based soundcards found\n");
-#endif
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit alsa_card_dt019x_exit(void)
-{
- pnp_unregister_card_driver(&dt019x_pnpc_driver);
-}
-
-module_init(alsa_card_dt019x_init)
-module_exit(alsa_card_dt019x_exit)
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index c8a8da0d4036..a4af53b5c1cf 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -33,6 +33,7 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <sound/core.h>
+#include <sound/tlv.h>
#include <sound/wss.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
@@ -546,6 +547,93 @@ __skip_mpu:
#ifdef OPTi93X
+static const DECLARE_TLV_DB_SCALE(db_scale_5bit_3db_step, -9300, 300, 0);
+static const DECLARE_TLV_DB_SCALE(db_scale_5bit, -4650, 150, 0);
+static const DECLARE_TLV_DB_SCALE(db_scale_4bit_12db_max, -3300, 300, 0);
+
+static struct snd_kcontrol_new snd_opti93x_controls[] = {
+WSS_DOUBLE("Master Playback Switch", 0,
+ OPTi93X_OUT_LEFT, OPTi93X_OUT_RIGHT, 7, 7, 1, 1),
+WSS_DOUBLE_TLV("Master Playback Volume", 0,
+ OPTi93X_OUT_LEFT, OPTi93X_OUT_RIGHT, 1, 1, 31, 1,
+ db_scale_5bit_3db_step),
+WSS_DOUBLE_TLV("PCM Playback Volume", 0,
+ CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 0, 0, 31, 1,
+ db_scale_5bit),
+WSS_DOUBLE_TLV("FM Playback Volume", 0,
+ CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 1, 1, 15, 1,
+ db_scale_4bit_12db_max),
+WSS_DOUBLE("Line Playback Switch", 0,
+ CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 7, 7, 1, 1),
+WSS_DOUBLE_TLV("Line Playback Volume", 0,
+ CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 0, 0, 15, 1,
+ db_scale_4bit_12db_max),
+WSS_DOUBLE("Mic Playback Switch", 0,
+ OPTi93X_MIC_LEFT_INPUT, OPTi93X_MIC_RIGHT_INPUT, 7, 7, 1, 1),
+WSS_DOUBLE_TLV("Mic Playback Volume", 0,
+ OPTi93X_MIC_LEFT_INPUT, OPTi93X_MIC_RIGHT_INPUT, 1, 1, 15, 1,
+ db_scale_4bit_12db_max),
+WSS_DOUBLE_TLV("CD Playback Volume", 0,
+ CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 1, 1, 15, 1,
+ db_scale_4bit_12db_max),
+WSS_DOUBLE("Aux Playback Switch", 0,
+ OPTi931_AUX_LEFT_INPUT, OPTi931_AUX_RIGHT_INPUT, 7, 7, 1, 1),
+WSS_DOUBLE_TLV("Aux Playback Volume", 0,
+ OPTi931_AUX_LEFT_INPUT, OPTi931_AUX_RIGHT_INPUT, 1, 1, 15, 1,
+ db_scale_4bit_12db_max),
+};
+
+static int __devinit snd_opti93x_mixer(struct snd_wss *chip)
+{
+ struct snd_card *card;
+ unsigned int idx;
+ struct snd_ctl_elem_id id1, id2;
+ int err;
+
+ if (snd_BUG_ON(!chip || !chip->pcm))
+ return -EINVAL;
+
+ card = chip->card;
+
+ strcpy(card->mixername, chip->pcm->name);
+
+ memset(&id1, 0, sizeof(id1));
+ memset(&id2, 0, sizeof(id2));
+ id1.iface = id2.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ /* reassign AUX0 switch to CD */
+ strcpy(id1.name, "Aux Playback Switch");
+ strcpy(id2.name, "CD Playback Switch");
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0) {
+ snd_printk(KERN_ERR "Cannot rename opti93x control\n");
+ return err;
+ }
+ /* reassign AUX1 switch to FM */
+ strcpy(id1.name, "Aux Playback Switch"); id1.index = 1;
+ strcpy(id2.name, "FM Playback Switch");
+ err = snd_ctl_rename_id(card, &id1, &id2);
+ if (err < 0) {
+ snd_printk(KERN_ERR "Cannot rename opti93x control\n");
+ return err;
+ }
+ /* remove AUX1 volume */
+ strcpy(id1.name, "Aux Playback Volume"); id1.index = 1;
+ snd_ctl_remove_id(card, &id1);
+
+ /* Replace WSS volume controls with OPTi93x volume controls */
+ id1.index = 0;
+ for (idx = 0; idx < ARRAY_SIZE(snd_opti93x_controls); idx++) {
+ strcpy(id1.name, snd_opti93x_controls[idx].name);
+ snd_ctl_remove_id(card, &id1);
+
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&snd_opti93x_controls[idx], chip));
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
static irqreturn_t snd_opti93x_interrupt(int irq, void *dev_id)
{
struct snd_opti9xx *chip = dev_id;
@@ -754,6 +842,11 @@ static int __devinit snd_opti9xx_probe(struct snd_card *card)
error = snd_wss_mixer(codec);
if (error < 0)
return error;
+#ifdef OPTi93X
+ error = snd_opti93x_mixer(codec);
+ if (error < 0)
+ return error;
+#endif
#ifdef CS4231
error = snd_wss_timer(codec, 0, &timer);
if (error < 0)
diff --git a/sound/isa/sb/Makefile b/sound/isa/sb/Makefile
index faeffceb01b7..af3669681788 100644
--- a/sound/isa/sb/Makefile
+++ b/sound/isa/sb/Makefile
@@ -12,6 +12,7 @@ snd-sb16-objs := sb16.o
snd-sbawe-objs := sbawe.o emu8000.o
snd-emu8000-synth-objs := emu8000_synth.o emu8000_callback.o emu8000_patch.o emu8000_pcm.o
snd-es968-objs := es968.o
+snd-jazz16-objs := jazz16.o
# Toplevel Module Dependency
obj-$(CONFIG_SND_SB_COMMON) += snd-sb-common.o
@@ -21,6 +22,7 @@ obj-$(CONFIG_SND_SB8) += snd-sb8.o
obj-$(CONFIG_SND_SB16) += snd-sb16.o
obj-$(CONFIG_SND_SBAWE) += snd-sbawe.o
obj-$(CONFIG_SND_ES968) += snd-es968.o
+obj-$(CONFIG_SND_JAZZ16) += snd-jazz16.o
ifeq ($(CONFIG_SND_SB16_CSP),y)
obj-$(CONFIG_SND_SB16) += snd-sb16-csp.o
obj-$(CONFIG_SND_SBAWE) += snd-sb16-csp.o
diff --git a/sound/isa/sb/emu8000.c b/sound/isa/sb/emu8000.c
index 751762f1c59a..0c40951b6523 100644
--- a/sound/isa/sb/emu8000.c
+++ b/sound/isa/sb/emu8000.c
@@ -377,12 +377,13 @@ init_arrays(struct snd_emu8000 *emu)
static void __devinit
size_dram(struct snd_emu8000 *emu)
{
- int i, size;
+ int i, size, detected_size;
if (emu->dram_checked)
return;
size = 0;
+ detected_size = 0;
/* write out a magic number */
snd_emu8000_dma_chan(emu, 0, EMU8000_RAM_WRITE);
@@ -393,6 +394,8 @@ size_dram(struct snd_emu8000 *emu)
while (size < EMU8000_MAX_DRAM) {
+ size += 512 * 1024; /* increment 512kbytes */
+
/* Write a unique data on the test address.
* if the address is out of range, the data is written on
* 0x200000(=EMU8000_DRAM_OFFSET). Then the id word is
@@ -414,7 +417,7 @@ size_dram(struct snd_emu8000 *emu)
if (EMU8000_SMLD_READ(emu) != UNIQUE_ID2)
break; /* no memory at this address */
- size += 512 * 1024; /* increment 512kbytes */
+ detected_size = size;
snd_emu8000_read_wait(emu);
@@ -442,9 +445,9 @@ size_dram(struct snd_emu8000 *emu)
snd_emu8000_dma_chan(emu, 1, EMU8000_RAM_CLOSE);
snd_printdd("EMU8000 [0x%lx]: %d Kb on-board memory detected\n",
- emu->port1, size/1024);
+ emu->port1, detected_size/1024);
- emu->mem_size = size;
+ emu->mem_size = detected_size;
emu->dram_checked = 1;
}
diff --git a/sound/isa/sb/jazz16.c b/sound/isa/sb/jazz16.c
new file mode 100644
index 000000000000..8d21a3feda3a
--- /dev/null
+++ b/sound/isa/sb/jazz16.c
@@ -0,0 +1,404 @@
+
+/*
+ * jazz16.c - driver for Media Vision Jazz16 based soundcards.
+ * Copyright (C) 2009 Krzysztof Helt <krzysztof.h1@wp.pl>
+ * Based on patches posted by Rask Ingemann Lambertsen and Rene Herman.
+ * Based on OSS Sound Blaster driver.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/dma.h>
+#include <linux/isa.h>
+#include <sound/core.h>
+#include <sound/mpu401.h>
+#include <sound/opl3.h>
+#include <sound/sb.h>
+#define SNDRV_LEGACY_FIND_FREE_IRQ
+#define SNDRV_LEGACY_FIND_FREE_DMA
+#include <sound/initval.h>
+
+#define PFX "jazz16: "
+
+MODULE_DESCRIPTION("Media Vision Jazz16");
+MODULE_SUPPORTED_DEVICE("{{Media Vision ??? },"
+ "{RTL,RTL3000}}");
+
+MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>");
+MODULE_LICENSE("GPL");
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */
+static unsigned long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
+static unsigned long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;
+static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
+static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;
+static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;
+static int dma16[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "Index value for Media Vision Jazz16 based soundcard.");
+module_param_array(id, charp, NULL, 0444);
+MODULE_PARM_DESC(id, "ID string for Media Vision Jazz16 based soundcard.");
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "Enable Media Vision Jazz16 based soundcard.");
+module_param_array(port, long, NULL, 0444);
+MODULE_PARM_DESC(port, "Port # for jazz16 driver.");
+module_param_array(mpu_port, long, NULL, 0444);
+MODULE_PARM_DESC(mpu_port, "MPU-401 port # for jazz16 driver.");
+module_param_array(irq, int, NULL, 0444);
+MODULE_PARM_DESC(irq, "IRQ # for jazz16 driver.");
+module_param_array(mpu_irq, int, NULL, 0444);
+MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for jazz16 driver.");
+module_param_array(dma8, int, NULL, 0444);
+MODULE_PARM_DESC(dma8, "DMA8 # for jazz16 driver.");
+module_param_array(dma16, int, NULL, 0444);
+MODULE_PARM_DESC(dma16, "DMA16 # for jazz16 driver.");
+
+#define SB_JAZZ16_WAKEUP 0xaf
+#define SB_JAZZ16_SET_PORTS 0x50
+#define SB_DSP_GET_JAZZ_BRD_REV 0xfa
+#define SB_JAZZ16_SET_DMAINTR 0xfb
+#define SB_DSP_GET_JAZZ_MODEL 0xfe
+
+struct snd_card_jazz16 {
+ struct snd_sb *chip;
+};
+
+static irqreturn_t jazz16_interrupt(int irq, void *chip)
+{
+ return snd_sb8dsp_interrupt(chip);
+}
+
+static int __devinit jazz16_configure_ports(unsigned long port,
+ unsigned long mpu_port, int idx)
+{
+ unsigned char val;
+
+ if (!request_region(0x201, 1, "jazz16 config")) {
+ snd_printk(KERN_ERR "config port region is already in use.\n");
+ return -EBUSY;
+ }
+ outb(SB_JAZZ16_WAKEUP - idx, 0x201);
+ udelay(100);
+ outb(SB_JAZZ16_SET_PORTS + idx, 0x201);
+ udelay(100);
+ val = port & 0x70;
+ val |= (mpu_port & 0x30) >> 4;
+ outb(val, 0x201);
+
+ release_region(0x201, 1);
+ return 0;
+}
+
+static int __devinit jazz16_detect_board(unsigned long port,
+ unsigned long mpu_port)
+{
+ int err;
+ int val;
+ struct snd_sb chip;
+
+ if (!request_region(port, 0x10, "jazz16")) {
+ snd_printk(KERN_ERR "I/O port region is already in use.\n");
+ return -EBUSY;
+ }
+ /* just to call snd_sbdsp_command/reset/get_byte() */
+ chip.port = port;
+
+ err = snd_sbdsp_reset(&chip);
+ if (err < 0)
+ for (val = 0; val < 4; val++) {
+ err = jazz16_configure_ports(port, mpu_port, val);
+ if (err < 0)
+ break;
+
+ err = snd_sbdsp_reset(&chip);
+ if (!err)
+ break;
+ }
+ if (err < 0) {
+ err = -ENODEV;
+ goto err_unmap;
+ }
+ if (!snd_sbdsp_command(&chip, SB_DSP_GET_JAZZ_BRD_REV)) {
+ err = -EBUSY;
+ goto err_unmap;
+ }
+ val = snd_sbdsp_get_byte(&chip);
+ if (val >= 0x30)
+ snd_sbdsp_get_byte(&chip);
+
+ if ((val & 0xf0) != 0x10) {
+ err = -ENODEV;
+ goto err_unmap;
+ }
+ if (!snd_sbdsp_command(&chip, SB_DSP_GET_JAZZ_MODEL)) {
+ err = -EBUSY;
+ goto err_unmap;
+ }
+ snd_sbdsp_get_byte(&chip);
+ err = snd_sbdsp_get_byte(&chip);
+ snd_printd("Media Vision Jazz16 board detected: rev 0x%x, model 0x%x\n",
+ val, err);
+
+ err = 0;
+
+err_unmap:
+ release_region(port, 0x10);
+ return err;
+}
+
+static int __devinit jazz16_configure_board(struct snd_sb *chip, int mpu_irq)
+{
+ static unsigned char jazz_irq_bits[] = { 0, 0, 2, 3, 0, 1, 0, 4,
+ 0, 2, 5, 0, 0, 0, 0, 6 };
+ static unsigned char jazz_dma_bits[] = { 0, 1, 0, 2, 0, 3, 0, 4 };
+
+ if (jazz_dma_bits[chip->dma8] == 0 ||
+ jazz_dma_bits[chip->dma16] == 0 ||
+ jazz_irq_bits[chip->irq] == 0)
+ return -EINVAL;
+
+ if (!snd_sbdsp_command(chip, SB_JAZZ16_SET_DMAINTR))
+ return -EBUSY;
+
+ if (!snd_sbdsp_command(chip,
+ jazz_dma_bits[chip->dma8] |
+ (jazz_dma_bits[chip->dma16] << 4)))
+ return -EBUSY;
+
+ if (!snd_sbdsp_command(chip,
+ jazz_irq_bits[chip->irq] |
+ (jazz_irq_bits[mpu_irq] << 4)))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int __devinit snd_jazz16_match(struct device *devptr, unsigned int dev)
+{
+ if (!enable[dev])
+ return 0;
+ if (port[dev] == SNDRV_AUTO_PORT) {
+ snd_printk(KERN_ERR "please specify port\n");
+ return 0;
+ } else if (port[dev] == 0x200 || (port[dev] & ~0x270)) {
+ snd_printk(KERN_ERR "incorrect port specified\n");
+ return 0;
+ }
+ if (dma8[dev] != SNDRV_AUTO_DMA &&
+ dma8[dev] != 1 && dma8[dev] != 3) {
+ snd_printk(KERN_ERR "dma8 must be 1 or 3\n");
+ return 0;
+ }
+ if (dma16[dev] != SNDRV_AUTO_DMA &&
+ dma16[dev] != 5 && dma16[dev] != 7) {
+ snd_printk(KERN_ERR "dma16 must be 5 or 7\n");
+ return 0;
+ }
+ if (mpu_port[dev] != SNDRV_AUTO_PORT &&
+ (mpu_port[dev] & ~0x030) != 0x300) {
+ snd_printk(KERN_ERR "incorrect mpu_port specified\n");
+ return 0;
+ }
+ if (mpu_irq[dev] != SNDRV_AUTO_DMA &&
+ mpu_irq[dev] != 2 && mpu_irq[dev] != 3 &&
+ mpu_irq[dev] != 5 && mpu_irq[dev] != 7) {
+ snd_printk(KERN_ERR "mpu_irq must be 2, 3, 5 or 7\n");
+ return 0;
+ }
+ return 1;
+}
+
+static int __devinit snd_jazz16_probe(struct device *devptr, unsigned int dev)
+{
+ struct snd_card *card;
+ struct snd_card_jazz16 *jazz16;
+ struct snd_sb *chip;
+ struct snd_opl3 *opl3;
+ static int possible_irqs[] = {2, 3, 5, 7, 9, 10, 15, -1};
+ static int possible_dmas8[] = {1, 3, -1};
+ static int possible_dmas16[] = {5, 7, -1};
+ int err, xirq, xdma8, xdma16, xmpu_port, xmpu_irq;
+
+ err = snd_card_create(index[dev], id[dev], THIS_MODULE,
+ sizeof(struct snd_card_jazz16), &card);
+ if (err < 0)
+ return err;
+
+ jazz16 = card->private_data;
+
+ xirq = irq[dev];
+ if (xirq == SNDRV_AUTO_IRQ) {
+ xirq = snd_legacy_find_free_irq(possible_irqs);
+ if (xirq < 0) {
+ snd_printk(KERN_ERR "unable to find a free IRQ\n");
+ err = -EBUSY;
+ goto err_free;
+ }
+ }
+ xdma8 = dma8[dev];
+ if (xdma8 == SNDRV_AUTO_DMA) {
+ xdma8 = snd_legacy_find_free_dma(possible_dmas8);
+ if (xdma8 < 0) {
+ snd_printk(KERN_ERR "unable to find a free DMA8\n");
+ err = -EBUSY;
+ goto err_free;
+ }
+ }
+ xdma16 = dma16[dev];
+ if (xdma16 == SNDRV_AUTO_DMA) {
+ xdma16 = snd_legacy_find_free_dma(possible_dmas16);
+ if (xdma16 < 0) {
+ snd_printk(KERN_ERR "unable to find a free DMA16\n");
+ err = -EBUSY;
+ goto err_free;
+ }
+ }
+
+ xmpu_port = mpu_port[dev];
+ if (xmpu_port == SNDRV_AUTO_PORT)
+ xmpu_port = 0;
+ err = jazz16_detect_board(port[dev], xmpu_port);
+ if (err < 0) {
+ printk(KERN_ERR "Media Vision Jazz16 board not detected\n");
+ goto err_free;
+ }
+ err = snd_sbdsp_create(card, port[dev], irq[dev],
+ jazz16_interrupt,
+ dma8[dev], dma16[dev],
+ SB_HW_JAZZ16,
+ &chip);
+ if (err < 0)
+ goto err_free;
+
+ xmpu_irq = mpu_irq[dev];
+ if (xmpu_irq == SNDRV_AUTO_IRQ || mpu_port[dev] == SNDRV_AUTO_PORT)
+ xmpu_irq = 0;
+ err = jazz16_configure_board(chip, xmpu_irq);
+ if (err < 0) {
+ printk(KERN_ERR "Media Vision Jazz16 configuration failed\n");
+ goto err_free;
+ }
+
+ jazz16->chip = chip;
+
+ strcpy(card->driver, "jazz16");
+ strcpy(card->shortname, "Media Vision Jazz16");
+ sprintf(card->longname,
+ "Media Vision Jazz16 at 0x%lx, irq %d, dma8 %d, dma16 %d",
+ port[dev], xirq, xdma8, xdma16);
+
+ err = snd_sb8dsp_pcm(chip, 0, NULL);
+ if (err < 0)
+ goto err_free;
+ err = snd_sbmixer_new(chip);
+ if (err < 0)
+ goto err_free;
+
+ err = snd_opl3_create(card, chip->port, chip->port + 2,
+ OPL3_HW_AUTO, 1, &opl3);
+ if (err < 0)
+ snd_printk(KERN_WARNING "no OPL device at 0x%lx-0x%lx\n",
+ chip->port, chip->port + 2);
+ else {
+ err = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
+ if (err < 0)
+ goto err_free;
+ }
+ if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) {
+ if (mpu_irq[dev] == SNDRV_AUTO_IRQ)
+ mpu_irq[dev] = -1;
+
+ if (snd_mpu401_uart_new(card, 0,
+ MPU401_HW_MPU401,
+ mpu_port[dev], 0,
+ mpu_irq[dev],
+ mpu_irq[dev] >= 0 ? IRQF_DISABLED : 0,
+ NULL) < 0)
+ snd_printk(KERN_ERR "no MPU-401 device at 0x%lx\n",
+ mpu_port[dev]);
+ }
+
+ snd_card_set_dev(card, devptr);
+
+ err = snd_card_register(card);
+ if (err < 0)
+ goto err_free;
+
+ dev_set_drvdata(devptr, card);
+ return 0;
+
+err_free:
+ snd_card_free(card);
+ return err;
+}
+
+static int __devexit snd_jazz16_remove(struct device *devptr, unsigned int dev)
+{
+ struct snd_card *card = dev_get_drvdata(devptr);
+
+ dev_set_drvdata(devptr, NULL);
+ snd_card_free(card);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int snd_jazz16_suspend(struct device *pdev, unsigned int n,
+ pm_message_t state)
+{
+ struct snd_card *card = dev_get_drvdata(pdev);
+ struct snd_card_jazz16 *acard = card->private_data;
+ struct snd_sb *chip = acard->chip;
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ snd_pcm_suspend_all(chip->pcm);
+ snd_sbmixer_suspend(chip);
+ return 0;
+}
+
+static int snd_jazz16_resume(struct device *pdev, unsigned int n)
+{
+ struct snd_card *card = dev_get_drvdata(pdev);
+ struct snd_card_jazz16 *acard = card->private_data;
+ struct snd_sb *chip = acard->chip;
+
+ snd_sbdsp_reset(chip);
+ snd_sbmixer_resume(chip);
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+ return 0;
+}
+#endif
+
+static struct isa_driver snd_jazz16_driver = {
+ .match = snd_jazz16_match,
+ .probe = snd_jazz16_probe,
+ .remove = __devexit_p(snd_jazz16_remove),
+#ifdef CONFIG_PM
+ .suspend = snd_jazz16_suspend,
+ .resume = snd_jazz16_resume,
+#endif
+ .driver = {
+ .name = "jazz16"
+ },
+};
+
+static int __init alsa_card_jazz16_init(void)
+{
+ return isa_register_driver(&snd_jazz16_driver, SNDRV_CARDS);
+}
+
+static void __exit alsa_card_jazz16_exit(void)
+{
+ isa_unregister_driver(&snd_jazz16_driver);
+}
+
+module_init(alsa_card_jazz16_init)
+module_exit(alsa_card_jazz16_exit)
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index 658d55769c9c..7d84c9f34dc9 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -106,9 +106,21 @@ static int snd_sb8_playback_prepare(struct snd_pcm_substream *substream)
struct snd_sb *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int mixreg, rate, size, count;
+ unsigned char format;
+ unsigned char stereo = runtime->channels > 1;
+ int dma;
rate = runtime->rate;
switch (chip->hardware) {
+ case SB_HW_JAZZ16:
+ if (runtime->format == SNDRV_PCM_FORMAT_S16_LE) {
+ if (chip->mode & SB_MODE_CAPTURE_16)
+ return -EBUSY;
+ else
+ chip->mode |= SB_MODE_PLAYBACK_16;
+ }
+ chip->playback_format = SB_DSP_LO_OUTPUT_AUTO;
+ break;
case SB_HW_PRO:
if (runtime->channels > 1) {
if (snd_BUG_ON(rate != SB8_RATE(11025) &&
@@ -133,11 +145,21 @@ static int snd_sb8_playback_prepare(struct snd_pcm_substream *substream)
default:
return -EINVAL;
}
+ if (chip->mode & SB_MODE_PLAYBACK_16) {
+ format = stereo ? SB_DSP_STEREO_16BIT : SB_DSP_MONO_16BIT;
+ dma = chip->dma16;
+ } else {
+ format = stereo ? SB_DSP_STEREO_8BIT : SB_DSP_MONO_8BIT;
+ chip->mode |= SB_MODE_PLAYBACK_8;
+ dma = chip->dma8;
+ }
size = chip->p_dma_size = snd_pcm_lib_buffer_bytes(substream);
count = chip->p_period_size = snd_pcm_lib_period_bytes(substream);
spin_lock_irqsave(&chip->reg_lock, flags);
snd_sbdsp_command(chip, SB_DSP_SPEAKER_ON);
- if (runtime->channels > 1) {
+ if (chip->hardware == SB_HW_JAZZ16)
+ snd_sbdsp_command(chip, format);
+ else if (stereo) {
/* set playback stereo mode */
spin_lock(&chip->mixer_lock);
mixreg = snd_sbmixer_read(chip, SB_DSP_STEREO_SW);
@@ -147,15 +169,14 @@ static int snd_sb8_playback_prepare(struct snd_pcm_substream *substream)
/* Soundblaster hardware programming reference guide, 3-23 */
snd_sbdsp_command(chip, SB_DSP_DMA8_EXIT);
runtime->dma_area[0] = 0x80;
- snd_dma_program(chip->dma8, runtime->dma_addr, 1, DMA_MODE_WRITE);
+ snd_dma_program(dma, runtime->dma_addr, 1, DMA_MODE_WRITE);
/* force interrupt */
- chip->mode = SB_MODE_HALT;
snd_sbdsp_command(chip, SB_DSP_OUTPUT);
snd_sbdsp_command(chip, 0);
snd_sbdsp_command(chip, 0);
}
snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE);
- if (runtime->channels > 1) {
+ if (stereo) {
snd_sbdsp_command(chip, 256 - runtime->rate_den / 2);
spin_lock(&chip->mixer_lock);
/* save output filter status and turn it off */
@@ -168,13 +189,15 @@ static int snd_sb8_playback_prepare(struct snd_pcm_substream *substream)
snd_sbdsp_command(chip, 256 - runtime->rate_den);
}
if (chip->playback_format != SB_DSP_OUTPUT) {
+ if (chip->mode & SB_MODE_PLAYBACK_16)
+ count /= 2;
count--;
snd_sbdsp_command(chip, SB_DSP_BLOCK_SIZE);
snd_sbdsp_command(chip, count & 0xff);
snd_sbdsp_command(chip, count >> 8);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
- snd_dma_program(chip->dma8, runtime->dma_addr,
+ snd_dma_program(dma, runtime->dma_addr,
size, DMA_MODE_WRITE | DMA_AUTOINIT);
return 0;
}
@@ -212,7 +235,6 @@ static int snd_sb8_playback_trigger(struct snd_pcm_substream *substream,
snd_sbdsp_command(chip, SB_DSP_SPEAKER_OFF);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
- chip->mode = (cmd == SNDRV_PCM_TRIGGER_START) ? SB_MODE_PLAYBACK_8 : SB_MODE_HALT;
return 0;
}
@@ -234,9 +256,21 @@ static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream)
struct snd_sb *chip = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int mixreg, rate, size, count;
+ unsigned char format;
+ unsigned char stereo = runtime->channels > 1;
+ int dma;
rate = runtime->rate;
switch (chip->hardware) {
+ case SB_HW_JAZZ16:
+ if (runtime->format == SNDRV_PCM_FORMAT_S16_LE) {
+ if (chip->mode & SB_MODE_PLAYBACK_16)
+ return -EBUSY;
+ else
+ chip->mode |= SB_MODE_CAPTURE_16;
+ }
+ chip->capture_format = SB_DSP_LO_INPUT_AUTO;
+ break;
case SB_HW_PRO:
if (runtime->channels > 1) {
if (snd_BUG_ON(rate != SB8_RATE(11025) &&
@@ -262,14 +296,24 @@ static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream)
default:
return -EINVAL;
}
+ if (chip->mode & SB_MODE_CAPTURE_16) {
+ format = stereo ? SB_DSP_STEREO_16BIT : SB_DSP_MONO_16BIT;
+ dma = chip->dma16;
+ } else {
+ format = stereo ? SB_DSP_STEREO_8BIT : SB_DSP_MONO_8BIT;
+ chip->mode |= SB_MODE_CAPTURE_8;
+ dma = chip->dma8;
+ }
size = chip->c_dma_size = snd_pcm_lib_buffer_bytes(substream);
count = chip->c_period_size = snd_pcm_lib_period_bytes(substream);
spin_lock_irqsave(&chip->reg_lock, flags);
snd_sbdsp_command(chip, SB_DSP_SPEAKER_OFF);
- if (runtime->channels > 1)
+ if (chip->hardware == SB_HW_JAZZ16)
+ snd_sbdsp_command(chip, format);
+ else if (stereo)
snd_sbdsp_command(chip, SB_DSP_STEREO_8BIT);
snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE);
- if (runtime->channels > 1) {
+ if (stereo) {
snd_sbdsp_command(chip, 256 - runtime->rate_den / 2);
spin_lock(&chip->mixer_lock);
/* save input filter status and turn it off */
@@ -282,13 +326,15 @@ static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream)
snd_sbdsp_command(chip, 256 - runtime->rate_den);
}
if (chip->capture_format != SB_DSP_INPUT) {
+ if (chip->mode & SB_MODE_PLAYBACK_16)
+ count /= 2;
count--;
snd_sbdsp_command(chip, SB_DSP_BLOCK_SIZE);
snd_sbdsp_command(chip, count & 0xff);
snd_sbdsp_command(chip, count >> 8);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
- snd_dma_program(chip->dma8, runtime->dma_addr,
+ snd_dma_program(dma, runtime->dma_addr,
size, DMA_MODE_READ | DMA_AUTOINIT);
return 0;
}
@@ -328,7 +374,6 @@ static int snd_sb8_capture_trigger(struct snd_pcm_substream *substream,
snd_sbdsp_command(chip, SB_DSP_SPEAKER_OFF);
}
spin_unlock_irqrestore(&chip->reg_lock, flags);
- chip->mode = (cmd == SNDRV_PCM_TRIGGER_START) ? SB_MODE_CAPTURE_8 : SB_MODE_HALT;
return 0;
}
@@ -339,13 +384,21 @@ irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
snd_sb_ack_8bit(chip);
switch (chip->mode) {
- case SB_MODE_PLAYBACK_8: /* ok.. playback is active */
+ case SB_MODE_PLAYBACK_16: /* ok.. playback is active */
+ if (chip->hardware != SB_HW_JAZZ16)
+ break;
+ /* fallthru */
+ case SB_MODE_PLAYBACK_8:
substream = chip->playback_substream;
runtime = substream->runtime;
if (chip->playback_format == SB_DSP_OUTPUT)
snd_sb8_playback_trigger(substream, SNDRV_PCM_TRIGGER_START);
snd_pcm_period_elapsed(substream);
break;
+ case SB_MODE_CAPTURE_16:
+ if (chip->hardware != SB_HW_JAZZ16)
+ break;
+ /* fallthru */
case SB_MODE_CAPTURE_8:
substream = chip->capture_substream;
runtime = substream->runtime;
@@ -361,10 +414,15 @@ static snd_pcm_uframes_t snd_sb8_playback_pointer(struct snd_pcm_substream *subs
{
struct snd_sb *chip = snd_pcm_substream_chip(substream);
size_t ptr;
+ int dma;
- if (chip->mode != SB_MODE_PLAYBACK_8)
+ if (chip->mode & SB_MODE_PLAYBACK_8)
+ dma = chip->dma8;
+ else if (chip->mode & SB_MODE_PLAYBACK_16)
+ dma = chip->dma16;
+ else
return 0;
- ptr = snd_dma_pointer(chip->dma8, chip->p_dma_size);
+ ptr = snd_dma_pointer(dma, chip->p_dma_size);
return bytes_to_frames(substream->runtime, ptr);
}
@@ -372,10 +430,15 @@ static snd_pcm_uframes_t snd_sb8_capture_pointer(struct snd_pcm_substream *subst
{
struct snd_sb *chip = snd_pcm_substream_chip(substream);
size_t ptr;
+ int dma;
- if (chip->mode != SB_MODE_CAPTURE_8)
+ if (chip->mode & SB_MODE_CAPTURE_8)
+ dma = chip->dma8;
+ else if (chip->mode & SB_MODE_CAPTURE_16)
+ dma = chip->dma16;
+ else
return 0;
- ptr = snd_dma_pointer(chip->dma8, chip->c_dma_size);
+ ptr = snd_dma_pointer(dma, chip->c_dma_size);
return bytes_to_frames(substream->runtime, ptr);
}
@@ -446,6 +509,14 @@ static int snd_sb8_open(struct snd_pcm_substream *substream)
runtime->hw = snd_sb8_capture;
}
switch (chip->hardware) {
+ case SB_HW_JAZZ16:
+ if (chip->dma16 == 5 || chip->dma16 == 7)
+ runtime->hw.formats |= SNDRV_PCM_FMTBIT_S16_LE;
+ runtime->hw.rates |= SNDRV_PCM_RATE_8000_48000;
+ runtime->hw.rate_min = 4000;
+ runtime->hw.rate_max = 50000;
+ runtime->hw.channels_max = 2;
+ break;
case SB_HW_PRO:
runtime->hw.rate_max = 44100;
runtime->hw.channels_max = 2;
@@ -468,6 +539,14 @@ static int snd_sb8_open(struct snd_pcm_substream *substream)
}
snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&hw_constraints_clock);
+ if (chip->dma8 > 3 || chip->dma16 >= 0) {
+ snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 2);
+ snd_pcm_hw_constraint_step(runtime, 0,
+ SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 2);
+ runtime->hw.buffer_bytes_max = 128 * 1024 * 1024;
+ runtime->hw.period_bytes_max = 128 * 1024 * 1024;
+ }
return 0;
}
@@ -480,6 +559,10 @@ static int snd_sb8_close(struct snd_pcm_substream *substream)
chip->capture_substream = NULL;
spin_lock_irqsave(&chip->open_lock, flags);
chip->open &= ~SB_OPEN_PCM;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ chip->mode &= ~SB_MODE_PLAYBACK;
+ else
+ chip->mode &= ~SB_MODE_CAPTURE;
spin_unlock_irqrestore(&chip->open_lock, flags);
return 0;
}
@@ -515,6 +598,7 @@ int snd_sb8dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm)
struct snd_card *card = chip->card;
struct snd_pcm *pcm;
int err;
+ size_t max_prealloc = 64 * 1024;
if (rpcm)
*rpcm = NULL;
@@ -527,9 +611,11 @@ int snd_sb8dsp_pcm(struct snd_sb *chip, int device, struct snd_pcm ** rpcm)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb8_playback_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb8_capture_ops);
+ if (chip->dma8 > 3 || chip->dma16 >= 0)
+ max_prealloc = 128 * 1024;
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
snd_dma_isa_data(),
- 64*1024, 64*1024);
+ 64*1024, max_prealloc);
if (rpcm)
*rpcm = pcm;
diff --git a/sound/isa/sb/sb_common.c b/sound/isa/sb/sb_common.c
index 27a651502251..eae6c1c0eff9 100644
--- a/sound/isa/sb/sb_common.c
+++ b/sound/isa/sb/sb_common.c
@@ -170,6 +170,9 @@ static int snd_sbdsp_probe(struct snd_sb * chip)
case SB_HW_CS5530:
str = "16 (CS5530)";
break;
+ case SB_HW_JAZZ16:
+ str = "Pro (Jazz16)";
+ break;
default:
return -ENODEV;
}
diff --git a/sound/isa/sb/sb_mixer.c b/sound/isa/sb/sb_mixer.c
index 318ff0c823e7..6496822c1808 100644
--- a/sound/isa/sb/sb_mixer.c
+++ b/sound/isa/sb/sb_mixer.c
@@ -528,20 +528,11 @@ int snd_sbmixer_add_ctl(struct snd_sb *chip, const char *name, int index, int ty
* SB 2.0 specific mixer elements
*/
-static struct sbmix_elem snd_sb20_ctl_master_play_vol =
- SB_SINGLE("Master Playback Volume", SB_DSP20_MASTER_DEV, 1, 7);
-static struct sbmix_elem snd_sb20_ctl_pcm_play_vol =
- SB_SINGLE("PCM Playback Volume", SB_DSP20_PCM_DEV, 1, 3);
-static struct sbmix_elem snd_sb20_ctl_synth_play_vol =
- SB_SINGLE("Synth Playback Volume", SB_DSP20_FM_DEV, 1, 7);
-static struct sbmix_elem snd_sb20_ctl_cd_play_vol =
- SB_SINGLE("CD Playback Volume", SB_DSP20_CD_DEV, 1, 7);
-
-static struct sbmix_elem *snd_sb20_controls[] = {
- &snd_sb20_ctl_master_play_vol,
- &snd_sb20_ctl_pcm_play_vol,
- &snd_sb20_ctl_synth_play_vol,
- &snd_sb20_ctl_cd_play_vol
+static struct sbmix_elem snd_sb20_controls[] = {
+ SB_SINGLE("Master Playback Volume", SB_DSP20_MASTER_DEV, 1, 7),
+ SB_SINGLE("PCM Playback Volume", SB_DSP20_PCM_DEV, 1, 3),
+ SB_SINGLE("Synth Playback Volume", SB_DSP20_FM_DEV, 1, 7),
+ SB_SINGLE("CD Playback Volume", SB_DSP20_CD_DEV, 1, 7)
};
static unsigned char snd_sb20_init_values[][2] = {
@@ -552,41 +543,24 @@ static unsigned char snd_sb20_init_values[][2] = {
/*
* SB Pro specific mixer elements
*/
-static struct sbmix_elem snd_sbpro_ctl_master_play_vol =
- SB_DOUBLE("Master Playback Volume", SB_DSP_MASTER_DEV, SB_DSP_MASTER_DEV, 5, 1, 7);
-static struct sbmix_elem snd_sbpro_ctl_pcm_play_vol =
- SB_DOUBLE("PCM Playback Volume", SB_DSP_PCM_DEV, SB_DSP_PCM_DEV, 5, 1, 7);
-static struct sbmix_elem snd_sbpro_ctl_pcm_play_filter =
- SB_SINGLE("PCM Playback Filter", SB_DSP_PLAYBACK_FILT, 5, 1);
-static struct sbmix_elem snd_sbpro_ctl_synth_play_vol =
- SB_DOUBLE("Synth Playback Volume", SB_DSP_FM_DEV, SB_DSP_FM_DEV, 5, 1, 7);
-static struct sbmix_elem snd_sbpro_ctl_cd_play_vol =
- SB_DOUBLE("CD Playback Volume", SB_DSP_CD_DEV, SB_DSP_CD_DEV, 5, 1, 7);
-static struct sbmix_elem snd_sbpro_ctl_line_play_vol =
- SB_DOUBLE("Line Playback Volume", SB_DSP_LINE_DEV, SB_DSP_LINE_DEV, 5, 1, 7);
-static struct sbmix_elem snd_sbpro_ctl_mic_play_vol =
- SB_SINGLE("Mic Playback Volume", SB_DSP_MIC_DEV, 1, 3);
-static struct sbmix_elem snd_sbpro_ctl_capture_source =
+static struct sbmix_elem snd_sbpro_controls[] = {
+ SB_DOUBLE("Master Playback Volume",
+ SB_DSP_MASTER_DEV, SB_DSP_MASTER_DEV, 5, 1, 7),
+ SB_DOUBLE("PCM Playback Volume",
+ SB_DSP_PCM_DEV, SB_DSP_PCM_DEV, 5, 1, 7),
+ SB_SINGLE("PCM Playback Filter", SB_DSP_PLAYBACK_FILT, 5, 1),
+ SB_DOUBLE("Synth Playback Volume",
+ SB_DSP_FM_DEV, SB_DSP_FM_DEV, 5, 1, 7),
+ SB_DOUBLE("CD Playback Volume", SB_DSP_CD_DEV, SB_DSP_CD_DEV, 5, 1, 7),
+ SB_DOUBLE("Line Playback Volume",
+ SB_DSP_LINE_DEV, SB_DSP_LINE_DEV, 5, 1, 7),
+ SB_SINGLE("Mic Playback Volume", SB_DSP_MIC_DEV, 1, 3),
{
.name = "Capture Source",
.type = SB_MIX_CAPTURE_PRO
- };
-static struct sbmix_elem snd_sbpro_ctl_capture_filter =
- SB_SINGLE("Capture Filter", SB_DSP_CAPTURE_FILT, 5, 1);
-static struct sbmix_elem snd_sbpro_ctl_capture_low_filter =
- SB_SINGLE("Capture Low-Pass Filter", SB_DSP_CAPTURE_FILT, 3, 1);
-
-static struct sbmix_elem *snd_sbpro_controls[] = {
- &snd_sbpro_ctl_master_play_vol,
- &snd_sbpro_ctl_pcm_play_vol,
- &snd_sbpro_ctl_pcm_play_filter,
- &snd_sbpro_ctl_synth_play_vol,
- &snd_sbpro_ctl_cd_play_vol,
- &snd_sbpro_ctl_line_play_vol,
- &snd_sbpro_ctl_mic_play_vol,
- &snd_sbpro_ctl_capture_source,
- &snd_sbpro_ctl_capture_filter,
- &snd_sbpro_ctl_capture_low_filter
+ },
+ SB_SINGLE("Capture Filter", SB_DSP_CAPTURE_FILT, 5, 1),
+ SB_SINGLE("Capture Low-Pass Filter", SB_DSP_CAPTURE_FILT, 3, 1)
};
static unsigned char snd_sbpro_init_values[][2] = {
@@ -598,68 +572,42 @@ static unsigned char snd_sbpro_init_values[][2] = {
/*
* SB16 specific mixer elements
*/
-static struct sbmix_elem snd_sb16_ctl_master_play_vol =
- SB_DOUBLE("Master Playback Volume", SB_DSP4_MASTER_DEV, (SB_DSP4_MASTER_DEV + 1), 3, 3, 31);
-static struct sbmix_elem snd_sb16_ctl_3d_enhance_switch =
- SB_SINGLE("3D Enhancement Switch", SB_DSP4_3DSE, 0, 1);
-static struct sbmix_elem snd_sb16_ctl_tone_bass =
- SB_DOUBLE("Tone Control - Bass", SB_DSP4_BASS_DEV, (SB_DSP4_BASS_DEV + 1), 4, 4, 15);
-static struct sbmix_elem snd_sb16_ctl_tone_treble =
- SB_DOUBLE("Tone Control - Treble", SB_DSP4_TREBLE_DEV, (SB_DSP4_TREBLE_DEV + 1), 4, 4, 15);
-static struct sbmix_elem snd_sb16_ctl_pcm_play_vol =
- SB_DOUBLE("PCM Playback Volume", SB_DSP4_PCM_DEV, (SB_DSP4_PCM_DEV + 1), 3, 3, 31);
-static struct sbmix_elem snd_sb16_ctl_synth_capture_route =
- SB16_INPUT_SW("Synth Capture Route", SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 6, 5);
-static struct sbmix_elem snd_sb16_ctl_synth_play_vol =
- SB_DOUBLE("Synth Playback Volume", SB_DSP4_SYNTH_DEV, (SB_DSP4_SYNTH_DEV + 1), 3, 3, 31);
-static struct sbmix_elem snd_sb16_ctl_cd_capture_route =
- SB16_INPUT_SW("CD Capture Route", SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 2, 1);
-static struct sbmix_elem snd_sb16_ctl_cd_play_switch =
- SB_DOUBLE("CD Playback Switch", SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, 2, 1, 1);
-static struct sbmix_elem snd_sb16_ctl_cd_play_vol =
- SB_DOUBLE("CD Playback Volume", SB_DSP4_CD_DEV, (SB_DSP4_CD_DEV + 1), 3, 3, 31);
-static struct sbmix_elem snd_sb16_ctl_line_capture_route =
- SB16_INPUT_SW("Line Capture Route", SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 4, 3);
-static struct sbmix_elem snd_sb16_ctl_line_play_switch =
- SB_DOUBLE("Line Playback Switch", SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, 4, 3, 1);
-static struct sbmix_elem snd_sb16_ctl_line_play_vol =
- SB_DOUBLE("Line Playback Volume", SB_DSP4_LINE_DEV, (SB_DSP4_LINE_DEV + 1), 3, 3, 31);
-static struct sbmix_elem snd_sb16_ctl_mic_capture_route =
- SB16_INPUT_SW("Mic Capture Route", SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 0, 0);
-static struct sbmix_elem snd_sb16_ctl_mic_play_switch =
- SB_SINGLE("Mic Playback Switch", SB_DSP4_OUTPUT_SW, 0, 1);
-static struct sbmix_elem snd_sb16_ctl_mic_play_vol =
- SB_SINGLE("Mic Playback Volume", SB_DSP4_MIC_DEV, 3, 31);
-static struct sbmix_elem snd_sb16_ctl_pc_speaker_vol =
- SB_SINGLE("Beep Volume", SB_DSP4_SPEAKER_DEV, 6, 3);
-static struct sbmix_elem snd_sb16_ctl_capture_vol =
- SB_DOUBLE("Capture Volume", SB_DSP4_IGAIN_DEV, (SB_DSP4_IGAIN_DEV + 1), 6, 6, 3);
-static struct sbmix_elem snd_sb16_ctl_play_vol =
- SB_DOUBLE("Playback Volume", SB_DSP4_OGAIN_DEV, (SB_DSP4_OGAIN_DEV + 1), 6, 6, 3);
-static struct sbmix_elem snd_sb16_ctl_auto_mic_gain =
- SB_SINGLE("Mic Auto Gain", SB_DSP4_MIC_AGC, 0, 1);
-
-static struct sbmix_elem *snd_sb16_controls[] = {
- &snd_sb16_ctl_master_play_vol,
- &snd_sb16_ctl_3d_enhance_switch,
- &snd_sb16_ctl_tone_bass,
- &snd_sb16_ctl_tone_treble,
- &snd_sb16_ctl_pcm_play_vol,
- &snd_sb16_ctl_synth_capture_route,
- &snd_sb16_ctl_synth_play_vol,
- &snd_sb16_ctl_cd_capture_route,
- &snd_sb16_ctl_cd_play_switch,
- &snd_sb16_ctl_cd_play_vol,
- &snd_sb16_ctl_line_capture_route,
- &snd_sb16_ctl_line_play_switch,
- &snd_sb16_ctl_line_play_vol,
- &snd_sb16_ctl_mic_capture_route,
- &snd_sb16_ctl_mic_play_switch,
- &snd_sb16_ctl_mic_play_vol,
- &snd_sb16_ctl_pc_speaker_vol,
- &snd_sb16_ctl_capture_vol,
- &snd_sb16_ctl_play_vol,
- &snd_sb16_ctl_auto_mic_gain
+static struct sbmix_elem snd_sb16_controls[] = {
+ SB_DOUBLE("Master Playback Volume",
+ SB_DSP4_MASTER_DEV, (SB_DSP4_MASTER_DEV + 1), 3, 3, 31),
+ SB_DOUBLE("PCM Playback Volume",
+ SB_DSP4_PCM_DEV, (SB_DSP4_PCM_DEV + 1), 3, 3, 31),
+ SB16_INPUT_SW("Synth Capture Route",
+ SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 6, 5),
+ SB_DOUBLE("Synth Playback Volume",
+ SB_DSP4_SYNTH_DEV, (SB_DSP4_SYNTH_DEV + 1), 3, 3, 31),
+ SB16_INPUT_SW("CD Capture Route",
+ SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 2, 1),
+ SB_DOUBLE("CD Playback Switch",
+ SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, 2, 1, 1),
+ SB_DOUBLE("CD Playback Volume",
+ SB_DSP4_CD_DEV, (SB_DSP4_CD_DEV + 1), 3, 3, 31),
+ SB16_INPUT_SW("Mic Capture Route",
+ SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 0, 0),
+ SB_SINGLE("Mic Playback Switch", SB_DSP4_OUTPUT_SW, 0, 1),
+ SB_SINGLE("Mic Playback Volume", SB_DSP4_MIC_DEV, 3, 31),
+ SB_SINGLE("Beep Volume", SB_DSP4_SPEAKER_DEV, 6, 3),
+ SB_DOUBLE("Capture Volume",
+ SB_DSP4_IGAIN_DEV, (SB_DSP4_IGAIN_DEV + 1), 6, 6, 3),
+ SB_DOUBLE("Playback Volume",
+ SB_DSP4_OGAIN_DEV, (SB_DSP4_OGAIN_DEV + 1), 6, 6, 3),
+ SB16_INPUT_SW("Line Capture Route",
+ SB_DSP4_INPUT_LEFT, SB_DSP4_INPUT_RIGHT, 4, 3),
+ SB_DOUBLE("Line Playback Switch",
+ SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, 4, 3, 1),
+ SB_DOUBLE("Line Playback Volume",
+ SB_DSP4_LINE_DEV, (SB_DSP4_LINE_DEV + 1), 3, 3, 31),
+ SB_SINGLE("Mic Auto Gain", SB_DSP4_MIC_AGC, 0, 1),
+ SB_SINGLE("3D Enhancement Switch", SB_DSP4_3DSE, 0, 1),
+ SB_DOUBLE("Tone Control - Bass",
+ SB_DSP4_BASS_DEV, (SB_DSP4_BASS_DEV + 1), 4, 4, 15),
+ SB_DOUBLE("Tone Control - Treble",
+ SB_DSP4_TREBLE_DEV, (SB_DSP4_TREBLE_DEV + 1), 4, 4, 15)
};
static unsigned char snd_sb16_init_values[][2] = {
@@ -678,46 +626,34 @@ static unsigned char snd_sb16_init_values[][2] = {
/*
* DT019x specific mixer elements
*/
-static struct sbmix_elem snd_dt019x_ctl_master_play_vol =
- SB_DOUBLE("Master Playback Volume", SB_DT019X_MASTER_DEV, SB_DT019X_MASTER_DEV, 4,0, 15);
-static struct sbmix_elem snd_dt019x_ctl_pcm_play_vol =
- SB_DOUBLE("PCM Playback Volume", SB_DT019X_PCM_DEV, SB_DT019X_PCM_DEV, 4,0, 15);
-static struct sbmix_elem snd_dt019x_ctl_synth_play_vol =
- SB_DOUBLE("Synth Playback Volume", SB_DT019X_SYNTH_DEV, SB_DT019X_SYNTH_DEV, 4,0, 15);
-static struct sbmix_elem snd_dt019x_ctl_cd_play_vol =
- SB_DOUBLE("CD Playback Volume", SB_DT019X_CD_DEV, SB_DT019X_CD_DEV, 4,0, 15);
-static struct sbmix_elem snd_dt019x_ctl_mic_play_vol =
- SB_SINGLE("Mic Playback Volume", SB_DT019X_MIC_DEV, 4, 7);
-static struct sbmix_elem snd_dt019x_ctl_pc_speaker_vol =
- SB_SINGLE("Beep Volume", SB_DT019X_SPKR_DEV, 0, 7);
-static struct sbmix_elem snd_dt019x_ctl_line_play_vol =
- SB_DOUBLE("Line Playback Volume", SB_DT019X_LINE_DEV, SB_DT019X_LINE_DEV, 4,0, 15);
-static struct sbmix_elem snd_dt019x_ctl_pcm_play_switch =
- SB_DOUBLE("PCM Playback Switch", SB_DT019X_OUTPUT_SW2, SB_DT019X_OUTPUT_SW2, 2,1, 1);
-static struct sbmix_elem snd_dt019x_ctl_synth_play_switch =
- SB_DOUBLE("Synth Playback Switch", SB_DT019X_OUTPUT_SW2, SB_DT019X_OUTPUT_SW2, 4,3, 1);
-static struct sbmix_elem snd_dt019x_ctl_capture_source =
+static struct sbmix_elem snd_dt019x_controls[] = {
+ /* ALS4000 below has some parts which we might be lacking,
+ * e.g. snd_als4000_ctl_mono_playback_switch - check it! */
+ SB_DOUBLE("Master Playback Volume",
+ SB_DT019X_MASTER_DEV, SB_DT019X_MASTER_DEV, 4, 0, 15),
+ SB_DOUBLE("PCM Playback Switch",
+ SB_DT019X_OUTPUT_SW2, SB_DT019X_OUTPUT_SW2, 2, 1, 1),
+ SB_DOUBLE("PCM Playback Volume",
+ SB_DT019X_PCM_DEV, SB_DT019X_PCM_DEV, 4, 0, 15),
+ SB_DOUBLE("Synth Playback Switch",
+ SB_DT019X_OUTPUT_SW2, SB_DT019X_OUTPUT_SW2, 4, 3, 1),
+ SB_DOUBLE("Synth Playback Volume",
+ SB_DT019X_SYNTH_DEV, SB_DT019X_SYNTH_DEV, 4, 0, 15),
+ SB_DOUBLE("CD Playback Switch",
+ SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, 2, 1, 1),
+ SB_DOUBLE("CD Playback Volume",
+ SB_DT019X_CD_DEV, SB_DT019X_CD_DEV, 4, 0, 15),
+ SB_SINGLE("Mic Playback Switch", SB_DSP4_OUTPUT_SW, 0, 1),
+ SB_SINGLE("Mic Playback Volume", SB_DT019X_MIC_DEV, 4, 7),
+ SB_SINGLE("Beep Volume", SB_DT019X_SPKR_DEV, 0, 7),
+ SB_DOUBLE("Line Playback Switch",
+ SB_DSP4_OUTPUT_SW, SB_DSP4_OUTPUT_SW, 4, 3, 1),
+ SB_DOUBLE("Line Playback Volume",
+ SB_DT019X_LINE_DEV, SB_DT019X_LINE_DEV, 4, 0, 15),
{
.name = "Capture Source",
.type = SB_MIX_CAPTURE_DT019X
- };
-
-static struct sbmix_elem *snd_dt019x_controls[] = {
- /* ALS4000 below has some parts which we might be lacking,
- * e.g. snd_als4000_ctl_mono_playback_switch - check it! */
- &snd_dt019x_ctl_master_play_vol,
- &snd_dt019x_ctl_pcm_play_vol,
- &snd_dt019x_ctl_synth_play_vol,
- &snd_dt019x_ctl_cd_play_vol,
- &snd_dt019x_ctl_mic_play_vol,
- &snd_dt019x_ctl_pc_speaker_vol,
- &snd_dt019x_ctl_line_play_vol,
- &snd_sb16_ctl_mic_play_switch,
- &snd_sb16_ctl_cd_play_switch,
- &snd_sb16_ctl_line_play_switch,
- &snd_dt019x_ctl_pcm_play_switch,
- &snd_dt019x_ctl_synth_play_switch,
- &snd_dt019x_ctl_capture_source
+ }
};
static unsigned char snd_dt019x_init_values[][2] = {
@@ -735,82 +671,37 @@ static unsigned char snd_dt019x_init_values[][2] = {
/*
* ALS4000 specific mixer elements
*/
-static struct sbmix_elem snd_als4000_ctl_master_mono_playback_switch =
- SB_SINGLE("Master Mono Playback Switch", SB_ALS4000_MONO_IO_CTRL, 5, 1);
-static struct sbmix_elem snd_als4k_ctl_master_mono_capture_route = {
+static struct sbmix_elem snd_als4000_controls[] = {
+ SB_DOUBLE("PCM Playback Switch",
+ SB_DT019X_OUTPUT_SW2, SB_DT019X_OUTPUT_SW2, 2, 1, 1),
+ SB_DOUBLE("Synth Playback Switch",
+ SB_DT019X_OUTPUT_SW2, SB_DT019X_OUTPUT_SW2, 4, 3, 1),
+ SB_SINGLE("Mic Boost (+20dB)", SB_ALS4000_MIC_IN_GAIN, 0, 0x03),
+ SB_SINGLE("Master Mono Playback Switch", SB_ALS4000_MONO_IO_CTRL, 5, 1),
+ {
.name = "Master Mono Capture Route",
.type = SB_MIX_MONO_CAPTURE_ALS4K
- };
-static struct sbmix_elem snd_als4000_ctl_mono_playback_switch =
- SB_SINGLE("Mono Playback Switch", SB_DT019X_OUTPUT_SW2, 0, 1);
-static struct sbmix_elem snd_als4000_ctl_mic_20db_boost =
- SB_SINGLE("Mic Boost (+20dB)", SB_ALS4000_MIC_IN_GAIN, 0, 0x03);
-static struct sbmix_elem snd_als4000_ctl_mixer_analog_loopback =
- SB_SINGLE("Analog Loopback Switch", SB_ALS4000_MIC_IN_GAIN, 7, 0x01);
-static struct sbmix_elem snd_als4000_ctl_mixer_digital_loopback =
+ },
+ SB_SINGLE("Mono Playback Switch", SB_DT019X_OUTPUT_SW2, 0, 1),
+ SB_SINGLE("Analog Loopback Switch", SB_ALS4000_MIC_IN_GAIN, 7, 0x01),
+ SB_SINGLE("3D Control - Switch", SB_ALS4000_3D_SND_FX, 6, 0x01),
SB_SINGLE("Digital Loopback Switch",
- SB_ALS4000_CR3_CONFIGURATION, 7, 0x01);
-/* FIXME: functionality of 3D controls might be swapped, I didn't find
- * a description of how to identify what is supposed to be what */
-static struct sbmix_elem snd_als4000_3d_control_switch =
- SB_SINGLE("3D Control - Switch", SB_ALS4000_3D_SND_FX, 6, 0x01);
-static struct sbmix_elem snd_als4000_3d_control_ratio =
- SB_SINGLE("3D Control - Level", SB_ALS4000_3D_SND_FX, 0, 0x07);
-static struct sbmix_elem snd_als4000_3d_control_freq =
+ SB_ALS4000_CR3_CONFIGURATION, 7, 0x01),
+ /* FIXME: functionality of 3D controls might be swapped, I didn't find
+ * a description of how to identify what is supposed to be what */
+ SB_SINGLE("3D Control - Level", SB_ALS4000_3D_SND_FX, 0, 0x07),
/* FIXME: maybe there's actually some standard 3D ctrl name for it?? */
- SB_SINGLE("3D Control - Freq", SB_ALS4000_3D_SND_FX, 4, 0x03);
-static struct sbmix_elem snd_als4000_3d_control_delay =
+ SB_SINGLE("3D Control - Freq", SB_ALS4000_3D_SND_FX, 4, 0x03),
/* FIXME: ALS4000a.pdf mentions BBD (Bucket Brigade Device) time delay,
* but what ALSA 3D attribute is that actually? "Center", "Depth",
* "Wide" or "Space" or even "Level"? Assuming "Wide" for now... */
- SB_SINGLE("3D Control - Wide", SB_ALS4000_3D_TIME_DELAY, 0, 0x0f);
-static struct sbmix_elem snd_als4000_3d_control_poweroff_switch =
- SB_SINGLE("3D PowerOff Switch", SB_ALS4000_3D_TIME_DELAY, 4, 0x01);
-static struct sbmix_elem snd_als4000_ctl_3db_freq_control_switch =
+ SB_SINGLE("3D Control - Wide", SB_ALS4000_3D_TIME_DELAY, 0, 0x0f),
+ SB_SINGLE("3D PowerOff Switch", SB_ALS4000_3D_TIME_DELAY, 4, 0x01),
SB_SINGLE("Master Playback 8kHz / 20kHz LPF Switch",
- SB_ALS4000_FMDAC, 5, 0x01);
+ SB_ALS4000_FMDAC, 5, 0x01),
#ifdef NOT_AVAILABLE
-static struct sbmix_elem snd_als4000_ctl_fmdac =
- SB_SINGLE("FMDAC Switch (Option ?)", SB_ALS4000_FMDAC, 0, 0x01);
-static struct sbmix_elem snd_als4000_ctl_qsound =
- SB_SINGLE("QSound Mode", SB_ALS4000_QSOUND, 1, 0x1f);
-#endif
-
-static struct sbmix_elem *snd_als4000_controls[] = {
- /* ALS4000a.PDF regs page */
- &snd_sb16_ctl_master_play_vol, /* MX30/31 12 */
- &snd_dt019x_ctl_pcm_play_switch, /* MX4C 16 */
- &snd_sb16_ctl_pcm_play_vol, /* MX32/33 12 */
- &snd_sb16_ctl_synth_capture_route, /* MX3D/3E 14 */
- &snd_dt019x_ctl_synth_play_switch, /* MX4C 16 */
- &snd_sb16_ctl_synth_play_vol, /* MX34/35 12/13 */
- &snd_sb16_ctl_cd_capture_route, /* MX3D/3E 14 */
- &snd_sb16_ctl_cd_play_switch, /* MX3C 14 */
- &snd_sb16_ctl_cd_play_vol, /* MX36/37 13 */
- &snd_sb16_ctl_line_capture_route, /* MX3D/3E 14 */
- &snd_sb16_ctl_line_play_switch, /* MX3C 14 */
- &snd_sb16_ctl_line_play_vol, /* MX38/39 13 */
- &snd_sb16_ctl_mic_capture_route, /* MX3D/3E 14 */
- &snd_als4000_ctl_mic_20db_boost, /* MX4D 16 */
- &snd_sb16_ctl_mic_play_switch, /* MX3C 14 */
- &snd_sb16_ctl_mic_play_vol, /* MX3A 13 */
- &snd_sb16_ctl_pc_speaker_vol, /* MX3B 14 */
- &snd_sb16_ctl_capture_vol, /* MX3F/40 15 */
- &snd_sb16_ctl_play_vol, /* MX41/42 15 */
- &snd_als4000_ctl_master_mono_playback_switch, /* MX4C 16 */
- &snd_als4k_ctl_master_mono_capture_route, /* MX4B 16 */
- &snd_als4000_ctl_mono_playback_switch, /* MX4C 16 */
- &snd_als4000_ctl_mixer_analog_loopback, /* MX4D 16 */
- &snd_als4000_ctl_mixer_digital_loopback, /* CR3 21 */
- &snd_als4000_3d_control_switch, /* MX50 17 */
- &snd_als4000_3d_control_ratio, /* MX50 17 */
- &snd_als4000_3d_control_freq, /* MX50 17 */
- &snd_als4000_3d_control_delay, /* MX51 18 */
- &snd_als4000_3d_control_poweroff_switch, /* MX51 18 */
- &snd_als4000_ctl_3db_freq_control_switch, /* MX4F 17 */
-#ifdef NOT_AVAILABLE
- &snd_als4000_ctl_fmdac,
- &snd_als4000_ctl_qsound,
+ SB_SINGLE("FMDAC Switch (Option ?)", SB_ALS4000_FMDAC, 0, 0x01),
+ SB_SINGLE("QSound Mode", SB_ALS4000_QSOUND, 1, 0x1f),
#endif
};
@@ -829,11 +720,10 @@ static unsigned char snd_als4000_init_values[][2] = {
{ SB_ALS4000_MIC_IN_GAIN, 0 },
};
-
/*
*/
static int snd_sbmixer_init(struct snd_sb *chip,
- struct sbmix_elem **controls,
+ struct sbmix_elem *controls,
int controls_count,
unsigned char map[][2],
int map_count,
@@ -856,7 +746,8 @@ static int snd_sbmixer_init(struct snd_sb *chip,
}
for (idx = 0; idx < controls_count; idx++) {
- if ((err = snd_sbmixer_add_ctl_elem(chip, controls[idx])) < 0)
+ err = snd_sbmixer_add_ctl_elem(chip, &controls[idx]);
+ if (err < 0)
return err;
}
snd_component_add(card, name);
@@ -888,6 +779,7 @@ int snd_sbmixer_new(struct snd_sb *chip)
return err;
break;
case SB_HW_PRO:
+ case SB_HW_JAZZ16:
if ((err = snd_sbmixer_init(chip,
snd_sbpro_controls,
ARRAY_SIZE(snd_sbpro_controls),
@@ -908,6 +800,15 @@ int snd_sbmixer_new(struct snd_sb *chip)
return err;
break;
case SB_HW_ALS4000:
+ /* use only the first 16 controls from SB16 */
+ err = snd_sbmixer_init(chip,
+ snd_sb16_controls,
+ 16,
+ snd_sb16_init_values,
+ ARRAY_SIZE(snd_sb16_init_values),
+ "ALS4000");
+ if (err < 0)
+ return err;
if ((err = snd_sbmixer_init(chip,
snd_als4000_controls,
ARRAY_SIZE(snd_als4000_controls),
@@ -1029,6 +930,7 @@ void snd_sbmixer_suspend(struct snd_sb *chip)
save_mixer(chip, sb20_saved_regs, ARRAY_SIZE(sb20_saved_regs));
break;
case SB_HW_PRO:
+ case SB_HW_JAZZ16:
save_mixer(chip, sbpro_saved_regs, ARRAY_SIZE(sbpro_saved_regs));
break;
case SB_HW_16:
@@ -1055,6 +957,7 @@ void snd_sbmixer_resume(struct snd_sb *chip)
restore_mixer(chip, sb20_saved_regs, ARRAY_SIZE(sb20_saved_regs));
break;
case SB_HW_PRO:
+ case SB_HW_JAZZ16:
restore_mixer(chip, sbpro_saved_regs, ARRAY_SIZE(sbpro_saved_regs));
break;
case SB_HW_16:
diff --git a/sound/isa/wss/wss_lib.c b/sound/isa/wss/wss_lib.c
index 5b9d6c18bc45..9191b32d9130 100644
--- a/sound/isa/wss/wss_lib.c
+++ b/sound/isa/wss/wss_lib.c
@@ -2014,6 +2014,7 @@ static int snd_wss_info_mux(struct snd_kcontrol *kcontrol,
case WSS_HW_INTERWAVE:
ptexts = gusmax_texts;
break;
+ case WSS_HW_OPTI93X:
case WSS_HW_OPL3SA2:
ptexts = opl3sa_texts;
break;
@@ -2246,54 +2247,12 @@ WSS_SINGLE("Beep Bypass Playback Switch", 0,
CS4231_MONO_CTRL, 5, 1, 0),
};
-static struct snd_kcontrol_new snd_opti93x_controls[] = {
-WSS_DOUBLE("Master Playback Switch", 0,
- OPTi93X_OUT_LEFT, OPTi93X_OUT_RIGHT, 7, 7, 1, 1),
-WSS_DOUBLE_TLV("Master Playback Volume", 0,
- OPTi93X_OUT_LEFT, OPTi93X_OUT_RIGHT, 1, 1, 31, 1,
- db_scale_6bit),
-WSS_DOUBLE("PCM Playback Switch", 0,
- CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 7, 7, 1, 1),
-WSS_DOUBLE("PCM Playback Volume", 0,
- CS4231_LEFT_OUTPUT, CS4231_RIGHT_OUTPUT, 0, 0, 31, 1),
-WSS_DOUBLE("FM Playback Switch", 0,
- CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 7, 7, 1, 1),
-WSS_DOUBLE("FM Playback Volume", 0,
- CS4231_AUX2_LEFT_INPUT, CS4231_AUX2_RIGHT_INPUT, 1, 1, 15, 1),
-WSS_DOUBLE("Line Playback Switch", 0,
- CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 7, 7, 1, 1),
-WSS_DOUBLE("Line Playback Volume", 0,
- CS4231_LEFT_LINE_IN, CS4231_RIGHT_LINE_IN, 0, 0, 15, 1),
-WSS_DOUBLE("Mic Playback Switch", 0,
- OPTi93X_MIC_LEFT_INPUT, OPTi93X_MIC_RIGHT_INPUT, 7, 7, 1, 1),
-WSS_DOUBLE("Mic Playback Volume", 0,
- OPTi93X_MIC_LEFT_INPUT, OPTi93X_MIC_RIGHT_INPUT, 1, 1, 15, 1),
-WSS_DOUBLE("Mic Boost", 0,
- CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT, 5, 5, 1, 0),
-WSS_DOUBLE("CD Playback Switch", 0,
- CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 7, 7, 1, 1),
-WSS_DOUBLE("CD Playback Volume", 0,
- CS4231_AUX1_LEFT_INPUT, CS4231_AUX1_RIGHT_INPUT, 1, 1, 15, 1),
-WSS_DOUBLE("Aux Playback Switch", 0,
- OPTi931_AUX_LEFT_INPUT, OPTi931_AUX_RIGHT_INPUT, 7, 7, 1, 1),
-WSS_DOUBLE("Aux Playback Volume", 0,
- OPTi931_AUX_LEFT_INPUT, OPTi931_AUX_RIGHT_INPUT, 1, 1, 15, 1),
-WSS_DOUBLE("Capture Volume", 0,
- CS4231_LEFT_INPUT, CS4231_RIGHT_INPUT, 0, 0, 15, 0),
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = snd_wss_info_mux,
- .get = snd_wss_get_mux,
- .put = snd_wss_put_mux,
-}
-};
-
int snd_wss_mixer(struct snd_wss *chip)
{
struct snd_card *card;
unsigned int idx;
int err;
+ int count = ARRAY_SIZE(snd_wss_controls);
if (snd_BUG_ON(!chip || !chip->pcm))
return -EINVAL;
@@ -2302,28 +2261,19 @@ int snd_wss_mixer(struct snd_wss *chip)
strcpy(card->mixername, chip->pcm->name);
- if (chip->hardware == WSS_HW_OPTI93X)
- for (idx = 0; idx < ARRAY_SIZE(snd_opti93x_controls); idx++) {
- err = snd_ctl_add(card,
- snd_ctl_new1(&snd_opti93x_controls[idx],
- chip));
- if (err < 0)
- return err;
- }
- else {
- int count = ARRAY_SIZE(snd_wss_controls);
-
- /* Use only the first 11 entries on AD1848 */
- if (chip->hardware & WSS_HW_AD1848_MASK)
- count = 11;
-
- for (idx = 0; idx < count; idx++) {
- err = snd_ctl_add(card,
- snd_ctl_new1(&snd_wss_controls[idx],
- chip));
- if (err < 0)
- return err;
- }
+ /* Use only the first 11 entries on AD1848 */
+ if (chip->hardware & WSS_HW_AD1848_MASK)
+ count = 11;
+ /* There is no loopback on OPTI93X */
+ else if (chip->hardware == WSS_HW_OPTI93X)
+ count = 9;
+
+ for (idx = 0; idx < count; idx++) {
+ err = snd_ctl_add(card,
+ snd_ctl_new1(&snd_wss_controls[idx],
+ chip));
+ if (err < 0)
+ return err;
}
return 0;
}
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index f1d9d16b5486..9b486beeb932 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
-#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
@@ -603,25 +602,14 @@ static int snd_sgio2audio_pcm_close(struct snd_pcm_substream *substream)
static int snd_sgio2audio_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- struct snd_pcm_runtime *runtime = substream->runtime;
- int size = params_buffer_bytes(hw_params);
-
- /* alloc virtual 'dma' area */
- if (runtime->dma_area)
- vfree(runtime->dma_area);
- runtime->dma_area = vmalloc_user(size);
- if (runtime->dma_area == NULL)
- return -ENOMEM;
- runtime->dma_bytes = size;
- return 0;
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
}
/* hw_free callback */
static int snd_sgio2audio_pcm_hw_free(struct snd_pcm_substream *substream)
{
- vfree(substream->runtime->dma_area);
- substream->runtime->dma_area = NULL;
- return 0;
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
}
/* prepare callback */
@@ -692,13 +680,6 @@ snd_sgio2audio_pcm_pointer(struct snd_pcm_substream *substream)
chip->channel[chan->idx].pos);
}
-/* get the physical page pointer on the given offset */
-static struct page *snd_sgio2audio_page(struct snd_pcm_substream *substream,
- unsigned long offset)
-{
- return vmalloc_to_page(substream->runtime->dma_area + offset);
-}
-
/* operators */
static struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
.open = snd_sgio2audio_playback1_open,
@@ -709,7 +690,7 @@ static struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_sgio2audio_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
static struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
@@ -721,7 +702,7 @@ static struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_sgio2audio_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
static struct snd_pcm_ops snd_sgio2audio_capture_ops = {
@@ -733,7 +714,7 @@ static struct snd_pcm_ops snd_sgio2audio_capture_ops = {
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
- .page = snd_sgio2audio_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
/*
diff --git a/sound/oss/au1550_ac97.c b/sound/oss/au1550_ac97.c
index 4191acccbcdb..c1070e33b32f 100644
--- a/sound/oss/au1550_ac97.c
+++ b/sound/oss/au1550_ac97.c
@@ -614,7 +614,8 @@ start_adc(struct au1550_state *s)
/* Put two buffers on the ring to get things started.
*/
for (i=0; i<2; i++) {
- au1xxx_dbdma_put_dest(db->dmanr, db->nextIn, db->dma_fragsize);
+ au1xxx_dbdma_put_dest(db->dmanr, virt_to_phys(db->nextIn),
+ db->dma_fragsize, DDMA_FLAGS_IE);
db->nextIn += db->dma_fragsize;
if (db->nextIn >= db->rawbuf + db->dmasize)
@@ -732,8 +733,9 @@ static void dac_dma_interrupt(int irq, void *dev_id)
db->dma_qcount--;
if (db->count >= db->fragsize) {
- if (au1xxx_dbdma_put_source(db->dmanr, db->nextOut,
- db->fragsize) == 0) {
+ if (au1xxx_dbdma_put_source(db->dmanr,
+ virt_to_phys(db->nextOut), db->fragsize,
+ DDMA_FLAGS_IE) == 0) {
err("qcount < 2 and no ring room!");
}
db->nextOut += db->fragsize;
@@ -777,7 +779,8 @@ static void adc_dma_interrupt(int irq, void *dev_id)
/* Put a new empty buffer on the destination DMA.
*/
- au1xxx_dbdma_put_dest(dp->dmanr, dp->nextIn, dp->dma_fragsize);
+ au1xxx_dbdma_put_dest(dp->dmanr, virt_to_phys(dp->nextIn),
+ dp->dma_fragsize, DDMA_FLAGS_IE);
dp->nextIn += dp->dma_fragsize;
if (dp->nextIn >= dp->rawbuf + dp->dmasize)
@@ -1177,8 +1180,9 @@ au1550_write(struct file *file, const char *buffer, size_t count, loff_t * ppos)
* we know the dma has stopped.
*/
while ((db->dma_qcount < 2) && (db->count >= db->fragsize)) {
- if (au1xxx_dbdma_put_source(db->dmanr, db->nextOut,
- db->fragsize) == 0) {
+ if (au1xxx_dbdma_put_source(db->dmanr,
+ virt_to_phys(db->nextOut), db->fragsize,
+ DDMA_FLAGS_IE) == 0) {
err("qcount < 2 and no ring room!");
}
db->nextOut += db->fragsize;
diff --git a/sound/oss/dev_table.c b/sound/oss/dev_table.c
index 08274c995d06..727bdb9ba2dc 100644
--- a/sound/oss/dev_table.c
+++ b/sound/oss/dev_table.c
@@ -67,14 +67,15 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
return -(EBUSY);
}
d = (struct audio_driver *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_driver)));
-
- if (sound_nblocks < 1024)
- sound_nblocks++;
+ sound_nblocks++;
+ if (sound_nblocks >= MAX_MEM_BLOCKS)
+ sound_nblocks = MAX_MEM_BLOCKS - 1;
op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_operations)));
+ sound_nblocks++;
+ if (sound_nblocks >= MAX_MEM_BLOCKS)
+ sound_nblocks = MAX_MEM_BLOCKS - 1;
- if (sound_nblocks < 1024)
- sound_nblocks++;
if (d == NULL || op == NULL) {
printk(KERN_ERR "Sound: Can't allocate driver for (%s)\n", name);
sound_unload_audiodev(num);
@@ -128,9 +129,10 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver,
until you unload sound! */
op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct mixer_operations)));
+ sound_nblocks++;
+ if (sound_nblocks >= MAX_MEM_BLOCKS)
+ sound_nblocks = MAX_MEM_BLOCKS - 1;
- if (sound_nblocks < 1024)
- sound_nblocks++;
if (op == NULL) {
printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name);
return -ENOMEM;
diff --git a/sound/oss/sound_config.h b/sound/oss/sound_config.h
index 55271fbe7f49..9d35c4c65b9b 100644
--- a/sound/oss/sound_config.h
+++ b/sound/oss/sound_config.h
@@ -142,4 +142,6 @@ static inline int translate_mode(struct file *file)
#define TIMER_ARMED 121234
#define TIMER_NOT_ARMED 1
+#define MAX_MEM_BLOCKS 1024
+
#endif
diff --git a/sound/oss/soundcard.c b/sound/oss/soundcard.c
index 61aaedae6b7e..fde7c12fe5da 100644
--- a/sound/oss/soundcard.c
+++ b/sound/oss/soundcard.c
@@ -56,7 +56,7 @@
/*
* Table for permanently allocated memory (used when unloading the module)
*/
-void * sound_mem_blocks[1024];
+void * sound_mem_blocks[MAX_MEM_BLOCKS];
int sound_nblocks = 0;
/* Persistent DMA buffers */
@@ -328,11 +328,11 @@ static int sound_mixer_ioctl(int mixdev, unsigned int cmd, void __user *arg)
return mixer_devs[mixdev]->ioctl(mixdev, cmd, arg);
}
-static int sound_ioctl(struct inode *inode, struct file *file,
- unsigned int cmd, unsigned long arg)
+static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
int len = 0, dtype;
- int dev = iminor(inode);
+ int dev = iminor(file->f_dentry->d_inode);
+ long ret = -EINVAL;
void __user *p = (void __user *)arg;
if (_SIOC_DIR(cmd) != _SIOC_NONE && _SIOC_DIR(cmd) != 0) {
@@ -353,6 +353,7 @@ static int sound_ioctl(struct inode *inode, struct file *file,
if (cmd == OSS_GETVERSION)
return __put_user(SOUND_VERSION, (int __user *)p);
+ lock_kernel();
if (_IOC_TYPE(cmd) == 'M' && num_mixers > 0 && /* Mixer ioctl */
(dev & 0x0f) != SND_DEV_CTL) {
dtype = dev & 0x0f;
@@ -360,24 +361,31 @@ static int sound_ioctl(struct inode *inode, struct file *file,
case SND_DEV_DSP:
case SND_DEV_DSP16:
case SND_DEV_AUDIO:
- return sound_mixer_ioctl(audio_devs[dev >> 4]->mixer_dev,
+ ret = sound_mixer_ioctl(audio_devs[dev >> 4]->mixer_dev,
cmd, p);
-
+ break;
default:
- return sound_mixer_ioctl(dev >> 4, cmd, p);
+ ret = sound_mixer_ioctl(dev >> 4, cmd, p);
+ break;
}
+ unlock_kernel();
+ return ret;
}
+
switch (dev & 0x0f) {
case SND_DEV_CTL:
if (cmd == SOUND_MIXER_GETLEVELS)
- return get_mixer_levels(p);
- if (cmd == SOUND_MIXER_SETLEVELS)
- return set_mixer_levels(p);
- return sound_mixer_ioctl(dev >> 4, cmd, p);
+ ret = get_mixer_levels(p);
+ else if (cmd == SOUND_MIXER_SETLEVELS)
+ ret = set_mixer_levels(p);
+ else
+ ret = sound_mixer_ioctl(dev >> 4, cmd, p);
+ break;
case SND_DEV_SEQ:
case SND_DEV_SEQ2:
- return sequencer_ioctl(dev, file, cmd, p);
+ ret = sequencer_ioctl(dev, file, cmd, p);
+ break;
case SND_DEV_DSP:
case SND_DEV_DSP16:
@@ -390,7 +398,8 @@ static int sound_ioctl(struct inode *inode, struct file *file,
break;
}
- return -EINVAL;
+ unlock_kernel();
+ return ret;
}
static unsigned int sound_poll(struct file *file, poll_table * wait)
@@ -490,7 +499,7 @@ const struct file_operations oss_sound_fops = {
.read = sound_read,
.write = sound_write,
.poll = sound_poll,
- .ioctl = sound_ioctl,
+ .unlocked_ioctl = sound_ioctl,
.mmap = sound_mmap,
.open = sound_open,
.release = sound_release,
@@ -574,7 +583,7 @@ static int __init oss_init(void)
NULL, "%s%d", dev_list[i].name, j);
}
- if (sound_nblocks >= 1024)
+ if (sound_nblocks >= MAX_MEM_BLOCKS - 1)
printk(KERN_ERR "Sound warning: Deallocation table was too small.\n");
return 0;
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index c11920623009..a7630e9edf8a 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -83,6 +83,7 @@ static const struct ac97_codec_id snd_ac97_codec_id_vendors[] = {
{ 0x4e534300, 0xffffff00, "National Semiconductor", NULL, NULL },
{ 0x50534300, 0xffffff00, "Philips", NULL, NULL },
{ 0x53494c00, 0xffffff00, "Silicon Laboratory", NULL, NULL },
+{ 0x53544d00, 0xffffff00, "STMicroelectronics", NULL, NULL },
{ 0x54524100, 0xffffff00, "TriTech", NULL, NULL },
{ 0x54584e00, 0xffffff00, "Texas Instruments", NULL, NULL },
{ 0x56494100, 0xffffff00, "VIA Technologies", NULL, NULL },
@@ -161,6 +162,7 @@ static const struct ac97_codec_id snd_ac97_codec_ids[] = {
{ 0x4e534350, 0xffffffff, "LM4550", patch_lm4550, NULL }, // volume wrap fix
{ 0x50534304, 0xffffffff, "UCB1400", patch_ucb1400, NULL },
{ 0x53494c20, 0xffffffe0, "Si3036,8", mpatch_si3036, mpatch_si3036, AC97_MODEM_PATCH },
+{ 0x53544d02, 0xffffffff, "ST7597", NULL, NULL },
{ 0x54524102, 0xffffffff, "TR28022", NULL, NULL },
{ 0x54524103, 0xffffffff, "TR28023", NULL, NULL },
{ 0x54524106, 0xffffffff, "TR28026", NULL, NULL },
@@ -213,6 +215,14 @@ static int snd_ac97_valid_reg(struct snd_ac97 *ac97, unsigned short reg)
{
/* filter some registers for buggy codecs */
switch (ac97->id) {
+ case AC97_ID_ST_AC97_ID4:
+ if (reg == 0x08)
+ return 0;
+ /* fall through */
+ case AC97_ID_ST7597:
+ if (reg == 0x22 || reg == 0x7a)
+ return 1;
+ /* fall through */
case AC97_ID_AK4540:
case AC97_ID_AK4542:
if (reg <= 0x1c || reg == 0x20 || reg == 0x26 || reg >= 0x7c)
diff --git a/sound/pci/ac97/ac97_id.h b/sound/pci/ac97/ac97_id.h
index c129492c82b3..d603147c4a96 100644
--- a/sound/pci/ac97/ac97_id.h
+++ b/sound/pci/ac97/ac97_id.h
@@ -62,3 +62,5 @@
#define AC97_ID_CM9761_78 0x434d4978
#define AC97_ID_CM9761_82 0x434d4982
#define AC97_ID_CM9761_83 0x434d4983
+#define AC97_ID_ST7597 0x53544d02
+#define AC97_ID_ST_AC97_ID4 0x53544d04
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 139cf3b2b9d7..1caf5e3c1f6a 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -544,25 +544,10 @@ static int patch_wolfson04(struct snd_ac97 * ac97)
return 0;
}
-static int patch_wolfson_wm9705_specific(struct snd_ac97 * ac97)
-{
- int err, i;
- for (i = 0; i < ARRAY_SIZE(wm97xx_snd_ac97_controls); i++) {
- if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97))) < 0)
- return err;
- }
- snd_ac97_write_cache(ac97, 0x72, 0x0808);
- return 0;
-}
-
-static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
- .build_specific = patch_wolfson_wm9705_specific,
-};
-
static int patch_wolfson05(struct snd_ac97 * ac97)
{
/* WM9705, WM9710 */
- ac97->build_ops = &patch_wolfson_wm9705_ops;
+ ac97->build_ops = &patch_wolfson_wm9703_ops;
#ifdef CONFIG_TOUCHSCREEN_WM9705
/* WM9705 touchscreen uses AUX and VIDEO for touch */
ac97->flags |= AC97_HAS_NO_VIDEO | AC97_HAS_NO_AUX;
@@ -1870,6 +1855,7 @@ static unsigned int ad1981_jacks_blacklist[] = {
0x10140554, /* Thinkpad T42p/R50p */
0x10140567, /* Thinkpad T43p 2668-G7U */
0x10140581, /* Thinkpad X41-2527 */
+ 0x10280160, /* Dell Dimension 2400 */
0x104380b0, /* Asus A7V8X-MX */
0x11790241, /* Toshiba Satellite A-15 S127 */
0x144dc01a, /* Samsung NP-X20C004/SEG */
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index d6752dff2a44..42b4fbbd8e2b 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -297,6 +297,7 @@ static struct pci_device_id snd_atiixp_ids[] = {
MODULE_DEVICE_TABLE(pci, snd_atiixp_ids);
static struct snd_pci_quirk atiixp_quirks[] __devinitdata = {
+ SND_PCI_QUIRK(0x105b, 0x0c81, "Foxconn RC4107MA-RS2", 0),
SND_PCI_QUIRK(0x15bd, 0x3100, "DFI RS482", 0),
{ } /* terminator */
};
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 1be96ead4244..e6b4a879ae2e 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -3597,7 +3597,7 @@ static struct cs_card_type __devinitdata cards[] = {
#ifdef CONFIG_PM
static unsigned int saved_regs[] = {
BA0_ACOSV,
- BA0_ASER_FADDR,
+ /*BA0_ASER_FADDR,*/
BA0_ASER_MASTER,
BA1_PVOL,
BA1_CVOL,
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index f4f0c8f5dad7..3e5ca8fb519f 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -298,6 +298,9 @@ void cs46xx_dsp_spos_destroy (struct snd_cs46xx * chip)
if (ins->scbs[i].deleted) continue;
cs46xx_dsp_proc_free_scb_desc ( (ins->scbs + i) );
+#ifdef CONFIG_PM
+ kfree(ins->scbs[i].data);
+#endif
}
kfree(ins->code.data);
@@ -974,13 +977,11 @@ static struct dsp_scb_descriptor * _map_scb (struct snd_cs46xx *chip, char * nam
index = find_free_scb_index (ins);
+ memset(&ins->scbs[index], 0, sizeof(ins->scbs[index]));
strcpy(ins->scbs[index].scb_name, name);
ins->scbs[index].address = dest;
ins->scbs[index].index = index;
- ins->scbs[index].proc_info = NULL;
ins->scbs[index].ref_count = 1;
- ins->scbs[index].deleted = 0;
- spin_lock_init(&ins->scbs[index].lock);
desc = (ins->scbs + index);
ins->scbs[index].scb_symbol = add_symbol (chip, name, dest, SYMBOL_PARAMETER);
@@ -1022,17 +1023,29 @@ _map_task_tree (struct snd_cs46xx *chip, char * name, u32 dest, u32 size)
return desc;
}
+#define SCB_BYTES (0x10 * 4)
+
struct dsp_scb_descriptor *
cs46xx_dsp_create_scb (struct snd_cs46xx *chip, char * name, u32 * scb_data, u32 dest)
{
struct dsp_scb_descriptor * desc;
+#ifdef CONFIG_PM
+ /* copy the data for resume */
+ scb_data = kmemdup(scb_data, SCB_BYTES, GFP_KERNEL);
+ if (!scb_data)
+ return NULL;
+#endif
+
desc = _map_scb (chip,name,dest);
if (desc) {
desc->data = scb_data;
_dsp_create_scb(chip,scb_data,dest);
} else {
snd_printk(KERN_ERR "dsp_spos: failed to map SCB\n");
+#ifdef CONFIG_PM
+ kfree(scb_data);
+#endif
}
return desc;
@@ -1988,7 +2001,28 @@ int cs46xx_dsp_resume(struct snd_cs46xx * chip)
continue;
_dsp_create_scb(chip, s->data, s->address);
}
-
+ for (i = 0; i < ins->nscb; i++) {
+ struct dsp_scb_descriptor *s = &ins->scbs[i];
+ if (s->deleted)
+ continue;
+ if (s->updated)
+ cs46xx_dsp_spos_update_scb(chip, s);
+ if (s->volume_set)
+ cs46xx_dsp_scb_set_volume(chip, s,
+ s->volume[0], s->volume[1]);
+ }
+ if (ins->spdif_status_out & DSP_SPDIF_STATUS_HW_ENABLED) {
+ cs46xx_dsp_enable_spdif_hw(chip);
+ snd_cs46xx_poke(chip, (ins->ref_snoop_scb->address + 2) << 2,
+ (OUTPUT_SNOOP_BUFFER + 0x10) << 0x10);
+ if (ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN)
+ cs46xx_poke_via_dsp(chip, SP_SPDOUT_CSUV,
+ ins->spdif_csuv_stream);
+ }
+ if (chip->dsp_spos_instance->spdif_status_in) {
+ cs46xx_poke_via_dsp(chip, SP_ASER_COUNTDOWN, 0x80000005);
+ cs46xx_poke_via_dsp(chip, SP_SPDIN_CONTROL, 0x800003ff);
+ }
return 0;
}
#endif
diff --git a/sound/pci/cs46xx/dsp_spos.h b/sound/pci/cs46xx/dsp_spos.h
index f9e169d33c03..ca47a8114c7f 100644
--- a/sound/pci/cs46xx/dsp_spos.h
+++ b/sound/pci/cs46xx/dsp_spos.h
@@ -212,6 +212,7 @@ static inline void cs46xx_dsp_spos_update_scb (struct snd_cs46xx * chip,
(scb->address + SCBsubListPtr) << 2,
(scb->sub_list_ptr->address << 0x10) |
(scb->next_scb_ptr->address));
+ scb->updated = 1;
}
static inline void cs46xx_dsp_scb_set_volume (struct snd_cs46xx * chip,
@@ -222,6 +223,9 @@ static inline void cs46xx_dsp_scb_set_volume (struct snd_cs46xx * chip,
snd_cs46xx_poke(chip, (scb->address + SCBVolumeCtrl) << 2, val);
snd_cs46xx_poke(chip, (scb->address + SCBVolumeCtrl + 1) << 2, val);
+ scb->volume_set = 1;
+ scb->volume[0] = left;
+ scb->volume[1] = right;
}
#endif /* __DSP_SPOS_H__ */
#endif /* CONFIG_SND_CS46XX_NEW_DSP */
diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
index dd7c41b037b4..00b148a10239 100644
--- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
+++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
@@ -115,7 +115,6 @@ static void cs46xx_dsp_proc_scb_info_read (struct snd_info_entry *entry,
static void _dsp_unlink_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor * scb)
{
struct dsp_spos_instance * ins = chip->dsp_spos_instance;
- unsigned long flags;
if ( scb->parent_scb_ptr ) {
/* unlink parent SCB */
@@ -153,8 +152,6 @@ static void _dsp_unlink_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor
scb->next_scb_ptr = ins->the_null_scb;
}
- spin_lock_irqsave(&chip->reg_lock, flags);
-
/* update parent first entry in DSP RAM */
cs46xx_dsp_spos_update_scb(chip,scb->parent_scb_ptr);
@@ -162,7 +159,6 @@ static void _dsp_unlink_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor
cs46xx_dsp_spos_update_scb(chip,scb);
scb->parent_scb_ptr = NULL;
- spin_unlock_irqrestore(&chip->reg_lock, flags);
}
}
@@ -197,9 +193,9 @@ void cs46xx_dsp_remove_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor *
goto _end;
#endif
- spin_lock_irqsave(&scb->lock, flags);
+ spin_lock_irqsave(&chip->reg_lock, flags);
_dsp_unlink_scb (chip,scb);
- spin_unlock_irqrestore(&scb->lock, flags);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
cs46xx_dsp_proc_free_scb_desc(scb);
if (snd_BUG_ON(!scb->scb_symbol))
@@ -207,6 +203,10 @@ void cs46xx_dsp_remove_scb (struct snd_cs46xx *chip, struct dsp_scb_descriptor *
remove_symbol (chip,scb->scb_symbol);
ins->scbs[scb->index].deleted = 1;
+#ifdef CONFIG_PM
+ kfree(ins->scbs[scb->index].data);
+ ins->scbs[scb->index].data = NULL;
+#endif
if (scb->index < ins->scb_highest_frag_index)
ins->scb_highest_frag_index = scb->index;
@@ -1508,20 +1508,17 @@ int cs46xx_dsp_pcm_unlink (struct snd_cs46xx * chip,
chip->dsp_spos_instance->npcm_channels <= 0))
return -EIO;
- spin_lock(&pcm_channel->src_scb->lock);
-
+ spin_lock_irqsave(&chip->reg_lock, flags);
if (pcm_channel->unlinked) {
- spin_unlock(&pcm_channel->src_scb->lock);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
return -EIO;
}
- spin_lock_irqsave(&chip->reg_lock, flags);
pcm_channel->unlinked = 1;
- spin_unlock_irqrestore(&chip->reg_lock, flags);
_dsp_unlink_scb (chip,pcm_channel->pcm_reader_scb);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
- spin_unlock(&pcm_channel->src_scb->lock);
return 0;
}
@@ -1533,10 +1530,10 @@ int cs46xx_dsp_pcm_link (struct snd_cs46xx * chip,
struct dsp_scb_descriptor * src_scb = pcm_channel->src_scb;
unsigned long flags;
- spin_lock(&pcm_channel->src_scb->lock);
+ spin_lock_irqsave(&chip->reg_lock, flags);
if (pcm_channel->unlinked == 0) {
- spin_unlock(&pcm_channel->src_scb->lock);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
return -EIO;
}
@@ -1552,8 +1549,6 @@ int cs46xx_dsp_pcm_link (struct snd_cs46xx * chip,
snd_BUG_ON(pcm_channel->pcm_reader_scb->parent_scb_ptr);
pcm_channel->pcm_reader_scb->parent_scb_ptr = parent_scb;
- spin_lock_irqsave(&chip->reg_lock, flags);
-
/* update SCB entry in DSP RAM */
cs46xx_dsp_spos_update_scb(chip,pcm_channel->pcm_reader_scb);
@@ -1562,8 +1557,6 @@ int cs46xx_dsp_pcm_link (struct snd_cs46xx * chip,
pcm_channel->unlinked = 0;
spin_unlock_irqrestore(&chip->reg_lock, flags);
-
- spin_unlock(&pcm_channel->src_scb->lock);
return 0;
}
@@ -1596,13 +1589,17 @@ cs46xx_add_record_source (struct snd_cs46xx *chip, struct dsp_scb_descriptor * s
int cs46xx_src_unlink(struct snd_cs46xx *chip, struct dsp_scb_descriptor * src)
{
+ unsigned long flags;
+
if (snd_BUG_ON(!src->parent_scb_ptr))
return -EINVAL;
/* mute SCB */
cs46xx_dsp_scb_set_volume (chip,src,0,0);
+ spin_lock_irqsave(&chip->reg_lock, flags);
_dsp_unlink_scb (chip,src);
+ spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index f98b47cd6cfb..26ceace88c96 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -824,6 +824,9 @@ int snd_hda_add_pincfg(struct hda_codec *codec, struct snd_array *list,
struct hda_pincfg *pin;
unsigned int oldcfg;
+ if (get_wcaps_type(get_wcaps(codec, nid)) != AC_WID_PIN)
+ return -EINVAL;
+
oldcfg = snd_hda_codec_get_pincfg(codec, nid);
pin = look_up_pincfg(codec, list, nid);
if (!pin) {
@@ -899,6 +902,25 @@ static void restore_pincfgs(struct hda_codec *codec)
}
}
+/**
+ * snd_hda_shutup_pins - Shut up all pins
+ * @codec: the HDA codec
+ *
+ * Clear all pin controls to shup up before suspend for avoiding click noise.
+ * The controls aren't cached so that they can be resumed properly.
+ */
+void snd_hda_shutup_pins(struct hda_codec *codec)
+{
+ int i;
+ for (i = 0; i < codec->init_pins.used; i++) {
+ struct hda_pincfg *pin = snd_array_elem(&codec->init_pins, i);
+ /* use read here for syncing after issuing each verb */
+ snd_hda_codec_read(codec, pin->nid, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
+ }
+}
+EXPORT_SYMBOL_HDA(snd_hda_shutup_pins);
+
static void init_hda_cache(struct hda_cache_rec *cache,
unsigned int record_size);
static void free_hda_cache(struct hda_cache_rec *cache);
@@ -931,6 +953,7 @@ static void snd_hda_codec_free(struct hda_codec *codec)
#endif
list_del(&codec->list);
snd_array_free(&codec->mixers);
+ snd_array_free(&codec->nids);
codec->bus->caddr_tbl[codec->addr] = NULL;
if (codec->patch_ops.free)
codec->patch_ops.free(codec);
@@ -985,7 +1008,8 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr
mutex_init(&codec->control_mutex);
init_hda_cache(&codec->amp_cache, sizeof(struct hda_amp_info));
init_hda_cache(&codec->cmd_cache, sizeof(struct hda_cache_head));
- snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 60);
+ snd_array_init(&codec->mixers, sizeof(struct hda_nid_item), 32);
+ snd_array_init(&codec->nids, sizeof(struct hda_nid_item), 32);
snd_array_init(&codec->init_pins, sizeof(struct hda_pincfg), 16);
snd_array_init(&codec->driver_pins, sizeof(struct hda_pincfg), 16);
if (codec->bus->modelname) {
@@ -1708,7 +1732,7 @@ struct snd_kcontrol *snd_hda_find_mixer_ctl(struct hda_codec *codec,
EXPORT_SYMBOL_HDA(snd_hda_find_mixer_ctl);
/**
- * snd_hda_ctl-add - Add a control element and assign to the codec
+ * snd_hda_ctl_add - Add a control element and assign to the codec
* @codec: HD-audio codec
* @nid: corresponding NID (optional)
* @kctl: the control element to assign
@@ -1723,19 +1747,25 @@ EXPORT_SYMBOL_HDA(snd_hda_find_mixer_ctl);
*
* snd_hda_ctl_add() checks the control subdev id field whether
* #HDA_SUBDEV_NID_FLAG bit is set. If set (and @nid is zero), the lower
- * bits value is taken as the NID to assign.
+ * bits value is taken as the NID to assign. The #HDA_NID_ITEM_AMP bit
+ * specifies if kctl->private_value is a HDA amplifier value.
*/
int snd_hda_ctl_add(struct hda_codec *codec, hda_nid_t nid,
struct snd_kcontrol *kctl)
{
int err;
+ unsigned short flags = 0;
struct hda_nid_item *item;
- if (kctl->id.subdevice & HDA_SUBDEV_NID_FLAG) {
+ if (kctl->id.subdevice & HDA_SUBDEV_AMP_FLAG) {
+ flags |= HDA_NID_ITEM_AMP;
if (nid == 0)
- nid = kctl->id.subdevice & 0xffff;
- kctl->id.subdevice = 0;
+ nid = get_amp_nid_(kctl->private_value);
}
+ if ((kctl->id.subdevice & HDA_SUBDEV_NID_FLAG) != 0 && nid == 0)
+ nid = kctl->id.subdevice & 0xffff;
+ if (kctl->id.subdevice & (HDA_SUBDEV_NID_FLAG|HDA_SUBDEV_AMP_FLAG))
+ kctl->id.subdevice = 0;
err = snd_ctl_add(codec->bus->card, kctl);
if (err < 0)
return err;
@@ -1744,11 +1774,41 @@ int snd_hda_ctl_add(struct hda_codec *codec, hda_nid_t nid,
return -ENOMEM;
item->kctl = kctl;
item->nid = nid;
+ item->flags = flags;
return 0;
}
EXPORT_SYMBOL_HDA(snd_hda_ctl_add);
/**
+ * snd_hda_add_nid - Assign a NID to a control element
+ * @codec: HD-audio codec
+ * @nid: corresponding NID (optional)
+ * @kctl: the control element to assign
+ * @index: index to kctl
+ *
+ * Add the given control element to an array inside the codec instance.
+ * This function is used when #snd_hda_ctl_add cannot be used for 1:1
+ * NID:KCTL mapping - for example "Capture Source" selector.
+ */
+int snd_hda_add_nid(struct hda_codec *codec, struct snd_kcontrol *kctl,
+ unsigned int index, hda_nid_t nid)
+{
+ struct hda_nid_item *item;
+
+ if (nid > 0) {
+ item = snd_array_new(&codec->nids);
+ if (!item)
+ return -ENOMEM;
+ item->kctl = kctl;
+ item->index = index;
+ item->nid = nid;
+ return 0;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_HDA(snd_hda_add_nid);
+
+/**
* snd_hda_ctls_clear - Clear all controls assigned to the given codec
* @codec: HD-audio codec
*/
@@ -1759,6 +1819,7 @@ void snd_hda_ctls_clear(struct hda_codec *codec)
for (i = 0; i < codec->mixers.used; i++)
snd_ctl_remove(codec->bus->card, items[i].kctl);
snd_array_free(&codec->mixers);
+ snd_array_free(&codec->nids);
}
/* pseudo device locking
@@ -3478,6 +3539,8 @@ int snd_hda_add_new_ctls(struct hda_codec *codec, struct snd_kcontrol_new *knew)
for (; knew->name; knew++) {
struct snd_kcontrol *kctl;
+ if (knew->iface == -1) /* skip this codec private value */
+ continue;
kctl = snd_ctl_new1(knew, codec);
if (!kctl)
return -ENOMEM;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 0a770a28e71f..0c8f05cc56be 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -789,6 +789,7 @@ struct hda_codec {
u32 *wcaps;
struct snd_array mixers; /* list of assigned mixer elements */
+ struct snd_array nids; /* list of mapped mixer elements */
struct hda_cache_rec amp_cache; /* cache for amp access */
struct hda_cache_rec cmd_cache; /* cache for other commands */
@@ -898,6 +899,7 @@ int snd_hda_codec_set_pincfg(struct hda_codec *codec, hda_nid_t nid,
unsigned int cfg);
int snd_hda_add_pincfg(struct hda_codec *codec, struct snd_array *list,
hda_nid_t nid, unsigned int cfg); /* for hwdep */
+void snd_hda_shutup_pins(struct hda_codec *codec);
/*
* Mixer
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 092c6a7c2ff3..5ea21285ee1f 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -861,7 +861,8 @@ static int build_input_controls(struct hda_codec *codec)
}
/* create input MUX if multiple sources are available */
- err = snd_hda_ctl_add(codec, 0, snd_ctl_new1(&cap_sel, codec));
+ err = snd_hda_ctl_add(codec, spec->adc_node->nid,
+ snd_ctl_new1(&cap_sel, codec));
if (err < 0)
return err;
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 40ccb419b6e9..b36919c0d363 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -293,8 +293,11 @@ static ssize_t type##_store(struct device *dev, \
{ \
struct snd_hwdep *hwdep = dev_get_drvdata(dev); \
struct hda_codec *codec = hwdep->private_data; \
- char *after; \
- codec->type = simple_strtoul(buf, &after, 0); \
+ unsigned long val; \
+ int err = strict_strtoul(buf, 0, &val); \
+ if (err < 0) \
+ return err; \
+ codec->type = val; \
return count; \
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index ec9c348336cc..1f516e668d88 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2704,32 +2704,10 @@ static struct pci_device_id azx_ids[] = {
/* ULI M5461 */
{ PCI_DEVICE(0x10b9, 0x5461), .driver_data = AZX_DRIVER_ULI },
/* NVIDIA MCP */
- { PCI_DEVICE(0x10de, 0x026c), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0371), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x03e4), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x03f0), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x044a), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x044b), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x055c), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x055d), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0590), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0774), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0775), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0776), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0777), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x07fc), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x07fd), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0ac0), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0ac1), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0ac2), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0ac3), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0be2), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0be3), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0be4), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0d94), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0d95), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0d96), .driver_data = AZX_DRIVER_NVIDIA },
- { PCI_DEVICE(0x10de, 0x0d97), .driver_data = AZX_DRIVER_NVIDIA },
+ { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+ .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
+ .class_mask = 0xffffff,
+ .driver_data = AZX_DRIVER_NVIDIA },
/* Teradici */
{ PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA },
/* Creative X-Fi (CA0110-IBG) */
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 5778ae882b83..7cee364976ff 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -31,6 +31,7 @@
* in snd_hda_ctl_add(), so that this value won't appear in the outside.
*/
#define HDA_SUBDEV_NID_FLAG (1U << 31)
+#define HDA_SUBDEV_AMP_FLAG (1U << 30)
/*
* for mixer controls
@@ -42,7 +43,7 @@
/* mono volume with index (index=0,1,...) (channel=1,2) */
#define HDA_CODEC_VOLUME_MONO_IDX(xname, xcidx, nid, channel, xindex, direction) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xcidx, \
- .subdevice = HDA_SUBDEV_NID_FLAG | (nid), \
+ .subdevice = HDA_SUBDEV_AMP_FLAG, \
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \
@@ -63,7 +64,7 @@
/* mono mute switch with index (index=0,1,...) (channel=1,2) */
#define HDA_CODEC_MUTE_MONO_IDX(xname, xcidx, nid, channel, xindex, direction) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xcidx, \
- .subdevice = HDA_SUBDEV_NID_FLAG | (nid), \
+ .subdevice = HDA_SUBDEV_AMP_FLAG, \
.info = snd_hda_mixer_amp_switch_info, \
.get = snd_hda_mixer_amp_switch_get, \
.put = snd_hda_mixer_amp_switch_put, \
@@ -81,7 +82,7 @@
/* special beep mono mute switch with index (index=0,1,...) (channel=1,2) */
#define HDA_CODEC_MUTE_BEEP_MONO_IDX(xname, xcidx, nid, channel, xindex, direction) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xcidx, \
- .subdevice = HDA_SUBDEV_NID_FLAG | (nid), \
+ .subdevice = HDA_SUBDEV_AMP_FLAG, \
.info = snd_hda_mixer_amp_switch_info, \
.get = snd_hda_mixer_amp_switch_get, \
.put = snd_hda_mixer_amp_switch_put_beep, \
@@ -464,13 +465,20 @@ u32 snd_hda_query_pin_caps(struct hda_codec *codec, hda_nid_t nid);
u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid);
int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
+/* flags for hda_nid_item */
+#define HDA_NID_ITEM_AMP (1<<0)
+
struct hda_nid_item {
struct snd_kcontrol *kctl;
+ unsigned int index;
hda_nid_t nid;
+ unsigned short flags;
};
int snd_hda_ctl_add(struct hda_codec *codec, hda_nid_t nid,
struct snd_kcontrol *kctl);
+int snd_hda_add_nid(struct hda_codec *codec, struct snd_kcontrol *kctl,
+ unsigned int index, hda_nid_t nid);
void snd_hda_ctls_clear(struct hda_codec *codec);
/*
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index c9afc04adac8..f97d35de66c4 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -61,18 +61,29 @@ static const char *get_wid_type_name(unsigned int wid_value)
return "UNKNOWN Widget";
}
-static void print_nid_mixers(struct snd_info_buffer *buffer,
- struct hda_codec *codec, hda_nid_t nid)
+static void print_nid_array(struct snd_info_buffer *buffer,
+ struct hda_codec *codec, hda_nid_t nid,
+ struct snd_array *array)
{
int i;
- struct hda_nid_item *items = codec->mixers.list;
+ struct hda_nid_item *items = array->list, *item;
struct snd_kcontrol *kctl;
- for (i = 0; i < codec->mixers.used; i++) {
- if (items[i].nid == nid) {
- kctl = items[i].kctl;
+ for (i = 0; i < array->used; i++) {
+ item = &items[i];
+ if (item->nid == nid) {
+ kctl = item->kctl;
snd_iprintf(buffer,
" Control: name=\"%s\", index=%i, device=%i\n",
- kctl->id.name, kctl->id.index, kctl->id.device);
+ kctl->id.name, kctl->id.index + item->index,
+ kctl->id.device);
+ if (item->flags & HDA_NID_ITEM_AMP)
+ snd_iprintf(buffer,
+ " ControlAmp: chs=%lu, dir=%s, "
+ "idx=%lu, ofs=%lu\n",
+ get_amp_channels(kctl),
+ get_amp_direction(kctl) ? "Out" : "In",
+ get_amp_index(kctl),
+ get_amp_offset(kctl));
}
}
}
@@ -528,7 +539,8 @@ static void print_gpio(struct snd_info_buffer *buffer,
(data & (1<<i)) ? 1 : 0,
(unsol & (1<<i)) ? 1 : 0);
/* FIXME: add GPO and GPI pin information */
- print_nid_mixers(buffer, codec, nid);
+ print_nid_array(buffer, codec, nid, &codec->mixers);
+ print_nid_array(buffer, codec, nid, &codec->nids);
}
static void print_codec_info(struct snd_info_entry *entry,
@@ -608,7 +620,8 @@ static void print_codec_info(struct snd_info_entry *entry,
snd_iprintf(buffer, " CP");
snd_iprintf(buffer, "\n");
- print_nid_mixers(buffer, codec, nid);
+ print_nid_array(buffer, codec, nid, &codec->mixers);
+ print_nid_array(buffer, codec, nid, &codec->nids);
print_nid_pcms(buffer, codec, nid);
/* volume knob is a special widget that always have connection
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 69a941c7b158..21011b5199de 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -174,6 +174,7 @@ static struct snd_kcontrol_new ad_beep_mixer[] = {
static int ad198x_build_controls(struct hda_codec *codec)
{
struct ad198x_spec *spec = codec->spec;
+ struct snd_kcontrol *kctl;
unsigned int i;
int err;
@@ -208,9 +209,7 @@ static int ad198x_build_controls(struct hda_codec *codec)
if (!kctl)
return -ENOMEM;
kctl->private_value = spec->beep_amp;
- err = snd_hda_ctl_add(codec,
- get_amp_nid_(spec->beep_amp),
- kctl);
+ err = snd_hda_ctl_add(codec, 0, kctl);
if (err < 0)
return err;
}
@@ -239,6 +238,27 @@ static int ad198x_build_controls(struct hda_codec *codec)
}
ad198x_free_kctls(codec); /* no longer needed */
+
+ /* assign Capture Source enums to NID */
+ kctl = snd_hda_find_mixer_ctl(codec, "Capture Source");
+ if (!kctl)
+ kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
+ for (i = 0; kctl && i < kctl->count; i++) {
+ err = snd_hda_add_nid(codec, kctl, i, spec->capsrc_nids[i]);
+ if (err < 0)
+ return err;
+ }
+
+ /* assign IEC958 enums to NID */
+ kctl = snd_hda_find_mixer_ctl(codec,
+ SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source");
+ if (kctl) {
+ err = snd_hda_add_nid(codec, kctl, 0,
+ spec->multiout.dig_out_nid);
+ if (err < 0)
+ return err;
+ }
+
return 0;
}
@@ -421,6 +441,11 @@ static int ad198x_build_pcms(struct hda_codec *codec)
return 0;
}
+static inline void ad198x_shutup(struct hda_codec *codec)
+{
+ snd_hda_shutup_pins(codec);
+}
+
static void ad198x_free_kctls(struct hda_codec *codec)
{
struct ad198x_spec *spec = codec->spec;
@@ -434,6 +459,46 @@ static void ad198x_free_kctls(struct hda_codec *codec)
snd_array_free(&spec->kctls);
}
+static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
+ hda_nid_t hp)
+{
+ struct ad198x_spec *spec = codec->spec;
+ snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
+ !spec->inv_eapd ? 0x00 : 0x02);
+ snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
+ !spec->inv_eapd ? 0x00 : 0x02);
+}
+
+static void ad198x_power_eapd(struct hda_codec *codec)
+{
+ /* We currently only handle front, HP */
+ switch (codec->vendor_id) {
+ case 0x11d41882:
+ case 0x11d4882a:
+ case 0x11d41884:
+ case 0x11d41984:
+ case 0x11d41883:
+ case 0x11d4184a:
+ case 0x11d4194a:
+ case 0x11d4194b:
+ ad198x_power_eapd_write(codec, 0x12, 0x11);
+ break;
+ case 0x11d41981:
+ case 0x11d41983:
+ ad198x_power_eapd_write(codec, 0x05, 0x06);
+ break;
+ case 0x11d41986:
+ ad198x_power_eapd_write(codec, 0x1b, 0x1a);
+ break;
+ case 0x11d41988:
+ case 0x11d4198b:
+ case 0x11d4989a:
+ case 0x11d4989b:
+ ad198x_power_eapd_write(codec, 0x29, 0x22);
+ break;
+ }
+}
+
static void ad198x_free(struct hda_codec *codec)
{
struct ad198x_spec *spec = codec->spec;
@@ -441,11 +506,29 @@ static void ad198x_free(struct hda_codec *codec)
if (!spec)
return;
+ ad198x_shutup(codec);
ad198x_free_kctls(codec);
kfree(spec);
snd_hda_detach_beep_device(codec);
}
+#ifdef SND_HDA_NEEDS_RESUME
+static int ad198x_suspend(struct hda_codec *codec, pm_message_t state)
+{
+ ad198x_shutup(codec);
+ ad198x_power_eapd(codec);
+ return 0;
+}
+
+static int ad198x_resume(struct hda_codec *codec)
+{
+ ad198x_init(codec);
+ snd_hda_codec_resume_amp(codec);
+ snd_hda_codec_resume_cache(codec);
+ return 0;
+}
+#endif
+
static struct hda_codec_ops ad198x_patch_ops = {
.build_controls = ad198x_build_controls,
.build_pcms = ad198x_build_pcms,
@@ -454,6 +537,11 @@ static struct hda_codec_ops ad198x_patch_ops = {
#ifdef CONFIG_SND_HDA_POWER_SAVE
.check_power_status = ad198x_check_power_status,
#endif
+#ifdef SND_HDA_NEEDS_RESUME
+ .suspend = ad198x_suspend,
+ .resume = ad198x_resume,
+#endif
+ .reboot_notify = ad198x_shutup,
};
@@ -701,6 +789,7 @@ static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "External Amplifier",
+ .subdevice = HDA_SUBDEV_NID_FLAG | 0x1b,
.info = ad198x_eapd_info,
.get = ad198x_eapd_get,
.put = ad198x_eapd_put,
@@ -808,6 +897,7 @@ static struct snd_kcontrol_new ad1986a_automute_master_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = ad1986a_hp_master_sw_put,
@@ -1612,6 +1702,7 @@ static struct snd_kcontrol_new ad1981_hp_mixers[] = {
HDA_BIND_VOL("Master Playback Volume", &ad1981_hp_bind_master_vol),
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .subdevice = HDA_SUBDEV_NID_FLAG | 0x05,
.name = "Master Playback Switch",
.info = ad198x_eapd_info,
.get = ad198x_eapd_get,
@@ -2136,6 +2227,7 @@ static struct snd_kcontrol_new ad1988_laptop_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "External Amplifier",
+ .subdevice = HDA_SUBDEV_NID_FLAG | 0x12,
.info = ad198x_eapd_info,
.get = ad198x_eapd_get,
.put = ad198x_eapd_put,
@@ -2257,6 +2349,7 @@ static struct snd_kcontrol_new ad1988_spdif_out_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "IEC958 Playback Source",
+ .subdevice = HDA_SUBDEV_NID_FLAG | 0x1b,
.info = ad1988_spdif_playback_source_info,
.get = ad1988_spdif_playback_source_get,
.put = ad1988_spdif_playback_source_put,
@@ -2589,7 +2682,7 @@ static int add_control(struct ad198x_spec *spec, int type, const char *name,
if (! knew->name)
return -ENOMEM;
if (get_amp_nid_(val))
- knew->subdevice = HDA_SUBDEV_NID_FLAG | get_amp_nid_(val);
+ knew->subdevice = HDA_SUBDEV_AMP_FLAG;
knew->private_value = val;
return 0;
}
@@ -3747,6 +3840,7 @@ static struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = ad1884a_mobile_master_sw_put,
@@ -3775,6 +3869,7 @@ static struct snd_kcontrol_new ad1884a_mobile_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = ad1884a_mobile_master_sw_put,
@@ -4116,6 +4211,7 @@ static struct snd_kcontrol_new ad1984a_touchsmart_mixers[] = {
/* HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.name = "Master Playback Switch",
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index fe0423c39598..7de782a5b8f4 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -501,7 +501,8 @@ static int add_mute(struct hda_codec *codec, const char *name, int index,
knew.private_value = pval;
snprintf(tmp, sizeof(tmp), "%s %s Switch", name, dir_sfx[dir]);
*kctlp = snd_ctl_new1(&knew, codec);
- return snd_hda_ctl_add(codec, get_amp_nid_(pval), *kctlp);
+ (*kctlp)->id.subdevice = HDA_SUBDEV_AMP_FLAG;
+ return snd_hda_ctl_add(codec, 0, *kctlp);
}
static int add_volume(struct hda_codec *codec, const char *name,
@@ -514,7 +515,8 @@ static int add_volume(struct hda_codec *codec, const char *name,
knew.private_value = pval;
snprintf(tmp, sizeof(tmp), "%s %s Volume", name, dir_sfx[dir]);
*kctlp = snd_ctl_new1(&knew, codec);
- return snd_hda_ctl_add(codec, get_amp_nid_(pval), *kctlp);
+ (*kctlp)->id.subdevice = HDA_SUBDEV_AMP_FLAG;
+ return snd_hda_ctl_add(codec, 0, *kctlp);
}
static void fix_volume_caps(struct hda_codec *codec, hda_nid_t dac)
@@ -751,6 +753,7 @@ static int build_input(struct hda_codec *codec)
spec->capture_bind[1] = make_bind_capture(codec, &snd_hda_bind_vol);
for (i = 0; i < 2; i++) {
struct snd_kcontrol *kctl;
+ int n;
if (!spec->capture_bind[i])
return -ENOMEM;
kctl = snd_ctl_new1(&cs_capture_ctls[i], codec);
@@ -760,6 +763,13 @@ static int build_input(struct hda_codec *codec)
err = snd_hda_ctl_add(codec, 0, kctl);
if (err < 0)
return err;
+ for (n = 0; n < AUTO_PIN_LAST; n++) {
+ if (!spec->adc_nid[n])
+ continue;
+ err = snd_hda_add_nid(codec, kctl, 0, spec->adc_nid[i]);
+ if (err < 0)
+ return err;
+ }
}
if (spec->num_inputs > 1 && !spec->mic_detect) {
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index a45c1169762b..ff60908f4554 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -315,7 +315,8 @@ static struct hda_verb cmi9880_allout_init[] = {
static int cmi9880_build_controls(struct hda_codec *codec)
{
struct cmi_spec *spec = codec->spec;
- int err;
+ struct snd_kcontrol *kctl;
+ int i, err;
err = snd_hda_add_new_ctls(codec, cmi9880_basic_mixer);
if (err < 0)
@@ -340,6 +341,14 @@ static int cmi9880_build_controls(struct hda_codec *codec)
if (err < 0)
return err;
}
+
+ /* assign Capture Source enums to NID */
+ kctl = snd_hda_find_mixer_ctl(codec, "Capture Source");
+ for (i = 0; kctl && i < kctl->count; i++) {
+ err = snd_hda_add_nid(codec, kctl, i, spec->adc_nids[i]);
+ if (err < 0)
+ return err;
+ }
return 0;
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index c578c28f368e..685015a53292 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -111,8 +111,22 @@ struct conexant_spec {
unsigned int dell_automute;
unsigned int port_d_mode;
- unsigned char ext_mic_bias;
unsigned int dell_vostro;
+
+ unsigned int ext_mic_present;
+ unsigned int recording;
+ void (*capture_prepare)(struct hda_codec *codec);
+ void (*capture_cleanup)(struct hda_codec *codec);
+
+ /* OLPC XO-1.5 supports DC input mode (e.g. for use with analog sensors)
+ * through the microphone jack.
+ * When the user enables this through a mixer switch, both internal and
+ * external microphones are disabled. Gain is fixed at 0dB. In this mode,
+ * we also allow the bias to be configured through a separate mixer
+ * control. */
+ unsigned int dc_enable;
+ unsigned int dc_input_bias; /* offset into cxt5066_olpc_dc_bias */
+ unsigned int mic_boost; /* offset into cxt5066_analog_mic_boost */
};
static int conexant_playback_pcm_open(struct hda_pcm_stream *hinfo,
@@ -185,6 +199,8 @@ static int conexant_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
struct snd_pcm_substream *substream)
{
struct conexant_spec *spec = codec->spec;
+ if (spec->capture_prepare)
+ spec->capture_prepare(codec);
snd_hda_codec_setup_stream(codec, spec->adc_nids[substream->number],
stream_tag, 0, format);
return 0;
@@ -196,6 +212,8 @@ static int conexant_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
{
struct conexant_spec *spec = codec->spec;
snd_hda_codec_cleanup_stream(codec, spec->adc_nids[substream->number]);
+ if (spec->capture_cleanup)
+ spec->capture_cleanup(codec);
return 0;
}
@@ -1723,6 +1741,22 @@ static struct snd_kcontrol_new cxt5051_hp_dv6736_mixers[] = {
{}
};
+static struct snd_kcontrol_new cxt5051_f700_mixers[] = {
+ HDA_CODEC_VOLUME("Mic Volume", 0x14, 0x01, HDA_INPUT),
+ HDA_CODEC_MUTE("Mic Switch", 0x14, 0x01, HDA_INPUT),
+ HDA_CODEC_VOLUME("Master Playback Volume", 0x10, 0x00, HDA_OUTPUT),
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Master Playback Switch",
+ .info = cxt_eapd_info,
+ .get = cxt_eapd_get,
+ .put = cxt5051_hp_master_sw_put,
+ .private_value = 0x1a,
+ },
+
+ {}
+};
+
static struct hda_verb cxt5051_init_verbs[] = {
/* Line in, Mic */
{0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0) | 0x03},
@@ -1813,6 +1847,32 @@ static struct hda_verb cxt5051_lenovo_x200_init_verbs[] = {
{ } /* end */
};
+static struct hda_verb cxt5051_f700_init_verbs[] = {
+ /* Line in, Mic */
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x03},
+ {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+ {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0},
+ {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0},
+ /* SPK */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+ {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00},
+ /* HP, Amp */
+ {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+ {0x16, AC_VERB_SET_CONNECT_SEL, 0x00},
+ /* DAC1 */
+ {0x10, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+ /* Record selector: Int mic */
+ {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1) | 0x44},
+ {0x14, AC_VERB_SET_CONNECT_SEL, 0x1},
+ /* SPDIF route: PCM */
+ {0x1c, AC_VERB_SET_CONNECT_SEL, 0x0},
+ /* EAPD */
+ {0x1a, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
+ {0x16, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN|CONEXANT_HP_EVENT},
+ {0x17, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN|CXT5051_PORTB_EVENT},
+ { } /* end */
+};
+
/* initialize jack-sensing, too */
static int cxt5051_init(struct hda_codec *codec)
{
@@ -1832,6 +1892,7 @@ enum {
CXT5051_HP, /* no docking */
CXT5051_HP_DV6736, /* HP without mic switch */
CXT5051_LENOVO_X200, /* Lenovo X200 laptop */
+ CXT5051_F700, /* HP Compaq Presario F700 */
CXT5051_MODELS
};
@@ -1840,6 +1901,7 @@ static const char *cxt5051_models[CXT5051_MODELS] = {
[CXT5051_HP] = "hp",
[CXT5051_HP_DV6736] = "hp-dv6736",
[CXT5051_LENOVO_X200] = "lenovo-x200",
+ [CXT5051_F700] = "hp 700"
};
static struct snd_pci_quirk cxt5051_cfg_tbl[] = {
@@ -1849,6 +1911,7 @@ static struct snd_pci_quirk cxt5051_cfg_tbl[] = {
CXT5051_LAPTOP),
SND_PCI_QUIRK(0x14f1, 0x5051, "HP Spartan 1.1", CXT5051_HP),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT5051_LENOVO_X200),
+ SND_PCI_QUIRK(0x103c, 0x30ea, "Compaq Presario F700", CXT5051_F700),
{}
};
@@ -1899,6 +1962,11 @@ static int patch_cxt5051(struct hda_codec *codec)
case CXT5051_LENOVO_X200:
spec->init_verbs[0] = cxt5051_lenovo_x200_init_verbs;
break;
+ case CXT5051_F700:
+ spec->init_verbs[0] = cxt5051_f700_init_verbs;
+ spec->mixers[0] = cxt5051_f700_mixers;
+ spec->no_auto_mic = 1;
+ break;
}
return 0;
@@ -1966,53 +2034,97 @@ static int cxt5066_hp_master_sw_put(struct snd_kcontrol *kcontrol,
return 1;
}
-/* toggle input of built-in and mic jack appropriately */
-static void cxt5066_automic(struct hda_codec *codec)
+static const struct hda_input_mux cxt5066_olpc_dc_bias = {
+ .num_items = 3,
+ .items = {
+ { "Off", PIN_IN },
+ { "50%", PIN_VREF50 },
+ { "80%", PIN_VREF80 },
+ },
+};
+
+static int cxt5066_set_olpc_dc_bias(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- struct hda_verb ext_mic_present[] = {
- /* enable external mic, port B */
- {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, spec->ext_mic_bias},
+ /* Even though port F is the DC input, the bias is controlled on port B.
+ * we also leave that port as an active input (but unselected) in DC mode
+ * just in case that is necessary to make the bias setting take effect. */
+ return snd_hda_codec_write_cache(codec, 0x1a, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL,
+ cxt5066_olpc_dc_bias.items[spec->dc_input_bias].index);
+}
- /* switch to external mic input */
- {0x17, AC_VERB_SET_CONNECT_SEL, 0},
+/* OLPC defers mic widget control until when capture is started because the
+ * microphone LED comes on as soon as these settings are put in place. if we
+ * did this before recording, it would give the false indication that recording
+ * is happening when it is not. */
+static void cxt5066_olpc_select_mic(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ if (!spec->recording)
+ return;
- /* disable internal mic, port C */
- {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
- {}
- };
- static struct hda_verb ext_mic_absent[] = {
- /* enable internal mic, port C */
- {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+ if (spec->dc_enable) {
+ /* in DC mode we ignore presence detection and just use the jack
+ * through our special DC port */
+ const struct hda_verb enable_dc_mode[] = {
+ /* disble internal mic, port C */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* enable DC capture, port F */
+ {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
+ {},
+ };
+
+ snd_hda_sequence_write(codec, enable_dc_mode);
+ /* port B input disabled (and bias set) through the following call */
+ cxt5066_set_olpc_dc_bias(codec);
+ return;
+ }
- /* switch to internal mic input */
- {0x17, AC_VERB_SET_CONNECT_SEL, 1},
+ /* disable DC (port F) */
+ snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
- /* disable external mic, port B */
- {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
- {}
- };
+ /* external mic, port B */
+ snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ spec->ext_mic_present ? CXT5066_OLPC_EXT_MIC_BIAS : 0);
+
+ /* internal mic, port C */
+ snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ spec->ext_mic_present ? 0 : PIN_VREF80);
+}
+
+/* toggle input of built-in and mic jack appropriately */
+static void cxt5066_olpc_automic(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
unsigned int present;
- present = snd_hda_jack_detect(codec, 0x1a);
- if (present) {
+ if (spec->dc_enable) /* don't do presence detection in DC mode */
+ return;
+
+ present = snd_hda_codec_read(codec, 0x1a, 0,
+ AC_VERB_GET_PIN_SENSE, 0) & 0x80000000;
+ if (present)
snd_printdd("CXT5066: external microphone detected\n");
- snd_hda_sequence_write(codec, ext_mic_present);
- } else {
+ else
snd_printdd("CXT5066: external microphone absent\n");
- snd_hda_sequence_write(codec, ext_mic_absent);
- }
+
+ snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
+ present ? 0 : 1);
+ spec->ext_mic_present = !!present;
+
+ cxt5066_olpc_select_mic(codec);
}
/* toggle input of built-in digital mic and mic jack appropriately */
static void cxt5066_vostro_automic(struct hda_codec *codec)
{
- struct conexant_spec *spec = codec->spec;
unsigned int present;
struct hda_verb ext_mic_present[] = {
/* enable external mic, port B */
- {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, spec->ext_mic_bias},
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
/* switch to external mic input */
{0x17, AC_VERB_SET_CONNECT_SEL, 0},
@@ -2063,15 +2175,18 @@ static void cxt5066_hp_automute(struct hda_codec *codec)
}
/* unsolicited event for jack sensing */
-static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
+static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
{
+ struct conexant_spec *spec = codec->spec;
snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
switch (res >> 26) {
case CONEXANT_HP_EVENT:
cxt5066_hp_automute(codec);
break;
case CONEXANT_MIC_EVENT:
- cxt5066_automic(codec);
+ /* ignore mic events in DC mode; we're always using the jack */
+ if (!spec->dc_enable)
+ cxt5066_olpc_automic(codec);
break;
}
}
@@ -2101,6 +2216,15 @@ static const struct hda_input_mux cxt5066_analog_mic_boost = {
},
};
+static int cxt5066_set_mic_boost(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ return snd_hda_codec_write_cache(codec, 0x17, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE,
+ AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | AC_AMP_SET_OUTPUT |
+ cxt5066_analog_mic_boost.items[spec->mic_boost].index);
+}
+
static int cxt5066_mic_boost_mux_enum_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -2111,15 +2235,8 @@ static int cxt5066_mic_boost_mux_enum_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- int val;
- hda_nid_t nid = kcontrol->private_value & 0xff;
- int inout = (kcontrol->private_value & 0x100) ?
- AC_AMP_GET_INPUT : AC_AMP_GET_OUTPUT;
-
- val = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_AMP_GAIN_MUTE, inout);
-
- ucontrol->value.enumerated.item[0] = val & AC_AMP_GAIN;
+ struct conexant_spec *spec = codec->spec;
+ ucontrol->value.enumerated.item[0] = spec->mic_boost;
return 0;
}
@@ -2127,26 +2244,132 @@ static int cxt5066_mic_boost_mux_enum_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct conexant_spec *spec = codec->spec;
const struct hda_input_mux *imux = &cxt5066_analog_mic_boost;
unsigned int idx;
- hda_nid_t nid = kcontrol->private_value & 0xff;
- int inout = (kcontrol->private_value & 0x100) ?
- AC_AMP_SET_INPUT : AC_AMP_SET_OUTPUT;
+ idx = ucontrol->value.enumerated.item[0];
+ if (idx >= imux->num_items)
+ idx = imux->num_items - 1;
+
+ spec->mic_boost = idx;
+ if (!spec->dc_enable)
+ cxt5066_set_mic_boost(codec);
+ return 1;
+}
+
+static void cxt5066_enable_dc(struct hda_codec *codec)
+{
+ const struct hda_verb enable_dc_mode[] = {
+ /* disable gain */
+ {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+
+ /* switch to DC input */
+ {0x17, AC_VERB_SET_CONNECT_SEL, 3},
+ {}
+ };
+
+ /* configure as input source */
+ snd_hda_sequence_write(codec, enable_dc_mode);
+ cxt5066_olpc_select_mic(codec); /* also sets configured bias */
+}
+
+static void cxt5066_disable_dc(struct hda_codec *codec)
+{
+ /* reconfigure input source */
+ cxt5066_set_mic_boost(codec);
+ /* automic also selects the right mic if we're recording */
+ cxt5066_olpc_automic(codec);
+}
+
+static int cxt5066_olpc_dc_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct conexant_spec *spec = codec->spec;
+ ucontrol->value.integer.value[0] = spec->dc_enable;
+ return 0;
+}
+
+static int cxt5066_olpc_dc_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct conexant_spec *spec = codec->spec;
+ int dc_enable = !!ucontrol->value.integer.value[0];
- if (!imux->num_items)
+ if (dc_enable == spec->dc_enable)
return 0;
+
+ spec->dc_enable = dc_enable;
+ if (dc_enable)
+ cxt5066_enable_dc(codec);
+ else
+ cxt5066_disable_dc(codec);
+
+ return 1;
+}
+
+static int cxt5066_olpc_dc_bias_enum_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ return snd_hda_input_mux_info(&cxt5066_olpc_dc_bias, uinfo);
+}
+
+static int cxt5066_olpc_dc_bias_enum_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct conexant_spec *spec = codec->spec;
+ ucontrol->value.enumerated.item[0] = spec->dc_input_bias;
+ return 0;
+}
+
+static int cxt5066_olpc_dc_bias_enum_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct conexant_spec *spec = codec->spec;
+ const struct hda_input_mux *imux = &cxt5066_analog_mic_boost;
+ unsigned int idx;
+
idx = ucontrol->value.enumerated.item[0];
if (idx >= imux->num_items)
idx = imux->num_items - 1;
- snd_hda_codec_write_cache(codec, nid, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AC_AMP_SET_RIGHT | AC_AMP_SET_LEFT | inout |
- imux->items[idx].index);
-
+ spec->dc_input_bias = idx;
+ if (spec->dc_enable)
+ cxt5066_set_olpc_dc_bias(codec);
return 1;
}
+static void cxt5066_olpc_capture_prepare(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ /* mark as recording and configure the microphone widget so that the
+ * recording LED comes on. */
+ spec->recording = 1;
+ cxt5066_olpc_select_mic(codec);
+}
+
+static void cxt5066_olpc_capture_cleanup(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ const struct hda_verb disable_mics[] = {
+ /* disable external mic, port B */
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* disble internal mic, port C */
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+
+ /* disable DC capture, port F */
+ {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
+ {},
+ };
+
+ snd_hda_sequence_write(codec, disable_mics);
+ spec->recording = 0;
+}
+
static struct hda_input_mux cxt5066_capture_source = {
.num_items = 4,
.items = {
@@ -2187,6 +2410,7 @@ static struct snd_kcontrol_new cxt5066_mixer_master_olpc[] = {
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ |
SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK,
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_volume_info,
.get = snd_hda_mixer_amp_volume_get,
.put = snd_hda_mixer_amp_volume_put,
@@ -2198,6 +2422,24 @@ static struct snd_kcontrol_new cxt5066_mixer_master_olpc[] = {
{}
};
+static struct snd_kcontrol_new cxt5066_mixer_olpc_dc[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "DC Mode Enable Switch",
+ .info = snd_ctl_boolean_mono_info,
+ .get = cxt5066_olpc_dc_get,
+ .put = cxt5066_olpc_dc_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "DC Input Bias Enum",
+ .info = cxt5066_olpc_dc_bias_enum_info,
+ .get = cxt5066_olpc_dc_bias_enum_get,
+ .put = cxt5066_olpc_dc_bias_enum_put,
+ },
+ {}
+};
+
static struct snd_kcontrol_new cxt5066_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
@@ -2210,11 +2452,10 @@ static struct snd_kcontrol_new cxt5066_mixers[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Ext Mic Boost Capture Enum",
+ .name = "Analog Mic Boost Capture Enum",
.info = cxt5066_mic_boost_mux_enum_info,
.get = cxt5066_mic_boost_mux_enum_get,
.put = cxt5066_mic_boost_mux_enum_put,
- .private_value = 0x17,
},
HDA_BIND_VOL("Capture Volume", &cxt5066_bind_capture_vol_others),
@@ -2296,10 +2537,10 @@ static struct hda_verb cxt5066_init_verbs_olpc[] = {
{0x19, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC1 */
/* Port B: external microphone */
- {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, CXT5066_OLPC_EXT_MIC_BIAS},
+ {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
/* Port C: internal microphone */
- {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+ {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
/* Port D: unused */
{0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
@@ -2308,7 +2549,7 @@ static struct hda_verb cxt5066_init_verbs_olpc[] = {
{0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
{0x1d, AC_VERB_SET_EAPD_BTLENABLE, 0x2}, /* default on */
- /* Port F: unused */
+ /* Port F: external DC input through microphone port */
{0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0},
/* Port G: internal speakers */
@@ -2428,8 +2669,22 @@ static int cxt5066_init(struct hda_codec *codec)
cxt5066_hp_automute(codec);
if (spec->dell_vostro)
cxt5066_vostro_automic(codec);
- else
- cxt5066_automic(codec);
+ }
+ cxt5066_set_mic_boost(codec);
+ return 0;
+}
+
+static int cxt5066_olpc_init(struct hda_codec *codec)
+{
+ struct conexant_spec *spec = codec->spec;
+ snd_printdd("CXT5066: init\n");
+ conexant_init(codec);
+ cxt5066_hp_automute(codec);
+ if (!spec->dc_enable) {
+ cxt5066_set_mic_boost(codec);
+ cxt5066_olpc_automic(codec);
+ } else {
+ cxt5066_enable_dc(codec);
}
return 0;
}
@@ -2470,7 +2725,7 @@ static int patch_cxt5066(struct hda_codec *codec)
codec->spec = spec;
codec->patch_ops = conexant_patch_ops;
- codec->patch_ops.init = cxt5066_init;
+ codec->patch_ops.init = conexant_init;
spec->dell_automute = 0;
spec->multiout.max_channels = 2;
@@ -2483,7 +2738,6 @@ static int patch_cxt5066(struct hda_codec *codec)
spec->input_mux = &cxt5066_capture_source;
spec->port_d_mode = PIN_HP;
- spec->ext_mic_bias = PIN_VREF80;
spec->num_init_verbs = 1;
spec->init_verbs[0] = cxt5066_init_verbs;
@@ -2510,20 +2764,28 @@ static int patch_cxt5066(struct hda_codec *codec)
spec->dell_automute = 1;
break;
case CXT5066_OLPC_XO_1_5:
- codec->patch_ops.unsol_event = cxt5066_unsol_event;
+ codec->patch_ops.init = cxt5066_olpc_init;
+ codec->patch_ops.unsol_event = cxt5066_olpc_unsol_event;
spec->init_verbs[0] = cxt5066_init_verbs_olpc;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
+ spec->mixers[spec->num_mixers++] = cxt5066_mixer_olpc_dc;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
spec->port_d_mode = 0;
- spec->ext_mic_bias = CXT5066_OLPC_EXT_MIC_BIAS;
+ spec->mic_boost = 3; /* default 30dB gain */
/* no S/PDIF out */
spec->multiout.dig_out_nid = 0;
/* input source automatically selected */
spec->input_mux = NULL;
+
+ /* our capture hooks which allow us to turn on the microphone LED
+ * at the right time */
+ spec->capture_prepare = cxt5066_olpc_capture_prepare;
+ spec->capture_cleanup = cxt5066_olpc_capture_cleanup;
break;
case CXT5066_DELL_VOSTO:
+ codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_vostro_event;
spec->init_verbs[0] = cxt5066_init_verbs_vostro;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
@@ -2531,6 +2793,7 @@ static int patch_cxt5066(struct hda_codec *codec)
spec->mixers[spec->num_mixers++] = cxt5066_vostro_mixers;
spec->port_d_mode = 0;
spec->dell_vostro = 1;
+ spec->mic_boost = 3; /* default 30dB gain */
snd_hda_attach_beep_device(codec, 0x13);
/* no S/PDIF out */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c7465053d6bb..f858dcd8e7de 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -338,7 +338,7 @@ struct alc_spec {
void (*init_hook)(struct hda_codec *codec);
void (*unsol_event)(struct hda_codec *codec, unsigned int res);
#ifdef CONFIG_SND_HDA_POWER_SAVE
- void (*power_hook)(struct hda_codec *codec, int power);
+ void (*power_hook)(struct hda_codec *codec);
#endif
/* for pin sensing */
@@ -391,7 +391,7 @@ struct alc_config_preset {
void (*init_hook)(struct hda_codec *);
#ifdef CONFIG_SND_HDA_POWER_SAVE
struct hda_amp_list *loopbacks;
- void (*power_hook)(struct hda_codec *codec, int power);
+ void (*power_hook)(struct hda_codec *codec);
#endif
};
@@ -633,6 +633,7 @@ static int alc_pin_mode_put(struct snd_kcontrol *kcontrol,
#define ALC_PIN_MODE(xname, nid, dir) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = 0, \
+ .subdevice = HDA_SUBDEV_NID_FLAG | nid, \
.info = alc_pin_mode_info, \
.get = alc_pin_mode_get, \
.put = alc_pin_mode_put, \
@@ -684,6 +685,7 @@ static int alc_gpio_data_put(struct snd_kcontrol *kcontrol,
}
#define ALC_GPIO_DATA_SWITCH(xname, nid, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = 0, \
+ .subdevice = HDA_SUBDEV_NID_FLAG | nid, \
.info = alc_gpio_data_info, \
.get = alc_gpio_data_get, \
.put = alc_gpio_data_put, \
@@ -738,6 +740,7 @@ static int alc_spdif_ctrl_put(struct snd_kcontrol *kcontrol,
}
#define ALC_SPDIF_CTRL_SWITCH(xname, nid, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = 0, \
+ .subdevice = HDA_SUBDEV_NID_FLAG | nid, \
.info = alc_spdif_ctrl_info, \
.get = alc_spdif_ctrl_get, \
.put = alc_spdif_ctrl_put, \
@@ -791,6 +794,7 @@ static int alc_eapd_ctrl_put(struct snd_kcontrol *kcontrol,
#define ALC_EAPD_CTRL_SWITCH(xname, nid, mask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = 0, \
+ .subdevice = HDA_SUBDEV_NID_FLAG | nid, \
.info = alc_eapd_ctrl_info, \
.get = alc_eapd_ctrl_get, \
.put = alc_eapd_ctrl_put, \
@@ -1831,16 +1835,6 @@ static void alc889_acer_aspire_8930g_setup(struct hda_codec *codec)
spec->autocfg.speaker_pins[2] = 0x1b;
}
-#ifdef CONFIG_SND_HDA_POWER_SAVE
-static void alc889_power_eapd(struct hda_codec *codec, int power)
-{
- snd_hda_codec_write(codec, 0x14, 0,
- AC_VERB_SET_EAPD_BTLENABLE, power ? 2 : 0);
- snd_hda_codec_write(codec, 0x15, 0,
- AC_VERB_SET_EAPD_BTLENABLE, power ? 2 : 0);
-}
-#endif
-
/*
* ALC880 3-stack model
*
@@ -2443,6 +2437,15 @@ static const char *alc_slave_sws[] = {
* build control elements
*/
+#define NID_MAPPING (-1)
+
+#define SUBDEV_SPEAKER_ (0 << 6)
+#define SUBDEV_HP_ (1 << 6)
+#define SUBDEV_LINE_ (2 << 6)
+#define SUBDEV_SPEAKER(x) (SUBDEV_SPEAKER_ | ((x) & 0x3f))
+#define SUBDEV_HP(x) (SUBDEV_HP_ | ((x) & 0x3f))
+#define SUBDEV_LINE(x) (SUBDEV_LINE_ | ((x) & 0x3f))
+
static void alc_free_kctls(struct hda_codec *codec);
#ifdef CONFIG_SND_HDA_INPUT_BEEP
@@ -2457,8 +2460,11 @@ static struct snd_kcontrol_new alc_beep_mixer[] = {
static int alc_build_controls(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- int err;
- int i;
+ struct snd_kcontrol *kctl;
+ struct snd_kcontrol_new *knew;
+ int i, j, err;
+ unsigned int u;
+ hda_nid_t nid;
for (i = 0; i < spec->num_mixers; i++) {
err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
@@ -2499,8 +2505,7 @@ static int alc_build_controls(struct hda_codec *codec)
if (!kctl)
return -ENOMEM;
kctl->private_value = spec->beep_amp;
- err = snd_hda_ctl_add(codec,
- get_amp_nid_(spec->beep_amp), kctl);
+ err = snd_hda_ctl_add(codec, 0, kctl);
if (err < 0)
return err;
}
@@ -2527,6 +2532,75 @@ static int alc_build_controls(struct hda_codec *codec)
}
alc_free_kctls(codec); /* no longer needed */
+
+ /* assign Capture Source enums to NID */
+ kctl = snd_hda_find_mixer_ctl(codec, "Capture Source");
+ if (!kctl)
+ kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
+ for (i = 0; kctl && i < kctl->count; i++) {
+ hda_nid_t *nids = spec->capsrc_nids;
+ if (!nids)
+ nids = spec->adc_nids;
+ err = snd_hda_add_nid(codec, kctl, i, nids[i]);
+ if (err < 0)
+ return err;
+ }
+ if (spec->cap_mixer) {
+ const char *kname = kctl ? kctl->id.name : NULL;
+ for (knew = spec->cap_mixer; knew->name; knew++) {
+ if (kname && strcmp(knew->name, kname) == 0)
+ continue;
+ kctl = snd_hda_find_mixer_ctl(codec, knew->name);
+ for (i = 0; kctl && i < kctl->count; i++) {
+ err = snd_hda_add_nid(codec, kctl, i,
+ spec->adc_nids[i]);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
+
+ /* other nid->control mapping */
+ for (i = 0; i < spec->num_mixers; i++) {
+ for (knew = spec->mixers[i]; knew->name; knew++) {
+ if (knew->iface != NID_MAPPING)
+ continue;
+ kctl = snd_hda_find_mixer_ctl(codec, knew->name);
+ if (kctl == NULL)
+ continue;
+ u = knew->subdevice;
+ for (j = 0; j < 4; j++, u >>= 8) {
+ nid = u & 0x3f;
+ if (nid == 0)
+ continue;
+ switch (u & 0xc0) {
+ case SUBDEV_SPEAKER_:
+ nid = spec->autocfg.speaker_pins[nid];
+ break;
+ case SUBDEV_LINE_:
+ nid = spec->autocfg.line_out_pins[nid];
+ break;
+ case SUBDEV_HP_:
+ nid = spec->autocfg.hp_pins[nid];
+ break;
+ default:
+ continue;
+ }
+ err = snd_hda_add_nid(codec, kctl, 0, nid);
+ if (err < 0)
+ return err;
+ }
+ u = knew->private_value;
+ for (j = 0; j < 4; j++, u >>= 8) {
+ nid = u & 0xff;
+ if (nid == 0)
+ continue;
+ err = snd_hda_add_nid(codec, kctl, 0, nid);
+ if (err < 0)
+ return err;
+ }
+ }
+ }
return 0;
}
@@ -3609,6 +3683,11 @@ static int alc_build_pcms(struct hda_codec *codec)
return 0;
}
+static inline void alc_shutup(struct hda_codec *codec)
+{
+ snd_hda_shutup_pins(codec);
+}
+
static void alc_free_kctls(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
@@ -3629,17 +3708,47 @@ static void alc_free(struct hda_codec *codec)
if (!spec)
return;
+ alc_shutup(codec);
alc_free_kctls(codec);
kfree(spec);
snd_hda_detach_beep_device(codec);
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
+static void alc_power_eapd(struct hda_codec *codec)
+{
+ /* We currently only handle front, HP */
+ switch (codec->vendor_id) {
+ case 0x10ec0260:
+ snd_hda_codec_write(codec, 0x0f, 0,
+ AC_VERB_SET_EAPD_BTLENABLE, 0x00);
+ snd_hda_codec_write(codec, 0x10, 0,
+ AC_VERB_SET_EAPD_BTLENABLE, 0x00);
+ break;
+ case 0x10ec0262:
+ case 0x10ec0267:
+ case 0x10ec0268:
+ case 0x10ec0269:
+ case 0x10ec0272:
+ case 0x10ec0660:
+ case 0x10ec0662:
+ case 0x10ec0663:
+ case 0x10ec0862:
+ case 0x10ec0889:
+ snd_hda_codec_write(codec, 0x14, 0,
+ AC_VERB_SET_EAPD_BTLENABLE, 0x00);
+ snd_hda_codec_write(codec, 0x15, 0,
+ AC_VERB_SET_EAPD_BTLENABLE, 0x00);
+ break;
+ }
+}
+
static int alc_suspend(struct hda_codec *codec, pm_message_t state)
{
struct alc_spec *spec = codec->spec;
+ alc_shutup(codec);
if (spec && spec->power_hook)
- spec->power_hook(codec, 0);
+ spec->power_hook(codec);
return 0;
}
#endif
@@ -3647,16 +3756,9 @@ static int alc_suspend(struct hda_codec *codec, pm_message_t state)
#ifdef SND_HDA_NEEDS_RESUME
static int alc_resume(struct hda_codec *codec)
{
-#ifdef CONFIG_SND_HDA_POWER_SAVE
- struct alc_spec *spec = codec->spec;
-#endif
codec->patch_ops.init(codec);
snd_hda_codec_resume_amp(codec);
snd_hda_codec_resume_cache(codec);
-#ifdef CONFIG_SND_HDA_POWER_SAVE
- if (spec && spec->power_hook)
- spec->power_hook(codec, 1);
-#endif
return 0;
}
#endif
@@ -3676,6 +3778,7 @@ static struct hda_codec_ops alc_patch_ops = {
.suspend = alc_suspend,
.check_power_status = alc_check_power_status,
#endif
+ .reboot_notify = alc_shutup,
};
@@ -3832,6 +3935,7 @@ static int alc_test_pin_src_put(struct snd_kcontrol *kcontrol,
#define PIN_CTL_TEST(xname,nid) { \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
+ .subdevice = HDA_SUBDEV_NID_FLAG | nid, \
.info = alc_test_pin_ctl_info, \
.get = alc_test_pin_ctl_get, \
.put = alc_test_pin_ctl_put, \
@@ -3841,6 +3945,7 @@ static int alc_test_pin_src_put(struct snd_kcontrol *kcontrol,
#define PIN_SRC_TEST(xname,nid) { \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
+ .subdevice = HDA_SUBDEV_NID_FLAG | nid, \
.info = alc_test_pin_src_info, \
.get = alc_test_pin_src_get, \
.put = alc_test_pin_src_put, \
@@ -4380,7 +4485,7 @@ static int add_control(struct alc_spec *spec, int type, const char *name,
if (!knew->name)
return -ENOMEM;
if (get_amp_nid_(val))
- knew->subdevice = HDA_SUBDEV_NID_FLAG | get_amp_nid_(val);
+ knew->subdevice = HDA_SUBDEV_AMP_FLAG;
knew->private_value = val;
return 0;
}
@@ -5131,6 +5236,7 @@ static struct snd_kcontrol_new alc260_hp_output_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_NID_FLAG | 0x11,
.info = snd_ctl_boolean_mono_info,
.get = alc260_hp_master_sw_get,
.put = alc260_hp_master_sw_put,
@@ -5169,6 +5275,7 @@ static struct snd_kcontrol_new alc260_hp_3013_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_NID_FLAG | 0x11,
.info = snd_ctl_boolean_mono_info,
.get = alc260_hp_master_sw_get,
.put = alc260_hp_master_sw_put,
@@ -9452,7 +9559,7 @@ static struct alc_config_preset alc882_presets[] = {
.setup = alc889_acer_aspire_8930g_setup,
.init_hook = alc_automute_amp,
#ifdef CONFIG_SND_HDA_POWER_SAVE
- .power_hook = alc889_power_eapd,
+ .power_hook = alc_power_eapd,
#endif
},
[ALC888_ACER_ASPIRE_7730G] = {
@@ -10248,8 +10355,14 @@ static int alc262_hp_master_sw_put(struct snd_kcontrol *kcontrol,
.info = snd_ctl_boolean_mono_info, \
.get = alc262_hp_master_sw_get, \
.put = alc262_hp_master_sw_put, \
+ }, \
+ { \
+ .iface = NID_MAPPING, \
+ .name = "Master Playback Switch", \
+ .private_value = 0x15 | (0x16 << 8) | (0x1b << 16), \
}
+
static struct snd_kcontrol_new alc262_HP_BPC_mixer[] = {
ALC262_HP_MASTER_SWITCH,
HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
@@ -10407,6 +10520,12 @@ static int alc262_hippo_master_sw_put(struct snd_kcontrol *kcontrol,
.info = snd_ctl_boolean_mono_info, \
.get = alc262_hippo_master_sw_get, \
.put = alc262_hippo_master_sw_put, \
+ }, \
+ { \
+ .iface = NID_MAPPING, \
+ .name = "Master Playback Switch", \
+ .subdevice = SUBDEV_HP(0) | (SUBDEV_LINE(0) << 8) | \
+ (SUBDEV_SPEAKER(0) << 16), \
}
static struct snd_kcontrol_new alc262_hippo_mixer[] = {
@@ -10887,11 +11006,17 @@ static struct snd_kcontrol_new alc262_fujitsu_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = alc262_fujitsu_master_sw_put,
.private_value = HDA_COMPOSE_AMP_VAL(0x14, 3, 0, HDA_OUTPUT),
},
+ {
+ .iface = NID_MAPPING,
+ .name = "Master Playback Switch",
+ .private_value = 0x1b,
+ },
HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
@@ -10922,6 +11047,7 @@ static struct snd_kcontrol_new alc262_lenovo_3000_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = alc262_lenovo_3000_master_sw_put,
@@ -11076,6 +11202,11 @@ static struct snd_kcontrol_new alc262_ultra_capture_mixer[] = {
.get = alc_mux_enum_get,
.put = alc262_ultra_mux_enum_put,
},
+ {
+ .iface = NID_MAPPING,
+ .name = "Capture Source",
+ .private_value = 0x15,
+ },
{ } /* end */
};
@@ -12094,6 +12225,7 @@ static struct snd_kcontrol_new alc268_acer_aspire_one_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = alc268_acer_master_sw_put,
@@ -12109,6 +12241,7 @@ static struct snd_kcontrol_new alc268_acer_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = alc268_acer_master_sw_put,
@@ -12126,6 +12259,7 @@ static struct snd_kcontrol_new alc268_acer_dmic_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = alc268_acer_master_sw_put,
@@ -13078,6 +13212,7 @@ static struct snd_kcontrol_new alc269_quanta_fl1_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = alc268_acer_master_sw_put,
@@ -13098,6 +13233,7 @@ static struct snd_kcontrol_new alc269_lifebook_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Master Playback Switch",
+ .subdevice = HDA_SUBDEV_AMP_FLAG,
.info = snd_hda_mixer_amp_switch_info,
.get = snd_hda_mixer_amp_switch_get,
.put = alc268_acer_master_sw_put,
@@ -14860,9 +14996,13 @@ static int patch_alc861(struct hda_codec *codec)
spec->vmaster_nid = 0x03;
codec->patch_ops = alc_patch_ops;
- if (board_config == ALC861_AUTO)
+ if (board_config == ALC861_AUTO) {
spec->init_hook = alc861_auto_init;
#ifdef CONFIG_SND_HDA_POWER_SAVE
+ spec->power_hook = alc_power_eapd;
+#endif
+ }
+#ifdef CONFIG_SND_HDA_POWER_SAVE
if (!spec->loopback.amplist)
spec->loopback.amplist = alc861_loopbacks;
#endif
@@ -15493,7 +15633,7 @@ static struct alc_config_preset alc861vd_presets[] = {
static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
const struct auto_pin_cfg *cfg)
{
- return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
+ return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x22, 0);
}
diff --git a/sound/pci/hda/patch_si3054.c b/sound/pci/hda/patch_si3054.c
index 43b436c5d01b..f419ee8d75f0 100644
--- a/sound/pci/hda/patch_si3054.c
+++ b/sound/pci/hda/patch_si3054.c
@@ -122,6 +122,7 @@ static int si3054_switch_put(struct snd_kcontrol *kcontrol,
#define SI3054_KCONTROL(kname,reg,mask) { \
.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = kname, \
+ .subdevice = HDA_SUBDEV_NID_FLAG | reg, \
.info = si3054_switch_info, \
.get = si3054_switch_get, \
.put = si3054_switch_put, \
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 2291a8396817..117919aa17f4 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2688,7 +2688,7 @@ static struct snd_kcontrol_new *
stac_control_new(struct sigmatel_spec *spec,
struct snd_kcontrol_new *ktemp,
const char *name,
- hda_nid_t nid)
+ unsigned int subdev)
{
struct snd_kcontrol_new *knew;
@@ -2704,8 +2704,7 @@ stac_control_new(struct sigmatel_spec *spec,
spec->kctls.alloced--;
return NULL;
}
- if (nid)
- knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
+ knew->subdevice = subdev;
return knew;
}
@@ -2715,7 +2714,7 @@ static int stac92xx_add_control_temp(struct sigmatel_spec *spec,
unsigned long val)
{
struct snd_kcontrol_new *knew = stac_control_new(spec, ktemp, name,
- get_amp_nid_(val));
+ HDA_SUBDEV_AMP_FLAG);
if (!knew)
return -ENOMEM;
knew->index = idx;
@@ -4160,34 +4159,52 @@ static void stac92xx_power_down(struct hda_codec *codec)
static void stac_toggle_power_map(struct hda_codec *codec, hda_nid_t nid,
int enable);
+static inline int get_int_hint(struct hda_codec *codec, const char *key,
+ int *valp)
+{
+ const char *p;
+ p = snd_hda_get_hint(codec, key);
+ if (p) {
+ unsigned long val;
+ if (!strict_strtoul(p, 0, &val)) {
+ *valp = val;
+ return 1;
+ }
+ }
+ return 0;
+}
+
/* override some hints from the hwdep entry */
static void stac_store_hints(struct hda_codec *codec)
{
struct sigmatel_spec *spec = codec->spec;
- const char *p;
int val;
val = snd_hda_get_bool_hint(codec, "hp_detect");
if (val >= 0)
spec->hp_detect = val;
- p = snd_hda_get_hint(codec, "gpio_mask");
- if (p) {
- spec->gpio_mask = simple_strtoul(p, NULL, 0);
+ if (get_int_hint(codec, "gpio_mask", &spec->gpio_mask)) {
spec->eapd_mask = spec->gpio_dir = spec->gpio_data =
spec->gpio_mask;
}
- p = snd_hda_get_hint(codec, "gpio_dir");
- if (p)
- spec->gpio_dir = simple_strtoul(p, NULL, 0) & spec->gpio_mask;
- p = snd_hda_get_hint(codec, "gpio_data");
- if (p)
- spec->gpio_data = simple_strtoul(p, NULL, 0) & spec->gpio_mask;
- p = snd_hda_get_hint(codec, "eapd_mask");
- if (p)
- spec->eapd_mask = simple_strtoul(p, NULL, 0) & spec->gpio_mask;
+ if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
+ spec->gpio_mask &= spec->gpio_mask;
+ if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+ spec->gpio_dir &= spec->gpio_mask;
+ if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
+ spec->eapd_mask &= spec->gpio_mask;
+ if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
+ spec->gpio_mute &= spec->gpio_mask;
val = snd_hda_get_bool_hint(codec, "eapd_switch");
if (val >= 0)
spec->eapd_switch = val;
+ get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
+ if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
+ spec->gpio_mask |= spec->gpio_led;
+ spec->gpio_dir |= spec->gpio_led;
+ if (spec->gpio_led_polarity)
+ spec->gpio_data |= spec->gpio_led;
+ }
}
static int stac92xx_init(struct hda_codec *codec)
@@ -4372,18 +4389,8 @@ static void stac92xx_free_kctls(struct hda_codec *codec)
static void stac92xx_shutup(struct hda_codec *codec)
{
struct sigmatel_spec *spec = codec->spec;
- int i;
- hda_nid_t nid;
- /* reset each pin before powering down DAC/ADC to avoid click noise */
- nid = codec->start_nid;
- for (i = 0; i < codec->num_nodes; i++, nid++) {
- unsigned int wcaps = get_wcaps(codec, nid);
- unsigned int wid_type = get_wcaps_type(wcaps);
- if (wid_type == AC_WID_PIN)
- snd_hda_codec_read(codec, nid, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0);
- }
+ snd_hda_shutup_pins(codec);
if (spec->eapd_mask)
stac_gpio_set(codec, spec->gpio_mask,
@@ -5404,6 +5411,54 @@ static int stac92hd71bxx_connected_smuxes(struct hda_codec *codec,
return 0;
}
+/* HP dv7 bass switch - GPIO5 */
+#define stac_hp_bass_gpio_info snd_ctl_boolean_mono_info
+static int stac_hp_bass_gpio_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct sigmatel_spec *spec = codec->spec;
+ ucontrol->value.integer.value[0] = !!(spec->gpio_data & 0x20);
+ return 0;
+}
+
+static int stac_hp_bass_gpio_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct sigmatel_spec *spec = codec->spec;
+ unsigned int gpio_data;
+
+ gpio_data = (spec->gpio_data & ~0x20) |
+ (ucontrol->value.integer.value[0] ? 0x20 : 0);
+ if (gpio_data == spec->gpio_data)
+ return 0;
+ spec->gpio_data = gpio_data;
+ stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
+ return 1;
+}
+
+static struct snd_kcontrol_new stac_hp_bass_sw_ctrl = {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .info = stac_hp_bass_gpio_info,
+ .get = stac_hp_bass_gpio_get,
+ .put = stac_hp_bass_gpio_put,
+};
+
+static int stac_add_hp_bass_switch(struct hda_codec *codec)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (!stac_control_new(spec, &stac_hp_bass_sw_ctrl,
+ "Bass Speaker Playback Switch", 0))
+ return -ENOMEM;
+
+ spec->gpio_mask |= 0x20;
+ spec->gpio_dir |= 0x20;
+ spec->gpio_data |= 0x20;
+ return 0;
+}
+
static int patch_stac92hd71bxx(struct hda_codec *codec)
{
struct sigmatel_spec *spec;
@@ -5645,6 +5700,15 @@ again:
return err;
}
+ /* enable bass on HP dv7 */
+ if (spec->board_config == STAC_HP_DV5) {
+ unsigned int cap;
+ cap = snd_hda_param_read(codec, 0x1, AC_PAR_GPIO_CAP);
+ cap &= AC_GPIO_IO_COUNT;
+ if (cap >= 6)
+ stac_add_hp_bass_switch(codec);
+ }
+
codec->proc_widget_hook = stac92hd7x_proc_hook;
return 0;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index b70e26ad263f..9ddc37300f6b 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -54,6 +54,8 @@
#include "hda_codec.h"
#include "hda_local.h"
+#define NID_MAPPING (-1)
+
/* amp values */
#define AMP_VAL_IDX_SHIFT 19
#define AMP_VAL_IDX_MASK (0x0f<<19)
@@ -157,6 +159,19 @@ struct via_spec {
#endif
};
+static struct via_spec * via_new_spec(struct hda_codec *codec)
+{
+ struct via_spec *spec;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (spec == NULL)
+ return NULL;
+
+ codec->spec = spec;
+ spec->codec = codec;
+ return spec;
+}
+
static enum VIA_HDA_CODEC get_codec_type(struct hda_codec *codec)
{
u32 vendor_id = codec->vendor_id;
@@ -443,11 +458,27 @@ static int via_add_control(struct via_spec *spec, int type, const char *name,
if (!knew->name)
return -ENOMEM;
if (get_amp_nid_(val))
- knew->subdevice = HDA_SUBDEV_NID_FLAG | get_amp_nid_(val);
+ knew->subdevice = HDA_SUBDEV_AMP_FLAG;
knew->private_value = val;
return 0;
}
+static struct snd_kcontrol_new *via_clone_control(struct via_spec *spec,
+ struct snd_kcontrol_new *tmpl)
+{
+ struct snd_kcontrol_new *knew;
+
+ snd_array_init(&spec->kctls, sizeof(*knew), 32);
+ knew = snd_array_new(&spec->kctls);
+ if (!knew)
+ return NULL;
+ *knew = *tmpl;
+ knew->name = kstrdup(tmpl->name, GFP_KERNEL);
+ if (!knew->name)
+ return NULL;
+ return 0;
+}
+
static void via_free_kctls(struct hda_codec *codec)
{
struct via_spec *spec = codec->spec;
@@ -1088,24 +1119,9 @@ static int via_independent_hp_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct via_spec *spec = codec->spec;
- hda_nid_t nid;
+ hda_nid_t nid = kcontrol->private_value;
unsigned int pinsel;
- switch (spec->codec_type) {
- case VT1718S:
- nid = 0x34;
- break;
- case VT2002P:
- nid = 0x35;
- break;
- case VT1812:
- nid = 0x3d;
- break;
- default:
- nid = spec->autocfg.hp_pins[0];
- break;
- }
/* use !! to translate conn sel 2 for VT1718S */
pinsel = !!snd_hda_codec_read(codec, nid, 0,
AC_VERB_GET_CONNECT_SEL,
@@ -1127,29 +1143,24 @@ static void activate_ctl(struct hda_codec *codec, const char *name, int active)
}
}
+static hda_nid_t side_mute_channel(struct via_spec *spec)
+{
+ switch (spec->codec_type) {
+ case VT1708: return 0x1b;
+ case VT1709_10CH: return 0x29;
+ case VT1708B_8CH: /* fall thru */
+ case VT1708S: return 0x27;
+ default: return 0;
+ }
+}
+
static int update_side_mute_status(struct hda_codec *codec)
{
/* mute side channel */
struct via_spec *spec = codec->spec;
unsigned int parm = spec->hp_independent_mode
? AMP_OUT_MUTE : AMP_OUT_UNMUTE;
- hda_nid_t sw3;
-
- switch (spec->codec_type) {
- case VT1708:
- sw3 = 0x1b;
- break;
- case VT1709_10CH:
- sw3 = 0x29;
- break;
- case VT1708B_8CH:
- case VT1708S:
- sw3 = 0x27;
- break;
- default:
- sw3 = 0;
- break;
- }
+ hda_nid_t sw3 = side_mute_channel(spec);
if (sw3)
snd_hda_codec_write(codec, sw3, 0, AC_VERB_SET_AMP_GAIN_MUTE,
@@ -1162,28 +1173,11 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct via_spec *spec = codec->spec;
- hda_nid_t nid = spec->autocfg.hp_pins[0];
+ hda_nid_t nid = kcontrol->private_value;
unsigned int pinsel = ucontrol->value.enumerated.item[0];
/* Get Independent Mode index of headphone pin widget */
spec->hp_independent_mode = spec->hp_independent_mode_index == pinsel
? 1 : 0;
-
- switch (spec->codec_type) {
- case VT1718S:
- nid = 0x34;
- pinsel = pinsel ? 2 : 0; /* indep HP use AOW4 (index 2) */
- spec->multiout.num_dacs = 4;
- break;
- case VT2002P:
- nid = 0x35;
- break;
- case VT1812:
- nid = 0x3d;
- break;
- default:
- nid = spec->autocfg.hp_pins[0];
- break;
- }
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, pinsel);
if (spec->multiout.hp_nid && spec->multiout.hp_nid
@@ -1207,18 +1201,55 @@ static int via_independent_hp_put(struct snd_kcontrol *kcontrol,
return 0;
}
-static struct snd_kcontrol_new via_hp_mixer[] = {
+static struct snd_kcontrol_new via_hp_mixer[2] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Independent HP",
- .count = 1,
.info = via_independent_hp_info,
.get = via_independent_hp_get,
.put = via_independent_hp_put,
},
- { } /* end */
+ {
+ .iface = NID_MAPPING,
+ .name = "Independent HP",
+ },
};
+static int via_hp_build(struct via_spec *spec)
+{
+ struct snd_kcontrol_new *knew;
+ hda_nid_t nid;
+
+ knew = via_clone_control(spec, &via_hp_mixer[0]);
+ if (knew == NULL)
+ return -ENOMEM;
+
+ switch (spec->codec_type) {
+ case VT1718S:
+ nid = 0x34;
+ break;
+ case VT2002P:
+ nid = 0x35;
+ break;
+ case VT1812:
+ nid = 0x3d;
+ break;
+ default:
+ nid = spec->autocfg.hp_pins[0];
+ break;
+ }
+
+ knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
+ knew->private_value = nid;
+
+ knew = via_clone_control(spec, &via_hp_mixer[1]);
+ if (knew == NULL)
+ return -ENOMEM;
+ knew->subdevice = side_mute_channel(spec);
+
+ return 0;
+}
+
static void notify_aa_path_ctls(struct hda_codec *codec)
{
int i;
@@ -1376,7 +1407,7 @@ static int via_smart51_put(struct snd_kcontrol *kcontrol,
return 1;
}
-static struct snd_kcontrol_new via_smart51_mixer[] = {
+static struct snd_kcontrol_new via_smart51_mixer[2] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Smart 5.1",
@@ -1385,9 +1416,36 @@ static struct snd_kcontrol_new via_smart51_mixer[] = {
.get = via_smart51_get,
.put = via_smart51_put,
},
- {} /* end */
+ {
+ .iface = NID_MAPPING,
+ .name = "Smart 5.1",
+ }
};
+static int via_smart51_build(struct via_spec *spec)
+{
+ struct snd_kcontrol_new *knew;
+ int index[] = { AUTO_PIN_MIC, AUTO_PIN_FRONT_MIC, AUTO_PIN_LINE };
+ hda_nid_t nid;
+ int i;
+
+ knew = via_clone_control(spec, &via_smart51_mixer[0]);
+ if (knew == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(index); i++) {
+ nid = spec->autocfg.input_pins[index[i]];
+ if (nid) {
+ knew = via_clone_control(spec, &via_smart51_mixer[1]);
+ if (knew == NULL)
+ return -ENOMEM;
+ knew->subdevice = nid;
+ }
+ }
+
+ return 0;
+}
+
/* capture mixer elements */
static struct snd_kcontrol_new vt1708_capture_mixer[] = {
HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_INPUT),
@@ -1819,8 +1877,9 @@ static struct hda_pcm_stream vt1708_pcm_digital_capture = {
static int via_build_controls(struct hda_codec *codec)
{
struct via_spec *spec = codec->spec;
- int err;
- int i;
+ struct snd_kcontrol *kctl;
+ struct snd_kcontrol_new *knew;
+ int err, i;
for (i = 0; i < spec->num_mixers; i++) {
err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
@@ -1845,6 +1904,27 @@ static int via_build_controls(struct hda_codec *codec)
return err;
}
+ /* assign Capture Source enums to NID */
+ kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
+ for (i = 0; kctl && i < kctl->count; i++) {
+ err = snd_hda_add_nid(codec, kctl, i, spec->mux_nids[i]);
+ if (err < 0)
+ return err;
+ }
+
+ /* other nid->control mapping */
+ for (i = 0; i < spec->num_mixers; i++) {
+ for (knew = spec->mixers[i]; knew->name; knew++) {
+ if (knew->iface != NID_MAPPING)
+ continue;
+ kctl = snd_hda_find_mixer_ctl(codec, knew->name);
+ if (kctl == NULL)
+ continue;
+ err = snd_hda_add_nid(codec, kctl, 0,
+ knew->subdevice);
+ }
+ }
+
/* init power states */
set_jack_power_state(codec);
analog_low_current_mode(codec, 1);
@@ -2481,9 +2561,9 @@ static int vt1708_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
- spec->mixers[spec->num_mixers++] = via_smart51_mixer;
+ via_smart51_build(spec);
return 1;
}
@@ -2554,12 +2634,10 @@ static int patch_vt1708(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1708_parse_auto_config(codec);
if (err < 0) {
@@ -2597,7 +2675,6 @@ static int patch_vt1708(struct hda_codec *codec)
#ifdef CONFIG_SND_HDA_POWER_SAVE
spec->loopback.amplist = vt1708_loopbacks;
#endif
- spec->codec = codec;
INIT_DELAYED_WORK(&spec->vt1708_hp_work, vt1708_update_hp_jack_state);
return 0;
}
@@ -3010,9 +3087,9 @@ static int vt1709_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
- spec->mixers[spec->num_mixers++] = via_smart51_mixer;
+ via_smart51_build(spec);
return 1;
}
@@ -3032,12 +3109,10 @@ static int patch_vt1709_10ch(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
err = vt1709_parse_auto_config(codec);
if (err < 0) {
via_free(codec);
@@ -3126,12 +3201,10 @@ static int patch_vt1709_6ch(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
err = vt1709_parse_auto_config(codec);
if (err < 0) {
via_free(codec);
@@ -3581,9 +3654,9 @@ static int vt1708B_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
- spec->mixers[spec->num_mixers++] = via_smart51_mixer;
+ via_smart51_build(spec);
return 1;
}
@@ -3605,12 +3678,10 @@ static int patch_vt1708B_8ch(struct hda_codec *codec)
if (get_codec_type(codec) == VT1708BCE)
return patch_vt1708S(codec);
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1708B_parse_auto_config(codec);
if (err < 0) {
@@ -3657,12 +3728,10 @@ static int patch_vt1708B_4ch(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1708B_parse_auto_config(codec);
if (err < 0) {
@@ -4071,9 +4140,9 @@ static int vt1708S_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
- spec->mixers[spec->num_mixers++] = via_smart51_mixer;
+ via_smart51_build(spec);
return 1;
}
@@ -4103,12 +4172,10 @@ static int patch_vt1708S(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1708S_parse_auto_config(codec);
if (err < 0) {
@@ -4443,7 +4510,7 @@ static int vt1702_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
return 1;
}
@@ -4464,12 +4531,10 @@ static int patch_vt1702(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1702_parse_auto_config(codec);
if (err < 0) {
@@ -4865,9 +4930,9 @@ static int vt1718S_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
- spec->mixers[spec->num_mixers++] = via_smart51_mixer;
+ via_smart51_build(spec);
return 1;
}
@@ -4888,12 +4953,10 @@ static int patch_vt1718S(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1718S_parse_auto_config(codec);
if (err < 0) {
@@ -5014,6 +5077,7 @@ static struct snd_kcontrol_new vt1716s_dmic_mixer[] = {
{
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Digital Mic Capture Switch",
+ .subdevice = HDA_SUBDEV_NID_FLAG | 0x26,
.count = 1,
.info = vt1716s_dmic_info,
.get = vt1716s_dmic_get,
@@ -5361,9 +5425,9 @@ static int vt1716S_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
- spec->mixers[spec->num_mixers++] = via_smart51_mixer;
+ via_smart51_build(spec);
return 1;
}
@@ -5384,12 +5448,10 @@ static int patch_vt1716S(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1716S_parse_auto_config(codec);
if (err < 0) {
@@ -5719,7 +5781,7 @@ static int vt2002P_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
return 1;
}
@@ -5741,12 +5803,10 @@ static int patch_vt2002P(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt2002P_parse_auto_config(codec);
if (err < 0) {
@@ -6070,7 +6130,7 @@ static int vt1812_parse_auto_config(struct hda_codec *codec)
spec->input_mux = &spec->private_imux[0];
if (spec->hp_mux)
- spec->mixers[spec->num_mixers++] = via_hp_mixer;
+ via_hp_build(spec);
return 1;
}
@@ -6092,12 +6152,10 @@ static int patch_vt1812(struct hda_codec *codec)
int err;
/* create a codec specific record */
- spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ spec = via_new_spec(codec);
if (spec == NULL)
return -ENOMEM;
- codec->spec = spec;
-
/* automatic parse from the BIOS config */
err = vt1812_parse_auto_config(codec);
if (err < 0) {
diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index b5ca02e2038c..e66ef2b69b5d 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -1058,7 +1058,7 @@ setsamplerate(struct cmdif *cif, unsigned char *intdec, unsigned int rate)
rptr.retwords[2] != M &&
rptr.retwords[3] != N &&
i++ < MAX_WRITE_RETRY);
- if (i == MAX_WRITE_RETRY) {
+ if (i > MAX_WRITE_RETRY) {
snd_printdd("sent samplerate %d: %d failed\n",
*intdec, rate);
return -EIO;
diff --git a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
index 5cfa608823f7..0afa683c900e 100644
--- a/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
+++ b/sound/pcmcia/pdaudiocf/pdaudiocf_pcm.c
@@ -21,7 +21,6 @@
*/
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/asoundef.h>
@@ -29,49 +28,6 @@
/*
- * we use a vmalloc'ed (sg-)buffer
- */
-
-/* get the physical page pointer on the given offset */
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs, unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
- return vmalloc_to_page(pageptr);
-}
-
-/*
- * hw_params callback
- * NOTE: this may be called not only once per pcm open!
- */
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t size)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
- if (runtime->dma_area) {
- if (runtime->dma_bytes >= size)
- return 0; /* already enough large */
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc_32_user(size);
- if (! runtime->dma_area)
- return -ENOMEM;
- runtime->dma_bytes = size;
- return 0;
-}
-
-/*
- * hw_free callback
- * NOTE: this may be called not only once per pcm open!
- */
-static int snd_pcm_free_vmalloc_buffer(struct snd_pcm_substream *subs)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
-
- vfree(runtime->dma_area);
- runtime->dma_area = NULL;
- return 0;
-}
-
-/*
* clear the SRAM contents
*/
static int pdacf_pcm_clear_sram(struct snd_pdacf *chip)
@@ -147,7 +103,8 @@ static int pdacf_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
static int pdacf_pcm_hw_params(struct snd_pcm_substream *subs,
struct snd_pcm_hw_params *hw_params)
{
- return snd_pcm_alloc_vmalloc_buffer(subs, params_buffer_bytes(hw_params));
+ return snd_pcm_lib_alloc_vmalloc_32_buffer
+ (subs, params_buffer_bytes(hw_params));
}
/*
@@ -155,7 +112,7 @@ static int pdacf_pcm_hw_params(struct snd_pcm_substream *subs,
*/
static int pdacf_pcm_hw_free(struct snd_pcm_substream *subs)
{
- return snd_pcm_free_vmalloc_buffer(subs);
+ return snd_pcm_lib_free_vmalloc_buffer(subs);
}
/*
@@ -319,7 +276,7 @@ static struct snd_pcm_ops pdacf_pcm_capture_ops = {
.prepare = pdacf_pcm_prepare,
.trigger = pdacf_pcm_trigger,
.pointer = pdacf_pcm_capture_pointer,
- .page = snd_pcm_get_vmalloc_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
diff --git a/sound/soc/au1x/Kconfig b/sound/soc/au1x/Kconfig
index 410a893aa66b..4b67140fdec3 100644
--- a/sound/soc/au1x/Kconfig
+++ b/sound/soc/au1x/Kconfig
@@ -22,11 +22,13 @@ config SND_SOC_AU1XPSC_AC97
##
## Boards
##
-config SND_SOC_SAMPLE_PSC_AC97
- tristate "Sample Au12x0/Au1550 PSC AC97 sound machine"
+config SND_SOC_DB1200
+ tristate "DB1200 AC97+I2S audio support"
depends on SND_SOC_AU1XPSC
select SND_SOC_AU1XPSC_AC97
select SND_SOC_AC97_CODEC
+ select SND_SOC_AU1XPSC_I2S
+ select SND_SOC_WM8731
help
- This is a sample AC97 sound machine for use in Au12x0/Au1550
- based systems which have audio on PSC1 (e.g. Db1200 demoboard).
+ Select this option to enable audio (AC97 or I2S) on the
+ Alchemy/AMD/RMI DB1200 demoboard.
diff --git a/sound/soc/au1x/Makefile b/sound/soc/au1x/Makefile
index 6c6950b8003a..16873076e8c4 100644
--- a/sound/soc/au1x/Makefile
+++ b/sound/soc/au1x/Makefile
@@ -8,6 +8,6 @@ obj-$(CONFIG_SND_SOC_AU1XPSC_I2S) += snd-soc-au1xpsc-i2s.o
obj-$(CONFIG_SND_SOC_AU1XPSC_AC97) += snd-soc-au1xpsc-ac97.o
# Boards
-snd-soc-sample-ac97-objs := sample-ac97.o
+snd-soc-db1200-objs := db1200.o
-obj-$(CONFIG_SND_SOC_SAMPLE_PSC_AC97) += snd-soc-sample-ac97.o
+obj-$(CONFIG_SND_SOC_DB1200) += snd-soc-db1200.o
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
new file mode 100644
index 000000000000..cdf7be1b9b91
--- /dev/null
+++ b/sound/soc/au1x/db1200.c
@@ -0,0 +1,141 @@
+/*
+ * DB1200 ASoC audio fabric support code.
+ *
+ * (c) 2008-9 Manuel Lauss <manuel.lauss@gmail.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/timer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+#include <asm/mach-db1x00/bcsr.h>
+
+#include "../codecs/ac97.h"
+#include "../codecs/wm8731.h"
+#include "psc.h"
+
+/*------------------------- AC97 PART ---------------------------*/
+
+static struct snd_soc_dai_link db1200_ac97_dai = {
+ .name = "AC97",
+ .stream_name = "AC97 HiFi",
+ .cpu_dai = &au1xpsc_ac97_dai,
+ .codec_dai = &ac97_dai,
+};
+
+static struct snd_soc_card db1200_ac97_machine = {
+ .name = "DB1200_AC97",
+ .dai_link = &db1200_ac97_dai,
+ .num_links = 1,
+ .platform = &au1xpsc_soc_platform,
+};
+
+static struct snd_soc_device db1200_ac97_devdata = {
+ .card = &db1200_ac97_machine,
+ .codec_dev = &soc_codec_dev_ac97,
+};
+
+/*------------------------- I2S PART ---------------------------*/
+
+static int db1200_i2s_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
+ struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+ int ret;
+
+ /* WM8731 has its own 12MHz crystal */
+ snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK,
+ 12000000, SND_SOC_CLOCK_IN);
+
+ /* codec is bitclock and lrclk master */
+ ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_LEFT_J |
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ goto out;
+
+ ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_LEFT_J |
+ SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM);
+ if (ret < 0)
+ goto out;
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static struct snd_soc_ops db1200_i2s_wm8731_ops = {
+ .startup = db1200_i2s_startup,
+};
+
+static struct snd_soc_dai_link db1200_i2s_dai = {
+ .name = "WM8731",
+ .stream_name = "WM8731 PCM",
+ .cpu_dai = &au1xpsc_i2s_dai,
+ .codec_dai = &wm8731_dai,
+ .ops = &db1200_i2s_wm8731_ops,
+};
+
+static struct snd_soc_card db1200_i2s_machine = {
+ .name = "DB1200_I2S",
+ .dai_link = &db1200_i2s_dai,
+ .num_links = 1,
+ .platform = &au1xpsc_soc_platform,
+};
+
+static struct snd_soc_device db1200_i2s_devdata = {
+ .card = &db1200_i2s_machine,
+ .codec_dev = &soc_codec_dev_wm8731,
+};
+
+/*------------------------- COMMON PART ---------------------------*/
+
+static struct platform_device *db1200_asoc_dev;
+
+static int __init db1200_audio_load(void)
+{
+ int ret;
+
+ ret = -ENOMEM;
+ db1200_asoc_dev = platform_device_alloc("soc-audio", -1);
+ if (!db1200_asoc_dev)
+ goto out;
+
+ /* DB1200 board setup set PSC1MUX to preferred audio device */
+ if (bcsr_read(BCSR_RESETS) & BCSR_RESETS_PSC1MUX)
+ platform_set_drvdata(db1200_asoc_dev, &db1200_i2s_devdata);
+ else
+ platform_set_drvdata(db1200_asoc_dev, &db1200_ac97_devdata);
+
+ db1200_ac97_devdata.dev = &db1200_asoc_dev->dev;
+ db1200_i2s_devdata.dev = &db1200_asoc_dev->dev;
+ ret = platform_device_add(db1200_asoc_dev);
+
+ if (ret) {
+ platform_device_put(db1200_asoc_dev);
+ db1200_asoc_dev = NULL;
+ }
+out:
+ return ret;
+}
+
+static void __exit db1200_audio_unload(void)
+{
+ platform_device_unregister(db1200_asoc_dev);
+}
+
+module_init(db1200_audio_load);
+module_exit(db1200_audio_unload);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DB1200 ASoC audio support");
+MODULE_AUTHOR("Manuel Lauss");
diff --git a/sound/soc/au1x/dbdma2.c b/sound/soc/au1x/dbdma2.c
index 19e4d37eba1c..6d9f4c624949 100644
--- a/sound/soc/au1x/dbdma2.c
+++ b/sound/soc/au1x/dbdma2.c
@@ -51,8 +51,8 @@ struct au1xpsc_audio_dmadata {
struct snd_pcm_substream *substream;
unsigned long curr_period; /* current segment DDMA is working on */
unsigned long q_period; /* queue period(s) */
- unsigned long dma_area; /* address of queued DMA area */
- unsigned long dma_area_s; /* start address of DMA area */
+ dma_addr_t dma_area; /* address of queued DMA area */
+ dma_addr_t dma_area_s; /* start address of DMA area */
unsigned long pos; /* current byte position being played */
unsigned long periods; /* number of SG segments in total */
unsigned long period_bytes; /* size in bytes of one SG segment */
@@ -94,8 +94,7 @@ static const struct snd_pcm_hardware au1xpsc_pcm_hardware = {
static void au1x_pcm_queue_tx(struct au1xpsc_audio_dmadata *cd)
{
- au1xxx_dbdma_put_source_flags(cd->ddma_chan,
- (void *)phys_to_virt(cd->dma_area),
+ au1xxx_dbdma_put_source(cd->ddma_chan, cd->dma_area,
cd->period_bytes, DDMA_FLAGS_IE);
/* update next-to-queue period */
@@ -109,9 +108,8 @@ static void au1x_pcm_queue_tx(struct au1xpsc_audio_dmadata *cd)
static void au1x_pcm_queue_rx(struct au1xpsc_audio_dmadata *cd)
{
- au1xxx_dbdma_put_dest_flags(cd->ddma_chan,
- (void *)phys_to_virt(cd->dma_area),
- cd->period_bytes, DDMA_FLAGS_IE);
+ au1xxx_dbdma_put_dest(cd->ddma_chan, cd->dma_area,
+ cd->period_bytes, DDMA_FLAGS_IE);
/* update next-to-queue period */
++cd->q_period;
@@ -233,7 +231,7 @@ static int au1xpsc_pcm_hw_params(struct snd_pcm_substream *substream,
pcd->substream = substream;
pcd->period_bytes = params_period_bytes(params);
pcd->periods = params_periods(params);
- pcd->dma_area_s = pcd->dma_area = (unsigned long)runtime->dma_addr;
+ pcd->dma_area_s = pcd->dma_area = runtime->dma_addr;
pcd->q_period = 0;
pcd->curr_period = 0;
pcd->pos = 0;
diff --git a/sound/soc/au1x/sample-ac97.c b/sound/soc/au1x/sample-ac97.c
deleted file mode 100644
index 27683eb7905e..000000000000
--- a/sound/soc/au1x/sample-ac97.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Sample Au12x0/Au1550 PSC AC97 sound machine.
- *
- * Copyright (c) 2007-2008 Manuel Lauss <mano@roarinelk.homelinux.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms outlined in the file COPYING at the root of this
- * source archive.
- *
- * This is a very generic AC97 sound machine driver for boards which
- * have (AC97) audio at PSC1 (e.g. DB1200 demoboards).
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/soc.h>
-#include <sound/soc-dapm.h>
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/au1xxx_psc.h>
-#include <asm/mach-au1x00/au1xxx_dbdma.h>
-
-#include "../codecs/ac97.h"
-#include "psc.h"
-
-static int au1xpsc_sample_ac97_init(struct snd_soc_codec *codec)
-{
- snd_soc_dapm_sync(codec);
- return 0;
-}
-
-static struct snd_soc_dai_link au1xpsc_sample_ac97_dai = {
- .name = "AC97",
- .stream_name = "AC97 HiFi",
- .cpu_dai = &au1xpsc_ac97_dai, /* see psc-ac97.c */
- .codec_dai = &ac97_dai, /* see codecs/ac97.c */
- .init = au1xpsc_sample_ac97_init,
- .ops = NULL,
-};
-
-static struct snd_soc_card au1xpsc_sample_ac97_machine = {
- .name = "Au1xxx PSC AC97 Audio",
- .dai_link = &au1xpsc_sample_ac97_dai,
- .num_links = 1,
-};
-
-static struct snd_soc_device au1xpsc_sample_ac97_devdata = {
- .card = &au1xpsc_sample_ac97_machine,
- .platform = &au1xpsc_soc_platform, /* see dbdma2.c */
- .codec_dev = &soc_codec_dev_ac97,
-};
-
-static struct resource au1xpsc_psc1_res[] = {
- [0] = {
- .start = CPHYSADDR(PSC1_BASE_ADDR),
- .end = CPHYSADDR(PSC1_BASE_ADDR) + 0x000fffff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
-#ifdef CONFIG_SOC_AU1200
- .start = AU1200_PSC1_INT,
- .end = AU1200_PSC1_INT,
-#elif defined(CONFIG_SOC_AU1550)
- .start = AU1550_PSC1_INT,
- .end = AU1550_PSC1_INT,
-#endif
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = DSCR_CMD0_PSC1_TX,
- .end = DSCR_CMD0_PSC1_TX,
- .flags = IORESOURCE_DMA,
- },
- [3] = {
- .start = DSCR_CMD0_PSC1_RX,
- .end = DSCR_CMD0_PSC1_RX,
- .flags = IORESOURCE_DMA,
- },
-};
-
-static struct platform_device *au1xpsc_sample_ac97_dev;
-
-static int __init au1xpsc_sample_ac97_load(void)
-{
- int ret;
-
-#ifdef CONFIG_SOC_AU1200
- unsigned long io;
-
- /* modify sys_pinfunc for AC97 on PSC1 */
- io = au_readl(SYS_PINFUNC);
- io |= SYS_PINFUNC_P1C;
- io &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B);
- au_writel(io, SYS_PINFUNC);
- au_sync();
-#endif
-
- ret = -ENOMEM;
-
- /* setup PSC clock source for AC97 part: external clock provided
- * by codec. The psc-ac97.c driver depends on this setting!
- */
- au_writel(PSC_SEL_CLK_SERCLK, PSC1_BASE_ADDR + PSC_SEL_OFFSET);
- au_sync();
-
- au1xpsc_sample_ac97_dev = platform_device_alloc("soc-audio", -1);
- if (!au1xpsc_sample_ac97_dev)
- goto out;
-
- au1xpsc_sample_ac97_dev->resource =
- kmemdup(au1xpsc_psc1_res, sizeof(struct resource) *
- ARRAY_SIZE(au1xpsc_psc1_res), GFP_KERNEL);
- au1xpsc_sample_ac97_dev->num_resources = ARRAY_SIZE(au1xpsc_psc1_res);
- au1xpsc_sample_ac97_dev->id = 1;
-
- platform_set_drvdata(au1xpsc_sample_ac97_dev,
- &au1xpsc_sample_ac97_devdata);
- au1xpsc_sample_ac97_devdata.dev = &au1xpsc_sample_ac97_dev->dev;
- ret = platform_device_add(au1xpsc_sample_ac97_dev);
-
- if (ret) {
- platform_device_put(au1xpsc_sample_ac97_dev);
- au1xpsc_sample_ac97_dev = NULL;
- }
-
-out:
- return ret;
-}
-
-static void __exit au1xpsc_sample_ac97_exit(void)
-{
- platform_device_unregister(au1xpsc_sample_ac97_dev);
-}
-
-module_init(au1xpsc_sample_ac97_load);
-module_exit(au1xpsc_sample_ac97_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Au1xxx PSC sample AC97 machine");
-MODULE_AUTHOR("Manuel Lauss <mano@roarinelk.homelinux.net>");
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 52b005f8fed4..62ff26a08a2f 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -23,6 +23,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_AK4671 if I2C
select SND_SOC_CS4270 if I2C
select SND_SOC_MAX9877 if I2C
+ select SND_SOC_DA7210 if I2C
select SND_SOC_PCM3008
select SND_SOC_SPDIF
select SND_SOC_SSM2602 if I2C
@@ -49,7 +50,9 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8776 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8900 if I2C
select SND_SOC_WM8903 if I2C
+ select SND_SOC_WM8904 if I2C
select SND_SOC_WM8940 if I2C
+ select SND_SOC_WM8955 if I2C
select SND_SOC_WM8960 if I2C
select SND_SOC_WM8961 if I2C
select SND_SOC_WM8971 if I2C
@@ -112,6 +115,9 @@ config SND_SOC_AK4671
config SND_SOC_CS4270
tristate
+config SND_SOC_DA7210
+ tristate
+
# Cirrus Logic CS4270 Codec VD = 3.3V Errata
# Select if you are affected by the errata where the part will not function
# if MCLK divide-by-1.5 is selected and VD is set to 3.3V. The driver will
@@ -203,9 +209,15 @@ config SND_SOC_WM8900
config SND_SOC_WM8903
tristate
+config SND_SOC_WM8904
+ tristate
+
config SND_SOC_WM8940
tristate
+config SND_SOC_WM8955
+ tristate
+
config SND_SOC_WM8960
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index dbaecb133ac7..ea9835412e6a 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -10,6 +10,7 @@ snd-soc-ak4642-objs := ak4642.o
snd-soc-ak4671-objs := ak4671.o
snd-soc-cs4270-objs := cs4270.o
snd-soc-cx20442-objs := cx20442.o
+snd-soc-da7210-objs := da7210.o
snd-soc-l3-objs := l3.o
snd-soc-pcm3008-objs := pcm3008.o
snd-soc-spdif-objs := spdif_transciever.o
@@ -36,7 +37,9 @@ snd-soc-wm8753-objs := wm8753.o
snd-soc-wm8776-objs := wm8776.o
snd-soc-wm8900-objs := wm8900.o
snd-soc-wm8903-objs := wm8903.o
+snd-soc-wm8904-objs := wm8904.o
snd-soc-wm8940-objs := wm8940.o
+snd-soc-wm8955-objs := wm8955.o
snd-soc-wm8960-objs := wm8960.o
snd-soc-wm8961-objs := wm8961.o
snd-soc-wm8971-objs := wm8971.o
@@ -66,6 +69,7 @@ obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
obj-$(CONFIG_SND_SOC_AK4671) += snd-soc-ak4671.o
obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
+obj-$(CONFIG_SND_SOC_DA7210) += snd-soc-da7210.o
obj-$(CONFIG_SND_SOC_L3) += snd-soc-l3.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
obj-$(CONFIG_SND_SOC_SPDIF) += snd-soc-spdif.o
@@ -92,11 +96,13 @@ obj-$(CONFIG_SND_SOC_WM8753) += snd-soc-wm8753.o
obj-$(CONFIG_SND_SOC_WM8776) += snd-soc-wm8776.o
obj-$(CONFIG_SND_SOC_WM8900) += snd-soc-wm8900.o
obj-$(CONFIG_SND_SOC_WM8903) += snd-soc-wm8903.o
-obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o
-obj-$(CONFIG_SND_SOC_WM8974) += snd-soc-wm8974.o
+obj-$(CONFIG_SND_SOC_WM8904) += snd-soc-wm8904.o
obj-$(CONFIG_SND_SOC_WM8940) += snd-soc-wm8940.o
+obj-$(CONFIG_SND_SOC_WM8955) += snd-soc-wm8955.o
obj-$(CONFIG_SND_SOC_WM8960) += snd-soc-wm8960.o
obj-$(CONFIG_SND_SOC_WM8961) += snd-soc-wm8961.o
+obj-$(CONFIG_SND_SOC_WM8971) += snd-soc-wm8971.o
+obj-$(CONFIG_SND_SOC_WM8974) += snd-soc-wm8974.o
obj-$(CONFIG_SND_SOC_WM8988) += snd-soc-wm8988.o
obj-$(CONFIG_SND_SOC_WM8990) += snd-soc-wm8990.o
obj-$(CONFIG_SND_SOC_WM8993) += snd-soc-wm8993.o
diff --git a/sound/soc/codecs/ad1938.c b/sound/soc/codecs/ad1938.c
index 5d489186c05b..47d9ac0ec9d9 100644
--- a/sound/soc/codecs/ad1938.c
+++ b/sound/soc/codecs/ad1938.c
@@ -97,6 +97,7 @@ static const struct snd_kcontrol_new ad1938_snd_controls[] = {
static const struct snd_soc_dapm_widget ad1938_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DAC", "Playback", AD1938_DAC_CTRL0, 0, 1),
SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_SUPPLY("PLL_PWR", AD1938_PLL_CLK_CTRL0, 0, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("ADC_PWR", AD1938_ADC_CTRL0, 0, 1, NULL, 0),
SND_SOC_DAPM_OUTPUT("DAC1OUT"),
SND_SOC_DAPM_OUTPUT("DAC2OUT"),
@@ -107,6 +108,8 @@ static const struct snd_soc_dapm_widget ad1938_dapm_widgets[] = {
};
static const struct snd_soc_dapm_route audio_paths[] = {
+ { "DAC", NULL, "PLL_PWR" },
+ { "ADC", NULL, "PLL_PWR" },
{ "DAC", NULL, "ADC_PWR" },
{ "ADC", NULL, "ADC_PWR" },
{ "DAC1OUT", "DAC1 Switch", "DAC" },
@@ -134,18 +137,8 @@ static int ad1938_mute(struct snd_soc_dai *dai, int mute)
return 0;
}
-static inline int ad1938_pll_powerctrl(struct snd_soc_codec *codec, int cmd)
-{
- int reg = codec->read(codec, AD1938_PLL_CLK_CTRL0);
- reg = (cmd > 0) ? reg & (~AD1938_PLL_POWERDOWN) : reg |
- AD1938_PLL_POWERDOWN;
- codec->write(codec, AD1938_PLL_CLK_CTRL0, reg);
-
- return 0;
-}
-
static int ad1938_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
- unsigned int mask, int slots, int width)
+ unsigned int rx_mask, int slots, int width)
{
struct snd_soc_codec *codec = dai->codec;
int dac_reg = codec->read(codec, AD1938_DAC_CTRL1);
@@ -306,24 +299,6 @@ static int ad1938_hw_params(struct snd_pcm_substream *substream,
return 0;
}
-static int ad1938_set_bias_level(struct snd_soc_codec *codec,
- enum snd_soc_bias_level level)
-{
- switch (level) {
- case SND_SOC_BIAS_ON:
- ad1938_pll_powerctrl(codec, 1);
- break;
- case SND_SOC_BIAS_PREPARE:
- break;
- case SND_SOC_BIAS_STANDBY:
- case SND_SOC_BIAS_OFF:
- ad1938_pll_powerctrl(codec, 0);
- break;
- }
- codec->bias_level = level;
- return 0;
-}
-
/*
* interface to read/write ad1938 register
*/
@@ -514,7 +489,6 @@ static int ad1938_register(struct ad1938_priv *ad1938)
codec->num_dai = 1;
codec->write = ad1938_write_reg;
codec->read = ad1938_read_reg_cache;
- codec->set_bias_level = ad1938_set_bias_level;
INIT_LIST_HEAD(&codec->dapm_widgets);
INIT_LIST_HEAD(&codec->dapm_paths);
@@ -559,7 +533,6 @@ static int ad1938_register(struct ad1938_priv *ad1938)
static void ad1938_unregister(struct ad1938_priv *ad1938)
{
- ad1938_set_bias_level(&ad1938->codec, SND_SOC_BIAS_OFF);
snd_soc_unregister_dai(&ad1938_dai);
snd_soc_unregister_codec(&ad1938->codec);
kfree(ad1938);
@@ -593,7 +566,6 @@ static int ad1938_probe(struct platform_device *pdev)
ARRAY_SIZE(ad1938_dapm_widgets));
snd_soc_dapm_add_routes(codec, audio_paths, ARRAY_SIZE(audio_paths));
- ad1938_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
pcm_err:
return ret;
@@ -610,37 +582,9 @@ static int ad1938_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int ad1938_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec = socdev->card->codec;
-
- ad1938_set_bias_level(codec, SND_SOC_BIAS_OFF);
- return 0;
-}
-
-static int ad1938_resume(struct platform_device *pdev)
-{
- struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec = socdev->card->codec;
-
- if (codec->suspend_bias_level == SND_SOC_BIAS_ON)
- ad1938_set_bias_level(codec, SND_SOC_BIAS_ON);
-
- return 0;
-}
-#else
-#define ad1938_suspend NULL
-#define ad1938_resume NULL
-#endif
-
struct snd_soc_codec_device soc_codec_dev_ad1938 = {
.probe = ad1938_probe,
.remove = ad1938_remove,
- .suspend = ad1938_suspend,
- .resume = ad1938_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_ad1938);
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index ffe122d1cd76..8b5457542a0e 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -28,6 +28,7 @@
#include <sound/initval.h>
#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/regulator/consumer.h>
#include "cs4270.h"
@@ -106,6 +107,10 @@
#define CS4270_MUTE_DAC_A 0x01
#define CS4270_MUTE_DAC_B 0x02
+static const char *supply_names[] = {
+ "va", "vd", "vlc"
+};
+
/* Private data for the CS4270 */
struct cs4270_private {
struct snd_soc_codec codec;
@@ -114,6 +119,9 @@ struct cs4270_private {
unsigned int mode; /* The mode (I2S or left-justified) */
unsigned int slave_mode;
unsigned int manual_mute;
+
+ /* power domain regulators */
+ struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
};
/**
@@ -579,7 +587,8 @@ static int cs4270_probe(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = cs4270_codec;
- int ret;
+ struct cs4270_private *cs4270 = codec->private_data;
+ int i, ret;
/* Connect the codec to the socdev. snd_soc_new_pcms() needs this. */
socdev->card->codec = codec;
@@ -599,6 +608,15 @@ static int cs4270_probe(struct platform_device *pdev)
goto error_free_pcms;
}
+ /* get the power supply regulators */
+ for (i = 0; i < ARRAY_SIZE(supply_names); i++)
+ cs4270->supplies[i].supply = supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(cs4270->supplies),
+ cs4270->supplies);
+ if (ret < 0)
+ goto error_free_pcms;
+
return 0;
error_free_pcms:
@@ -616,8 +634,11 @@ error_free_pcms:
static int cs4270_remove(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = cs4270_codec;
+ struct cs4270_private *cs4270 = codec->private_data;
snd_soc_free_pcms(socdev);
+ regulator_bulk_free(ARRAY_SIZE(cs4270->supplies), cs4270->supplies);
return 0;
};
@@ -799,17 +820,33 @@ MODULE_DEVICE_TABLE(i2c, cs4270_id);
static int cs4270_soc_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct snd_soc_codec *codec = cs4270_codec;
- int reg = snd_soc_read(codec, CS4270_PWRCTL) | CS4270_PWRCTL_PDN_ALL;
+ struct cs4270_private *cs4270 = codec->private_data;
+ int reg, ret;
- return snd_soc_write(codec, CS4270_PWRCTL, reg);
+ reg = snd_soc_read(codec, CS4270_PWRCTL) | CS4270_PWRCTL_PDN_ALL;
+ if (reg < 0)
+ return reg;
+
+ ret = snd_soc_write(codec, CS4270_PWRCTL, reg);
+ if (ret < 0)
+ return ret;
+
+ regulator_bulk_disable(ARRAY_SIZE(cs4270->supplies),
+ cs4270->supplies);
+
+ return 0;
}
static int cs4270_soc_resume(struct platform_device *pdev)
{
struct snd_soc_codec *codec = cs4270_codec;
+ struct cs4270_private *cs4270 = codec->private_data;
struct i2c_client *i2c_client = codec->control_data;
int reg;
+ regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
+ cs4270->supplies);
+
/* In case the device was put to hard reset during sleep, we need to
* wait 500ns here before any I2C communication. */
ndelay(500);
diff --git a/sound/soc/codecs/da7210.c b/sound/soc/codecs/da7210.c
new file mode 100644
index 000000000000..cf2975a7294a
--- /dev/null
+++ b/sound/soc/codecs/da7210.c
@@ -0,0 +1,589 @@
+/*
+ * DA7210 ALSA Soc codec driver
+ *
+ * Copyright (c) 2009 Dialog Semiconductor
+ * Written by David Chen <Dajun.chen@diasemi.com>
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Cleanups by Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Tested on SuperH Ecovec24 board with S16/S24 LE in 48KHz using I2S
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+#include <sound/initval.h>
+#include <asm/div64.h>
+
+#include "da7210.h"
+
+/* DA7210 register space */
+#define DA7210_STATUS 0x02
+#define DA7210_STARTUP1 0x03
+#define DA7210_MIC_L 0x07
+#define DA7210_MIC_R 0x08
+#define DA7210_INMIX_L 0x0D
+#define DA7210_INMIX_R 0x0E
+#define DA7210_ADC_HPF 0x0F
+#define DA7210_ADC 0x10
+#define DA7210_DAC_HPF 0x14
+#define DA7210_DAC_L 0x15
+#define DA7210_DAC_R 0x16
+#define DA7210_DAC_SEL 0x17
+#define DA7210_OUTMIX_L 0x1C
+#define DA7210_OUTMIX_R 0x1D
+#define DA7210_HP_L_VOL 0x21
+#define DA7210_HP_R_VOL 0x22
+#define DA7210_HP_CFG 0x23
+#define DA7210_DAI_SRC_SEL 0x25
+#define DA7210_DAI_CFG1 0x26
+#define DA7210_DAI_CFG3 0x28
+#define DA7210_PLL_DIV3 0x2B
+#define DA7210_PLL 0x2C
+
+/* STARTUP1 bit fields */
+#define DA7210_SC_MST_EN (1 << 0)
+
+/* MIC_L bit fields */
+#define DA7210_MICBIAS_EN (1 << 6)
+#define DA7210_MIC_L_EN (1 << 7)
+
+/* MIC_R bit fields */
+#define DA7210_MIC_R_EN (1 << 7)
+
+/* INMIX_L bit fields */
+#define DA7210_IN_L_EN (1 << 7)
+
+/* INMIX_R bit fields */
+#define DA7210_IN_R_EN (1 << 7)
+
+/* ADC_HPF bit fields */
+#define DA7210_ADC_VOICE_EN (1 << 7)
+
+/* ADC bit fields */
+#define DA7210_ADC_L_EN (1 << 3)
+#define DA7210_ADC_R_EN (1 << 7)
+
+/* DAC_HPF fields */
+#define DA7210_DAC_VOICE_EN (1 << 7)
+
+/* DAC_SEL bit fields */
+#define DA7210_DAC_L_SRC_DAI_L (4 << 0)
+#define DA7210_DAC_L_EN (1 << 3)
+#define DA7210_DAC_R_SRC_DAI_R (5 << 4)
+#define DA7210_DAC_R_EN (1 << 7)
+
+/* OUTMIX_L bit fields */
+#define DA7210_OUT_L_EN (1 << 7)
+
+/* OUTMIX_R bit fields */
+#define DA7210_OUT_R_EN (1 << 7)
+
+/* HP_CFG bit fields */
+#define DA7210_HP_2CAP_MODE (1 << 1)
+#define DA7210_HP_SENSE_EN (1 << 2)
+#define DA7210_HP_L_EN (1 << 3)
+#define DA7210_HP_MODE (1 << 6)
+#define DA7210_HP_R_EN (1 << 7)
+
+/* DAI_SRC_SEL bit fields */
+#define DA7210_DAI_OUT_L_SRC (6 << 0)
+#define DA7210_DAI_OUT_R_SRC (7 << 4)
+
+/* DAI_CFG1 bit fields */
+#define DA7210_DAI_WORD_S16_LE (0 << 0)
+#define DA7210_DAI_WORD_S24_LE (2 << 0)
+#define DA7210_DAI_FLEN_64BIT (1 << 2)
+#define DA7210_DAI_MODE_MASTER (1 << 7)
+
+/* DAI_CFG3 bit fields */
+#define DA7210_DAI_FORMAT_I2SMODE (0 << 0)
+#define DA7210_DAI_OE (1 << 3)
+#define DA7210_DAI_EN (1 << 7)
+
+/*PLL_DIV3 bit fields */
+#define DA7210_MCLK_RANGE_10_20_MHZ (1 << 4)
+#define DA7210_PLL_BYP (1 << 6)
+
+/* PLL bit fields */
+#define DA7210_PLL_FS_48000 (11 << 0)
+
+#define DA7210_VERSION "0.0.1"
+
+/* Codec private data */
+struct da7210_priv {
+ struct snd_soc_codec codec;
+};
+
+static struct snd_soc_codec *da7210_codec;
+
+/*
+ * Register cache
+ */
+static const u8 da7210_reg[] = {
+ 0x00, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R0 - R7 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, /* R8 - RF */
+ 0x00, 0x00, 0x00, 0x00, 0x08, 0x10, 0x10, 0x54, /* R10 - R17 */
+ 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R18 - R1F */
+ 0x00, 0x00, 0x00, 0x02, 0x00, 0x76, 0x00, 0x00, /* R20 - R27 */
+ 0x04, 0x00, 0x00, 0x30, 0x2A, 0x00, 0x40, 0x00, /* R28 - R2F */
+ 0x40, 0x00, 0x40, 0x00, 0x40, 0x00, 0x40, 0x00, /* R30 - R37 */
+ 0x40, 0x00, 0x40, 0x00, 0x40, 0x00, 0x00, 0x00, /* R38 - R3F */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R40 - R4F */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R48 - R4F */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R50 - R57 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R58 - R5F */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R60 - R67 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R68 - R6F */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* R70 - R77 */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x54, 0x00, /* R78 - R7F */
+ 0x00, 0x00, 0x2C, 0x00, 0x00, 0x00, 0x00, 0x00, /* R80 - R87 */
+ 0x00, /* R88 */
+};
+
+/*
+ * Read da7210 register cache
+ */
+static inline u32 da7210_read_reg_cache(struct snd_soc_codec *codec, u32 reg)
+{
+ u8 *cache = codec->reg_cache;
+ BUG_ON(reg > ARRAY_SIZE(da7210_reg));
+ return cache[reg];
+}
+
+/*
+ * Write to the da7210 register space
+ */
+static int da7210_write(struct snd_soc_codec *codec, u32 reg, u32 value)
+{
+ u8 *cache = codec->reg_cache;
+ u8 data[2];
+
+ BUG_ON(codec->volatile_register);
+
+ data[0] = reg & 0xff;
+ data[1] = value & 0xff;
+
+ if (reg >= codec->reg_cache_size)
+ return -EIO;
+
+ if (2 != codec->hw_write(codec->control_data, data, 2))
+ return -EIO;
+
+ cache[reg] = value;
+ return 0;
+}
+
+/*
+ * Read from the da7210 register space.
+ */
+static inline u32 da7210_read(struct snd_soc_codec *codec, u32 reg)
+{
+ if (DA7210_STATUS == reg)
+ return i2c_smbus_read_byte_data(codec->control_data, reg);
+
+ return da7210_read_reg_cache(codec, reg);
+}
+
+static int da7210_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ struct snd_soc_codec *codec = dai->codec;
+
+ if (is_play) {
+ /* PlayBack Volume 40 */
+ snd_soc_update_bits(codec, DA7210_HP_L_VOL, 0x3F, 40);
+ snd_soc_update_bits(codec, DA7210_HP_R_VOL, 0x3F, 40);
+
+ /* Enable Out */
+ snd_soc_update_bits(codec, DA7210_OUTMIX_L, 0x1F, 0x10);
+ snd_soc_update_bits(codec, DA7210_OUTMIX_R, 0x1F, 0x10);
+
+ } else {
+ /* Volume 7 */
+ snd_soc_update_bits(codec, DA7210_MIC_L, 0x7, 0x7);
+ snd_soc_update_bits(codec, DA7210_MIC_R, 0x7, 0x7);
+
+ /* Enable Mic */
+ snd_soc_update_bits(codec, DA7210_INMIX_L, 0x1F, 0x1);
+ snd_soc_update_bits(codec, DA7210_INMIX_R, 0x1F, 0x1);
+ }
+
+ return 0;
+}
+
+/*
+ * Set PCM DAI word length.
+ */
+static int da7210_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_device *socdev = rtd->socdev;
+ struct snd_soc_codec *codec = socdev->card->codec;
+ u32 dai_cfg1;
+ u32 reg, mask;
+
+ /* set DAI source to Left and Right ADC */
+ da7210_write(codec, DA7210_DAI_SRC_SEL,
+ DA7210_DAI_OUT_R_SRC | DA7210_DAI_OUT_L_SRC);
+
+ /* Enable DAI */
+ da7210_write(codec, DA7210_DAI_CFG3, DA7210_DAI_OE | DA7210_DAI_EN);
+
+ dai_cfg1 = 0xFC & da7210_read(codec, DA7210_DAI_CFG1);
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ dai_cfg1 |= DA7210_DAI_WORD_S16_LE;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ dai_cfg1 |= DA7210_DAI_WORD_S24_LE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ da7210_write(codec, DA7210_DAI_CFG1, dai_cfg1);
+
+ /* FIXME
+ *
+ * It support 48K only now
+ */
+ switch (params_rate(params)) {
+ case 48000:
+ if (SNDRV_PCM_STREAM_PLAYBACK == substream->stream) {
+ reg = DA7210_DAC_HPF;
+ mask = DA7210_DAC_VOICE_EN;
+ } else {
+ reg = DA7210_ADC_HPF;
+ mask = DA7210_ADC_VOICE_EN;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, reg, mask, 0);
+
+ return 0;
+}
+
+/*
+ * Set DAI mode and Format
+ */
+static int da7210_set_dai_fmt(struct snd_soc_dai *codec_dai, u32 fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ u32 dai_cfg1;
+ u32 dai_cfg3;
+
+ dai_cfg1 = 0x7f & da7210_read(codec, DA7210_DAI_CFG1);
+ dai_cfg3 = 0xfc & da7210_read(codec, DA7210_DAI_CFG3);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ dai_cfg1 |= DA7210_DAI_MODE_MASTER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* FIXME
+ *
+ * It support I2S only now
+ */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ dai_cfg3 |= DA7210_DAI_FORMAT_I2SMODE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* FIXME
+ *
+ * It support 64bit data transmission only now
+ */
+ dai_cfg1 |= DA7210_DAI_FLEN_64BIT;
+
+ da7210_write(codec, DA7210_DAI_CFG1, dai_cfg1);
+ da7210_write(codec, DA7210_DAI_CFG3, dai_cfg3);
+
+ return 0;
+}
+
+#define DA7210_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+/* DAI operations */
+static struct snd_soc_dai_ops da7210_dai_ops = {
+ .startup = da7210_startup,
+ .hw_params = da7210_hw_params,
+ .set_fmt = da7210_set_dai_fmt,
+};
+
+struct snd_soc_dai da7210_dai = {
+ .name = "DA7210 IIS",
+ .id = 0,
+ /* playback capabilities */
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = DA7210_FORMATS,
+ },
+ /* capture capabilities */
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_96000,
+ .formats = DA7210_FORMATS,
+ },
+ .ops = &da7210_dai_ops,
+};
+EXPORT_SYMBOL_GPL(da7210_dai);
+
+/*
+ * Initialize the DA7210 driver
+ * register the mixer and dsp interfaces with the kernel
+ */
+static int da7210_init(struct da7210_priv *da7210)
+{
+ struct snd_soc_codec *codec = &da7210->codec;
+ int ret = 0;
+
+ if (da7210_codec) {
+ dev_err(codec->dev, "Another da7210 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = da7210;
+ codec->name = "DA7210";
+ codec->owner = THIS_MODULE;
+ codec->read = da7210_read;
+ codec->write = da7210_write;
+ codec->dai = &da7210_dai;
+ codec->num_dai = 1;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+ codec->reg_cache_size = ARRAY_SIZE(da7210_reg);
+ codec->reg_cache = kmemdup(da7210_reg,
+ sizeof(da7210_reg), GFP_KERNEL);
+
+ if (!codec->reg_cache)
+ return -ENOMEM;
+
+ da7210_dai.dev = codec->dev;
+ da7210_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret) {
+ dev_err(codec->dev, "Failed to register CODEC: %d\n", ret);
+ goto init_err;
+ }
+
+ ret = snd_soc_register_dai(&da7210_dai);
+ if (ret) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ goto init_err;
+ }
+
+ /* FIXME
+ *
+ * This driver use fixed value here
+ */
+
+ /*
+ * ADC settings
+ */
+
+ /* Enable Left & Right MIC PGA and Mic Bias */
+ da7210_write(codec, DA7210_MIC_L, DA7210_MIC_L_EN | DA7210_MICBIAS_EN);
+ da7210_write(codec, DA7210_MIC_R, DA7210_MIC_R_EN);
+
+ /* Enable Left and Right input PGA */
+ da7210_write(codec, DA7210_INMIX_L, DA7210_IN_L_EN);
+ da7210_write(codec, DA7210_INMIX_R, DA7210_IN_R_EN);
+
+ /* Enable Left and Right ADC */
+ da7210_write(codec, DA7210_ADC, DA7210_ADC_L_EN | DA7210_ADC_R_EN);
+
+ /*
+ * DAC settings
+ */
+
+ /* Enable Left and Right DAC */
+ da7210_write(codec, DA7210_DAC_SEL,
+ DA7210_DAC_L_SRC_DAI_L | DA7210_DAC_L_EN |
+ DA7210_DAC_R_SRC_DAI_R | DA7210_DAC_R_EN);
+
+ /* Enable Left and Right out PGA */
+ da7210_write(codec, DA7210_OUTMIX_L, DA7210_OUT_L_EN);
+ da7210_write(codec, DA7210_OUTMIX_R, DA7210_OUT_R_EN);
+
+ /* Enable Left and Right HeadPhone PGA */
+ da7210_write(codec, DA7210_HP_CFG,
+ DA7210_HP_2CAP_MODE | DA7210_HP_SENSE_EN |
+ DA7210_HP_L_EN | DA7210_HP_MODE | DA7210_HP_R_EN);
+
+ /* Diable PLL and bypass it */
+ da7210_write(codec, DA7210_PLL, DA7210_PLL_FS_48000);
+
+ /* Bypass PLL and set MCLK freq rang to 10-20MHz */
+ da7210_write(codec, DA7210_PLL_DIV3,
+ DA7210_MCLK_RANGE_10_20_MHZ | DA7210_PLL_BYP);
+
+ /* Activate all enabled subsystem */
+ da7210_write(codec, DA7210_STARTUP1, DA7210_SC_MST_EN);
+
+ return ret;
+
+init_err:
+ kfree(codec->reg_cache);
+ codec->reg_cache = NULL;
+
+ return ret;
+
+}
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static int __devinit da7210_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct da7210_priv *da7210;
+ struct snd_soc_codec *codec;
+ int ret;
+
+ da7210 = kzalloc(sizeof(struct da7210_priv), GFP_KERNEL);
+ if (!da7210)
+ return -ENOMEM;
+
+ codec = &da7210->codec;
+ codec->dev = &i2c->dev;
+
+ i2c_set_clientdata(i2c, da7210);
+ codec->control_data = i2c;
+
+ ret = da7210_init(da7210);
+ if (ret < 0)
+ pr_err("Failed to initialise da7210 audio codec\n");
+
+ return ret;
+}
+
+static int __devexit da7210_i2c_remove(struct i2c_client *client)
+{
+ struct da7210_priv *da7210 = i2c_get_clientdata(client);
+
+ snd_soc_unregister_dai(&da7210_dai);
+ kfree(da7210->codec.reg_cache);
+ kfree(da7210);
+ da7210_codec = NULL;
+
+ return 0;
+}
+
+static const struct i2c_device_id da7210_i2c_id[] = {
+ { "da7210", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, da7210_i2c_id);
+
+/* I2C codec control layer */
+static struct i2c_driver da7210_i2c_driver = {
+ .driver = {
+ .name = "DA7210 I2C Codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = da7210_i2c_probe,
+ .remove = __devexit_p(da7210_i2c_remove),
+ .id_table = da7210_i2c_id,
+};
+#endif
+
+static int da7210_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret;
+
+ if (!da7210_codec) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = da7210_codec;
+ codec = da7210_codec;
+
+ /* Register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0)
+ goto pcm_err;
+
+ dev_info(&pdev->dev, "DA7210 Audio Codec %s\n", DA7210_VERSION);
+
+pcm_err:
+ return ret;
+}
+
+static int da7210_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_da7210 = {
+ .probe = da7210_probe,
+ .remove = da7210_remove,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_da7210);
+
+static int __init da7210_modinit(void)
+{
+ int ret = 0;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&da7210_i2c_driver);
+#endif
+ return ret;
+}
+module_init(da7210_modinit);
+
+static void __exit da7210_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&da7210_i2c_driver);
+#endif
+}
+module_exit(da7210_exit);
+
+MODULE_DESCRIPTION("ASoC DA7210 driver");
+MODULE_AUTHOR("David Chen, Kuninori Morimoto");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/da7210.h b/sound/soc/codecs/da7210.h
new file mode 100644
index 000000000000..390d621eb742
--- /dev/null
+++ b/sound/soc/codecs/da7210.h
@@ -0,0 +1,24 @@
+/*
+ * da7210.h -- audio driver for da7210
+ *
+ * Copyright (c) 2009 Dialog Semiconductor
+ * Written by David Chen <Dajun.chen@diasemi.com>
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Cleanups by Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef _DA7210_H
+#define _DA7210_H
+
+extern struct snd_soc_dai da7210_dai;
+extern struct snd_soc_codec_device soc_codec_dev_da7210;
+
+#endif
+
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 2b4dc2b0b017..e4b946a19ea3 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -765,9 +765,10 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_codec *codec = socdev->card->codec;
struct aic3x_priv *aic3x = codec->private_data;
int codec_clk = 0, bypass_pll = 0, fsref, last_clk = 0;
- u8 data, r, p, pll_q, pll_p = 1, pll_r = 1, pll_j = 1;
- u16 pll_d = 1;
+ u8 data, j, r, p, pll_q, pll_p = 1, pll_r = 1, pll_j = 1;
+ u16 d, pll_d = 1;
u8 reg;
+ int clk;
/* select data word length */
data =
@@ -833,48 +834,70 @@ static int aic3x_hw_params(struct snd_pcm_substream *substream,
if (bypass_pll)
return 0;
- /* Use PLL
- * find an apropriate setup for j, d, r and p by iterating over
- * p and r - j and d are calculated for each fraction.
- * Up to 128 values are probed, the closest one wins the game.
+ /* Use PLL, compute apropriate setup for j, d, r and p, the closest
+ * one wins the game. Try with d==0 first, next with d!=0.
+ * Constraints for j are according to the datasheet.
* The sysclk is divided by 1000 to prevent integer overflows.
*/
+
codec_clk = (2048 * fsref) / (aic3x->sysclk / 1000);
for (r = 1; r <= 16; r++)
for (p = 1; p <= 8; p++) {
- int clk, tmp = (codec_clk * pll_r * 10) / pll_p;
- u8 j = tmp / 10000;
- u16 d = tmp % 10000;
+ for (j = 4; j <= 55; j++) {
+ /* This is actually 1000*((j+(d/10000))*r)/p
+ * The term had to be converted to get
+ * rid of the division by 10000; d = 0 here
+ */
+ int tmp_clk = (1000 * j * r) / p;
+
+ /* Check whether this values get closer than
+ * the best ones we had before
+ */
+ if (abs(codec_clk - tmp_clk) <
+ abs(codec_clk - last_clk)) {
+ pll_j = j; pll_d = 0;
+ pll_r = r; pll_p = p;
+ last_clk = tmp_clk;
+ }
+
+ /* Early exit for exact matches */
+ if (tmp_clk == codec_clk)
+ goto found;
+ }
+ }
- if (j > 63)
- continue;
+ /* try with d != 0 */
+ for (p = 1; p <= 8; p++) {
+ j = codec_clk * p / 1000;
- if (d != 0 && aic3x->sysclk < 10000000)
- continue;
+ if (j < 4 || j > 11)
+ continue;
- /* This is actually 1000 * ((j + (d/10000)) * r) / p
- * The term had to be converted to get rid of the
- * division by 10000 */
- clk = ((10000 * j * r) + (d * r)) / (10 * p);
+ /* do not use codec_clk here since we'd loose precision */
+ d = ((2048 * p * fsref) - j * aic3x->sysclk)
+ * 100 / (aic3x->sysclk/100);
- /* check whether this values get closer than the best
- * ones we had before */
- if (abs(codec_clk - clk) < abs(codec_clk - last_clk)) {
- pll_j = j; pll_d = d; pll_r = r; pll_p = p;
- last_clk = clk;
- }
+ clk = (10000 * j + d) / (10 * p);
- /* Early exit for exact matches */
- if (clk == codec_clk)
- break;
+ /* check whether this values get closer than the best
+ * ones we had before */
+ if (abs(codec_clk - clk) < abs(codec_clk - last_clk)) {
+ pll_j = j; pll_d = d; pll_r = 1; pll_p = p;
+ last_clk = clk;
}
+ /* Early exit for exact matches */
+ if (clk == codec_clk)
+ goto found;
+ }
+
if (last_clk == 0) {
printk(KERN_ERR "%s(): unable to setup PLL\n", __func__);
return -EINVAL;
}
+found:
data = aic3x_read_reg_cache(codec, AIC3X_PLL_PROGA_REG);
aic3x_write(codec, AIC3X_PLL_PROGA_REG, data | (pll_p << PLLP_SHIFT));
aic3x_write(codec, AIC3X_OVRF_STATUS_AND_PLLR_REG, pll_r << PLLR_SHIFT);
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index 9c8903dbe647..2df9c20b7d52 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -58,11 +59,26 @@ enum dac33_state {
DAC33_FLUSH,
};
+enum dac33_fifo_modes {
+ DAC33_FIFO_BYPASS = 0,
+ DAC33_FIFO_MODE1,
+ DAC33_FIFO_MODE7,
+ DAC33_FIFO_LAST_MODE,
+};
+
+#define DAC33_NUM_SUPPLIES 3
+static const char *dac33_supply_names[DAC33_NUM_SUPPLIES] = {
+ "AVDD",
+ "DVDD",
+ "IOVDD",
+};
+
struct tlv320dac33_priv {
struct mutex mutex;
struct workqueue_struct *dac33_wq;
struct work_struct work;
struct snd_soc_codec codec;
+ struct regulator_bulk_data supplies[DAC33_NUM_SUPPLIES];
int power_gpio;
int chip_power;
int irq;
@@ -73,7 +89,7 @@ struct tlv320dac33_priv {
* this */
unsigned int nsample_max; /* nsample should not be higher than
* this */
- unsigned int nsample_switch; /* Use FIFO or bypass FIFO switch */
+ enum dac33_fifo_modes fifo_mode;/* FIFO mode selection */
unsigned int nsample; /* burst read amount from host */
enum dac33_state state;
@@ -297,28 +313,49 @@ static inline void dac33_soft_power(struct snd_soc_codec *codec, int power)
dac33_write(codec, DAC33_PWR_CTRL, reg);
}
-static void dac33_hard_power(struct snd_soc_codec *codec, int power)
+static int dac33_hard_power(struct snd_soc_codec *codec, int power)
{
struct tlv320dac33_priv *dac33 = codec->private_data;
+ int ret;
mutex_lock(&dac33->mutex);
if (power) {
- if (dac33->power_gpio >= 0) {
- gpio_set_value(dac33->power_gpio, 1);
- dac33->chip_power = 1;
- /* Restore registers */
- dac33_restore_regs(codec);
+ ret = regulator_bulk_enable(ARRAY_SIZE(dac33->supplies),
+ dac33->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable supplies: %d\n", ret);
+ goto exit;
}
+
+ if (dac33->power_gpio >= 0)
+ gpio_set_value(dac33->power_gpio, 1);
+
+ dac33->chip_power = 1;
+
+ /* Restore registers */
+ dac33_restore_regs(codec);
+
dac33_soft_power(codec, 1);
} else {
dac33_soft_power(codec, 0);
- if (dac33->power_gpio >= 0) {
+ if (dac33->power_gpio >= 0)
gpio_set_value(dac33->power_gpio, 0);
- dac33->chip_power = 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(dac33->supplies),
+ dac33->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to disable supplies: %d\n", ret);
+ goto exit;
}
+
+ dac33->chip_power = 0;
}
- mutex_unlock(&dac33->mutex);
+exit:
+ mutex_unlock(&dac33->mutex);
+ return ret;
}
static int dac33_get_nsample(struct snd_kcontrol *kcontrol,
@@ -351,39 +388,48 @@ static int dac33_set_nsample(struct snd_kcontrol *kcontrol,
return ret;
}
-static int dac33_get_nsample_switch(struct snd_kcontrol *kcontrol,
+static int dac33_get_fifo_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct tlv320dac33_priv *dac33 = codec->private_data;
- ucontrol->value.integer.value[0] = dac33->nsample_switch;
+ ucontrol->value.integer.value[0] = dac33->fifo_mode;
return 0;
}
-static int dac33_set_nsample_switch(struct snd_kcontrol *kcontrol,
+static int dac33_set_fifo_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
struct tlv320dac33_priv *dac33 = codec->private_data;
int ret = 0;
- if (dac33->nsample_switch == ucontrol->value.integer.value[0])
+ if (dac33->fifo_mode == ucontrol->value.integer.value[0])
return 0;
/* Do not allow changes while stream is running*/
if (codec->active)
return -EPERM;
if (ucontrol->value.integer.value[0] < 0 ||
- ucontrol->value.integer.value[0] > 1)
+ ucontrol->value.integer.value[0] >= DAC33_FIFO_LAST_MODE)
ret = -EINVAL;
else
- dac33->nsample_switch = ucontrol->value.integer.value[0];
+ dac33->fifo_mode = ucontrol->value.integer.value[0];
return ret;
}
+/* Codec operation modes */
+static const char *dac33_fifo_mode_texts[] = {
+ "Bypass", "Mode 1", "Mode 7"
+};
+
+static const struct soc_enum dac33_fifo_mode_enum =
+ SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(dac33_fifo_mode_texts),
+ dac33_fifo_mode_texts);
+
/*
* DACL/R digital volume control:
* from 0 dB to -63.5 in 0.5 dB steps
@@ -406,8 +452,8 @@ static const struct snd_kcontrol_new dac33_snd_controls[] = {
static const struct snd_kcontrol_new dac33_nsample_snd_controls[] = {
SOC_SINGLE_EXT("nSample", 0, 0, 5900, 0,
dac33_get_nsample, dac33_set_nsample),
- SOC_SINGLE_EXT("nSample Switch", 0, 0, 1, 0,
- dac33_get_nsample_switch, dac33_set_nsample_switch),
+ SOC_ENUM_EXT("FIFO Mode", dac33_fifo_mode_enum,
+ dac33_get_fifo_mode, dac33_set_fifo_mode),
};
/* Analog bypass */
@@ -469,6 +515,8 @@ static int dac33_add_widgets(struct snd_soc_codec *codec)
static int dac33_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
+ int ret;
+
switch (level) {
case SND_SOC_BIAS_ON:
dac33_soft_power(codec, 1);
@@ -476,12 +524,19 @@ static int dac33_set_bias_level(struct snd_soc_codec *codec,
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
- if (codec->bias_level == SND_SOC_BIAS_OFF)
- dac33_hard_power(codec, 1);
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ ret = dac33_hard_power(codec, 1);
+ if (ret != 0)
+ return ret;
+ }
+
dac33_soft_power(codec, 0);
break;
case SND_SOC_BIAS_OFF:
- dac33_hard_power(codec, 0);
+ ret = dac33_hard_power(codec, 0);
+ if (ret != 0)
+ return ret;
+
break;
}
codec->bias_level = level;
@@ -489,6 +544,51 @@ static int dac33_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
+static inline void dac33_prefill_handler(struct tlv320dac33_priv *dac33)
+{
+ struct snd_soc_codec *codec;
+
+ codec = &dac33->codec;
+
+ switch (dac33->fifo_mode) {
+ case DAC33_FIFO_MODE1:
+ dac33_write16(codec, DAC33_NSAMPLE_MSB,
+ DAC33_THRREG(dac33->nsample));
+ dac33_write16(codec, DAC33_PREFILL_MSB,
+ DAC33_THRREG(dac33->alarm_threshold));
+ break;
+ case DAC33_FIFO_MODE7:
+ dac33_write16(codec, DAC33_PREFILL_MSB,
+ DAC33_THRREG(20));
+ break;
+ default:
+ dev_warn(codec->dev, "Unhandled FIFO mode: %d\n",
+ dac33->fifo_mode);
+ break;
+ }
+}
+
+static inline void dac33_playback_handler(struct tlv320dac33_priv *dac33)
+{
+ struct snd_soc_codec *codec;
+
+ codec = &dac33->codec;
+
+ switch (dac33->fifo_mode) {
+ case DAC33_FIFO_MODE1:
+ dac33_write16(codec, DAC33_NSAMPLE_MSB,
+ DAC33_THRREG(dac33->nsample));
+ break;
+ case DAC33_FIFO_MODE7:
+ /* At the moment we are not using interrupts in mode7 */
+ break;
+ default:
+ dev_warn(codec->dev, "Unhandled FIFO mode: %d\n",
+ dac33->fifo_mode);
+ break;
+ }
+}
+
static void dac33_work(struct work_struct *work)
{
struct snd_soc_codec *codec;
@@ -502,14 +602,10 @@ static void dac33_work(struct work_struct *work)
switch (dac33->state) {
case DAC33_PREFILL:
dac33->state = DAC33_PLAYBACK;
- dac33_write16(codec, DAC33_NSAMPLE_MSB,
- DAC33_THRREG(dac33->nsample));
- dac33_write16(codec, DAC33_PREFILL_MSB,
- DAC33_THRREG(dac33->alarm_threshold));
+ dac33_prefill_handler(dac33);
break;
case DAC33_PLAYBACK:
- dac33_write16(codec, DAC33_NSAMPLE_MSB,
- DAC33_THRREG(dac33->nsample));
+ dac33_playback_handler(dac33);
break;
case DAC33_IDLE:
break;
@@ -547,7 +643,7 @@ static void dac33_shutdown(struct snd_pcm_substream *substream,
unsigned int pwr_ctrl;
/* Stop pending workqueue */
- if (dac33->nsample_switch)
+ if (dac33->fifo_mode)
cancel_work_sync(&dac33->work);
mutex_lock(&dac33->mutex);
@@ -619,7 +715,7 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
struct snd_soc_codec *codec = socdev->card->codec;
struct tlv320dac33_priv *dac33 = codec->private_data;
unsigned int oscset, ratioset, pwr_ctrl, reg_tmp;
- u8 aictrl_a, fifoctrl_a;
+ u8 aictrl_a, aictrl_b, fifoctrl_a;
switch (substream->runtime->rate) {
case 44100:
@@ -675,7 +771,8 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
dac33_oscwait(codec);
- if (dac33->nsample_switch) {
+ if (dac33->fifo_mode) {
+ /* Generic for all FIFO modes */
/* 50-51 : ASRC Control registers */
dac33_write(codec, DAC33_ASRC_CTRL_A, (1 << 4)); /* div=2 */
dac33_write(codec, DAC33_ASRC_CTRL_B, 1); /* ??? */
@@ -685,38 +782,91 @@ static int dac33_prepare_chip(struct snd_pcm_substream *substream)
/* Set interrupts to high active */
dac33_write(codec, DAC33_INTP_CTRL_A, DAC33_INTPM_AHIGH);
-
- dac33_write(codec, DAC33_FIFO_IRQ_MODE_B,
- DAC33_ATM(DAC33_FIFO_IRQ_MODE_LEVEL));
- dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MAT);
} else {
+ /* FIFO bypass mode */
/* 50-51 : ASRC Control registers */
dac33_write(codec, DAC33_ASRC_CTRL_A, DAC33_SRCBYP);
dac33_write(codec, DAC33_ASRC_CTRL_B, 0); /* ??? */
}
- if (dac33->nsample_switch)
+ /* Interrupt behaviour configuration */
+ switch (dac33->fifo_mode) {
+ case DAC33_FIFO_MODE1:
+ dac33_write(codec, DAC33_FIFO_IRQ_MODE_B,
+ DAC33_ATM(DAC33_FIFO_IRQ_MODE_LEVEL));
+ dac33_write(codec, DAC33_FIFO_IRQ_MASK, DAC33_MAT);
+ break;
+ case DAC33_FIFO_MODE7:
+ /* Disable all interrupts */
+ dac33_write(codec, DAC33_FIFO_IRQ_MASK, 0);
+ break;
+ default:
+ /* in FIFO bypass mode, the interrupts are not used */
+ break;
+ }
+
+ aictrl_b = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_B);
+
+ switch (dac33->fifo_mode) {
+ case DAC33_FIFO_MODE1:
+ /*
+ * For mode1:
+ * Disable the FIFO bypass (Enable the use of FIFO)
+ * Select nSample mode
+ * BCLK is only running when data is needed by DAC33
+ */
fifoctrl_a &= ~DAC33_FBYPAS;
- else
+ fifoctrl_a &= ~DAC33_FAUTO;
+ aictrl_b &= ~DAC33_BCLKON;
+ break;
+ case DAC33_FIFO_MODE7:
+ /*
+ * For mode1:
+ * Disable the FIFO bypass (Enable the use of FIFO)
+ * Select Threshold mode
+ * BCLK is only running when data is needed by DAC33
+ */
+ fifoctrl_a &= ~DAC33_FBYPAS;
+ fifoctrl_a |= DAC33_FAUTO;
+ aictrl_b &= ~DAC33_BCLKON;
+ break;
+ default:
+ /*
+ * For FIFO bypass mode:
+ * Enable the FIFO bypass (Disable the FIFO use)
+ * Set the BCLK as continous
+ */
fifoctrl_a |= DAC33_FBYPAS;
- dac33_write(codec, DAC33_FIFO_CTRL_A, fifoctrl_a);
+ aictrl_b |= DAC33_BCLKON;
+ break;
+ }
+ dac33_write(codec, DAC33_FIFO_CTRL_A, fifoctrl_a);
dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_A, aictrl_a);
- reg_tmp = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_B);
- if (dac33->nsample_switch)
- reg_tmp &= ~DAC33_BCLKON;
- else
- reg_tmp |= DAC33_BCLKON;
- dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_B, reg_tmp);
+ dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_B, aictrl_b);
- if (dac33->nsample_switch) {
+ switch (dac33->fifo_mode) {
+ case DAC33_FIFO_MODE1:
/* 20: BCLK divide ratio */
dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 3);
dac33_write16(codec, DAC33_ATHR_MSB,
DAC33_THRREG(dac33->alarm_threshold));
- } else {
+ break;
+ case DAC33_FIFO_MODE7:
+ /*
+ * Configure the threshold levels, and leave 10 sample space
+ * at the bottom, and also at the top of the FIFO
+ */
+ dac33_write16(codec, DAC33_UTHR_MSB,
+ DAC33_THRREG(DAC33_BUFFER_SIZE_SAMPLES - 10));
+ dac33_write16(codec, DAC33_LTHR_MSB,
+ DAC33_THRREG(10));
+ break;
+ default:
+ /* BYPASS mode */
dac33_write(codec, DAC33_SER_AUDIOIF_CTRL_C, 32);
+ break;
}
mutex_unlock(&dac33->mutex);
@@ -789,7 +939,7 @@ static int dac33_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (dac33->nsample_switch) {
+ if (dac33->fifo_mode) {
dac33->state = DAC33_PREFILL;
queue_work(dac33->dac33_wq, &dac33->work);
}
@@ -797,7 +947,7 @@ static int dac33_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (dac33->nsample_switch) {
+ if (dac33->fifo_mode) {
dac33->state = DAC33_FLUSH;
queue_work(dac33->dac33_wq, &dac33->work);
}
@@ -843,6 +993,7 @@ static int dac33_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
+ struct tlv320dac33_priv *dac33 = codec->private_data;
u8 aictrl_a, aictrl_b;
aictrl_a = dac33_read_reg_cache(codec, DAC33_SER_AUDIOIF_CTRL_A);
@@ -855,7 +1006,11 @@ static int dac33_set_dai_fmt(struct snd_soc_dai *codec_dai,
break;
case SND_SOC_DAIFMT_CBS_CFS:
/* Codec Slave */
- aictrl_a &= ~(DAC33_MSBCLK | DAC33_MSWCLK);
+ if (dac33->fifo_mode) {
+ dev_err(codec->dev, "FIFO mode requires master mode\n");
+ return -EINVAL;
+ } else
+ aictrl_a &= ~(DAC33_MSBCLK | DAC33_MSWCLK);
break;
default:
return -EINVAL;
@@ -959,6 +1114,9 @@ static int dac33_soc_probe(struct platform_device *pdev)
/* power on device */
dac33_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ /* Bias level configuration has enabled regulator an extra time */
+ regulator_bulk_disable(ARRAY_SIZE(dac33->supplies), dac33->supplies);
+
return 0;
pcm_err:
@@ -1033,13 +1191,13 @@ struct snd_soc_dai dac33_dai = {
};
EXPORT_SYMBOL_GPL(dac33_dai);
-static int dac33_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int __devinit dac33_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct tlv320dac33_platform_data *pdata;
struct tlv320dac33_priv *dac33;
struct snd_soc_codec *codec;
- int ret = 0;
+ int ret, i;
if (client->dev.platform_data == NULL) {
dev_err(&client->dev, "Platform data not set\n");
@@ -1083,7 +1241,7 @@ static int dac33_i2c_probe(struct i2c_client *client,
dac33->irq = client->irq;
dac33->nsample = NSAMPLE_MAX;
/* Disable FIFO use by default */
- dac33->nsample_switch = 0;
+ dac33->fifo_mode = DAC33_FIFO_BYPASS;
tlv320dac33_codec = codec;
@@ -1130,6 +1288,24 @@ static int dac33_i2c_probe(struct i2c_client *client,
}
}
+ for (i = 0; i < ARRAY_SIZE(dac33->supplies); i++)
+ dac33->supplies[i].supply = dac33_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(dac33->supplies),
+ dac33->supplies);
+
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err_get;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(dac33->supplies),
+ dac33->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_enable;
+ }
+
ret = snd_soc_register_codec(codec);
if (ret != 0) {
dev_err(codec->dev, "Failed to register codec: %d\n", ret);
@@ -1149,6 +1325,10 @@ static int dac33_i2c_probe(struct i2c_client *client,
return ret;
error_codec:
+ regulator_bulk_disable(ARRAY_SIZE(dac33->supplies), dac33->supplies);
+err_enable:
+ regulator_bulk_free(ARRAY_SIZE(dac33->supplies), dac33->supplies);
+err_get:
if (dac33->irq >= 0) {
free_irq(dac33->irq, &dac33->codec);
destroy_workqueue(dac33->dac33_wq);
@@ -1165,7 +1345,7 @@ error_reg:
return ret;
}
-static int dac33_i2c_remove(struct i2c_client *client)
+static int __devexit dac33_i2c_remove(struct i2c_client *client)
{
struct tlv320dac33_priv *dac33;
@@ -1177,6 +1357,8 @@ static int dac33_i2c_remove(struct i2c_client *client)
if (dac33->irq >= 0)
free_irq(dac33->irq, &dac33->codec);
+ regulator_bulk_free(ARRAY_SIZE(dac33->supplies), dac33->supplies);
+
destroy_workqueue(dac33->dac33_wq);
snd_soc_unregister_dai(&dac33_dai);
snd_soc_unregister_codec(&dac33->codec);
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 6b650c1aa3d1..958d49c969ac 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -25,6 +25,7 @@
#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <sound/tpa6130a2-plat.h>
#include <sound/soc.h>
#include <sound/soc-dapm.h>
@@ -34,10 +35,22 @@
static struct i2c_client *tpa6130a2_client;
+#define TPA6130A2_NUM_SUPPLIES 2
+static const char *tpa6130a2_supply_names[TPA6130A2_NUM_SUPPLIES] = {
+ "CPVSS",
+ "Vdd",
+};
+
+static const char *tpa6140a2_supply_names[TPA6130A2_NUM_SUPPLIES] = {
+ "HPVdd",
+ "AVdd",
+};
+
/* This struct is used to save the context */
struct tpa6130a2_data {
struct mutex mutex;
unsigned char regs[TPA6130A2_CACHEREGNUM];
+ struct regulator_bulk_data supplies[TPA6130A2_NUM_SUPPLIES];
int power_gpio;
unsigned char power_state;
};
@@ -106,10 +119,11 @@ static void tpa6130a2_initialize(void)
tpa6130a2_i2c_write(i, data->regs[i]);
}
-static void tpa6130a2_power(int power)
+static int tpa6130a2_power(int power)
{
struct tpa6130a2_data *data;
u8 val;
+ int ret;
BUG_ON(tpa6130a2_client == NULL);
data = i2c_get_clientdata(tpa6130a2_client);
@@ -117,11 +131,20 @@ static void tpa6130a2_power(int power)
mutex_lock(&data->mutex);
if (power) {
/* Power on */
- if (data->power_gpio >= 0) {
+ if (data->power_gpio >= 0)
gpio_set_value(data->power_gpio, 1);
- data->power_state = 1;
- tpa6130a2_initialize();
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->supplies),
+ data->supplies);
+ if (ret != 0) {
+ dev_err(&tpa6130a2_client->dev,
+ "Failed to enable supplies: %d\n", ret);
+ goto exit;
}
+
+ data->power_state = 1;
+ tpa6130a2_initialize();
+
/* Clear SWS */
val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
val &= ~TPA6130A2_SWS;
@@ -131,13 +154,25 @@ static void tpa6130a2_power(int power)
val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
val |= TPA6130A2_SWS;
tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
+
/* Power off */
- if (data->power_gpio >= 0) {
+ if (data->power_gpio >= 0)
gpio_set_value(data->power_gpio, 0);
- data->power_state = 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(data->supplies),
+ data->supplies);
+ if (ret != 0) {
+ dev_err(&tpa6130a2_client->dev,
+ "Failed to disable supplies: %d\n", ret);
+ goto exit;
}
+
+ data->power_state = 0;
}
+
+exit:
mutex_unlock(&data->mutex);
+ return ret;
}
static int tpa6130a2_get_reg(struct snd_kcontrol *kcontrol,
@@ -237,12 +272,8 @@ static const struct snd_kcontrol_new tpa6130a2_controls[] = {
*/
static void tpa6130a2_channel_enable(u8 channel, int enable)
{
- struct tpa6130a2_data *data;
u8 val;
- BUG_ON(tpa6130a2_client == NULL);
- data = i2c_get_clientdata(tpa6130a2_client);
-
if (enable) {
/* Enable channel */
/* Enable amplifier */
@@ -299,15 +330,17 @@ static int tpa6130a2_right_event(struct snd_soc_dapm_widget *w,
static int tpa6130a2_supply_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ int ret = 0;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
- tpa6130a2_power(1);
+ ret = tpa6130a2_power(1);
break;
case SND_SOC_DAPM_POST_PMD:
- tpa6130a2_power(0);
+ ret = tpa6130a2_power(0);
break;
}
- return 0;
+ return ret;
}
static const struct snd_soc_dapm_widget tpa6130a2_dapm_widgets[] = {
@@ -346,13 +379,13 @@ int tpa6130a2_add_controls(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(tpa6130a2_add_controls);
-static int tpa6130a2_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int __devinit tpa6130a2_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
struct device *dev;
struct tpa6130a2_data *data;
struct tpa6130a2_platform_data *pdata;
- int ret;
+ int i, ret;
dev = &client->dev;
@@ -387,15 +420,38 @@ static int tpa6130a2_probe(struct i2c_client *client,
if (ret < 0) {
dev_err(dev, "Failed to request power GPIO (%d)\n",
data->power_gpio);
- goto fail;
+ goto err_gpio;
}
gpio_direction_output(data->power_gpio, 0);
- } else {
- data->power_state = 1;
- tpa6130a2_initialize();
}
- tpa6130a2_power(1);
+ switch (pdata->id) {
+ case TPA6130A2:
+ for (i = 0; i < ARRAY_SIZE(data->supplies); i++)
+ data->supplies[i].supply = tpa6130a2_supply_names[i];
+ break;
+ case TPA6140A2:
+ for (i = 0; i < ARRAY_SIZE(data->supplies); i++)
+ data->supplies[i].supply = tpa6140a2_supply_names[i];;
+ break;
+ default:
+ dev_warn(dev, "Unknown TPA model (%d). Assuming 6130A2\n",
+ pdata->id);
+ for (i = 0; i < ARRAY_SIZE(data->supplies); i++)
+ data->supplies[i].supply = tpa6130a2_supply_names[i];
+ }
+
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
+ data->supplies);
+ if (ret != 0) {
+ dev_err(dev, "Failed to request supplies: %d\n", ret);
+ goto err_regulator;
+ }
+
+ ret = tpa6130a2_power(1);
+ if (ret != 0)
+ goto err_power;
+
/* Read version */
ret = tpa6130a2_i2c_read(TPA6130A2_REG_VERSION) &
@@ -404,10 +460,18 @@ static int tpa6130a2_probe(struct i2c_client *client,
dev_warn(dev, "UNTESTED version detected (%d)\n", ret);
/* Disable the chip */
- tpa6130a2_power(0);
+ ret = tpa6130a2_power(0);
+ if (ret != 0)
+ goto err_power;
return 0;
-fail:
+
+err_power:
+ regulator_bulk_free(ARRAY_SIZE(data->supplies), data->supplies);
+err_regulator:
+ if (data->power_gpio >= 0)
+ gpio_free(data->power_gpio);
+err_gpio:
kfree(data);
i2c_set_clientdata(tpa6130a2_client, NULL);
tpa6130a2_client = NULL;
@@ -415,7 +479,7 @@ fail:
return ret;
}
-static int tpa6130a2_remove(struct i2c_client *client)
+static int __devexit tpa6130a2_remove(struct i2c_client *client)
{
struct tpa6130a2_data *data = i2c_get_clientdata(client);
@@ -423,6 +487,9 @@ static int tpa6130a2_remove(struct i2c_client *client)
if (data->power_gpio >= 0)
gpio_free(data->power_gpio);
+
+ regulator_bulk_free(ARRAY_SIZE(data->supplies), data->supplies);
+
kfree(data);
tpa6130a2_client = NULL;
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index ebbf11b653a4..df2c6d9617fb 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -925,7 +925,7 @@ static int wm8350_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
iface |= 0x3 << 8;
break;
case SND_SOC_DAIFMT_DSP_B:
- iface |= 0x3 << 8; /* lg not sure which mode */
+ iface |= 0x3 << 8 | WM8350_AIF_LRCLK_INV;
break;
default:
return -EINVAL;
@@ -1349,7 +1349,7 @@ static irqreturn_t wm8350_hp_jack_handler(int irq, void *data)
int mask;
struct wm8350_jack_data *jack = NULL;
- switch (irq) {
+ switch (irq - wm8350->irq_base) {
case WM8350_IRQ_CODEC_JCK_DET_L:
jack = &priv->hpl;
mask = WM8350_JACK_L_LVL;
@@ -1424,7 +1424,7 @@ int wm8350_hp_jack_detect(struct snd_soc_codec *codec, enum wm8350_jack which,
wm8350_set_bits(wm8350, WM8350_JACK_DETECT, ena);
/* Sync status */
- wm8350_hp_jack_handler(irq, priv);
+ wm8350_hp_jack_handler(irq + wm8350->irq_base, priv);
return 0;
}
@@ -1521,8 +1521,8 @@ static int wm8350_remove(struct platform_device *pdev)
WM8350_JDL_ENA | WM8350_JDR_ENA);
wm8350_clear_bits(wm8350, WM8350_POWER_MGMT_4, WM8350_TOCLK_ENA);
- wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L);
- wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_L, priv);
+ wm8350_free_irq(wm8350, WM8350_IRQ_CODEC_JCK_DET_R, priv);
priv->hpl.jack = NULL;
priv->hpr.jack = NULL;
diff --git a/sound/soc/codecs/wm8727.c b/sound/soc/codecs/wm8727.c
index d8ffbd641d71..63a254e293ca 100644
--- a/sound/soc/codecs/wm8727.c
+++ b/sound/soc/codecs/wm8727.c
@@ -44,23 +44,16 @@ struct snd_soc_dai wm8727_dai = {
};
EXPORT_SYMBOL_GPL(wm8727_dai);
+static struct snd_soc_codec *wm8727_codec;
+
static int wm8727_soc_probe(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec;
int ret = 0;
- codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
- if (codec == NULL)
- return -ENOMEM;
- mutex_init(&codec->mutex);
- codec->name = "WM8727";
- codec->owner = THIS_MODULE;
- codec->dai = &wm8727_dai;
- codec->num_dai = 1;
- socdev->card->codec = codec;
- INIT_LIST_HEAD(&codec->dapm_widgets);
- INIT_LIST_HEAD(&codec->dapm_paths);
+ BUG_ON(!wm8727_codec);
+
+ socdev->card->codec = wm8727_codec;
/* register pcms */
ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
@@ -80,12 +73,9 @@ pcm_err:
static int wm8727_soc_remove(struct platform_device *pdev)
{
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
- struct snd_soc_codec *codec = socdev->card->codec;
- if (codec == NULL)
- return 0;
snd_soc_free_pcms(socdev);
- kfree(codec);
+
return 0;
}
@@ -98,13 +88,55 @@ EXPORT_SYMBOL_GPL(soc_codec_dev_wm8727);
static __devinit int wm8727_platform_probe(struct platform_device *pdev)
{
+ struct snd_soc_codec *codec;
+ int ret;
+
+ if (wm8727_codec) {
+ dev_err(&pdev->dev, "Another WM8727 is registered\n");
+ return -EBUSY;
+ }
+
+ codec = kzalloc(sizeof(struct snd_soc_codec), GFP_KERNEL);
+ if (codec == NULL)
+ return -ENOMEM;
+ wm8727_codec = codec;
+
+ platform_set_drvdata(pdev, codec);
+
+ mutex_init(&codec->mutex);
+ codec->dev = &pdev->dev;
+ codec->name = "WM8727";
+ codec->owner = THIS_MODULE;
+ codec->dai = &wm8727_dai;
+ codec->num_dai = 1;
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
wm8727_dai.dev = &pdev->dev;
- return snd_soc_register_dai(&wm8727_dai);
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to register CODEC: %d\n", ret);
+ goto err;
+ }
+
+ ret = snd_soc_register_dai(&wm8727_dai);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to register DAI: %d\n", ret);
+ goto err_codec;
+ }
+
+err_codec:
+ snd_soc_unregister_codec(codec);
+err:
+ kfree(codec);
+ return ret;
}
static int __devexit wm8727_platform_remove(struct platform_device *pdev)
{
snd_soc_unregister_dai(&wm8727_dai);
+ snd_soc_unregister_codec(platform_get_drvdata(pdev));
return 0;
}
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 3a497810f939..5a2619dbf283 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -456,6 +456,9 @@ static int wm8731_resume(struct platform_device *pdev)
/* Sync reg_cache with the hardware */
for (i = 0; i < ARRAY_SIZE(wm8731_reg); i++) {
+ if (cache[i] == wm8731_reg[i])
+ continue;
+
data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
data[1] = cache[i] & 0x00ff;
codec->hw_write(codec->control_data, data, 2);
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index d6850dacda29..c2444e7c8480 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1507,10 +1507,6 @@ static int wm8753_suspend(struct platform_device *pdev, pm_message_t state)
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- /* we only need to suspend if we are a valid card */
- if (!codec->card)
- return 0;
-
wm8753_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -1523,10 +1519,6 @@ static int wm8753_resume(struct platform_device *pdev)
u8 data[2];
u16 *cache = codec->reg_cache;
- /* we only need to resume if we are a valid card */
- if (!codec->card)
- return 0;
-
/* Sync reg_cache with the hardware */
for (i = 0; i < ARRAY_SIZE(wm8753_reg); i++) {
if (i + 1 == WM8753_RESET)
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index ab2c0da18091..44e7d9d82f87 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -406,6 +406,8 @@ static int wm8776_resume(struct platform_device *pdev)
/* Sync reg_cache with the hardware */
for (i = 0; i < ARRAY_SIZE(wm8776_reg); i++) {
+ if (cache[i] == wm8776_reg[i])
+ continue;
data[0] = (i << 1) | ((cache[i] >> 8) & 0x0001);
data[1] = cache[i] & 0x00ff;
codec->hw_write(codec->control_data, data, 2);
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
new file mode 100644
index 000000000000..992a7f23df5c
--- /dev/null
+++ b/sound/soc/codecs/wm8904.c
@@ -0,0 +1,2556 @@
+/*
+ * wm8904.c -- WM8904 ALSA SoC Audio driver
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/wm8904.h>
+
+#include "wm8904.h"
+
+static struct snd_soc_codec *wm8904_codec;
+struct snd_soc_codec_device soc_codec_dev_wm8904;
+
+#define WM8904_NUM_DCS_CHANNELS 4
+
+#define WM8904_NUM_SUPPLIES 5
+static const char *wm8904_supply_names[WM8904_NUM_SUPPLIES] = {
+ "DCVDD",
+ "DBVDD",
+ "AVDD",
+ "CPVDD",
+ "MICVDD",
+};
+
+/* codec private data */
+struct wm8904_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[WM8904_MAX_REGISTER + 1];
+
+ struct regulator_bulk_data supplies[WM8904_NUM_SUPPLIES];
+
+ struct wm8904_pdata *pdata;
+
+ int deemph;
+
+ /* Platform provided DRC configuration */
+ const char **drc_texts;
+ int drc_cfg;
+ struct soc_enum drc_enum;
+
+ /* Platform provided ReTune mobile configuration */
+ int num_retune_mobile_texts;
+ const char **retune_mobile_texts;
+ int retune_mobile_cfg;
+ struct soc_enum retune_mobile_enum;
+
+ /* FLL setup */
+ int fll_src;
+ int fll_fref;
+ int fll_fout;
+
+ /* Clocking configuration */
+ unsigned int mclk_rate;
+ int sysclk_src;
+ unsigned int sysclk_rate;
+
+ int tdm_width;
+ int tdm_slots;
+ int bclk;
+ int fs;
+
+ /* DC servo configuration - cached offset values */
+ int dcs_state[WM8904_NUM_DCS_CHANNELS];
+};
+
+static const u16 wm8904_reg[WM8904_MAX_REGISTER + 1] = {
+ 0x8904, /* R0 - SW Reset and ID */
+ 0x0000, /* R1 - Revision */
+ 0x0000, /* R2 */
+ 0x0000, /* R3 */
+ 0x0018, /* R4 - Bias Control 0 */
+ 0x0000, /* R5 - VMID Control 0 */
+ 0x0000, /* R6 - Mic Bias Control 0 */
+ 0x0000, /* R7 - Mic Bias Control 1 */
+ 0x0001, /* R8 - Analogue DAC 0 */
+ 0x9696, /* R9 - mic Filter Control */
+ 0x0001, /* R10 - Analogue ADC 0 */
+ 0x0000, /* R11 */
+ 0x0000, /* R12 - Power Management 0 */
+ 0x0000, /* R13 */
+ 0x0000, /* R14 - Power Management 2 */
+ 0x0000, /* R15 - Power Management 3 */
+ 0x0000, /* R16 */
+ 0x0000, /* R17 */
+ 0x0000, /* R18 - Power Management 6 */
+ 0x0000, /* R19 */
+ 0x945E, /* R20 - Clock Rates 0 */
+ 0x0C05, /* R21 - Clock Rates 1 */
+ 0x0006, /* R22 - Clock Rates 2 */
+ 0x0000, /* R23 */
+ 0x0050, /* R24 - Audio Interface 0 */
+ 0x000A, /* R25 - Audio Interface 1 */
+ 0x00E4, /* R26 - Audio Interface 2 */
+ 0x0040, /* R27 - Audio Interface 3 */
+ 0x0000, /* R28 */
+ 0x0000, /* R29 */
+ 0x00C0, /* R30 - DAC Digital Volume Left */
+ 0x00C0, /* R31 - DAC Digital Volume Right */
+ 0x0000, /* R32 - DAC Digital 0 */
+ 0x0008, /* R33 - DAC Digital 1 */
+ 0x0000, /* R34 */
+ 0x0000, /* R35 */
+ 0x00C0, /* R36 - ADC Digital Volume Left */
+ 0x00C0, /* R37 - ADC Digital Volume Right */
+ 0x0010, /* R38 - ADC Digital 0 */
+ 0x0000, /* R39 - Digital Microphone 0 */
+ 0x01AF, /* R40 - DRC 0 */
+ 0x3248, /* R41 - DRC 1 */
+ 0x0000, /* R42 - DRC 2 */
+ 0x0000, /* R43 - DRC 3 */
+ 0x0085, /* R44 - Analogue Left Input 0 */
+ 0x0085, /* R45 - Analogue Right Input 0 */
+ 0x0044, /* R46 - Analogue Left Input 1 */
+ 0x0044, /* R47 - Analogue Right Input 1 */
+ 0x0000, /* R48 */
+ 0x0000, /* R49 */
+ 0x0000, /* R50 */
+ 0x0000, /* R51 */
+ 0x0000, /* R52 */
+ 0x0000, /* R53 */
+ 0x0000, /* R54 */
+ 0x0000, /* R55 */
+ 0x0000, /* R56 */
+ 0x002D, /* R57 - Analogue OUT1 Left */
+ 0x002D, /* R58 - Analogue OUT1 Right */
+ 0x0039, /* R59 - Analogue OUT2 Left */
+ 0x0039, /* R60 - Analogue OUT2 Right */
+ 0x0000, /* R61 - Analogue OUT12 ZC */
+ 0x0000, /* R62 */
+ 0x0000, /* R63 */
+ 0x0000, /* R64 */
+ 0x0000, /* R65 */
+ 0x0000, /* R66 */
+ 0x0000, /* R67 - DC Servo 0 */
+ 0x0000, /* R68 - DC Servo 1 */
+ 0xAAAA, /* R69 - DC Servo 2 */
+ 0x0000, /* R70 */
+ 0xAAAA, /* R71 - DC Servo 4 */
+ 0xAAAA, /* R72 - DC Servo 5 */
+ 0x0000, /* R73 - DC Servo 6 */
+ 0x0000, /* R74 - DC Servo 7 */
+ 0x0000, /* R75 - DC Servo 8 */
+ 0x0000, /* R76 - DC Servo 9 */
+ 0x0000, /* R77 - DC Servo Readback 0 */
+ 0x0000, /* R78 */
+ 0x0000, /* R79 */
+ 0x0000, /* R80 */
+ 0x0000, /* R81 */
+ 0x0000, /* R82 */
+ 0x0000, /* R83 */
+ 0x0000, /* R84 */
+ 0x0000, /* R85 */
+ 0x0000, /* R86 */
+ 0x0000, /* R87 */
+ 0x0000, /* R88 */
+ 0x0000, /* R89 */
+ 0x0000, /* R90 - Analogue HP 0 */
+ 0x0000, /* R91 */
+ 0x0000, /* R92 */
+ 0x0000, /* R93 */
+ 0x0000, /* R94 - Analogue Lineout 0 */
+ 0x0000, /* R95 */
+ 0x0000, /* R96 */
+ 0x0000, /* R97 */
+ 0x0000, /* R98 - Charge Pump 0 */
+ 0x0000, /* R99 */
+ 0x0000, /* R100 */
+ 0x0000, /* R101 */
+ 0x0000, /* R102 */
+ 0x0000, /* R103 */
+ 0x0004, /* R104 - Class W 0 */
+ 0x0000, /* R105 */
+ 0x0000, /* R106 */
+ 0x0000, /* R107 */
+ 0x0000, /* R108 - Write Sequencer 0 */
+ 0x0000, /* R109 - Write Sequencer 1 */
+ 0x0000, /* R110 - Write Sequencer 2 */
+ 0x0000, /* R111 - Write Sequencer 3 */
+ 0x0000, /* R112 - Write Sequencer 4 */
+ 0x0000, /* R113 */
+ 0x0000, /* R114 */
+ 0x0000, /* R115 */
+ 0x0000, /* R116 - FLL Control 1 */
+ 0x0007, /* R117 - FLL Control 2 */
+ 0x0000, /* R118 - FLL Control 3 */
+ 0x2EE0, /* R119 - FLL Control 4 */
+ 0x0004, /* R120 - FLL Control 5 */
+ 0x0014, /* R121 - GPIO Control 1 */
+ 0x0010, /* R122 - GPIO Control 2 */
+ 0x0010, /* R123 - GPIO Control 3 */
+ 0x0000, /* R124 - GPIO Control 4 */
+ 0x0000, /* R125 */
+ 0x0000, /* R126 - Digital Pulls */
+ 0x0000, /* R127 - Interrupt Status */
+ 0xFFFF, /* R128 - Interrupt Status Mask */
+ 0x0000, /* R129 - Interrupt Polarity */
+ 0x0000, /* R130 - Interrupt Debounce */
+ 0x0000, /* R131 */
+ 0x0000, /* R132 */
+ 0x0000, /* R133 */
+ 0x0000, /* R134 - EQ1 */
+ 0x000C, /* R135 - EQ2 */
+ 0x000C, /* R136 - EQ3 */
+ 0x000C, /* R137 - EQ4 */
+ 0x000C, /* R138 - EQ5 */
+ 0x000C, /* R139 - EQ6 */
+ 0x0FCA, /* R140 - EQ7 */
+ 0x0400, /* R141 - EQ8 */
+ 0x00D8, /* R142 - EQ9 */
+ 0x1EB5, /* R143 - EQ10 */
+ 0xF145, /* R144 - EQ11 */
+ 0x0B75, /* R145 - EQ12 */
+ 0x01C5, /* R146 - EQ13 */
+ 0x1C58, /* R147 - EQ14 */
+ 0xF373, /* R148 - EQ15 */
+ 0x0A54, /* R149 - EQ16 */
+ 0x0558, /* R150 - EQ17 */
+ 0x168E, /* R151 - EQ18 */
+ 0xF829, /* R152 - EQ19 */
+ 0x07AD, /* R153 - EQ20 */
+ 0x1103, /* R154 - EQ21 */
+ 0x0564, /* R155 - EQ22 */
+ 0x0559, /* R156 - EQ23 */
+ 0x4000, /* R157 - EQ24 */
+ 0x0000, /* R158 */
+ 0x0000, /* R159 */
+ 0x0000, /* R160 */
+ 0x0000, /* R161 - Control Interface Test 1 */
+ 0x0000, /* R162 */
+ 0x0000, /* R163 */
+ 0x0000, /* R164 */
+ 0x0000, /* R165 */
+ 0x0000, /* R166 */
+ 0x0000, /* R167 */
+ 0x0000, /* R168 */
+ 0x0000, /* R169 */
+ 0x0000, /* R170 */
+ 0x0000, /* R171 */
+ 0x0000, /* R172 */
+ 0x0000, /* R173 */
+ 0x0000, /* R174 */
+ 0x0000, /* R175 */
+ 0x0000, /* R176 */
+ 0x0000, /* R177 */
+ 0x0000, /* R178 */
+ 0x0000, /* R179 */
+ 0x0000, /* R180 */
+ 0x0000, /* R181 */
+ 0x0000, /* R182 */
+ 0x0000, /* R183 */
+ 0x0000, /* R184 */
+ 0x0000, /* R185 */
+ 0x0000, /* R186 */
+ 0x0000, /* R187 */
+ 0x0000, /* R188 */
+ 0x0000, /* R189 */
+ 0x0000, /* R190 */
+ 0x0000, /* R191 */
+ 0x0000, /* R192 */
+ 0x0000, /* R193 */
+ 0x0000, /* R194 */
+ 0x0000, /* R195 */
+ 0x0000, /* R196 */
+ 0x0000, /* R197 */
+ 0x0000, /* R198 */
+ 0x0000, /* R199 */
+ 0x0000, /* R200 */
+ 0x0000, /* R201 */
+ 0x0000, /* R202 */
+ 0x0000, /* R203 */
+ 0x0000, /* R204 - Analogue Output Bias 0 */
+ 0x0000, /* R205 */
+ 0x0000, /* R206 */
+ 0x0000, /* R207 */
+ 0x0000, /* R208 */
+ 0x0000, /* R209 */
+ 0x0000, /* R210 */
+ 0x0000, /* R211 */
+ 0x0000, /* R212 */
+ 0x0000, /* R213 */
+ 0x0000, /* R214 */
+ 0x0000, /* R215 */
+ 0x0000, /* R216 */
+ 0x0000, /* R217 */
+ 0x0000, /* R218 */
+ 0x0000, /* R219 */
+ 0x0000, /* R220 */
+ 0x0000, /* R221 */
+ 0x0000, /* R222 */
+ 0x0000, /* R223 */
+ 0x0000, /* R224 */
+ 0x0000, /* R225 */
+ 0x0000, /* R226 */
+ 0x0000, /* R227 */
+ 0x0000, /* R228 */
+ 0x0000, /* R229 */
+ 0x0000, /* R230 */
+ 0x0000, /* R231 */
+ 0x0000, /* R232 */
+ 0x0000, /* R233 */
+ 0x0000, /* R234 */
+ 0x0000, /* R235 */
+ 0x0000, /* R236 */
+ 0x0000, /* R237 */
+ 0x0000, /* R238 */
+ 0x0000, /* R239 */
+ 0x0000, /* R240 */
+ 0x0000, /* R241 */
+ 0x0000, /* R242 */
+ 0x0000, /* R243 */
+ 0x0000, /* R244 */
+ 0x0000, /* R245 */
+ 0x0000, /* R246 */
+ 0x0000, /* R247 - FLL NCO Test 0 */
+ 0x0019, /* R248 - FLL NCO Test 1 */
+};
+
+static struct {
+ int readable;
+ int writable;
+ int vol;
+} wm8904_access[] = {
+ { 0xFFFF, 0xFFFF, 1 }, /* R0 - SW Reset and ID */
+ { 0x0000, 0x0000, 0 }, /* R1 - Revision */
+ { 0x0000, 0x0000, 0 }, /* R2 */
+ { 0x0000, 0x0000, 0 }, /* R3 */
+ { 0x001F, 0x001F, 0 }, /* R4 - Bias Control 0 */
+ { 0x0047, 0x0047, 0 }, /* R5 - VMID Control 0 */
+ { 0x007F, 0x007F, 0 }, /* R6 - Mic Bias Control 0 */
+ { 0xC007, 0xC007, 0 }, /* R7 - Mic Bias Control 1 */
+ { 0x001E, 0x001E, 0 }, /* R8 - Analogue DAC 0 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R9 - mic Filter Control */
+ { 0x0001, 0x0001, 0 }, /* R10 - Analogue ADC 0 */
+ { 0x0000, 0x0000, 0 }, /* R11 */
+ { 0x0003, 0x0003, 0 }, /* R12 - Power Management 0 */
+ { 0x0000, 0x0000, 0 }, /* R13 */
+ { 0x0003, 0x0003, 0 }, /* R14 - Power Management 2 */
+ { 0x0003, 0x0003, 0 }, /* R15 - Power Management 3 */
+ { 0x0000, 0x0000, 0 }, /* R16 */
+ { 0x0000, 0x0000, 0 }, /* R17 */
+ { 0x000F, 0x000F, 0 }, /* R18 - Power Management 6 */
+ { 0x0000, 0x0000, 0 }, /* R19 */
+ { 0x7001, 0x7001, 0 }, /* R20 - Clock Rates 0 */
+ { 0x3C07, 0x3C07, 0 }, /* R21 - Clock Rates 1 */
+ { 0xD00F, 0xD00F, 0 }, /* R22 - Clock Rates 2 */
+ { 0x0000, 0x0000, 0 }, /* R23 */
+ { 0x1FFF, 0x1FFF, 0 }, /* R24 - Audio Interface 0 */
+ { 0x3DDF, 0x3DDF, 0 }, /* R25 - Audio Interface 1 */
+ { 0x0F1F, 0x0F1F, 0 }, /* R26 - Audio Interface 2 */
+ { 0x0FFF, 0x0FFF, 0 }, /* R27 - Audio Interface 3 */
+ { 0x0000, 0x0000, 0 }, /* R28 */
+ { 0x0000, 0x0000, 0 }, /* R29 */
+ { 0x00FF, 0x01FF, 0 }, /* R30 - DAC Digital Volume Left */
+ { 0x00FF, 0x01FF, 0 }, /* R31 - DAC Digital Volume Right */
+ { 0x0FFF, 0x0FFF, 0 }, /* R32 - DAC Digital 0 */
+ { 0x1E4E, 0x1E4E, 0 }, /* R33 - DAC Digital 1 */
+ { 0x0000, 0x0000, 0 }, /* R34 */
+ { 0x0000, 0x0000, 0 }, /* R35 */
+ { 0x00FF, 0x01FF, 0 }, /* R36 - ADC Digital Volume Left */
+ { 0x00FF, 0x01FF, 0 }, /* R37 - ADC Digital Volume Right */
+ { 0x0073, 0x0073, 0 }, /* R38 - ADC Digital 0 */
+ { 0x1800, 0x1800, 0 }, /* R39 - Digital Microphone 0 */
+ { 0xDFEF, 0xDFEF, 0 }, /* R40 - DRC 0 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R41 - DRC 1 */
+ { 0x003F, 0x003F, 0 }, /* R42 - DRC 2 */
+ { 0x07FF, 0x07FF, 0 }, /* R43 - DRC 3 */
+ { 0x009F, 0x009F, 0 }, /* R44 - Analogue Left Input 0 */
+ { 0x009F, 0x009F, 0 }, /* R45 - Analogue Right Input 0 */
+ { 0x007F, 0x007F, 0 }, /* R46 - Analogue Left Input 1 */
+ { 0x007F, 0x007F, 0 }, /* R47 - Analogue Right Input 1 */
+ { 0x0000, 0x0000, 0 }, /* R48 */
+ { 0x0000, 0x0000, 0 }, /* R49 */
+ { 0x0000, 0x0000, 0 }, /* R50 */
+ { 0x0000, 0x0000, 0 }, /* R51 */
+ { 0x0000, 0x0000, 0 }, /* R52 */
+ { 0x0000, 0x0000, 0 }, /* R53 */
+ { 0x0000, 0x0000, 0 }, /* R54 */
+ { 0x0000, 0x0000, 0 }, /* R55 */
+ { 0x0000, 0x0000, 0 }, /* R56 */
+ { 0x017F, 0x01FF, 0 }, /* R57 - Analogue OUT1 Left */
+ { 0x017F, 0x01FF, 0 }, /* R58 - Analogue OUT1 Right */
+ { 0x017F, 0x01FF, 0 }, /* R59 - Analogue OUT2 Left */
+ { 0x017F, 0x01FF, 0 }, /* R60 - Analogue OUT2 Right */
+ { 0x000F, 0x000F, 0 }, /* R61 - Analogue OUT12 ZC */
+ { 0x0000, 0x0000, 0 }, /* R62 */
+ { 0x0000, 0x0000, 0 }, /* R63 */
+ { 0x0000, 0x0000, 0 }, /* R64 */
+ { 0x0000, 0x0000, 0 }, /* R65 */
+ { 0x0000, 0x0000, 0 }, /* R66 */
+ { 0x000F, 0x000F, 0 }, /* R67 - DC Servo 0 */
+ { 0xFFFF, 0xFFFF, 1 }, /* R68 - DC Servo 1 */
+ { 0x0F0F, 0x0F0F, 0 }, /* R69 - DC Servo 2 */
+ { 0x0000, 0x0000, 0 }, /* R70 */
+ { 0x007F, 0x007F, 0 }, /* R71 - DC Servo 4 */
+ { 0x007F, 0x007F, 0 }, /* R72 - DC Servo 5 */
+ { 0x00FF, 0x00FF, 1 }, /* R73 - DC Servo 6 */
+ { 0x00FF, 0x00FF, 1 }, /* R74 - DC Servo 7 */
+ { 0x00FF, 0x00FF, 1 }, /* R75 - DC Servo 8 */
+ { 0x00FF, 0x00FF, 1 }, /* R76 - DC Servo 9 */
+ { 0x0FFF, 0x0000, 1 }, /* R77 - DC Servo Readback 0 */
+ { 0x0000, 0x0000, 0 }, /* R78 */
+ { 0x0000, 0x0000, 0 }, /* R79 */
+ { 0x0000, 0x0000, 0 }, /* R80 */
+ { 0x0000, 0x0000, 0 }, /* R81 */
+ { 0x0000, 0x0000, 0 }, /* R82 */
+ { 0x0000, 0x0000, 0 }, /* R83 */
+ { 0x0000, 0x0000, 0 }, /* R84 */
+ { 0x0000, 0x0000, 0 }, /* R85 */
+ { 0x0000, 0x0000, 0 }, /* R86 */
+ { 0x0000, 0x0000, 0 }, /* R87 */
+ { 0x0000, 0x0000, 0 }, /* R88 */
+ { 0x0000, 0x0000, 0 }, /* R89 */
+ { 0x00FF, 0x00FF, 0 }, /* R90 - Analogue HP 0 */
+ { 0x0000, 0x0000, 0 }, /* R91 */
+ { 0x0000, 0x0000, 0 }, /* R92 */
+ { 0x0000, 0x0000, 0 }, /* R93 */
+ { 0x00FF, 0x00FF, 0 }, /* R94 - Analogue Lineout 0 */
+ { 0x0000, 0x0000, 0 }, /* R95 */
+ { 0x0000, 0x0000, 0 }, /* R96 */
+ { 0x0000, 0x0000, 0 }, /* R97 */
+ { 0x0001, 0x0001, 0 }, /* R98 - Charge Pump 0 */
+ { 0x0000, 0x0000, 0 }, /* R99 */
+ { 0x0000, 0x0000, 0 }, /* R100 */
+ { 0x0000, 0x0000, 0 }, /* R101 */
+ { 0x0000, 0x0000, 0 }, /* R102 */
+ { 0x0000, 0x0000, 0 }, /* R103 */
+ { 0x0001, 0x0001, 0 }, /* R104 - Class W 0 */
+ { 0x0000, 0x0000, 0 }, /* R105 */
+ { 0x0000, 0x0000, 0 }, /* R106 */
+ { 0x0000, 0x0000, 0 }, /* R107 */
+ { 0x011F, 0x011F, 0 }, /* R108 - Write Sequencer 0 */
+ { 0x7FFF, 0x7FFF, 0 }, /* R109 - Write Sequencer 1 */
+ { 0x4FFF, 0x4FFF, 0 }, /* R110 - Write Sequencer 2 */
+ { 0x003F, 0x033F, 0 }, /* R111 - Write Sequencer 3 */
+ { 0x03F1, 0x0000, 0 }, /* R112 - Write Sequencer 4 */
+ { 0x0000, 0x0000, 0 }, /* R113 */
+ { 0x0000, 0x0000, 0 }, /* R114 */
+ { 0x0000, 0x0000, 0 }, /* R115 */
+ { 0x0007, 0x0007, 0 }, /* R116 - FLL Control 1 */
+ { 0x3F77, 0x3F77, 0 }, /* R117 - FLL Control 2 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R118 - FLL Control 3 */
+ { 0x7FEF, 0x7FEF, 0 }, /* R119 - FLL Control 4 */
+ { 0x001B, 0x001B, 0 }, /* R120 - FLL Control 5 */
+ { 0x003F, 0x003F, 0 }, /* R121 - GPIO Control 1 */
+ { 0x003F, 0x003F, 0 }, /* R122 - GPIO Control 2 */
+ { 0x003F, 0x003F, 0 }, /* R123 - GPIO Control 3 */
+ { 0x038F, 0x038F, 0 }, /* R124 - GPIO Control 4 */
+ { 0x0000, 0x0000, 0 }, /* R125 */
+ { 0x00FF, 0x00FF, 0 }, /* R126 - Digital Pulls */
+ { 0x07FF, 0x03FF, 1 }, /* R127 - Interrupt Status */
+ { 0x03FF, 0x03FF, 0 }, /* R128 - Interrupt Status Mask */
+ { 0x03FF, 0x03FF, 0 }, /* R129 - Interrupt Polarity */
+ { 0x03FF, 0x03FF, 0 }, /* R130 - Interrupt Debounce */
+ { 0x0000, 0x0000, 0 }, /* R131 */
+ { 0x0000, 0x0000, 0 }, /* R132 */
+ { 0x0000, 0x0000, 0 }, /* R133 */
+ { 0x0001, 0x0001, 0 }, /* R134 - EQ1 */
+ { 0x001F, 0x001F, 0 }, /* R135 - EQ2 */
+ { 0x001F, 0x001F, 0 }, /* R136 - EQ3 */
+ { 0x001F, 0x001F, 0 }, /* R137 - EQ4 */
+ { 0x001F, 0x001F, 0 }, /* R138 - EQ5 */
+ { 0x001F, 0x001F, 0 }, /* R139 - EQ6 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R140 - EQ7 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R141 - EQ8 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R142 - EQ9 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R143 - EQ10 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R144 - EQ11 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R145 - EQ12 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R146 - EQ13 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R147 - EQ14 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R148 - EQ15 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R149 - EQ16 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R150 - EQ17 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R151wm8523_dai - EQ18 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R152 - EQ19 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R153 - EQ20 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R154 - EQ21 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R155 - EQ22 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R156 - EQ23 */
+ { 0xFFFF, 0xFFFF, 0 }, /* R157 - EQ24 */
+ { 0x0000, 0x0000, 0 }, /* R158 */
+ { 0x0000, 0x0000, 0 }, /* R159 */
+ { 0x0000, 0x0000, 0 }, /* R160 */
+ { 0x0002, 0x0002, 0 }, /* R161 - Control Interface Test 1 */
+ { 0x0000, 0x0000, 0 }, /* R162 */
+ { 0x0000, 0x0000, 0 }, /* R163 */
+ { 0x0000, 0x0000, 0 }, /* R164 */
+ { 0x0000, 0x0000, 0 }, /* R165 */
+ { 0x0000, 0x0000, 0 }, /* R166 */
+ { 0x0000, 0x0000, 0 }, /* R167 */
+ { 0x0000, 0x0000, 0 }, /* R168 */
+ { 0x0000, 0x0000, 0 }, /* R169 */
+ { 0x0000, 0x0000, 0 }, /* R170 */
+ { 0x0000, 0x0000, 0 }, /* R171 */
+ { 0x0000, 0x0000, 0 }, /* R172 */
+ { 0x0000, 0x0000, 0 }, /* R173 */
+ { 0x0000, 0x0000, 0 }, /* R174 */
+ { 0x0000, 0x0000, 0 }, /* R175 */
+ { 0x0000, 0x0000, 0 }, /* R176 */
+ { 0x0000, 0x0000, 0 }, /* R177 */
+ { 0x0000, 0x0000, 0 }, /* R178 */
+ { 0x0000, 0x0000, 0 }, /* R179 */
+ { 0x0000, 0x0000, 0 }, /* R180 */
+ { 0x0000, 0x0000, 0 }, /* R181 */
+ { 0x0000, 0x0000, 0 }, /* R182 */
+ { 0x0000, 0x0000, 0 }, /* R183 */
+ { 0x0000, 0x0000, 0 }, /* R184 */
+ { 0x0000, 0x0000, 0 }, /* R185 */
+ { 0x0000, 0x0000, 0 }, /* R186 */
+ { 0x0000, 0x0000, 0 }, /* R187 */
+ { 0x0000, 0x0000, 0 }, /* R188 */
+ { 0x0000, 0x0000, 0 }, /* R189 */
+ { 0x0000, 0x0000, 0 }, /* R190 */
+ { 0x0000, 0x0000, 0 }, /* R191 */
+ { 0x0000, 0x0000, 0 }, /* R192 */
+ { 0x0000, 0x0000, 0 }, /* R193 */
+ { 0x0000, 0x0000, 0 }, /* R194 */
+ { 0x0000, 0x0000, 0 }, /* R195 */
+ { 0x0000, 0x0000, 0 }, /* R196 */
+ { 0x0000, 0x0000, 0 }, /* R197 */
+ { 0x0000, 0x0000, 0 }, /* R198 */
+ { 0x0000, 0x0000, 0 }, /* R199 */
+ { 0x0000, 0x0000, 0 }, /* R200 */
+ { 0x0000, 0x0000, 0 }, /* R201 */
+ { 0x0000, 0x0000, 0 }, /* R202 */
+ { 0x0000, 0x0000, 0 }, /* R203 */
+ { 0x0070, 0x0070, 0 }, /* R204 - Analogue Output Bias 0 */
+ { 0x0000, 0x0000, 0 }, /* R205 */
+ { 0x0000, 0x0000, 0 }, /* R206 */
+ { 0x0000, 0x0000, 0 }, /* R207 */
+ { 0x0000, 0x0000, 0 }, /* R208 */
+ { 0x0000, 0x0000, 0 }, /* R209 */
+ { 0x0000, 0x0000, 0 }, /* R210 */
+ { 0x0000, 0x0000, 0 }, /* R211 */
+ { 0x0000, 0x0000, 0 }, /* R212 */
+ { 0x0000, 0x0000, 0 }, /* R213 */
+ { 0x0000, 0x0000, 0 }, /* R214 */
+ { 0x0000, 0x0000, 0 }, /* R215 */
+ { 0x0000, 0x0000, 0 }, /* R216 */
+ { 0x0000, 0x0000, 0 }, /* R217 */
+ { 0x0000, 0x0000, 0 }, /* R218 */
+ { 0x0000, 0x0000, 0 }, /* R219 */
+ { 0x0000, 0x0000, 0 }, /* R220 */
+ { 0x0000, 0x0000, 0 }, /* R221 */
+ { 0x0000, 0x0000, 0 }, /* R222 */
+ { 0x0000, 0x0000, 0 }, /* R223 */
+ { 0x0000, 0x0000, 0 }, /* R224 */
+ { 0x0000, 0x0000, 0 }, /* R225 */
+ { 0x0000, 0x0000, 0 }, /* R226 */
+ { 0x0000, 0x0000, 0 }, /* R227 */
+ { 0x0000, 0x0000, 0 }, /* R228 */
+ { 0x0000, 0x0000, 0 }, /* R229 */
+ { 0x0000, 0x0000, 0 }, /* R230 */
+ { 0x0000, 0x0000, 0 }, /* R231 */
+ { 0x0000, 0x0000, 0 }, /* R232 */
+ { 0x0000, 0x0000, 0 }, /* R233 */
+ { 0x0000, 0x0000, 0 }, /* R234 */
+ { 0x0000, 0x0000, 0 }, /* R235 */
+ { 0x0000, 0x0000, 0 }, /* R236 */
+ { 0x0000, 0x0000, 0 }, /* R237 */
+ { 0x0000, 0x0000, 0 }, /* R238 */
+ { 0x0000, 0x0000, 0 }, /* R239 */
+ { 0x0000, 0x0000, 0 }, /* R240 */
+ { 0x0000, 0x0000, 0 }, /* R241 */
+ { 0x0000, 0x0000, 0 }, /* R242 */
+ { 0x0000, 0x0000, 0 }, /* R243 */
+ { 0x0000, 0x0000, 0 }, /* R244 */
+ { 0x0000, 0x0000, 0 }, /* R245 */
+ { 0x0000, 0x0000, 0 }, /* R246 */
+ { 0x0001, 0x0001, 0 }, /* R247 - FLL NCO Test 0 */
+ { 0x003F, 0x003F, 0 }, /* R248 - FLL NCO Test 1 */
+};
+
+static int wm8904_volatile_register(unsigned int reg)
+{
+ return wm8904_access[reg].vol;
+}
+
+static int wm8904_reset(struct snd_soc_codec *codec)
+{
+ return snd_soc_write(codec, WM8904_SW_RESET_AND_ID, 0);
+}
+
+static int wm8904_configure_clocking(struct snd_soc_codec *codec)
+{
+ struct wm8904_priv *wm8904 = codec->private_data;
+ unsigned int clock0, clock2, rate;
+
+ /* Gate the clock while we're updating to avoid misclocking */
+ clock2 = snd_soc_read(codec, WM8904_CLOCK_RATES_2);
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_2,
+ WM8904_SYSCLK_SRC, 0);
+
+ /* This should be done on init() for bypass paths */
+ switch (wm8904->sysclk_src) {
+ case WM8904_CLK_MCLK:
+ dev_dbg(codec->dev, "Using %dHz MCLK\n", wm8904->mclk_rate);
+
+ clock2 &= ~WM8904_SYSCLK_SRC;
+ rate = wm8904->mclk_rate;
+
+ /* Ensure the FLL is stopped */
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_OSC_ENA | WM8904_FLL_ENA, 0);
+ break;
+
+ case WM8904_CLK_FLL:
+ dev_dbg(codec->dev, "Using %dHz FLL clock\n",
+ wm8904->fll_fout);
+
+ clock2 |= WM8904_SYSCLK_SRC;
+ rate = wm8904->fll_fout;
+ break;
+
+ default:
+ dev_err(codec->dev, "System clock not configured\n");
+ return -EINVAL;
+ }
+
+ /* SYSCLK shouldn't be over 13.5MHz */
+ if (rate > 13500000) {
+ clock0 = WM8904_MCLK_DIV;
+ wm8904->sysclk_rate = rate / 2;
+ } else {
+ clock0 = 0;
+ wm8904->sysclk_rate = rate;
+ }
+
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_0, WM8904_MCLK_DIV,
+ clock0);
+
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_2,
+ WM8904_CLK_SYS_ENA | WM8904_SYSCLK_SRC, clock2);
+
+ dev_dbg(codec->dev, "CLK_SYS is %dHz\n", wm8904->sysclk_rate);
+
+ return 0;
+}
+
+static void wm8904_set_drc(struct snd_soc_codec *codec)
+{
+ struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_pdata *pdata = wm8904->pdata;
+ int save, i;
+
+ /* Save any enables; the configuration should clear them. */
+ save = snd_soc_read(codec, WM8904_DRC_0);
+
+ for (i = 0; i < WM8904_DRC_REGS; i++)
+ snd_soc_update_bits(codec, WM8904_DRC_0 + i, 0xffff,
+ pdata->drc_cfgs[wm8904->drc_cfg].regs[i]);
+
+ /* Reenable the DRC */
+ snd_soc_update_bits(codec, WM8904_DRC_0,
+ WM8904_DRC_ENA | WM8904_DRC_DAC_PATH, save);
+}
+
+static int wm8904_put_drc_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_pdata *pdata = wm8904->pdata;
+ int value = ucontrol->value.integer.value[0];
+
+ if (value >= pdata->num_drc_cfgs)
+ return -EINVAL;
+
+ wm8904->drc_cfg = value;
+
+ wm8904_set_drc(codec);
+
+ return 0;
+}
+
+static int wm8904_get_drc_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8904_priv *wm8904 = codec->private_data;
+
+ ucontrol->value.enumerated.item[0] = wm8904->drc_cfg;
+
+ return 0;
+}
+
+static void wm8904_set_retune_mobile(struct snd_soc_codec *codec)
+{
+ struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_pdata *pdata = wm8904->pdata;
+ int best, best_val, save, i, cfg;
+
+ if (!pdata || !wm8904->num_retune_mobile_texts)
+ return;
+
+ /* Find the version of the currently selected configuration
+ * with the nearest sample rate. */
+ cfg = wm8904->retune_mobile_cfg;
+ best = 0;
+ best_val = INT_MAX;
+ for (i = 0; i < pdata->num_retune_mobile_cfgs; i++) {
+ if (strcmp(pdata->retune_mobile_cfgs[i].name,
+ wm8904->retune_mobile_texts[cfg]) == 0 &&
+ abs(pdata->retune_mobile_cfgs[i].rate
+ - wm8904->fs) < best_val) {
+ best = i;
+ best_val = abs(pdata->retune_mobile_cfgs[i].rate
+ - wm8904->fs);
+ }
+ }
+
+ dev_dbg(codec->dev, "ReTune Mobile %s/%dHz for %dHz sample rate\n",
+ pdata->retune_mobile_cfgs[best].name,
+ pdata->retune_mobile_cfgs[best].rate,
+ wm8904->fs);
+
+ /* The EQ will be disabled while reconfiguring it, remember the
+ * current configuration.
+ */
+ save = snd_soc_read(codec, WM8904_EQ1);
+
+ for (i = 0; i < WM8904_EQ_REGS; i++)
+ snd_soc_update_bits(codec, WM8904_EQ1 + i, 0xffff,
+ pdata->retune_mobile_cfgs[best].regs[i]);
+
+ snd_soc_update_bits(codec, WM8904_EQ1, WM8904_EQ_ENA, save);
+}
+
+static int wm8904_put_retune_mobile_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8904_priv *wm8904 = codec->private_data;
+ struct wm8904_pdata *pdata = wm8904->pdata;
+ int value = ucontrol->value.integer.value[0];
+
+ if (value >= pdata->num_retune_mobile_cfgs)
+ return -EINVAL;
+
+ wm8904->retune_mobile_cfg = value;
+
+ wm8904_set_retune_mobile(codec);
+
+ return 0;
+}
+
+static int wm8904_get_retune_mobile_enum(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8904_priv *wm8904 = codec->private_data;
+
+ ucontrol->value.enumerated.item[0] = wm8904->retune_mobile_cfg;
+
+ return 0;
+}
+
+static int deemph_settings[] = { 0, 32000, 44100, 48000 };
+
+static int wm8904_set_deemph(struct snd_soc_codec *codec)
+{
+ struct wm8904_priv *wm8904 = codec->private_data;
+ int val, i, best;
+
+ /* If we're using deemphasis select the nearest available sample
+ * rate.
+ */
+ if (wm8904->deemph) {
+ best = 1;
+ for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
+ if (abs(deemph_settings[i] - wm8904->fs) <
+ abs(deemph_settings[best] - wm8904->fs))
+ best = i;
+ }
+
+ val = best << WM8904_DEEMPH_SHIFT;
+ } else {
+ val = 0;
+ }
+
+ dev_dbg(codec->dev, "Set deemphasis %d\n", val);
+
+ return snd_soc_update_bits(codec, WM8904_DAC_DIGITAL_1,
+ WM8904_DEEMPH_MASK, val);
+}
+
+static int wm8904_get_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8904_priv *wm8904 = codec->private_data;
+
+ return wm8904->deemph;
+}
+
+static int wm8904_put_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8904_priv *wm8904 = codec->private_data;
+ int deemph = ucontrol->value.enumerated.item[0];
+
+ if (deemph > 1)
+ return -EINVAL;
+
+ wm8904->deemph = deemph;
+
+ return wm8904_set_deemph(codec);
+}
+
+static const DECLARE_TLV_DB_SCALE(dac_boost_tlv, 0, 600, 0);
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
+static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0);
+static const DECLARE_TLV_DB_SCALE(sidetone_tlv, -3600, 300, 0);
+static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+
+static const char *input_mode_text[] = {
+ "Single-Ended", "Differential Line", "Differential Mic"
+};
+
+static const struct soc_enum lin_mode =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_LEFT_INPUT_1, 0, 3, input_mode_text);
+
+static const struct soc_enum rin_mode =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_RIGHT_INPUT_1, 0, 3, input_mode_text);
+
+static const char *hpf_mode_text[] = {
+ "Hi-fi", "Voice 1", "Voice 2", "Voice 3"
+};
+
+static const struct soc_enum hpf_mode =
+ SOC_ENUM_SINGLE(WM8904_ADC_DIGITAL_0, 5, 4, hpf_mode_text);
+
+static const struct snd_kcontrol_new wm8904_adc_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Digital Capture Volume", WM8904_ADC_DIGITAL_VOLUME_LEFT,
+ WM8904_ADC_DIGITAL_VOLUME_RIGHT, 1, 119, 0, digital_tlv),
+
+SOC_ENUM("Left Caputure Mode", lin_mode),
+SOC_ENUM("Right Capture Mode", rin_mode),
+
+/* No TLV since it depends on mode */
+SOC_DOUBLE_R("Capture Volume", WM8904_ANALOGUE_LEFT_INPUT_0,
+ WM8904_ANALOGUE_RIGHT_INPUT_0, 0, 31, 0),
+SOC_DOUBLE_R("Capture Switch", WM8904_ANALOGUE_LEFT_INPUT_0,
+ WM8904_ANALOGUE_RIGHT_INPUT_0, 7, 1, 0),
+
+SOC_SINGLE("High Pass Filter Switch", WM8904_ADC_DIGITAL_0, 4, 1, 0),
+SOC_ENUM("High Pass Filter Mode", hpf_mode),
+
+SOC_SINGLE("ADC 128x OSR Switch", WM8904_ANALOGUE_ADC_0, 0, 1, 0),
+};
+
+static const char *drc_path_text[] = {
+ "ADC", "DAC"
+};
+
+static const struct soc_enum drc_path =
+ SOC_ENUM_SINGLE(WM8904_DRC_0, 14, 2, drc_path_text);
+
+static const struct snd_kcontrol_new wm8904_dac_snd_controls[] = {
+SOC_SINGLE_TLV("Digital Playback Boost Volume",
+ WM8904_AUDIO_INTERFACE_0, 9, 3, 0, dac_boost_tlv),
+SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8904_DAC_DIGITAL_VOLUME_LEFT,
+ WM8904_DAC_DIGITAL_VOLUME_RIGHT, 1, 96, 0, digital_tlv),
+
+SOC_DOUBLE_R_TLV("Headphone Volume", WM8904_ANALOGUE_OUT1_LEFT,
+ WM8904_ANALOGUE_OUT1_RIGHT, 0, 63, 0, out_tlv),
+SOC_DOUBLE_R("Headphone Switch", WM8904_ANALOGUE_OUT1_LEFT,
+ WM8904_ANALOGUE_OUT1_RIGHT, 8, 1, 1),
+SOC_DOUBLE_R("Headphone ZC Switch", WM8904_ANALOGUE_OUT1_LEFT,
+ WM8904_ANALOGUE_OUT1_RIGHT, 6, 1, 0),
+
+SOC_DOUBLE_R_TLV("Line Output Volume", WM8904_ANALOGUE_OUT2_LEFT,
+ WM8904_ANALOGUE_OUT2_RIGHT, 0, 63, 0, out_tlv),
+SOC_DOUBLE_R("Line Output Switch", WM8904_ANALOGUE_OUT2_LEFT,
+ WM8904_ANALOGUE_OUT2_RIGHT, 8, 1, 1),
+SOC_DOUBLE_R("Line Output ZC Switch", WM8904_ANALOGUE_OUT2_LEFT,
+ WM8904_ANALOGUE_OUT2_RIGHT, 6, 1, 0),
+
+SOC_SINGLE("EQ Switch", WM8904_EQ1, 0, 1, 0),
+SOC_SINGLE("DRC Switch", WM8904_DRC_0, 15, 1, 0),
+SOC_ENUM("DRC Path", drc_path),
+SOC_SINGLE("DAC OSRx2 Switch", WM8904_DAC_DIGITAL_1, 6, 1, 0),
+SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
+ wm8904_get_deemph, wm8904_put_deemph),
+};
+
+static const struct snd_kcontrol_new wm8904_snd_controls[] = {
+SOC_DOUBLE_TLV("Digital Sidetone Volume", WM8904_DAC_DIGITAL_0, 4, 8, 15, 0,
+ sidetone_tlv),
+};
+
+static const struct snd_kcontrol_new wm8904_eq_controls[] = {
+SOC_SINGLE_TLV("EQ1 Volume", WM8904_EQ2, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 Volume", WM8904_EQ3, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 Volume", WM8904_EQ4, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 Volume", WM8904_EQ5, 0, 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ5 Volume", WM8904_EQ6, 0, 24, 0, eq_tlv),
+};
+
+static int cp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ BUG_ON(event != SND_SOC_DAPM_POST_PMU);
+
+ /* Maximum startup time */
+ udelay(500);
+
+ return 0;
+}
+
+static int sysclk_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8904_priv *wm8904 = codec->private_data;
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* If we're using the FLL then we only start it when
+ * required; we assume that the configuration has been
+ * done previously and all we need to do is kick it
+ * off.
+ */
+ switch (wm8904->sysclk_src) {
+ case WM8904_CLK_FLL:
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_OSC_ENA,
+ WM8904_FLL_OSC_ENA);
+
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_ENA,
+ WM8904_FLL_ENA);
+ break;
+
+ default:
+ break;
+ }
+ break;
+
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_OSC_ENA | WM8904_FLL_ENA, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int out_pga_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct wm8904_priv *wm8904 = codec->private_data;
+ int reg, val;
+ int dcs_mask;
+ int dcs_l, dcs_r;
+ int dcs_l_reg, dcs_r_reg;
+ int timeout;
+
+ /* This code is shared between HP and LINEOUT; we do all our
+ * power management in stereo pairs to avoid latency issues so
+ * we reuse shift to identify which rather than strcmp() the
+ * name. */
+ reg = w->shift;
+
+ switch (reg) {
+ case WM8904_ANALOGUE_HP_0:
+ dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1;
+ dcs_r_reg = WM8904_DC_SERVO_8;
+ dcs_l_reg = WM8904_DC_SERVO_9;
+ dcs_l = 0;
+ dcs_r = 1;
+ break;
+ case WM8904_ANALOGUE_LINEOUT_0:
+ dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3;
+ dcs_r_reg = WM8904_DC_SERVO_6;
+ dcs_l_reg = WM8904_DC_SERVO_7;
+ dcs_l = 2;
+ dcs_r = 3;
+ break;
+ default:
+ BUG();
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* Power on the amplifier */
+ snd_soc_update_bits(codec, reg,
+ WM8904_HPL_ENA | WM8904_HPR_ENA,
+ WM8904_HPL_ENA | WM8904_HPR_ENA);
+
+ /* Enable the first stage */
+ snd_soc_update_bits(codec, reg,
+ WM8904_HPL_ENA_DLY | WM8904_HPR_ENA_DLY,
+ WM8904_HPL_ENA_DLY | WM8904_HPR_ENA_DLY);
+
+ /* Power up the DC servo */
+ snd_soc_update_bits(codec, WM8904_DC_SERVO_0,
+ dcs_mask, dcs_mask);
+
+ /* Either calibrate the DC servo or restore cached state
+ * if we have that.
+ */
+ if (wm8904->dcs_state[dcs_l] || wm8904->dcs_state[dcs_r]) {
+ dev_dbg(codec->dev, "Restoring DC servo state\n");
+
+ snd_soc_write(codec, dcs_l_reg,
+ wm8904->dcs_state[dcs_l]);
+ snd_soc_write(codec, dcs_r_reg,
+ wm8904->dcs_state[dcs_r]);
+
+ snd_soc_write(codec, WM8904_DC_SERVO_1, dcs_mask);
+
+ timeout = 20;
+ } else {
+ dev_dbg(codec->dev, "Calibrating DC servo\n");
+
+ snd_soc_write(codec, WM8904_DC_SERVO_1,
+ dcs_mask << WM8904_DCS_TRIG_STARTUP_0_SHIFT);
+
+ timeout = 500;
+ }
+
+ /* Wait for DC servo to complete */
+ dcs_mask <<= WM8904_DCS_CAL_COMPLETE_SHIFT;
+ do {
+ val = snd_soc_read(codec, WM8904_DC_SERVO_READBACK_0);
+ if ((val & dcs_mask) == dcs_mask)
+ break;
+
+ msleep(1);
+ } while (--timeout);
+
+ if ((val & dcs_mask) != dcs_mask)
+ dev_warn(codec->dev, "DC servo timed out\n");
+ else
+ dev_dbg(codec->dev, "DC servo ready\n");
+
+ /* Enable the output stage */
+ snd_soc_update_bits(codec, reg,
+ WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
+ WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP);
+
+ /* Unshort the output itself */
+ snd_soc_update_bits(codec, reg,
+ WM8904_HPL_RMV_SHORT |
+ WM8904_HPR_RMV_SHORT,
+ WM8904_HPL_RMV_SHORT |
+ WM8904_HPR_RMV_SHORT);
+
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ /* Short the output */
+ snd_soc_update_bits(codec, reg,
+ WM8904_HPL_RMV_SHORT |
+ WM8904_HPR_RMV_SHORT, 0);
+
+ /* Cache the DC servo configuration; this will be
+ * invalidated if we change the configuration. */
+ wm8904->dcs_state[dcs_l] = snd_soc_read(codec, dcs_l_reg);
+ wm8904->dcs_state[dcs_r] = snd_soc_read(codec, dcs_r_reg);
+
+ snd_soc_update_bits(codec, WM8904_DC_SERVO_0,
+ dcs_mask, 0);
+
+ /* Disable the amplifier input and output stages */
+ snd_soc_update_bits(codec, reg,
+ WM8904_HPL_ENA | WM8904_HPR_ENA |
+ WM8904_HPL_ENA_DLY | WM8904_HPR_ENA_DLY |
+ WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
+ 0);
+ break;
+ }
+
+ return 0;
+}
+
+static const char *lin_text[] = {
+ "IN1L", "IN2L", "IN3L"
+};
+
+static const struct soc_enum lin_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_LEFT_INPUT_1, 2, 3, lin_text);
+
+static const struct snd_kcontrol_new lin_mux =
+ SOC_DAPM_ENUM("Left Capture Mux", lin_enum);
+
+static const struct soc_enum lin_inv_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_LEFT_INPUT_1, 4, 3, lin_text);
+
+static const struct snd_kcontrol_new lin_inv_mux =
+ SOC_DAPM_ENUM("Left Capture Inveting Mux", lin_inv_enum);
+
+static const char *rin_text[] = {
+ "IN1R", "IN2R", "IN3R"
+};
+
+static const struct soc_enum rin_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_RIGHT_INPUT_1, 2, 3, rin_text);
+
+static const struct snd_kcontrol_new rin_mux =
+ SOC_DAPM_ENUM("Right Capture Mux", rin_enum);
+
+static const struct soc_enum rin_inv_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_RIGHT_INPUT_1, 4, 3, rin_text);
+
+static const struct snd_kcontrol_new rin_inv_mux =
+ SOC_DAPM_ENUM("Right Capture Inveting Mux", rin_inv_enum);
+
+static const char *aif_text[] = {
+ "Left", "Right"
+};
+
+static const struct soc_enum aifoutl_enum =
+ SOC_ENUM_SINGLE(WM8904_AUDIO_INTERFACE_0, 7, 2, aif_text);
+
+static const struct snd_kcontrol_new aifoutl_mux =
+ SOC_DAPM_ENUM("AIFOUTL Mux", aifoutl_enum);
+
+static const struct soc_enum aifoutr_enum =
+ SOC_ENUM_SINGLE(WM8904_AUDIO_INTERFACE_0, 6, 2, aif_text);
+
+static const struct snd_kcontrol_new aifoutr_mux =
+ SOC_DAPM_ENUM("AIFOUTR Mux", aifoutr_enum);
+
+static const struct soc_enum aifinl_enum =
+ SOC_ENUM_SINGLE(WM8904_AUDIO_INTERFACE_0, 5, 2, aif_text);
+
+static const struct snd_kcontrol_new aifinl_mux =
+ SOC_DAPM_ENUM("AIFINL Mux", aifinl_enum);
+
+static const struct soc_enum aifinr_enum =
+ SOC_ENUM_SINGLE(WM8904_AUDIO_INTERFACE_0, 4, 2, aif_text);
+
+static const struct snd_kcontrol_new aifinr_mux =
+ SOC_DAPM_ENUM("AIFINR Mux", aifinr_enum);
+
+static const struct snd_soc_dapm_widget wm8904_core_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("SYSCLK", WM8904_CLOCK_RATES_2, 2, 0, sysclk_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("CLK_DSP", WM8904_CLOCK_RATES_2, 1, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("TOCLK", WM8904_CLOCK_RATES_2, 0, 0, NULL, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8904_adc_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("IN1L"),
+SND_SOC_DAPM_INPUT("IN1R"),
+SND_SOC_DAPM_INPUT("IN2L"),
+SND_SOC_DAPM_INPUT("IN2R"),
+SND_SOC_DAPM_INPUT("IN3L"),
+SND_SOC_DAPM_INPUT("IN3R"),
+
+SND_SOC_DAPM_MICBIAS("MICBIAS", WM8904_MIC_BIAS_CONTROL_0, 0, 0),
+
+SND_SOC_DAPM_MUX("Left Capture Mux", SND_SOC_NOPM, 0, 0, &lin_mux),
+SND_SOC_DAPM_MUX("Left Capture Inverting Mux", SND_SOC_NOPM, 0, 0,
+ &lin_inv_mux),
+SND_SOC_DAPM_MUX("Right Capture Mux", SND_SOC_NOPM, 0, 0, &rin_mux),
+SND_SOC_DAPM_MUX("Right Capture Inverting Mux", SND_SOC_NOPM, 0, 0,
+ &rin_inv_mux),
+
+SND_SOC_DAPM_PGA("Left Capture PGA", WM8904_POWER_MANAGEMENT_0, 1, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("Right Capture PGA", WM8904_POWER_MANAGEMENT_0, 0, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_ADC("ADCL", NULL, WM8904_POWER_MANAGEMENT_6, 1, 0),
+SND_SOC_DAPM_ADC("ADCR", NULL, WM8904_POWER_MANAGEMENT_6, 0, 0),
+
+SND_SOC_DAPM_MUX("AIFOUTL Mux", SND_SOC_NOPM, 0, 0, &aifoutl_mux),
+SND_SOC_DAPM_MUX("AIFOUTR Mux", SND_SOC_NOPM, 0, 0, &aifoutr_mux),
+
+SND_SOC_DAPM_AIF_OUT("AIFOUTL", "Capture", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_OUT("AIFOUTR", "Capture", 1, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8904_dac_dapm_widgets[] = {
+SND_SOC_DAPM_AIF_IN("AIFINL", "Playback", 0, SND_SOC_NOPM, 0, 0),
+SND_SOC_DAPM_AIF_IN("AIFINR", "Playback", 1, SND_SOC_NOPM, 0, 0),
+
+SND_SOC_DAPM_MUX("DACL Mux", SND_SOC_NOPM, 0, 0, &aifinl_mux),
+SND_SOC_DAPM_MUX("DACR Mux", SND_SOC_NOPM, 0, 0, &aifinr_mux),
+
+SND_SOC_DAPM_DAC("DACL", NULL, WM8904_POWER_MANAGEMENT_6, 3, 0),
+SND_SOC_DAPM_DAC("DACR", NULL, WM8904_POWER_MANAGEMENT_6, 2, 0),
+
+SND_SOC_DAPM_SUPPLY("Charge pump", WM8904_CHARGE_PUMP_0, 0, 0, cp_event,
+ SND_SOC_DAPM_POST_PMU),
+
+SND_SOC_DAPM_PGA("HPL PGA", WM8904_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA("HPR PGA", WM8904_POWER_MANAGEMENT_2, 0, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("LINEL PGA", WM8904_POWER_MANAGEMENT_3, 1, 0, NULL, 0),
+SND_SOC_DAPM_PGA("LINER PGA", WM8904_POWER_MANAGEMENT_3, 0, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA_E("Headphone Output", SND_SOC_NOPM, WM8904_ANALOGUE_HP_0,
+ 0, NULL, 0, out_pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+SND_SOC_DAPM_PGA_E("Line Output", SND_SOC_NOPM, WM8904_ANALOGUE_LINEOUT_0,
+ 0, NULL, 0, out_pga_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+SND_SOC_DAPM_OUTPUT("HPOUTL"),
+SND_SOC_DAPM_OUTPUT("HPOUTR"),
+SND_SOC_DAPM_OUTPUT("LINEOUTL"),
+SND_SOC_DAPM_OUTPUT("LINEOUTR"),
+};
+
+static const char *out_mux_text[] = {
+ "DAC", "Bypass"
+};
+
+static const struct soc_enum hpl_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_OUT12_ZC, 3, 2, out_mux_text);
+
+static const struct snd_kcontrol_new hpl_mux =
+ SOC_DAPM_ENUM("HPL Mux", hpl_enum);
+
+static const struct soc_enum hpr_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_OUT12_ZC, 2, 2, out_mux_text);
+
+static const struct snd_kcontrol_new hpr_mux =
+ SOC_DAPM_ENUM("HPR Mux", hpr_enum);
+
+static const struct soc_enum linel_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_OUT12_ZC, 1, 2, out_mux_text);
+
+static const struct snd_kcontrol_new linel_mux =
+ SOC_DAPM_ENUM("LINEL Mux", linel_enum);
+
+static const struct soc_enum liner_enum =
+ SOC_ENUM_SINGLE(WM8904_ANALOGUE_OUT12_ZC, 0, 2, out_mux_text);
+
+static const struct snd_kcontrol_new liner_mux =
+ SOC_DAPM_ENUM("LINEL Mux", liner_enum);
+
+static const char *sidetone_text[] = {
+ "None", "Left", "Right"
+};
+
+static const struct soc_enum dacl_sidetone_enum =
+ SOC_ENUM_SINGLE(WM8904_DAC_DIGITAL_0, 2, 3, sidetone_text);
+
+static const struct snd_kcontrol_new dacl_sidetone_mux =
+ SOC_DAPM_ENUM("Left Sidetone Mux", dacl_sidetone_enum);
+
+static const struct soc_enum dacr_sidetone_enum =
+ SOC_ENUM_SINGLE(WM8904_DAC_DIGITAL_0, 0, 3, sidetone_text);
+
+static const struct snd_kcontrol_new dacr_sidetone_mux =
+ SOC_DAPM_ENUM("Right Sidetone Mux", dacr_sidetone_enum);
+
+static const struct snd_soc_dapm_widget wm8904_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("Class G", WM8904_CLASS_W_0, 0, 1, NULL, 0),
+SND_SOC_DAPM_PGA("Left Bypass", SND_SOC_NOPM, 0, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Right Bypass", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+SND_SOC_DAPM_MUX("Left Sidetone", SND_SOC_NOPM, 0, 0, &dacl_sidetone_mux),
+SND_SOC_DAPM_MUX("Right Sidetone", SND_SOC_NOPM, 0, 0, &dacr_sidetone_mux),
+
+SND_SOC_DAPM_MUX("HPL Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
+SND_SOC_DAPM_MUX("HPR Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
+SND_SOC_DAPM_MUX("LINEL Mux", SND_SOC_NOPM, 0, 0, &linel_mux),
+SND_SOC_DAPM_MUX("LINER Mux", SND_SOC_NOPM, 0, 0, &liner_mux),
+};
+
+static const struct snd_soc_dapm_route core_intercon[] = {
+ { "CLK_DSP", NULL, "SYSCLK" },
+ { "TOCLK", NULL, "SYSCLK" },
+};
+
+static const struct snd_soc_dapm_route adc_intercon[] = {
+ { "Left Capture Mux", "IN1L", "IN1L" },
+ { "Left Capture Mux", "IN2L", "IN2L" },
+ { "Left Capture Mux", "IN3L", "IN3L" },
+
+ { "Left Capture Inverting Mux", "IN1L", "IN1L" },
+ { "Left Capture Inverting Mux", "IN2L", "IN2L" },
+ { "Left Capture Inverting Mux", "IN3L", "IN3L" },
+
+ { "Right Capture Mux", "IN1R", "IN1R" },
+ { "Right Capture Mux", "IN2R", "IN2R" },
+ { "Right Capture Mux", "IN3R", "IN3R" },
+
+ { "Right Capture Inverting Mux", "IN1R", "IN1R" },
+ { "Right Capture Inverting Mux", "IN2R", "IN2R" },
+ { "Right Capture Inverting Mux", "IN3R", "IN3R" },
+
+ { "Left Capture PGA", NULL, "Left Capture Mux" },
+ { "Left Capture PGA", NULL, "Left Capture Inverting Mux" },
+
+ { "Right Capture PGA", NULL, "Right Capture Mux" },
+ { "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
+
+ { "AIFOUTL", "Left", "ADCL" },
+ { "AIFOUTL", "Right", "ADCR" },
+ { "AIFOUTR", "Left", "ADCL" },
+ { "AIFOUTR", "Right", "ADCR" },
+
+ { "ADCL", NULL, "CLK_DSP" },
+ { "ADCL", NULL, "Left Capture PGA" },
+
+ { "ADCR", NULL, "CLK_DSP" },
+ { "ADCR", NULL, "Right Capture PGA" },
+};
+
+static const struct snd_soc_dapm_route dac_intercon[] = {
+ { "DACL", "Right", "AIFINR" },
+ { "DACL", "Left", "AIFINL" },
+ { "DACL", NULL, "CLK_DSP" },
+
+ { "DACR", "Right", "AIFINR" },
+ { "DACR", "Left", "AIFINL" },
+ { "DACR", NULL, "CLK_DSP" },
+
+ { "Charge pump", NULL, "SYSCLK" },
+
+ { "Headphone Output", NULL, "HPL PGA" },
+ { "Headphone Output", NULL, "HPR PGA" },
+ { "Headphone Output", NULL, "Charge pump" },
+ { "Headphone Output", NULL, "TOCLK" },
+
+ { "Line Output", NULL, "LINEL PGA" },
+ { "Line Output", NULL, "LINER PGA" },
+ { "Line Output", NULL, "Charge pump" },
+ { "Line Output", NULL, "TOCLK" },
+
+ { "HPOUTL", NULL, "Headphone Output" },
+ { "HPOUTR", NULL, "Headphone Output" },
+
+ { "LINEOUTL", NULL, "Line Output" },
+ { "LINEOUTR", NULL, "Line Output" },
+};
+
+static const struct snd_soc_dapm_route wm8904_intercon[] = {
+ { "Left Sidetone", "Left", "ADCL" },
+ { "Left Sidetone", "Right", "ADCR" },
+ { "DACL", NULL, "Left Sidetone" },
+
+ { "Right Sidetone", "Left", "ADCL" },
+ { "Right Sidetone", "Right", "ADCR" },
+ { "DACR", NULL, "Right Sidetone" },
+
+ { "Left Bypass", NULL, "Class G" },
+ { "Left Bypass", NULL, "Left Capture PGA" },
+
+ { "Right Bypass", NULL, "Class G" },
+ { "Right Bypass", NULL, "Right Capture PGA" },
+
+ { "HPL Mux", "DAC", "DACL" },
+ { "HPL Mux", "Bypass", "Left Bypass" },
+
+ { "HPR Mux", "DAC", "DACR" },
+ { "HPR Mux", "Bypass", "Right Bypass" },
+
+ { "LINEL Mux", "DAC", "DACL" },
+ { "LINEL Mux", "Bypass", "Left Bypass" },
+
+ { "LINER Mux", "DAC", "DACR" },
+ { "LINER Mux", "Bypass", "Right Bypass" },
+
+ { "HPL PGA", NULL, "HPL Mux" },
+ { "HPR PGA", NULL, "HPR Mux" },
+
+ { "LINEL PGA", NULL, "LINEL Mux" },
+ { "LINER PGA", NULL, "LINER Mux" },
+};
+
+static int wm8904_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_add_controls(codec, wm8904_adc_snd_controls,
+ ARRAY_SIZE(wm8904_adc_snd_controls));
+ snd_soc_add_controls(codec, wm8904_dac_snd_controls,
+ ARRAY_SIZE(wm8904_dac_snd_controls));
+ snd_soc_add_controls(codec, wm8904_snd_controls,
+ ARRAY_SIZE(wm8904_snd_controls));
+
+ snd_soc_dapm_new_controls(codec, wm8904_core_dapm_widgets,
+ ARRAY_SIZE(wm8904_core_dapm_widgets));
+ snd_soc_dapm_new_controls(codec, wm8904_adc_dapm_widgets,
+ ARRAY_SIZE(wm8904_adc_dapm_widgets));
+ snd_soc_dapm_new_controls(codec, wm8904_dac_dapm_widgets,
+ ARRAY_SIZE(wm8904_dac_dapm_widgets));
+ snd_soc_dapm_new_controls(codec, wm8904_dapm_widgets,
+ ARRAY_SIZE(wm8904_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, core_intercon,
+ ARRAY_SIZE(core_intercon));
+ snd_soc_dapm_add_routes(codec, adc_intercon, ARRAY_SIZE(adc_intercon));
+ snd_soc_dapm_add_routes(codec, dac_intercon, ARRAY_SIZE(dac_intercon));
+ snd_soc_dapm_add_routes(codec, wm8904_intercon,
+ ARRAY_SIZE(wm8904_intercon));
+
+ snd_soc_dapm_new_widgets(codec);
+ return 0;
+}
+
+static struct {
+ int ratio;
+ unsigned int clk_sys_rate;
+} clk_sys_rates[] = {
+ { 64, 0 },
+ { 128, 1 },
+ { 192, 2 },
+ { 256, 3 },
+ { 384, 4 },
+ { 512, 5 },
+ { 786, 6 },
+ { 1024, 7 },
+ { 1408, 8 },
+ { 1536, 9 },
+};
+
+static struct {
+ int rate;
+ int sample_rate;
+} sample_rates[] = {
+ { 8000, 0 },
+ { 11025, 1 },
+ { 12000, 1 },
+ { 16000, 2 },
+ { 22050, 3 },
+ { 24000, 3 },
+ { 32000, 4 },
+ { 44100, 5 },
+ { 48000, 5 },
+};
+
+static struct {
+ int div; /* *10 due to .5s */
+ int bclk_div;
+} bclk_divs[] = {
+ { 10, 0 },
+ { 15, 1 },
+ { 20, 2 },
+ { 30, 3 },
+ { 40, 4 },
+ { 50, 5 },
+ { 55, 6 },
+ { 60, 7 },
+ { 80, 8 },
+ { 100, 9 },
+ { 110, 10 },
+ { 120, 11 },
+ { 160, 12 },
+ { 200, 13 },
+ { 220, 14 },
+ { 240, 16 },
+ { 200, 17 },
+ { 320, 18 },
+ { 440, 19 },
+ { 480, 20 },
+};
+
+
+static int wm8904_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8904_priv *wm8904 = codec->private_data;
+ int ret, i, best, best_val, cur_val;
+ unsigned int aif1 = 0;
+ unsigned int aif2 = 0;
+ unsigned int aif3 = 0;
+ unsigned int clock1 = 0;
+ unsigned int dac_digital1 = 0;
+
+ /* What BCLK do we need? */
+ wm8904->fs = params_rate(params);
+ if (wm8904->tdm_slots) {
+ dev_dbg(codec->dev, "Configuring for %d %d bit TDM slots\n",
+ wm8904->tdm_slots, wm8904->tdm_width);
+ wm8904->bclk = snd_soc_calc_bclk(wm8904->fs,
+ wm8904->tdm_width, 2,
+ wm8904->tdm_slots);
+ } else {
+ wm8904->bclk = snd_soc_params_to_bclk(params);
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ aif1 |= 0x40;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ aif1 |= 0x80;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ aif1 |= 0xc0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm8904->bclk);
+
+ ret = wm8904_configure_clocking(codec);
+ if (ret != 0)
+ return ret;
+
+ /* Select nearest CLK_SYS_RATE */
+ best = 0;
+ best_val = abs((wm8904->sysclk_rate / clk_sys_rates[0].ratio)
+ - wm8904->fs);
+ for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) {
+ cur_val = abs((wm8904->sysclk_rate /
+ clk_sys_rates[i].ratio) - wm8904->fs);;
+ if (cur_val < best_val) {
+ best = i;
+ best_val = cur_val;
+ }
+ }
+ dev_dbg(codec->dev, "Selected CLK_SYS_RATIO of %d\n",
+ clk_sys_rates[best].ratio);
+ clock1 |= (clk_sys_rates[best].clk_sys_rate
+ << WM8904_CLK_SYS_RATE_SHIFT);
+
+ /* SAMPLE_RATE */
+ best = 0;
+ best_val = abs(wm8904->fs - sample_rates[0].rate);
+ for (i = 1; i < ARRAY_SIZE(sample_rates); i++) {
+ /* Closest match */
+ cur_val = abs(wm8904->fs - sample_rates[i].rate);
+ if (cur_val < best_val) {
+ best = i;
+ best_val = cur_val;
+ }
+ }
+ dev_dbg(codec->dev, "Selected SAMPLE_RATE of %dHz\n",
+ sample_rates[best].rate);
+ clock1 |= (sample_rates[best].sample_rate
+ << WM8904_SAMPLE_RATE_SHIFT);
+
+ /* Enable sloping stopband filter for low sample rates */
+ if (wm8904->fs <= 24000)
+ dac_digital1 |= WM8904_DAC_SB_FILT;
+
+ /* BCLK_DIV */
+ best = 0;
+ best_val = INT_MAX;
+ for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) {
+ cur_val = ((wm8904->sysclk_rate * 10) / bclk_divs[i].div)
+ - wm8904->bclk;
+ if (cur_val < 0) /* Table is sorted */
+ break;
+ if (cur_val < best_val) {
+ best = i;
+ best_val = cur_val;
+ }
+ }
+ wm8904->bclk = (wm8904->sysclk_rate * 10) / bclk_divs[best].div;
+ dev_dbg(codec->dev, "Selected BCLK_DIV of %d for %dHz BCLK\n",
+ bclk_divs[best].div, wm8904->bclk);
+ aif2 |= bclk_divs[best].bclk_div;
+
+ /* LRCLK is a simple fraction of BCLK */
+ dev_dbg(codec->dev, "LRCLK_RATE is %d\n", wm8904->bclk / wm8904->fs);
+ aif3 |= wm8904->bclk / wm8904->fs;
+
+ /* Apply the settings */
+ snd_soc_update_bits(codec, WM8904_DAC_DIGITAL_1,
+ WM8904_DAC_SB_FILT, dac_digital1);
+ snd_soc_update_bits(codec, WM8904_AUDIO_INTERFACE_1,
+ WM8904_AIF_WL_MASK, aif1);
+ snd_soc_update_bits(codec, WM8904_AUDIO_INTERFACE_2,
+ WM8904_BCLK_DIV_MASK, aif2);
+ snd_soc_update_bits(codec, WM8904_AUDIO_INTERFACE_3,
+ WM8904_LRCLK_RATE_MASK, aif3);
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_1,
+ WM8904_SAMPLE_RATE_MASK |
+ WM8904_CLK_SYS_RATE_MASK, clock1);
+
+ /* Update filters for the new settings */
+ wm8904_set_retune_mobile(codec);
+ wm8904_set_deemph(codec);
+
+ return 0;
+}
+
+
+static int wm8904_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8904_priv *priv = codec->private_data;
+
+ switch (clk_id) {
+ case WM8904_CLK_MCLK:
+ priv->sysclk_src = clk_id;
+ priv->mclk_rate = freq;
+ break;
+
+ case WM8904_CLK_FLL:
+ priv->sysclk_src = clk_id;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
+
+ wm8904_configure_clocking(codec);
+
+ return 0;
+}
+
+static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ unsigned int aif1 = 0;
+ unsigned int aif3 = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ aif3 |= WM8904_LRCLK_DIR;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ aif1 |= WM8904_BCLK_DIR;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aif1 |= WM8904_BCLK_DIR;
+ aif3 |= WM8904_LRCLK_DIR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+ aif1 |= WM8904_AIF_LRCLK_INV;
+ case SND_SOC_DAIFMT_DSP_A:
+ aif1 |= 0x3;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ aif1 |= 0x2;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ aif1 |= 0x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ /* frame inversion not valid for DSP modes */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif1 |= WM8904_AIF_BCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_LEFT_J:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aif1 |= WM8904_AIF_BCLK_INV | WM8904_AIF_LRCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif1 |= WM8904_AIF_BCLK_INV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aif1 |= WM8904_AIF_LRCLK_INV;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, WM8904_AUDIO_INTERFACE_1,
+ WM8904_AIF_BCLK_INV | WM8904_AIF_LRCLK_INV |
+ WM8904_AIF_FMT_MASK | WM8904_BCLK_DIR, aif1);
+ snd_soc_update_bits(codec, WM8904_AUDIO_INTERFACE_3,
+ WM8904_LRCLK_DIR, aif3);
+
+ return 0;
+}
+
+
+static int wm8904_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+ unsigned int rx_mask, int slots, int slot_width)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8904_priv *wm8904 = codec->private_data;
+ int aif1 = 0;
+
+ /* Don't need to validate anything if we're turning off TDM */
+ if (slots == 0)
+ goto out;
+
+ /* Note that we allow configurations we can't handle ourselves -
+ * for example, we can generate clocks for slots 2 and up even if
+ * we can't use those slots ourselves.
+ */
+ aif1 |= WM8904_AIFADC_TDM | WM8904_AIFDAC_TDM;
+
+ switch (rx_mask) {
+ case 3:
+ break;
+ case 0xc:
+ aif1 |= WM8904_AIFADC_TDM_CHAN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+
+ switch (tx_mask) {
+ case 3:
+ break;
+ case 0xc:
+ aif1 |= WM8904_AIFDAC_TDM_CHAN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+out:
+ wm8904->tdm_width = slot_width;
+ wm8904->tdm_slots = slots / 2;
+
+ snd_soc_update_bits(codec, WM8904_AUDIO_INTERFACE_1,
+ WM8904_AIFADC_TDM | WM8904_AIFADC_TDM_CHAN |
+ WM8904_AIFDAC_TDM | WM8904_AIFDAC_TDM_CHAN, aif1);
+
+ return 0;
+}
+
+struct _fll_div {
+ u16 fll_fratio;
+ u16 fll_outdiv;
+ u16 fll_clk_ref_div;
+ u16 n;
+ u16 k;
+};
+
+/* The size in bits of the FLL divide multiplied by 10
+ * to allow rounding later */
+#define FIXED_FLL_SIZE ((1 << 16) * 10)
+
+static struct {
+ unsigned int min;
+ unsigned int max;
+ u16 fll_fratio;
+ int ratio;
+} fll_fratios[] = {
+ { 0, 64000, 4, 16 },
+ { 64000, 128000, 3, 8 },
+ { 128000, 256000, 2, 4 },
+ { 256000, 1000000, 1, 2 },
+ { 1000000, 13500000, 0, 1 },
+};
+
+static int fll_factors(struct _fll_div *fll_div, unsigned int Fref,
+ unsigned int Fout)
+{
+ u64 Kpart;
+ unsigned int K, Ndiv, Nmod, target;
+ unsigned int div;
+ int i;
+
+ /* Fref must be <=13.5MHz */
+ div = 1;
+ fll_div->fll_clk_ref_div = 0;
+ while ((Fref / div) > 13500000) {
+ div *= 2;
+ fll_div->fll_clk_ref_div++;
+
+ if (div > 8) {
+ pr_err("Can't scale %dMHz input down to <=13.5MHz\n",
+ Fref);
+ return -EINVAL;
+ }
+ }
+
+ pr_debug("Fref=%u Fout=%u\n", Fref, Fout);
+
+ /* Apply the division for our remaining calculations */
+ Fref /= div;
+
+ /* Fvco should be 90-100MHz; don't check the upper bound */
+ div = 4;
+ while (Fout * div < 90000000) {
+ div++;
+ if (div > 64) {
+ pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n",
+ Fout);
+ return -EINVAL;
+ }
+ }
+ target = Fout * div;
+ fll_div->fll_outdiv = div - 1;
+
+ pr_debug("Fvco=%dHz\n", target);
+
+ /* Find an appropraite FLL_FRATIO and factor it out of the target */
+ for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) {
+ if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) {
+ fll_div->fll_fratio = fll_fratios[i].fll_fratio;
+ target /= fll_fratios[i].ratio;
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(fll_fratios)) {
+ pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref);
+ return -EINVAL;
+ }
+
+ /* Now, calculate N.K */
+ Ndiv = target / Fref;
+
+ fll_div->n = Ndiv;
+ Nmod = target % Fref;
+ pr_debug("Nmod=%d\n", Nmod);
+
+ /* Calculate fractional part - scale up so we can round. */
+ Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+
+ do_div(Kpart, Fref);
+
+ K = Kpart & 0xFFFFFFFF;
+
+ if ((K % 10) >= 5)
+ K += 5;
+
+ /* Move down to proper range now rounding is done */
+ fll_div->k = K / 10;
+
+ pr_debug("N=%x K=%x FLL_FRATIO=%x FLL_OUTDIV=%x FLL_CLK_REF_DIV=%x\n",
+ fll_div->n, fll_div->k,
+ fll_div->fll_fratio, fll_div->fll_outdiv,
+ fll_div->fll_clk_ref_div);
+
+ return 0;
+}
+
+static int wm8904_set_fll(struct snd_soc_dai *dai, int fll_id, int source,
+ unsigned int Fref, unsigned int Fout)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8904_priv *wm8904 = codec->private_data;
+ struct _fll_div fll_div;
+ int ret, val;
+ int clock2, fll1;
+
+ /* Any change? */
+ if (source == wm8904->fll_src && Fref == wm8904->fll_fref &&
+ Fout == wm8904->fll_fout)
+ return 0;
+
+ clock2 = snd_soc_read(codec, WM8904_CLOCK_RATES_2);
+
+ if (Fout == 0) {
+ dev_dbg(codec->dev, "FLL disabled\n");
+
+ wm8904->fll_fref = 0;
+ wm8904->fll_fout = 0;
+
+ /* Gate SYSCLK to avoid glitches */
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_2,
+ WM8904_CLK_SYS_ENA, 0);
+
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_OSC_ENA | WM8904_FLL_ENA, 0);
+
+ goto out;
+ }
+
+ /* Validate the FLL ID */
+ switch (source) {
+ case WM8904_FLL_MCLK:
+ case WM8904_FLL_LRCLK:
+ case WM8904_FLL_BCLK:
+ ret = fll_factors(&fll_div, Fref, Fout);
+ if (ret != 0)
+ return ret;
+ break;
+
+ case WM8904_FLL_FREE_RUNNING:
+ dev_dbg(codec->dev, "Using free running FLL\n");
+ /* Force 12MHz and output/4 for now */
+ Fout = 12000000;
+ Fref = 12000000;
+
+ memset(&fll_div, 0, sizeof(fll_div));
+ fll_div.fll_outdiv = 3;
+ break;
+
+ default:
+ dev_err(codec->dev, "Unknown FLL ID %d\n", fll_id);
+ return -EINVAL;
+ }
+
+ /* Save current state then disable the FLL and SYSCLK to avoid
+ * misclocking */
+ fll1 = snd_soc_read(codec, WM8904_FLL_CONTROL_1);
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_2,
+ WM8904_CLK_SYS_ENA, 0);
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_OSC_ENA | WM8904_FLL_ENA, 0);
+
+ /* Unlock forced oscilator control to switch it on/off */
+ snd_soc_update_bits(codec, WM8904_CONTROL_INTERFACE_TEST_1,
+ WM8904_USER_KEY, WM8904_USER_KEY);
+
+ if (fll_id == WM8904_FLL_FREE_RUNNING) {
+ val = WM8904_FLL_FRC_NCO;
+ } else {
+ val = 0;
+ }
+
+ snd_soc_update_bits(codec, WM8904_FLL_NCO_TEST_1, WM8904_FLL_FRC_NCO,
+ val);
+ snd_soc_update_bits(codec, WM8904_CONTROL_INTERFACE_TEST_1,
+ WM8904_USER_KEY, 0);
+
+ switch (fll_id) {
+ case WM8904_FLL_MCLK:
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_5,
+ WM8904_FLL_CLK_REF_SRC_MASK, 0);
+ break;
+
+ case WM8904_FLL_LRCLK:
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_5,
+ WM8904_FLL_CLK_REF_SRC_MASK, 1);
+ break;
+
+ case WM8904_FLL_BCLK:
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_5,
+ WM8904_FLL_CLK_REF_SRC_MASK, 2);
+ break;
+ }
+
+ if (fll_div.k)
+ val = WM8904_FLL_FRACN_ENA;
+ else
+ val = 0;
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_FRACN_ENA, val);
+
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_2,
+ WM8904_FLL_OUTDIV_MASK | WM8904_FLL_FRATIO_MASK,
+ (fll_div.fll_outdiv << WM8904_FLL_OUTDIV_SHIFT) |
+ (fll_div.fll_fratio << WM8904_FLL_FRATIO_SHIFT));
+
+ snd_soc_write(codec, WM8904_FLL_CONTROL_3, fll_div.k);
+
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_4, WM8904_FLL_N_MASK,
+ fll_div.n << WM8904_FLL_N_SHIFT);
+
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_5,
+ WM8904_FLL_CLK_REF_DIV_MASK,
+ fll_div.fll_clk_ref_div
+ << WM8904_FLL_CLK_REF_DIV_SHIFT);
+
+ dev_dbg(codec->dev, "FLL configured for %dHz->%dHz\n", Fref, Fout);
+
+ wm8904->fll_fref = Fref;
+ wm8904->fll_fout = Fout;
+ wm8904->fll_src = source;
+
+ /* Enable the FLL if it was previously active */
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_OSC_ENA, fll1);
+ snd_soc_update_bits(codec, WM8904_FLL_CONTROL_1,
+ WM8904_FLL_ENA, fll1);
+
+out:
+ /* Reenable SYSCLK if it was previously active */
+ snd_soc_update_bits(codec, WM8904_CLOCK_RATES_2,
+ WM8904_CLK_SYS_ENA, clock2);
+
+ return 0;
+}
+
+static int wm8904_digital_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int val;
+
+ if (mute)
+ val = WM8904_DAC_MUTE;
+ else
+ val = 0;
+
+ snd_soc_update_bits(codec, WM8904_DAC_DIGITAL_1, WM8904_DAC_MUTE, val);
+
+ return 0;
+}
+
+static int wm8904_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct wm8904_priv *wm8904 = codec->private_data;
+ int ret, i;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ /* VMID resistance 2*50k */
+ snd_soc_update_bits(codec, WM8904_VMID_CONTROL_0,
+ WM8904_VMID_RES_MASK,
+ 0x1 << WM8904_VMID_RES_SHIFT);
+
+ /* Normal bias current */
+ snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
+ WM8904_ISEL_MASK, 2 << WM8904_ISEL_SHIFT);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies),
+ wm8904->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Sync back cached values if they're
+ * different from the hardware default.
+ */
+ for (i = 1; i < ARRAY_SIZE(wm8904->reg_cache); i++) {
+ if (!wm8904_access[i].writable)
+ continue;
+
+ if (wm8904->reg_cache[i] == wm8904_reg[i])
+ continue;
+
+ snd_soc_write(codec, i, wm8904->reg_cache[i]);
+ }
+
+ /* Enable bias */
+ snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
+ WM8904_BIAS_ENA, WM8904_BIAS_ENA);
+
+ /* Enable VMID, VMID buffering, 2*5k resistance */
+ snd_soc_update_bits(codec, WM8904_VMID_CONTROL_0,
+ WM8904_VMID_ENA |
+ WM8904_VMID_RES_MASK,
+ WM8904_VMID_ENA |
+ 0x3 << WM8904_VMID_RES_SHIFT);
+
+ /* Let VMID ramp */
+ msleep(1);
+ }
+
+ /* Maintain VMID with 2*250k */
+ snd_soc_update_bits(codec, WM8904_VMID_CONTROL_0,
+ WM8904_VMID_RES_MASK,
+ 0x2 << WM8904_VMID_RES_SHIFT);
+
+ /* Bias current *0.5 */
+ snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
+ WM8904_ISEL_MASK, 0);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ /* Turn off VMID */
+ snd_soc_update_bits(codec, WM8904_VMID_CONTROL_0,
+ WM8904_VMID_RES_MASK | WM8904_VMID_ENA, 0);
+
+ /* Stop bias generation */
+ snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
+ WM8904_BIAS_ENA, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies),
+ wm8904->supplies);
+ break;
+ }
+ codec->bias_level = level;
+ return 0;
+}
+
+#define WM8904_RATES SNDRV_PCM_RATE_8000_96000
+
+#define WM8904_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8904_dai_ops = {
+ .set_sysclk = wm8904_set_sysclk,
+ .set_fmt = wm8904_set_fmt,
+ .set_tdm_slot = wm8904_set_tdm_slot,
+ .set_pll = wm8904_set_fll,
+ .hw_params = wm8904_hw_params,
+ .digital_mute = wm8904_digital_mute,
+};
+
+struct snd_soc_dai wm8904_dai = {
+ .name = "WM8904",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = WM8904_RATES,
+ .formats = WM8904_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = WM8904_RATES,
+ .formats = WM8904_FORMATS,
+ },
+ .ops = &wm8904_dai_ops,
+ .symmetric_rates = 1,
+};
+EXPORT_SYMBOL_GPL(wm8904_dai);
+
+#ifdef CONFIG_PM
+static int wm8904_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8904_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int wm8904_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8904_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm8904_suspend NULL
+#define wm8904_resume NULL
+#endif
+
+static void wm8904_handle_retune_mobile_pdata(struct wm8904_priv *wm8904)
+{
+ struct snd_soc_codec *codec = &wm8904->codec;
+ struct wm8904_pdata *pdata = wm8904->pdata;
+ struct snd_kcontrol_new control =
+ SOC_ENUM_EXT("EQ Mode",
+ wm8904->retune_mobile_enum,
+ wm8904_get_retune_mobile_enum,
+ wm8904_put_retune_mobile_enum);
+ int ret, i, j;
+ const char **t;
+
+ /* We need an array of texts for the enum API but the number
+ * of texts is likely to be less than the number of
+ * configurations due to the sample rate dependency of the
+ * configurations. */
+ wm8904->num_retune_mobile_texts = 0;
+ wm8904->retune_mobile_texts = NULL;
+ for (i = 0; i < pdata->num_retune_mobile_cfgs; i++) {
+ for (j = 0; j < wm8904->num_retune_mobile_texts; j++) {
+ if (strcmp(pdata->retune_mobile_cfgs[i].name,
+ wm8904->retune_mobile_texts[j]) == 0)
+ break;
+ }
+
+ if (j != wm8904->num_retune_mobile_texts)
+ continue;
+
+ /* Expand the array... */
+ t = krealloc(wm8904->retune_mobile_texts,
+ sizeof(char *) *
+ (wm8904->num_retune_mobile_texts + 1),
+ GFP_KERNEL);
+ if (t == NULL)
+ continue;
+
+ /* ...store the new entry... */
+ t[wm8904->num_retune_mobile_texts] =
+ pdata->retune_mobile_cfgs[i].name;
+
+ /* ...and remember the new version. */
+ wm8904->num_retune_mobile_texts++;
+ wm8904->retune_mobile_texts = t;
+ }
+
+ dev_dbg(codec->dev, "Allocated %d unique ReTune Mobile names\n",
+ wm8904->num_retune_mobile_texts);
+
+ wm8904->retune_mobile_enum.max = wm8904->num_retune_mobile_texts;
+ wm8904->retune_mobile_enum.texts = wm8904->retune_mobile_texts;
+
+ ret = snd_soc_add_controls(&wm8904->codec, &control, 1);
+ if (ret != 0)
+ dev_err(wm8904->codec.dev,
+ "Failed to add ReTune Mobile control: %d\n", ret);
+}
+
+static void wm8904_handle_pdata(struct wm8904_priv *wm8904)
+{
+ struct snd_soc_codec *codec = &wm8904->codec;
+ struct wm8904_pdata *pdata = wm8904->pdata;
+ int ret, i;
+
+ if (!pdata) {
+ snd_soc_add_controls(&wm8904->codec, wm8904_eq_controls,
+ ARRAY_SIZE(wm8904_eq_controls));
+ return;
+ }
+
+ dev_dbg(codec->dev, "%d DRC configurations\n", pdata->num_drc_cfgs);
+
+ if (pdata->num_drc_cfgs) {
+ struct snd_kcontrol_new control =
+ SOC_ENUM_EXT("DRC Mode", wm8904->drc_enum,
+ wm8904_get_drc_enum, wm8904_put_drc_enum);
+
+ /* We need an array of texts for the enum API */
+ wm8904->drc_texts = kmalloc(sizeof(char *)
+ * pdata->num_drc_cfgs, GFP_KERNEL);
+ if (!wm8904->drc_texts) {
+ dev_err(wm8904->codec.dev,
+ "Failed to allocate %d DRC config texts\n",
+ pdata->num_drc_cfgs);
+ return;
+ }
+
+ for (i = 0; i < pdata->num_drc_cfgs; i++)
+ wm8904->drc_texts[i] = pdata->drc_cfgs[i].name;
+
+ wm8904->drc_enum.max = pdata->num_drc_cfgs;
+ wm8904->drc_enum.texts = wm8904->drc_texts;
+
+ ret = snd_soc_add_controls(&wm8904->codec, &control, 1);
+ if (ret != 0)
+ dev_err(wm8904->codec.dev,
+ "Failed to add DRC mode control: %d\n", ret);
+
+ wm8904_set_drc(codec);
+ }
+
+ dev_dbg(codec->dev, "%d ReTune Mobile configurations\n",
+ pdata->num_retune_mobile_cfgs);
+
+ if (pdata->num_retune_mobile_cfgs)
+ wm8904_handle_retune_mobile_pdata(wm8904);
+ else
+ snd_soc_add_controls(&wm8904->codec, wm8904_eq_controls,
+ ARRAY_SIZE(wm8904_eq_controls));
+}
+
+static int wm8904_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8904_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8904_codec;
+ codec = wm8904_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ wm8904_handle_pdata(codec->private_data);
+
+ wm8904_add_widgets(codec);
+
+ return ret;
+
+pcm_err:
+ return ret;
+}
+
+static int wm8904_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm8904 = {
+ .probe = wm8904_probe,
+ .remove = wm8904_remove,
+ .suspend = wm8904_suspend,
+ .resume = wm8904_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8904);
+
+static int wm8904_register(struct wm8904_priv *wm8904,
+ enum snd_soc_control_type control)
+{
+ int ret;
+ struct snd_soc_codec *codec = &wm8904->codec;
+ int i;
+
+ if (wm8904_codec) {
+ dev_err(codec->dev, "Another WM8904 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8904;
+ codec->name = "WM8904";
+ codec->owner = THIS_MODULE;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8904_set_bias_level;
+ codec->dai = &wm8904_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = WM8904_MAX_REGISTER;
+ codec->reg_cache = &wm8904->reg_cache;
+ codec->volatile_register = wm8904_volatile_register;
+
+ memcpy(codec->reg_cache, wm8904_reg, sizeof(wm8904_reg));
+
+ ret = snd_soc_codec_set_cache_io(codec, 8, 16, control);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm8904->supplies); i++)
+ wm8904->supplies[i].supply = wm8904_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8904->supplies),
+ wm8904->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8904->supplies),
+ wm8904->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_get;
+ }
+
+ ret = snd_soc_read(codec, WM8904_SW_RESET_AND_ID);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to read ID register\n");
+ goto err_enable;
+ }
+ if (ret != wm8904_reg[WM8904_SW_RESET_AND_ID]) {
+ dev_err(codec->dev, "Device is not a WM8904, ID is %x\n", ret);
+ ret = -EINVAL;
+ goto err_enable;
+ }
+
+ ret = snd_soc_read(codec, WM8904_REVISION);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to read device revision: %d\n",
+ ret);
+ goto err_enable;
+ }
+ dev_info(codec->dev, "revision %c\n", ret + 'A');
+
+ ret = wm8904_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset\n");
+ goto err_enable;
+ }
+
+ wm8904_dai.dev = codec->dev;
+
+ /* Change some default settings - latch VU and enable ZC */
+ wm8904->reg_cache[WM8904_ADC_DIGITAL_VOLUME_LEFT] |= WM8904_ADC_VU;
+ wm8904->reg_cache[WM8904_ADC_DIGITAL_VOLUME_RIGHT] |= WM8904_ADC_VU;
+ wm8904->reg_cache[WM8904_DAC_DIGITAL_VOLUME_LEFT] |= WM8904_DAC_VU;
+ wm8904->reg_cache[WM8904_DAC_DIGITAL_VOLUME_RIGHT] |= WM8904_DAC_VU;
+ wm8904->reg_cache[WM8904_ANALOGUE_OUT1_LEFT] |= WM8904_HPOUT_VU |
+ WM8904_HPOUTLZC;
+ wm8904->reg_cache[WM8904_ANALOGUE_OUT1_RIGHT] |= WM8904_HPOUT_VU |
+ WM8904_HPOUTRZC;
+ wm8904->reg_cache[WM8904_ANALOGUE_OUT2_LEFT] |= WM8904_LINEOUT_VU |
+ WM8904_LINEOUTLZC;
+ wm8904->reg_cache[WM8904_ANALOGUE_OUT2_RIGHT] |= WM8904_LINEOUT_VU |
+ WM8904_LINEOUTRZC;
+ wm8904->reg_cache[WM8904_CLOCK_RATES_0] &= ~WM8904_SR_MODE;
+
+ /* Set Class W by default - this will be managed by the Class
+ * G widget at runtime where bypass paths are available.
+ */
+ wm8904->reg_cache[WM8904_CLASS_W_0] |= WM8904_CP_DYN_PWR;
+
+ /* Use normal bias source */
+ wm8904->reg_cache[WM8904_BIAS_CONTROL_0] &= ~WM8904_POBCTRL;
+
+ wm8904_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Bias level configuration will have done an extra enable */
+ regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
+
+ wm8904_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&wm8904_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ return ret;
+ }
+
+ return 0;
+
+err_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
+err_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
+err:
+ kfree(wm8904);
+ return ret;
+}
+
+static void wm8904_unregister(struct wm8904_priv *wm8904)
+{
+ wm8904_set_bias_level(&wm8904->codec, SND_SOC_BIAS_OFF);
+ regulator_bulk_free(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
+ snd_soc_unregister_dai(&wm8904_dai);
+ snd_soc_unregister_codec(&wm8904->codec);
+ kfree(wm8904);
+ wm8904_codec = NULL;
+}
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8904_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8904_priv *wm8904;
+ struct snd_soc_codec *codec;
+
+ wm8904 = kzalloc(sizeof(struct wm8904_priv), GFP_KERNEL);
+ if (wm8904 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8904->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+
+ i2c_set_clientdata(i2c, wm8904);
+ codec->control_data = i2c;
+ wm8904->pdata = i2c->dev.platform_data;
+
+ codec->dev = &i2c->dev;
+
+ return wm8904_register(wm8904, SND_SOC_I2C);
+}
+
+static __devexit int wm8904_i2c_remove(struct i2c_client *client)
+{
+ struct wm8904_priv *wm8904 = i2c_get_clientdata(client);
+ wm8904_unregister(wm8904);
+ return 0;
+}
+
+static const struct i2c_device_id wm8904_i2c_id[] = {
+ { "wm8904", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8904_i2c_id);
+
+static struct i2c_driver wm8904_i2c_driver = {
+ .driver = {
+ .name = "WM8904",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8904_i2c_probe,
+ .remove = __devexit_p(wm8904_i2c_remove),
+ .id_table = wm8904_i2c_id,
+};
+#endif
+
+static int __init wm8904_modinit(void)
+{
+ int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&wm8904_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8904 I2C driver: %d\n",
+ ret);
+ }
+#endif
+ return 0;
+}
+module_init(wm8904_modinit);
+
+static void __exit wm8904_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&wm8904_i2c_driver);
+#endif
+}
+module_exit(wm8904_exit);
+
+MODULE_DESCRIPTION("ASoC WM8904 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8904.h b/sound/soc/codecs/wm8904.h
new file mode 100644
index 000000000000..b68886df34e4
--- /dev/null
+++ b/sound/soc/codecs/wm8904.h
@@ -0,0 +1,1681 @@
+/*
+ * wm8904.h -- WM8904 ASoC driver
+ *
+ * Copyright 2009 Wolfson Microelectronics, plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8904_H
+#define _WM8904_H
+
+#define WM8904_CLK_MCLK 1
+#define WM8904_CLK_FLL 2
+
+#define WM8904_FLL_MCLK 1
+#define WM8904_FLL_BCLK 2
+#define WM8904_FLL_LRCLK 3
+#define WM8904_FLL_FREE_RUNNING 4
+
+extern struct snd_soc_dai wm8904_dai;
+extern struct snd_soc_codec_device soc_codec_dev_wm8904;
+
+/*
+ * Register values.
+ */
+#define WM8904_SW_RESET_AND_ID 0x00
+#define WM8904_REVISION 0x01
+#define WM8904_BIAS_CONTROL_0 0x04
+#define WM8904_VMID_CONTROL_0 0x05
+#define WM8904_MIC_BIAS_CONTROL_0 0x06
+#define WM8904_MIC_BIAS_CONTROL_1 0x07
+#define WM8904_ANALOGUE_DAC_0 0x08
+#define WM8904_MIC_FILTER_CONTROL 0x09
+#define WM8904_ANALOGUE_ADC_0 0x0A
+#define WM8904_POWER_MANAGEMENT_0 0x0C
+#define WM8904_POWER_MANAGEMENT_2 0x0E
+#define WM8904_POWER_MANAGEMENT_3 0x0F
+#define WM8904_POWER_MANAGEMENT_6 0x12
+#define WM8904_CLOCK_RATES_0 0x14
+#define WM8904_CLOCK_RATES_1 0x15
+#define WM8904_CLOCK_RATES_2 0x16
+#define WM8904_AUDIO_INTERFACE_0 0x18
+#define WM8904_AUDIO_INTERFACE_1 0x19
+#define WM8904_AUDIO_INTERFACE_2 0x1A
+#define WM8904_AUDIO_INTERFACE_3 0x1B
+#define WM8904_DAC_DIGITAL_VOLUME_LEFT 0x1E
+#define WM8904_DAC_DIGITAL_VOLUME_RIGHT 0x1F
+#define WM8904_DAC_DIGITAL_0 0x20
+#define WM8904_DAC_DIGITAL_1 0x21
+#define WM8904_ADC_DIGITAL_VOLUME_LEFT 0x24
+#define WM8904_ADC_DIGITAL_VOLUME_RIGHT 0x25
+#define WM8904_ADC_DIGITAL_0 0x26
+#define WM8904_DIGITAL_MICROPHONE_0 0x27
+#define WM8904_DRC_0 0x28
+#define WM8904_DRC_1 0x29
+#define WM8904_DRC_2 0x2A
+#define WM8904_DRC_3 0x2B
+#define WM8904_ANALOGUE_LEFT_INPUT_0 0x2C
+#define WM8904_ANALOGUE_RIGHT_INPUT_0 0x2D
+#define WM8904_ANALOGUE_LEFT_INPUT_1 0x2E
+#define WM8904_ANALOGUE_RIGHT_INPUT_1 0x2F
+#define WM8904_ANALOGUE_OUT1_LEFT 0x39
+#define WM8904_ANALOGUE_OUT1_RIGHT 0x3A
+#define WM8904_ANALOGUE_OUT2_LEFT 0x3B
+#define WM8904_ANALOGUE_OUT2_RIGHT 0x3C
+#define WM8904_ANALOGUE_OUT12_ZC 0x3D
+#define WM8904_DC_SERVO_0 0x43
+#define WM8904_DC_SERVO_1 0x44
+#define WM8904_DC_SERVO_2 0x45
+#define WM8904_DC_SERVO_4 0x47
+#define WM8904_DC_SERVO_5 0x48
+#define WM8904_DC_SERVO_6 0x49
+#define WM8904_DC_SERVO_7 0x4A
+#define WM8904_DC_SERVO_8 0x4B
+#define WM8904_DC_SERVO_9 0x4C
+#define WM8904_DC_SERVO_READBACK_0 0x4D
+#define WM8904_ANALOGUE_HP_0 0x5A
+#define WM8904_ANALOGUE_LINEOUT_0 0x5E
+#define WM8904_CHARGE_PUMP_0 0x62
+#define WM8904_CLASS_W_0 0x68
+#define WM8904_WRITE_SEQUENCER_0 0x6C
+#define WM8904_WRITE_SEQUENCER_1 0x6D
+#define WM8904_WRITE_SEQUENCER_2 0x6E
+#define WM8904_WRITE_SEQUENCER_3 0x6F
+#define WM8904_WRITE_SEQUENCER_4 0x70
+#define WM8904_FLL_CONTROL_1 0x74
+#define WM8904_FLL_CONTROL_2 0x75
+#define WM8904_FLL_CONTROL_3 0x76
+#define WM8904_FLL_CONTROL_4 0x77
+#define WM8904_FLL_CONTROL_5 0x78
+#define WM8904_GPIO_CONTROL_1 0x79
+#define WM8904_GPIO_CONTROL_2 0x7A
+#define WM8904_GPIO_CONTROL_3 0x7B
+#define WM8904_GPIO_CONTROL_4 0x7C
+#define WM8904_DIGITAL_PULLS 0x7E
+#define WM8904_INTERRUPT_STATUS 0x7F
+#define WM8904_INTERRUPT_STATUS_MASK 0x80
+#define WM8904_INTERRUPT_POLARITY 0x81
+#define WM8904_INTERRUPT_DEBOUNCE 0x82
+#define WM8904_EQ1 0x86
+#define WM8904_EQ2 0x87
+#define WM8904_EQ3 0x88
+#define WM8904_EQ4 0x89
+#define WM8904_EQ5 0x8A
+#define WM8904_EQ6 0x8B
+#define WM8904_EQ7 0x8C
+#define WM8904_EQ8 0x8D
+#define WM8904_EQ9 0x8E
+#define WM8904_EQ10 0x8F
+#define WM8904_EQ11 0x90
+#define WM8904_EQ12 0x91
+#define WM8904_EQ13 0x92
+#define WM8904_EQ14 0x93
+#define WM8904_EQ15 0x94
+#define WM8904_EQ16 0x95
+#define WM8904_EQ17 0x96
+#define WM8904_EQ18 0x97
+#define WM8904_EQ19 0x98
+#define WM8904_EQ20 0x99
+#define WM8904_EQ21 0x9A
+#define WM8904_EQ22 0x9B
+#define WM8904_EQ23 0x9C
+#define WM8904_EQ24 0x9D
+#define WM8904_CONTROL_INTERFACE_TEST_1 0xA1
+#define WM8904_ANALOGUE_OUTPUT_BIAS_0 0xCC
+#define WM8904_FLL_NCO_TEST_0 0xF7
+#define WM8904_FLL_NCO_TEST_1 0xF8
+
+#define WM8904_REGISTER_COUNT 101
+#define WM8904_MAX_REGISTER 0xF8
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R0 (0x00) - SW Reset and ID
+ */
+#define WM8904_SW_RST_DEV_ID1_MASK 0xFFFF /* SW_RST_DEV_ID1 - [15:0] */
+#define WM8904_SW_RST_DEV_ID1_SHIFT 0 /* SW_RST_DEV_ID1 - [15:0] */
+#define WM8904_SW_RST_DEV_ID1_WIDTH 16 /* SW_RST_DEV_ID1 - [15:0] */
+
+/*
+ * R1 (0x01) - Revision
+ */
+#define WM8904_REVISION_MASK 0x000F /* REVISION - [3:0] */
+#define WM8904_REVISION_SHIFT 0 /* REVISION - [3:0] */
+#define WM8904_REVISION_WIDTH 16 /* REVISION - [3:0] */
+
+/*
+ * R4 (0x04) - Bias Control 0
+ */
+#define WM8904_POBCTRL 0x0010 /* POBCTRL */
+#define WM8904_POBCTRL_MASK 0x0010 /* POBCTRL */
+#define WM8904_POBCTRL_SHIFT 4 /* POBCTRL */
+#define WM8904_POBCTRL_WIDTH 1 /* POBCTRL */
+#define WM8904_ISEL_MASK 0x000C /* ISEL - [3:2] */
+#define WM8904_ISEL_SHIFT 2 /* ISEL - [3:2] */
+#define WM8904_ISEL_WIDTH 2 /* ISEL - [3:2] */
+#define WM8904_STARTUP_BIAS_ENA 0x0002 /* STARTUP_BIAS_ENA */
+#define WM8904_STARTUP_BIAS_ENA_MASK 0x0002 /* STARTUP_BIAS_ENA */
+#define WM8904_STARTUP_BIAS_ENA_SHIFT 1 /* STARTUP_BIAS_ENA */
+#define WM8904_STARTUP_BIAS_ENA_WIDTH 1 /* STARTUP_BIAS_ENA */
+#define WM8904_BIAS_ENA 0x0001 /* BIAS_ENA */
+#define WM8904_BIAS_ENA_MASK 0x0001 /* BIAS_ENA */
+#define WM8904_BIAS_ENA_SHIFT 0 /* BIAS_ENA */
+#define WM8904_BIAS_ENA_WIDTH 1 /* BIAS_ENA */
+
+/*
+ * R5 (0x05) - VMID Control 0
+ */
+#define WM8904_VMID_BUF_ENA 0x0040 /* VMID_BUF_ENA */
+#define WM8904_VMID_BUF_ENA_MASK 0x0040 /* VMID_BUF_ENA */
+#define WM8904_VMID_BUF_ENA_SHIFT 6 /* VMID_BUF_ENA */
+#define WM8904_VMID_BUF_ENA_WIDTH 1 /* VMID_BUF_ENA */
+#define WM8904_VMID_RES_MASK 0x0006 /* VMID_RES - [2:1] */
+#define WM8904_VMID_RES_SHIFT 1 /* VMID_RES - [2:1] */
+#define WM8904_VMID_RES_WIDTH 2 /* VMID_RES - [2:1] */
+#define WM8904_VMID_ENA 0x0001 /* VMID_ENA */
+#define WM8904_VMID_ENA_MASK 0x0001 /* VMID_ENA */
+#define WM8904_VMID_ENA_SHIFT 0 /* VMID_ENA */
+#define WM8904_VMID_ENA_WIDTH 1 /* VMID_ENA */
+
+/*
+ * R6 (0x06) - Mic Bias Control 0
+ */
+#define WM8904_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
+#define WM8904_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
+#define WM8904_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
+#define WM8904_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
+#define WM8904_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
+#define WM8904_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
+#define WM8904_MICDET_ENA 0x0002 /* MICDET_ENA */
+#define WM8904_MICDET_ENA_MASK 0x0002 /* MICDET_ENA */
+#define WM8904_MICDET_ENA_SHIFT 1 /* MICDET_ENA */
+#define WM8904_MICDET_ENA_WIDTH 1 /* MICDET_ENA */
+#define WM8904_MICBIAS_ENA 0x0001 /* MICBIAS_ENA */
+#define WM8904_MICBIAS_ENA_MASK 0x0001 /* MICBIAS_ENA */
+#define WM8904_MICBIAS_ENA_SHIFT 0 /* MICBIAS_ENA */
+#define WM8904_MICBIAS_ENA_WIDTH 1 /* MICBIAS_ENA */
+
+/*
+ * R7 (0x07) - Mic Bias Control 1
+ */
+#define WM8904_MIC_DET_FILTER_ENA 0x8000 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_DET_FILTER_ENA_MASK 0x8000 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_DET_FILTER_ENA_SHIFT 15 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_DET_FILTER_ENA_WIDTH 1 /* MIC_DET_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA 0x4000 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA_MASK 0x4000 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA_SHIFT 14 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MIC_SHORT_FILTER_ENA_WIDTH 1 /* MIC_SHORT_FILTER_ENA */
+#define WM8904_MICBIAS_SEL_MASK 0x0007 /* MICBIAS_SEL - [2:0] */
+#define WM8904_MICBIAS_SEL_SHIFT 0 /* MICBIAS_SEL - [2:0] */
+#define WM8904_MICBIAS_SEL_WIDTH 3 /* MICBIAS_SEL - [2:0] */
+
+/*
+ * R8 (0x08) - Analogue DAC 0
+ */
+#define WM8904_DAC_BIAS_SEL_MASK 0x0018 /* DAC_BIAS_SEL - [4:3] */
+#define WM8904_DAC_BIAS_SEL_SHIFT 3 /* DAC_BIAS_SEL - [4:3] */
+#define WM8904_DAC_BIAS_SEL_WIDTH 2 /* DAC_BIAS_SEL - [4:3] */
+#define WM8904_DAC_VMID_BIAS_SEL_MASK 0x0006 /* DAC_VMID_BIAS_SEL - [2:1] */
+#define WM8904_DAC_VMID_BIAS_SEL_SHIFT 1 /* DAC_VMID_BIAS_SEL - [2:1] */
+#define WM8904_DAC_VMID_BIAS_SEL_WIDTH 2 /* DAC_VMID_BIAS_SEL - [2:1] */
+
+/*
+ * R9 (0x09) - mic Filter Control
+ */
+#define WM8904_MIC_DET_SET_THRESHOLD_MASK 0xF000 /* MIC_DET_SET_THRESHOLD - [15:12] */
+#define WM8904_MIC_DET_SET_THRESHOLD_SHIFT 12 /* MIC_DET_SET_THRESHOLD - [15:12] */
+#define WM8904_MIC_DET_SET_THRESHOLD_WIDTH 4 /* MIC_DET_SET_THRESHOLD - [15:12] */
+#define WM8904_MIC_DET_RESET_THRESHOLD_MASK 0x0F00 /* MIC_DET_RESET_THRESHOLD - [11:8] */
+#define WM8904_MIC_DET_RESET_THRESHOLD_SHIFT 8 /* MIC_DET_RESET_THRESHOLD - [11:8] */
+#define WM8904_MIC_DET_RESET_THRESHOLD_WIDTH 4 /* MIC_DET_RESET_THRESHOLD - [11:8] */
+#define WM8904_MIC_SHORT_SET_THRESHOLD_MASK 0x00F0 /* MIC_SHORT_SET_THRESHOLD - [7:4] */
+#define WM8904_MIC_SHORT_SET_THRESHOLD_SHIFT 4 /* MIC_SHORT_SET_THRESHOLD - [7:4] */
+#define WM8904_MIC_SHORT_SET_THRESHOLD_WIDTH 4 /* MIC_SHORT_SET_THRESHOLD - [7:4] */
+#define WM8904_MIC_SHORT_RESET_THRESHOLD_MASK 0x000F /* MIC_SHORT_RESET_THRESHOLD - [3:0] */
+#define WM8904_MIC_SHORT_RESET_THRESHOLD_SHIFT 0 /* MIC_SHORT_RESET_THRESHOLD - [3:0] */
+#define WM8904_MIC_SHORT_RESET_THRESHOLD_WIDTH 4 /* MIC_SHORT_RESET_THRESHOLD - [3:0] */
+
+/*
+ * R10 (0x0A) - Analogue ADC 0
+ */
+#define WM8904_ADC_OSR128 0x0001 /* ADC_OSR128 */
+#define WM8904_ADC_OSR128_MASK 0x0001 /* ADC_OSR128 */
+#define WM8904_ADC_OSR128_SHIFT 0 /* ADC_OSR128 */
+#define WM8904_ADC_OSR128_WIDTH 1 /* ADC_OSR128 */
+
+/*
+ * R12 (0x0C) - Power Management 0
+ */
+#define WM8904_INL_ENA 0x0002 /* INL_ENA */
+#define WM8904_INL_ENA_MASK 0x0002 /* INL_ENA */
+#define WM8904_INL_ENA_SHIFT 1 /* INL_ENA */
+#define WM8904_INL_ENA_WIDTH 1 /* INL_ENA */
+#define WM8904_INR_ENA 0x0001 /* INR_ENA */
+#define WM8904_INR_ENA_MASK 0x0001 /* INR_ENA */
+#define WM8904_INR_ENA_SHIFT 0 /* INR_ENA */
+#define WM8904_INR_ENA_WIDTH 1 /* INR_ENA */
+
+/*
+ * R14 (0x0E) - Power Management 2
+ */
+#define WM8904_HPL_PGA_ENA 0x0002 /* HPL_PGA_ENA */
+#define WM8904_HPL_PGA_ENA_MASK 0x0002 /* HPL_PGA_ENA */
+#define WM8904_HPL_PGA_ENA_SHIFT 1 /* HPL_PGA_ENA */
+#define WM8904_HPL_PGA_ENA_WIDTH 1 /* HPL_PGA_ENA */
+#define WM8904_HPR_PGA_ENA 0x0001 /* HPR_PGA_ENA */
+#define WM8904_HPR_PGA_ENA_MASK 0x0001 /* HPR_PGA_ENA */
+#define WM8904_HPR_PGA_ENA_SHIFT 0 /* HPR_PGA_ENA */
+#define WM8904_HPR_PGA_ENA_WIDTH 1 /* HPR_PGA_ENA */
+
+/*
+ * R15 (0x0F) - Power Management 3
+ */
+#define WM8904_LINEOUTL_PGA_ENA 0x0002 /* LINEOUTL_PGA_ENA */
+#define WM8904_LINEOUTL_PGA_ENA_MASK 0x0002 /* LINEOUTL_PGA_ENA */
+#define WM8904_LINEOUTL_PGA_ENA_SHIFT 1 /* LINEOUTL_PGA_ENA */
+#define WM8904_LINEOUTL_PGA_ENA_WIDTH 1 /* LINEOUTL_PGA_ENA */
+#define WM8904_LINEOUTR_PGA_ENA 0x0001 /* LINEOUTR_PGA_ENA */
+#define WM8904_LINEOUTR_PGA_ENA_MASK 0x0001 /* LINEOUTR_PGA_ENA */
+#define WM8904_LINEOUTR_PGA_ENA_SHIFT 0 /* LINEOUTR_PGA_ENA */
+#define WM8904_LINEOUTR_PGA_ENA_WIDTH 1 /* LINEOUTR_PGA_ENA */
+
+/*
+ * R18 (0x12) - Power Management 6
+ */
+#define WM8904_DACL_ENA 0x0008 /* DACL_ENA */
+#define WM8904_DACL_ENA_MASK 0x0008 /* DACL_ENA */
+#define WM8904_DACL_ENA_SHIFT 3 /* DACL_ENA */
+#define WM8904_DACL_ENA_WIDTH 1 /* DACL_ENA */
+#define WM8904_DACR_ENA 0x0004 /* DACR_ENA */
+#define WM8904_DACR_ENA_MASK 0x0004 /* DACR_ENA */
+#define WM8904_DACR_ENA_SHIFT 2 /* DACR_ENA */
+#define WM8904_DACR_ENA_WIDTH 1 /* DACR_ENA */
+#define WM8904_ADCL_ENA 0x0002 /* ADCL_ENA */
+#define WM8904_ADCL_ENA_MASK 0x0002 /* ADCL_ENA */
+#define WM8904_ADCL_ENA_SHIFT 1 /* ADCL_ENA */
+#define WM8904_ADCL_ENA_WIDTH 1 /* ADCL_ENA */
+#define WM8904_ADCR_ENA 0x0001 /* ADCR_ENA */
+#define WM8904_ADCR_ENA_MASK 0x0001 /* ADCR_ENA */
+#define WM8904_ADCR_ENA_SHIFT 0 /* ADCR_ENA */
+#define WM8904_ADCR_ENA_WIDTH 1 /* ADCR_ENA */
+
+/*
+ * R20 (0x14) - Clock Rates 0
+ */
+#define WM8904_TOCLK_RATE_DIV16 0x4000 /* TOCLK_RATE_DIV16 */
+#define WM8904_TOCLK_RATE_DIV16_MASK 0x4000 /* TOCLK_RATE_DIV16 */
+#define WM8904_TOCLK_RATE_DIV16_SHIFT 14 /* TOCLK_RATE_DIV16 */
+#define WM8904_TOCLK_RATE_DIV16_WIDTH 1 /* TOCLK_RATE_DIV16 */
+#define WM8904_TOCLK_RATE_X4 0x2000 /* TOCLK_RATE_X4 */
+#define WM8904_TOCLK_RATE_X4_MASK 0x2000 /* TOCLK_RATE_X4 */
+#define WM8904_TOCLK_RATE_X4_SHIFT 13 /* TOCLK_RATE_X4 */
+#define WM8904_TOCLK_RATE_X4_WIDTH 1 /* TOCLK_RATE_X4 */
+#define WM8904_SR_MODE 0x1000 /* SR_MODE */
+#define WM8904_SR_MODE_MASK 0x1000 /* SR_MODE */
+#define WM8904_SR_MODE_SHIFT 12 /* SR_MODE */
+#define WM8904_SR_MODE_WIDTH 1 /* SR_MODE */
+#define WM8904_MCLK_DIV 0x0001 /* MCLK_DIV */
+#define WM8904_MCLK_DIV_MASK 0x0001 /* MCLK_DIV */
+#define WM8904_MCLK_DIV_SHIFT 0 /* MCLK_DIV */
+#define WM8904_MCLK_DIV_WIDTH 1 /* MCLK_DIV */
+
+/*
+ * R21 (0x15) - Clock Rates 1
+ */
+#define WM8904_CLK_SYS_RATE_MASK 0x3C00 /* CLK_SYS_RATE - [13:10] */
+#define WM8904_CLK_SYS_RATE_SHIFT 10 /* CLK_SYS_RATE - [13:10] */
+#define WM8904_CLK_SYS_RATE_WIDTH 4 /* CLK_SYS_RATE - [13:10] */
+#define WM8904_SAMPLE_RATE_MASK 0x0007 /* SAMPLE_RATE - [2:0] */
+#define WM8904_SAMPLE_RATE_SHIFT 0 /* SAMPLE_RATE - [2:0] */
+#define WM8904_SAMPLE_RATE_WIDTH 3 /* SAMPLE_RATE - [2:0] */
+
+/*
+ * R22 (0x16) - Clock Rates 2
+ */
+#define WM8904_MCLK_INV 0x8000 /* MCLK_INV */
+#define WM8904_MCLK_INV_MASK 0x8000 /* MCLK_INV */
+#define WM8904_MCLK_INV_SHIFT 15 /* MCLK_INV */
+#define WM8904_MCLK_INV_WIDTH 1 /* MCLK_INV */
+#define WM8904_SYSCLK_SRC 0x4000 /* SYSCLK_SRC */
+#define WM8904_SYSCLK_SRC_MASK 0x4000 /* SYSCLK_SRC */
+#define WM8904_SYSCLK_SRC_SHIFT 14 /* SYSCLK_SRC */
+#define WM8904_SYSCLK_SRC_WIDTH 1 /* SYSCLK_SRC */
+#define WM8904_TOCLK_RATE 0x1000 /* TOCLK_RATE */
+#define WM8904_TOCLK_RATE_MASK 0x1000 /* TOCLK_RATE */
+#define WM8904_TOCLK_RATE_SHIFT 12 /* TOCLK_RATE */
+#define WM8904_TOCLK_RATE_WIDTH 1 /* TOCLK_RATE */
+#define WM8904_OPCLK_ENA 0x0008 /* OPCLK_ENA */
+#define WM8904_OPCLK_ENA_MASK 0x0008 /* OPCLK_ENA */
+#define WM8904_OPCLK_ENA_SHIFT 3 /* OPCLK_ENA */
+#define WM8904_OPCLK_ENA_WIDTH 1 /* OPCLK_ENA */
+#define WM8904_CLK_SYS_ENA 0x0004 /* CLK_SYS_ENA */
+#define WM8904_CLK_SYS_ENA_MASK 0x0004 /* CLK_SYS_ENA */
+#define WM8904_CLK_SYS_ENA_SHIFT 2 /* CLK_SYS_ENA */
+#define WM8904_CLK_SYS_ENA_WIDTH 1 /* CLK_SYS_ENA */
+#define WM8904_CLK_DSP_ENA 0x0002 /* CLK_DSP_ENA */
+#define WM8904_CLK_DSP_ENA_MASK 0x0002 /* CLK_DSP_ENA */
+#define WM8904_CLK_DSP_ENA_SHIFT 1 /* CLK_DSP_ENA */
+#define WM8904_CLK_DSP_ENA_WIDTH 1 /* CLK_DSP_ENA */
+#define WM8904_TOCLK_ENA 0x0001 /* TOCLK_ENA */
+#define WM8904_TOCLK_ENA_MASK 0x0001 /* TOCLK_ENA */
+#define WM8904_TOCLK_ENA_SHIFT 0 /* TOCLK_ENA */
+#define WM8904_TOCLK_ENA_WIDTH 1 /* TOCLK_ENA */
+
+/*
+ * R24 (0x18) - Audio Interface 0
+ */
+#define WM8904_DACL_DATINV 0x1000 /* DACL_DATINV */
+#define WM8904_DACL_DATINV_MASK 0x1000 /* DACL_DATINV */
+#define WM8904_DACL_DATINV_SHIFT 12 /* DACL_DATINV */
+#define WM8904_DACL_DATINV_WIDTH 1 /* DACL_DATINV */
+#define WM8904_DACR_DATINV 0x0800 /* DACR_DATINV */
+#define WM8904_DACR_DATINV_MASK 0x0800 /* DACR_DATINV */
+#define WM8904_DACR_DATINV_SHIFT 11 /* DACR_DATINV */
+#define WM8904_DACR_DATINV_WIDTH 1 /* DACR_DATINV */
+#define WM8904_DAC_BOOST_MASK 0x0600 /* DAC_BOOST - [10:9] */
+#define WM8904_DAC_BOOST_SHIFT 9 /* DAC_BOOST - [10:9] */
+#define WM8904_DAC_BOOST_WIDTH 2 /* DAC_BOOST - [10:9] */
+#define WM8904_LOOPBACK 0x0100 /* LOOPBACK */
+#define WM8904_LOOPBACK_MASK 0x0100 /* LOOPBACK */
+#define WM8904_LOOPBACK_SHIFT 8 /* LOOPBACK */
+#define WM8904_LOOPBACK_WIDTH 1 /* LOOPBACK */
+#define WM8904_AIFADCL_SRC 0x0080 /* AIFADCL_SRC */
+#define WM8904_AIFADCL_SRC_MASK 0x0080 /* AIFADCL_SRC */
+#define WM8904_AIFADCL_SRC_SHIFT 7 /* AIFADCL_SRC */
+#define WM8904_AIFADCL_SRC_WIDTH 1 /* AIFADCL_SRC */
+#define WM8904_AIFADCR_SRC 0x0040 /* AIFADCR_SRC */
+#define WM8904_AIFADCR_SRC_MASK 0x0040 /* AIFADCR_SRC */
+#define WM8904_AIFADCR_SRC_SHIFT 6 /* AIFADCR_SRC */
+#define WM8904_AIFADCR_SRC_WIDTH 1 /* AIFADCR_SRC */
+#define WM8904_AIFDACL_SRC 0x0020 /* AIFDACL_SRC */
+#define WM8904_AIFDACL_SRC_MASK 0x0020 /* AIFDACL_SRC */
+#define WM8904_AIFDACL_SRC_SHIFT 5 /* AIFDACL_SRC */
+#define WM8904_AIFDACL_SRC_WIDTH 1 /* AIFDACL_SRC */
+#define WM8904_AIFDACR_SRC 0x0010 /* AIFDACR_SRC */
+#define WM8904_AIFDACR_SRC_MASK 0x0010 /* AIFDACR_SRC */
+#define WM8904_AIFDACR_SRC_SHIFT 4 /* AIFDACR_SRC */
+#define WM8904_AIFDACR_SRC_WIDTH 1 /* AIFDACR_SRC */
+#define WM8904_ADC_COMP 0x0008 /* ADC_COMP */
+#define WM8904_ADC_COMP_MASK 0x0008 /* ADC_COMP */
+#define WM8904_ADC_COMP_SHIFT 3 /* ADC_COMP */
+#define WM8904_ADC_COMP_WIDTH 1 /* ADC_COMP */
+#define WM8904_ADC_COMPMODE 0x0004 /* ADC_COMPMODE */
+#define WM8904_ADC_COMPMODE_MASK 0x0004 /* ADC_COMPMODE */
+#define WM8904_ADC_COMPMODE_SHIFT 2 /* ADC_COMPMODE */
+#define WM8904_ADC_COMPMODE_WIDTH 1 /* ADC_COMPMODE */
+#define WM8904_DAC_COMP 0x0002 /* DAC_COMP */
+#define WM8904_DAC_COMP_MASK 0x0002 /* DAC_COMP */
+#define WM8904_DAC_COMP_SHIFT 1 /* DAC_COMP */
+#define WM8904_DAC_COMP_WIDTH 1 /* DAC_COMP */
+#define WM8904_DAC_COMPMODE 0x0001 /* DAC_COMPMODE */
+#define WM8904_DAC_COMPMODE_MASK 0x0001 /* DAC_COMPMODE */
+#define WM8904_DAC_COMPMODE_SHIFT 0 /* DAC_COMPMODE */
+#define WM8904_DAC_COMPMODE_WIDTH 1 /* DAC_COMPMODE */
+
+/*
+ * R25 (0x19) - Audio Interface 1
+ */
+#define WM8904_AIFDAC_TDM 0x2000 /* AIFDAC_TDM */
+#define WM8904_AIFDAC_TDM_MASK 0x2000 /* AIFDAC_TDM */
+#define WM8904_AIFDAC_TDM_SHIFT 13 /* AIFDAC_TDM */
+#define WM8904_AIFDAC_TDM_WIDTH 1 /* AIFDAC_TDM */
+#define WM8904_AIFDAC_TDM_CHAN 0x1000 /* AIFDAC_TDM_CHAN */
+#define WM8904_AIFDAC_TDM_CHAN_MASK 0x1000 /* AIFDAC_TDM_CHAN */
+#define WM8904_AIFDAC_TDM_CHAN_SHIFT 12 /* AIFDAC_TDM_CHAN */
+#define WM8904_AIFDAC_TDM_CHAN_WIDTH 1 /* AIFDAC_TDM_CHAN */
+#define WM8904_AIFADC_TDM 0x0800 /* AIFADC_TDM */
+#define WM8904_AIFADC_TDM_MASK 0x0800 /* AIFADC_TDM */
+#define WM8904_AIFADC_TDM_SHIFT 11 /* AIFADC_TDM */
+#define WM8904_AIFADC_TDM_WIDTH 1 /* AIFADC_TDM */
+#define WM8904_AIFADC_TDM_CHAN 0x0400 /* AIFADC_TDM_CHAN */
+#define WM8904_AIFADC_TDM_CHAN_MASK 0x0400 /* AIFADC_TDM_CHAN */
+#define WM8904_AIFADC_TDM_CHAN_SHIFT 10 /* AIFADC_TDM_CHAN */
+#define WM8904_AIFADC_TDM_CHAN_WIDTH 1 /* AIFADC_TDM_CHAN */
+#define WM8904_AIF_TRIS 0x0100 /* AIF_TRIS */
+#define WM8904_AIF_TRIS_MASK 0x0100 /* AIF_TRIS */
+#define WM8904_AIF_TRIS_SHIFT 8 /* AIF_TRIS */
+#define WM8904_AIF_TRIS_WIDTH 1 /* AIF_TRIS */
+#define WM8904_AIF_BCLK_INV 0x0080 /* AIF_BCLK_INV */
+#define WM8904_AIF_BCLK_INV_MASK 0x0080 /* AIF_BCLK_INV */
+#define WM8904_AIF_BCLK_INV_SHIFT 7 /* AIF_BCLK_INV */
+#define WM8904_AIF_BCLK_INV_WIDTH 1 /* AIF_BCLK_INV */
+#define WM8904_BCLK_DIR 0x0040 /* BCLK_DIR */
+#define WM8904_BCLK_DIR_MASK 0x0040 /* BCLK_DIR */
+#define WM8904_BCLK_DIR_SHIFT 6 /* BCLK_DIR */
+#define WM8904_BCLK_DIR_WIDTH 1 /* BCLK_DIR */
+#define WM8904_AIF_LRCLK_INV 0x0010 /* AIF_LRCLK_INV */
+#define WM8904_AIF_LRCLK_INV_MASK 0x0010 /* AIF_LRCLK_INV */
+#define WM8904_AIF_LRCLK_INV_SHIFT 4 /* AIF_LRCLK_INV */
+#define WM8904_AIF_LRCLK_INV_WIDTH 1 /* AIF_LRCLK_INV */
+#define WM8904_AIF_WL_MASK 0x000C /* AIF_WL - [3:2] */
+#define WM8904_AIF_WL_SHIFT 2 /* AIF_WL - [3:2] */
+#define WM8904_AIF_WL_WIDTH 2 /* AIF_WL - [3:2] */
+#define WM8904_AIF_FMT_MASK 0x0003 /* AIF_FMT - [1:0] */
+#define WM8904_AIF_FMT_SHIFT 0 /* AIF_FMT - [1:0] */
+#define WM8904_AIF_FMT_WIDTH 2 /* AIF_FMT - [1:0] */
+
+/*
+ * R26 (0x1A) - Audio Interface 2
+ */
+#define WM8904_OPCLK_DIV_MASK 0x0F00 /* OPCLK_DIV - [11:8] */
+#define WM8904_OPCLK_DIV_SHIFT 8 /* OPCLK_DIV - [11:8] */
+#define WM8904_OPCLK_DIV_WIDTH 4 /* OPCLK_DIV - [11:8] */
+#define WM8904_BCLK_DIV_MASK 0x001F /* BCLK_DIV - [4:0] */
+#define WM8904_BCLK_DIV_SHIFT 0 /* BCLK_DIV - [4:0] */
+#define WM8904_BCLK_DIV_WIDTH 5 /* BCLK_DIV - [4:0] */
+
+/*
+ * R27 (0x1B) - Audio Interface 3
+ */
+#define WM8904_LRCLK_DIR 0x0800 /* LRCLK_DIR */
+#define WM8904_LRCLK_DIR_MASK 0x0800 /* LRCLK_DIR */
+#define WM8904_LRCLK_DIR_SHIFT 11 /* LRCLK_DIR */
+#define WM8904_LRCLK_DIR_WIDTH 1 /* LRCLK_DIR */
+#define WM8904_LRCLK_RATE_MASK 0x07FF /* LRCLK_RATE - [10:0] */
+#define WM8904_LRCLK_RATE_SHIFT 0 /* LRCLK_RATE - [10:0] */
+#define WM8904_LRCLK_RATE_WIDTH 11 /* LRCLK_RATE - [10:0] */
+
+/*
+ * R30 (0x1E) - DAC Digital Volume Left
+ */
+#define WM8904_DAC_VU 0x0100 /* DAC_VU */
+#define WM8904_DAC_VU_MASK 0x0100 /* DAC_VU */
+#define WM8904_DAC_VU_SHIFT 8 /* DAC_VU */
+#define WM8904_DAC_VU_WIDTH 1 /* DAC_VU */
+#define WM8904_DACL_VOL_MASK 0x00FF /* DACL_VOL - [7:0] */
+#define WM8904_DACL_VOL_SHIFT 0 /* DACL_VOL - [7:0] */
+#define WM8904_DACL_VOL_WIDTH 8 /* DACL_VOL - [7:0] */
+
+/*
+ * R31 (0x1F) - DAC Digital Volume Right
+ */
+#define WM8904_DAC_VU 0x0100 /* DAC_VU */
+#define WM8904_DAC_VU_MASK 0x0100 /* DAC_VU */
+#define WM8904_DAC_VU_SHIFT 8 /* DAC_VU */
+#define WM8904_DAC_VU_WIDTH 1 /* DAC_VU */
+#define WM8904_DACR_VOL_MASK 0x00FF /* DACR_VOL - [7:0] */
+#define WM8904_DACR_VOL_SHIFT 0 /* DACR_VOL - [7:0] */
+#define WM8904_DACR_VOL_WIDTH 8 /* DACR_VOL - [7:0] */
+
+/*
+ * R32 (0x20) - DAC Digital 0
+ */
+#define WM8904_ADCL_DAC_SVOL_MASK 0x0F00 /* ADCL_DAC_SVOL - [11:8] */
+#define WM8904_ADCL_DAC_SVOL_SHIFT 8 /* ADCL_DAC_SVOL - [11:8] */
+#define WM8904_ADCL_DAC_SVOL_WIDTH 4 /* ADCL_DAC_SVOL - [11:8] */
+#define WM8904_ADCR_DAC_SVOL_MASK 0x00F0 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8904_ADCR_DAC_SVOL_SHIFT 4 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8904_ADCR_DAC_SVOL_WIDTH 4 /* ADCR_DAC_SVOL - [7:4] */
+#define WM8904_ADC_TO_DACL_MASK 0x000C /* ADC_TO_DACL - [3:2] */
+#define WM8904_ADC_TO_DACL_SHIFT 2 /* ADC_TO_DACL - [3:2] */
+#define WM8904_ADC_TO_DACL_WIDTH 2 /* ADC_TO_DACL - [3:2] */
+#define WM8904_ADC_TO_DACR_MASK 0x0003 /* ADC_TO_DACR - [1:0] */
+#define WM8904_ADC_TO_DACR_SHIFT 0 /* ADC_TO_DACR - [1:0] */
+#define WM8904_ADC_TO_DACR_WIDTH 2 /* ADC_TO_DACR - [1:0] */
+
+/*
+ * R33 (0x21) - DAC Digital 1
+ */
+#define WM8904_DAC_MONO 0x1000 /* DAC_MONO */
+#define WM8904_DAC_MONO_MASK 0x1000 /* DAC_MONO */
+#define WM8904_DAC_MONO_SHIFT 12 /* DAC_MONO */
+#define WM8904_DAC_MONO_WIDTH 1 /* DAC_MONO */
+#define WM8904_DAC_SB_FILT 0x0800 /* DAC_SB_FILT */
+#define WM8904_DAC_SB_FILT_MASK 0x0800 /* DAC_SB_FILT */
+#define WM8904_DAC_SB_FILT_SHIFT 11 /* DAC_SB_FILT */
+#define WM8904_DAC_SB_FILT_WIDTH 1 /* DAC_SB_FILT */
+#define WM8904_DAC_MUTERATE 0x0400 /* DAC_MUTERATE */
+#define WM8904_DAC_MUTERATE_MASK 0x0400 /* DAC_MUTERATE */
+#define WM8904_DAC_MUTERATE_SHIFT 10 /* DAC_MUTERATE */
+#define WM8904_DAC_MUTERATE_WIDTH 1 /* DAC_MUTERATE */
+#define WM8904_DAC_UNMUTE_RAMP 0x0200 /* DAC_UNMUTE_RAMP */
+#define WM8904_DAC_UNMUTE_RAMP_MASK 0x0200 /* DAC_UNMUTE_RAMP */
+#define WM8904_DAC_UNMUTE_RAMP_SHIFT 9 /* DAC_UNMUTE_RAMP */
+#define WM8904_DAC_UNMUTE_RAMP_WIDTH 1 /* DAC_UNMUTE_RAMP */
+#define WM8904_DAC_OSR128 0x0040 /* DAC_OSR128 */
+#define WM8904_DAC_OSR128_MASK 0x0040 /* DAC_OSR128 */
+#define WM8904_DAC_OSR128_SHIFT 6 /* DAC_OSR128 */
+#define WM8904_DAC_OSR128_WIDTH 1 /* DAC_OSR128 */
+#define WM8904_DAC_MUTE 0x0008 /* DAC_MUTE */
+#define WM8904_DAC_MUTE_MASK 0x0008 /* DAC_MUTE */
+#define WM8904_DAC_MUTE_SHIFT 3 /* DAC_MUTE */
+#define WM8904_DAC_MUTE_WIDTH 1 /* DAC_MUTE */
+#define WM8904_DEEMPH_MASK 0x0006 /* DEEMPH - [2:1] */
+#define WM8904_DEEMPH_SHIFT 1 /* DEEMPH - [2:1] */
+#define WM8904_DEEMPH_WIDTH 2 /* DEEMPH - [2:1] */
+
+/*
+ * R36 (0x24) - ADC Digital Volume Left
+ */
+#define WM8904_ADC_VU 0x0100 /* ADC_VU */
+#define WM8904_ADC_VU_MASK 0x0100 /* ADC_VU */
+#define WM8904_ADC_VU_SHIFT 8 /* ADC_VU */
+#define WM8904_ADC_VU_WIDTH 1 /* ADC_VU */
+#define WM8904_ADCL_VOL_MASK 0x00FF /* ADCL_VOL - [7:0] */
+#define WM8904_ADCL_VOL_SHIFT 0 /* ADCL_VOL - [7:0] */
+#define WM8904_ADCL_VOL_WIDTH 8 /* ADCL_VOL - [7:0] */
+
+/*
+ * R37 (0x25) - ADC Digital Volume Right
+ */
+#define WM8904_ADC_VU 0x0100 /* ADC_VU */
+#define WM8904_ADC_VU_MASK 0x0100 /* ADC_VU */
+#define WM8904_ADC_VU_SHIFT 8 /* ADC_VU */
+#define WM8904_ADC_VU_WIDTH 1 /* ADC_VU */
+#define WM8904_ADCR_VOL_MASK 0x00FF /* ADCR_VOL - [7:0] */
+#define WM8904_ADCR_VOL_SHIFT 0 /* ADCR_VOL - [7:0] */
+#define WM8904_ADCR_VOL_WIDTH 8 /* ADCR_VOL - [7:0] */
+
+/*
+ * R38 (0x26) - ADC Digital 0
+ */
+#define WM8904_ADC_HPF_CUT_MASK 0x0060 /* ADC_HPF_CUT - [6:5] */
+#define WM8904_ADC_HPF_CUT_SHIFT 5 /* ADC_HPF_CUT - [6:5] */
+#define WM8904_ADC_HPF_CUT_WIDTH 2 /* ADC_HPF_CUT - [6:5] */
+#define WM8904_ADC_HPF 0x0010 /* ADC_HPF */
+#define WM8904_ADC_HPF_MASK 0x0010 /* ADC_HPF */
+#define WM8904_ADC_HPF_SHIFT 4 /* ADC_HPF */
+#define WM8904_ADC_HPF_WIDTH 1 /* ADC_HPF */
+#define WM8904_ADCL_DATINV 0x0002 /* ADCL_DATINV */
+#define WM8904_ADCL_DATINV_MASK 0x0002 /* ADCL_DATINV */
+#define WM8904_ADCL_DATINV_SHIFT 1 /* ADCL_DATINV */
+#define WM8904_ADCL_DATINV_WIDTH 1 /* ADCL_DATINV */
+#define WM8904_ADCR_DATINV 0x0001 /* ADCR_DATINV */
+#define WM8904_ADCR_DATINV_MASK 0x0001 /* ADCR_DATINV */
+#define WM8904_ADCR_DATINV_SHIFT 0 /* ADCR_DATINV */
+#define WM8904_ADCR_DATINV_WIDTH 1 /* ADCR_DATINV */
+
+/*
+ * R39 (0x27) - Digital Microphone 0
+ */
+#define WM8904_DMIC_ENA 0x1000 /* DMIC_ENA */
+#define WM8904_DMIC_ENA_MASK 0x1000 /* DMIC_ENA */
+#define WM8904_DMIC_ENA_SHIFT 12 /* DMIC_ENA */
+#define WM8904_DMIC_ENA_WIDTH 1 /* DMIC_ENA */
+#define WM8904_DMIC_SRC 0x0800 /* DMIC_SRC */
+#define WM8904_DMIC_SRC_MASK 0x0800 /* DMIC_SRC */
+#define WM8904_DMIC_SRC_SHIFT 11 /* DMIC_SRC */
+#define WM8904_DMIC_SRC_WIDTH 1 /* DMIC_SRC */
+
+/*
+ * R40 (0x28) - DRC 0
+ */
+#define WM8904_DRC_ENA 0x8000 /* DRC_ENA */
+#define WM8904_DRC_ENA_MASK 0x8000 /* DRC_ENA */
+#define WM8904_DRC_ENA_SHIFT 15 /* DRC_ENA */
+#define WM8904_DRC_ENA_WIDTH 1 /* DRC_ENA */
+#define WM8904_DRC_DAC_PATH 0x4000 /* DRC_DAC_PATH */
+#define WM8904_DRC_DAC_PATH_MASK 0x4000 /* DRC_DAC_PATH */
+#define WM8904_DRC_DAC_PATH_SHIFT 14 /* DRC_DAC_PATH */
+#define WM8904_DRC_DAC_PATH_WIDTH 1 /* DRC_DAC_PATH */
+#define WM8904_DRC_GS_HYST_LVL_MASK 0x1800 /* DRC_GS_HYST_LVL - [12:11] */
+#define WM8904_DRC_GS_HYST_LVL_SHIFT 11 /* DRC_GS_HYST_LVL - [12:11] */
+#define WM8904_DRC_GS_HYST_LVL_WIDTH 2 /* DRC_GS_HYST_LVL - [12:11] */
+#define WM8904_DRC_STARTUP_GAIN_MASK 0x07C0 /* DRC_STARTUP_GAIN - [10:6] */
+#define WM8904_DRC_STARTUP_GAIN_SHIFT 6 /* DRC_STARTUP_GAIN - [10:6] */
+#define WM8904_DRC_STARTUP_GAIN_WIDTH 5 /* DRC_STARTUP_GAIN - [10:6] */
+#define WM8904_DRC_FF_DELAY 0x0020 /* DRC_FF_DELAY */
+#define WM8904_DRC_FF_DELAY_MASK 0x0020 /* DRC_FF_DELAY */
+#define WM8904_DRC_FF_DELAY_SHIFT 5 /* DRC_FF_DELAY */
+#define WM8904_DRC_FF_DELAY_WIDTH 1 /* DRC_FF_DELAY */
+#define WM8904_DRC_GS_ENA 0x0008 /* DRC_GS_ENA */
+#define WM8904_DRC_GS_ENA_MASK 0x0008 /* DRC_GS_ENA */
+#define WM8904_DRC_GS_ENA_SHIFT 3 /* DRC_GS_ENA */
+#define WM8904_DRC_GS_ENA_WIDTH 1 /* DRC_GS_ENA */
+#define WM8904_DRC_QR 0x0004 /* DRC_QR */
+#define WM8904_DRC_QR_MASK 0x0004 /* DRC_QR */
+#define WM8904_DRC_QR_SHIFT 2 /* DRC_QR */
+#define WM8904_DRC_QR_WIDTH 1 /* DRC_QR */
+#define WM8904_DRC_ANTICLIP 0x0002 /* DRC_ANTICLIP */
+#define WM8904_DRC_ANTICLIP_MASK 0x0002 /* DRC_ANTICLIP */
+#define WM8904_DRC_ANTICLIP_SHIFT 1 /* DRC_ANTICLIP */
+#define WM8904_DRC_ANTICLIP_WIDTH 1 /* DRC_ANTICLIP */
+#define WM8904_DRC_GS_HYST 0x0001 /* DRC_GS_HYST */
+#define WM8904_DRC_GS_HYST_MASK 0x0001 /* DRC_GS_HYST */
+#define WM8904_DRC_GS_HYST_SHIFT 0 /* DRC_GS_HYST */
+#define WM8904_DRC_GS_HYST_WIDTH 1 /* DRC_GS_HYST */
+
+/*
+ * R41 (0x29) - DRC 1
+ */
+#define WM8904_DRC_ATK_MASK 0xF000 /* DRC_ATK - [15:12] */
+#define WM8904_DRC_ATK_SHIFT 12 /* DRC_ATK - [15:12] */
+#define WM8904_DRC_ATK_WIDTH 4 /* DRC_ATK - [15:12] */
+#define WM8904_DRC_DCY_MASK 0x0F00 /* DRC_DCY - [11:8] */
+#define WM8904_DRC_DCY_SHIFT 8 /* DRC_DCY - [11:8] */
+#define WM8904_DRC_DCY_WIDTH 4 /* DRC_DCY - [11:8] */
+#define WM8904_DRC_QR_THR_MASK 0x00C0 /* DRC_QR_THR - [7:6] */
+#define WM8904_DRC_QR_THR_SHIFT 6 /* DRC_QR_THR - [7:6] */
+#define WM8904_DRC_QR_THR_WIDTH 2 /* DRC_QR_THR - [7:6] */
+#define WM8904_DRC_QR_DCY_MASK 0x0030 /* DRC_QR_DCY - [5:4] */
+#define WM8904_DRC_QR_DCY_SHIFT 4 /* DRC_QR_DCY - [5:4] */
+#define WM8904_DRC_QR_DCY_WIDTH 2 /* DRC_QR_DCY - [5:4] */
+#define WM8904_DRC_MINGAIN_MASK 0x000C /* DRC_MINGAIN - [3:2] */
+#define WM8904_DRC_MINGAIN_SHIFT 2 /* DRC_MINGAIN - [3:2] */
+#define WM8904_DRC_MINGAIN_WIDTH 2 /* DRC_MINGAIN - [3:2] */
+#define WM8904_DRC_MAXGAIN_MASK 0x0003 /* DRC_MAXGAIN - [1:0] */
+#define WM8904_DRC_MAXGAIN_SHIFT 0 /* DRC_MAXGAIN - [1:0] */
+#define WM8904_DRC_MAXGAIN_WIDTH 2 /* DRC_MAXGAIN - [1:0] */
+
+/*
+ * R42 (0x2A) - DRC 2
+ */
+#define WM8904_DRC_HI_COMP_MASK 0x0038 /* DRC_HI_COMP - [5:3] */
+#define WM8904_DRC_HI_COMP_SHIFT 3 /* DRC_HI_COMP - [5:3] */
+#define WM8904_DRC_HI_COMP_WIDTH 3 /* DRC_HI_COMP - [5:3] */
+#define WM8904_DRC_LO_COMP_MASK 0x0007 /* DRC_LO_COMP - [2:0] */
+#define WM8904_DRC_LO_COMP_SHIFT 0 /* DRC_LO_COMP - [2:0] */
+#define WM8904_DRC_LO_COMP_WIDTH 3 /* DRC_LO_COMP - [2:0] */
+
+/*
+ * R43 (0x2B) - DRC 3
+ */
+#define WM8904_DRC_KNEE_IP_MASK 0x07E0 /* DRC_KNEE_IP - [10:5] */
+#define WM8904_DRC_KNEE_IP_SHIFT 5 /* DRC_KNEE_IP - [10:5] */
+#define WM8904_DRC_KNEE_IP_WIDTH 6 /* DRC_KNEE_IP - [10:5] */
+#define WM8904_DRC_KNEE_OP_MASK 0x001F /* DRC_KNEE_OP - [4:0] */
+#define WM8904_DRC_KNEE_OP_SHIFT 0 /* DRC_KNEE_OP - [4:0] */
+#define WM8904_DRC_KNEE_OP_WIDTH 5 /* DRC_KNEE_OP - [4:0] */
+
+/*
+ * R44 (0x2C) - Analogue Left Input 0
+ */
+#define WM8904_LINMUTE 0x0080 /* LINMUTE */
+#define WM8904_LINMUTE_MASK 0x0080 /* LINMUTE */
+#define WM8904_LINMUTE_SHIFT 7 /* LINMUTE */
+#define WM8904_LINMUTE_WIDTH 1 /* LINMUTE */
+#define WM8904_LIN_VOL_MASK 0x001F /* LIN_VOL - [4:0] */
+#define WM8904_LIN_VOL_SHIFT 0 /* LIN_VOL - [4:0] */
+#define WM8904_LIN_VOL_WIDTH 5 /* LIN_VOL - [4:0] */
+
+/*
+ * R45 (0x2D) - Analogue Right Input 0
+ */
+#define WM8904_RINMUTE 0x0080 /* RINMUTE */
+#define WM8904_RINMUTE_MASK 0x0080 /* RINMUTE */
+#define WM8904_RINMUTE_SHIFT 7 /* RINMUTE */
+#define WM8904_RINMUTE_WIDTH 1 /* RINMUTE */
+#define WM8904_RIN_VOL_MASK 0x001F /* RIN_VOL - [4:0] */
+#define WM8904_RIN_VOL_SHIFT 0 /* RIN_VOL - [4:0] */
+#define WM8904_RIN_VOL_WIDTH 5 /* RIN_VOL - [4:0] */
+
+/*
+ * R46 (0x2E) - Analogue Left Input 1
+ */
+#define WM8904_INL_CM_ENA 0x0040 /* INL_CM_ENA */
+#define WM8904_INL_CM_ENA_MASK 0x0040 /* INL_CM_ENA */
+#define WM8904_INL_CM_ENA_SHIFT 6 /* INL_CM_ENA */
+#define WM8904_INL_CM_ENA_WIDTH 1 /* INL_CM_ENA */
+#define WM8904_L_IP_SEL_N_MASK 0x0030 /* L_IP_SEL_N - [5:4] */
+#define WM8904_L_IP_SEL_N_SHIFT 4 /* L_IP_SEL_N - [5:4] */
+#define WM8904_L_IP_SEL_N_WIDTH 2 /* L_IP_SEL_N - [5:4] */
+#define WM8904_L_IP_SEL_P_MASK 0x000C /* L_IP_SEL_P - [3:2] */
+#define WM8904_L_IP_SEL_P_SHIFT 2 /* L_IP_SEL_P - [3:2] */
+#define WM8904_L_IP_SEL_P_WIDTH 2 /* L_IP_SEL_P - [3:2] */
+#define WM8904_L_MODE_MASK 0x0003 /* L_MODE - [1:0] */
+#define WM8904_L_MODE_SHIFT 0 /* L_MODE - [1:0] */
+#define WM8904_L_MODE_WIDTH 2 /* L_MODE - [1:0] */
+
+/*
+ * R47 (0x2F) - Analogue Right Input 1
+ */
+#define WM8904_INR_CM_ENA 0x0040 /* INR_CM_ENA */
+#define WM8904_INR_CM_ENA_MASK 0x0040 /* INR_CM_ENA */
+#define WM8904_INR_CM_ENA_SHIFT 6 /* INR_CM_ENA */
+#define WM8904_INR_CM_ENA_WIDTH 1 /* INR_CM_ENA */
+#define WM8904_R_IP_SEL_N_MASK 0x0030 /* R_IP_SEL_N - [5:4] */
+#define WM8904_R_IP_SEL_N_SHIFT 4 /* R_IP_SEL_N - [5:4] */
+#define WM8904_R_IP_SEL_N_WIDTH 2 /* R_IP_SEL_N - [5:4] */
+#define WM8904_R_IP_SEL_P_MASK 0x000C /* R_IP_SEL_P - [3:2] */
+#define WM8904_R_IP_SEL_P_SHIFT 2 /* R_IP_SEL_P - [3:2] */
+#define WM8904_R_IP_SEL_P_WIDTH 2 /* R_IP_SEL_P - [3:2] */
+#define WM8904_R_MODE_MASK 0x0003 /* R_MODE - [1:0] */
+#define WM8904_R_MODE_SHIFT 0 /* R_MODE - [1:0] */
+#define WM8904_R_MODE_WIDTH 2 /* R_MODE - [1:0] */
+
+/*
+ * R57 (0x39) - Analogue OUT1 Left
+ */
+#define WM8904_HPOUTL_MUTE 0x0100 /* HPOUTL_MUTE */
+#define WM8904_HPOUTL_MUTE_MASK 0x0100 /* HPOUTL_MUTE */
+#define WM8904_HPOUTL_MUTE_SHIFT 8 /* HPOUTL_MUTE */
+#define WM8904_HPOUTL_MUTE_WIDTH 1 /* HPOUTL_MUTE */
+#define WM8904_HPOUT_VU 0x0080 /* HPOUT_VU */
+#define WM8904_HPOUT_VU_MASK 0x0080 /* HPOUT_VU */
+#define WM8904_HPOUT_VU_SHIFT 7 /* HPOUT_VU */
+#define WM8904_HPOUT_VU_WIDTH 1 /* HPOUT_VU */
+#define WM8904_HPOUTLZC 0x0040 /* HPOUTLZC */
+#define WM8904_HPOUTLZC_MASK 0x0040 /* HPOUTLZC */
+#define WM8904_HPOUTLZC_SHIFT 6 /* HPOUTLZC */
+#define WM8904_HPOUTLZC_WIDTH 1 /* HPOUTLZC */
+#define WM8904_HPOUTL_VOL_MASK 0x003F /* HPOUTL_VOL - [5:0] */
+#define WM8904_HPOUTL_VOL_SHIFT 0 /* HPOUTL_VOL - [5:0] */
+#define WM8904_HPOUTL_VOL_WIDTH 6 /* HPOUTL_VOL - [5:0] */
+
+/*
+ * R58 (0x3A) - Analogue OUT1 Right
+ */
+#define WM8904_HPOUTR_MUTE 0x0100 /* HPOUTR_MUTE */
+#define WM8904_HPOUTR_MUTE_MASK 0x0100 /* HPOUTR_MUTE */
+#define WM8904_HPOUTR_MUTE_SHIFT 8 /* HPOUTR_MUTE */
+#define WM8904_HPOUTR_MUTE_WIDTH 1 /* HPOUTR_MUTE */
+#define WM8904_HPOUT_VU 0x0080 /* HPOUT_VU */
+#define WM8904_HPOUT_VU_MASK 0x0080 /* HPOUT_VU */
+#define WM8904_HPOUT_VU_SHIFT 7 /* HPOUT_VU */
+#define WM8904_HPOUT_VU_WIDTH 1 /* HPOUT_VU */
+#define WM8904_HPOUTRZC 0x0040 /* HPOUTRZC */
+#define WM8904_HPOUTRZC_MASK 0x0040 /* HPOUTRZC */
+#define WM8904_HPOUTRZC_SHIFT 6 /* HPOUTRZC */
+#define WM8904_HPOUTRZC_WIDTH 1 /* HPOUTRZC */
+#define WM8904_HPOUTR_VOL_MASK 0x003F /* HPOUTR_VOL - [5:0] */
+#define WM8904_HPOUTR_VOL_SHIFT 0 /* HPOUTR_VOL - [5:0] */
+#define WM8904_HPOUTR_VOL_WIDTH 6 /* HPOUTR_VOL - [5:0] */
+
+/*
+ * R59 (0x3B) - Analogue OUT2 Left
+ */
+#define WM8904_LINEOUTL_MUTE 0x0100 /* LINEOUTL_MUTE */
+#define WM8904_LINEOUTL_MUTE_MASK 0x0100 /* LINEOUTL_MUTE */
+#define WM8904_LINEOUTL_MUTE_SHIFT 8 /* LINEOUTL_MUTE */
+#define WM8904_LINEOUTL_MUTE_WIDTH 1 /* LINEOUTL_MUTE */
+#define WM8904_LINEOUT_VU 0x0080 /* LINEOUT_VU */
+#define WM8904_LINEOUT_VU_MASK 0x0080 /* LINEOUT_VU */
+#define WM8904_LINEOUT_VU_SHIFT 7 /* LINEOUT_VU */
+#define WM8904_LINEOUT_VU_WIDTH 1 /* LINEOUT_VU */
+#define WM8904_LINEOUTLZC 0x0040 /* LINEOUTLZC */
+#define WM8904_LINEOUTLZC_MASK 0x0040 /* LINEOUTLZC */
+#define WM8904_LINEOUTLZC_SHIFT 6 /* LINEOUTLZC */
+#define WM8904_LINEOUTLZC_WIDTH 1 /* LINEOUTLZC */
+#define WM8904_LINEOUTL_VOL_MASK 0x003F /* LINEOUTL_VOL - [5:0] */
+#define WM8904_LINEOUTL_VOL_SHIFT 0 /* LINEOUTL_VOL - [5:0] */
+#define WM8904_LINEOUTL_VOL_WIDTH 6 /* LINEOUTL_VOL - [5:0] */
+
+/*
+ * R60 (0x3C) - Analogue OUT2 Right
+ */
+#define WM8904_LINEOUTR_MUTE 0x0100 /* LINEOUTR_MUTE */
+#define WM8904_LINEOUTR_MUTE_MASK 0x0100 /* LINEOUTR_MUTE */
+#define WM8904_LINEOUTR_MUTE_SHIFT 8 /* LINEOUTR_MUTE */
+#define WM8904_LINEOUTR_MUTE_WIDTH 1 /* LINEOUTR_MUTE */
+#define WM8904_LINEOUT_VU 0x0080 /* LINEOUT_VU */
+#define WM8904_LINEOUT_VU_MASK 0x0080 /* LINEOUT_VU */
+#define WM8904_LINEOUT_VU_SHIFT 7 /* LINEOUT_VU */
+#define WM8904_LINEOUT_VU_WIDTH 1 /* LINEOUT_VU */
+#define WM8904_LINEOUTRZC 0x0040 /* LINEOUTRZC */
+#define WM8904_LINEOUTRZC_MASK 0x0040 /* LINEOUTRZC */
+#define WM8904_LINEOUTRZC_SHIFT 6 /* LINEOUTRZC */
+#define WM8904_LINEOUTRZC_WIDTH 1 /* LINEOUTRZC */
+#define WM8904_LINEOUTR_VOL_MASK 0x003F /* LINEOUTR_VOL - [5:0] */
+#define WM8904_LINEOUTR_VOL_SHIFT 0 /* LINEOUTR_VOL - [5:0] */
+#define WM8904_LINEOUTR_VOL_WIDTH 6 /* LINEOUTR_VOL - [5:0] */
+
+/*
+ * R61 (0x3D) - Analogue OUT12 ZC
+ */
+#define WM8904_HPL_BYP_ENA 0x0008 /* HPL_BYP_ENA */
+#define WM8904_HPL_BYP_ENA_MASK 0x0008 /* HPL_BYP_ENA */
+#define WM8904_HPL_BYP_ENA_SHIFT 3 /* HPL_BYP_ENA */
+#define WM8904_HPL_BYP_ENA_WIDTH 1 /* HPL_BYP_ENA */
+#define WM8904_HPR_BYP_ENA 0x0004 /* HPR_BYP_ENA */
+#define WM8904_HPR_BYP_ENA_MASK 0x0004 /* HPR_BYP_ENA */
+#define WM8904_HPR_BYP_ENA_SHIFT 2 /* HPR_BYP_ENA */
+#define WM8904_HPR_BYP_ENA_WIDTH 1 /* HPR_BYP_ENA */
+#define WM8904_LINEOUTL_BYP_ENA 0x0002 /* LINEOUTL_BYP_ENA */
+#define WM8904_LINEOUTL_BYP_ENA_MASK 0x0002 /* LINEOUTL_BYP_ENA */
+#define WM8904_LINEOUTL_BYP_ENA_SHIFT 1 /* LINEOUTL_BYP_ENA */
+#define WM8904_LINEOUTL_BYP_ENA_WIDTH 1 /* LINEOUTL_BYP_ENA */
+#define WM8904_LINEOUTR_BYP_ENA 0x0001 /* LINEOUTR_BYP_ENA */
+#define WM8904_LINEOUTR_BYP_ENA_MASK 0x0001 /* LINEOUTR_BYP_ENA */
+#define WM8904_LINEOUTR_BYP_ENA_SHIFT 0 /* LINEOUTR_BYP_ENA */
+#define WM8904_LINEOUTR_BYP_ENA_WIDTH 1 /* LINEOUTR_BYP_ENA */
+
+/*
+ * R67 (0x43) - DC Servo 0
+ */
+#define WM8904_DCS_ENA_CHAN_3 0x0008 /* DCS_ENA_CHAN_3 */
+#define WM8904_DCS_ENA_CHAN_3_MASK 0x0008 /* DCS_ENA_CHAN_3 */
+#define WM8904_DCS_ENA_CHAN_3_SHIFT 3 /* DCS_ENA_CHAN_3 */
+#define WM8904_DCS_ENA_CHAN_3_WIDTH 1 /* DCS_ENA_CHAN_3 */
+#define WM8904_DCS_ENA_CHAN_2 0x0004 /* DCS_ENA_CHAN_2 */
+#define WM8904_DCS_ENA_CHAN_2_MASK 0x0004 /* DCS_ENA_CHAN_2 */
+#define WM8904_DCS_ENA_CHAN_2_SHIFT 2 /* DCS_ENA_CHAN_2 */
+#define WM8904_DCS_ENA_CHAN_2_WIDTH 1 /* DCS_ENA_CHAN_2 */
+#define WM8904_DCS_ENA_CHAN_1 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8904_DCS_ENA_CHAN_1_MASK 0x0002 /* DCS_ENA_CHAN_1 */
+#define WM8904_DCS_ENA_CHAN_1_SHIFT 1 /* DCS_ENA_CHAN_1 */
+#define WM8904_DCS_ENA_CHAN_1_WIDTH 1 /* DCS_ENA_CHAN_1 */
+#define WM8904_DCS_ENA_CHAN_0 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8904_DCS_ENA_CHAN_0_MASK 0x0001 /* DCS_ENA_CHAN_0 */
+#define WM8904_DCS_ENA_CHAN_0_SHIFT 0 /* DCS_ENA_CHAN_0 */
+#define WM8904_DCS_ENA_CHAN_0_WIDTH 1 /* DCS_ENA_CHAN_0 */
+
+/*
+ * R68 (0x44) - DC Servo 1
+ */
+#define WM8904_DCS_TRIG_SINGLE_3 0x8000 /* DCS_TRIG_SINGLE_3 */
+#define WM8904_DCS_TRIG_SINGLE_3_MASK 0x8000 /* DCS_TRIG_SINGLE_3 */
+#define WM8904_DCS_TRIG_SINGLE_3_SHIFT 15 /* DCS_TRIG_SINGLE_3 */
+#define WM8904_DCS_TRIG_SINGLE_3_WIDTH 1 /* DCS_TRIG_SINGLE_3 */
+#define WM8904_DCS_TRIG_SINGLE_2 0x4000 /* DCS_TRIG_SINGLE_2 */
+#define WM8904_DCS_TRIG_SINGLE_2_MASK 0x4000 /* DCS_TRIG_SINGLE_2 */
+#define WM8904_DCS_TRIG_SINGLE_2_SHIFT 14 /* DCS_TRIG_SINGLE_2 */
+#define WM8904_DCS_TRIG_SINGLE_2_WIDTH 1 /* DCS_TRIG_SINGLE_2 */
+#define WM8904_DCS_TRIG_SINGLE_1 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8904_DCS_TRIG_SINGLE_1_MASK 0x2000 /* DCS_TRIG_SINGLE_1 */
+#define WM8904_DCS_TRIG_SINGLE_1_SHIFT 13 /* DCS_TRIG_SINGLE_1 */
+#define WM8904_DCS_TRIG_SINGLE_1_WIDTH 1 /* DCS_TRIG_SINGLE_1 */
+#define WM8904_DCS_TRIG_SINGLE_0 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8904_DCS_TRIG_SINGLE_0_MASK 0x1000 /* DCS_TRIG_SINGLE_0 */
+#define WM8904_DCS_TRIG_SINGLE_0_SHIFT 12 /* DCS_TRIG_SINGLE_0 */
+#define WM8904_DCS_TRIG_SINGLE_0_WIDTH 1 /* DCS_TRIG_SINGLE_0 */
+#define WM8904_DCS_TRIG_SERIES_3 0x0800 /* DCS_TRIG_SERIES_3 */
+#define WM8904_DCS_TRIG_SERIES_3_MASK 0x0800 /* DCS_TRIG_SERIES_3 */
+#define WM8904_DCS_TRIG_SERIES_3_SHIFT 11 /* DCS_TRIG_SERIES_3 */
+#define WM8904_DCS_TRIG_SERIES_3_WIDTH 1 /* DCS_TRIG_SERIES_3 */
+#define WM8904_DCS_TRIG_SERIES_2 0x0400 /* DCS_TRIG_SERIES_2 */
+#define WM8904_DCS_TRIG_SERIES_2_MASK 0x0400 /* DCS_TRIG_SERIES_2 */
+#define WM8904_DCS_TRIG_SERIES_2_SHIFT 10 /* DCS_TRIG_SERIES_2 */
+#define WM8904_DCS_TRIG_SERIES_2_WIDTH 1 /* DCS_TRIG_SERIES_2 */
+#define WM8904_DCS_TRIG_SERIES_1 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8904_DCS_TRIG_SERIES_1_MASK 0x0200 /* DCS_TRIG_SERIES_1 */
+#define WM8904_DCS_TRIG_SERIES_1_SHIFT 9 /* DCS_TRIG_SERIES_1 */
+#define WM8904_DCS_TRIG_SERIES_1_WIDTH 1 /* DCS_TRIG_SERIES_1 */
+#define WM8904_DCS_TRIG_SERIES_0 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8904_DCS_TRIG_SERIES_0_MASK 0x0100 /* DCS_TRIG_SERIES_0 */
+#define WM8904_DCS_TRIG_SERIES_0_SHIFT 8 /* DCS_TRIG_SERIES_0 */
+#define WM8904_DCS_TRIG_SERIES_0_WIDTH 1 /* DCS_TRIG_SERIES_0 */
+#define WM8904_DCS_TRIG_STARTUP_3 0x0080 /* DCS_TRIG_STARTUP_3 */
+#define WM8904_DCS_TRIG_STARTUP_3_MASK 0x0080 /* DCS_TRIG_STARTUP_3 */
+#define WM8904_DCS_TRIG_STARTUP_3_SHIFT 7 /* DCS_TRIG_STARTUP_3 */
+#define WM8904_DCS_TRIG_STARTUP_3_WIDTH 1 /* DCS_TRIG_STARTUP_3 */
+#define WM8904_DCS_TRIG_STARTUP_2 0x0040 /* DCS_TRIG_STARTUP_2 */
+#define WM8904_DCS_TRIG_STARTUP_2_MASK 0x0040 /* DCS_TRIG_STARTUP_2 */
+#define WM8904_DCS_TRIG_STARTUP_2_SHIFT 6 /* DCS_TRIG_STARTUP_2 */
+#define WM8904_DCS_TRIG_STARTUP_2_WIDTH 1 /* DCS_TRIG_STARTUP_2 */
+#define WM8904_DCS_TRIG_STARTUP_1 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8904_DCS_TRIG_STARTUP_1_MASK 0x0020 /* DCS_TRIG_STARTUP_1 */
+#define WM8904_DCS_TRIG_STARTUP_1_SHIFT 5 /* DCS_TRIG_STARTUP_1 */
+#define WM8904_DCS_TRIG_STARTUP_1_WIDTH 1 /* DCS_TRIG_STARTUP_1 */
+#define WM8904_DCS_TRIG_STARTUP_0 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8904_DCS_TRIG_STARTUP_0_MASK 0x0010 /* DCS_TRIG_STARTUP_0 */
+#define WM8904_DCS_TRIG_STARTUP_0_SHIFT 4 /* DCS_TRIG_STARTUP_0 */
+#define WM8904_DCS_TRIG_STARTUP_0_WIDTH 1 /* DCS_TRIG_STARTUP_0 */
+#define WM8904_DCS_TRIG_DAC_WR_3 0x0008 /* DCS_TRIG_DAC_WR_3 */
+#define WM8904_DCS_TRIG_DAC_WR_3_MASK 0x0008 /* DCS_TRIG_DAC_WR_3 */
+#define WM8904_DCS_TRIG_DAC_WR_3_SHIFT 3 /* DCS_TRIG_DAC_WR_3 */
+#define WM8904_DCS_TRIG_DAC_WR_3_WIDTH 1 /* DCS_TRIG_DAC_WR_3 */
+#define WM8904_DCS_TRIG_DAC_WR_2 0x0004 /* DCS_TRIG_DAC_WR_2 */
+#define WM8904_DCS_TRIG_DAC_WR_2_MASK 0x0004 /* DCS_TRIG_DAC_WR_2 */
+#define WM8904_DCS_TRIG_DAC_WR_2_SHIFT 2 /* DCS_TRIG_DAC_WR_2 */
+#define WM8904_DCS_TRIG_DAC_WR_2_WIDTH 1 /* DCS_TRIG_DAC_WR_2 */
+#define WM8904_DCS_TRIG_DAC_WR_1 0x0002 /* DCS_TRIG_DAC_WR_1 */
+#define WM8904_DCS_TRIG_DAC_WR_1_MASK 0x0002 /* DCS_TRIG_DAC_WR_1 */
+#define WM8904_DCS_TRIG_DAC_WR_1_SHIFT 1 /* DCS_TRIG_DAC_WR_1 */
+#define WM8904_DCS_TRIG_DAC_WR_1_WIDTH 1 /* DCS_TRIG_DAC_WR_1 */
+#define WM8904_DCS_TRIG_DAC_WR_0 0x0001 /* DCS_TRIG_DAC_WR_0 */
+#define WM8904_DCS_TRIG_DAC_WR_0_MASK 0x0001 /* DCS_TRIG_DAC_WR_0 */
+#define WM8904_DCS_TRIG_DAC_WR_0_SHIFT 0 /* DCS_TRIG_DAC_WR_0 */
+#define WM8904_DCS_TRIG_DAC_WR_0_WIDTH 1 /* DCS_TRIG_DAC_WR_0 */
+
+/*
+ * R69 (0x45) - DC Servo 2
+ */
+#define WM8904_DCS_TIMER_PERIOD_23_MASK 0x0F00 /* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8904_DCS_TIMER_PERIOD_23_SHIFT 8 /* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8904_DCS_TIMER_PERIOD_23_WIDTH 4 /* DCS_TIMER_PERIOD_23 - [11:8] */
+#define WM8904_DCS_TIMER_PERIOD_01_MASK 0x000F /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8904_DCS_TIMER_PERIOD_01_SHIFT 0 /* DCS_TIMER_PERIOD_01 - [3:0] */
+#define WM8904_DCS_TIMER_PERIOD_01_WIDTH 4 /* DCS_TIMER_PERIOD_01 - [3:0] */
+
+/*
+ * R71 (0x47) - DC Servo 4
+ */
+#define WM8904_DCS_SERIES_NO_23_MASK 0x007F /* DCS_SERIES_NO_23 - [6:0] */
+#define WM8904_DCS_SERIES_NO_23_SHIFT 0 /* DCS_SERIES_NO_23 - [6:0] */
+#define WM8904_DCS_SERIES_NO_23_WIDTH 7 /* DCS_SERIES_NO_23 - [6:0] */
+
+/*
+ * R72 (0x48) - DC Servo 5
+ */
+#define WM8904_DCS_SERIES_NO_01_MASK 0x007F /* DCS_SERIES_NO_01 - [6:0] */
+#define WM8904_DCS_SERIES_NO_01_SHIFT 0 /* DCS_SERIES_NO_01 - [6:0] */
+#define WM8904_DCS_SERIES_NO_01_WIDTH 7 /* DCS_SERIES_NO_01 - [6:0] */
+
+/*
+ * R73 (0x49) - DC Servo 6
+ */
+#define WM8904_DCS_DAC_WR_VAL_3_MASK 0x00FF /* DCS_DAC_WR_VAL_3 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_3_SHIFT 0 /* DCS_DAC_WR_VAL_3 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_3_WIDTH 8 /* DCS_DAC_WR_VAL_3 - [7:0] */
+
+/*
+ * R74 (0x4A) - DC Servo 7
+ */
+#define WM8904_DCS_DAC_WR_VAL_2_MASK 0x00FF /* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_2_SHIFT 0 /* DCS_DAC_WR_VAL_2 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_2_WIDTH 8 /* DCS_DAC_WR_VAL_2 - [7:0] */
+
+/*
+ * R75 (0x4B) - DC Servo 8
+ */
+#define WM8904_DCS_DAC_WR_VAL_1_MASK 0x00FF /* DCS_DAC_WR_VAL_1 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_1_SHIFT 0 /* DCS_DAC_WR_VAL_1 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_1_WIDTH 8 /* DCS_DAC_WR_VAL_1 - [7:0] */
+
+/*
+ * R76 (0x4C) - DC Servo 9
+ */
+#define WM8904_DCS_DAC_WR_VAL_0_MASK 0x00FF /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_0_SHIFT 0 /* DCS_DAC_WR_VAL_0 - [7:0] */
+#define WM8904_DCS_DAC_WR_VAL_0_WIDTH 8 /* DCS_DAC_WR_VAL_0 - [7:0] */
+
+/*
+ * R77 (0x4D) - DC Servo Readback 0
+ */
+#define WM8904_DCS_CAL_COMPLETE_MASK 0x0F00 /* DCS_CAL_COMPLETE - [11:8] */
+#define WM8904_DCS_CAL_COMPLETE_SHIFT 8 /* DCS_CAL_COMPLETE - [11:8] */
+#define WM8904_DCS_CAL_COMPLETE_WIDTH 4 /* DCS_CAL_COMPLETE - [11:8] */
+#define WM8904_DCS_DAC_WR_COMPLETE_MASK 0x00F0 /* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8904_DCS_DAC_WR_COMPLETE_SHIFT 4 /* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8904_DCS_DAC_WR_COMPLETE_WIDTH 4 /* DCS_DAC_WR_COMPLETE - [7:4] */
+#define WM8904_DCS_STARTUP_COMPLETE_MASK 0x000F /* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8904_DCS_STARTUP_COMPLETE_SHIFT 0 /* DCS_STARTUP_COMPLETE - [3:0] */
+#define WM8904_DCS_STARTUP_COMPLETE_WIDTH 4 /* DCS_STARTUP_COMPLETE - [3:0] */
+
+/*
+ * R90 (0x5A) - Analogue HP 0
+ */
+#define WM8904_HPL_RMV_SHORT 0x0080 /* HPL_RMV_SHORT */
+#define WM8904_HPL_RMV_SHORT_MASK 0x0080 /* HPL_RMV_SHORT */
+#define WM8904_HPL_RMV_SHORT_SHIFT 7 /* HPL_RMV_SHORT */
+#define WM8904_HPL_RMV_SHORT_WIDTH 1 /* HPL_RMV_SHORT */
+#define WM8904_HPL_ENA_OUTP 0x0040 /* HPL_ENA_OUTP */
+#define WM8904_HPL_ENA_OUTP_MASK 0x0040 /* HPL_ENA_OUTP */
+#define WM8904_HPL_ENA_OUTP_SHIFT 6 /* HPL_ENA_OUTP */
+#define WM8904_HPL_ENA_OUTP_WIDTH 1 /* HPL_ENA_OUTP */
+#define WM8904_HPL_ENA_DLY 0x0020 /* HPL_ENA_DLY */
+#define WM8904_HPL_ENA_DLY_MASK 0x0020 /* HPL_ENA_DLY */
+#define WM8904_HPL_ENA_DLY_SHIFT 5 /* HPL_ENA_DLY */
+#define WM8904_HPL_ENA_DLY_WIDTH 1 /* HPL_ENA_DLY */
+#define WM8904_HPL_ENA 0x0010 /* HPL_ENA */
+#define WM8904_HPL_ENA_MASK 0x0010 /* HPL_ENA */
+#define WM8904_HPL_ENA_SHIFT 4 /* HPL_ENA */
+#define WM8904_HPL_ENA_WIDTH 1 /* HPL_ENA */
+#define WM8904_HPR_RMV_SHORT 0x0008 /* HPR_RMV_SHORT */
+#define WM8904_HPR_RMV_SHORT_MASK 0x0008 /* HPR_RMV_SHORT */
+#define WM8904_HPR_RMV_SHORT_SHIFT 3 /* HPR_RMV_SHORT */
+#define WM8904_HPR_RMV_SHORT_WIDTH 1 /* HPR_RMV_SHORT */
+#define WM8904_HPR_ENA_OUTP 0x0004 /* HPR_ENA_OUTP */
+#define WM8904_HPR_ENA_OUTP_MASK 0x0004 /* HPR_ENA_OUTP */
+#define WM8904_HPR_ENA_OUTP_SHIFT 2 /* HPR_ENA_OUTP */
+#define WM8904_HPR_ENA_OUTP_WIDTH 1 /* HPR_ENA_OUTP */
+#define WM8904_HPR_ENA_DLY 0x0002 /* HPR_ENA_DLY */
+#define WM8904_HPR_ENA_DLY_MASK 0x0002 /* HPR_ENA_DLY */
+#define WM8904_HPR_ENA_DLY_SHIFT 1 /* HPR_ENA_DLY */
+#define WM8904_HPR_ENA_DLY_WIDTH 1 /* HPR_ENA_DLY */
+#define WM8904_HPR_ENA 0x0001 /* HPR_ENA */
+#define WM8904_HPR_ENA_MASK 0x0001 /* HPR_ENA */
+#define WM8904_HPR_ENA_SHIFT 0 /* HPR_ENA */
+#define WM8904_HPR_ENA_WIDTH 1 /* HPR_ENA */
+
+/*
+ * R94 (0x5E) - Analogue Lineout 0
+ */
+#define WM8904_LINEOUTL_RMV_SHORT 0x0080 /* LINEOUTL_RMV_SHORT */
+#define WM8904_LINEOUTL_RMV_SHORT_MASK 0x0080 /* LINEOUTL_RMV_SHORT */
+#define WM8904_LINEOUTL_RMV_SHORT_SHIFT 7 /* LINEOUTL_RMV_SHORT */
+#define WM8904_LINEOUTL_RMV_SHORT_WIDTH 1 /* LINEOUTL_RMV_SHORT */
+#define WM8904_LINEOUTL_ENA_OUTP 0x0040 /* LINEOUTL_ENA_OUTP */
+#define WM8904_LINEOUTL_ENA_OUTP_MASK 0x0040 /* LINEOUTL_ENA_OUTP */
+#define WM8904_LINEOUTL_ENA_OUTP_SHIFT 6 /* LINEOUTL_ENA_OUTP */
+#define WM8904_LINEOUTL_ENA_OUTP_WIDTH 1 /* LINEOUTL_ENA_OUTP */
+#define WM8904_LINEOUTL_ENA_DLY 0x0020 /* LINEOUTL_ENA_DLY */
+#define WM8904_LINEOUTL_ENA_DLY_MASK 0x0020 /* LINEOUTL_ENA_DLY */
+#define WM8904_LINEOUTL_ENA_DLY_SHIFT 5 /* LINEOUTL_ENA_DLY */
+#define WM8904_LINEOUTL_ENA_DLY_WIDTH 1 /* LINEOUTL_ENA_DLY */
+#define WM8904_LINEOUTL_ENA 0x0010 /* LINEOUTL_ENA */
+#define WM8904_LINEOUTL_ENA_MASK 0x0010 /* LINEOUTL_ENA */
+#define WM8904_LINEOUTL_ENA_SHIFT 4 /* LINEOUTL_ENA */
+#define WM8904_LINEOUTL_ENA_WIDTH 1 /* LINEOUTL_ENA */
+#define WM8904_LINEOUTR_RMV_SHORT 0x0008 /* LINEOUTR_RMV_SHORT */
+#define WM8904_LINEOUTR_RMV_SHORT_MASK 0x0008 /* LINEOUTR_RMV_SHORT */
+#define WM8904_LINEOUTR_RMV_SHORT_SHIFT 3 /* LINEOUTR_RMV_SHORT */
+#define WM8904_LINEOUTR_RMV_SHORT_WIDTH 1 /* LINEOUTR_RMV_SHORT */
+#define WM8904_LINEOUTR_ENA_OUTP 0x0004 /* LINEOUTR_ENA_OUTP */
+#define WM8904_LINEOUTR_ENA_OUTP_MASK 0x0004 /* LINEOUTR_ENA_OUTP */
+#define WM8904_LINEOUTR_ENA_OUTP_SHIFT 2 /* LINEOUTR_ENA_OUTP */
+#define WM8904_LINEOUTR_ENA_OUTP_WIDTH 1 /* LINEOUTR_ENA_OUTP */
+#define WM8904_LINEOUTR_ENA_DLY 0x0002 /* LINEOUTR_ENA_DLY */
+#define WM8904_LINEOUTR_ENA_DLY_MASK 0x0002 /* LINEOUTR_ENA_DLY */
+#define WM8904_LINEOUTR_ENA_DLY_SHIFT 1 /* LINEOUTR_ENA_DLY */
+#define WM8904_LINEOUTR_ENA_DLY_WIDTH 1 /* LINEOUTR_ENA_DLY */
+#define WM8904_LINEOUTR_ENA 0x0001 /* LINEOUTR_ENA */
+#define WM8904_LINEOUTR_ENA_MASK 0x0001 /* LINEOUTR_ENA */
+#define WM8904_LINEOUTR_ENA_SHIFT 0 /* LINEOUTR_ENA */
+#define WM8904_LINEOUTR_ENA_WIDTH 1 /* LINEOUTR_ENA */
+
+/*
+ * R98 (0x62) - Charge Pump 0
+ */
+#define WM8904_CP_ENA 0x0001 /* CP_ENA */
+#define WM8904_CP_ENA_MASK 0x0001 /* CP_ENA */
+#define WM8904_CP_ENA_SHIFT 0 /* CP_ENA */
+#define WM8904_CP_ENA_WIDTH 1 /* CP_ENA */
+
+/*
+ * R104 (0x68) - Class W 0
+ */
+#define WM8904_CP_DYN_PWR 0x0001 /* CP_DYN_PWR */
+#define WM8904_CP_DYN_PWR_MASK 0x0001 /* CP_DYN_PWR */
+#define WM8904_CP_DYN_PWR_SHIFT 0 /* CP_DYN_PWR */
+#define WM8904_CP_DYN_PWR_WIDTH 1 /* CP_DYN_PWR */
+
+/*
+ * R108 (0x6C) - Write Sequencer 0
+ */
+#define WM8904_WSEQ_ENA 0x0100 /* WSEQ_ENA */
+#define WM8904_WSEQ_ENA_MASK 0x0100 /* WSEQ_ENA */
+#define WM8904_WSEQ_ENA_SHIFT 8 /* WSEQ_ENA */
+#define WM8904_WSEQ_ENA_WIDTH 1 /* WSEQ_ENA */
+#define WM8904_WSEQ_WRITE_INDEX_MASK 0x001F /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8904_WSEQ_WRITE_INDEX_SHIFT 0 /* WSEQ_WRITE_INDEX - [4:0] */
+#define WM8904_WSEQ_WRITE_INDEX_WIDTH 5 /* WSEQ_WRITE_INDEX - [4:0] */
+
+/*
+ * R109 (0x6D) - Write Sequencer 1
+ */
+#define WM8904_WSEQ_DATA_WIDTH_MASK 0x7000 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM8904_WSEQ_DATA_WIDTH_SHIFT 12 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM8904_WSEQ_DATA_WIDTH_WIDTH 3 /* WSEQ_DATA_WIDTH - [14:12] */
+#define WM8904_WSEQ_DATA_START_MASK 0x0F00 /* WSEQ_DATA_START - [11:8] */
+#define WM8904_WSEQ_DATA_START_SHIFT 8 /* WSEQ_DATA_START - [11:8] */
+#define WM8904_WSEQ_DATA_START_WIDTH 4 /* WSEQ_DATA_START - [11:8] */
+#define WM8904_WSEQ_ADDR_MASK 0x00FF /* WSEQ_ADDR - [7:0] */
+#define WM8904_WSEQ_ADDR_SHIFT 0 /* WSEQ_ADDR - [7:0] */
+#define WM8904_WSEQ_ADDR_WIDTH 8 /* WSEQ_ADDR - [7:0] */
+
+/*
+ * R110 (0x6E) - Write Sequencer 2
+ */
+#define WM8904_WSEQ_EOS 0x4000 /* WSEQ_EOS */
+#define WM8904_WSEQ_EOS_MASK 0x4000 /* WSEQ_EOS */
+#define WM8904_WSEQ_EOS_SHIFT 14 /* WSEQ_EOS */
+#define WM8904_WSEQ_EOS_WIDTH 1 /* WSEQ_EOS */
+#define WM8904_WSEQ_DELAY_MASK 0x0F00 /* WSEQ_DELAY - [11:8] */
+#define WM8904_WSEQ_DELAY_SHIFT 8 /* WSEQ_DELAY - [11:8] */
+#define WM8904_WSEQ_DELAY_WIDTH 4 /* WSEQ_DELAY - [11:8] */
+#define WM8904_WSEQ_DATA_MASK 0x00FF /* WSEQ_DATA - [7:0] */
+#define WM8904_WSEQ_DATA_SHIFT 0 /* WSEQ_DATA - [7:0] */
+#define WM8904_WSEQ_DATA_WIDTH 8 /* WSEQ_DATA - [7:0] */
+
+/*
+ * R111 (0x6F) - Write Sequencer 3
+ */
+#define WM8904_WSEQ_ABORT 0x0200 /* WSEQ_ABORT */
+#define WM8904_WSEQ_ABORT_MASK 0x0200 /* WSEQ_ABORT */
+#define WM8904_WSEQ_ABORT_SHIFT 9 /* WSEQ_ABORT */
+#define WM8904_WSEQ_ABORT_WIDTH 1 /* WSEQ_ABORT */
+#define WM8904_WSEQ_START 0x0100 /* WSEQ_START */
+#define WM8904_WSEQ_START_MASK 0x0100 /* WSEQ_START */
+#define WM8904_WSEQ_START_SHIFT 8 /* WSEQ_START */
+#define WM8904_WSEQ_START_WIDTH 1 /* WSEQ_START */
+#define WM8904_WSEQ_START_INDEX_MASK 0x003F /* WSEQ_START_INDEX - [5:0] */
+#define WM8904_WSEQ_START_INDEX_SHIFT 0 /* WSEQ_START_INDEX - [5:0] */
+#define WM8904_WSEQ_START_INDEX_WIDTH 6 /* WSEQ_START_INDEX - [5:0] */
+
+/*
+ * R112 (0x70) - Write Sequencer 4
+ */
+#define WM8904_WSEQ_CURRENT_INDEX_MASK 0x03F0 /* WSEQ_CURRENT_INDEX - [9:4] */
+#define WM8904_WSEQ_CURRENT_INDEX_SHIFT 4 /* WSEQ_CURRENT_INDEX - [9:4] */
+#define WM8904_WSEQ_CURRENT_INDEX_WIDTH 6 /* WSEQ_CURRENT_INDEX - [9:4] */
+#define WM8904_WSEQ_BUSY 0x0001 /* WSEQ_BUSY */
+#define WM8904_WSEQ_BUSY_MASK 0x0001 /* WSEQ_BUSY */
+#define WM8904_WSEQ_BUSY_SHIFT 0 /* WSEQ_BUSY */
+#define WM8904_WSEQ_BUSY_WIDTH 1 /* WSEQ_BUSY */
+
+/*
+ * R116 (0x74) - FLL Control 1
+ */
+#define WM8904_FLL_FRACN_ENA 0x0004 /* FLL_FRACN_ENA */
+#define WM8904_FLL_FRACN_ENA_MASK 0x0004 /* FLL_FRACN_ENA */
+#define WM8904_FLL_FRACN_ENA_SHIFT 2 /* FLL_FRACN_ENA */
+#define WM8904_FLL_FRACN_ENA_WIDTH 1 /* FLL_FRACN_ENA */
+#define WM8904_FLL_OSC_ENA 0x0002 /* FLL_OSC_ENA */
+#define WM8904_FLL_OSC_ENA_MASK 0x0002 /* FLL_OSC_ENA */
+#define WM8904_FLL_OSC_ENA_SHIFT 1 /* FLL_OSC_ENA */
+#define WM8904_FLL_OSC_ENA_WIDTH 1 /* FLL_OSC_ENA */
+#define WM8904_FLL_ENA 0x0001 /* FLL_ENA */
+#define WM8904_FLL_ENA_MASK 0x0001 /* FLL_ENA */
+#define WM8904_FLL_ENA_SHIFT 0 /* FLL_ENA */
+#define WM8904_FLL_ENA_WIDTH 1 /* FLL_ENA */
+
+/*
+ * R117 (0x75) - FLL Control 2
+ */
+#define WM8904_FLL_OUTDIV_MASK 0x3F00 /* FLL_OUTDIV - [13:8] */
+#define WM8904_FLL_OUTDIV_SHIFT 8 /* FLL_OUTDIV - [13:8] */
+#define WM8904_FLL_OUTDIV_WIDTH 6 /* FLL_OUTDIV - [13:8] */
+#define WM8904_FLL_CTRL_RATE_MASK 0x0070 /* FLL_CTRL_RATE - [6:4] */
+#define WM8904_FLL_CTRL_RATE_SHIFT 4 /* FLL_CTRL_RATE - [6:4] */
+#define WM8904_FLL_CTRL_RATE_WIDTH 3 /* FLL_CTRL_RATE - [6:4] */
+#define WM8904_FLL_FRATIO_MASK 0x0007 /* FLL_FRATIO - [2:0] */
+#define WM8904_FLL_FRATIO_SHIFT 0 /* FLL_FRATIO - [2:0] */
+#define WM8904_FLL_FRATIO_WIDTH 3 /* FLL_FRATIO - [2:0] */
+
+/*
+ * R118 (0x76) - FLL Control 3
+ */
+#define WM8904_FLL_K_MASK 0xFFFF /* FLL_K - [15:0] */
+#define WM8904_FLL_K_SHIFT 0 /* FLL_K - [15:0] */
+#define WM8904_FLL_K_WIDTH 16 /* FLL_K - [15:0] */
+
+/*
+ * R119 (0x77) - FLL Control 4
+ */
+#define WM8904_FLL_N_MASK 0x7FE0 /* FLL_N - [14:5] */
+#define WM8904_FLL_N_SHIFT 5 /* FLL_N - [14:5] */
+#define WM8904_FLL_N_WIDTH 10 /* FLL_N - [14:5] */
+#define WM8904_FLL_GAIN_MASK 0x000F /* FLL_GAIN - [3:0] */
+#define WM8904_FLL_GAIN_SHIFT 0 /* FLL_GAIN - [3:0] */
+#define WM8904_FLL_GAIN_WIDTH 4 /* FLL_GAIN - [3:0] */
+
+/*
+ * R120 (0x78) - FLL Control 5
+ */
+#define WM8904_FLL_CLK_REF_DIV_MASK 0x0018 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM8904_FLL_CLK_REF_DIV_SHIFT 3 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM8904_FLL_CLK_REF_DIV_WIDTH 2 /* FLL_CLK_REF_DIV - [4:3] */
+#define WM8904_FLL_CLK_REF_SRC_MASK 0x0003 /* FLL_CLK_REF_SRC - [1:0] */
+#define WM8904_FLL_CLK_REF_SRC_SHIFT 0 /* FLL_CLK_REF_SRC - [1:0] */
+#define WM8904_FLL_CLK_REF_SRC_WIDTH 2 /* FLL_CLK_REF_SRC - [1:0] */
+
+/*
+ * R121 (0x79) - GPIO Control 1
+ */
+#define WM8904_GPIO1_PU 0x0020 /* GPIO1_PU */
+#define WM8904_GPIO1_PU_MASK 0x0020 /* GPIO1_PU */
+#define WM8904_GPIO1_PU_SHIFT 5 /* GPIO1_PU */
+#define WM8904_GPIO1_PU_WIDTH 1 /* GPIO1_PU */
+#define WM8904_GPIO1_PD 0x0010 /* GPIO1_PD */
+#define WM8904_GPIO1_PD_MASK 0x0010 /* GPIO1_PD */
+#define WM8904_GPIO1_PD_SHIFT 4 /* GPIO1_PD */
+#define WM8904_GPIO1_PD_WIDTH 1 /* GPIO1_PD */
+#define WM8904_GPIO1_SEL_MASK 0x000F /* GPIO1_SEL - [3:0] */
+#define WM8904_GPIO1_SEL_SHIFT 0 /* GPIO1_SEL - [3:0] */
+#define WM8904_GPIO1_SEL_WIDTH 4 /* GPIO1_SEL - [3:0] */
+
+/*
+ * R122 (0x7A) - GPIO Control 2
+ */
+#define WM8904_GPIO2_PU 0x0020 /* GPIO2_PU */
+#define WM8904_GPIO2_PU_MASK 0x0020 /* GPIO2_PU */
+#define WM8904_GPIO2_PU_SHIFT 5 /* GPIO2_PU */
+#define WM8904_GPIO2_PU_WIDTH 1 /* GPIO2_PU */
+#define WM8904_GPIO2_PD 0x0010 /* GPIO2_PD */
+#define WM8904_GPIO2_PD_MASK 0x0010 /* GPIO2_PD */
+#define WM8904_GPIO2_PD_SHIFT 4 /* GPIO2_PD */
+#define WM8904_GPIO2_PD_WIDTH 1 /* GPIO2_PD */
+#define WM8904_GPIO2_SEL_MASK 0x000F /* GPIO2_SEL - [3:0] */
+#define WM8904_GPIO2_SEL_SHIFT 0 /* GPIO2_SEL - [3:0] */
+#define WM8904_GPIO2_SEL_WIDTH 4 /* GPIO2_SEL - [3:0] */
+
+/*
+ * R123 (0x7B) - GPIO Control 3
+ */
+#define WM8904_GPIO3_PU 0x0020 /* GPIO3_PU */
+#define WM8904_GPIO3_PU_MASK 0x0020 /* GPIO3_PU */
+#define WM8904_GPIO3_PU_SHIFT 5 /* GPIO3_PU */
+#define WM8904_GPIO3_PU_WIDTH 1 /* GPIO3_PU */
+#define WM8904_GPIO3_PD 0x0010 /* GPIO3_PD */
+#define WM8904_GPIO3_PD_MASK 0x0010 /* GPIO3_PD */
+#define WM8904_GPIO3_PD_SHIFT 4 /* GPIO3_PD */
+#define WM8904_GPIO3_PD_WIDTH 1 /* GPIO3_PD */
+#define WM8904_GPIO3_SEL_MASK 0x000F /* GPIO3_SEL - [3:0] */
+#define WM8904_GPIO3_SEL_SHIFT 0 /* GPIO3_SEL - [3:0] */
+#define WM8904_GPIO3_SEL_WIDTH 4 /* GPIO3_SEL - [3:0] */
+
+/*
+ * R124 (0x7C) - GPIO Control 4
+ */
+#define WM8904_GPI7_ENA 0x0200 /* GPI7_ENA */
+#define WM8904_GPI7_ENA_MASK 0x0200 /* GPI7_ENA */
+#define WM8904_GPI7_ENA_SHIFT 9 /* GPI7_ENA */
+#define WM8904_GPI7_ENA_WIDTH 1 /* GPI7_ENA */
+#define WM8904_GPI8_ENA 0x0100 /* GPI8_ENA */
+#define WM8904_GPI8_ENA_MASK 0x0100 /* GPI8_ENA */
+#define WM8904_GPI8_ENA_SHIFT 8 /* GPI8_ENA */
+#define WM8904_GPI8_ENA_WIDTH 1 /* GPI8_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA 0x0080 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA_MASK 0x0080 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA_SHIFT 7 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_MODE_ENA_WIDTH 1 /* GPIO_BCLK_MODE_ENA */
+#define WM8904_GPIO_BCLK_SEL_MASK 0x000F /* GPIO_BCLK_SEL - [3:0] */
+#define WM8904_GPIO_BCLK_SEL_SHIFT 0 /* GPIO_BCLK_SEL - [3:0] */
+#define WM8904_GPIO_BCLK_SEL_WIDTH 4 /* GPIO_BCLK_SEL - [3:0] */
+
+/*
+ * R126 (0x7E) - Digital Pulls
+ */
+#define WM8904_MCLK_PU 0x0080 /* MCLK_PU */
+#define WM8904_MCLK_PU_MASK 0x0080 /* MCLK_PU */
+#define WM8904_MCLK_PU_SHIFT 7 /* MCLK_PU */
+#define WM8904_MCLK_PU_WIDTH 1 /* MCLK_PU */
+#define WM8904_MCLK_PD 0x0040 /* MCLK_PD */
+#define WM8904_MCLK_PD_MASK 0x0040 /* MCLK_PD */
+#define WM8904_MCLK_PD_SHIFT 6 /* MCLK_PD */
+#define WM8904_MCLK_PD_WIDTH 1 /* MCLK_PD */
+#define WM8904_DACDAT_PU 0x0020 /* DACDAT_PU */
+#define WM8904_DACDAT_PU_MASK 0x0020 /* DACDAT_PU */
+#define WM8904_DACDAT_PU_SHIFT 5 /* DACDAT_PU */
+#define WM8904_DACDAT_PU_WIDTH 1 /* DACDAT_PU */
+#define WM8904_DACDAT_PD 0x0010 /* DACDAT_PD */
+#define WM8904_DACDAT_PD_MASK 0x0010 /* DACDAT_PD */
+#define WM8904_DACDAT_PD_SHIFT 4 /* DACDAT_PD */
+#define WM8904_DACDAT_PD_WIDTH 1 /* DACDAT_PD */
+#define WM8904_LRCLK_PU 0x0008 /* LRCLK_PU */
+#define WM8904_LRCLK_PU_MASK 0x0008 /* LRCLK_PU */
+#define WM8904_LRCLK_PU_SHIFT 3 /* LRCLK_PU */
+#define WM8904_LRCLK_PU_WIDTH 1 /* LRCLK_PU */
+#define WM8904_LRCLK_PD 0x0004 /* LRCLK_PD */
+#define WM8904_LRCLK_PD_MASK 0x0004 /* LRCLK_PD */
+#define WM8904_LRCLK_PD_SHIFT 2 /* LRCLK_PD */
+#define WM8904_LRCLK_PD_WIDTH 1 /* LRCLK_PD */
+#define WM8904_BCLK_PU 0x0002 /* BCLK_PU */
+#define WM8904_BCLK_PU_MASK 0x0002 /* BCLK_PU */
+#define WM8904_BCLK_PU_SHIFT 1 /* BCLK_PU */
+#define WM8904_BCLK_PU_WIDTH 1 /* BCLK_PU */
+#define WM8904_BCLK_PD 0x0001 /* BCLK_PD */
+#define WM8904_BCLK_PD_MASK 0x0001 /* BCLK_PD */
+#define WM8904_BCLK_PD_SHIFT 0 /* BCLK_PD */
+#define WM8904_BCLK_PD_WIDTH 1 /* BCLK_PD */
+
+/*
+ * R127 (0x7F) - Interrupt Status
+ */
+#define WM8904_IRQ 0x0400 /* IRQ */
+#define WM8904_IRQ_MASK 0x0400 /* IRQ */
+#define WM8904_IRQ_SHIFT 10 /* IRQ */
+#define WM8904_IRQ_WIDTH 1 /* IRQ */
+#define WM8904_GPIO_BCLK_EINT 0x0200 /* GPIO_BCLK_EINT */
+#define WM8904_GPIO_BCLK_EINT_MASK 0x0200 /* GPIO_BCLK_EINT */
+#define WM8904_GPIO_BCLK_EINT_SHIFT 9 /* GPIO_BCLK_EINT */
+#define WM8904_GPIO_BCLK_EINT_WIDTH 1 /* GPIO_BCLK_EINT */
+#define WM8904_WSEQ_EINT 0x0100 /* WSEQ_EINT */
+#define WM8904_WSEQ_EINT_MASK 0x0100 /* WSEQ_EINT */
+#define WM8904_WSEQ_EINT_SHIFT 8 /* WSEQ_EINT */
+#define WM8904_WSEQ_EINT_WIDTH 1 /* WSEQ_EINT */
+#define WM8904_GPIO3_EINT 0x0080 /* GPIO3_EINT */
+#define WM8904_GPIO3_EINT_MASK 0x0080 /* GPIO3_EINT */
+#define WM8904_GPIO3_EINT_SHIFT 7 /* GPIO3_EINT */
+#define WM8904_GPIO3_EINT_WIDTH 1 /* GPIO3_EINT */
+#define WM8904_GPIO2_EINT 0x0040 /* GPIO2_EINT */
+#define WM8904_GPIO2_EINT_MASK 0x0040 /* GPIO2_EINT */
+#define WM8904_GPIO2_EINT_SHIFT 6 /* GPIO2_EINT */
+#define WM8904_GPIO2_EINT_WIDTH 1 /* GPIO2_EINT */
+#define WM8904_GPIO1_EINT 0x0020 /* GPIO1_EINT */
+#define WM8904_GPIO1_EINT_MASK 0x0020 /* GPIO1_EINT */
+#define WM8904_GPIO1_EINT_SHIFT 5 /* GPIO1_EINT */
+#define WM8904_GPIO1_EINT_WIDTH 1 /* GPIO1_EINT */
+#define WM8904_GPI8_EINT 0x0010 /* GPI8_EINT */
+#define WM8904_GPI8_EINT_MASK 0x0010 /* GPI8_EINT */
+#define WM8904_GPI8_EINT_SHIFT 4 /* GPI8_EINT */
+#define WM8904_GPI8_EINT_WIDTH 1 /* GPI8_EINT */
+#define WM8904_GPI7_EINT 0x0008 /* GPI7_EINT */
+#define WM8904_GPI7_EINT_MASK 0x0008 /* GPI7_EINT */
+#define WM8904_GPI7_EINT_SHIFT 3 /* GPI7_EINT */
+#define WM8904_GPI7_EINT_WIDTH 1 /* GPI7_EINT */
+#define WM8904_FLL_LOCK_EINT 0x0004 /* FLL_LOCK_EINT */
+#define WM8904_FLL_LOCK_EINT_MASK 0x0004 /* FLL_LOCK_EINT */
+#define WM8904_FLL_LOCK_EINT_SHIFT 2 /* FLL_LOCK_EINT */
+#define WM8904_FLL_LOCK_EINT_WIDTH 1 /* FLL_LOCK_EINT */
+#define WM8904_MIC_SHRT_EINT 0x0002 /* MIC_SHRT_EINT */
+#define WM8904_MIC_SHRT_EINT_MASK 0x0002 /* MIC_SHRT_EINT */
+#define WM8904_MIC_SHRT_EINT_SHIFT 1 /* MIC_SHRT_EINT */
+#define WM8904_MIC_SHRT_EINT_WIDTH 1 /* MIC_SHRT_EINT */
+#define WM8904_MIC_DET_EINT 0x0001 /* MIC_DET_EINT */
+#define WM8904_MIC_DET_EINT_MASK 0x0001 /* MIC_DET_EINT */
+#define WM8904_MIC_DET_EINT_SHIFT 0 /* MIC_DET_EINT */
+#define WM8904_MIC_DET_EINT_WIDTH 1 /* MIC_DET_EINT */
+
+/*
+ * R128 (0x80) - Interrupt Status Mask
+ */
+#define WM8904_IM_GPIO_BCLK_EINT 0x0200 /* IM_GPIO_BCLK_EINT */
+#define WM8904_IM_GPIO_BCLK_EINT_MASK 0x0200 /* IM_GPIO_BCLK_EINT */
+#define WM8904_IM_GPIO_BCLK_EINT_SHIFT 9 /* IM_GPIO_BCLK_EINT */
+#define WM8904_IM_GPIO_BCLK_EINT_WIDTH 1 /* IM_GPIO_BCLK_EINT */
+#define WM8904_IM_WSEQ_EINT 0x0100 /* IM_WSEQ_EINT */
+#define WM8904_IM_WSEQ_EINT_MASK 0x0100 /* IM_WSEQ_EINT */
+#define WM8904_IM_WSEQ_EINT_SHIFT 8 /* IM_WSEQ_EINT */
+#define WM8904_IM_WSEQ_EINT_WIDTH 1 /* IM_WSEQ_EINT */
+#define WM8904_IM_GPIO3_EINT 0x0080 /* IM_GPIO3_EINT */
+#define WM8904_IM_GPIO3_EINT_MASK 0x0080 /* IM_GPIO3_EINT */
+#define WM8904_IM_GPIO3_EINT_SHIFT 7 /* IM_GPIO3_EINT */
+#define WM8904_IM_GPIO3_EINT_WIDTH 1 /* IM_GPIO3_EINT */
+#define WM8904_IM_GPIO2_EINT 0x0040 /* IM_GPIO2_EINT */
+#define WM8904_IM_GPIO2_EINT_MASK 0x0040 /* IM_GPIO2_EINT */
+#define WM8904_IM_GPIO2_EINT_SHIFT 6 /* IM_GPIO2_EINT */
+#define WM8904_IM_GPIO2_EINT_WIDTH 1 /* IM_GPIO2_EINT */
+#define WM8904_IM_GPIO1_EINT 0x0020 /* IM_GPIO1_EINT */
+#define WM8904_IM_GPIO1_EINT_MASK 0x0020 /* IM_GPIO1_EINT */
+#define WM8904_IM_GPIO1_EINT_SHIFT 5 /* IM_GPIO1_EINT */
+#define WM8904_IM_GPIO1_EINT_WIDTH 1 /* IM_GPIO1_EINT */
+#define WM8904_IM_GPI8_EINT 0x0010 /* IM_GPI8_EINT */
+#define WM8904_IM_GPI8_EINT_MASK 0x0010 /* IM_GPI8_EINT */
+#define WM8904_IM_GPI8_EINT_SHIFT 4 /* IM_GPI8_EINT */
+#define WM8904_IM_GPI8_EINT_WIDTH 1 /* IM_GPI8_EINT */
+#define WM8904_IM_GPI7_EINT 0x0008 /* IM_GPI7_EINT */
+#define WM8904_IM_GPI7_EINT_MASK 0x0008 /* IM_GPI7_EINT */
+#define WM8904_IM_GPI7_EINT_SHIFT 3 /* IM_GPI7_EINT */
+#define WM8904_IM_GPI7_EINT_WIDTH 1 /* IM_GPI7_EINT */
+#define WM8904_IM_FLL_LOCK_EINT 0x0004 /* IM_FLL_LOCK_EINT */
+#define WM8904_IM_FLL_LOCK_EINT_MASK 0x0004 /* IM_FLL_LOCK_EINT */
+#define WM8904_IM_FLL_LOCK_EINT_SHIFT 2 /* IM_FLL_LOCK_EINT */
+#define WM8904_IM_FLL_LOCK_EINT_WIDTH 1 /* IM_FLL_LOCK_EINT */
+#define WM8904_IM_MIC_SHRT_EINT 0x0002 /* IM_MIC_SHRT_EINT */
+#define WM8904_IM_MIC_SHRT_EINT_MASK 0x0002 /* IM_MIC_SHRT_EINT */
+#define WM8904_IM_MIC_SHRT_EINT_SHIFT 1 /* IM_MIC_SHRT_EINT */
+#define WM8904_IM_MIC_SHRT_EINT_WIDTH 1 /* IM_MIC_SHRT_EINT */
+#define WM8904_IM_MIC_DET_EINT 0x0001 /* IM_MIC_DET_EINT */
+#define WM8904_IM_MIC_DET_EINT_MASK 0x0001 /* IM_MIC_DET_EINT */
+#define WM8904_IM_MIC_DET_EINT_SHIFT 0 /* IM_MIC_DET_EINT */
+#define WM8904_IM_MIC_DET_EINT_WIDTH 1 /* IM_MIC_DET_EINT */
+
+/*
+ * R129 (0x81) - Interrupt Polarity
+ */
+#define WM8904_GPIO_BCLK_EINT_POL 0x0200 /* GPIO_BCLK_EINT_POL */
+#define WM8904_GPIO_BCLK_EINT_POL_MASK 0x0200 /* GPIO_BCLK_EINT_POL */
+#define WM8904_GPIO_BCLK_EINT_POL_SHIFT 9 /* GPIO_BCLK_EINT_POL */
+#define WM8904_GPIO_BCLK_EINT_POL_WIDTH 1 /* GPIO_BCLK_EINT_POL */
+#define WM8904_WSEQ_EINT_POL 0x0100 /* WSEQ_EINT_POL */
+#define WM8904_WSEQ_EINT_POL_MASK 0x0100 /* WSEQ_EINT_POL */
+#define WM8904_WSEQ_EINT_POL_SHIFT 8 /* WSEQ_EINT_POL */
+#define WM8904_WSEQ_EINT_POL_WIDTH 1 /* WSEQ_EINT_POL */
+#define WM8904_GPIO3_EINT_POL 0x0080 /* GPIO3_EINT_POL */
+#define WM8904_GPIO3_EINT_POL_MASK 0x0080 /* GPIO3_EINT_POL */
+#define WM8904_GPIO3_EINT_POL_SHIFT 7 /* GPIO3_EINT_POL */
+#define WM8904_GPIO3_EINT_POL_WIDTH 1 /* GPIO3_EINT_POL */
+#define WM8904_GPIO2_EINT_POL 0x0040 /* GPIO2_EINT_POL */
+#define WM8904_GPIO2_EINT_POL_MASK 0x0040 /* GPIO2_EINT_POL */
+#define WM8904_GPIO2_EINT_POL_SHIFT 6 /* GPIO2_EINT_POL */
+#define WM8904_GPIO2_EINT_POL_WIDTH 1 /* GPIO2_EINT_POL */
+#define WM8904_GPIO1_EINT_POL 0x0020 /* GPIO1_EINT_POL */
+#define WM8904_GPIO1_EINT_POL_MASK 0x0020 /* GPIO1_EINT_POL */
+#define WM8904_GPIO1_EINT_POL_SHIFT 5 /* GPIO1_EINT_POL */
+#define WM8904_GPIO1_EINT_POL_WIDTH 1 /* GPIO1_EINT_POL */
+#define WM8904_GPI8_EINT_POL 0x0010 /* GPI8_EINT_POL */
+#define WM8904_GPI8_EINT_POL_MASK 0x0010 /* GPI8_EINT_POL */
+#define WM8904_GPI8_EINT_POL_SHIFT 4 /* GPI8_EINT_POL */
+#define WM8904_GPI8_EINT_POL_WIDTH 1 /* GPI8_EINT_POL */
+#define WM8904_GPI7_EINT_POL 0x0008 /* GPI7_EINT_POL */
+#define WM8904_GPI7_EINT_POL_MASK 0x0008 /* GPI7_EINT_POL */
+#define WM8904_GPI7_EINT_POL_SHIFT 3 /* GPI7_EINT_POL */
+#define WM8904_GPI7_EINT_POL_WIDTH 1 /* GPI7_EINT_POL */
+#define WM8904_FLL_LOCK_EINT_POL 0x0004 /* FLL_LOCK_EINT_POL */
+#define WM8904_FLL_LOCK_EINT_POL_MASK 0x0004 /* FLL_LOCK_EINT_POL */
+#define WM8904_FLL_LOCK_EINT_POL_SHIFT 2 /* FLL_LOCK_EINT_POL */
+#define WM8904_FLL_LOCK_EINT_POL_WIDTH 1 /* FLL_LOCK_EINT_POL */
+#define WM8904_MIC_SHRT_EINT_POL 0x0002 /* MIC_SHRT_EINT_POL */
+#define WM8904_MIC_SHRT_EINT_POL_MASK 0x0002 /* MIC_SHRT_EINT_POL */
+#define WM8904_MIC_SHRT_EINT_POL_SHIFT 1 /* MIC_SHRT_EINT_POL */
+#define WM8904_MIC_SHRT_EINT_POL_WIDTH 1 /* MIC_SHRT_EINT_POL */
+#define WM8904_MIC_DET_EINT_POL 0x0001 /* MIC_DET_EINT_POL */
+#define WM8904_MIC_DET_EINT_POL_MASK 0x0001 /* MIC_DET_EINT_POL */
+#define WM8904_MIC_DET_EINT_POL_SHIFT 0 /* MIC_DET_EINT_POL */
+#define WM8904_MIC_DET_EINT_POL_WIDTH 1 /* MIC_DET_EINT_POL */
+
+/*
+ * R130 (0x82) - Interrupt Debounce
+ */
+#define WM8904_GPIO_BCLK_EINT_DB 0x0200 /* GPIO_BCLK_EINT_DB */
+#define WM8904_GPIO_BCLK_EINT_DB_MASK 0x0200 /* GPIO_BCLK_EINT_DB */
+#define WM8904_GPIO_BCLK_EINT_DB_SHIFT 9 /* GPIO_BCLK_EINT_DB */
+#define WM8904_GPIO_BCLK_EINT_DB_WIDTH 1 /* GPIO_BCLK_EINT_DB */
+#define WM8904_WSEQ_EINT_DB 0x0100 /* WSEQ_EINT_DB */
+#define WM8904_WSEQ_EINT_DB_MASK 0x0100 /* WSEQ_EINT_DB */
+#define WM8904_WSEQ_EINT_DB_SHIFT 8 /* WSEQ_EINT_DB */
+#define WM8904_WSEQ_EINT_DB_WIDTH 1 /* WSEQ_EINT_DB */
+#define WM8904_GPIO3_EINT_DB 0x0080 /* GPIO3_EINT_DB */
+#define WM8904_GPIO3_EINT_DB_MASK 0x0080 /* GPIO3_EINT_DB */
+#define WM8904_GPIO3_EINT_DB_SHIFT 7 /* GPIO3_EINT_DB */
+#define WM8904_GPIO3_EINT_DB_WIDTH 1 /* GPIO3_EINT_DB */
+#define WM8904_GPIO2_EINT_DB 0x0040 /* GPIO2_EINT_DB */
+#define WM8904_GPIO2_EINT_DB_MASK 0x0040 /* GPIO2_EINT_DB */
+#define WM8904_GPIO2_EINT_DB_SHIFT 6 /* GPIO2_EINT_DB */
+#define WM8904_GPIO2_EINT_DB_WIDTH 1 /* GPIO2_EINT_DB */
+#define WM8904_GPIO1_EINT_DB 0x0020 /* GPIO1_EINT_DB */
+#define WM8904_GPIO1_EINT_DB_MASK 0x0020 /* GPIO1_EINT_DB */
+#define WM8904_GPIO1_EINT_DB_SHIFT 5 /* GPIO1_EINT_DB */
+#define WM8904_GPIO1_EINT_DB_WIDTH 1 /* GPIO1_EINT_DB */
+#define WM8904_GPI8_EINT_DB 0x0010 /* GPI8_EINT_DB */
+#define WM8904_GPI8_EINT_DB_MASK 0x0010 /* GPI8_EINT_DB */
+#define WM8904_GPI8_EINT_DB_SHIFT 4 /* GPI8_EINT_DB */
+#define WM8904_GPI8_EINT_DB_WIDTH 1 /* GPI8_EINT_DB */
+#define WM8904_GPI7_EINT_DB 0x0008 /* GPI7_EINT_DB */
+#define WM8904_GPI7_EINT_DB_MASK 0x0008 /* GPI7_EINT_DB */
+#define WM8904_GPI7_EINT_DB_SHIFT 3 /* GPI7_EINT_DB */
+#define WM8904_GPI7_EINT_DB_WIDTH 1 /* GPI7_EINT_DB */
+#define WM8904_FLL_LOCK_EINT_DB 0x0004 /* FLL_LOCK_EINT_DB */
+#define WM8904_FLL_LOCK_EINT_DB_MASK 0x0004 /* FLL_LOCK_EINT_DB */
+#define WM8904_FLL_LOCK_EINT_DB_SHIFT 2 /* FLL_LOCK_EINT_DB */
+#define WM8904_FLL_LOCK_EINT_DB_WIDTH 1 /* FLL_LOCK_EINT_DB */
+#define WM8904_MIC_SHRT_EINT_DB 0x0002 /* MIC_SHRT_EINT_DB */
+#define WM8904_MIC_SHRT_EINT_DB_MASK 0x0002 /* MIC_SHRT_EINT_DB */
+#define WM8904_MIC_SHRT_EINT_DB_SHIFT 1 /* MIC_SHRT_EINT_DB */
+#define WM8904_MIC_SHRT_EINT_DB_WIDTH 1 /* MIC_SHRT_EINT_DB */
+#define WM8904_MIC_DET_EINT_DB 0x0001 /* MIC_DET_EINT_DB */
+#define WM8904_MIC_DET_EINT_DB_MASK 0x0001 /* MIC_DET_EINT_DB */
+#define WM8904_MIC_DET_EINT_DB_SHIFT 0 /* MIC_DET_EINT_DB */
+#define WM8904_MIC_DET_EINT_DB_WIDTH 1 /* MIC_DET_EINT_DB */
+
+/*
+ * R134 (0x86) - EQ1
+ */
+#define WM8904_EQ_ENA 0x0001 /* EQ_ENA */
+#define WM8904_EQ_ENA_MASK 0x0001 /* EQ_ENA */
+#define WM8904_EQ_ENA_SHIFT 0 /* EQ_ENA */
+#define WM8904_EQ_ENA_WIDTH 1 /* EQ_ENA */
+
+/*
+ * R135 (0x87) - EQ2
+ */
+#define WM8904_EQ_B1_GAIN_MASK 0x001F /* EQ_B1_GAIN - [4:0] */
+#define WM8904_EQ_B1_GAIN_SHIFT 0 /* EQ_B1_GAIN - [4:0] */
+#define WM8904_EQ_B1_GAIN_WIDTH 5 /* EQ_B1_GAIN - [4:0] */
+
+/*
+ * R136 (0x88) - EQ3
+ */
+#define WM8904_EQ_B2_GAIN_MASK 0x001F /* EQ_B2_GAIN - [4:0] */
+#define WM8904_EQ_B2_GAIN_SHIFT 0 /* EQ_B2_GAIN - [4:0] */
+#define WM8904_EQ_B2_GAIN_WIDTH 5 /* EQ_B2_GAIN - [4:0] */
+
+/*
+ * R137 (0x89) - EQ4
+ */
+#define WM8904_EQ_B3_GAIN_MASK 0x001F /* EQ_B3_GAIN - [4:0] */
+#define WM8904_EQ_B3_GAIN_SHIFT 0 /* EQ_B3_GAIN - [4:0] */
+#define WM8904_EQ_B3_GAIN_WIDTH 5 /* EQ_B3_GAIN - [4:0] */
+
+/*
+ * R138 (0x8A) - EQ5
+ */
+#define WM8904_EQ_B4_GAIN_MASK 0x001F /* EQ_B4_GAIN - [4:0] */
+#define WM8904_EQ_B4_GAIN_SHIFT 0 /* EQ_B4_GAIN - [4:0] */
+#define WM8904_EQ_B4_GAIN_WIDTH 5 /* EQ_B4_GAIN - [4:0] */
+
+/*
+ * R139 (0x8B) - EQ6
+ */
+#define WM8904_EQ_B5_GAIN_MASK 0x001F /* EQ_B5_GAIN - [4:0] */
+#define WM8904_EQ_B5_GAIN_SHIFT 0 /* EQ_B5_GAIN - [4:0] */
+#define WM8904_EQ_B5_GAIN_WIDTH 5 /* EQ_B5_GAIN - [4:0] */
+
+/*
+ * R140 (0x8C) - EQ7
+ */
+#define WM8904_EQ_B1_A_MASK 0xFFFF /* EQ_B1_A - [15:0] */
+#define WM8904_EQ_B1_A_SHIFT 0 /* EQ_B1_A - [15:0] */
+#define WM8904_EQ_B1_A_WIDTH 16 /* EQ_B1_A - [15:0] */
+
+/*
+ * R141 (0x8D) - EQ8
+ */
+#define WM8904_EQ_B1_B_MASK 0xFFFF /* EQ_B1_B - [15:0] */
+#define WM8904_EQ_B1_B_SHIFT 0 /* EQ_B1_B - [15:0] */
+#define WM8904_EQ_B1_B_WIDTH 16 /* EQ_B1_B - [15:0] */
+
+/*
+ * R142 (0x8E) - EQ9
+ */
+#define WM8904_EQ_B1_PG_MASK 0xFFFF /* EQ_B1_PG - [15:0] */
+#define WM8904_EQ_B1_PG_SHIFT 0 /* EQ_B1_PG - [15:0] */
+#define WM8904_EQ_B1_PG_WIDTH 16 /* EQ_B1_PG - [15:0] */
+
+/*
+ * R143 (0x8F) - EQ10
+ */
+#define WM8904_EQ_B2_A_MASK 0xFFFF /* EQ_B2_A - [15:0] */
+#define WM8904_EQ_B2_A_SHIFT 0 /* EQ_B2_A - [15:0] */
+#define WM8904_EQ_B2_A_WIDTH 16 /* EQ_B2_A - [15:0] */
+
+/*
+ * R144 (0x90) - EQ11
+ */
+#define WM8904_EQ_B2_B_MASK 0xFFFF /* EQ_B2_B - [15:0] */
+#define WM8904_EQ_B2_B_SHIFT 0 /* EQ_B2_B - [15:0] */
+#define WM8904_EQ_B2_B_WIDTH 16 /* EQ_B2_B - [15:0] */
+
+/*
+ * R145 (0x91) - EQ12
+ */
+#define WM8904_EQ_B2_C_MASK 0xFFFF /* EQ_B2_C - [15:0] */
+#define WM8904_EQ_B2_C_SHIFT 0 /* EQ_B2_C - [15:0] */
+#define WM8904_EQ_B2_C_WIDTH 16 /* EQ_B2_C - [15:0] */
+
+/*
+ * R146 (0x92) - EQ13
+ */
+#define WM8904_EQ_B2_PG_MASK 0xFFFF /* EQ_B2_PG - [15:0] */
+#define WM8904_EQ_B2_PG_SHIFT 0 /* EQ_B2_PG - [15:0] */
+#define WM8904_EQ_B2_PG_WIDTH 16 /* EQ_B2_PG - [15:0] */
+
+/*
+ * R147 (0x93) - EQ14
+ */
+#define WM8904_EQ_B3_A_MASK 0xFFFF /* EQ_B3_A - [15:0] */
+#define WM8904_EQ_B3_A_SHIFT 0 /* EQ_B3_A - [15:0] */
+#define WM8904_EQ_B3_A_WIDTH 16 /* EQ_B3_A - [15:0] */
+
+/*
+ * R148 (0x94) - EQ15
+ */
+#define WM8904_EQ_B3_B_MASK 0xFFFF /* EQ_B3_B - [15:0] */
+#define WM8904_EQ_B3_B_SHIFT 0 /* EQ_B3_B - [15:0] */
+#define WM8904_EQ_B3_B_WIDTH 16 /* EQ_B3_B - [15:0] */
+
+/*
+ * R149 (0x95) - EQ16
+ */
+#define WM8904_EQ_B3_C_MASK 0xFFFF /* EQ_B3_C - [15:0] */
+#define WM8904_EQ_B3_C_SHIFT 0 /* EQ_B3_C - [15:0] */
+#define WM8904_EQ_B3_C_WIDTH 16 /* EQ_B3_C - [15:0] */
+
+/*
+ * R150 (0x96) - EQ17
+ */
+#define WM8904_EQ_B3_PG_MASK 0xFFFF /* EQ_B3_PG - [15:0] */
+#define WM8904_EQ_B3_PG_SHIFT 0 /* EQ_B3_PG - [15:0] */
+#define WM8904_EQ_B3_PG_WIDTH 16 /* EQ_B3_PG - [15:0] */
+
+/*
+ * R151 (0x97) - EQ18
+ */
+#define WM8904_EQ_B4_A_MASK 0xFFFF /* EQ_B4_A - [15:0] */
+#define WM8904_EQ_B4_A_SHIFT 0 /* EQ_B4_A - [15:0] */
+#define WM8904_EQ_B4_A_WIDTH 16 /* EQ_B4_A - [15:0] */
+
+/*
+ * R152 (0x98) - EQ19
+ */
+#define WM8904_EQ_B4_B_MASK 0xFFFF /* EQ_B4_B - [15:0] */
+#define WM8904_EQ_B4_B_SHIFT 0 /* EQ_B4_B - [15:0] */
+#define WM8904_EQ_B4_B_WIDTH 16 /* EQ_B4_B - [15:0] */
+
+/*
+ * R153 (0x99) - EQ20
+ */
+#define WM8904_EQ_B4_C_MASK 0xFFFF /* EQ_B4_C - [15:0] */
+#define WM8904_EQ_B4_C_SHIFT 0 /* EQ_B4_C - [15:0] */
+#define WM8904_EQ_B4_C_WIDTH 16 /* EQ_B4_C - [15:0] */
+
+/*
+ * R154 (0x9A) - EQ21
+ */
+#define WM8904_EQ_B4_PG_MASK 0xFFFF /* EQ_B4_PG - [15:0] */
+#define WM8904_EQ_B4_PG_SHIFT 0 /* EQ_B4_PG - [15:0] */
+#define WM8904_EQ_B4_PG_WIDTH 16 /* EQ_B4_PG - [15:0] */
+
+/*
+ * R155 (0x9B) - EQ22
+ */
+#define WM8904_EQ_B5_A_MASK 0xFFFF /* EQ_B5_A - [15:0] */
+#define WM8904_EQ_B5_A_SHIFT 0 /* EQ_B5_A - [15:0] */
+#define WM8904_EQ_B5_A_WIDTH 16 /* EQ_B5_A - [15:0] */
+
+/*
+ * R156 (0x9C) - EQ23
+ */
+#define WM8904_EQ_B5_B_MASK 0xFFFF /* EQ_B5_B - [15:0] */
+#define WM8904_EQ_B5_B_SHIFT 0 /* EQ_B5_B - [15:0] */
+#define WM8904_EQ_B5_B_WIDTH 16 /* EQ_B5_B - [15:0] */
+
+/*
+ * R157 (0x9D) - EQ24
+ */
+#define WM8904_EQ_B5_PG_MASK 0xFFFF /* EQ_B5_PG - [15:0] */
+#define WM8904_EQ_B5_PG_SHIFT 0 /* EQ_B5_PG - [15:0] */
+#define WM8904_EQ_B5_PG_WIDTH 16 /* EQ_B5_PG - [15:0] */
+
+/*
+ * R161 (0xA1) - Control Interface Test 1
+ */
+#define WM8904_USER_KEY 0x0002 /* USER_KEY */
+#define WM8904_USER_KEY_MASK 0x0002 /* USER_KEY */
+#define WM8904_USER_KEY_SHIFT 1 /* USER_KEY */
+#define WM8904_USER_KEY_WIDTH 1 /* USER_KEY */
+
+/*
+ * R204 (0xCC) - Analogue Output Bias 0
+ */
+#define WM8904_PGA_BIAS_MASK 0x0070 /* PGA_BIAS - [6:4] */
+#define WM8904_PGA_BIAS_SHIFT 4 /* PGA_BIAS - [6:4] */
+#define WM8904_PGA_BIAS_WIDTH 3 /* PGA_BIAS - [6:4] */
+
+/*
+ * R247 (0xF7) - FLL NCO Test 0
+ */
+#define WM8904_FLL_FRC_NCO 0x0001 /* FLL_FRC_NCO */
+#define WM8904_FLL_FRC_NCO_MASK 0x0001 /* FLL_FRC_NCO */
+#define WM8904_FLL_FRC_NCO_SHIFT 0 /* FLL_FRC_NCO */
+#define WM8904_FLL_FRC_NCO_WIDTH 1 /* FLL_FRC_NCO */
+
+/*
+ * R248 (0xF8) - FLL NCO Test 1
+ */
+#define WM8904_FLL_FRC_NCO_VAL_MASK 0x003F /* FLL_FRC_NCO_VAL - [5:0] */
+#define WM8904_FLL_FRC_NCO_VAL_SHIFT 0 /* FLL_FRC_NCO_VAL - [5:0] */
+#define WM8904_FLL_FRC_NCO_VAL_WIDTH 6 /* FLL_FRC_NCO_VAL - [5:0] */
+
+#endif
diff --git a/sound/soc/codecs/wm8955.c b/sound/soc/codecs/wm8955.c
new file mode 100644
index 000000000000..615dab2b62ef
--- /dev/null
+++ b/sound/soc/codecs/wm8955.c
@@ -0,0 +1,1151 @@
+/*
+ * wm8955.c -- WM8955 ALSA SoC Audio driver
+ *
+ * Copyright 2009 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <sound/wm8955.h>
+
+#include "wm8955.h"
+
+static struct snd_soc_codec *wm8955_codec;
+struct snd_soc_codec_device soc_codec_dev_wm8955;
+
+#define WM8955_NUM_SUPPLIES 4
+static const char *wm8955_supply_names[WM8955_NUM_SUPPLIES] = {
+ "DCVDD",
+ "DBVDD",
+ "HPVDD",
+ "AVDD",
+};
+
+/* codec private data */
+struct wm8955_priv {
+ struct snd_soc_codec codec;
+ u16 reg_cache[WM8955_MAX_REGISTER + 1];
+
+ unsigned int mclk_rate;
+
+ int deemph;
+ int fs;
+
+ struct regulator_bulk_data supplies[WM8955_NUM_SUPPLIES];
+
+ struct wm8955_pdata *pdata;
+};
+
+static const u16 wm8955_reg[WM8955_MAX_REGISTER + 1] = {
+ 0x0000, /* R0 */
+ 0x0000, /* R1 */
+ 0x0079, /* R2 - LOUT1 volume */
+ 0x0079, /* R3 - ROUT1 volume */
+ 0x0000, /* R4 */
+ 0x0008, /* R5 - DAC Control */
+ 0x0000, /* R6 */
+ 0x000A, /* R7 - Audio Interface */
+ 0x0000, /* R8 - Sample Rate */
+ 0x0000, /* R9 */
+ 0x00FF, /* R10 - Left DAC volume */
+ 0x00FF, /* R11 - Right DAC volume */
+ 0x000F, /* R12 - Bass control */
+ 0x000F, /* R13 - Treble control */
+ 0x0000, /* R14 */
+ 0x0000, /* R15 - Reset */
+ 0x0000, /* R16 */
+ 0x0000, /* R17 */
+ 0x0000, /* R18 */
+ 0x0000, /* R19 */
+ 0x0000, /* R20 */
+ 0x0000, /* R21 */
+ 0x0000, /* R22 */
+ 0x00C1, /* R23 - Additional control (1) */
+ 0x0000, /* R24 - Additional control (2) */
+ 0x0000, /* R25 - Power Management (1) */
+ 0x0000, /* R26 - Power Management (2) */
+ 0x0000, /* R27 - Additional Control (3) */
+ 0x0000, /* R28 */
+ 0x0000, /* R29 */
+ 0x0000, /* R30 */
+ 0x0000, /* R31 */
+ 0x0000, /* R32 */
+ 0x0000, /* R33 */
+ 0x0050, /* R34 - Left out Mix (1) */
+ 0x0050, /* R35 - Left out Mix (2) */
+ 0x0050, /* R36 - Right out Mix (1) */
+ 0x0050, /* R37 - Right Out Mix (2) */
+ 0x0050, /* R38 - Mono out Mix (1) */
+ 0x0050, /* R39 - Mono out Mix (2) */
+ 0x0079, /* R40 - LOUT2 volume */
+ 0x0079, /* R41 - ROUT2 volume */
+ 0x0079, /* R42 - MONOOUT volume */
+ 0x0000, /* R43 - Clocking / PLL */
+ 0x0103, /* R44 - PLL Control 1 */
+ 0x0024, /* R45 - PLL Control 2 */
+ 0x01BA, /* R46 - PLL Control 3 */
+ 0x0000, /* R47 */
+ 0x0000, /* R48 */
+ 0x0000, /* R49 */
+ 0x0000, /* R50 */
+ 0x0000, /* R51 */
+ 0x0000, /* R52 */
+ 0x0000, /* R53 */
+ 0x0000, /* R54 */
+ 0x0000, /* R55 */
+ 0x0000, /* R56 */
+ 0x0000, /* R57 */
+ 0x0000, /* R58 */
+ 0x0000, /* R59 - PLL Control 4 */
+};
+
+static int wm8955_reset(struct snd_soc_codec *codec)
+{
+ return snd_soc_write(codec, WM8955_RESET, 0);
+}
+
+struct pll_factors {
+ int n;
+ int k;
+ int outdiv;
+};
+
+/* The size in bits of the FLL divide multiplied by 10
+ * to allow rounding later */
+#define FIXED_FLL_SIZE ((1 << 22) * 10)
+
+static int wm8995_pll_factors(struct device *dev,
+ int Fref, int Fout, struct pll_factors *pll)
+{
+ u64 Kpart;
+ unsigned int K, Ndiv, Nmod, target;
+
+ dev_dbg(dev, "Fref=%u Fout=%u\n", Fref, Fout);
+
+ /* The oscilator should run at should be 90-100MHz, and
+ * there's a divide by 4 plus an optional divide by 2 in the
+ * output path to generate the system clock. The clock table
+ * is sortd so we should always generate a suitable target. */
+ target = Fout * 4;
+ if (target < 90000000) {
+ pll->outdiv = 1;
+ target *= 2;
+ } else {
+ pll->outdiv = 0;
+ }
+
+ WARN_ON(target < 90000000 || target > 100000000);
+
+ dev_dbg(dev, "Fvco=%dHz\n", target);
+
+ /* Now, calculate N.K */
+ Ndiv = target / Fref;
+
+ pll->n = Ndiv;
+ Nmod = target % Fref;
+ dev_dbg(dev, "Nmod=%d\n", Nmod);
+
+ /* Calculate fractional part - scale up so we can round. */
+ Kpart = FIXED_FLL_SIZE * (long long)Nmod;
+
+ do_div(Kpart, Fref);
+
+ K = Kpart & 0xFFFFFFFF;
+
+ if ((K % 10) >= 5)
+ K += 5;
+
+ /* Move down to proper range now rounding is done */
+ pll->k = K / 10;
+
+ dev_dbg(dev, "N=%x K=%x OUTDIV=%x\n", pll->n, pll->k, pll->outdiv);
+
+ return 0;
+}
+
+/* Lookup table specifiying SRATE (table 25 in datasheet); some of the
+ * output frequencies have been rounded to the standard frequencies
+ * they are intended to match where the error is slight. */
+static struct {
+ int mclk;
+ int fs;
+ int usb;
+ int sr;
+} clock_cfgs[] = {
+ { 18432000, 8000, 0, 3, },
+ { 18432000, 12000, 0, 9, },
+ { 18432000, 16000, 0, 11, },
+ { 18432000, 24000, 0, 29, },
+ { 18432000, 32000, 0, 13, },
+ { 18432000, 48000, 0, 1, },
+ { 18432000, 96000, 0, 15, },
+
+ { 16934400, 8018, 0, 19, },
+ { 16934400, 11025, 0, 25, },
+ { 16934400, 22050, 0, 27, },
+ { 16934400, 44100, 0, 17, },
+ { 16934400, 88200, 0, 31, },
+
+ { 12000000, 8000, 1, 2, },
+ { 12000000, 11025, 1, 25, },
+ { 12000000, 12000, 1, 8, },
+ { 12000000, 16000, 1, 10, },
+ { 12000000, 22050, 1, 27, },
+ { 12000000, 24000, 1, 28, },
+ { 12000000, 32000, 1, 12, },
+ { 12000000, 44100, 1, 17, },
+ { 12000000, 48000, 1, 0, },
+ { 12000000, 88200, 1, 31, },
+ { 12000000, 96000, 1, 14, },
+
+ { 12288000, 8000, 0, 2, },
+ { 12288000, 12000, 0, 8, },
+ { 12288000, 16000, 0, 10, },
+ { 12288000, 24000, 0, 28, },
+ { 12288000, 32000, 0, 12, },
+ { 12288000, 48000, 0, 0, },
+ { 12288000, 96000, 0, 14, },
+
+ { 12289600, 8018, 0, 18, },
+ { 12289600, 11025, 0, 24, },
+ { 12289600, 22050, 0, 26, },
+ { 11289600, 44100, 0, 16, },
+ { 11289600, 88200, 0, 31, },
+};
+
+static int wm8955_configure_clocking(struct snd_soc_codec *codec)
+{
+ struct wm8955_priv *wm8955 = codec->private_data;
+ int i, ret, val;
+ int clocking = 0;
+ int srate = 0;
+ int sr = -1;
+ struct pll_factors pll;
+
+ /* If we're not running a sample rate currently just pick one */
+ if (wm8955->fs == 0)
+ wm8955->fs = 8000;
+
+ /* Can we generate an exact output? */
+ for (i = 0; i < ARRAY_SIZE(clock_cfgs); i++) {
+ if (wm8955->fs != clock_cfgs[i].fs)
+ continue;
+ sr = i;
+
+ if (wm8955->mclk_rate == clock_cfgs[i].mclk)
+ break;
+ }
+
+ /* We should never get here with an unsupported sample rate */
+ if (sr == -1) {
+ dev_err(codec->dev, "Sample rate %dHz unsupported\n",
+ wm8955->fs);
+ WARN_ON(sr == -1);
+ return -EINVAL;
+ }
+
+ if (i == ARRAY_SIZE(clock_cfgs)) {
+ /* If we can't generate the right clock from MCLK then
+ * we should configure the PLL to supply us with an
+ * appropriate clock.
+ */
+ clocking |= WM8955_MCLKSEL;
+
+ /* Use the last divider configuration we saw for the
+ * sample rate. */
+ ret = wm8995_pll_factors(codec->dev, wm8955->mclk_rate,
+ clock_cfgs[sr].mclk, &pll);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Unable to generate %dHz from %dHz MCLK\n",
+ wm8955->fs, wm8955->mclk_rate);
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, WM8955_PLL_CONTROL_1,
+ WM8955_N_MASK | WM8955_K_21_18_MASK,
+ (pll.n << WM8955_N_SHIFT) |
+ pll.k >> 18);
+ snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
+ WM8955_K_17_9_MASK,
+ (pll.k >> 9) & WM8955_K_17_9_MASK);
+ snd_soc_update_bits(codec, WM8955_PLL_CONTROL_2,
+ WM8955_K_8_0_MASK,
+ pll.k & WM8955_K_8_0_MASK);
+ if (pll.k)
+ snd_soc_update_bits(codec, WM8955_PLL_CONTROL_4,
+ WM8955_KEN, WM8955_KEN);
+ else
+ snd_soc_update_bits(codec, WM8955_PLL_CONTROL_4,
+ WM8955_KEN, 0);
+
+ if (pll.outdiv)
+ val = WM8955_PLL_RB | WM8955_PLLOUTDIV2;
+ else
+ val = WM8955_PLL_RB;
+
+ /* Now start the PLL running */
+ snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
+ WM8955_PLL_RB | WM8955_PLLOUTDIV2, val);
+ snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
+ WM8955_PLLEN, WM8955_PLLEN);
+ }
+
+ srate = clock_cfgs[sr].usb | (clock_cfgs[sr].sr << WM8955_SR_SHIFT);
+
+ snd_soc_update_bits(codec, WM8955_SAMPLE_RATE,
+ WM8955_USB | WM8955_SR_MASK, srate);
+ snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
+ WM8955_MCLKSEL, clocking);
+
+ return 0;
+}
+
+static int wm8955_sysclk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ int ret = 0;
+
+ /* Always disable the clocks - if we're doing reconfiguration this
+ * avoids misclocking.
+ */
+ snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
+ WM8955_DIGENB, 0);
+ snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
+ WM8955_PLL_RB | WM8955_PLLEN, 0);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMD:
+ break;
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = wm8955_configure_clocking(codec);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int deemph_settings[] = { 0, 32000, 44100, 48000 };
+
+static int wm8955_set_deemph(struct snd_soc_codec *codec)
+{
+ struct wm8955_priv *wm8955 = codec->private_data;
+ int val, i, best;
+
+ /* If we're using deemphasis select the nearest available sample
+ * rate.
+ */
+ if (wm8955->deemph) {
+ best = 1;
+ for (i = 2; i < ARRAY_SIZE(deemph_settings); i++) {
+ if (abs(deemph_settings[i] - wm8955->fs) <
+ abs(deemph_settings[best] - wm8955->fs))
+ best = i;
+ }
+
+ val = best << WM8955_DEEMPH_SHIFT;
+ } else {
+ val = 0;
+ }
+
+ dev_dbg(codec->dev, "Set deemphasis %d\n", val);
+
+ return snd_soc_update_bits(codec, WM8955_DAC_CONTROL,
+ WM8955_DEEMPH_MASK, val);
+}
+
+static int wm8955_get_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8955_priv *wm8955 = codec->private_data;
+
+ return wm8955->deemph;
+}
+
+static int wm8955_put_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct wm8955_priv *wm8955 = codec->private_data;
+ int deemph = ucontrol->value.enumerated.item[0];
+
+ if (deemph > 1)
+ return -EINVAL;
+
+ wm8955->deemph = deemph;
+
+ return wm8955_set_deemph(codec);
+}
+
+static const char *bass_mode_text[] = {
+ "Linear", "Adaptive",
+};
+
+static const struct soc_enum bass_mode =
+ SOC_ENUM_SINGLE(WM8955_BASS_CONTROL, 7, 2, bass_mode_text);
+
+static const char *bass_cutoff_text[] = {
+ "Low", "High"
+};
+
+static const struct soc_enum bass_cutoff =
+ SOC_ENUM_SINGLE(WM8955_BASS_CONTROL, 6, 2, bass_cutoff_text);
+
+static const char *treble_cutoff_text[] = {
+ "High", "Low"
+};
+
+static const struct soc_enum treble_cutoff =
+ SOC_ENUM_SINGLE(WM8955_TREBLE_CONTROL, 6, 2, treble_cutoff_text);
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(atten_tlv, -600, 600, 0);
+static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0);
+static const DECLARE_TLV_DB_SCALE(mono_tlv, -2100, 300, 0);
+static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1);
+static const DECLARE_TLV_DB_SCALE(treble_tlv, -1200, 150, 1);
+
+static const struct snd_kcontrol_new wm8955_snd_controls[] = {
+SOC_DOUBLE_R_TLV("Digital Playback Volume", WM8955_LEFT_DAC_VOLUME,
+ WM8955_RIGHT_DAC_VOLUME, 0, 255, 0, digital_tlv),
+SOC_SINGLE_TLV("Playback Attenuation Volume", WM8955_DAC_CONTROL, 7, 1, 1,
+ atten_tlv),
+SOC_SINGLE_BOOL_EXT("DAC Deemphasis Switch", 0,
+ wm8955_get_deemph, wm8955_put_deemph),
+
+SOC_ENUM("Bass Mode", bass_mode),
+SOC_ENUM("Bass Cutoff", bass_cutoff),
+SOC_SINGLE("Bass Volume", WM8955_BASS_CONTROL, 0, 15, 1),
+
+SOC_ENUM("Treble Cutoff", treble_cutoff),
+SOC_SINGLE_TLV("Treble Volume", WM8955_TREBLE_CONTROL, 0, 14, 1, treble_tlv),
+
+SOC_SINGLE_TLV("Left Bypass Volume", WM8955_LEFT_OUT_MIX_1, 4, 7, 1,
+ bypass_tlv),
+SOC_SINGLE_TLV("Left Mono Volume", WM8955_LEFT_OUT_MIX_2, 4, 7, 1,
+ bypass_tlv),
+
+SOC_SINGLE_TLV("Right Mono Volume", WM8955_RIGHT_OUT_MIX_1, 4, 7, 1,
+ bypass_tlv),
+SOC_SINGLE_TLV("Right Bypass Volume", WM8955_RIGHT_OUT_MIX_2, 4, 7, 1,
+ bypass_tlv),
+
+/* Not a stereo pair so they line up with the DAPM switches */
+SOC_SINGLE_TLV("Mono Left Bypass Volume", WM8955_MONO_OUT_MIX_1, 4, 7, 1,
+ mono_tlv),
+SOC_SINGLE_TLV("Mono Right Bypass Volume", WM8955_MONO_OUT_MIX_2, 4, 7, 1,
+ mono_tlv),
+
+SOC_DOUBLE_R_TLV("Headphone Volume", WM8955_LOUT1_VOLUME,
+ WM8955_ROUT1_VOLUME, 0, 127, 0, out_tlv),
+SOC_DOUBLE_R("Headphone ZC Switch", WM8955_LOUT1_VOLUME,
+ WM8955_ROUT1_VOLUME, 7, 1, 0),
+
+SOC_DOUBLE_R_TLV("Speaker Volume", WM8955_LOUT2_VOLUME,
+ WM8955_ROUT2_VOLUME, 0, 127, 0, out_tlv),
+SOC_DOUBLE_R("Speaker ZC Switch", WM8955_LOUT2_VOLUME,
+ WM8955_ROUT2_VOLUME, 7, 1, 0),
+
+SOC_SINGLE_TLV("Mono Volume", WM8955_MONOOUT_VOLUME, 0, 127, 0, out_tlv),
+SOC_SINGLE("Mono ZC Switch", WM8955_MONOOUT_VOLUME, 7, 1, 0),
+};
+
+static const struct snd_kcontrol_new lmixer[] = {
+SOC_DAPM_SINGLE("Playback Switch", WM8955_LEFT_OUT_MIX_1, 8, 1, 0),
+SOC_DAPM_SINGLE("Bypass Switch", WM8955_LEFT_OUT_MIX_1, 7, 1, 0),
+SOC_DAPM_SINGLE("Right Playback Switch", WM8955_LEFT_OUT_MIX_2, 8, 1, 0),
+SOC_DAPM_SINGLE("Mono Switch", WM8955_LEFT_OUT_MIX_2, 7, 1, 0),
+};
+
+static const struct snd_kcontrol_new rmixer[] = {
+SOC_DAPM_SINGLE("Left Playback Switch", WM8955_RIGHT_OUT_MIX_1, 8, 1, 0),
+SOC_DAPM_SINGLE("Mono Switch", WM8955_RIGHT_OUT_MIX_1, 7, 1, 0),
+SOC_DAPM_SINGLE("Playback Switch", WM8955_RIGHT_OUT_MIX_2, 8, 1, 0),
+SOC_DAPM_SINGLE("Bypass Switch", WM8955_RIGHT_OUT_MIX_2, 7, 1, 0),
+};
+
+static const struct snd_kcontrol_new mmixer[] = {
+SOC_DAPM_SINGLE("Left Playback Switch", WM8955_MONO_OUT_MIX_1, 8, 1, 0),
+SOC_DAPM_SINGLE("Left Bypass Switch", WM8955_MONO_OUT_MIX_1, 7, 1, 0),
+SOC_DAPM_SINGLE("Right Playback Switch", WM8955_MONO_OUT_MIX_2, 8, 1, 0),
+SOC_DAPM_SINGLE("Right Bypass Switch", WM8955_MONO_OUT_MIX_2, 7, 1, 0),
+};
+
+static const struct snd_soc_dapm_widget wm8955_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("MONOIN-"),
+SND_SOC_DAPM_INPUT("MONOIN+"),
+SND_SOC_DAPM_INPUT("LINEINR"),
+SND_SOC_DAPM_INPUT("LINEINL"),
+
+SND_SOC_DAPM_PGA("Mono Input", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+SND_SOC_DAPM_SUPPLY("SYSCLK", WM8955_POWER_MANAGEMENT_1, 0, 1, wm8955_sysclk,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_SUPPLY("TSDEN", WM8955_ADDITIONAL_CONTROL_1, 8, 0, NULL, 0),
+
+SND_SOC_DAPM_DAC("DACL", "Playback", WM8955_POWER_MANAGEMENT_2, 8, 0),
+SND_SOC_DAPM_DAC("DACR", "Playback", WM8955_POWER_MANAGEMENT_2, 7, 0),
+
+SND_SOC_DAPM_PGA("LOUT1 PGA", WM8955_POWER_MANAGEMENT_2, 6, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ROUT1 PGA", WM8955_POWER_MANAGEMENT_2, 5, 0, NULL, 0),
+SND_SOC_DAPM_PGA("LOUT2 PGA", WM8955_POWER_MANAGEMENT_2, 4, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ROUT2 PGA", WM8955_POWER_MANAGEMENT_2, 3, 0, NULL, 0),
+SND_SOC_DAPM_PGA("MOUT PGA", WM8955_POWER_MANAGEMENT_2, 2, 0, NULL, 0),
+SND_SOC_DAPM_PGA("OUT3 PGA", WM8955_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
+
+/* The names are chosen to make the control names nice */
+SND_SOC_DAPM_MIXER("Left", SND_SOC_NOPM, 0, 0,
+ lmixer, ARRAY_SIZE(lmixer)),
+SND_SOC_DAPM_MIXER("Right", SND_SOC_NOPM, 0, 0,
+ rmixer, ARRAY_SIZE(rmixer)),
+SND_SOC_DAPM_MIXER("Mono", SND_SOC_NOPM, 0, 0,
+ mmixer, ARRAY_SIZE(mmixer)),
+
+SND_SOC_DAPM_OUTPUT("LOUT1"),
+SND_SOC_DAPM_OUTPUT("ROUT1"),
+SND_SOC_DAPM_OUTPUT("LOUT2"),
+SND_SOC_DAPM_OUTPUT("ROUT2"),
+SND_SOC_DAPM_OUTPUT("MONOOUT"),
+SND_SOC_DAPM_OUTPUT("OUT3"),
+};
+
+static const struct snd_soc_dapm_route wm8955_intercon[] = {
+ { "DACL", NULL, "SYSCLK" },
+ { "DACR", NULL, "SYSCLK" },
+
+ { "Mono Input", NULL, "MONOIN-" },
+ { "Mono Input", NULL, "MONOIN+" },
+
+ { "Left", "Playback Switch", "DACL" },
+ { "Left", "Right Playback Switch", "DACR" },
+ { "Left", "Bypass Switch", "LINEINL" },
+ { "Left", "Mono Switch", "Mono Input" },
+
+ { "Right", "Playback Switch", "DACR" },
+ { "Right", "Left Playback Switch", "DACL" },
+ { "Right", "Bypass Switch", "LINEINR" },
+ { "Right", "Mono Switch", "Mono Input" },
+
+ { "Mono", "Left Playback Switch", "DACL" },
+ { "Mono", "Right Playback Switch", "DACR" },
+ { "Mono", "Left Bypass Switch", "LINEINL" },
+ { "Mono", "Right Bypass Switch", "LINEINR" },
+
+ { "LOUT1 PGA", NULL, "Left" },
+ { "LOUT1", NULL, "TSDEN" },
+ { "LOUT1", NULL, "LOUT1 PGA" },
+
+ { "ROUT1 PGA", NULL, "Right" },
+ { "ROUT1", NULL, "TSDEN" },
+ { "ROUT1", NULL, "ROUT1 PGA" },
+
+ { "LOUT2 PGA", NULL, "Left" },
+ { "LOUT2", NULL, "TSDEN" },
+ { "LOUT2", NULL, "LOUT2 PGA" },
+
+ { "ROUT2 PGA", NULL, "Right" },
+ { "ROUT2", NULL, "TSDEN" },
+ { "ROUT2", NULL, "ROUT2 PGA" },
+
+ { "MOUT PGA", NULL, "Mono" },
+ { "MONOOUT", NULL, "MOUT PGA" },
+
+ /* OUT3 not currently implemented */
+ { "OUT3", NULL, "OUT3 PGA" },
+};
+
+static int wm8955_add_widgets(struct snd_soc_codec *codec)
+{
+ snd_soc_add_controls(codec, wm8955_snd_controls,
+ ARRAY_SIZE(wm8955_snd_controls));
+
+ snd_soc_dapm_new_controls(codec, wm8955_dapm_widgets,
+ ARRAY_SIZE(wm8955_dapm_widgets));
+
+ snd_soc_dapm_add_routes(codec, wm8955_intercon,
+ ARRAY_SIZE(wm8955_intercon));
+
+ return 0;
+}
+
+static int wm8955_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8955_priv *wm8955 = codec->private_data;
+ int ret;
+ int wl;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ wl = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ wl = 0x4;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ wl = 0x8;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ wl = 0xc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ snd_soc_update_bits(codec, WM8955_AUDIO_INTERFACE,
+ WM8955_WL_MASK, wl);
+
+ wm8955->fs = params_rate(params);
+ wm8955_set_deemph(codec);
+
+ /* If the chip is clocked then disable the clocks and force a
+ * reconfiguration, otherwise DAPM will power up the
+ * clocks for us later. */
+ ret = snd_soc_read(codec, WM8955_POWER_MANAGEMENT_1);
+ if (ret < 0)
+ return ret;
+ if (ret & WM8955_DIGENB) {
+ snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
+ WM8955_DIGENB, 0);
+ snd_soc_update_bits(codec, WM8955_CLOCKING_PLL,
+ WM8955_PLL_RB | WM8955_PLLEN, 0);
+
+ wm8955_configure_clocking(codec);
+ }
+
+ return 0;
+}
+
+
+static int wm8955_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct wm8955_priv *priv = codec->private_data;
+ int div;
+
+ switch (clk_id) {
+ case WM8955_CLK_MCLK:
+ if (freq > 15000000) {
+ priv->mclk_rate = freq /= 2;
+ div = WM8955_MCLKDIV2;
+ } else {
+ priv->mclk_rate = freq;
+ div = 0;
+ }
+
+ snd_soc_update_bits(codec, WM8955_SAMPLE_RATE,
+ WM8955_MCLKDIV2, div);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(dai->dev, "Clock source is %d at %uHz\n", clk_id, freq);
+
+ return 0;
+}
+
+static int wm8955_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ u16 aif = 0;
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ case SND_SOC_DAIFMT_CBM_CFM:
+ aif |= WM8955_MS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_B:
+ aif |= WM8955_LRP;
+ case SND_SOC_DAIFMT_DSP_A:
+ aif |= 0x3;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ aif |= 0x2;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ aif |= 0x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_DSP_A:
+ case SND_SOC_DAIFMT_DSP_B:
+ /* frame inversion not valid for DSP modes */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif |= WM8955_BCLKINV;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+
+ case SND_SOC_DAIFMT_I2S:
+ case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_LEFT_J:
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ aif |= WM8955_BCLKINV | WM8955_LRP;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ aif |= WM8955_BCLKINV;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ aif |= WM8955_LRP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_update_bits(codec, WM8955_AUDIO_INTERFACE,
+ WM8955_MS | WM8955_FORMAT_MASK | WM8955_BCLKINV |
+ WM8955_LRP, aif);
+
+ return 0;
+}
+
+
+static int wm8955_digital_mute(struct snd_soc_dai *codec_dai, int mute)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ int val;
+
+ if (mute)
+ val = WM8955_DACMU;
+ else
+ val = 0;
+
+ snd_soc_update_bits(codec, WM8955_DAC_CONTROL, WM8955_DACMU, val);
+
+ return 0;
+}
+
+static int wm8955_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct wm8955_priv *wm8955 = codec->private_data;
+ int ret, i;
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ break;
+
+ case SND_SOC_BIAS_PREPARE:
+ /* VMID resistance 2*50k */
+ snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
+ WM8955_VMIDSEL_MASK,
+ 0x1 << WM8955_VMIDSEL_SHIFT);
+
+ /* Default bias current */
+ snd_soc_update_bits(codec, WM8955_ADDITIONAL_CONTROL_1,
+ WM8955_VSEL_MASK,
+ 0x2 << WM8955_VSEL_SHIFT);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ if (codec->bias_level == SND_SOC_BIAS_OFF) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies),
+ wm8955->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to enable supplies: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Sync back cached values if they're
+ * different from the hardware default.
+ */
+ for (i = 0; i < ARRAY_SIZE(wm8955->reg_cache); i++) {
+ if (i == WM8955_RESET)
+ continue;
+
+ if (wm8955->reg_cache[i] == wm8955_reg[i])
+ continue;
+
+ snd_soc_write(codec, i, wm8955->reg_cache[i]);
+ }
+
+ /* Enable VREF and VMID */
+ snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
+ WM8955_VREF |
+ WM8955_VMIDSEL_MASK,
+ WM8955_VREF |
+ 0x3 << WM8955_VREF_SHIFT);
+
+ /* Let VMID ramp */
+ msleep(500);
+
+ /* High resistance VROI to maintain outputs */
+ snd_soc_update_bits(codec,
+ WM8955_ADDITIONAL_CONTROL_3,
+ WM8955_VROI, WM8955_VROI);
+ }
+
+ /* Maintain VMID with 2*250k */
+ snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
+ WM8955_VMIDSEL_MASK,
+ 0x2 << WM8955_VMIDSEL_SHIFT);
+
+ /* Minimum bias current */
+ snd_soc_update_bits(codec, WM8955_ADDITIONAL_CONTROL_1,
+ WM8955_VSEL_MASK, 0);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ /* Low resistance VROI to help discharge */
+ snd_soc_update_bits(codec,
+ WM8955_ADDITIONAL_CONTROL_3,
+ WM8955_VROI, 0);
+
+ /* Turn off VMID and VREF */
+ snd_soc_update_bits(codec, WM8955_POWER_MANAGEMENT_1,
+ WM8955_VREF |
+ WM8955_VMIDSEL_MASK, 0);
+
+ regulator_bulk_disable(ARRAY_SIZE(wm8955->supplies),
+ wm8955->supplies);
+ break;
+ }
+ codec->bias_level = level;
+ return 0;
+}
+
+#define WM8955_RATES SNDRV_PCM_RATE_8000_96000
+
+#define WM8955_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops wm8955_dai_ops = {
+ .set_sysclk = wm8955_set_sysclk,
+ .set_fmt = wm8955_set_fmt,
+ .hw_params = wm8955_hw_params,
+ .digital_mute = wm8955_digital_mute,
+};
+
+struct snd_soc_dai wm8955_dai = {
+ .name = "WM8955",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = WM8955_RATES,
+ .formats = WM8955_FORMATS,
+ },
+ .ops = &wm8955_dai_ops,
+};
+EXPORT_SYMBOL_GPL(wm8955_dai);
+
+#ifdef CONFIG_PM
+static int wm8955_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8955_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int wm8955_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+
+ wm8955_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+#else
+#define wm8955_suspend NULL
+#define wm8955_resume NULL
+#endif
+
+static int wm8955_probe(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec;
+ int ret = 0;
+
+ if (wm8955_codec == NULL) {
+ dev_err(&pdev->dev, "Codec device not registered\n");
+ return -ENODEV;
+ }
+
+ socdev->card->codec = wm8955_codec;
+ codec = wm8955_codec;
+
+ /* register pcms */
+ ret = snd_soc_new_pcms(socdev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1);
+ if (ret < 0) {
+ dev_err(codec->dev, "failed to create pcms: %d\n", ret);
+ goto pcm_err;
+ }
+
+ wm8955_add_widgets(codec);
+
+ return ret;
+
+pcm_err:
+ return ret;
+}
+
+static int wm8955_remove(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+
+ snd_soc_free_pcms(socdev);
+ snd_soc_dapm_free(socdev);
+
+ return 0;
+}
+
+struct snd_soc_codec_device soc_codec_dev_wm8955 = {
+ .probe = wm8955_probe,
+ .remove = wm8955_remove,
+ .suspend = wm8955_suspend,
+ .resume = wm8955_resume,
+};
+EXPORT_SYMBOL_GPL(soc_codec_dev_wm8955);
+
+static int wm8955_register(struct wm8955_priv *wm8955,
+ enum snd_soc_control_type control)
+{
+ int ret;
+ struct snd_soc_codec *codec = &wm8955->codec;
+ int i;
+
+ if (wm8955_codec) {
+ dev_err(codec->dev, "Another WM8955 is registered\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&codec->mutex);
+ INIT_LIST_HEAD(&codec->dapm_widgets);
+ INIT_LIST_HEAD(&codec->dapm_paths);
+
+ codec->private_data = wm8955;
+ codec->name = "WM8955";
+ codec->owner = THIS_MODULE;
+ codec->bias_level = SND_SOC_BIAS_OFF;
+ codec->set_bias_level = wm8955_set_bias_level;
+ codec->dai = &wm8955_dai;
+ codec->num_dai = 1;
+ codec->reg_cache_size = WM8955_MAX_REGISTER;
+ codec->reg_cache = &wm8955->reg_cache;
+
+ memcpy(codec->reg_cache, wm8955_reg, sizeof(wm8955_reg));
+
+ ret = snd_soc_codec_set_cache_io(codec, 7, 9, control);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(wm8955->supplies); i++)
+ wm8955->supplies[i].supply = wm8955_supply_names[i];
+
+ ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8955->supplies),
+ wm8955->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
+ goto err;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(wm8955->supplies),
+ wm8955->supplies);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
+ goto err_get;
+ }
+
+ ret = wm8955_reset(codec);
+ if (ret < 0) {
+ dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
+ goto err_enable;
+ }
+
+ wm8955_dai.dev = codec->dev;
+
+ /* Change some default settings - latch VU and enable ZC */
+ wm8955->reg_cache[WM8955_LEFT_DAC_VOLUME] |= WM8955_LDVU;
+ wm8955->reg_cache[WM8955_RIGHT_DAC_VOLUME] |= WM8955_RDVU;
+ wm8955->reg_cache[WM8955_LOUT1_VOLUME] |= WM8955_LO1VU | WM8955_LO1ZC;
+ wm8955->reg_cache[WM8955_ROUT1_VOLUME] |= WM8955_RO1VU | WM8955_RO1ZC;
+ wm8955->reg_cache[WM8955_LOUT2_VOLUME] |= WM8955_LO2VU | WM8955_LO2ZC;
+ wm8955->reg_cache[WM8955_ROUT2_VOLUME] |= WM8955_RO2VU | WM8955_RO2ZC;
+ wm8955->reg_cache[WM8955_MONOOUT_VOLUME] |= WM8955_MOZC;
+
+ /* Also enable adaptive bass boost by default */
+ wm8955->reg_cache[WM8955_BASS_CONTROL] |= WM8955_BB;
+
+ /* Set platform data values */
+ if (wm8955->pdata) {
+ if (wm8955->pdata->out2_speaker)
+ wm8955->reg_cache[WM8955_ADDITIONAL_CONTROL_2]
+ |= WM8955_ROUT2INV;
+
+ if (wm8955->pdata->monoin_diff)
+ wm8955->reg_cache[WM8955_MONO_OUT_MIX_1]
+ |= WM8955_DMEN;
+ }
+
+ wm8955_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Bias level configuration will have done an extra enable */
+ regulator_bulk_disable(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);
+
+ wm8955_codec = codec;
+
+ ret = snd_soc_register_codec(codec);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register codec: %d\n", ret);
+ return ret;
+ }
+
+ ret = snd_soc_register_dai(&wm8955_dai);
+ if (ret != 0) {
+ dev_err(codec->dev, "Failed to register DAI: %d\n", ret);
+ snd_soc_unregister_codec(codec);
+ return ret;
+ }
+
+ return 0;
+
+err_enable:
+ regulator_bulk_disable(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);
+err_get:
+ regulator_bulk_free(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);
+err:
+ kfree(wm8955);
+ return ret;
+}
+
+static void wm8955_unregister(struct wm8955_priv *wm8955)
+{
+ wm8955_set_bias_level(&wm8955->codec, SND_SOC_BIAS_OFF);
+ regulator_bulk_free(ARRAY_SIZE(wm8955->supplies), wm8955->supplies);
+ snd_soc_unregister_dai(&wm8955_dai);
+ snd_soc_unregister_codec(&wm8955->codec);
+ kfree(wm8955);
+ wm8955_codec = NULL;
+}
+
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+static __devinit int wm8955_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct wm8955_priv *wm8955;
+ struct snd_soc_codec *codec;
+
+ wm8955 = kzalloc(sizeof(struct wm8955_priv), GFP_KERNEL);
+ if (wm8955 == NULL)
+ return -ENOMEM;
+
+ codec = &wm8955->codec;
+ codec->hw_write = (hw_write_t)i2c_master_send;
+
+ i2c_set_clientdata(i2c, wm8955);
+ codec->control_data = i2c;
+ wm8955->pdata = i2c->dev.platform_data;
+
+ codec->dev = &i2c->dev;
+
+ return wm8955_register(wm8955, SND_SOC_I2C);
+}
+
+static __devexit int wm8955_i2c_remove(struct i2c_client *client)
+{
+ struct wm8955_priv *wm8955 = i2c_get_clientdata(client);
+ wm8955_unregister(wm8955);
+ return 0;
+}
+
+static const struct i2c_device_id wm8955_i2c_id[] = {
+ { "wm8955", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, wm8955_i2c_id);
+
+static struct i2c_driver wm8955_i2c_driver = {
+ .driver = {
+ .name = "wm8955",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8955_i2c_probe,
+ .remove = __devexit_p(wm8955_i2c_remove),
+ .id_table = wm8955_i2c_id,
+};
+#endif
+
+static int __init wm8955_modinit(void)
+{
+ int ret;
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ ret = i2c_add_driver(&wm8955_i2c_driver);
+ if (ret != 0) {
+ printk(KERN_ERR "Failed to register WM8955 I2C driver: %d\n",
+ ret);
+ }
+#endif
+ return 0;
+}
+module_init(wm8955_modinit);
+
+static void __exit wm8955_exit(void)
+{
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
+ i2c_del_driver(&wm8955_i2c_driver);
+#endif
+}
+module_exit(wm8955_exit);
+
+MODULE_DESCRIPTION("ASoC WM8955 driver");
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/wm8955.h b/sound/soc/codecs/wm8955.h
new file mode 100644
index 000000000000..ae349c8531f6
--- /dev/null
+++ b/sound/soc/codecs/wm8955.h
@@ -0,0 +1,489 @@
+/*
+ * wm8955.h -- WM8904 ASoC driver
+ *
+ * Copyright 2009 Wolfson Microelectronics, plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8955_H
+#define _WM8955_H
+
+#define WM8955_CLK_MCLK 1
+
+extern struct snd_soc_dai wm8955_dai;
+extern struct snd_soc_codec_device soc_codec_dev_wm8955;
+
+/*
+ * Register values.
+ */
+#define WM8955_LOUT1_VOLUME 0x02
+#define WM8955_ROUT1_VOLUME 0x03
+#define WM8955_DAC_CONTROL 0x05
+#define WM8955_AUDIO_INTERFACE 0x07
+#define WM8955_SAMPLE_RATE 0x08
+#define WM8955_LEFT_DAC_VOLUME 0x0A
+#define WM8955_RIGHT_DAC_VOLUME 0x0B
+#define WM8955_BASS_CONTROL 0x0C
+#define WM8955_TREBLE_CONTROL 0x0D
+#define WM8955_RESET 0x0F
+#define WM8955_ADDITIONAL_CONTROL_1 0x17
+#define WM8955_ADDITIONAL_CONTROL_2 0x18
+#define WM8955_POWER_MANAGEMENT_1 0x19
+#define WM8955_POWER_MANAGEMENT_2 0x1A
+#define WM8955_ADDITIONAL_CONTROL_3 0x1B
+#define WM8955_LEFT_OUT_MIX_1 0x22
+#define WM8955_LEFT_OUT_MIX_2 0x23
+#define WM8955_RIGHT_OUT_MIX_1 0x24
+#define WM8955_RIGHT_OUT_MIX_2 0x25
+#define WM8955_MONO_OUT_MIX_1 0x26
+#define WM8955_MONO_OUT_MIX_2 0x27
+#define WM8955_LOUT2_VOLUME 0x28
+#define WM8955_ROUT2_VOLUME 0x29
+#define WM8955_MONOOUT_VOLUME 0x2A
+#define WM8955_CLOCKING_PLL 0x2B
+#define WM8955_PLL_CONTROL_1 0x2C
+#define WM8955_PLL_CONTROL_2 0x2D
+#define WM8955_PLL_CONTROL_3 0x2E
+#define WM8955_PLL_CONTROL_4 0x3B
+
+#define WM8955_REGISTER_COUNT 29
+#define WM8955_MAX_REGISTER 0x3B
+
+/*
+ * Field Definitions.
+ */
+
+/*
+ * R2 (0x02) - LOUT1 volume
+ */
+#define WM8955_LO1VU 0x0100 /* LO1VU */
+#define WM8955_LO1VU_MASK 0x0100 /* LO1VU */
+#define WM8955_LO1VU_SHIFT 8 /* LO1VU */
+#define WM8955_LO1VU_WIDTH 1 /* LO1VU */
+#define WM8955_LO1ZC 0x0080 /* LO1ZC */
+#define WM8955_LO1ZC_MASK 0x0080 /* LO1ZC */
+#define WM8955_LO1ZC_SHIFT 7 /* LO1ZC */
+#define WM8955_LO1ZC_WIDTH 1 /* LO1ZC */
+#define WM8955_LOUTVOL_MASK 0x007F /* LOUTVOL - [6:0] */
+#define WM8955_LOUTVOL_SHIFT 0 /* LOUTVOL - [6:0] */
+#define WM8955_LOUTVOL_WIDTH 7 /* LOUTVOL - [6:0] */
+
+/*
+ * R3 (0x03) - ROUT1 volume
+ */
+#define WM8955_RO1VU 0x0100 /* RO1VU */
+#define WM8955_RO1VU_MASK 0x0100 /* RO1VU */
+#define WM8955_RO1VU_SHIFT 8 /* RO1VU */
+#define WM8955_RO1VU_WIDTH 1 /* RO1VU */
+#define WM8955_RO1ZC 0x0080 /* RO1ZC */
+#define WM8955_RO1ZC_MASK 0x0080 /* RO1ZC */
+#define WM8955_RO1ZC_SHIFT 7 /* RO1ZC */
+#define WM8955_RO1ZC_WIDTH 1 /* RO1ZC */
+#define WM8955_ROUTVOL_MASK 0x007F /* ROUTVOL - [6:0] */
+#define WM8955_ROUTVOL_SHIFT 0 /* ROUTVOL - [6:0] */
+#define WM8955_ROUTVOL_WIDTH 7 /* ROUTVOL - [6:0] */
+
+/*
+ * R5 (0x05) - DAC Control
+ */
+#define WM8955_DAT 0x0080 /* DAT */
+#define WM8955_DAT_MASK 0x0080 /* DAT */
+#define WM8955_DAT_SHIFT 7 /* DAT */
+#define WM8955_DAT_WIDTH 1 /* DAT */
+#define WM8955_DACMU 0x0008 /* DACMU */
+#define WM8955_DACMU_MASK 0x0008 /* DACMU */
+#define WM8955_DACMU_SHIFT 3 /* DACMU */
+#define WM8955_DACMU_WIDTH 1 /* DACMU */
+#define WM8955_DEEMPH_MASK 0x0006 /* DEEMPH - [2:1] */
+#define WM8955_DEEMPH_SHIFT 1 /* DEEMPH - [2:1] */
+#define WM8955_DEEMPH_WIDTH 2 /* DEEMPH - [2:1] */
+
+/*
+ * R7 (0x07) - Audio Interface
+ */
+#define WM8955_BCLKINV 0x0080 /* BCLKINV */
+#define WM8955_BCLKINV_MASK 0x0080 /* BCLKINV */
+#define WM8955_BCLKINV_SHIFT 7 /* BCLKINV */
+#define WM8955_BCLKINV_WIDTH 1 /* BCLKINV */
+#define WM8955_MS 0x0040 /* MS */
+#define WM8955_MS_MASK 0x0040 /* MS */
+#define WM8955_MS_SHIFT 6 /* MS */
+#define WM8955_MS_WIDTH 1 /* MS */
+#define WM8955_LRSWAP 0x0020 /* LRSWAP */
+#define WM8955_LRSWAP_MASK 0x0020 /* LRSWAP */
+#define WM8955_LRSWAP_SHIFT 5 /* LRSWAP */
+#define WM8955_LRSWAP_WIDTH 1 /* LRSWAP */
+#define WM8955_LRP 0x0010 /* LRP */
+#define WM8955_LRP_MASK 0x0010 /* LRP */
+#define WM8955_LRP_SHIFT 4 /* LRP */
+#define WM8955_LRP_WIDTH 1 /* LRP */
+#define WM8955_WL_MASK 0x000C /* WL - [3:2] */
+#define WM8955_WL_SHIFT 2 /* WL - [3:2] */
+#define WM8955_WL_WIDTH 2 /* WL - [3:2] */
+#define WM8955_FORMAT_MASK 0x0003 /* FORMAT - [1:0] */
+#define WM8955_FORMAT_SHIFT 0 /* FORMAT - [1:0] */
+#define WM8955_FORMAT_WIDTH 2 /* FORMAT - [1:0] */
+
+/*
+ * R8 (0x08) - Sample Rate
+ */
+#define WM8955_BCLKDIV2 0x0080 /* BCLKDIV2 */
+#define WM8955_BCLKDIV2_MASK 0x0080 /* BCLKDIV2 */
+#define WM8955_BCLKDIV2_SHIFT 7 /* BCLKDIV2 */
+#define WM8955_BCLKDIV2_WIDTH 1 /* BCLKDIV2 */
+#define WM8955_MCLKDIV2 0x0040 /* MCLKDIV2 */
+#define WM8955_MCLKDIV2_MASK 0x0040 /* MCLKDIV2 */
+#define WM8955_MCLKDIV2_SHIFT 6 /* MCLKDIV2 */
+#define WM8955_MCLKDIV2_WIDTH 1 /* MCLKDIV2 */
+#define WM8955_SR_MASK 0x003E /* SR - [5:1] */
+#define WM8955_SR_SHIFT 1 /* SR - [5:1] */
+#define WM8955_SR_WIDTH 5 /* SR - [5:1] */
+#define WM8955_USB 0x0001 /* USB */
+#define WM8955_USB_MASK 0x0001 /* USB */
+#define WM8955_USB_SHIFT 0 /* USB */
+#define WM8955_USB_WIDTH 1 /* USB */
+
+/*
+ * R10 (0x0A) - Left DAC volume
+ */
+#define WM8955_LDVU 0x0100 /* LDVU */
+#define WM8955_LDVU_MASK 0x0100 /* LDVU */
+#define WM8955_LDVU_SHIFT 8 /* LDVU */
+#define WM8955_LDVU_WIDTH 1 /* LDVU */
+#define WM8955_LDACVOL_MASK 0x00FF /* LDACVOL - [7:0] */
+#define WM8955_LDACVOL_SHIFT 0 /* LDACVOL - [7:0] */
+#define WM8955_LDACVOL_WIDTH 8 /* LDACVOL - [7:0] */
+
+/*
+ * R11 (0x0B) - Right DAC volume
+ */
+#define WM8955_RDVU 0x0100 /* RDVU */
+#define WM8955_RDVU_MASK 0x0100 /* RDVU */
+#define WM8955_RDVU_SHIFT 8 /* RDVU */
+#define WM8955_RDVU_WIDTH 1 /* RDVU */
+#define WM8955_RDACVOL_MASK 0x00FF /* RDACVOL - [7:0] */
+#define WM8955_RDACVOL_SHIFT 0 /* RDACVOL - [7:0] */
+#define WM8955_RDACVOL_WIDTH 8 /* RDACVOL - [7:0] */
+
+/*
+ * R12 (0x0C) - Bass control
+ */
+#define WM8955_BB 0x0080 /* BB */
+#define WM8955_BB_MASK 0x0080 /* BB */
+#define WM8955_BB_SHIFT 7 /* BB */
+#define WM8955_BB_WIDTH 1 /* BB */
+#define WM8955_BC 0x0040 /* BC */
+#define WM8955_BC_MASK 0x0040 /* BC */
+#define WM8955_BC_SHIFT 6 /* BC */
+#define WM8955_BC_WIDTH 1 /* BC */
+#define WM8955_BASS_MASK 0x000F /* BASS - [3:0] */
+#define WM8955_BASS_SHIFT 0 /* BASS - [3:0] */
+#define WM8955_BASS_WIDTH 4 /* BASS - [3:0] */
+
+/*
+ * R13 (0x0D) - Treble control
+ */
+#define WM8955_TC 0x0040 /* TC */
+#define WM8955_TC_MASK 0x0040 /* TC */
+#define WM8955_TC_SHIFT 6 /* TC */
+#define WM8955_TC_WIDTH 1 /* TC */
+#define WM8955_TRBL_MASK 0x000F /* TRBL - [3:0] */
+#define WM8955_TRBL_SHIFT 0 /* TRBL - [3:0] */
+#define WM8955_TRBL_WIDTH 4 /* TRBL - [3:0] */
+
+/*
+ * R15 (0x0F) - Reset
+ */
+#define WM8955_RESET_MASK 0x01FF /* RESET - [8:0] */
+#define WM8955_RESET_SHIFT 0 /* RESET - [8:0] */
+#define WM8955_RESET_WIDTH 9 /* RESET - [8:0] */
+
+/*
+ * R23 (0x17) - Additional control (1)
+ */
+#define WM8955_TSDEN 0x0100 /* TSDEN */
+#define WM8955_TSDEN_MASK 0x0100 /* TSDEN */
+#define WM8955_TSDEN_SHIFT 8 /* TSDEN */
+#define WM8955_TSDEN_WIDTH 1 /* TSDEN */
+#define WM8955_VSEL_MASK 0x00C0 /* VSEL - [7:6] */
+#define WM8955_VSEL_SHIFT 6 /* VSEL - [7:6] */
+#define WM8955_VSEL_WIDTH 2 /* VSEL - [7:6] */
+#define WM8955_DMONOMIX_MASK 0x0030 /* DMONOMIX - [5:4] */
+#define WM8955_DMONOMIX_SHIFT 4 /* DMONOMIX - [5:4] */
+#define WM8955_DMONOMIX_WIDTH 2 /* DMONOMIX - [5:4] */
+#define WM8955_DACINV 0x0002 /* DACINV */
+#define WM8955_DACINV_MASK 0x0002 /* DACINV */
+#define WM8955_DACINV_SHIFT 1 /* DACINV */
+#define WM8955_DACINV_WIDTH 1 /* DACINV */
+#define WM8955_TOEN 0x0001 /* TOEN */
+#define WM8955_TOEN_MASK 0x0001 /* TOEN */
+#define WM8955_TOEN_SHIFT 0 /* TOEN */
+#define WM8955_TOEN_WIDTH 1 /* TOEN */
+
+/*
+ * R24 (0x18) - Additional control (2)
+ */
+#define WM8955_OUT3SW_MASK 0x0180 /* OUT3SW - [8:7] */
+#define WM8955_OUT3SW_SHIFT 7 /* OUT3SW - [8:7] */
+#define WM8955_OUT3SW_WIDTH 2 /* OUT3SW - [8:7] */
+#define WM8955_ROUT2INV 0x0010 /* ROUT2INV */
+#define WM8955_ROUT2INV_MASK 0x0010 /* ROUT2INV */
+#define WM8955_ROUT2INV_SHIFT 4 /* ROUT2INV */
+#define WM8955_ROUT2INV_WIDTH 1 /* ROUT2INV */
+#define WM8955_DACOSR 0x0001 /* DACOSR */
+#define WM8955_DACOSR_MASK 0x0001 /* DACOSR */
+#define WM8955_DACOSR_SHIFT 0 /* DACOSR */
+#define WM8955_DACOSR_WIDTH 1 /* DACOSR */
+
+/*
+ * R25 (0x19) - Power Management (1)
+ */
+#define WM8955_VMIDSEL_MASK 0x0180 /* VMIDSEL - [8:7] */
+#define WM8955_VMIDSEL_SHIFT 7 /* VMIDSEL - [8:7] */
+#define WM8955_VMIDSEL_WIDTH 2 /* VMIDSEL - [8:7] */
+#define WM8955_VREF 0x0040 /* VREF */
+#define WM8955_VREF_MASK 0x0040 /* VREF */
+#define WM8955_VREF_SHIFT 6 /* VREF */
+#define WM8955_VREF_WIDTH 1 /* VREF */
+#define WM8955_DIGENB 0x0001 /* DIGENB */
+#define WM8955_DIGENB_MASK 0x0001 /* DIGENB */
+#define WM8955_DIGENB_SHIFT 0 /* DIGENB */
+#define WM8955_DIGENB_WIDTH 1 /* DIGENB */
+
+/*
+ * R26 (0x1A) - Power Management (2)
+ */
+#define WM8955_DACL 0x0100 /* DACL */
+#define WM8955_DACL_MASK 0x0100 /* DACL */
+#define WM8955_DACL_SHIFT 8 /* DACL */
+#define WM8955_DACL_WIDTH 1 /* DACL */
+#define WM8955_DACR 0x0080 /* DACR */
+#define WM8955_DACR_MASK 0x0080 /* DACR */
+#define WM8955_DACR_SHIFT 7 /* DACR */
+#define WM8955_DACR_WIDTH 1 /* DACR */
+#define WM8955_LOUT1 0x0040 /* LOUT1 */
+#define WM8955_LOUT1_MASK 0x0040 /* LOUT1 */
+#define WM8955_LOUT1_SHIFT 6 /* LOUT1 */
+#define WM8955_LOUT1_WIDTH 1 /* LOUT1 */
+#define WM8955_ROUT1 0x0020 /* ROUT1 */
+#define WM8955_ROUT1_MASK 0x0020 /* ROUT1 */
+#define WM8955_ROUT1_SHIFT 5 /* ROUT1 */
+#define WM8955_ROUT1_WIDTH 1 /* ROUT1 */
+#define WM8955_LOUT2 0x0010 /* LOUT2 */
+#define WM8955_LOUT2_MASK 0x0010 /* LOUT2 */
+#define WM8955_LOUT2_SHIFT 4 /* LOUT2 */
+#define WM8955_LOUT2_WIDTH 1 /* LOUT2 */
+#define WM8955_ROUT2 0x0008 /* ROUT2 */
+#define WM8955_ROUT2_MASK 0x0008 /* ROUT2 */
+#define WM8955_ROUT2_SHIFT 3 /* ROUT2 */
+#define WM8955_ROUT2_WIDTH 1 /* ROUT2 */
+#define WM8955_MONO 0x0004 /* MONO */
+#define WM8955_MONO_MASK 0x0004 /* MONO */
+#define WM8955_MONO_SHIFT 2 /* MONO */
+#define WM8955_MONO_WIDTH 1 /* MONO */
+#define WM8955_OUT3 0x0002 /* OUT3 */
+#define WM8955_OUT3_MASK 0x0002 /* OUT3 */
+#define WM8955_OUT3_SHIFT 1 /* OUT3 */
+#define WM8955_OUT3_WIDTH 1 /* OUT3 */
+
+/*
+ * R27 (0x1B) - Additional Control (3)
+ */
+#define WM8955_VROI 0x0040 /* VROI */
+#define WM8955_VROI_MASK 0x0040 /* VROI */
+#define WM8955_VROI_SHIFT 6 /* VROI */
+#define WM8955_VROI_WIDTH 1 /* VROI */
+
+/*
+ * R34 (0x22) - Left out Mix (1)
+ */
+#define WM8955_LD2LO 0x0100 /* LD2LO */
+#define WM8955_LD2LO_MASK 0x0100 /* LD2LO */
+#define WM8955_LD2LO_SHIFT 8 /* LD2LO */
+#define WM8955_LD2LO_WIDTH 1 /* LD2LO */
+#define WM8955_LI2LO 0x0080 /* LI2LO */
+#define WM8955_LI2LO_MASK 0x0080 /* LI2LO */
+#define WM8955_LI2LO_SHIFT 7 /* LI2LO */
+#define WM8955_LI2LO_WIDTH 1 /* LI2LO */
+#define WM8955_LI2LOVOL_MASK 0x0070 /* LI2LOVOL - [6:4] */
+#define WM8955_LI2LOVOL_SHIFT 4 /* LI2LOVOL - [6:4] */
+#define WM8955_LI2LOVOL_WIDTH 3 /* LI2LOVOL - [6:4] */
+
+/*
+ * R35 (0x23) - Left out Mix (2)
+ */
+#define WM8955_RD2LO 0x0100 /* RD2LO */
+#define WM8955_RD2LO_MASK 0x0100 /* RD2LO */
+#define WM8955_RD2LO_SHIFT 8 /* RD2LO */
+#define WM8955_RD2LO_WIDTH 1 /* RD2LO */
+#define WM8955_RI2LO 0x0080 /* RI2LO */
+#define WM8955_RI2LO_MASK 0x0080 /* RI2LO */
+#define WM8955_RI2LO_SHIFT 7 /* RI2LO */
+#define WM8955_RI2LO_WIDTH 1 /* RI2LO */
+#define WM8955_RI2LOVOL_MASK 0x0070 /* RI2LOVOL - [6:4] */
+#define WM8955_RI2LOVOL_SHIFT 4 /* RI2LOVOL - [6:4] */
+#define WM8955_RI2LOVOL_WIDTH 3 /* RI2LOVOL - [6:4] */
+
+/*
+ * R36 (0x24) - Right out Mix (1)
+ */
+#define WM8955_LD2RO 0x0100 /* LD2RO */
+#define WM8955_LD2RO_MASK 0x0100 /* LD2RO */
+#define WM8955_LD2RO_SHIFT 8 /* LD2RO */
+#define WM8955_LD2RO_WIDTH 1 /* LD2RO */
+#define WM8955_LI2RO 0x0080 /* LI2RO */
+#define WM8955_LI2RO_MASK 0x0080 /* LI2RO */
+#define WM8955_LI2RO_SHIFT 7 /* LI2RO */
+#define WM8955_LI2RO_WIDTH 1 /* LI2RO */
+#define WM8955_LI2ROVOL_MASK 0x0070 /* LI2ROVOL - [6:4] */
+#define WM8955_LI2ROVOL_SHIFT 4 /* LI2ROVOL - [6:4] */
+#define WM8955_LI2ROVOL_WIDTH 3 /* LI2ROVOL - [6:4] */
+
+/*
+ * R37 (0x25) - Right Out Mix (2)
+ */
+#define WM8955_RD2RO 0x0100 /* RD2RO */
+#define WM8955_RD2RO_MASK 0x0100 /* RD2RO */
+#define WM8955_RD2RO_SHIFT 8 /* RD2RO */
+#define WM8955_RD2RO_WIDTH 1 /* RD2RO */
+#define WM8955_RI2RO 0x0080 /* RI2RO */
+#define WM8955_RI2RO_MASK 0x0080 /* RI2RO */
+#define WM8955_RI2RO_SHIFT 7 /* RI2RO */
+#define WM8955_RI2RO_WIDTH 1 /* RI2RO */
+#define WM8955_RI2ROVOL_MASK 0x0070 /* RI2ROVOL - [6:4] */
+#define WM8955_RI2ROVOL_SHIFT 4 /* RI2ROVOL - [6:4] */
+#define WM8955_RI2ROVOL_WIDTH 3 /* RI2ROVOL - [6:4] */
+
+/*
+ * R38 (0x26) - Mono out Mix (1)
+ */
+#define WM8955_LD2MO 0x0100 /* LD2MO */
+#define WM8955_LD2MO_MASK 0x0100 /* LD2MO */
+#define WM8955_LD2MO_SHIFT 8 /* LD2MO */
+#define WM8955_LD2MO_WIDTH 1 /* LD2MO */
+#define WM8955_LI2MO 0x0080 /* LI2MO */
+#define WM8955_LI2MO_MASK 0x0080 /* LI2MO */
+#define WM8955_LI2MO_SHIFT 7 /* LI2MO */
+#define WM8955_LI2MO_WIDTH 1 /* LI2MO */
+#define WM8955_LI2MOVOL_MASK 0x0070 /* LI2MOVOL - [6:4] */
+#define WM8955_LI2MOVOL_SHIFT 4 /* LI2MOVOL - [6:4] */
+#define WM8955_LI2MOVOL_WIDTH 3 /* LI2MOVOL - [6:4] */
+#define WM8955_DMEN 0x0001 /* DMEN */
+#define WM8955_DMEN_MASK 0x0001 /* DMEN */
+#define WM8955_DMEN_SHIFT 0 /* DMEN */
+#define WM8955_DMEN_WIDTH 1 /* DMEN */
+
+/*
+ * R39 (0x27) - Mono out Mix (2)
+ */
+#define WM8955_RD2MO 0x0100 /* RD2MO */
+#define WM8955_RD2MO_MASK 0x0100 /* RD2MO */
+#define WM8955_RD2MO_SHIFT 8 /* RD2MO */
+#define WM8955_RD2MO_WIDTH 1 /* RD2MO */
+#define WM8955_RI2MO 0x0080 /* RI2MO */
+#define WM8955_RI2MO_MASK 0x0080 /* RI2MO */
+#define WM8955_RI2MO_SHIFT 7 /* RI2MO */
+#define WM8955_RI2MO_WIDTH 1 /* RI2MO */
+#define WM8955_RI2MOVOL_MASK 0x0070 /* RI2MOVOL - [6:4] */
+#define WM8955_RI2MOVOL_SHIFT 4 /* RI2MOVOL - [6:4] */
+#define WM8955_RI2MOVOL_WIDTH 3 /* RI2MOVOL - [6:4] */
+
+/*
+ * R40 (0x28) - LOUT2 volume
+ */
+#define WM8955_LO2VU 0x0100 /* LO2VU */
+#define WM8955_LO2VU_MASK 0x0100 /* LO2VU */
+#define WM8955_LO2VU_SHIFT 8 /* LO2VU */
+#define WM8955_LO2VU_WIDTH 1 /* LO2VU */
+#define WM8955_LO2ZC 0x0080 /* LO2ZC */
+#define WM8955_LO2ZC_MASK 0x0080 /* LO2ZC */
+#define WM8955_LO2ZC_SHIFT 7 /* LO2ZC */
+#define WM8955_LO2ZC_WIDTH 1 /* LO2ZC */
+#define WM8955_LOUT2VOL_MASK 0x007F /* LOUT2VOL - [6:0] */
+#define WM8955_LOUT2VOL_SHIFT 0 /* LOUT2VOL - [6:0] */
+#define WM8955_LOUT2VOL_WIDTH 7 /* LOUT2VOL - [6:0] */
+
+/*
+ * R41 (0x29) - ROUT2 volume
+ */
+#define WM8955_RO2VU 0x0100 /* RO2VU */
+#define WM8955_RO2VU_MASK 0x0100 /* RO2VU */
+#define WM8955_RO2VU_SHIFT 8 /* RO2VU */
+#define WM8955_RO2VU_WIDTH 1 /* RO2VU */
+#define WM8955_RO2ZC 0x0080 /* RO2ZC */
+#define WM8955_RO2ZC_MASK 0x0080 /* RO2ZC */
+#define WM8955_RO2ZC_SHIFT 7 /* RO2ZC */
+#define WM8955_RO2ZC_WIDTH 1 /* RO2ZC */
+#define WM8955_ROUT2VOL_MASK 0x007F /* ROUT2VOL - [6:0] */
+#define WM8955_ROUT2VOL_SHIFT 0 /* ROUT2VOL - [6:0] */
+#define WM8955_ROUT2VOL_WIDTH 7 /* ROUT2VOL - [6:0] */
+
+/*
+ * R42 (0x2A) - MONOOUT volume
+ */
+#define WM8955_MOZC 0x0080 /* MOZC */
+#define WM8955_MOZC_MASK 0x0080 /* MOZC */
+#define WM8955_MOZC_SHIFT 7 /* MOZC */
+#define WM8955_MOZC_WIDTH 1 /* MOZC */
+#define WM8955_MOUTVOL_MASK 0x007F /* MOUTVOL - [6:0] */
+#define WM8955_MOUTVOL_SHIFT 0 /* MOUTVOL - [6:0] */
+#define WM8955_MOUTVOL_WIDTH 7 /* MOUTVOL - [6:0] */
+
+/*
+ * R43 (0x2B) - Clocking / PLL
+ */
+#define WM8955_MCLKSEL 0x0100 /* MCLKSEL */
+#define WM8955_MCLKSEL_MASK 0x0100 /* MCLKSEL */
+#define WM8955_MCLKSEL_SHIFT 8 /* MCLKSEL */
+#define WM8955_MCLKSEL_WIDTH 1 /* MCLKSEL */
+#define WM8955_PLLOUTDIV2 0x0020 /* PLLOUTDIV2 */
+#define WM8955_PLLOUTDIV2_MASK 0x0020 /* PLLOUTDIV2 */
+#define WM8955_PLLOUTDIV2_SHIFT 5 /* PLLOUTDIV2 */
+#define WM8955_PLLOUTDIV2_WIDTH 1 /* PLLOUTDIV2 */
+#define WM8955_PLL_RB 0x0010 /* PLL_RB */
+#define WM8955_PLL_RB_MASK 0x0010 /* PLL_RB */
+#define WM8955_PLL_RB_SHIFT 4 /* PLL_RB */
+#define WM8955_PLL_RB_WIDTH 1 /* PLL_RB */
+#define WM8955_PLLEN 0x0008 /* PLLEN */
+#define WM8955_PLLEN_MASK 0x0008 /* PLLEN */
+#define WM8955_PLLEN_SHIFT 3 /* PLLEN */
+#define WM8955_PLLEN_WIDTH 1 /* PLLEN */
+
+/*
+ * R44 (0x2C) - PLL Control 1
+ */
+#define WM8955_N_MASK 0x01E0 /* N - [8:5] */
+#define WM8955_N_SHIFT 5 /* N - [8:5] */
+#define WM8955_N_WIDTH 4 /* N - [8:5] */
+#define WM8955_K_21_18_MASK 0x000F /* K(21:18) - [3:0] */
+#define WM8955_K_21_18_SHIFT 0 /* K(21:18) - [3:0] */
+#define WM8955_K_21_18_WIDTH 4 /* K(21:18) - [3:0] */
+
+/*
+ * R45 (0x2D) - PLL Control 2
+ */
+#define WM8955_K_17_9_MASK 0x01FF /* K(17:9) - [8:0] */
+#define WM8955_K_17_9_SHIFT 0 /* K(17:9) - [8:0] */
+#define WM8955_K_17_9_WIDTH 9 /* K(17:9) - [8:0] */
+
+/*
+ * R46 (0x2E) - PLL Control 3
+ */
+#define WM8955_K_8_0_MASK 0x01FF /* K(8:0) - [8:0] */
+#define WM8955_K_8_0_SHIFT 0 /* K(8:0) - [8:0] */
+#define WM8955_K_8_0_WIDTH 9 /* K(8:0) - [8:0] */
+
+/*
+ * R59 (0x3B) - PLL Control 4
+ */
+#define WM8955_KEN 0x0080 /* KEN */
+#define WM8955_KEN_MASK 0x0080 /* KEN */
+#define WM8955_KEN_SHIFT 7 /* KEN */
+#define WM8955_KEN_WIDTH 1 /* KEN */
+
+#endif
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index a8007d58813f..d2342c5e0425 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -1022,6 +1022,9 @@ static int wm8961_resume(struct platform_device *pdev)
int i;
for (i = 0; i < codec->reg_cache_size; i++) {
+ if (reg_cache[i] == wm8961_reg_defaults[i])
+ continue;
+
if (i == WM8961_SOFTWARE_RESET)
continue;
diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c
index 341481e0e830..a54dc77b7f34 100644
--- a/sound/soc/codecs/wm8990.c
+++ b/sound/soc/codecs/wm8990.c
@@ -1319,10 +1319,6 @@ static int wm8990_suspend(struct platform_device *pdev, pm_message_t state)
struct snd_soc_device *socdev = platform_get_drvdata(pdev);
struct snd_soc_codec *codec = socdev->card->codec;
- /* we only need to suspend if we are a valid card */
- if (!codec->card)
- return 0;
-
wm8990_set_bias_level(codec, SND_SOC_BIAS_OFF);
return 0;
}
@@ -1335,10 +1331,6 @@ static int wm8990_resume(struct platform_device *pdev)
u8 data[2];
u16 *cache = codec->reg_cache;
- /* we only need to resume if we are a valid card */
- if (!codec->card)
- return 0;
-
/* Sync reg_cache with the hardware */
for (i = 0; i < ARRAY_SIZE(wm8990_reg); i++) {
if (i + 1 == WM8990_RESET)
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 2981afae842c..828d8174d5b7 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -227,6 +227,7 @@ struct wm8993_priv {
int class_w_users;
unsigned int fll_fref;
unsigned int fll_fout;
+ int fll_src;
};
static unsigned int wm8993_read_hw(struct snd_soc_codec *codec, u8 reg)
@@ -506,6 +507,7 @@ static int wm8993_set_fll(struct snd_soc_dai *dai, int fll_id, int source,
wm8993->fll_fref = Fref;
wm8993->fll_fout = Fout;
+ wm8993->fll_src = source;
return 0;
}
@@ -1480,9 +1482,74 @@ static int wm8993_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int wm8993_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+ int fll_fout = wm8993->fll_fout;
+ int fll_fref = wm8993->fll_fref;
+ int ret;
+
+ /* Stop the FLL in an orderly fashion */
+ ret = wm8993_set_fll(codec->dai, 0, 0, 0, 0);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to stop FLL\n");
+ return ret;
+ }
+
+ wm8993->fll_fout = fll_fout;
+ wm8993->fll_fref = fll_fref;
+
+ wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF);
+
+ return 0;
+}
+
+static int wm8993_resume(struct platform_device *pdev)
+{
+ struct snd_soc_device *socdev = platform_get_drvdata(pdev);
+ struct snd_soc_codec *codec = socdev->card->codec;
+ struct wm8993_priv *wm8993 = codec->private_data;
+ u16 *cache = wm8993->reg_cache;
+ int i, ret;
+
+ /* Restore the register settings */
+ for (i = 1; i < WM8993_MAX_REGISTER; i++) {
+ if (cache[i] == wm8993_reg_defaults[i])
+ continue;
+ snd_soc_write(codec, i, cache[i]);
+ }
+
+ wm8993_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+
+ /* Restart the FLL? */
+ if (wm8993->fll_fout) {
+ int fll_fout = wm8993->fll_fout;
+ int fll_fref = wm8993->fll_fref;
+
+ wm8993->fll_fref = 0;
+ wm8993->fll_fout = 0;
+
+ ret = wm8993_set_fll(codec->dai, 0, wm8993->fll_src,
+ fll_fref, fll_fout);
+ if (ret != 0)
+ dev_err(codec->dev, "Failed to restart FLL\n");
+ }
+
+ return 0;
+}
+#else
+#define wm8993_suspend NULL
+#define wm8993_resume NULL
+#endif
+
struct snd_soc_codec_device soc_codec_dev_wm8993 = {
.probe = wm8993_probe,
.remove = wm8993_remove,
+ .suspend = wm8993_suspend,
+ .resume = wm8993_resume,
};
EXPORT_SYMBOL_GPL(soc_codec_dev_wm8993);
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 0a302e1080d9..a613bbb0bc91 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -767,14 +767,27 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
int ret = 0;
switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
+ if (!dev->clk_active) {
+ clk_enable(dev->clk);
+ dev->clk_active = 1;
+ }
+ /* Fall through */
+ case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
davinci_mcasp_start(dev, substream->stream);
break;
- case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
+ davinci_mcasp_stop(dev, substream->stream);
+ if (dev->clk_active) {
+ clk_disable(dev->clk);
+ dev->clk_active = 0;
+ }
+
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
davinci_mcasp_stop(dev, substream->stream);
break;
@@ -866,6 +879,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
}
clk_enable(dev->clk);
+ dev->clk_active = 1;
dev->base = (void __iomem *)IO_ADDRESS(mem->start);
dev->op_mode = pdata->op_mode;
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 582c9249ef09..e755b5121ec7 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -44,6 +44,7 @@ struct davinci_audio_dev {
int sample_rate;
struct clk *clk;
unsigned int codec_fmt;
+ u8 clk_active;
/* McASP specific data */
int tdm_slots;
diff --git a/sound/soc/davinci/davinci-pcm.c b/sound/soc/davinci/davinci-pcm.c
index ad4d7f47a86b..80c7fdf2f521 100644
--- a/sound/soc/davinci/davinci-pcm.c
+++ b/sound/soc/davinci/davinci-pcm.c
@@ -49,7 +49,7 @@ static void print_buf_info(int slot, char *name)
static struct snd_pcm_hardware pcm_hardware_playback = {
.info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_PAUSE),
+ SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
.formats = (SNDRV_PCM_FMTBIT_S16_LE),
.rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |
SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_32000 |
diff --git a/sound/soc/s3c24xx/s3c64xx-i2s.c b/sound/soc/s3c24xx/s3c64xx-i2s.c
index cc7edb5f792d..93ed3aad1631 100644
--- a/sound/soc/s3c24xx/s3c64xx-i2s.c
+++ b/sound/soc/s3c24xx/s3c64xx-i2s.c
@@ -15,16 +15,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
-#include <linux/delay.h>
#include <linux/clk.h>
-#include <linux/kernel.h>
#include <linux/gpio.h>
#include <linux/io.h>
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
#include <sound/soc.h>
#include <plat/regs-s3c2412-iis.h>
@@ -38,6 +32,11 @@
#include "s3c-dma.h"
#include "s3c64xx-i2s.h"
+/* The value should be set to maximum of the total number
+ * of I2Sv3 controllers that any supported SoC has.
+ */
+#define MAX_I2SV3 2
+
static struct s3c2410_dma_client s3c64xx_dma_client_out = {
.name = "I2S PCM Stereo out"
};
@@ -46,37 +45,12 @@ static struct s3c2410_dma_client s3c64xx_dma_client_in = {
.name = "I2S PCM Stereo in"
};
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_out[2] = {
- [0] = {
- .channel = DMACH_I2S0_OUT,
- .client = &s3c64xx_dma_client_out,
- .dma_addr = S3C64XX_PA_IIS0 + S3C2412_IISTXD,
- .dma_size = 4,
- },
- [1] = {
- .channel = DMACH_I2S1_OUT,
- .client = &s3c64xx_dma_client_out,
- .dma_addr = S3C64XX_PA_IIS1 + S3C2412_IISTXD,
- .dma_size = 4,
- },
-};
-
-static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_in[2] = {
- [0] = {
- .channel = DMACH_I2S0_IN,
- .client = &s3c64xx_dma_client_in,
- .dma_addr = S3C64XX_PA_IIS0 + S3C2412_IISRXD,
- .dma_size = 4,
- },
- [1] = {
- .channel = DMACH_I2S1_IN,
- .client = &s3c64xx_dma_client_in,
- .dma_addr = S3C64XX_PA_IIS1 + S3C2412_IISRXD,
- .dma_size = 4,
- },
-};
+static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_out[MAX_I2SV3];
+static struct s3c_dma_params s3c64xx_i2s_pcm_stereo_in[MAX_I2SV3];
+static struct s3c_i2sv2_info s3c64xx_i2s[MAX_I2SV3];
-static struct s3c_i2sv2_info s3c64xx_i2s[2];
+struct snd_soc_dai s3c64xx_i2s_dai[MAX_I2SV3];
+EXPORT_SYMBOL_GPL(s3c64xx_i2s_dai);
static inline struct s3c_i2sv2_info *to_info(struct snd_soc_dai *cpu_dai)
{
@@ -169,55 +143,13 @@ static struct snd_soc_dai_ops s3c64xx_i2s_dai_ops = {
.set_sysclk = s3c64xx_i2s_set_sysclk,
};
-struct snd_soc_dai s3c64xx_i2s_dai[] = {
- {
- .name = "s3c64xx-i2s",
- .id = 0,
- .probe = s3c64xx_i2s_probe,
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = S3C64XX_I2S_RATES,
- .formats = S3C64XX_I2S_FMTS,
- },
- .capture = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = S3C64XX_I2S_RATES,
- .formats = S3C64XX_I2S_FMTS,
- },
- .ops = &s3c64xx_i2s_dai_ops,
- .symmetric_rates = 1,
- },
- {
- .name = "s3c64xx-i2s",
- .id = 1,
- .probe = s3c64xx_i2s_probe,
- .playback = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = S3C64XX_I2S_RATES,
- .formats = S3C64XX_I2S_FMTS,
- },
- .capture = {
- .channels_min = 2,
- .channels_max = 2,
- .rates = S3C64XX_I2S_RATES,
- .formats = S3C64XX_I2S_FMTS,
- },
- .ops = &s3c64xx_i2s_dai_ops,
- .symmetric_rates = 1,
- },
-};
-EXPORT_SYMBOL_GPL(s3c64xx_i2s_dai);
-
static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
{
struct s3c_i2sv2_info *i2s;
struct snd_soc_dai *dai;
int ret;
- if (pdev->id >= ARRAY_SIZE(s3c64xx_i2s)) {
+ if (pdev->id >= MAX_I2SV3) {
dev_err(&pdev->dev, "id %d out of range\n", pdev->id);
return -EINVAL;
}
@@ -225,10 +157,40 @@ static __devinit int s3c64xx_iis_dev_probe(struct platform_device *pdev)
i2s = &s3c64xx_i2s[pdev->id];
dai = &s3c64xx_i2s_dai[pdev->id];
dai->dev = &pdev->dev;
+ dai->name = "s3c64xx-i2s";
+ dai->id = pdev->id;
+ dai->symmetric_rates = 1;
+ dai->playback.channels_min = 2;
+ dai->playback.channels_max = 2;
+ dai->playback.rates = S3C64XX_I2S_RATES;
+ dai->playback.formats = S3C64XX_I2S_FMTS;
+ dai->capture.channels_min = 2;
+ dai->capture.channels_max = 2;
+ dai->capture.rates = S3C64XX_I2S_RATES;
+ dai->capture.formats = S3C64XX_I2S_FMTS;
+ dai->probe = s3c64xx_i2s_probe;
+ dai->ops = &s3c64xx_i2s_dai_ops;
i2s->dma_capture = &s3c64xx_i2s_pcm_stereo_in[pdev->id];
i2s->dma_playback = &s3c64xx_i2s_pcm_stereo_out[pdev->id];
+ if (pdev->id == 0) {
+ i2s->dma_capture->channel = DMACH_I2S0_IN;
+ i2s->dma_capture->dma_addr = S3C64XX_PA_IIS0 + S3C2412_IISRXD;
+ i2s->dma_playback->channel = DMACH_I2S0_OUT;
+ i2s->dma_playback->dma_addr = S3C64XX_PA_IIS0 + S3C2412_IISTXD;
+ } else {
+ i2s->dma_capture->channel = DMACH_I2S1_IN;
+ i2s->dma_capture->dma_addr = S3C64XX_PA_IIS1 + S3C2412_IISRXD;
+ i2s->dma_playback->channel = DMACH_I2S1_OUT;
+ i2s->dma_playback->dma_addr = S3C64XX_PA_IIS1 + S3C2412_IISTXD;
+ }
+
+ i2s->dma_capture->client = &s3c64xx_dma_client_in;
+ i2s->dma_capture->dma_size = 4;
+ i2s->dma_playback->client = &s3c64xx_dma_client_out;
+ i2s->dma_playback->dma_size = 4;
+
i2s->iis_cclk = clk_get(&pdev->dev, "audio-bus");
if (IS_ERR(i2s->iis_cclk)) {
dev_err(&pdev->dev, "failed to get audio-bus\n");
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 9e6976586554..8072a6d1c4db 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -47,4 +47,12 @@ config SND_FSI_AK4642
This option enables generic sound support for the
FSI - AK4642 unit
+config SND_FSI_DA7210
+ bool "FSI-DA7210 sound support"
+ depends on SND_SOC_SH4_FSI
+ select SND_SOC_DA7210
+ help
+ This option enables generic sound support for the
+ FSI - DA7210 unit
+
endmenu
diff --git a/sound/soc/sh/Makefile b/sound/soc/sh/Makefile
index a6997872f24e..1d0ec0af74b7 100644
--- a/sound/soc/sh/Makefile
+++ b/sound/soc/sh/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_SND_SOC_SH4_FSI) += snd-soc-fsi.o
## boards
snd-soc-sh7760-ac97-objs := sh7760-ac97.o
snd-soc-fsi-ak4642-objs := fsi-ak4642.o
+snd-soc-fsi-da7210-objs := fsi-da7210.o
obj-$(CONFIG_SND_SH7760_AC97) += snd-soc-sh7760-ac97.o
obj-$(CONFIG_SND_FSI_AK4642) += snd-soc-fsi-ak4642.o
+obj-$(CONFIG_SND_FSI_DA7210) += snd-soc-fsi-da7210.o
diff --git a/sound/soc/sh/fsi-da7210.c b/sound/soc/sh/fsi-da7210.c
new file mode 100644
index 000000000000..33b4d177f466
--- /dev/null
+++ b/sound/soc/sh/fsi-da7210.c
@@ -0,0 +1,83 @@
+/*
+ * fsi-da7210.c
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/i2c.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <sound/sh_fsi.h>
+#include "../codecs/da7210.h"
+
+static int fsi_da7210_init(struct snd_soc_codec *codec)
+{
+ return snd_soc_dai_set_fmt(&da7210_dai,
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM);
+}
+
+static struct snd_soc_dai_link fsi_da7210_dai = {
+ .name = "DA7210",
+ .stream_name = "DA7210",
+ .cpu_dai = &fsi_soc_dai[1], /* FSI B */
+ .codec_dai = &da7210_dai,
+ .init = fsi_da7210_init,
+};
+
+static struct snd_soc_card fsi_soc_card = {
+ .name = "FSI",
+ .platform = &fsi_soc_platform,
+ .dai_link = &fsi_da7210_dai,
+ .num_links = 1,
+};
+
+static struct snd_soc_device fsi_da7210_snd_devdata = {
+ .card = &fsi_soc_card,
+ .codec_dev = &soc_codec_dev_da7210,
+};
+
+static struct platform_device *fsi_da7210_snd_device;
+
+static int __init fsi_da7210_sound_init(void)
+{
+ int ret;
+
+ fsi_da7210_snd_device = platform_device_alloc("soc-audio", -1);
+ if (!fsi_da7210_snd_device)
+ return -ENOMEM;
+
+ platform_set_drvdata(fsi_da7210_snd_device, &fsi_da7210_snd_devdata);
+ fsi_da7210_snd_devdata.dev = &fsi_da7210_snd_device->dev;
+ ret = platform_device_add(fsi_da7210_snd_device);
+ if (ret)
+ platform_device_put(fsi_da7210_snd_device);
+
+ return ret;
+}
+
+static void __exit fsi_da7210_sound_exit(void)
+{
+ platform_device_unregister(fsi_da7210_snd_device);
+}
+
+module_init(fsi_da7210_sound_init);
+module_exit(fsi_da7210_sound_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("ALSA SoC FSI DA2710");
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 42813b808389..5f9f2693f4eb 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -67,6 +67,7 @@
/* DOFF_ST */
#define ERR_OVER 0x00000010
#define ERR_UNDER 0x00000001
+#define ST_ERR (ERR_OVER | ERR_UNDER)
/* CLK_RST */
#define B_CLK 0x00000010
@@ -92,6 +93,7 @@
struct fsi_priv {
void __iomem *base;
struct snd_pcm_substream *substream;
+ struct fsi_master *master;
int fifo_max;
int chan;
@@ -110,8 +112,6 @@ struct fsi_master {
struct sh_fsi_platform_info *info;
};
-static struct fsi_master *master;
-
/************************************************************************
@@ -166,7 +166,7 @@ static int fsi_reg_mask_set(struct fsi_priv *fsi, u32 reg, u32 mask, u32 data)
return __fsi_reg_mask_set((u32)(fsi->base + reg), mask, data);
}
-static int fsi_master_write(u32 reg, u32 data)
+static int fsi_master_write(struct fsi_master *master, u32 reg, u32 data)
{
if ((reg < MREG_START) ||
(reg > MREG_END))
@@ -175,7 +175,7 @@ static int fsi_master_write(u32 reg, u32 data)
return __fsi_reg_write((u32)(master->base + reg), data);
}
-static u32 fsi_master_read(u32 reg)
+static u32 fsi_master_read(struct fsi_master *master, u32 reg)
{
if ((reg < MREG_START) ||
(reg > MREG_END))
@@ -184,7 +184,8 @@ static u32 fsi_master_read(u32 reg)
return __fsi_reg_read((u32)(master->base + reg));
}
-static int fsi_master_mask_set(u32 reg, u32 mask, u32 data)
+static int fsi_master_mask_set(struct fsi_master *master,
+ u32 reg, u32 mask, u32 data)
{
if ((reg < MREG_START) ||
(reg > MREG_END))
@@ -200,43 +201,35 @@ static int fsi_master_mask_set(u32 reg, u32 mask, u32 data)
************************************************************************/
-static struct fsi_priv *fsi_get(struct snd_pcm_substream *substream)
+static struct fsi_master *fsi_get_master(struct fsi_priv *fsi)
{
- struct snd_soc_pcm_runtime *rtd;
- struct fsi_priv *fsi = NULL;
+ return fsi->master;
+}
- if (!substream || !master)
- return NULL;
+static int fsi_is_port_a(struct fsi_priv *fsi)
+{
+ return fsi->master->base == fsi->base;
+}
- rtd = substream->private_data;
- switch (rtd->dai->cpu_dai->id) {
- case 0:
- fsi = &master->fsia;
- break;
- case 1:
- fsi = &master->fsib;
- break;
- }
+static struct snd_soc_dai *fsi_get_dai(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai_link *machine = rtd->dai;
- return fsi;
+ return machine->cpu_dai;
}
-static int fsi_is_port_a(struct fsi_priv *fsi)
+static struct fsi_priv *fsi_get_priv(struct snd_pcm_substream *substream)
{
- /* return
- * 1 : port a
- * 0 : port b
- */
-
- if (fsi == &master->fsia)
- return 1;
+ struct snd_soc_dai *dai = fsi_get_dai(substream);
- return 0;
+ return dai->private_data;
}
static u32 fsi_get_info_flags(struct fsi_priv *fsi)
{
int is_porta = fsi_is_port_a(fsi);
+ struct fsi_master *master = fsi_get_master(fsi);
return is_porta ? master->info->porta_flags :
master->info->portb_flags;
@@ -314,27 +307,30 @@ static int fsi_get_fifo_residue(struct fsi_priv *fsi, int is_play)
static void fsi_irq_enable(struct fsi_priv *fsi, int is_play)
{
u32 data = fsi_port_ab_io_bit(fsi, is_play);
+ struct fsi_master *master = fsi_get_master(fsi);
- fsi_master_mask_set(IMSK, data, data);
- fsi_master_mask_set(IEMSK, data, data);
+ fsi_master_mask_set(master, IMSK, data, data);
+ fsi_master_mask_set(master, IEMSK, data, data);
}
static void fsi_irq_disable(struct fsi_priv *fsi, int is_play)
{
u32 data = fsi_port_ab_io_bit(fsi, is_play);
+ struct fsi_master *master = fsi_get_master(fsi);
- fsi_master_mask_set(IMSK, data, 0);
- fsi_master_mask_set(IEMSK, data, 0);
+ fsi_master_mask_set(master, IMSK, data, 0);
+ fsi_master_mask_set(master, IEMSK, data, 0);
}
static void fsi_clk_ctrl(struct fsi_priv *fsi, int enable)
{
u32 val = fsi_is_port_a(fsi) ? (1 << 0) : (1 << 4);
+ struct fsi_master *master = fsi_get_master(fsi);
if (enable)
- fsi_master_mask_set(CLK_RST, val, val);
+ fsi_master_mask_set(master, CLK_RST, val, val);
else
- fsi_master_mask_set(CLK_RST, val, 0);
+ fsi_master_mask_set(master, CLK_RST, val, 0);
}
static void fsi_irq_init(struct fsi_priv *fsi, int is_play)
@@ -355,23 +351,23 @@ static void fsi_irq_init(struct fsi_priv *fsi, int is_play)
fsi_reg_mask_set(fsi, ctrl, FIFO_CLR, FIFO_CLR);
/* clear interrupt factor */
- fsi_master_mask_set(INT_ST, data, 0);
+ fsi_master_mask_set(fsi_get_master(fsi), INT_ST, data, 0);
}
-static void fsi_soft_all_reset(void)
+static void fsi_soft_all_reset(struct fsi_master *master)
{
- u32 status = fsi_master_read(SOFT_RST);
+ u32 status = fsi_master_read(master, SOFT_RST);
/* port AB reset */
status &= 0x000000ff;
- fsi_master_write(SOFT_RST, status);
+ fsi_master_write(master, SOFT_RST, status);
mdelay(10);
/* soft reset */
status &= 0x000000f0;
- fsi_master_write(SOFT_RST, status);
+ fsi_master_write(master, SOFT_RST, status);
status |= 0x00000001;
- fsi_master_write(SOFT_RST, status);
+ fsi_master_write(master, SOFT_RST, status);
mdelay(10);
}
@@ -380,18 +376,21 @@ static int fsi_data_push(struct fsi_priv *fsi)
{
struct snd_pcm_runtime *runtime;
struct snd_pcm_substream *substream = NULL;
+ u32 status;
int send;
int fifo_free;
int width;
u8 *start;
- int i;
+ int i, ret, over_period;
if (!fsi ||
!fsi->substream ||
!fsi->substream->runtime)
return -EINVAL;
- runtime = fsi->substream->runtime;
+ over_period = 0;
+ substream = fsi->substream;
+ runtime = substream->runtime;
/* FSI FIFO has limit.
* So, this driver can not send periods data at a time
@@ -399,7 +398,7 @@ static int fsi_data_push(struct fsi_priv *fsi)
if (fsi->byte_offset >=
fsi->period_len * (fsi->periods + 1)) {
- substream = fsi->substream;
+ over_period = 1;
fsi->periods = (fsi->periods + 1) % runtime->periods;
if (0 == fsi->periods)
@@ -438,30 +437,42 @@ static int fsi_data_push(struct fsi_priv *fsi)
fsi->byte_offset += send * width;
+ ret = 0;
+ status = fsi_reg_read(fsi, DOFF_ST);
+ if (status & ERR_OVER) {
+ struct snd_soc_dai *dai = fsi_get_dai(substream);
+ dev_err(dai->dev, "over run error\n");
+ fsi_reg_write(fsi, DOFF_ST, status & ~ST_ERR);
+ ret = -EIO;
+ }
+
fsi_irq_enable(fsi, 1);
- if (substream)
+ if (over_period)
snd_pcm_period_elapsed(substream);
- return 0;
+ return ret;
}
static int fsi_data_pop(struct fsi_priv *fsi)
{
struct snd_pcm_runtime *runtime;
struct snd_pcm_substream *substream = NULL;
+ u32 status;
int free;
int fifo_fill;
int width;
u8 *start;
- int i;
+ int i, ret, over_period;
if (!fsi ||
!fsi->substream ||
!fsi->substream->runtime)
return -EINVAL;
- runtime = fsi->substream->runtime;
+ over_period = 0;
+ substream = fsi->substream;
+ runtime = substream->runtime;
/* FSI FIFO has limit.
* So, this driver can not send periods data at a time
@@ -469,7 +480,7 @@ static int fsi_data_pop(struct fsi_priv *fsi)
if (fsi->byte_offset >=
fsi->period_len * (fsi->periods + 1)) {
- substream = fsi->substream;
+ over_period = 1;
fsi->periods = (fsi->periods + 1) % runtime->periods;
if (0 == fsi->periods)
@@ -507,22 +518,32 @@ static int fsi_data_pop(struct fsi_priv *fsi)
fsi->byte_offset += fifo_fill * width;
+ ret = 0;
+ status = fsi_reg_read(fsi, DIFF_ST);
+ if (status & ERR_UNDER) {
+ struct snd_soc_dai *dai = fsi_get_dai(substream);
+ dev_err(dai->dev, "under run error\n");
+ fsi_reg_write(fsi, DIFF_ST, status & ~ST_ERR);
+ ret = -EIO;
+ }
+
fsi_irq_enable(fsi, 0);
- if (substream)
+ if (over_period)
snd_pcm_period_elapsed(substream);
- return 0;
+ return ret;
}
static irqreturn_t fsi_interrupt(int irq, void *data)
{
- u32 status = fsi_master_read(SOFT_RST) & ~0x00000010;
- u32 int_st = fsi_master_read(INT_ST);
+ struct fsi_master *master = data;
+ u32 status = fsi_master_read(master, SOFT_RST) & ~0x00000010;
+ u32 int_st = fsi_master_read(master, INT_ST);
/* clear irq status */
- fsi_master_write(SOFT_RST, status);
- fsi_master_write(SOFT_RST, status | 0x00000010);
+ fsi_master_write(master, SOFT_RST, status);
+ fsi_master_write(master, SOFT_RST, status | 0x00000010);
if (int_st & INT_A_OUT)
fsi_data_push(&master->fsia);
@@ -533,7 +554,7 @@ static irqreturn_t fsi_interrupt(int irq, void *data)
if (int_st & INT_B_IN)
fsi_data_pop(&master->fsib);
- fsi_master_write(INT_ST, 0x0000000);
+ fsi_master_write(master, INT_ST, 0x0000000);
return IRQ_HANDLED;
}
@@ -548,7 +569,7 @@ static irqreturn_t fsi_interrupt(int irq, void *data)
static int fsi_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct fsi_priv *fsi = fsi_get(substream);
+ struct fsi_priv *fsi = fsi_get_priv(substream);
const char *msg;
u32 flags = fsi_get_info_flags(fsi);
u32 fmt;
@@ -667,7 +688,7 @@ static int fsi_dai_startup(struct snd_pcm_substream *substream,
static void fsi_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct fsi_priv *fsi = fsi_get(substream);
+ struct fsi_priv *fsi = fsi_get_priv(substream);
int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
fsi_irq_disable(fsi, is_play);
@@ -679,7 +700,7 @@ static void fsi_dai_shutdown(struct snd_pcm_substream *substream,
static int fsi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
- struct fsi_priv *fsi = fsi_get(substream);
+ struct fsi_priv *fsi = fsi_get_priv(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
int ret = 0;
@@ -760,7 +781,7 @@ static int fsi_hw_free(struct snd_pcm_substream *substream)
static snd_pcm_uframes_t fsi_pointer(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct fsi_priv *fsi = fsi_get(substream);
+ struct fsi_priv *fsi = fsi_get_priv(substream);
long location;
location = (fsi->byte_offset - 1);
@@ -870,10 +891,16 @@ EXPORT_SYMBOL_GPL(fsi_soc_platform);
************************************************************************/
static int fsi_probe(struct platform_device *pdev)
{
+ struct fsi_master *master;
struct resource *res;
unsigned int irq;
int ret;
+ if (0 != pdev->id) {
+ dev_err(&pdev->dev, "current fsi support id 0 only now\n");
+ return -ENODEV;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
if (!res || (int)irq <= 0) {
@@ -899,15 +926,19 @@ static int fsi_probe(struct platform_device *pdev)
master->irq = irq;
master->info = pdev->dev.platform_data;
master->fsia.base = master->base;
+ master->fsia.master = master;
master->fsib.base = master->base + 0x40;
+ master->fsib.master = master;
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
fsi_soc_dai[0].dev = &pdev->dev;
+ fsi_soc_dai[0].private_data = &master->fsia;
fsi_soc_dai[1].dev = &pdev->dev;
+ fsi_soc_dai[1].private_data = &master->fsib;
- fsi_soft_all_reset();
+ fsi_soft_all_reset(master);
ret = request_irq(irq, &fsi_interrupt, IRQF_DISABLED, "fsi", master);
if (ret) {
@@ -937,6 +968,10 @@ exit:
static int fsi_remove(struct platform_device *pdev)
{
+ struct fsi_master *master;
+
+ master = fsi_get_master(fsi_soc_dai[0].private_data);
+
snd_soc_unregister_dais(fsi_soc_dai, ARRAY_SIZE(fsi_soc_dai));
snd_soc_unregister_platform(&fsi_soc_platform);
@@ -946,7 +981,12 @@ static int fsi_remove(struct platform_device *pdev)
iounmap(master->base);
kfree(master);
- master = NULL;
+
+ fsi_soc_dai[0].dev = NULL;
+ fsi_soc_dai[0].private_data = NULL;
+ fsi_soc_dai[1].dev = NULL;
+ fsi_soc_dai[1].private_data = NULL;
+
return 0;
}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index d2505e8b06c9..02c235711bb8 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -182,7 +182,7 @@ static struct {
{
.addr_bits = 7, .data_bits = 9,
.write = snd_soc_7_9_write, .read = snd_soc_7_9_read,
- .spi_write = snd_soc_7_9_spi_write
+ .spi_write = snd_soc_7_9_spi_write,
},
{
.addr_bits = 8, .data_bits = 8,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0a6440c6f54a..9085b40fa04b 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1276,8 +1276,8 @@ static int soc_new_pcm(struct snd_soc_device *socdev,
codec_dai->codec = card->codec;
/* check client and interface hw capabilities */
- sprintf(new_name, "%s %s-%d", dai_link->stream_name, codec_dai->name,
- num);
+ snprintf(new_name, sizeof(new_name), "%s %s-%d",
+ dai_link->stream_name, codec_dai->name, num);
if (codec_dai->playback.channels_min)
playback = 1;
@@ -1427,9 +1427,9 @@ EXPORT_SYMBOL_GPL(snd_soc_update_bits);
*
* Returns 1 for change else 0.
*/
-static int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
- unsigned short reg, unsigned int mask,
- unsigned int value)
+int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
+ unsigned short reg, unsigned int mask,
+ unsigned int value)
{
int change;
@@ -1439,6 +1439,7 @@ static int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
return change;
}
+EXPORT_SYMBOL_GPL(snd_soc_update_bits_locked);
/**
* snd_soc_test_bits - test register for change
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 0d294ef72590..de22c2f1842e 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -739,6 +739,8 @@ static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
struct snd_soc_dapm_widget *b,
int sort[])
{
+ if (a->codec != b->codec)
+ return (unsigned long)a - (unsigned long)b;
if (sort[a->id] != sort[b->id])
return sort[a->id] - sort[b->id];
if (a->reg != b->reg)
@@ -1147,9 +1149,16 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
out = is_connected_output_ep(w);
dapm_clear_walk(w->codec);
- ret = snprintf(buf, PAGE_SIZE, "%s: %s in %d out %d\n",
+ ret = snprintf(buf, PAGE_SIZE, "%s: %s in %d out %d",
w->name, w->power ? "On" : "Off", in, out);
+ if (w->reg >= 0)
+ ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ " - R%d(0x%x) bit %d",
+ w->reg, w->reg, w->shift);
+
+ ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+
if (w->sname)
ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
w->sname,
@@ -1255,8 +1264,7 @@ static int dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
/* test and update the power status of a mixer or switch widget */
static int dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
- struct snd_kcontrol *kcontrol, int reg,
- int val_mask, int val, int invert)
+ struct snd_kcontrol *kcontrol, int connect)
{
struct snd_soc_dapm_path *path;
int found = 0;
@@ -1266,9 +1274,6 @@ static int dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
widget->id != snd_soc_dapm_switch)
return -ENODEV;
- if (!snd_soc_test_bits(widget->codec, reg, val_mask, val))
- return 0;
-
/* find dapm widget path assoc with kcontrol */
list_for_each_entry(path, &widget->codec->dapm_paths, list) {
if (path->kcontrol != kcontrol)
@@ -1276,12 +1281,7 @@ static int dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
/* found, now check type */
found = 1;
- if (val)
- /* new connection */
- path->connect = invert ? 0:1;
- else
- /* old connection must be powered down */
- path->connect = invert ? 1:0;
+ path->connect = connect;
break;
}
@@ -1688,6 +1688,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned int val, val2, val_mask;
+ int connect;
int ret;
val = (ucontrol->value.integer.value[0] & mask);
@@ -1714,7 +1715,17 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
return 1;
}
- dapm_mixer_update_power(widget, kcontrol, reg, val_mask, val, invert);
+ if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
+ if (val)
+ /* new connection */
+ connect = invert ? 0:1;
+ else
+ /* old connection must be powered down */
+ connect = invert ? 1:0;
+
+ dapm_mixer_update_power(widget, kcontrol, connect);
+ }
+
if (widget->event) {
if (widget->event_flags & SND_SOC_DAPM_PRE_REG) {
ret = widget->event(widget, kcontrol,
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index 73525c048e7f..8c2925814ce4 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -21,6 +21,18 @@ config SND_USB_AUDIO
To compile this driver as a module, choose M here: the module
will be called snd-usb-audio.
+config SND_USB_UA101
+ tristate "Edirol UA-101 driver (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ select SND_PCM
+ select SND_RAWMIDI
+ help
+ Say Y here to include support for the Edirol UA-101 audio/MIDI
+ interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called snd-ua101.
+
config SND_USB_USX2Y
tristate "Tascam US-122, US-224 and US-428 USB driver"
depends on X86 || PPC || ALPHA
diff --git a/sound/usb/Makefile b/sound/usb/Makefile
index abb288bfe35d..5bf64aef9558 100644
--- a/sound/usb/Makefile
+++ b/sound/usb/Makefile
@@ -4,9 +4,11 @@
snd-usb-audio-objs := usbaudio.o usbmixer.o
snd-usb-lib-objs := usbmidi.o
+snd-ua101-objs := ua101.o
# Toplevel Module Dependency
obj-$(CONFIG_SND_USB_AUDIO) += snd-usb-audio.o snd-usb-lib.o
+obj-$(CONFIG_SND_USB_UA101) += snd-ua101.o snd-usb-lib.o
obj-$(CONFIG_SND_USB_USX2Y) += snd-usb-lib.o
obj-$(CONFIG_SND_USB_US122L) += snd-usb-lib.o
diff --git a/sound/usb/ua101.c b/sound/usb/ua101.c
new file mode 100644
index 000000000000..16dc7bd5e120
--- /dev/null
+++ b/sound/usb/ua101.c
@@ -0,0 +1,1419 @@
+/*
+ * Edirol UA-101 driver
+ * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
+ *
+ * This driver is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2.
+ *
+ * This driver is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this driver. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/audio.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include "usbaudio.h"
+
+MODULE_DESCRIPTION("Edirol UA-101 driver");
+MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_SUPPORTED_DEVICE("{{Edirol,UA-101}}");
+
+/* I use my UA-1A for testing because I don't have a UA-101 ... */
+#define UA1A_HACK
+
+/*
+ * Should not be lower than the minimum scheduling delay of the host
+ * controller. Some Intel controllers need more than one frame; as long as
+ * that driver doesn't tell us about this, use 1.5 frames just to be sure.
+ */
+#define MIN_QUEUE_LENGTH 12
+/* Somewhat random. */
+#define MAX_QUEUE_LENGTH 30
+/*
+ * This magic value optimizes memory usage efficiency for the UA-101's packet
+ * sizes at all sample rates, taking into account the stupid cache pool sizes
+ * that usb_buffer_alloc() uses.
+ */
+#define DEFAULT_QUEUE_LENGTH 21
+
+#define MAX_PACKET_SIZE 672 /* hardware specific */
+#define MAX_MEMORY_BUFFERS DIV_ROUND_UP(MAX_QUEUE_LENGTH, \
+ PAGE_SIZE / MAX_PACKET_SIZE)
+
+static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
+static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
+static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+static unsigned int queue_length = 21;
+
+module_param_array(index, int, NULL, 0444);
+MODULE_PARM_DESC(index, "card index");
+module_param_array(id, charp, NULL, 0444);
+MODULE_PARM_DESC(id, "ID string");
+module_param_array(enable, bool, NULL, 0444);
+MODULE_PARM_DESC(enable, "enable card");
+module_param(queue_length, uint, 0644);
+MODULE_PARM_DESC(queue_length, "USB queue length in microframes, "
+ __stringify(MIN_QUEUE_LENGTH)"-"__stringify(MAX_QUEUE_LENGTH));
+
+enum {
+ INTF_PLAYBACK,
+ INTF_CAPTURE,
+ INTF_MIDI,
+
+ INTF_COUNT
+};
+
+/* bits in struct ua101::states */
+enum {
+ USB_CAPTURE_RUNNING,
+ USB_PLAYBACK_RUNNING,
+ ALSA_CAPTURE_OPEN,
+ ALSA_PLAYBACK_OPEN,
+ ALSA_CAPTURE_RUNNING,
+ ALSA_PLAYBACK_RUNNING,
+ CAPTURE_URB_COMPLETED,
+ PLAYBACK_URB_COMPLETED,
+ DISCONNECTED,
+};
+
+struct ua101 {
+ struct usb_device *dev;
+ struct snd_card *card;
+ struct usb_interface *intf[INTF_COUNT];
+ int card_index;
+ struct snd_pcm *pcm;
+ struct list_head midi_list;
+ u64 format_bit;
+ unsigned int rate;
+ unsigned int packets_per_second;
+ spinlock_t lock;
+ struct mutex mutex;
+ unsigned long states;
+
+ /* FIFO to synchronize playback rate to capture rate */
+ unsigned int rate_feedback_start;
+ unsigned int rate_feedback_count;
+ u8 rate_feedback[MAX_QUEUE_LENGTH];
+
+ struct list_head ready_playback_urbs;
+ struct tasklet_struct playback_tasklet;
+ wait_queue_head_t alsa_capture_wait;
+ wait_queue_head_t rate_feedback_wait;
+ wait_queue_head_t alsa_playback_wait;
+ struct ua101_stream {
+ struct snd_pcm_substream *substream;
+ unsigned int usb_pipe;
+ unsigned int channels;
+ unsigned int frame_bytes;
+ unsigned int max_packet_bytes;
+ unsigned int period_pos;
+ unsigned int buffer_pos;
+ unsigned int queue_length;
+ struct ua101_urb {
+ struct urb urb;
+ struct usb_iso_packet_descriptor iso_frame_desc[1];
+ struct list_head ready_list;
+ } *urbs[MAX_QUEUE_LENGTH];
+ struct {
+ unsigned int size;
+ void *addr;
+ dma_addr_t dma;
+ } buffers[MAX_MEMORY_BUFFERS];
+ } capture, playback;
+
+ unsigned int fps[10];
+ unsigned int frame_counter;
+};
+
+static DEFINE_MUTEX(devices_mutex);
+static unsigned int devices_used;
+static struct usb_driver ua101_driver;
+
+static void abort_alsa_playback(struct ua101 *ua);
+static void abort_alsa_capture(struct ua101 *ua);
+
+static const char *usb_error_string(int err)
+{
+ switch (err) {
+ case -ENODEV:
+ return "no device";
+ case -ENOENT:
+ return "endpoint not enabled";
+ case -EPIPE:
+ return "endpoint stalled";
+ case -ENOSPC:
+ return "not enough bandwidth";
+ case -ESHUTDOWN:
+ return "device disabled";
+ case -EHOSTUNREACH:
+ return "device suspended";
+ case -EINVAL:
+ case -EAGAIN:
+ case -EFBIG:
+ case -EMSGSIZE:
+ return "internal error";
+ default:
+ return "unknown error";
+ }
+}
+
+static void abort_usb_capture(struct ua101 *ua)
+{
+ if (test_and_clear_bit(USB_CAPTURE_RUNNING, &ua->states)) {
+ wake_up(&ua->alsa_capture_wait);
+ wake_up(&ua->rate_feedback_wait);
+ }
+}
+
+static void abort_usb_playback(struct ua101 *ua)
+{
+ if (test_and_clear_bit(USB_PLAYBACK_RUNNING, &ua->states))
+ wake_up(&ua->alsa_playback_wait);
+}
+
+static void playback_urb_complete(struct urb *usb_urb)
+{
+ struct ua101_urb *urb = (struct ua101_urb *)usb_urb;
+ struct ua101 *ua = urb->urb.context;
+ unsigned long flags;
+
+ if (unlikely(urb->urb.status == -ENOENT || /* unlinked */
+ urb->urb.status == -ENODEV || /* device removed */
+ urb->urb.status == -ECONNRESET || /* unlinked */
+ urb->urb.status == -ESHUTDOWN)) { /* device disabled */
+ abort_usb_playback(ua);
+ abort_alsa_playback(ua);
+ return;
+ }
+
+ if (test_bit(USB_PLAYBACK_RUNNING, &ua->states)) {
+ /* append URB to FIFO */
+ spin_lock_irqsave(&ua->lock, flags);
+ list_add_tail(&urb->ready_list, &ua->ready_playback_urbs);
+ if (ua->rate_feedback_count > 0)
+ tasklet_schedule(&ua->playback_tasklet);
+ ua->playback.substream->runtime->delay -=
+ urb->urb.iso_frame_desc[0].length /
+ ua->playback.frame_bytes;
+ spin_unlock_irqrestore(&ua->lock, flags);
+ }
+}
+
+static void first_playback_urb_complete(struct urb *urb)
+{
+ struct ua101 *ua = urb->context;
+
+ urb->complete = playback_urb_complete;
+ playback_urb_complete(urb);
+
+ set_bit(PLAYBACK_URB_COMPLETED, &ua->states);
+ wake_up(&ua->alsa_playback_wait);
+}
+
+/* copy data from the ALSA ring buffer into the URB buffer */
+static bool copy_playback_data(struct ua101_stream *stream, struct urb *urb,
+ unsigned int frames)
+{
+ struct snd_pcm_runtime *runtime;
+ unsigned int frame_bytes, frames1;
+ const u8 *source;
+
+ runtime = stream->substream->runtime;
+ frame_bytes = stream->frame_bytes;
+ source = runtime->dma_area + stream->buffer_pos * frame_bytes;
+ if (stream->buffer_pos + frames <= runtime->buffer_size) {
+ memcpy(urb->transfer_buffer, source, frames * frame_bytes);
+ } else {
+ /* wrap around at end of ring buffer */
+ frames1 = runtime->buffer_size - stream->buffer_pos;
+ memcpy(urb->transfer_buffer, source, frames1 * frame_bytes);
+ memcpy(urb->transfer_buffer + frames1 * frame_bytes,
+ runtime->dma_area, (frames - frames1) * frame_bytes);
+ }
+
+ stream->buffer_pos += frames;
+ if (stream->buffer_pos >= runtime->buffer_size)
+ stream->buffer_pos -= runtime->buffer_size;
+ stream->period_pos += frames;
+ if (stream->period_pos >= runtime->period_size) {
+ stream->period_pos -= runtime->period_size;
+ return true;
+ }
+ return false;
+}
+
+static inline void add_with_wraparound(struct ua101 *ua,
+ unsigned int *value, unsigned int add)
+{
+ *value += add;
+ if (*value >= ua->playback.queue_length)
+ *value -= ua->playback.queue_length;
+}
+
+static void playback_tasklet(unsigned long data)
+{
+ struct ua101 *ua = (void *)data;
+ unsigned long flags;
+ unsigned int frames;
+ struct ua101_urb *urb;
+ bool do_period_elapsed = false;
+ int err;
+
+ if (unlikely(!test_bit(USB_PLAYBACK_RUNNING, &ua->states)))
+ return;
+
+ /*
+ * Synchronizing the playback rate to the capture rate is done by using
+ * the same sequence of packet sizes for both streams.
+ * Submitting a playback URB therefore requires both a ready URB and
+ * the size of the corresponding capture packet, i.e., both playback
+ * and capture URBs must have been completed. Since the USB core does
+ * not guarantee that playback and capture complete callbacks are
+ * called alternately, we use two FIFOs for packet sizes and read URBs;
+ * submitting playback URBs is possible as long as both FIFOs are
+ * nonempty.
+ */
+ spin_lock_irqsave(&ua->lock, flags);
+ while (ua->rate_feedback_count > 0 &&
+ !list_empty(&ua->ready_playback_urbs)) {
+ /* take packet size out of FIFO */
+ frames = ua->rate_feedback[ua->rate_feedback_start];
+ add_with_wraparound(ua, &ua->rate_feedback_start, 1);
+ ua->rate_feedback_count--;
+
+ /* take URB out of FIFO */
+ urb = list_first_entry(&ua->ready_playback_urbs,
+ struct ua101_urb, ready_list);
+ list_del(&urb->ready_list);
+
+ /* fill packet with data or silence */
+ urb->urb.iso_frame_desc[0].length =
+ frames * ua->playback.frame_bytes;
+ if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
+ do_period_elapsed |= copy_playback_data(&ua->playback,
+ &urb->urb,
+ frames);
+ else
+ memset(urb->urb.transfer_buffer, 0,
+ urb->urb.iso_frame_desc[0].length);
+
+ /* and off you go ... */
+ err = usb_submit_urb(&urb->urb, GFP_ATOMIC);
+ if (unlikely(err < 0)) {
+ spin_unlock_irqrestore(&ua->lock, flags);
+ abort_usb_playback(ua);
+ abort_alsa_playback(ua);
+ dev_err(&ua->dev->dev, "USB request error %d: %s\n",
+ err, usb_error_string(err));
+ return;
+ }
+ ua->playback.substream->runtime->delay += frames;
+ }
+ spin_unlock_irqrestore(&ua->lock, flags);
+ if (do_period_elapsed)
+ snd_pcm_period_elapsed(ua->playback.substream);
+}
+
+/* copy data from the URB buffer into the ALSA ring buffer */
+static bool copy_capture_data(struct ua101_stream *stream, struct urb *urb,
+ unsigned int frames)
+{
+ struct snd_pcm_runtime *runtime;
+ unsigned int frame_bytes, frames1;
+ u8 *dest;
+
+ runtime = stream->substream->runtime;
+ frame_bytes = stream->frame_bytes;
+ dest = runtime->dma_area + stream->buffer_pos * frame_bytes;
+ if (stream->buffer_pos + frames <= runtime->buffer_size) {
+ memcpy(dest, urb->transfer_buffer, frames * frame_bytes);
+ } else {
+ /* wrap around at end of ring buffer */
+ frames1 = runtime->buffer_size - stream->buffer_pos;
+ memcpy(dest, urb->transfer_buffer, frames1 * frame_bytes);
+ memcpy(runtime->dma_area,
+ urb->transfer_buffer + frames1 * frame_bytes,
+ (frames - frames1) * frame_bytes);
+ }
+
+ stream->buffer_pos += frames;
+ if (stream->buffer_pos >= runtime->buffer_size)
+ stream->buffer_pos -= runtime->buffer_size;
+ stream->period_pos += frames;
+ if (stream->period_pos >= runtime->period_size) {
+ stream->period_pos -= runtime->period_size;
+ return true;
+ }
+ return false;
+}
+
+static void capture_urb_complete(struct urb *urb)
+{
+ struct ua101 *ua = urb->context;
+ struct ua101_stream *stream = &ua->capture;
+ unsigned long flags;
+ unsigned int frames, write_ptr;
+ bool do_period_elapsed;
+ int err;
+
+ if (unlikely(urb->status == -ENOENT || /* unlinked */
+ urb->status == -ENODEV || /* device removed */
+ urb->status == -ECONNRESET || /* unlinked */
+ urb->status == -ESHUTDOWN)) /* device disabled */
+ goto stream_stopped;
+
+ if (urb->status >= 0 && urb->iso_frame_desc[0].status >= 0)
+ frames = urb->iso_frame_desc[0].actual_length /
+ stream->frame_bytes;
+ else
+ frames = 0;
+
+ spin_lock_irqsave(&ua->lock, flags);
+
+ if (frames > 0 && test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
+ do_period_elapsed = copy_capture_data(stream, urb, frames);
+ else
+ do_period_elapsed = false;
+
+ if (test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (unlikely(err < 0)) {
+ spin_unlock_irqrestore(&ua->lock, flags);
+ dev_err(&ua->dev->dev, "USB request error %d: %s\n",
+ err, usb_error_string(err));
+ goto stream_stopped;
+ }
+
+ /* append packet size to FIFO */
+ write_ptr = ua->rate_feedback_start;
+ add_with_wraparound(ua, &write_ptr, ua->rate_feedback_count);
+ ua->rate_feedback[write_ptr] = frames;
+ if (ua->rate_feedback_count < ua->playback.queue_length) {
+ ua->rate_feedback_count++;
+ if (ua->rate_feedback_count ==
+ ua->playback.queue_length)
+ wake_up(&ua->rate_feedback_wait);
+ } else {
+ /*
+ * Ring buffer overflow; this happens when the playback
+ * stream is not running. Throw away the oldest entry,
+ * so that the playback stream, when it starts, sees
+ * the most recent packet sizes.
+ */
+ add_with_wraparound(ua, &ua->rate_feedback_start, 1);
+ }
+ if (test_bit(USB_PLAYBACK_RUNNING, &ua->states) &&
+ !list_empty(&ua->ready_playback_urbs))
+ tasklet_schedule(&ua->playback_tasklet);
+ }
+
+ spin_unlock_irqrestore(&ua->lock, flags);
+
+ if (do_period_elapsed)
+ snd_pcm_period_elapsed(stream->substream);
+
+ /* for debugging: measure the sample rate relative to the USB clock */
+ ua->fps[ua->frame_counter++ / ua->packets_per_second] += frames;
+ if (ua->frame_counter >= ARRAY_SIZE(ua->fps) * ua->packets_per_second) {
+ printk(KERN_DEBUG "capture rate:");
+ for (frames = 0; frames < ARRAY_SIZE(ua->fps); ++frames)
+ printk(KERN_CONT " %u", ua->fps[frames]);
+ printk(KERN_CONT "\n");
+ memset(ua->fps, 0, sizeof(ua->fps));
+ ua->frame_counter = 0;
+ }
+ return;
+
+stream_stopped:
+ abort_usb_playback(ua);
+ abort_usb_capture(ua);
+ abort_alsa_playback(ua);
+ abort_alsa_capture(ua);
+}
+
+static void first_capture_urb_complete(struct urb *urb)
+{
+ struct ua101 *ua = urb->context;
+
+ urb->complete = capture_urb_complete;
+ capture_urb_complete(urb);
+
+ set_bit(CAPTURE_URB_COMPLETED, &ua->states);
+ wake_up(&ua->alsa_capture_wait);
+}
+
+static int submit_stream_urbs(struct ua101 *ua, struct ua101_stream *stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < stream->queue_length; ++i) {
+ int err = usb_submit_urb(&stream->urbs[i]->urb, GFP_KERNEL);
+ if (err < 0) {
+ dev_err(&ua->dev->dev, "USB request error %d: %s\n",
+ err, usb_error_string(err));
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void kill_stream_urbs(struct ua101_stream *stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < stream->queue_length; ++i)
+ usb_kill_urb(&stream->urbs[i]->urb);
+}
+
+static int enable_iso_interface(struct ua101 *ua, unsigned int intf_index)
+{
+ struct usb_host_interface *alts;
+
+ alts = ua->intf[intf_index]->cur_altsetting;
+ if (alts->desc.bAlternateSetting != 1) {
+ int err = usb_set_interface(ua->dev,
+ alts->desc.bInterfaceNumber, 1);
+ if (err < 0) {
+ dev_err(&ua->dev->dev,
+ "cannot initialize interface; error %d: %s\n",
+ err, usb_error_string(err));
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void disable_iso_interface(struct ua101 *ua, unsigned int intf_index)
+{
+ struct usb_host_interface *alts;
+
+ alts = ua->intf[intf_index]->cur_altsetting;
+ if (alts->desc.bAlternateSetting != 0) {
+ int err = usb_set_interface(ua->dev,
+ alts->desc.bInterfaceNumber, 0);
+ if (err < 0 && !test_bit(DISCONNECTED, &ua->states))
+ dev_warn(&ua->dev->dev,
+ "interface reset failed; error %d: %s\n",
+ err, usb_error_string(err));
+ }
+}
+
+static void stop_usb_capture(struct ua101 *ua)
+{
+ clear_bit(USB_CAPTURE_RUNNING, &ua->states);
+
+ kill_stream_urbs(&ua->capture);
+
+ disable_iso_interface(ua, INTF_CAPTURE);
+}
+
+static int start_usb_capture(struct ua101 *ua)
+{
+ int err;
+
+ if (test_bit(DISCONNECTED, &ua->states))
+ return -ENODEV;
+
+ if (test_bit(USB_CAPTURE_RUNNING, &ua->states))
+ return 0;
+
+ kill_stream_urbs(&ua->capture);
+
+ err = enable_iso_interface(ua, INTF_CAPTURE);
+ if (err < 0)
+ return err;
+
+ clear_bit(CAPTURE_URB_COMPLETED, &ua->states);
+ ua->capture.urbs[0]->urb.complete = first_capture_urb_complete;
+ ua->rate_feedback_start = 0;
+ ua->rate_feedback_count = 0;
+
+ set_bit(USB_CAPTURE_RUNNING, &ua->states);
+ err = submit_stream_urbs(ua, &ua->capture);
+ if (err < 0)
+ stop_usb_capture(ua);
+ return err;
+}
+
+static void stop_usb_playback(struct ua101 *ua)
+{
+ clear_bit(USB_PLAYBACK_RUNNING, &ua->states);
+
+ kill_stream_urbs(&ua->playback);
+
+ tasklet_kill(&ua->playback_tasklet);
+
+ disable_iso_interface(ua, INTF_PLAYBACK);
+}
+
+static int start_usb_playback(struct ua101 *ua)
+{
+ unsigned int i, frames;
+ struct urb *urb;
+ int err = 0;
+
+ if (test_bit(DISCONNECTED, &ua->states))
+ return -ENODEV;
+
+ if (test_bit(USB_PLAYBACK_RUNNING, &ua->states))
+ return 0;
+
+ kill_stream_urbs(&ua->playback);
+ tasklet_kill(&ua->playback_tasklet);
+
+ err = enable_iso_interface(ua, INTF_PLAYBACK);
+ if (err < 0)
+ return err;
+
+ clear_bit(PLAYBACK_URB_COMPLETED, &ua->states);
+ ua->playback.urbs[0]->urb.complete =
+ first_playback_urb_complete;
+ spin_lock_irq(&ua->lock);
+ INIT_LIST_HEAD(&ua->ready_playback_urbs);
+ spin_unlock_irq(&ua->lock);
+
+ /*
+ * We submit the initial URBs all at once, so we have to wait for the
+ * packet size FIFO to be full.
+ */
+ wait_event(ua->rate_feedback_wait,
+ ua->rate_feedback_count >= ua->playback.queue_length ||
+ !test_bit(USB_CAPTURE_RUNNING, &ua->states) ||
+ test_bit(DISCONNECTED, &ua->states));
+ if (test_bit(DISCONNECTED, &ua->states)) {
+ stop_usb_playback(ua);
+ return -ENODEV;
+ }
+ if (!test_bit(USB_CAPTURE_RUNNING, &ua->states)) {
+ stop_usb_playback(ua);
+ return -EIO;
+ }
+
+ for (i = 0; i < ua->playback.queue_length; ++i) {
+ /* all initial URBs contain silence */
+ spin_lock_irq(&ua->lock);
+ frames = ua->rate_feedback[ua->rate_feedback_start];
+ add_with_wraparound(ua, &ua->rate_feedback_start, 1);
+ ua->rate_feedback_count--;
+ spin_unlock_irq(&ua->lock);
+ urb = &ua->playback.urbs[i]->urb;
+ urb->iso_frame_desc[0].length =
+ frames * ua->playback.frame_bytes;
+ memset(urb->transfer_buffer, 0,
+ urb->iso_frame_desc[0].length);
+ }
+
+ set_bit(USB_PLAYBACK_RUNNING, &ua->states);
+ err = submit_stream_urbs(ua, &ua->playback);
+ if (err < 0)
+ stop_usb_playback(ua);
+ return err;
+}
+
+static void abort_alsa_capture(struct ua101 *ua)
+{
+ if (test_bit(ALSA_CAPTURE_RUNNING, &ua->states))
+ snd_pcm_stop(ua->capture.substream, SNDRV_PCM_STATE_XRUN);
+}
+
+static void abort_alsa_playback(struct ua101 *ua)
+{
+ if (test_bit(ALSA_PLAYBACK_RUNNING, &ua->states))
+ snd_pcm_stop(ua->playback.substream, SNDRV_PCM_STATE_XRUN);
+}
+
+static int set_stream_hw(struct ua101 *ua, struct snd_pcm_substream *substream,
+ unsigned int channels)
+{
+ int err;
+
+ substream->runtime->hw.info =
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_BATCH |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_FIFO_IN_FRAMES;
+ substream->runtime->hw.formats = ua->format_bit;
+ substream->runtime->hw.rates = snd_pcm_rate_to_rate_bit(ua->rate);
+ substream->runtime->hw.rate_min = ua->rate;
+ substream->runtime->hw.rate_max = ua->rate;
+ substream->runtime->hw.channels_min = channels;
+ substream->runtime->hw.channels_max = channels;
+ substream->runtime->hw.buffer_bytes_max = 45000 * 1024;
+ substream->runtime->hw.period_bytes_min = 1;
+ substream->runtime->hw.period_bytes_max = UINT_MAX;
+ substream->runtime->hw.periods_min = 2;
+ substream->runtime->hw.periods_max = UINT_MAX;
+ err = snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIOD_TIME,
+ 1500000 / ua->packets_per_second,
+ 8192000);
+ if (err < 0)
+ return err;
+ err = snd_pcm_hw_constraint_msbits(substream->runtime, 0, 32, 24);
+ return err;
+}
+
+static int capture_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct ua101 *ua = substream->private_data;
+ int err;
+
+ ua->capture.substream = substream;
+ err = set_stream_hw(ua, substream, ua->capture.channels);
+ if (err < 0)
+ return err;
+ substream->runtime->hw.fifo_size =
+ DIV_ROUND_CLOSEST(ua->rate, ua->packets_per_second);
+ substream->runtime->delay = substream->runtime->hw.fifo_size;
+
+ mutex_lock(&ua->mutex);
+ err = start_usb_capture(ua);
+ if (err >= 0)
+ set_bit(ALSA_CAPTURE_OPEN, &ua->states);
+ mutex_unlock(&ua->mutex);
+ return err;
+}
+
+static int playback_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct ua101 *ua = substream->private_data;
+ int err;
+
+ ua->playback.substream = substream;
+ err = set_stream_hw(ua, substream, ua->playback.channels);
+ if (err < 0)
+ return err;
+ substream->runtime->hw.fifo_size =
+ DIV_ROUND_CLOSEST(ua->rate * ua->playback.queue_length,
+ ua->packets_per_second);
+
+ mutex_lock(&ua->mutex);
+ err = start_usb_capture(ua);
+ if (err < 0)
+ goto error;
+ err = start_usb_playback(ua);
+ if (err < 0) {
+ if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states))
+ stop_usb_capture(ua);
+ goto error;
+ }
+ set_bit(ALSA_PLAYBACK_OPEN, &ua->states);
+error:
+ mutex_unlock(&ua->mutex);
+ return err;
+}
+
+static int capture_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct ua101 *ua = substream->private_data;
+
+ mutex_lock(&ua->mutex);
+ clear_bit(ALSA_CAPTURE_OPEN, &ua->states);
+ if (!test_bit(ALSA_PLAYBACK_OPEN, &ua->states))
+ stop_usb_capture(ua);
+ mutex_unlock(&ua->mutex);
+ return 0;
+}
+
+static int playback_pcm_close(struct snd_pcm_substream *substream)
+{
+ struct ua101 *ua = substream->private_data;
+
+ mutex_lock(&ua->mutex);
+ stop_usb_playback(ua);
+ clear_bit(ALSA_PLAYBACK_OPEN, &ua->states);
+ if (!test_bit(ALSA_CAPTURE_OPEN, &ua->states))
+ stop_usb_capture(ua);
+ mutex_unlock(&ua->mutex);
+ return 0;
+}
+
+static int capture_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct ua101 *ua = substream->private_data;
+ int err;
+
+ mutex_lock(&ua->mutex);
+ err = start_usb_capture(ua);
+ mutex_unlock(&ua->mutex);
+ if (err < 0)
+ return err;
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+
+static int playback_pcm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ struct ua101 *ua = substream->private_data;
+ int err;
+
+ mutex_lock(&ua->mutex);
+ err = start_usb_capture(ua);
+ if (err >= 0)
+ err = start_usb_playback(ua);
+ mutex_unlock(&ua->mutex);
+ if (err < 0)
+ return err;
+
+ return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
+}
+
+static int ua101_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int capture_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct ua101 *ua = substream->private_data;
+ int err;
+
+ mutex_lock(&ua->mutex);
+ err = start_usb_capture(ua);
+ mutex_unlock(&ua->mutex);
+ if (err < 0)
+ return err;
+
+ /*
+ * The EHCI driver schedules the first packet of an iso stream at 10 ms
+ * in the future, i.e., no data is actually captured for that long.
+ * Take the wait here so that the stream is known to be actually
+ * running when the start trigger has been called.
+ */
+ wait_event(ua->alsa_capture_wait,
+ test_bit(CAPTURE_URB_COMPLETED, &ua->states) ||
+ !test_bit(USB_CAPTURE_RUNNING, &ua->states));
+ if (test_bit(DISCONNECTED, &ua->states))
+ return -ENODEV;
+ if (!test_bit(USB_CAPTURE_RUNNING, &ua->states))
+ return -EIO;
+
+ ua->capture.period_pos = 0;
+ ua->capture.buffer_pos = 0;
+ return 0;
+}
+
+static int playback_pcm_prepare(struct snd_pcm_substream *substream)
+{
+ struct ua101 *ua = substream->private_data;
+ int err;
+
+ mutex_lock(&ua->mutex);
+ err = start_usb_capture(ua);
+ if (err >= 0)
+ err = start_usb_playback(ua);
+ mutex_unlock(&ua->mutex);
+ if (err < 0)
+ return err;
+
+ /* see the comment in capture_pcm_prepare() */
+ wait_event(ua->alsa_playback_wait,
+ test_bit(PLAYBACK_URB_COMPLETED, &ua->states) ||
+ !test_bit(USB_PLAYBACK_RUNNING, &ua->states));
+ if (test_bit(DISCONNECTED, &ua->states))
+ return -ENODEV;
+ if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states))
+ return -EIO;
+
+ substream->runtime->delay = 0;
+ ua->playback.period_pos = 0;
+ ua->playback.buffer_pos = 0;
+ return 0;
+}
+
+static int capture_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct ua101 *ua = substream->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ if (!test_bit(USB_CAPTURE_RUNNING, &ua->states))
+ return -EIO;
+ set_bit(ALSA_CAPTURE_RUNNING, &ua->states);
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ clear_bit(ALSA_CAPTURE_RUNNING, &ua->states);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int playback_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+ struct ua101 *ua = substream->private_data;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ if (!test_bit(USB_PLAYBACK_RUNNING, &ua->states))
+ return -EIO;
+ set_bit(ALSA_PLAYBACK_RUNNING, &ua->states);
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ clear_bit(ALSA_PLAYBACK_RUNNING, &ua->states);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline snd_pcm_uframes_t ua101_pcm_pointer(struct ua101 *ua,
+ struct ua101_stream *stream)
+{
+ unsigned long flags;
+ unsigned int pos;
+
+ spin_lock_irqsave(&ua->lock, flags);
+ pos = stream->buffer_pos;
+ spin_unlock_irqrestore(&ua->lock, flags);
+ return pos;
+}
+
+static snd_pcm_uframes_t capture_pcm_pointer(struct snd_pcm_substream *subs)
+{
+ struct ua101 *ua = subs->private_data;
+
+ return ua101_pcm_pointer(ua, &ua->capture);
+}
+
+static snd_pcm_uframes_t playback_pcm_pointer(struct snd_pcm_substream *subs)
+{
+ struct ua101 *ua = subs->private_data;
+
+ return ua101_pcm_pointer(ua, &ua->playback);
+}
+
+static struct snd_pcm_ops capture_pcm_ops = {
+ .open = capture_pcm_open,
+ .close = capture_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = capture_pcm_hw_params,
+ .hw_free = ua101_pcm_hw_free,
+ .prepare = capture_pcm_prepare,
+ .trigger = capture_pcm_trigger,
+ .pointer = capture_pcm_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+};
+
+static struct snd_pcm_ops playback_pcm_ops = {
+ .open = playback_pcm_open,
+ .close = playback_pcm_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = playback_pcm_hw_params,
+ .hw_free = ua101_pcm_hw_free,
+ .prepare = playback_pcm_prepare,
+ .trigger = playback_pcm_trigger,
+ .pointer = playback_pcm_pointer,
+ .page = snd_pcm_lib_get_vmalloc_page,
+};
+
+static const struct uac_format_type_i_discrete_descriptor *
+find_format_descriptor(struct usb_interface *interface)
+{
+ struct usb_host_interface *alt;
+ u8 *extra;
+ int extralen;
+
+ if (interface->num_altsetting != 2) {
+ dev_err(&interface->dev, "invalid num_altsetting\n");
+ return NULL;
+ }
+
+ alt = &interface->altsetting[0];
+ if (alt->desc.bNumEndpoints != 0) {
+ dev_err(&interface->dev, "invalid bNumEndpoints\n");
+ return NULL;
+ }
+
+ alt = &interface->altsetting[1];
+ if (alt->desc.bNumEndpoints != 1) {
+ dev_err(&interface->dev, "invalid bNumEndpoints\n");
+ return NULL;
+ }
+
+ extra = alt->extra;
+ extralen = alt->extralen;
+ while (extralen >= sizeof(struct usb_descriptor_header)) {
+ struct uac_format_type_i_discrete_descriptor *desc;
+
+ desc = (struct uac_format_type_i_discrete_descriptor *)extra;
+ if (desc->bLength > extralen) {
+ dev_err(&interface->dev, "descriptor overflow\n");
+ return NULL;
+ }
+ if (desc->bLength == UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1) &&
+ desc->bDescriptorType == USB_DT_CS_INTERFACE &&
+ desc->bDescriptorSubtype == UAC_FORMAT_TYPE) {
+ if (desc->bFormatType != UAC_FORMAT_TYPE_I_PCM ||
+ desc->bSamFreqType != 1) {
+ dev_err(&interface->dev,
+ "invalid format type\n");
+ return NULL;
+ }
+ return desc;
+ }
+ extralen -= desc->bLength;
+ extra += desc->bLength;
+ }
+ dev_err(&interface->dev, "sample format descriptor not found\n");
+ return NULL;
+}
+
+static int detect_usb_format(struct ua101 *ua)
+{
+ const struct uac_format_type_i_discrete_descriptor *fmt_capture;
+ const struct uac_format_type_i_discrete_descriptor *fmt_playback;
+ const struct usb_endpoint_descriptor *epd;
+ unsigned int rate2;
+
+ fmt_capture = find_format_descriptor(ua->intf[INTF_CAPTURE]);
+ fmt_playback = find_format_descriptor(ua->intf[INTF_PLAYBACK]);
+ if (!fmt_capture || !fmt_playback)
+ return -ENXIO;
+
+ switch (fmt_capture->bSubframeSize) {
+ case 3:
+ ua->format_bit = SNDRV_PCM_FMTBIT_S24_3LE;
+ break;
+ case 4:
+ ua->format_bit = SNDRV_PCM_FMTBIT_S32_LE;
+ break;
+ default:
+ dev_err(&ua->dev->dev, "sample width is not 24 or 32 bits\n");
+ return -ENXIO;
+ }
+ if (fmt_capture->bSubframeSize != fmt_playback->bSubframeSize) {
+ dev_err(&ua->dev->dev,
+ "playback/capture sample widths do not match\n");
+ return -ENXIO;
+ }
+
+ if (fmt_capture->bBitResolution != 24 ||
+ fmt_playback->bBitResolution != 24) {
+ dev_err(&ua->dev->dev, "sample width is not 24 bits\n");
+ return -ENXIO;
+ }
+
+ ua->rate = combine_triple(fmt_capture->tSamFreq[0]);
+ rate2 = combine_triple(fmt_playback->tSamFreq[0]);
+ if (ua->rate != rate2) {
+ dev_err(&ua->dev->dev,
+ "playback/capture rates do not match: %u/%u\n",
+ rate2, ua->rate);
+ return -ENXIO;
+ }
+
+ switch (ua->dev->speed) {
+ case USB_SPEED_FULL:
+ ua->packets_per_second = 1000;
+ break;
+ case USB_SPEED_HIGH:
+ ua->packets_per_second = 8000;
+ break;
+ default:
+ dev_err(&ua->dev->dev, "unknown device speed\n");
+ return -ENXIO;
+ }
+
+ ua->capture.channels = fmt_capture->bNrChannels;
+ ua->playback.channels = fmt_playback->bNrChannels;
+ ua->capture.frame_bytes =
+ fmt_capture->bSubframeSize * ua->capture.channels;
+ ua->playback.frame_bytes =
+ fmt_playback->bSubframeSize * ua->playback.channels;
+
+ epd = &ua->intf[INTF_CAPTURE]->altsetting[1].endpoint[0].desc;
+ if (!usb_endpoint_is_isoc_in(epd)) {
+ dev_err(&ua->dev->dev, "invalid capture endpoint\n");
+ return -ENXIO;
+ }
+ ua->capture.usb_pipe = usb_rcvisocpipe(ua->dev, usb_endpoint_num(epd));
+ ua->capture.max_packet_bytes = le16_to_cpu(epd->wMaxPacketSize);
+
+ epd = &ua->intf[INTF_PLAYBACK]->altsetting[1].endpoint[0].desc;
+ if (!usb_endpoint_is_isoc_out(epd)) {
+ dev_err(&ua->dev->dev, "invalid playback endpoint\n");
+ return -ENXIO;
+ }
+ ua->playback.usb_pipe = usb_sndisocpipe(ua->dev, usb_endpoint_num(epd));
+ ua->playback.max_packet_bytes = le16_to_cpu(epd->wMaxPacketSize);
+ return 0;
+}
+
+static int alloc_stream_buffers(struct ua101 *ua, struct ua101_stream *stream)
+{
+ unsigned int remaining_packets, packets, packets_per_page, i;
+ size_t size;
+
+ stream->queue_length = queue_length;
+ stream->queue_length = max(stream->queue_length,
+ (unsigned int)MIN_QUEUE_LENGTH);
+ stream->queue_length = min(stream->queue_length,
+ (unsigned int)MAX_QUEUE_LENGTH);
+
+ /*
+ * The cache pool sizes used by usb_buffer_alloc() (128, 512, 2048) are
+ * quite bad when used with the packet sizes of this device (e.g. 280,
+ * 520, 624). Therefore, we allocate and subdivide entire pages, using
+ * a smaller buffer only for the last chunk.
+ */
+ remaining_packets = stream->queue_length;
+ packets_per_page = PAGE_SIZE / stream->max_packet_bytes;
+ for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i) {
+ packets = min(remaining_packets, packets_per_page);
+ size = packets * stream->max_packet_bytes;
+ stream->buffers[i].addr =
+ usb_buffer_alloc(ua->dev, size, GFP_KERNEL,
+ &stream->buffers[i].dma);
+ if (!stream->buffers[i].addr)
+ return -ENOMEM;
+ stream->buffers[i].size = size;
+ remaining_packets -= packets;
+ if (!remaining_packets)
+ break;
+ }
+ if (remaining_packets) {
+ dev_err(&ua->dev->dev, "too many packets\n");
+ return -ENXIO;
+ }
+ return 0;
+}
+
+static void free_stream_buffers(struct ua101 *ua, struct ua101_stream *stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(stream->buffers); ++i)
+ usb_buffer_free(ua->dev,
+ stream->buffers[i].size,
+ stream->buffers[i].addr,
+ stream->buffers[i].dma);
+}
+
+static int alloc_stream_urbs(struct ua101 *ua, struct ua101_stream *stream,
+ void (*urb_complete)(struct urb *))
+{
+ unsigned max_packet_size = stream->max_packet_bytes;
+ struct ua101_urb *urb;
+ unsigned int b, u = 0;
+
+ for (b = 0; b < ARRAY_SIZE(stream->buffers); ++b) {
+ unsigned int size = stream->buffers[b].size;
+ u8 *addr = stream->buffers[b].addr;
+ dma_addr_t dma = stream->buffers[b].dma;
+
+ while (size >= max_packet_size) {
+ if (u >= stream->queue_length)
+ goto bufsize_error;
+ urb = kmalloc(sizeof(*urb), GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+ usb_init_urb(&urb->urb);
+ urb->urb.dev = ua->dev;
+ urb->urb.pipe = stream->usb_pipe;
+ urb->urb.transfer_flags = URB_ISO_ASAP |
+ URB_NO_TRANSFER_DMA_MAP;
+ urb->urb.transfer_buffer = addr;
+ urb->urb.transfer_dma = dma;
+ urb->urb.transfer_buffer_length = max_packet_size;
+ urb->urb.number_of_packets = 1;
+ urb->urb.interval = 1;
+ urb->urb.context = ua;
+ urb->urb.complete = urb_complete;
+ urb->urb.iso_frame_desc[0].offset = 0;
+ urb->urb.iso_frame_desc[0].length = max_packet_size;
+ stream->urbs[u++] = urb;
+ size -= max_packet_size;
+ addr += max_packet_size;
+ dma += max_packet_size;
+ }
+ }
+ if (u == stream->queue_length)
+ return 0;
+bufsize_error:
+ dev_err(&ua->dev->dev, "internal buffer size error\n");
+ return -ENXIO;
+}
+
+static void free_stream_urbs(struct ua101_stream *stream)
+{
+ unsigned int i;
+
+ for (i = 0; i < stream->queue_length; ++i)
+ kfree(stream->urbs[i]);
+}
+
+static void free_usb_related_resources(struct ua101 *ua,
+ struct usb_interface *interface)
+{
+ unsigned int i;
+
+ free_stream_urbs(&ua->capture);
+ free_stream_urbs(&ua->playback);
+ free_stream_buffers(ua, &ua->capture);
+ free_stream_buffers(ua, &ua->playback);
+
+ for (i = 0; i < ARRAY_SIZE(ua->intf); ++i)
+ if (ua->intf[i]) {
+ usb_set_intfdata(ua->intf[i], NULL);
+ if (ua->intf[i] != interface)
+ usb_driver_release_interface(&ua101_driver,
+ ua->intf[i]);
+ }
+}
+
+static void ua101_card_free(struct snd_card *card)
+{
+ struct ua101 *ua = card->private_data;
+
+ mutex_destroy(&ua->mutex);
+}
+
+static int ua101_probe(struct usb_interface *interface,
+ const struct usb_device_id *usb_id)
+{
+ static const struct snd_usb_midi_endpoint_info midi_ep = {
+ .out_cables = 0x0001,
+ .in_cables = 0x0001
+ };
+ static const struct snd_usb_audio_quirk midi_quirk = {
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &midi_ep
+ };
+ struct snd_card *card;
+ struct ua101 *ua;
+ unsigned int card_index, i;
+ char usb_path[32];
+ int err;
+
+ if (interface->altsetting->desc.bInterfaceNumber != 0)
+ return -ENODEV;
+
+ mutex_lock(&devices_mutex);
+
+ for (card_index = 0; card_index < SNDRV_CARDS; ++card_index)
+ if (enable[card_index] && !(devices_used & (1 << card_index)))
+ break;
+ if (card_index >= SNDRV_CARDS) {
+ mutex_unlock(&devices_mutex);
+ return -ENOENT;
+ }
+ err = snd_card_create(index[card_index], id[card_index], THIS_MODULE,
+ sizeof(*ua), &card);
+ if (err < 0) {
+ mutex_unlock(&devices_mutex);
+ return err;
+ }
+ card->private_free = ua101_card_free;
+ ua = card->private_data;
+ ua->dev = interface_to_usbdev(interface);
+ ua->card = card;
+ ua->card_index = card_index;
+ INIT_LIST_HEAD(&ua->midi_list);
+ spin_lock_init(&ua->lock);
+ mutex_init(&ua->mutex);
+ INIT_LIST_HEAD(&ua->ready_playback_urbs);
+ tasklet_init(&ua->playback_tasklet,
+ playback_tasklet, (unsigned long)ua);
+ init_waitqueue_head(&ua->alsa_capture_wait);
+ init_waitqueue_head(&ua->rate_feedback_wait);
+ init_waitqueue_head(&ua->alsa_playback_wait);
+
+#ifdef UA1A_HACK
+ if (ua->dev->descriptor.idProduct == cpu_to_le16(0x0018)) {
+ ua->intf[2] = interface;
+ ua->intf[0] = usb_ifnum_to_if(ua->dev, 1);
+ ua->intf[1] = usb_ifnum_to_if(ua->dev, 2);
+ usb_driver_claim_interface(&ua101_driver, ua->intf[0], ua);
+ usb_driver_claim_interface(&ua101_driver, ua->intf[1], ua);
+ } else {
+#endif
+ ua->intf[0] = interface;
+ for (i = 1; i < ARRAY_SIZE(ua->intf); ++i) {
+ ua->intf[i] = usb_ifnum_to_if(ua->dev, i);
+ if (!ua->intf[i]) {
+ dev_err(&ua->dev->dev, "interface %u not found\n", i);
+ err = -ENXIO;
+ goto probe_error;
+ }
+ err = usb_driver_claim_interface(&ua101_driver,
+ ua->intf[i], ua);
+ if (err < 0) {
+ ua->intf[i] = NULL;
+ err = -EBUSY;
+ goto probe_error;
+ }
+ }
+#ifdef UA1A_HACK
+ }
+#endif
+
+ snd_card_set_dev(card, &interface->dev);
+
+#ifdef UA1A_HACK
+ if (ua->dev->descriptor.idProduct == cpu_to_le16(0x0018)) {
+ ua->format_bit = SNDRV_PCM_FMTBIT_S16_LE;
+ ua->rate = 44100;
+ ua->packets_per_second = 1000;
+ ua->capture.channels = 2;
+ ua->playback.channels = 2;
+ ua->capture.frame_bytes = 4;
+ ua->playback.frame_bytes = 4;
+ ua->capture.usb_pipe = usb_rcvisocpipe(ua->dev, 2);
+ ua->playback.usb_pipe = usb_sndisocpipe(ua->dev, 1);
+ ua->capture.max_packet_bytes = 192;
+ ua->playback.max_packet_bytes = 192;
+ } else {
+#endif
+ err = detect_usb_format(ua);
+ if (err < 0)
+ goto probe_error;
+#ifdef UA1A_HACK
+ }
+#endif
+
+ strcpy(card->driver, "UA-101");
+ strcpy(card->shortname, "UA-101");
+ usb_make_path(ua->dev, usb_path, sizeof(usb_path));
+ snprintf(ua->card->longname, sizeof(ua->card->longname),
+ "EDIROL UA-101 (serial %s), %u Hz at %s, %s speed",
+ ua->dev->serial ? ua->dev->serial : "?", ua->rate, usb_path,
+ ua->dev->speed == USB_SPEED_HIGH ? "high" : "full");
+
+ err = alloc_stream_buffers(ua, &ua->capture);
+ if (err < 0)
+ goto probe_error;
+ err = alloc_stream_buffers(ua, &ua->playback);
+ if (err < 0)
+ goto probe_error;
+
+ err = alloc_stream_urbs(ua, &ua->capture, capture_urb_complete);
+ if (err < 0)
+ goto probe_error;
+ err = alloc_stream_urbs(ua, &ua->playback, playback_urb_complete);
+ if (err < 0)
+ goto probe_error;
+
+ err = snd_pcm_new(card, "UA-101", 0, 1, 1, &ua->pcm);
+ if (err < 0)
+ goto probe_error;
+ ua->pcm->private_data = ua;
+ strcpy(ua->pcm->name, "UA-101");
+ snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_PLAYBACK, &playback_pcm_ops);
+ snd_pcm_set_ops(ua->pcm, SNDRV_PCM_STREAM_CAPTURE, &capture_pcm_ops);
+
+#ifdef UA1A_HACK
+ if (ua->dev->descriptor.idProduct != cpu_to_le16(0x0018)) {
+#endif
+ err = snd_usbmidi_create(card, ua->intf[INTF_MIDI],
+ &ua->midi_list, &midi_quirk);
+ if (err < 0)
+ goto probe_error;
+#ifdef UA1A_HACK
+ }
+#endif
+
+ err = snd_card_register(card);
+ if (err < 0)
+ goto probe_error;
+
+ usb_set_intfdata(interface, ua);
+ devices_used |= 1 << card_index;
+
+ mutex_unlock(&devices_mutex);
+ return 0;
+
+probe_error:
+ free_usb_related_resources(ua, interface);
+ snd_card_free(card);
+ mutex_unlock(&devices_mutex);
+ return err;
+}
+
+static void ua101_disconnect(struct usb_interface *interface)
+{
+ struct ua101 *ua = usb_get_intfdata(interface);
+ struct list_head *midi;
+
+ if (!ua)
+ return;
+
+ mutex_lock(&devices_mutex);
+
+ set_bit(DISCONNECTED, &ua->states);
+ wake_up(&ua->rate_feedback_wait);
+
+ /* make sure that userspace cannot create new requests */
+ snd_card_disconnect(ua->card);
+
+ /* make sure that there are no pending USB requests */
+ __list_for_each(midi, &ua->midi_list)
+ snd_usbmidi_disconnect(midi);
+ abort_alsa_playback(ua);
+ abort_alsa_capture(ua);
+ mutex_lock(&ua->mutex);
+ stop_usb_playback(ua);
+ stop_usb_capture(ua);
+ mutex_unlock(&ua->mutex);
+
+ free_usb_related_resources(ua, interface);
+
+ devices_used &= ~(1 << ua->card_index);
+
+ snd_card_free_when_closed(ua->card);
+
+ mutex_unlock(&devices_mutex);
+}
+
+static struct usb_device_id ua101_ids[] = {
+#ifdef UA1A_HACK
+ { USB_DEVICE(0x0582, 0x0018) },
+#endif
+ { USB_DEVICE(0x0582, 0x007d) },
+ { USB_DEVICE(0x0582, 0x008d) },
+ { }
+};
+MODULE_DEVICE_TABLE(usb, ua101_ids);
+
+static struct usb_driver ua101_driver = {
+ .name = "snd-ua101",
+ .id_table = ua101_ids,
+ .probe = ua101_probe,
+ .disconnect = ua101_disconnect,
+#if 0
+ .suspend = ua101_suspend,
+ .resume = ua101_resume,
+#endif
+};
+
+static int __init alsa_card_ua101_init(void)
+{
+ return usb_register(&ua101_driver);
+}
+
+static void __exit alsa_card_ua101_exit(void)
+{
+ usb_deregister(&ua101_driver);
+ mutex_destroy(&devices_mutex);
+}
+
+module_init(alsa_card_ua101_init);
+module_exit(alsa_card_ua101_exit);
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index 4963defee18a..d870ef76c02a 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -44,7 +44,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>
-#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <sound/core.h>
@@ -170,11 +169,12 @@ struct snd_usb_substream {
unsigned int curpacksize; /* current packet size in bytes (for capture) */
unsigned int curframesize; /* current packet size in frames (for capture) */
unsigned int fill_max: 1; /* fill max packet size always */
+ unsigned int txfr_quirk:1; /* allow sub-frame alignment */
unsigned int fmt_type; /* USB audio format type (1-3) */
unsigned int running: 1; /* running status */
- unsigned int hwptr_done; /* processed frame position in the buffer */
+ unsigned int hwptr_done; /* processed byte position in the buffer */
unsigned int transfer_done; /* processed frames since last period update */
unsigned long active_mask; /* bitmask of active urbs */
unsigned long unlink_mask; /* bitmask of unlinked urbs */
@@ -343,7 +343,7 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
unsigned long flags;
unsigned char *cp;
int i;
- unsigned int stride, len, oldptr;
+ unsigned int stride, frames, bytes, oldptr;
int period_elapsed = 0;
stride = runtime->frame_bits >> 3;
@@ -354,29 +354,39 @@ static int retire_capture_urb(struct snd_usb_substream *subs,
snd_printd(KERN_ERR "frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
// continue;
}
- len = urb->iso_frame_desc[i].actual_length / stride;
- if (! len)
- continue;
+ bytes = urb->iso_frame_desc[i].actual_length;
+ frames = bytes / stride;
+ if (!subs->txfr_quirk)
+ bytes = frames * stride;
+ if (bytes % (runtime->sample_bits >> 3) != 0) {
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+ int oldbytes = bytes;
+#endif
+ bytes = frames * stride;
+ snd_printdd(KERN_ERR "Corrected urb data len. %d->%d\n",
+ oldbytes, bytes);
+ }
/* update the current pointer */
spin_lock_irqsave(&subs->lock, flags);
oldptr = subs->hwptr_done;
- subs->hwptr_done += len;
- if (subs->hwptr_done >= runtime->buffer_size)
- subs->hwptr_done -= runtime->buffer_size;
- subs->transfer_done += len;
+ subs->hwptr_done += bytes;
+ if (subs->hwptr_done >= runtime->buffer_size * stride)
+ subs->hwptr_done -= runtime->buffer_size * stride;
+ frames = (bytes + (oldptr % stride)) / stride;
+ subs->transfer_done += frames;
if (subs->transfer_done >= runtime->period_size) {
subs->transfer_done -= runtime->period_size;
period_elapsed = 1;
}
spin_unlock_irqrestore(&subs->lock, flags);
/* copy a data chunk */
- if (oldptr + len > runtime->buffer_size) {
- unsigned int cnt = runtime->buffer_size - oldptr;
- unsigned int blen = cnt * stride;
- memcpy(runtime->dma_area + oldptr * stride, cp, blen);
- memcpy(runtime->dma_area, cp + blen, len * stride - blen);
+ if (oldptr + bytes > runtime->buffer_size * stride) {
+ unsigned int bytes1 =
+ runtime->buffer_size * stride - oldptr;
+ memcpy(runtime->dma_area + oldptr, cp, bytes1);
+ memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
} else {
- memcpy(runtime->dma_area + oldptr * stride, cp, len * stride);
+ memcpy(runtime->dma_area + oldptr, cp, bytes);
}
}
if (period_elapsed)
@@ -563,24 +573,24 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
struct snd_pcm_runtime *runtime,
struct urb *urb)
{
- int i, stride, offs;
- unsigned int counts;
+ int i, stride;
+ unsigned int counts, frames, bytes;
unsigned long flags;
int period_elapsed = 0;
struct snd_urb_ctx *ctx = urb->context;
stride = runtime->frame_bits >> 3;
- offs = 0;
+ frames = 0;
urb->dev = ctx->subs->dev; /* we need to set this at each time */
urb->number_of_packets = 0;
spin_lock_irqsave(&subs->lock, flags);
for (i = 0; i < ctx->packets; i++) {
counts = snd_usb_audio_next_packet_size(subs);
/* set up descriptor */
- urb->iso_frame_desc[i].offset = offs * stride;
+ urb->iso_frame_desc[i].offset = frames * stride;
urb->iso_frame_desc[i].length = counts * stride;
- offs += counts;
+ frames += counts;
urb->number_of_packets++;
subs->transfer_done += counts;
if (subs->transfer_done >= runtime->period_size) {
@@ -590,7 +600,7 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
if (subs->transfer_done > 0) {
/* FIXME: fill-max mode is not
* supported yet */
- offs -= subs->transfer_done;
+ frames -= subs->transfer_done;
counts -= subs->transfer_done;
urb->iso_frame_desc[i].length =
counts * stride;
@@ -600,7 +610,7 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
if (i < ctx->packets) {
/* add a transfer delimiter */
urb->iso_frame_desc[i].offset =
- offs * stride;
+ frames * stride;
urb->iso_frame_desc[i].length = 0;
urb->number_of_packets++;
}
@@ -610,26 +620,25 @@ static int prepare_playback_urb(struct snd_usb_substream *subs,
if (period_elapsed) /* finish at the period boundary */
break;
}
- if (subs->hwptr_done + offs > runtime->buffer_size) {
+ bytes = frames * stride;
+ if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
/* err, the transferred area goes over buffer boundary. */
- unsigned int len = runtime->buffer_size - subs->hwptr_done;
+ unsigned int bytes1 =
+ runtime->buffer_size * stride - subs->hwptr_done;
memcpy(urb->transfer_buffer,
- runtime->dma_area + subs->hwptr_done * stride,
- len * stride);
- memcpy(urb->transfer_buffer + len * stride,
- runtime->dma_area,
- (offs - len) * stride);
+ runtime->dma_area + subs->hwptr_done, bytes1);
+ memcpy(urb->transfer_buffer + bytes1,
+ runtime->dma_area, bytes - bytes1);
} else {
memcpy(urb->transfer_buffer,
- runtime->dma_area + subs->hwptr_done * stride,
- offs * stride);
+ runtime->dma_area + subs->hwptr_done, bytes);
}
- subs->hwptr_done += offs;
- if (subs->hwptr_done >= runtime->buffer_size)
- subs->hwptr_done -= runtime->buffer_size;
- runtime->delay += offs;
+ subs->hwptr_done += bytes;
+ if (subs->hwptr_done >= runtime->buffer_size * stride)
+ subs->hwptr_done -= runtime->buffer_size * stride;
+ runtime->delay += frames;
spin_unlock_irqrestore(&subs->lock, flags);
- urb->transfer_buffer_length = offs * stride;
+ urb->transfer_buffer_length = bytes;
if (period_elapsed)
snd_pcm_period_elapsed(subs->pcm_substream);
return 0;
@@ -735,41 +744,6 @@ static void snd_complete_sync_urb(struct urb *urb)
}
-/* get the physical page pointer at the given offset */
-static struct page *snd_pcm_get_vmalloc_page(struct snd_pcm_substream *subs,
- unsigned long offset)
-{
- void *pageptr = subs->runtime->dma_area + offset;
- return vmalloc_to_page(pageptr);
-}
-
-/* allocate virtual buffer; may be called more than once */
-static int snd_pcm_alloc_vmalloc_buffer(struct snd_pcm_substream *subs, size_t size)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
- if (runtime->dma_area) {
- if (runtime->dma_bytes >= size)
- return 0; /* already large enough */
- vfree(runtime->dma_area);
- }
- runtime->dma_area = vmalloc_user(size);
- if (!runtime->dma_area)
- return -ENOMEM;
- runtime->dma_bytes = size;
- return 0;
-}
-
-/* free virtual buffer; may be called more than once */
-static int snd_pcm_free_vmalloc_buffer(struct snd_pcm_substream *subs)
-{
- struct snd_pcm_runtime *runtime = subs->runtime;
-
- vfree(runtime->dma_area);
- runtime->dma_area = NULL;
- return 0;
-}
-
-
/*
* unlink active urbs.
*/
@@ -937,18 +911,18 @@ static int wait_clear_urbs(struct snd_usb_substream *subs)
/*
- * return the current pcm pointer. just return the hwptr_done value.
+ * return the current pcm pointer. just based on the hwptr_done value.
*/
static snd_pcm_uframes_t snd_usb_pcm_pointer(struct snd_pcm_substream *substream)
{
struct snd_usb_substream *subs;
- snd_pcm_uframes_t hwptr_done;
+ unsigned int hwptr_done;
subs = (struct snd_usb_substream *)substream->runtime->private_data;
spin_lock(&subs->lock);
hwptr_done = subs->hwptr_done;
spin_unlock(&subs->lock);
- return hwptr_done;
+ return hwptr_done / (substream->runtime->frame_bits >> 3);
}
@@ -1307,6 +1281,47 @@ static int init_usb_sample_rate(struct usb_device *dev, int iface,
}
/*
+ * For E-Mu 0404USB/0202USB/TrackerPre sample rate should be set for device,
+ * not for interface.
+ */
+static void set_format_emu_quirk(struct snd_usb_substream *subs,
+ struct audioformat *fmt)
+{
+ unsigned char emu_samplerate_id = 0;
+
+ /* When capture is active
+ * sample rate shouldn't be changed
+ * by playback substream
+ */
+ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (subs->stream->substream[SNDRV_PCM_STREAM_CAPTURE].interface != -1)
+ return;
+ }
+
+ switch (fmt->rate_min) {
+ case 48000:
+ emu_samplerate_id = EMU_QUIRK_SR_48000HZ;
+ break;
+ case 88200:
+ emu_samplerate_id = EMU_QUIRK_SR_88200HZ;
+ break;
+ case 96000:
+ emu_samplerate_id = EMU_QUIRK_SR_96000HZ;
+ break;
+ case 176400:
+ emu_samplerate_id = EMU_QUIRK_SR_176400HZ;
+ break;
+ case 192000:
+ emu_samplerate_id = EMU_QUIRK_SR_192000HZ;
+ break;
+ default:
+ emu_samplerate_id = EMU_QUIRK_SR_44100HZ;
+ break;
+ }
+ snd_emuusb_set_samplerate(subs->stream->chip, emu_samplerate_id);
+}
+
+/*
* find a matching format and set up the interface
*/
static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
@@ -1419,6 +1434,14 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
subs->cur_audiofmt = fmt;
+ switch (subs->stream->chip->usb_id) {
+ case USB_ID(0x041e, 0x3f02): /* E-Mu 0202 USB */
+ case USB_ID(0x041e, 0x3f04): /* E-Mu 0404 USB */
+ case USB_ID(0x041e, 0x3f0a): /* E-Mu Tracker Pre */
+ set_format_emu_quirk(subs, fmt);
+ break;
+ }
+
#if 0
printk(KERN_DEBUG
"setting done: format = %d, rate = %d..%d, channels = %d\n",
@@ -1449,8 +1472,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
unsigned int channels, rate, format;
int ret, changed;
- ret = snd_pcm_alloc_vmalloc_buffer(substream,
- params_buffer_bytes(hw_params));
+ ret = snd_pcm_lib_alloc_vmalloc_buffer(substream,
+ params_buffer_bytes(hw_params));
if (ret < 0)
return ret;
@@ -1507,7 +1530,7 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
subs->period_bytes = 0;
if (!subs->stream->chip->shutdown)
release_substream_urbs(subs, 0);
- return snd_pcm_free_vmalloc_buffer(substream);
+ return snd_pcm_lib_free_vmalloc_buffer(substream);
}
/*
@@ -1936,7 +1959,7 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction)
struct snd_usb_stream *as = snd_pcm_substream_chip(substream);
struct snd_usb_substream *subs = &as->substream[direction];
- if (subs->interface >= 0) {
+ if (!as->chip->shutdown && subs->interface >= 0) {
usb_set_interface(subs->dev, subs->interface, 0);
subs->interface = -1;
}
@@ -1973,7 +1996,7 @@ static struct snd_pcm_ops snd_usb_playback_ops = {
.prepare = snd_usb_pcm_prepare,
.trigger = snd_usb_pcm_playback_trigger,
.pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_get_vmalloc_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
static struct snd_pcm_ops snd_usb_capture_ops = {
@@ -1985,7 +2008,7 @@ static struct snd_pcm_ops snd_usb_capture_ops = {
.prepare = snd_usb_pcm_prepare,
.trigger = snd_usb_pcm_capture_trigger,
.pointer = snd_usb_pcm_pointer,
- .page = snd_pcm_get_vmalloc_page,
+ .page = snd_pcm_lib_get_vmalloc_page,
};
@@ -2227,6 +2250,7 @@ static void init_substream(struct snd_usb_stream *as, int stream, struct audiofo
subs->stream = as;
subs->direction = stream;
subs->dev = as->chip->dev;
+ subs->txfr_quirk = as->chip->txfr_quirk;
if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL) {
subs->ops = audio_urb_ops[stream];
} else {
@@ -3142,59 +3166,6 @@ static int create_ua1000_quirk(struct snd_usb_audio *chip,
return 0;
}
-/*
- * Create a stream for an Edirol UA-101 interface.
- * Copy, paste and modify from Edirol UA-1000
- */
-static int create_ua101_quirk(struct snd_usb_audio *chip,
- struct usb_interface *iface,
- const struct snd_usb_audio_quirk *quirk)
-{
- static const struct audioformat ua101_format = {
- .format = SNDRV_PCM_FORMAT_S32_LE,
- .fmt_type = USB_FORMAT_TYPE_I,
- .altsetting = 1,
- .altset_idx = 1,
- .attributes = 0,
- .rates = SNDRV_PCM_RATE_CONTINUOUS,
- };
- struct usb_host_interface *alts;
- struct usb_interface_descriptor *altsd;
- struct audioformat *fp;
- int stream, err;
-
- if (iface->num_altsetting != 2)
- return -ENXIO;
- alts = &iface->altsetting[1];
- altsd = get_iface_desc(alts);
- if (alts->extralen != 18 || alts->extra[1] != USB_DT_CS_INTERFACE ||
- altsd->bNumEndpoints != 1)
- return -ENXIO;
-
- fp = kmemdup(&ua101_format, sizeof(*fp), GFP_KERNEL);
- if (!fp)
- return -ENOMEM;
-
- fp->channels = alts->extra[11];
- fp->iface = altsd->bInterfaceNumber;
- fp->endpoint = get_endpoint(alts, 0)->bEndpointAddress;
- fp->ep_attr = get_endpoint(alts, 0)->bmAttributes;
- fp->datainterval = parse_datainterval(chip, alts);
- fp->maxpacksize = le16_to_cpu(get_endpoint(alts, 0)->wMaxPacketSize);
- fp->rate_max = fp->rate_min = combine_triple(&alts->extra[15]);
-
- stream = (fp->endpoint & USB_DIR_IN)
- ? SNDRV_PCM_STREAM_CAPTURE : SNDRV_PCM_STREAM_PLAYBACK;
- err = add_audio_endpoint(chip, stream, fp);
- if (err < 0) {
- kfree(fp);
- return err;
- }
- /* FIXME: playback must be synchronized to capture */
- usb_set_interface(chip->dev, fp->iface, 0);
- return 0;
-}
-
static int snd_usb_create_quirk(struct snd_usb_audio *chip,
struct usb_interface *iface,
const struct snd_usb_audio_quirk *quirk);
@@ -3232,6 +3203,18 @@ static int ignore_interface_quirk(struct snd_usb_audio *chip,
return 0;
}
+/*
+ * Allow alignment on audio sub-slot (channel samples) rather than
+ * on audio slots (audio frames)
+ */
+static int create_align_transfer_quirk(struct snd_usb_audio *chip,
+ struct usb_interface *iface,
+ const struct snd_usb_audio_quirk *quirk)
+{
+ chip->txfr_quirk = 1;
+ return 1; /* Continue with creating streams and mixer */
+}
+
/*
* boot quirks
@@ -3406,8 +3389,8 @@ static int snd_usb_create_quirk(struct snd_usb_audio *chip,
[QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
[QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk,
[QUIRK_AUDIO_EDIROL_UA1000] = create_ua1000_quirk,
- [QUIRK_AUDIO_EDIROL_UA101] = create_ua101_quirk,
- [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk
+ [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk,
+ [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk
};
if (quirk->type < QUIRK_TYPE_COUNT) {
@@ -3661,6 +3644,7 @@ static void *snd_usb_audio_probe(struct usb_device *dev,
}
}
+ chip->txfr_quirk = 0;
err = 1; /* continue */
if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) {
/* need some special handlings */
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 40ba8115fb81..9d8cea48fc5f 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -125,6 +125,7 @@ struct snd_usb_audio {
struct snd_card *card;
u32 usb_id;
int shutdown;
+ unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
int num_interfaces;
int num_suspended_intf;
@@ -159,8 +160,8 @@ enum quirk_type {
QUIRK_AUDIO_STANDARD_INTERFACE,
QUIRK_AUDIO_FIXED_ENDPOINT,
QUIRK_AUDIO_EDIROL_UA1000,
- QUIRK_AUDIO_EDIROL_UA101,
QUIRK_AUDIO_EDIROL_UAXX,
+ QUIRK_AUDIO_ALIGN_TRANSFER,
QUIRK_TYPE_COUNT
};
@@ -209,6 +210,16 @@ struct snd_usb_midi_endpoint_info {
/*
*/
+/*E-mu USB samplerate control quirk*/
+enum {
+ EMU_QUIRK_SR_44100HZ = 0,
+ EMU_QUIRK_SR_48000HZ,
+ EMU_QUIRK_SR_88200HZ,
+ EMU_QUIRK_SR_96000HZ,
+ EMU_QUIRK_SR_176400HZ,
+ EMU_QUIRK_SR_192000HZ
+};
+
#define combine_word(s) ((*(s)) | ((unsigned int)(s)[1] << 8))
#define combine_triple(s) (combine_word(s) | ((unsigned int)(s)[2] << 16))
#define combine_quad(s) (combine_triple(s) | ((unsigned int)(s)[3] << 24))
@@ -234,6 +245,9 @@ void snd_usbmidi_input_stop(struct list_head* p);
void snd_usbmidi_input_start(struct list_head* p);
void snd_usbmidi_disconnect(struct list_head *p);
+void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
+ unsigned char samplerate_id);
+
/*
* retrieve usb_interface descriptor from the host interface
* (conditional for compatibility with the older API)
diff --git a/sound/usb/usbmixer.c b/sound/usb/usbmixer.c
index c998220b99c6..f5596cfdbde1 100644
--- a/sound/usb/usbmixer.c
+++ b/sound/usb/usbmixer.c
@@ -186,6 +186,21 @@ enum {
USB_PROC_DCR_RELEASE = 6,
};
+/*E-mu 0202(0404) eXtension Unit(XU) control*/
+enum {
+ USB_XU_CLOCK_RATE = 0xe301,
+ USB_XU_CLOCK_SOURCE = 0xe302,
+ USB_XU_DIGITAL_IO_STATUS = 0xe303,
+ USB_XU_DEVICE_OPTIONS = 0xe304,
+ USB_XU_DIRECT_MONITORING = 0xe305,
+ USB_XU_METERING = 0xe306
+};
+enum {
+ USB_XU_CLOCK_SOURCE_SELECTOR = 0x02, /* clock source*/
+ USB_XU_CLOCK_RATE_SELECTOR = 0x03, /* clock rate */
+ USB_XU_DIGITAL_FORMAT_SELECTOR = 0x01, /* the spdif format */
+ USB_XU_SOFT_LIMIT_SELECTOR = 0x03 /* soft limiter */
+};
/*
* manual mapping of mixer names
@@ -1330,7 +1345,32 @@ static struct procunit_info procunits[] = {
{ USB_PROC_DCR, "DCR", dcr_proc_info },
{ 0 },
};
-
+/*
+ * predefined data for extension units
+ */
+static struct procunit_value_info clock_rate_xu_info[] = {
+ { USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 },
+ { 0 }
+};
+static struct procunit_value_info clock_source_xu_info[] = {
+ { USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN },
+ { 0 }
+};
+static struct procunit_value_info spdif_format_xu_info[] = {
+ { USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN },
+ { 0 }
+};
+static struct procunit_value_info soft_limit_xu_info[] = {
+ { USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN },
+ { 0 }
+};
+static struct procunit_info extunits[] = {
+ { USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info },
+ { USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info },
+ { USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info },
+ { USB_XU_DEVICE_OPTIONS, "AnalogueIn Soft Limit", soft_limit_xu_info },
+ { 0 }
+};
/*
* build a processing/extension unit
*/
@@ -1391,8 +1431,18 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, unsigned
cval->max = dsc[15];
cval->res = 1;
cval->initialized = 1;
- } else
- get_min_max(cval, valinfo->min_value);
+ } else {
+ if (type == USB_XU_CLOCK_RATE) {
+ /* E-Mu USB 0404/0202/TrackerPre
+ * samplerate control quirk
+ */
+ cval->min = 0;
+ cval->max = 5;
+ cval->res = 1;
+ cval->initialized = 1;
+ } else
+ get_min_max(cval, valinfo->min_value);
+ }
kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
if (! kctl) {
@@ -1433,7 +1483,7 @@ static int parse_audio_processing_unit(struct mixer_build *state, int unitid, un
static int parse_audio_extension_unit(struct mixer_build *state, int unitid, unsigned char *desc)
{
- return build_audio_procunit(state, unitid, desc, NULL, "Extension Unit");
+ return build_audio_procunit(state, unitid, desc, extunits, "Extension Unit");
}
@@ -2109,6 +2159,23 @@ static int snd_xonar_u1_controls_create(struct usb_mixer_interface *mixer)
return 0;
}
+void snd_emuusb_set_samplerate(struct snd_usb_audio *chip,
+ unsigned char samplerate_id)
+{
+ struct usb_mixer_interface *mixer;
+ struct usb_mixer_elem_info *cval;
+ int unitid = 12; /* SamleRate ExtensionUnit ID */
+
+ list_for_each_entry(mixer, &chip->mixer_list, list) {
+ cval = mixer->id_elems[unitid];
+ if (cval) {
+ set_cur_ctl_value(cval, cval->control << 8, samplerate_id);
+ snd_usb_mixer_notify_id(mixer, unitid);
+ }
+ break;
+ }
+}
+
int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
int ignore_error)
{
diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
index a892bda03df9..65bbd22f2e0c 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -1266,37 +1266,6 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
}
},
-/* Roland UA-101 in High-Speed Mode only */
-{
- USB_DEVICE(0x0582, 0x007d),
- .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
- .vendor_name = "Roland",
- .product_name = "UA-101",
- .ifnum = QUIRK_ANY_INTERFACE,
- .type = QUIRK_COMPOSITE,
- .data = (const struct snd_usb_audio_quirk[]) {
- {
- .ifnum = 0,
- .type = QUIRK_AUDIO_EDIROL_UA101
- },
- {
- .ifnum = 1,
- .type = QUIRK_AUDIO_EDIROL_UA101
- },
- {
- .ifnum = 2,
- .type = QUIRK_MIDI_FIXED_ENDPOINT,
- .data = & (const struct snd_usb_midi_endpoint_info) {
- .out_cables = 0x0001,
- .in_cables = 0x0001
- }
- },
- {
- .ifnum = -1
- }
- }
- }
-},
{
/* has ID 0x0081 when not in "Advanced Driver" mode */
USB_DEVICE(0x0582, 0x0080),
@@ -2105,6 +2074,120 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},
+/* Hauppauge HVR-950Q and HVR-850 */
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7200),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-950Q",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7201),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-950Q",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7202),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-950Q",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7203),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-950Q",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7204),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-950Q",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7205),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-950Q",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7250),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-950Q",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+{
+ USB_DEVICE_VENDOR_SPEC(0x2040, 0x7230),
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_CLASS |
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+ .bInterfaceClass = USB_CLASS_AUDIO,
+ .bInterfaceSubClass = USB_SUBCLASS_AUDIO_CONTROL,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Hauppauge",
+ .product_name = "HVR-850",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_AUDIO_ALIGN_TRANSFER,
+ }
+},
+
{
/*
* Some USB MIDI devices don't have an audio control interface,
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 652a470b5f74..7c846424aebf 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -357,6 +357,7 @@ LIB_H += util/event.h
LIB_H += util/exec_cmd.h
LIB_H += util/types.h
LIB_H += util/levenshtein.h
+LIB_H += util/map.h
LIB_H += util/parse-options.h
LIB_H += util/parse-events.h
LIB_H += util/quote.h
@@ -423,8 +424,8 @@ LIB_OBJS += util/trace-event-perl.o
LIB_OBJS += util/svghelper.o
LIB_OBJS += util/sort.o
LIB_OBJS += util/hist.o
-LIB_OBJS += util/data_map.o
LIB_OBJS += util/probe-event.o
+LIB_OBJS += util/util.o
BUILTIN_OBJS += builtin-annotate.o
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 593ff25006de..117bbae844bf 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -451,10 +451,10 @@ static void perf_session__find_annotations(struct perf_session *self)
}
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_mmap_event = event__process_mmap,
- .process_comm_event = event__process_comm,
- .process_fork_event = event__process_task,
+ .sample = process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .fork = event__process_task,
};
static int __cmd_annotate(void)
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index bd71b8ceafb7..1cbecaf029fa 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -66,12 +66,12 @@ static int diff__process_sample_event(event_t *event, struct perf_session *sessi
}
static struct perf_event_ops event_ops = {
- .process_sample_event = diff__process_sample_event,
- .process_mmap_event = event__process_mmap,
- .process_comm_event = event__process_comm,
- .process_exit_event = event__process_task,
- .process_fork_event = event__process_task,
- .process_lost_event = event__process_lost,
+ .sample = diff__process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .exit = event__process_task,
+ .fork = event__process_task,
+ .lost = event__process_lost,
};
static void perf_session__insert_hist_entry_by_name(struct rb_root *root,
@@ -204,7 +204,7 @@ static const struct option options[] = {
OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
"load module symbols - WARNING: use only with -k and LIVE kernel"),
- OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
+ OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
"Don't shorten the pathnames taking into account the cwd"),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 9f810b17c25c..e427d6965e0c 100644
--- a/tools/perf/builtin-help.c
+++ b/tools/perf/builtin-help.c
@@ -286,8 +286,7 @@ void list_common_cmds_help(void)
puts(" The most commonly used perf commands are:");
for (i = 0; i < ARRAY_SIZE(common_cmds); i++) {
- printf(" %s ", common_cmds[i].name);
- mput_char(' ', longest - strlen(common_cmds[i].name));
+ printf(" %-*s ", longest, common_cmds[i].name);
puts(common_cmds[i].help);
}
}
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 7ceb7416c316..88c570c18e3e 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -92,23 +92,18 @@ static void setup_cpunode_map(void)
if (!dir1)
return;
- while (true) {
- dent1 = readdir(dir1);
- if (!dent1)
- break;
-
- if (sscanf(dent1->d_name, "node%u", &mem) < 1)
+ while ((dent1 = readdir(dir1)) != NULL) {
+ if (dent1->d_type != DT_DIR ||
+ sscanf(dent1->d_name, "node%u", &mem) < 1)
continue;
snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
dir2 = opendir(buf);
if (!dir2)
continue;
- while (true) {
- dent2 = readdir(dir2);
- if (!dent2)
- break;
- if (sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
+ while ((dent2 = readdir(dir2)) != NULL) {
+ if (dent2->d_type != DT_LNK ||
+ sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
continue;
cpunode_map[cpu] = mem;
}
@@ -342,22 +337,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
return 0;
}
-static int sample_type_check(struct perf_session *session)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr,
- "No trace sample to read. Did you call perf record "
- "without -R?");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
};
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
@@ -504,11 +486,14 @@ static void sort_result(void)
static int __cmd_kmem(void)
{
- int err;
+ int err = -EINVAL;
struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
if (session == NULL)
return -ENOMEM;
+ if (!perf_session__has_traces(session, "kmem record"))
+ goto out_delete;
+
setup_pager();
err = perf_session__process_events(session, &event_ops);
if (err != 0)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index db10c0e8ecae..508934b0140a 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -156,14 +156,14 @@ static int process_read_event(event_t *event, struct perf_session *session __use
return 0;
}
-static int sample_type_check(struct perf_session *session)
+static int perf_session__setup_sample_type(struct perf_session *self)
{
- if (!(session->sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ if (!(self->sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
fprintf(stderr, "selected --sort parent, but no"
" callchain data. Did you call"
" perf record without -g?\n");
- return -1;
+ return -EINVAL;
}
if (symbol_conf.use_callchain) {
fprintf(stderr, "selected -g but no callchain data."
@@ -176,7 +176,7 @@ static int sample_type_check(struct perf_session *session)
if (register_callchain_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain"
" params\n");
- return -1;
+ return -EINVAL;
}
}
@@ -184,20 +184,18 @@ static int sample_type_check(struct perf_session *session)
}
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_mmap_event = event__process_mmap,
- .process_comm_event = event__process_comm,
- .process_exit_event = event__process_task,
- .process_fork_event = event__process_task,
- .process_lost_event = event__process_lost,
- .process_read_event = process_read_event,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .mmap = event__process_mmap,
+ .comm = event__process_comm,
+ .exit = event__process_task,
+ .fork = event__process_task,
+ .lost = event__process_lost,
+ .read = process_read_event,
};
-
static int __cmd_report(void)
{
- int ret;
+ int ret = -EINVAL;
struct perf_session *session;
session = perf_session__new(input_name, O_RDONLY, force);
@@ -207,6 +205,10 @@ static int __cmd_report(void)
if (show_threads)
perf_read_values_init(&show_threads_values);
+ ret = perf_session__setup_sample_type(session);
+ if (ret)
+ goto out_delete;
+
ret = perf_session__process_events(session, &event_ops);
if (ret)
goto out_delete;
@@ -319,7 +321,7 @@ static const struct option options[] = {
"pretty printing style key: normal raw"),
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
"sort by key(s): pid, comm, dso, symbol, parent"),
- OPT_BOOLEAN('P', "full-paths", &event_ops.full_paths,
+ OPT_BOOLEAN('P', "full-paths", &symbol_conf.full_paths,
"Don't shorten the pathnames taking into account the cwd"),
OPT_STRING('p', "parent", &parent_pattern, "regex",
"regex filter to identify parent, see: '--sort parent'"),
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 80209df6cfe8..702322f8fec1 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1653,33 +1653,22 @@ static int process_lost_event(event_t *event __used,
return 0;
}
-static int sample_type_check(struct perf_session *session __used)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr,
- "No trace sample to read. Did you call perf record "
- "without -R?");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
- .process_lost_event = process_lost_event,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
+ .lost = process_lost_event,
};
static int read_events(void)
{
- int err;
+ int err = -EINVAL;
struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
if (session == NULL)
return -ENOMEM;
- err = perf_session__process_events(session, &event_ops);
+ if (perf_session__has_traces(session, "record -R"))
+ err = perf_session__process_events(session, &event_ops);
+
perf_session__delete(session);
return err;
}
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index a589a43112d6..5b68d81d93a1 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1029,33 +1029,24 @@ static void process_samples(struct perf_session *session)
}
}
-static int sample_type_check(struct perf_session *session)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr, "No trace samples found in the file.\n"
- "Have you used 'perf timechart record' to record it?\n");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_comm_event = process_comm_event,
- .process_fork_event = process_fork_event,
- .process_exit_event = process_exit_event,
- .process_sample_event = queue_sample_event,
- .sample_type_check = sample_type_check,
+ .comm = process_comm_event,
+ .fork = process_fork_event,
+ .exit = process_exit_event,
+ .sample = queue_sample_event,
};
static int __cmd_timechart(void)
{
struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
- int ret;
+ int ret = -EINVAL;
if (session == NULL)
return -ENOMEM;
+ if (!perf_session__has_traces(session, "timechart record"))
+ goto out_delete;
+
ret = perf_session__process_events(session, &event_ops);
if (ret)
goto out_delete;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 574a215e800b..1831434aa938 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -103,22 +103,9 @@ static int process_sample_event(event_t *event, struct perf_session *session)
return 0;
}
-static int sample_type_check(struct perf_session *session)
-{
- if (!(session->sample_type & PERF_SAMPLE_RAW)) {
- fprintf(stderr,
- "No trace sample to read. Did you call perf record "
- "without -R?");
- return -1;
- }
-
- return 0;
-}
-
static struct perf_event_ops event_ops = {
- .process_sample_event = process_sample_event,
- .process_comm_event = event__process_comm,
- .sample_type_check = sample_type_check,
+ .sample = process_sample_event,
+ .comm = event__process_comm,
};
static int __cmd_trace(struct perf_session *session)
@@ -592,6 +579,9 @@ int cmd_trace(int argc, const char **argv, const char *prefix __used)
if (session == NULL)
return -ENOMEM;
+ if (!perf_session__has_traces(session, "record -R"))
+ return -EINVAL;
+
if (generate_script_lang) {
struct stat perf_stat;
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 873e55fab375..fc89005c3e51 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -388,7 +388,7 @@ static int run_argv(int *argcp, const char ***argv)
/* mini /proc/mounts parser: searching for "^blah /mount/point debugfs" */
static void get_debugfs_mntpt(void)
{
- const char *path = debugfs_find_mountpoint();
+ const char *path = debugfs_mount(NULL);
if (path)
strncpy(debugfs_mntpt, path, sizeof(debugfs_mntpt));
diff --git a/tools/perf/util/data_map.c b/tools/perf/util/data_map.c
deleted file mode 100644
index b557b836de3d..000000000000
--- a/tools/perf/util/data_map.c
+++ /dev/null
@@ -1,252 +0,0 @@
-#include "symbol.h"
-#include "util.h"
-#include "debug.h"
-#include "thread.h"
-#include "session.h"
-
-static int process_event_stub(event_t *event __used,
- struct perf_session *session __used)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
-{
- if (!handler->process_sample_event)
- handler->process_sample_event = process_event_stub;
- if (!handler->process_mmap_event)
- handler->process_mmap_event = process_event_stub;
- if (!handler->process_comm_event)
- handler->process_comm_event = process_event_stub;
- if (!handler->process_fork_event)
- handler->process_fork_event = process_event_stub;
- if (!handler->process_exit_event)
- handler->process_exit_event = process_event_stub;
- if (!handler->process_lost_event)
- handler->process_lost_event = process_event_stub;
- if (!handler->process_read_event)
- handler->process_read_event = process_event_stub;
- if (!handler->process_throttle_event)
- handler->process_throttle_event = process_event_stub;
- if (!handler->process_unthrottle_event)
- handler->process_unthrottle_event = process_event_stub;
-}
-
-static const char *event__name[] = {
- [0] = "TOTAL",
- [PERF_RECORD_MMAP] = "MMAP",
- [PERF_RECORD_LOST] = "LOST",
- [PERF_RECORD_COMM] = "COMM",
- [PERF_RECORD_EXIT] = "EXIT",
- [PERF_RECORD_THROTTLE] = "THROTTLE",
- [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
- [PERF_RECORD_FORK] = "FORK",
- [PERF_RECORD_READ] = "READ",
- [PERF_RECORD_SAMPLE] = "SAMPLE",
-};
-
-unsigned long event__total[PERF_RECORD_MAX];
-
-void event__print_totals(void)
-{
- int i;
- for (i = 0; i < PERF_RECORD_MAX; ++i)
- pr_info("%10s events: %10ld\n",
- event__name[i], event__total[i]);
-}
-
-static int process_event(event_t *event, struct perf_session *session,
- struct perf_event_ops *ops,
- unsigned long offset, unsigned long head)
-{
- trace_event(event);
-
- if (event->header.type < PERF_RECORD_MAX) {
- dump_printf("%p [%p]: PERF_RECORD_%s",
- (void *)(offset + head),
- (void *)(long)(event->header.size),
- event__name[event->header.type]);
- ++event__total[0];
- ++event__total[event->header.type];
- }
-
- switch (event->header.type) {
- case PERF_RECORD_SAMPLE:
- return ops->process_sample_event(event, session);
- case PERF_RECORD_MMAP:
- return ops->process_mmap_event(event, session);
- case PERF_RECORD_COMM:
- return ops->process_comm_event(event, session);
- case PERF_RECORD_FORK:
- return ops->process_fork_event(event, session);
- case PERF_RECORD_EXIT:
- return ops->process_exit_event(event, session);
- case PERF_RECORD_LOST:
- return ops->process_lost_event(event, session);
- case PERF_RECORD_READ:
- return ops->process_read_event(event, session);
- case PERF_RECORD_THROTTLE:
- return ops->process_throttle_event(event, session);
- case PERF_RECORD_UNTHROTTLE:
- return ops->process_unthrottle_event(event, session);
- default:
- ops->total_unknown++;
- return -1;
- }
-}
-
-int perf_header__read_build_ids(int input, u64 offset, u64 size)
-{
- struct build_id_event bev;
- char filename[PATH_MAX];
- u64 limit = offset + size;
- int err = -1;
-
- while (offset < limit) {
- struct dso *dso;
- ssize_t len;
-
- if (read(input, &bev, sizeof(bev)) != sizeof(bev))
- goto out;
-
- len = bev.header.size - sizeof(bev);
- if (read(input, filename, len) != len)
- goto out;
-
- dso = dsos__findnew(filename);
- if (dso != NULL)
- dso__set_build_id(dso, &bev.build_id);
-
- offset += bev.header.size;
- }
- err = 0;
-out:
- return err;
-}
-
-static struct thread *perf_session__register_idle_thread(struct perf_session *self)
-{
- struct thread *thread = perf_session__findnew(self, 0);
-
- if (!thread || thread__set_comm(thread, "swapper")) {
- pr_err("problem inserting idle task.\n");
- thread = NULL;
- }
-
- return thread;
-}
-
-int perf_session__process_events(struct perf_session *self,
- struct perf_event_ops *ops)
-{
- int err;
- unsigned long head, shift;
- unsigned long offset = 0;
- size_t page_size;
- event_t *event;
- uint32_t size;
- char *buf;
-
- if (perf_session__register_idle_thread(self) == NULL)
- return -ENOMEM;
-
- perf_event_ops__fill_defaults(ops);
-
- page_size = getpagesize();
-
- head = self->header.data_offset;
- self->sample_type = perf_header__sample_type(&self->header);
-
- err = -EINVAL;
- if (ops->sample_type_check && ops->sample_type_check(self) < 0)
- goto out_err;
-
- if (!ops->full_paths) {
- char bf[PATH_MAX];
-
- if (getcwd(bf, sizeof(bf)) == NULL) {
- err = -errno;
-out_getcwd_err:
- pr_err("failed to get the current directory\n");
- goto out_err;
- }
- self->cwd = strdup(bf);
- if (self->cwd == NULL) {
- err = -ENOMEM;
- goto out_getcwd_err;
- }
- self->cwdlen = strlen(self->cwd);
- }
-
- shift = page_size * (head / page_size);
- offset += shift;
- head -= shift;
-
-remap:
- buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
- MAP_SHARED, self->fd, offset);
- if (buf == MAP_FAILED) {
- pr_err("failed to mmap file\n");
- err = -errno;
- goto out_err;
- }
-
-more:
- event = (event_t *)(buf + head);
-
- size = event->header.size;
- if (!size)
- size = 8;
-
- if (head + event->header.size >= page_size * self->mmap_window) {
- int munmap_ret;
-
- shift = page_size * (head / page_size);
-
- munmap_ret = munmap(buf, page_size * self->mmap_window);
- assert(munmap_ret == 0);
-
- offset += shift;
- head -= shift;
- goto remap;
- }
-
- size = event->header.size;
-
- dump_printf("\n%p [%p]: event: %d\n",
- (void *)(offset + head),
- (void *)(long)event->header.size,
- event->header.type);
-
- if (!size || process_event(event, self, ops, offset, head) < 0) {
-
- dump_printf("%p [%p]: skipping unknown header type: %d\n",
- (void *)(offset + head),
- (void *)(long)(event->header.size),
- event->header.type);
-
- /*
- * assume we lost track of the stream, check alignment, and
- * increment a single u64 in the hope to catch on again 'soon'.
- */
-
- if (unlikely(head & 7))
- head &= ~7ULL;
-
- size = 8;
- }
-
- head += size;
-
- if (offset + head >= self->header.data_offset + self->header.data_size)
- goto done;
-
- if (offset + head < self->size)
- goto more;
-
-done:
- err = 0;
-out_err:
- return err;
-}
diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c
index 28d520d5a1fb..0905600c3851 100644
--- a/tools/perf/util/debug.c
+++ b/tools/perf/util/debug.c
@@ -9,6 +9,7 @@
#include "color.h"
#include "event.h"
#include "debug.h"
+#include "util.h"
int verbose = 0;
int dump_trace = 0;
diff --git a/tools/perf/util/debugfs.c b/tools/perf/util/debugfs.c
index 06b73ee02c49..a88fefc0cc0a 100644
--- a/tools/perf/util/debugfs.c
+++ b/tools/perf/util/debugfs.c
@@ -106,16 +106,14 @@ int debugfs_valid_entry(const char *path)
return 0;
}
-/* mount the debugfs somewhere */
+/* mount the debugfs somewhere if it's not mounted */
-int debugfs_mount(const char *mountpoint)
+char *debugfs_mount(const char *mountpoint)
{
- char mountcmd[128];
-
/* see if it's already mounted */
if (debugfs_find_mountpoint()) {
debugfs_premounted = 1;
- return 0;
+ return debugfs_mountpoint;
}
/* if not mounted and no argument */
@@ -127,13 +125,14 @@ int debugfs_mount(const char *mountpoint)
mountpoint = "/sys/kernel/debug";
}
+ if (mount(NULL, mountpoint, "debugfs", 0, NULL) < 0)
+ return NULL;
+
/* save the mountpoint */
strncpy(debugfs_mountpoint, mountpoint, sizeof(debugfs_mountpoint));
+ debugfs_found = 1;
- /* mount it */
- snprintf(mountcmd, sizeof(mountcmd),
- "/bin/mount -t debugfs debugfs %s", mountpoint);
- return system(mountcmd);
+ return debugfs_mountpoint;
}
/* umount the debugfs */
diff --git a/tools/perf/util/debugfs.h b/tools/perf/util/debugfs.h
index 3cd14f9ae784..83a02879745f 100644
--- a/tools/perf/util/debugfs.h
+++ b/tools/perf/util/debugfs.h
@@ -15,7 +15,7 @@
extern const char *debugfs_find_mountpoint(void);
extern int debugfs_valid_mountpoint(const char *debugfs);
extern int debugfs_valid_entry(const char *path);
-extern int debugfs_mount(const char *mountpoint);
+extern char *debugfs_mount(const char *mountpoint);
extern int debugfs_umount(void);
extern int debugfs_write(const char *entry, const char *value);
extern int debugfs_read(const char *entry, char *buffer, size_t size);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 690a96d0467c..80fb3653c809 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,10 +1,10 @@
#ifndef __PERF_RECORD_H
#define __PERF_RECORD_H
+#include <limits.h>
+
#include "../perf.h"
-#include "util.h"
-#include <linux/list.h>
-#include <linux/rbtree.h>
+#include "map.h"
/*
* PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
@@ -101,67 +101,8 @@ struct events_stats {
void event__print_totals(void);
-enum map_type {
- MAP__FUNCTION = 0,
- MAP__VARIABLE,
-};
-
-#define MAP__NR_TYPES (MAP__VARIABLE + 1)
-
-struct map {
- union {
- struct rb_node rb_node;
- struct list_head node;
- };
- u64 start;
- u64 end;
- enum map_type type;
- u64 pgoff;
- u64 (*map_ip)(struct map *, u64);
- u64 (*unmap_ip)(struct map *, u64);
- struct dso *dso;
-};
-
-static inline u64 map__map_ip(struct map *map, u64 ip)
-{
- return ip - map->start + map->pgoff;
-}
-
-static inline u64 map__unmap_ip(struct map *map, u64 ip)
-{
- return ip + map->start - map->pgoff;
-}
-
-static inline u64 identity__map_ip(struct map *map __used, u64 ip)
-{
- return ip;
-}
-
-struct symbol;
-
-typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
-
-void map__init(struct map *self, enum map_type type,
- u64 start, u64 end, u64 pgoff, struct dso *dso);
-struct map *map__new(struct mmap_event *event, enum map_type,
- char *cwd, int cwdlen);
-void map__delete(struct map *self);
-struct map *map__clone(struct map *self);
-int map__overlap(struct map *l, struct map *r);
-size_t map__fprintf(struct map *self, FILE *fp);
-
struct perf_session;
-int map__load(struct map *self, struct perf_session *session,
- symbol_filter_t filter);
-struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
- u64 addr, symbol_filter_t filter);
-struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
- struct perf_session *session,
- symbol_filter_t filter);
-void map__fixup_start(struct map *self);
-void map__fixup_end(struct map *self);
-
int event__synthesize_thread(pid_t pid,
int (*process)(event_t *event,
struct perf_session *session),
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 8a0bca55106f..df237c3a041b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -169,20 +169,23 @@ static int do_write(int fd, const void *buf, size_t size)
return 0;
}
+#define dsos__for_each_with_build_id(pos, head) \
+ list_for_each_entry(pos, head, node) \
+ if (!pos->has_build_id) \
+ continue; \
+ else
+
static int __dsos__write_buildid_table(struct list_head *head, int fd)
{
#define NAME_ALIGN 64
struct dso *pos;
static const char zero_buf[NAME_ALIGN];
- list_for_each_entry(pos, head, node) {
+ dsos__for_each_with_build_id(pos, head) {
int err;
struct build_id_event b;
- size_t len;
+ size_t len = pos->long_name_len + 1;
- if (!pos->has_build_id)
- continue;
- len = pos->long_name_len + 1;
len = ALIGN(len, NAME_ALIGN);
memset(&b, 0, sizeof(b));
memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
@@ -209,6 +212,74 @@ static int dsos__write_buildid_table(int fd)
return err;
}
+static int dso__cache_build_id(struct dso *self, const char *debugdir)
+{
+ const size_t size = PATH_MAX;
+ char *filename = malloc(size),
+ *linkname = malloc(size), *targetname, *sbuild_id;
+ int len, err = -1;
+
+ if (filename == NULL || linkname == NULL)
+ goto out_free;
+
+ len = snprintf(filename, size, "%s%s", debugdir, self->long_name);
+ if (mkdir_p(filename, 0755))
+ goto out_free;
+
+ len += snprintf(filename + len, sizeof(filename) - len, "/");
+ sbuild_id = filename + len;
+ build_id__sprintf(self->build_id, sizeof(self->build_id), sbuild_id);
+
+ if (access(filename, F_OK) && link(self->long_name, filename) &&
+ copyfile(self->long_name, filename))
+ goto out_free;
+
+ len = snprintf(linkname, size, "%s/.build-id/%.2s",
+ debugdir, sbuild_id);
+
+ if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
+ goto out_free;
+
+ snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
+ targetname = filename + strlen(debugdir) - 5;
+ memcpy(targetname, "../..", 5);
+
+ if (symlink(targetname, linkname) == 0)
+ err = 0;
+out_free:
+ free(filename);
+ free(linkname);
+ return err;
+}
+
+static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
+{
+ struct dso *pos;
+ int err = 0;
+
+ dsos__for_each_with_build_id(pos, head)
+ if (dso__cache_build_id(pos, debugdir))
+ err = -1;
+
+ return err;
+}
+
+static int dsos__cache_build_ids(void)
+{
+ int err_kernel, err_user;
+ char debugdir[PATH_MAX];
+
+ snprintf(debugdir, sizeof(debugdir), "%s/%s", getenv("HOME"),
+ DEBUG_CACHE_DIR);
+
+ if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+ return -1;
+
+ err_kernel = __dsos__cache_build_ids(&dsos__kernel, debugdir);
+ err_user = __dsos__cache_build_ids(&dsos__user, debugdir);
+ return err_kernel || err_user ? -1 : 0;
+}
+
static int perf_header__adds_write(struct perf_header *self, int fd)
{
int nr_sections;
@@ -258,6 +329,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
goto out_free;
}
buildid_sec->size = lseek(fd, 0, SEEK_CUR) - buildid_sec->offset;
+ dsos__cache_build_ids();
}
lseek(fd, sec_start, SEEK_SET);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
new file mode 100644
index 000000000000..72f0b6ab5ea5
--- /dev/null
+++ b/tools/perf/util/map.h
@@ -0,0 +1,73 @@
+#ifndef __PERF_MAP_H
+#define __PERF_MAP_H
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+enum map_type {
+ MAP__FUNCTION = 0,
+ MAP__VARIABLE,
+};
+
+#define MAP__NR_TYPES (MAP__VARIABLE + 1)
+
+struct dso;
+
+struct map {
+ union {
+ struct rb_node rb_node;
+ struct list_head node;
+ };
+ u64 start;
+ u64 end;
+ enum map_type type;
+ u64 pgoff;
+ u64 (*map_ip)(struct map *, u64);
+ u64 (*unmap_ip)(struct map *, u64);
+ struct dso *dso;
+};
+
+static inline u64 map__map_ip(struct map *map, u64 ip)
+{
+ return ip - map->start + map->pgoff;
+}
+
+static inline u64 map__unmap_ip(struct map *map, u64 ip)
+{
+ return ip + map->start - map->pgoff;
+}
+
+static inline u64 identity__map_ip(struct map *map __used, u64 ip)
+{
+ return ip;
+}
+
+struct symbol;
+struct mmap_event;
+
+typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
+
+void map__init(struct map *self, enum map_type type,
+ u64 start, u64 end, u64 pgoff, struct dso *dso);
+struct map *map__new(struct mmap_event *event, enum map_type,
+ char *cwd, int cwdlen);
+void map__delete(struct map *self);
+struct map *map__clone(struct map *self);
+int map__overlap(struct map *l, struct map *r);
+size_t map__fprintf(struct map *self, FILE *fp);
+
+struct perf_session;
+
+int map__load(struct map *self, struct perf_session *session,
+ symbol_filter_t filter);
+struct symbol *map__find_symbol(struct map *self, struct perf_session *session,
+ u64 addr, symbol_filter_t filter);
+struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
+ struct perf_session *session,
+ symbol_filter_t filter);
+void map__fixup_start(struct map *self);
+void map__fixup_end(struct map *self);
+
+#endif /* __PERF_MAP_H */
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index a4086aaddb73..e3f396806e6e 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -1,6 +1,8 @@
#ifndef _PROBE_FINDER_H
#define _PROBE_FINDER_H
+#include "util.h"
+
#define MAX_PATH_LEN 256
#define MAX_PROBE_BUFFER 1024
#define MAX_PROBES 128
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ce3a6c8abe76..7f0537d1add8 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -66,6 +66,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
self->mmap_window = 32;
self->cwd = NULL;
self->cwdlen = 0;
+ self->unknown_events = 0;
map_groups__init(&self->kmaps);
if (perf_session__create_kernel_maps(self) < 0)
@@ -73,6 +74,8 @@ struct perf_session *perf_session__new(const char *filename, int mode, bool forc
if (mode == O_RDONLY && perf_session__open(self, force) < 0)
goto out_delete;
+
+ self->sample_type = perf_header__sample_type(&self->header);
out:
return self;
out_free:
@@ -148,3 +151,253 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
return syms;
}
+
+static int process_event_stub(event_t *event __used,
+ struct perf_session *session __used)
+{
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
+static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
+{
+ if (handler->sample == NULL)
+ handler->sample = process_event_stub;
+ if (handler->mmap == NULL)
+ handler->mmap = process_event_stub;
+ if (handler->comm == NULL)
+ handler->comm = process_event_stub;
+ if (handler->fork == NULL)
+ handler->fork = process_event_stub;
+ if (handler->exit == NULL)
+ handler->exit = process_event_stub;
+ if (handler->lost == NULL)
+ handler->lost = process_event_stub;
+ if (handler->read == NULL)
+ handler->read = process_event_stub;
+ if (handler->throttle == NULL)
+ handler->throttle = process_event_stub;
+ if (handler->unthrottle == NULL)
+ handler->unthrottle = process_event_stub;
+}
+
+static const char *event__name[] = {
+ [0] = "TOTAL",
+ [PERF_RECORD_MMAP] = "MMAP",
+ [PERF_RECORD_LOST] = "LOST",
+ [PERF_RECORD_COMM] = "COMM",
+ [PERF_RECORD_EXIT] = "EXIT",
+ [PERF_RECORD_THROTTLE] = "THROTTLE",
+ [PERF_RECORD_UNTHROTTLE] = "UNTHROTTLE",
+ [PERF_RECORD_FORK] = "FORK",
+ [PERF_RECORD_READ] = "READ",
+ [PERF_RECORD_SAMPLE] = "SAMPLE",
+};
+
+unsigned long event__total[PERF_RECORD_MAX];
+
+void event__print_totals(void)
+{
+ int i;
+ for (i = 0; i < PERF_RECORD_MAX; ++i)
+ pr_info("%10s events: %10ld\n",
+ event__name[i], event__total[i]);
+}
+
+static int perf_session__process_event(struct perf_session *self,
+ event_t *event,
+ struct perf_event_ops *ops,
+ unsigned long offset, unsigned long head)
+{
+ trace_event(event);
+
+ if (event->header.type < PERF_RECORD_MAX) {
+ dump_printf("%p [%p]: PERF_RECORD_%s",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event__name[event->header.type]);
+ ++event__total[0];
+ ++event__total[event->header.type];
+ }
+
+ switch (event->header.type) {
+ case PERF_RECORD_SAMPLE:
+ return ops->sample(event, self);
+ case PERF_RECORD_MMAP:
+ return ops->mmap(event, self);
+ case PERF_RECORD_COMM:
+ return ops->comm(event, self);
+ case PERF_RECORD_FORK:
+ return ops->fork(event, self);
+ case PERF_RECORD_EXIT:
+ return ops->exit(event, self);
+ case PERF_RECORD_LOST:
+ return ops->lost(event, self);
+ case PERF_RECORD_READ:
+ return ops->read(event, self);
+ case PERF_RECORD_THROTTLE:
+ return ops->throttle(event, self);
+ case PERF_RECORD_UNTHROTTLE:
+ return ops->unthrottle(event, self);
+ default:
+ self->unknown_events++;
+ return -1;
+ }
+}
+
+int perf_header__read_build_ids(int input, u64 offset, u64 size)
+{
+ struct build_id_event bev;
+ char filename[PATH_MAX];
+ u64 limit = offset + size;
+ int err = -1;
+
+ while (offset < limit) {
+ struct dso *dso;
+ ssize_t len;
+
+ if (read(input, &bev, sizeof(bev)) != sizeof(bev))
+ goto out;
+
+ len = bev.header.size - sizeof(bev);
+ if (read(input, filename, len) != len)
+ goto out;
+
+ dso = dsos__findnew(filename);
+ if (dso != NULL)
+ dso__set_build_id(dso, &bev.build_id);
+
+ offset += bev.header.size;
+ }
+ err = 0;
+out:
+ return err;
+}
+
+static struct thread *perf_session__register_idle_thread(struct perf_session *self)
+{
+ struct thread *thread = perf_session__findnew(self, 0);
+
+ if (thread == NULL || thread__set_comm(thread, "swapper")) {
+ pr_err("problem inserting idle task.\n");
+ thread = NULL;
+ }
+
+ return thread;
+}
+
+int perf_session__process_events(struct perf_session *self,
+ struct perf_event_ops *ops)
+{
+ int err;
+ unsigned long head, shift;
+ unsigned long offset = 0;
+ size_t page_size;
+ event_t *event;
+ uint32_t size;
+ char *buf;
+
+ if (perf_session__register_idle_thread(self) == NULL)
+ return -ENOMEM;
+
+ perf_event_ops__fill_defaults(ops);
+
+ page_size = getpagesize();
+
+ head = self->header.data_offset;
+
+ if (!symbol_conf.full_paths) {
+ char bf[PATH_MAX];
+
+ if (getcwd(bf, sizeof(bf)) == NULL) {
+ err = -errno;
+out_getcwd_err:
+ pr_err("failed to get the current directory\n");
+ goto out_err;
+ }
+ self->cwd = strdup(bf);
+ if (self->cwd == NULL) {
+ err = -ENOMEM;
+ goto out_getcwd_err;
+ }
+ self->cwdlen = strlen(self->cwd);
+ }
+
+ shift = page_size * (head / page_size);
+ offset += shift;
+ head -= shift;
+
+remap:
+ buf = mmap(NULL, page_size * self->mmap_window, PROT_READ,
+ MAP_SHARED, self->fd, offset);
+ if (buf == MAP_FAILED) {
+ pr_err("failed to mmap file\n");
+ err = -errno;
+ goto out_err;
+ }
+
+more:
+ event = (event_t *)(buf + head);
+
+ size = event->header.size;
+ if (size == 0)
+ size = 8;
+
+ if (head + event->header.size >= page_size * self->mmap_window) {
+ int munmap_ret;
+
+ shift = page_size * (head / page_size);
+
+ munmap_ret = munmap(buf, page_size * self->mmap_window);
+ assert(munmap_ret == 0);
+
+ offset += shift;
+ head -= shift;
+ goto remap;
+ }
+
+ size = event->header.size;
+
+ dump_printf("\n%p [%p]: event: %d\n",
+ (void *)(offset + head),
+ (void *)(long)event->header.size,
+ event->header.type);
+
+ if (size == 0 ||
+ perf_session__process_event(self, event, ops, offset, head) < 0) {
+ dump_printf("%p [%p]: skipping unknown header type: %d\n",
+ (void *)(offset + head),
+ (void *)(long)(event->header.size),
+ event->header.type);
+ /*
+ * assume we lost track of the stream, check alignment, and
+ * increment a single u64 in the hope to catch on again 'soon'.
+ */
+ if (unlikely(head & 7))
+ head &= ~7ULL;
+
+ size = 8;
+ }
+
+ head += size;
+
+ if (offset + head >= self->header.data_offset + self->header.data_size)
+ goto done;
+
+ if (offset + head < self->size)
+ goto more;
+done:
+ err = 0;
+out_err:
+ return err;
+}
+
+bool perf_session__has_traces(struct perf_session *self, const char *msg)
+{
+ if (!(self->sample_type & PERF_SAMPLE_RAW)) {
+ pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
+ return false;
+ }
+
+ return true;
+}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 32eaa1bada06..77c5ee2993c2 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -20,6 +20,7 @@ struct perf_session {
struct thread *last_match;
struct events_stats events_stats;
unsigned long event_total[PERF_RECORD_MAX];
+ unsigned long unknown_events;
struct rb_root hists;
u64 sample_type;
int fd;
@@ -31,18 +32,15 @@ struct perf_session {
typedef int (*event_op)(event_t *self, struct perf_session *session);
struct perf_event_ops {
- event_op process_sample_event;
- event_op process_mmap_event;
- event_op process_comm_event;
- event_op process_fork_event;
- event_op process_exit_event;
- event_op process_lost_event;
- event_op process_read_event;
- event_op process_throttle_event;
- event_op process_unthrottle_event;
- int (*sample_type_check)(struct perf_session *session);
- unsigned long total_unknown;
- bool full_paths;
+ event_op sample,
+ mmap,
+ comm,
+ fork,
+ exit,
+ lost,
+ read,
+ throttle,
+ unthrottle;
};
struct perf_session *perf_session__new(const char *filename, int mode, bool force);
@@ -56,6 +54,8 @@ struct symbol **perf_session__resolve_callchain(struct perf_session *self,
struct ip_callchain *chain,
struct symbol **parent);
+bool perf_session__has_traces(struct perf_session *self, const char *msg);
+
int perf_header__read_build_ids(int input, u64 offset, u64 file_size);
#endif /* __PERF_SESSION_H */
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index ab92763edb03..79ca6a099f96 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -22,6 +22,7 @@
enum dso_origin {
DSO__ORIG_KERNEL = 0,
DSO__ORIG_JAVA_JIT,
+ DSO__ORIG_BUILD_ID_CACHE,
DSO__ORIG_FEDORA,
DSO__ORIG_UBUNTU,
DSO__ORIG_BUILDID,
@@ -1191,6 +1192,7 @@ char dso__symtab_origin(const struct dso *self)
static const char origin[] = {
[DSO__ORIG_KERNEL] = 'k',
[DSO__ORIG_JAVA_JIT] = 'j',
+ [DSO__ORIG_BUILD_ID_CACHE] = 'B',
[DSO__ORIG_FEDORA] = 'f',
[DSO__ORIG_UBUNTU] = 'u',
[DSO__ORIG_BUILDID] = 'b',
@@ -1209,6 +1211,7 @@ int dso__load(struct dso *self, struct map *map, struct perf_session *session,
int size = PATH_MAX;
char *name;
u8 build_id[BUILD_ID_SIZE];
+ char build_id_hex[BUILD_ID_SIZE * 2 + 1];
int ret = -1;
int fd;
@@ -1230,8 +1233,16 @@ int dso__load(struct dso *self, struct map *map, struct perf_session *session,
return ret;
}
- self->origin = DSO__ORIG_FEDORA - 1;
+ self->origin = DSO__ORIG_BUILD_ID_CACHE;
+ if (self->has_build_id) {
+ build_id__sprintf(self->build_id, sizeof(self->build_id),
+ build_id_hex);
+ snprintf(name, size, "%s/%s/.build-id/%.2s/%s",
+ getenv("HOME"), DEBUG_CACHE_DIR,
+ build_id_hex, build_id_hex + 2);
+ goto open_file;
+ }
more:
do {
self->origin++;
@@ -1247,8 +1258,6 @@ more:
case DSO__ORIG_BUILDID:
if (filename__read_build_id(self->long_name, build_id,
sizeof(build_id))) {
- char build_id_hex[BUILD_ID_SIZE * 2 + 1];
-
build_id__sprintf(build_id, sizeof(build_id),
build_id_hex);
snprintf(name, size,
@@ -1276,7 +1285,7 @@ compare_build_id:
if (!dso__build_id_equal(self, build_id))
goto more;
}
-
+open_file:
fd = open(name, O_RDONLY);
} while (fd < 0);
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 8aded2356f79..f27e158943e9 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -8,6 +8,8 @@
#include <linux/rbtree.h>
#include "event.h"
+#define DEBUG_CACHE_DIR ".debug"
+
#ifdef HAVE_CPLUS_DEMANGLE
extern char *cplus_demangle(const char *, int);
@@ -58,7 +60,8 @@ struct symbol_conf {
sort_by_name,
show_nr_samples,
use_callchain,
- exclude_other;
+ exclude_other,
+ full_paths;
const char *vmlinux_name,
*field_sep;
char *dso_list_str,
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index cace35595530..407fd65b6cdb 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -20,6 +20,7 @@
*/
#define _GNU_SOURCE
#include <dirent.h>
+#include <mntent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -37,6 +38,7 @@
#include "../perf.h"
#include "trace-event.h"
+#include "debugfs.h"
#define VERSION "0.5"
@@ -101,32 +103,12 @@ void *malloc_or_die(unsigned int size)
static const char *find_debugfs(void)
{
- static char debugfs[MAX_PATH+1];
- static int debugfs_found;
- char type[100];
- FILE *fp;
-
- if (debugfs_found)
- return debugfs;
-
- if ((fp = fopen("/proc/mounts","r")) == NULL)
- die("Can't open /proc/mounts for read");
-
- while (fscanf(fp, "%*s %"
- STR(MAX_PATH)
- "s %99s %*s %*d %*d\n",
- debugfs, type) == 2) {
- if (strcmp(type, "debugfs") == 0)
- break;
- }
- fclose(fp);
-
- if (strcmp(type, "debugfs") != 0)
- die("debugfs not mounted, please mount");
+ const char *path = debugfs_mount(NULL);
- debugfs_found = 1;
+ if (!path)
+ die("Your kernel not support debugfs filesystem");
- return debugfs;
+ return path;
}
/*
@@ -271,6 +253,8 @@ static void read_header_files(void)
write_or_die("header_page", 12);
write_or_die(&size, 8);
check_size = copy_file_fd(fd);
+ close(fd);
+
if (size != check_size)
die("wrong size for '%s' size=%lld read=%lld",
path, size, check_size);
@@ -289,6 +273,7 @@ static void read_header_files(void)
if (size != check_size)
die("wrong size for '%s'", path);
put_tracing_file(path);
+ close(fd);
}
static bool name_in_tp_list(char *sys, struct tracepoint_path *tps)
@@ -317,7 +302,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
die("can't read directory '%s'", sys);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
!name_in_tp_list(dent->d_name, tps))
continue;
@@ -334,7 +320,8 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
rewinddir(dir);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
!name_in_tp_list(dent->d_name, tps))
continue;
@@ -353,6 +340,7 @@ static void copy_event_system(const char *sys, struct tracepoint_path *tps)
free(format);
}
+ closedir(dir);
}
static void read_ftrace_files(struct tracepoint_path *tps)
@@ -394,26 +382,21 @@ static void read_event_files(struct tracepoint_path *tps)
die("can't read directory '%s'", path);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
continue;
- sys = malloc_or_die(strlen(path) + strlen(dent->d_name) + 2);
- sprintf(sys, "%s/%s", path, dent->d_name);
- ret = stat(sys, &st);
- free(sys);
- if (ret < 0)
- continue;
- if (S_ISDIR(st.st_mode))
- count++;
+ count++;
}
write_or_die(&count, 4);
rewinddir(dir);
while ((dent = readdir(dir))) {
- if (strcmp(dent->d_name, ".") == 0 ||
+ if (dent->d_type != DT_DIR ||
+ strcmp(dent->d_name, ".") == 0 ||
strcmp(dent->d_name, "..") == 0 ||
strcmp(dent->d_name, "ftrace") == 0 ||
!system_in_tp_list(dent->d_name, tps))
@@ -422,14 +405,13 @@ static void read_event_files(struct tracepoint_path *tps)
sprintf(sys, "%s/%s", path, dent->d_name);
ret = stat(sys, &st);
if (ret >= 0) {
- if (S_ISDIR(st.st_mode)) {
- write_or_die(dent->d_name, strlen(dent->d_name) + 1);
- copy_event_system(sys, tps);
- }
+ write_or_die(dent->d_name, strlen(dent->d_name) + 1);
+ copy_event_system(sys, tps);
}
free(sys);
}
+ closedir(dir);
put_tracing_file(path);
}
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
new file mode 100644
index 000000000000..f3c0798a5e78
--- /dev/null
+++ b/tools/perf/util/util.c
@@ -0,0 +1,69 @@
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include "util.h"
+
+int mkdir_p(char *path, mode_t mode)
+{
+ struct stat st;
+ int err;
+ char *d = path;
+
+ if (*d != '/')
+ return -1;
+
+ if (stat(path, &st) == 0)
+ return 0;
+
+ while (*++d == '/');
+
+ while ((d = strchr(d, '/'))) {
+ *d = '\0';
+ err = stat(path, &st) && mkdir(path, mode);
+ *d++ = '/';
+ if (err)
+ return -1;
+ while (*d == '/')
+ ++d;
+ }
+ return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
+}
+
+int copyfile(const char *from, const char *to)
+{
+ int fromfd, tofd;
+ struct stat st;
+ void *addr;
+ int err = -1;
+
+ if (stat(from, &st))
+ goto out;
+
+ fromfd = open(from, O_RDONLY);
+ if (fromfd < 0)
+ goto out;
+
+ tofd = creat(to, 0755);
+ if (tofd < 0)
+ goto out_close_from;
+
+ addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
+ if (addr == MAP_FAILED)
+ goto out_close_to;
+
+ if (write(tofd, addr, st.st_size) == st.st_size)
+ err = 0;
+
+ munmap(addr, st.st_size);
+out_close_to:
+ close(tofd);
+ if (err)
+ unlink(to);
+out_close_from:
+ close(fromfd);
+out:
+ return err;
+}
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index c673d8825883..0f5b2a6f1080 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -403,4 +403,7 @@ void git_qsort(void *base, size_t nmemb, size_t size,
#endif
#endif
+int mkdir_p(char *path, mode_t mode);
+int copyfile(const char *from, const char *to);
+
#endif
diff --git a/usr/Kconfig b/usr/Kconfig
index 1c3039f28909..e2721f5a3504 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -72,6 +72,15 @@ config RD_LZMA
Support loading of a LZMA encoded initial ramdisk or cpio buffer
If unsure, say N.
+config RD_LZO
+ bool "Support initial ramdisks compressed using LZO" if EMBEDDED
+ default !EMBEDDED
+ depends on BLK_DEV_INITRD
+ select DECOMPRESS_LZO
+ help
+ Support loading of a LZO encoded initial ramdisk or cpio buffer
+ If unsure, say N.
+
choice
prompt "Built-in initramfs compression mode" if INITRAMFS_SOURCE!=""
help
@@ -108,16 +117,15 @@ config INITRAMFS_COMPRESSION_GZIP
bool "Gzip"
depends on RD_GZIP
help
- The old and tried gzip compression. Its compression ratio is
- the poorest among the 3 choices; however its speed (both
- compression and decompression) is the fastest.
+ The old and tried gzip compression. It provides a good balance
+ between compression ratio and decompression speed.
config INITRAMFS_COMPRESSION_BZIP2
bool "Bzip2"
depends on RD_BZIP2
help
Its compression ratio and speed is intermediate.
- Decompression speed is slowest among the three. The initramfs
+ Decompression speed is slowest among the four. The initramfs
size is about 10% smaller with bzip2, in comparison to gzip.
Bzip2 uses a large amount of memory. For modern kernels you
will need at least 8MB RAM or more for booting.
@@ -128,7 +136,15 @@ config INITRAMFS_COMPRESSION_LZMA
help
The most recent compression algorithm.
Its ratio is best, decompression speed is between the other
- two. Compression is slowest. The initramfs size is about 33%
+ three. Compression is slowest. The initramfs size is about 33%
smaller with LZMA in comparison to gzip.
+config INITRAMFS_COMPRESSION_LZO
+ bool "LZO"
+ depends on RD_LZO
+ help
+ Its compression ratio is the poorest among the four. The kernel
+ size is about about 10% bigger than gzip; however its speed
+ (both compression and decompression) is the fastest.
+
endchoice
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index daece36c0a57..7f1178f6b839 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -12,3 +12,6 @@ config HAVE_KVM_EVENTFD
config KVM_APIC_ARCHITECTURE
bool
+
+config KVM_MMIO
+ bool
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index f73de631e3ee..f51e684dd238 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -504,12 +504,12 @@ out:
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
struct kvm_assigned_pci_dev *assigned_dev)
{
- int r = 0;
+ int r = 0, idx;
struct kvm_assigned_dev_kernel *match;
struct pci_dev *dev;
mutex_lock(&kvm->lock);
- down_read(&kvm->slots_lock);
+ idx = srcu_read_lock(&kvm->srcu);
match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
assigned_dev->assigned_dev_id);
@@ -573,7 +573,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
}
out:
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
out_list_del:
@@ -585,7 +585,7 @@ out_put:
pci_dev_put(dev);
out_free:
kfree(match);
- up_read(&kvm->slots_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
mutex_unlock(&kvm->lock);
return r;
}
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 04d69cd7049b..5de6594260cb 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -92,23 +92,46 @@ static const struct kvm_io_device_ops coalesced_mmio_ops = {
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
struct kvm_coalesced_mmio_dev *dev;
+ struct page *page;
int ret;
+ ret = -ENOMEM;
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!page)
+ goto out_err;
+ kvm->coalesced_mmio_ring = page_address(page);
+
+ ret = -ENOMEM;
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
- return -ENOMEM;
+ goto out_free_page;
spin_lock_init(&dev->lock);
kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;
- ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &dev->dev);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0)
- kfree(dev);
+ goto out_free_dev;
+
+ return ret;
+out_free_dev:
+ kfree(dev);
+out_free_page:
+ __free_page(page);
+out_err:
return ret;
}
+void kvm_coalesced_mmio_free(struct kvm *kvm)
+{
+ if (kvm->coalesced_mmio_ring)
+ free_page((unsigned long)kvm->coalesced_mmio_ring);
+}
+
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
@@ -117,16 +140,16 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return -ENOBUFS;
}
dev->zone[dev->nb_zones] = *zone;
dev->nb_zones++;
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
@@ -140,7 +163,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
i = dev->nb_zones;
while(i) {
@@ -158,7 +181,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
i--;
}
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
diff --git a/virt/kvm/coalesced_mmio.h b/virt/kvm/coalesced_mmio.h
index 4b49f27fa31e..8a5959e3535f 100644
--- a/virt/kvm/coalesced_mmio.h
+++ b/virt/kvm/coalesced_mmio.h
@@ -1,3 +1,6 @@
+#ifndef __KVM_COALESCED_MMIO_H__
+#define __KVM_COALESCED_MMIO_H__
+
/*
* KVM coalesced MMIO
*
@@ -7,6 +10,8 @@
*
*/
+#ifdef CONFIG_KVM_MMIO
+
#define KVM_COALESCED_MMIO_ZONE_MAX 100
struct kvm_coalesced_mmio_dev {
@@ -18,7 +23,17 @@ struct kvm_coalesced_mmio_dev {
};
int kvm_coalesced_mmio_init(struct kvm *kvm);
+void kvm_coalesced_mmio_free(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
+
+#else
+
+static inline int kvm_coalesced_mmio_init(struct kvm *kvm) { return 0; }
+static inline void kvm_coalesced_mmio_free(struct kvm *kvm) { }
+
+#endif
+
+#endif
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 30f70fd511c4..e8664a0463c1 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -451,7 +451,7 @@ static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
- struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+ enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p;
struct eventfd_ctx *eventfd;
int ret;
@@ -496,7 +496,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
else
p->wildcard = true;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
/* Verify that there isnt a match already */
if (ioeventfd_check_collision(kvm, p)) {
@@ -506,18 +506,18 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
kvm_iodevice_init(&p->dev, &ioeventfd_ops);
- ret = __kvm_io_bus_register_dev(bus, &p->dev);
+ ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
if (ret < 0)
goto unlock_fail;
list_add_tail(&p->list, &kvm->ioeventfds);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
unlock_fail:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
fail:
kfree(p);
@@ -530,7 +530,7 @@ static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
- struct kvm_io_bus *bus = pio ? &kvm->pio_bus : &kvm->mmio_bus;
+ enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
struct _ioeventfd *p, *tmp;
struct eventfd_ctx *eventfd;
int ret = -ENOENT;
@@ -539,7 +539,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
@@ -553,13 +553,13 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (!p->wildcard && p->datamatch != args->datamatch)
continue;
- __kvm_io_bus_unregister_dev(bus, &p->dev);
+ kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
ioeventfd_release(p);
ret = 0;
break;
}
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
eventfd_ctx_put(eventfd);
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 38a2d20b89de..a2edfd177faf 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -100,6 +100,19 @@ static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx)
return injected;
}
+static void update_handled_vectors(struct kvm_ioapic *ioapic)
+{
+ DECLARE_BITMAP(handled_vectors, 256);
+ int i;
+
+ memset(handled_vectors, 0, sizeof(handled_vectors));
+ for (i = 0; i < IOAPIC_NUM_PINS; ++i)
+ __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
+ memcpy(ioapic->handled_vectors, handled_vectors,
+ sizeof(handled_vectors));
+ smp_wmb();
+}
+
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
@@ -134,6 +147,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
e->bits |= (u32) val;
e->fields.remote_irr = 0;
}
+ update_handled_vectors(ioapic);
mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
@@ -241,6 +255,9 @@ void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+ smp_rmb();
+ if (!test_bit(vector, ioapic->handled_vectors))
+ return;
mutex_lock(&ioapic->lock);
__kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
mutex_unlock(&ioapic->lock);
@@ -352,6 +369,7 @@ void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
ioapic->ioregsel = 0;
ioapic->irr = 0;
ioapic->id = 0;
+ update_handled_vectors(ioapic);
}
static const struct kvm_io_device_ops ioapic_mmio_ops = {
@@ -372,7 +390,9 @@ int kvm_ioapic_init(struct kvm *kvm)
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
ioapic->kvm = kvm;
- ret = kvm_io_bus_register_dev(kvm, &kvm->mmio_bus, &ioapic->dev);
+ mutex_lock(&kvm->slots_lock);
+ ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0)
kfree(ioapic);
@@ -399,6 +419,7 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
mutex_lock(&ioapic->lock);
memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
+ update_handled_vectors(ioapic);
mutex_unlock(&ioapic->lock);
return 0;
}
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 419c43b667ab..a505ce9054f3 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -46,6 +46,7 @@ struct kvm_ioapic {
struct kvm *kvm;
void (*ack_notifier)(void *opaque, int irq);
struct mutex lock;
+ DECLARE_BITMAP(handled_vectors, 256);
};
#ifdef DEBUG
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 15147583abd1..65a51432c8e5 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -32,10 +32,10 @@ static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
gfn_t base_gfn, unsigned long npages);
-int kvm_iommu_map_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages)
+int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
- gfn_t gfn = base_gfn;
+ gfn_t gfn = slot->base_gfn;
+ unsigned long npages = slot->npages;
pfn_t pfn;
int i, r = 0;
struct iommu_domain *domain = kvm->arch.iommu_domain;
@@ -54,7 +54,7 @@ int kvm_iommu_map_pages(struct kvm *kvm,
if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn)))
continue;
- pfn = gfn_to_pfn(kvm, gfn);
+ pfn = gfn_to_pfn_memslot(kvm, slot, gfn);
r = iommu_map_range(domain,
gfn_to_gpa(gfn),
pfn_to_hpa(pfn),
@@ -69,17 +69,19 @@ int kvm_iommu_map_pages(struct kvm *kvm,
return 0;
unmap_pages:
- kvm_iommu_put_pages(kvm, base_gfn, i);
+ kvm_iommu_put_pages(kvm, slot->base_gfn, i);
return r;
}
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
int i, r = 0;
+ struct kvm_memslots *slots;
+
+ slots = rcu_dereference(kvm->memslots);
- for (i = 0; i < kvm->nmemslots; i++) {
- r = kvm_iommu_map_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ for (i = 0; i < slots->nmemslots; i++) {
+ r = kvm_iommu_map_pages(kvm, &slots->memslots[i]);
if (r)
break;
}
@@ -210,10 +212,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots;
+
+ slots = rcu_dereference(kvm->memslots);
- for (i = 0; i < kvm->nmemslots; i++) {
- kvm_iommu_put_pages(kvm, kvm->memslots[i].base_gfn,
- kvm->memslots[i].npages);
+ for (i = 0; i < slots->nmemslots; i++) {
+ kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
+ slots->memslots[i].npages);
}
return 0;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a944be392d6e..9843f3d3b768 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -44,6 +44,7 @@
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
+#include <linux/srcu.h>
#include <asm/processor.h>
#include <asm/io.h>
@@ -51,9 +52,7 @@
#include <asm/pgtable.h>
#include <asm-generic/bitops/le.h>
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
-#endif
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
@@ -86,6 +85,8 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
static int hardware_enable_all(void);
static void hardware_disable_all(void);
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+
static bool kvm_rebooting;
static bool largepages_enabled = true;
@@ -215,7 +216,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush;
+ int need_tlb_flush, idx;
/*
* When ->invalidate_page runs, the linux pte has been zapped
@@ -235,10 +236,12 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
* pte after kvm_unmap_hva returned, without noticing the page
* is going to be freed.
*/
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
need_tlb_flush = kvm_unmap_hva(kvm, address);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
@@ -252,11 +255,14 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
pte_t pte)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int idx;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
kvm_set_spte_hva(kvm, address, pte);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
}
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
@@ -265,8 +271,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int need_tlb_flush = 0;
+ int need_tlb_flush = 0, idx;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
@@ -277,6 +284,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
for (; start < end; start += PAGE_SIZE)
need_tlb_flush |= kvm_unmap_hva(kvm, start);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
@@ -314,11 +322,13 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
- int young;
+ int young, idx;
+ idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
young = kvm_age_hva(kvm, address);
spin_unlock(&kvm->mmu_lock);
+ srcu_read_unlock(&kvm->srcu, idx);
if (young)
kvm_flush_remote_tlbs(kvm);
@@ -341,11 +351,25 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
+
+static int kvm_init_mmu_notifier(struct kvm *kvm)
+{
+ kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
+ return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
+}
+
+#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
+
+static int kvm_init_mmu_notifier(struct kvm *kvm)
+{
+ return 0;
+}
+
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
static struct kvm *kvm_create_vm(void)
{
- int r = 0;
+ int r = 0, i;
struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
@@ -363,39 +387,49 @@ static struct kvm *kvm_create_vm(void)
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
+ r = -ENOMEM;
+ kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!kvm->memslots)
+ goto out_err;
+ if (init_srcu_struct(&kvm->srcu))
+ goto out_err;
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
+ GFP_KERNEL);
+ if (!kvm->buses[i]) {
+ cleanup_srcu_struct(&kvm->srcu);
+ goto out_err;
+ }
+ }
+
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
- r = -ENOMEM;
+ cleanup_srcu_struct(&kvm->srcu);
goto out_err;
}
+
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif
-#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
- {
- kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
- r = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
- if (r) {
+ r = kvm_init_mmu_notifier(kvm);
+ if (r) {
+ cleanup_srcu_struct(&kvm->srcu);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- put_page(page);
+ put_page(page);
#endif
- goto out_err;
- }
+ goto out_err;
}
-#endif
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
spin_lock_init(&kvm->requests_lock);
- kvm_io_bus_init(&kvm->pio_bus);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
- kvm_io_bus_init(&kvm->mmio_bus);
- init_rwsem(&kvm->slots_lock);
+ mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
@@ -406,12 +440,12 @@ static struct kvm *kvm_create_vm(void)
out:
return kvm;
-#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
- (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
out_err:
hardware_disable_all();
-#endif
out_err_nodisable:
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kfree(kvm->buses[i]);
+ kfree(kvm->memslots);
kfree(kvm);
return ERR_PTR(r);
}
@@ -446,13 +480,17 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
void kvm_free_physmem(struct kvm *kvm)
{
int i;
+ struct kvm_memslots *slots = kvm->memslots;
+
+ for (i = 0; i < slots->nmemslots; ++i)
+ kvm_free_physmem_slot(&slots->memslots[i], NULL);
- for (i = 0; i < kvm->nmemslots; ++i)
- kvm_free_physmem_slot(&kvm->memslots[i], NULL);
+ kfree(kvm->memslots);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
+ int i;
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
@@ -460,18 +498,16 @@ static void kvm_destroy_vm(struct kvm *kvm)
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
- kvm_io_bus_destroy(&kvm->pio_bus);
- kvm_io_bus_destroy(&kvm->mmio_bus);
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
- if (kvm->coalesced_mmio_ring != NULL)
- free_page((unsigned long)kvm->coalesced_mmio_ring);
-#endif
+ for (i = 0; i < KVM_NR_BUSES; i++)
+ kvm_io_bus_destroy(kvm->buses[i]);
+ kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
kvm_arch_flush_shadow(kvm);
#endif
kvm_arch_destroy_vm(kvm);
+ cleanup_srcu_struct(&kvm->srcu);
hardware_disable_all();
mmdrop(mm);
}
@@ -512,12 +548,13 @@ int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
- int r;
+ int r, flush_shadow = 0;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
+ struct kvm_memslots *slots, *old_memslots;
r = -EINVAL;
/* General sanity checks */
@@ -532,7 +569,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
- memslot = &kvm->memslots[mem->slot];
+ memslot = &kvm->memslots->memslots[mem->slot];
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
@@ -553,7 +590,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *s = &kvm->memslots[i];
+ struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
@@ -579,15 +616,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(new.rmap, 0, npages * sizeof(*new.rmap));
new.user_alloc = user_alloc;
- /*
- * hva_to_rmmap() serialzies with the mmu_lock and to be
- * safe it has to ignore memslots with !user_alloc &&
- * !userspace_addr.
- */
- if (user_alloc)
- new.userspace_addr = mem->userspace_addr;
- else
- new.userspace_addr = 0;
+ new.userspace_addr = mem->userspace_addr;
}
if (!npages)
goto skip_lpage;
@@ -642,8 +671,9 @@ skip_lpage:
if (!new.dirty_bitmap)
goto out_free;
memset(new.dirty_bitmap, 0, dirty_bytes);
+ /* destroy any largepage mappings for dirty tracking */
if (old.npages)
- kvm_arch_flush_shadow(kvm);
+ flush_shadow = 1;
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
@@ -651,36 +681,72 @@ skip_lpage:
new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */
- if (!npages)
+ if (!npages) {
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ if (mem->slot >= slots->nmemslots)
+ slots->nmemslots = mem->slot + 1;
+ slots->memslots[mem->slot].flags |= KVM_MEMSLOT_INVALID;
+
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+ /* From this point no new shadow pages pointing to a deleted
+ * memslot will be created.
+ *
+ * validation of sp->gfn happens in:
+ * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
+ * - kvm_is_visible_gfn (mmu_check_roots)
+ */
kvm_arch_flush_shadow(kvm);
+ kfree(old_memslots);
+ }
- spin_lock(&kvm->mmu_lock);
- if (mem->slot >= kvm->nmemslots)
- kvm->nmemslots = mem->slot + 1;
-
- *memslot = new;
- spin_unlock(&kvm->mmu_lock);
-
- r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
- if (r) {
- spin_lock(&kvm->mmu_lock);
- *memslot = old;
- spin_unlock(&kvm->mmu_lock);
+ r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
+ if (r)
goto out_free;
- }
- kvm_free_physmem_slot(&old, npages ? &new : NULL);
- /* Slot deletion case: we have to update the current slot */
- spin_lock(&kvm->mmu_lock);
- if (!npages)
- *memslot = old;
- spin_unlock(&kvm->mmu_lock);
#ifdef CONFIG_DMAR
/* map the pages in iommu page table */
- r = kvm_iommu_map_pages(kvm, base_gfn, npages);
- if (r)
- goto out;
+ if (npages) {
+ r = kvm_iommu_map_pages(kvm, &new);
+ if (r)
+ goto out_free;
+ }
#endif
+
+ r = -ENOMEM;
+ slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+ memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
+ if (mem->slot >= slots->nmemslots)
+ slots->nmemslots = mem->slot + 1;
+
+ /* actual memory is freed via old in kvm_free_physmem_slot below */
+ if (!npages) {
+ new.rmap = NULL;
+ new.dirty_bitmap = NULL;
+ for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
+ new.lpage_info[i] = NULL;
+ }
+
+ slots->memslots[mem->slot] = new;
+ old_memslots = kvm->memslots;
+ rcu_assign_pointer(kvm->memslots, slots);
+ synchronize_srcu_expedited(&kvm->srcu);
+
+ kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
+
+ kvm_free_physmem_slot(&old, &new);
+ kfree(old_memslots);
+
+ if (flush_shadow)
+ kvm_arch_flush_shadow(kvm);
+
return 0;
out_free:
@@ -697,9 +763,9 @@ int kvm_set_memory_region(struct kvm *kvm,
{
int r;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem, user_alloc);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
@@ -726,7 +792,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
- memslot = &kvm->memslots[log->slot];
+ memslot = &kvm->memslots->memslots[log->slot];
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
@@ -780,9 +846,10 @@ EXPORT_SYMBOL_GPL(kvm_is_error_hva);
struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
- for (i = 0; i < kvm->nmemslots; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ for (i = 0; i < slots->nmemslots; ++i) {
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
@@ -801,10 +868,14 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
int i;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
- gfn = unalias_gfn(kvm, gfn);
+ gfn = unalias_gfn_instantiation(kvm, gfn);
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
- struct kvm_memory_slot *memslot = &kvm->memslots[i];
+ struct kvm_memory_slot *memslot = &slots->memslots[i];
+
+ if (memslot->flags & KVM_MEMSLOT_INVALID)
+ continue;
if (gfn >= memslot->base_gfn
&& gfn < memslot->base_gfn + memslot->npages)
@@ -814,33 +885,44 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
+int memslot_id(struct kvm *kvm, gfn_t gfn)
+{
+ int i;
+ struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
+ struct kvm_memory_slot *memslot = NULL;
+
+ gfn = unalias_gfn(kvm, gfn);
+ for (i = 0; i < slots->nmemslots; ++i) {
+ memslot = &slots->memslots[i];
+
+ if (gfn >= memslot->base_gfn
+ && gfn < memslot->base_gfn + memslot->npages)
+ break;
+ }
+
+ return memslot - slots->memslots;
+}
+
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *slot;
- gfn = unalias_gfn(kvm, gfn);
+ gfn = unalias_gfn_instantiation(kvm, gfn);
slot = gfn_to_memslot_unaliased(kvm, gfn);
- if (!slot)
+ if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return bad_hva();
return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
-pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
{
struct page *page[1];
- unsigned long addr;
int npages;
pfn_t pfn;
might_sleep();
- addr = gfn_to_hva(kvm, gfn);
- if (kvm_is_error_hva(addr)) {
- get_page(bad_page);
- return page_to_pfn(bad_page);
- }
-
npages = get_user_pages_fast(addr, 1, 1, page);
if (unlikely(npages != 1)) {
@@ -865,8 +947,32 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
return pfn;
}
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
+{
+ unsigned long addr;
+
+ addr = gfn_to_hva(kvm, gfn);
+ if (kvm_is_error_hva(addr)) {
+ get_page(bad_page);
+ return page_to_pfn(bad_page);
+ }
+
+ return hva_to_pfn(kvm, addr);
+}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
+static unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
+}
+
+pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ unsigned long addr = gfn_to_hva_memslot(slot, gfn);
+ return hva_to_pfn(kvm, addr);
+}
+
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
pfn_t pfn;
@@ -1854,12 +1960,7 @@ static struct notifier_block kvm_reboot_notifier = {
.priority = 0,
};
-void kvm_io_bus_init(struct kvm_io_bus *bus)
-{
- memset(bus, 0, sizeof(*bus));
-}
-
-void kvm_io_bus_destroy(struct kvm_io_bus *bus)
+static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
int i;
@@ -1868,13 +1969,15 @@ void kvm_io_bus_destroy(struct kvm_io_bus *bus)
kvm_iodevice_destructor(pos);
}
+ kfree(bus);
}
/* kvm_io_bus_write - called under kvm->slots_lock */
-int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
+int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
int i;
+ struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
return 0;
@@ -1882,59 +1985,71 @@ int kvm_io_bus_write(struct kvm_io_bus *bus, gpa_t addr,
}
/* kvm_io_bus_read - called under kvm->slots_lock */
-int kvm_io_bus_read(struct kvm_io_bus *bus, gpa_t addr, int len, void *val)
+int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val)
{
int i;
+ struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
+
for (i = 0; i < bus->dev_count; i++)
if (!kvm_iodevice_read(bus->devs[i], addr, len, val))
return 0;
return -EOPNOTSUPP;
}
-int kvm_io_bus_register_dev(struct kvm *kvm, struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
+/* Caller must hold slots_lock. */
+int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- int ret;
+ struct kvm_io_bus *new_bus, *bus;
- down_write(&kvm->slots_lock);
- ret = __kvm_io_bus_register_dev(bus, dev);
- up_write(&kvm->slots_lock);
-
- return ret;
-}
-
-/* An unlocked version. Caller must have write lock on slots_lock. */
-int __kvm_io_bus_register_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
-{
+ bus = kvm->buses[bus_idx];
if (bus->dev_count > NR_IOBUS_DEVS-1)
return -ENOSPC;
- bus->devs[bus->dev_count++] = dev;
+ new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;
+ memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+ new_bus->devs[new_bus->dev_count++] = dev;
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
return 0;
}
-void kvm_io_bus_unregister_dev(struct kvm *kvm,
- struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
+/* Caller must hold slots_lock. */
+int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- down_write(&kvm->slots_lock);
- __kvm_io_bus_unregister_dev(bus, dev);
- up_write(&kvm->slots_lock);
-}
+ int i, r;
+ struct kvm_io_bus *new_bus, *bus;
-/* An unlocked version. Caller must have write lock on slots_lock. */
-void __kvm_io_bus_unregister_dev(struct kvm_io_bus *bus,
- struct kvm_io_device *dev)
-{
- int i;
+ new_bus = kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL);
+ if (!new_bus)
+ return -ENOMEM;
- for (i = 0; i < bus->dev_count; i++)
- if (bus->devs[i] == dev) {
- bus->devs[i] = bus->devs[--bus->dev_count];
+ bus = kvm->buses[bus_idx];
+ memcpy(new_bus, bus, sizeof(struct kvm_io_bus));
+
+ r = -ENOENT;
+ for (i = 0; i < new_bus->dev_count; i++)
+ if (new_bus->devs[i] == dev) {
+ r = 0;
+ new_bus->devs[i] = new_bus->devs[--new_bus->dev_count];
break;
}
+
+ if (r) {
+ kfree(new_bus);
+ return r;
+ }
+
+ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
+ synchronize_srcu_expedited(&kvm->srcu);
+ kfree(bus);
+ return r;
}
static struct notifier_block kvm_cpu_notifier = {